The project uses Dubbo with Spring Boot 2.1.1.RELEASE. First, let's look at our dependencies.
Note: common is the shared JAR package in our project that holds common classes, interfaces, constants, and so on.
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.hmily.dubbo</groupId>
<artifactId>common</artifactId>
<version>0.0.1-SNAPSHOT</version>
</dependency>
Take a look at the configuration of application.properties
server.port=8020
server.servlet.context-path=/
# snowFlake configuration
## server setting for LongID Gene
snowFlake.workerId = 0
snowFlake.datacenterId = 0
# Dubbo configuration
# Dubbo Config properties
dubbo.application.id=rabbitmq-snowFlake
dubbo.application.name=rabbitmq-snowFlake
dubbo.application.qosPort=22212
dubbo.application.qosEnable=true
dubbo.scan.basePackages=com.hmily.dubbo.snowFlakeDemo.*
dubbo.protocol.id=dubbo
dubbo.protocol.name=dubbo
dubbo.protocol.port=12343
dubbo.registry.id=rabbitmq-snowFlake-registry
dubbo.registry.address=zookeeper://130.80.151.179:2181
# Enables Dubbo All Endpoints
management.endpoint.dubbo.enabled = true
management.endpoint.dubbo-shutdown.enabled = true
management.endpoint.dubbo-configs.enabled = true
management.endpoint.dubbo-services.enabled = true
management.endpoint.dubbo-references.enabled = true
management.endpoint.dubbo-properties.enabled = true
# Dubbo Health
## StatusChecker Name defaults (default : "memory", "load" )
management.health.dubbo.status.defaults = memory
## StatusChecker Name extras (default : empty )
management.health.dubbo.status.extras = load,threadpool
Then there’s our core SnowFlake implementation
public class SnowFlake {
protected static final Logger LOG = LoggerFactory.getLogger(SnowFlake.class);
@Value("snowFlake.workerId")
private static long workerId;
@Value("snowFlake.datacenterId") private static long datacenterId; static SnowFlake instance = new SnowFlake(workerId, datacenterId); private long sequence = 0L; private long twepoch = 1288834974657L; Private Long workerIdBits = 5L; Private Long datacenterIdBits = 5L; Private Long maxWorkerId = -1l ^ (-1L << workerIdBits); private Long maxWorkerId = -1L ^ (-1L << workerIdBits); // Maximum datacenter ID private Long maxDatacenterId = -1L ^ (-1L << datacenterIdBits); Private Long sequenceBits = 12L; Private Long workerIdShift = sequenceBits; Private Long datacenterIdShift = sequenceBits + workerIdBits; Private Long timestampLeftShift = sequenceBits + workerIdBits + datacenterIdBits; private Long timestampLeftShift = sequenceBits + workerIdBits + datacenterIdBits; private long sequenceMask = -1L ^ (-1L << sequenceBits); private long lastTimestamp = -1L; public SnowFlake(long workerId, long datacenterId) { // sanity checkfor workerId
if (workerId > maxWorkerId || workerId < 0) {
throw new IllegalArgumentException(String.format("worker Id can't be greater than %d or less than 0", maxWorkerId));
}
if (datacenterId > maxDatacenterId || datacenterId < 0) {
throw new IllegalArgumentException(String.format("datacenter Id can't be greater than %d or less than 0", maxDatacenterId));
}
this.workerId = workerId;
this.datacenterId = datacenterId;
//LOG.info(String.format("worker starting. timestamp left shift %d, datacenter id bits %d, worker id bits %d, sequence bits %d, workerid %d", timestampLeftShift, datacenterIdBits, workerIdBits, sequenceBits, workerId));
}
public synchronized long nextId() {
long timestamp = timeGen();
if (timestamp < lastTimestamp) {
LOG.error(String.format("clock is moving backwards. Rejecting requests until %d.", lastTimestamp));
throw new RuntimeException(String.format("Clock moved backwards. Refusing to generate id for %d milliseconds", lastTimestamp - timestamp));
}
if (lastTimestamp == timestamp) {
sequence = (sequence + 1) & sequenceMask;
if (sequence == 0) {
timestamp = tilNextMillis(lastTimestamp);
}
} else {
sequence = 0L;
}
lastTimestamp = timestamp;
// compose the id: timestamp (41 bits) | datacenter id (5 bits) | worker id (5 bits) | sequence (12 bits)
return ((timestamp - twepoch) << timestampLeftShift) | (datacenterId << datacenterIdShift) | (workerId << workerIdShift) | sequence;
}
protected long tilNextMillis(long lastTimestamp) {
long timestamp = timeGen();
while (timestamp <= lastTimestamp) {
timestamp = timeGen();
}
return timestamp;
}
protected long timeGen() {
return System.currentTimeMillis();
}
public static long getId() {
return instance.nextId();
}
}
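One caveat worth noting: Spring never injects @Value into static fields, and the static instance above is created at class-loading time, so as written the generator silently falls back to workerId = 0 and datacenterId = 0. A minimal sketch of one way to wire in the snowFlake.* properties (assuming this helper lives in the same package as SnowFlake, since instance is package-private; the class name is my own) could look like this:

import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

// Hypothetical helper, not in the original project: rebuilds the shared
// SnowFlake instance once the snowFlake.* properties have been resolved,
// because Spring cannot populate static fields via @Value.
@Component
public class SnowFlakeInitializer {

    public SnowFlakeInitializer(@Value("${snowFlake.workerId}") long workerId,
                                @Value("${snowFlake.datacenterId}") long datacenterId) {
        SnowFlake.instance = new SnowFlake(workerId, datacenterId);
    }
}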
Next, wrap ID generation in a service interface
public interface ISnowFlakeService {
long getSnowFlakeID();
long[] getSnowFlakeIDs(int size);
}
Write a service implementation class
@Service
public class SnowFlakeServiceImpl implements ISnowFlakeService {
private final static Logger log = LoggerFactory.getLogger(SnowFlakeServiceImpl.class);
@Override
public long getSnowFlakeID() {
long id = SnowFlake.getId();
log.info("id: {}", id);
return id;
}
@Override
public long[] getSnowFlakeIDs(int size) {
if (size < 1) {
throw new SnowFlakeCustomException(500, " size is illegal");
}
long[] ids = new long[size];
for (int i = 0; i < size; i++) {
long id = SnowFlake.getId();
ids[i] = id;
log.info("id: {}", id);
}
return ids;
}
}
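SnowFlakeCustomException is not shown in the post; a minimal sketch that would compile against the call above (the constructor signature is inferred from that call, the code field is my own; the real class lives in the project) could be:

// Hypothetical minimal version of the custom exception referenced above.
public class SnowFlakeCustomException extends RuntimeException {

    private final int code;

    public SnowFlakeCustomException(int code, String message) {
        super(message);
        this.code = code;
    }

    public int getCode() {
        return code;
    }
}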
Let’s write a Controller and see if we can generate an ID
@RestController
public class TestController {
private static final Logger log = LoggerFactory.getLogger(TestController.class);
@GetMapping("/test")
public String test() {
return "hello";
}
@GetMapping("/test/longid")
public String testId() {
String res = null;
for(int i = 0; i < 5; i++) {
Long id = SnowFlake.getId();
log.info("id: {}", id);
if (i == 0) {
res = id.toString();
}
}
return res;
}
}
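As an optional standalone check (not part of the original project), the following sketch generates a batch of IDs and verifies that a single SnowFlake instance produces unique, strictly increasing values:

import java.util.HashSet;
import java.util.Set;

// Quick sanity check: ids from one SnowFlake instance should be unique
// and strictly increasing (larger timestamp, or larger sequence within
// the same millisecond).
public class SnowFlakeSanityCheck {
    public static void main(String[] args) {
        Set<Long> seen = new HashSet<>();
        long previous = -1L;
        for (int i = 0; i < 100_000; i++) {
            long id = SnowFlake.getId();
            if (!seen.add(id) || id <= previous) {
                throw new IllegalStateException("duplicate or non-increasing id: " + id);
            }
            previous = id;
        }
        System.out.println("generated " + seen.size() + " unique, increasing ids");
    }
}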
Hit /test/longid (http://localhost:8020/test/longid with the configuration above) to confirm that IDs are generated. Once IDs can be generated locally, we expose them as a Dubbo RPC service. First, the common JAR package defines the external RPC interface contract, such as the ISnowFlakeServiceApi interface, which fixes the parameter and return types agreed between the server and its callers.
public interface ISnowFlakeServiceApi {
long getSnowFlakeID();
long[] getSnowFlakeIDs(int size);
}
Then, in snowFlakeDemo (the snowflake algorithm implementation and external service provider), we implement this interface
@Service(
version = "1.0.0",
application = "${dubbo.application.id}",
protocol = "${dubbo.protocol.id}",
registry = "${dubbo.registry.id}"
)
public class SnowFlakeProvider implements ISnowFlakeServiceApi {
@Autowired
private ISnowFlakeService snowFlakeService;
@Override
public long getSnowFlakeID() {
return snowFlakeService.getSnowFlakeID();
}
@Override
public long[] getSnowFlakeIDs(int size){
return snowFlakeService.getSnowFlakeIDs(size);
}
}
Note: the @Service annotation here is Dubbo's com.alibaba.dubbo.config.annotation.Service, not Spring's.
Our consumer project, rabbitmq-common, also depends on common
<dependency>
<groupId>com.hmily.dubbo</groupId>
<artifactId>common</artifactId>
<version>0.0.1-SNAPSHOT</version>
</dependency>
Take a look at the configuration of application.properties
server.port=8030
server.servlet.context-path=/
spring.http.encoding.charset=UTF-8
spring.jackson.date-format=yyyy-MM-dd HH:mm:ss
spring.jackson.time-zone=GMT+8
spring.jackson.default-property-inclusion=NON_NULL
# Dubbo Config properties
dubbo.application.id=rabbitmq-common
dubbo.application.name=rabbitmq-common
dubbo.application.qosPort=22212
dubbo.application.qosEnable=true
dubbo.scan.basePackages=com.hmily.rabbitmq.rabbitmqcommon.*
dubbo.protocol.id=dubbo
dubbo.protocol.name=dubbo
dubbo.protocol.port=12343
dubbo.registry.id=rabbitmq-common-registry
dubbo.registry.address=zookeeper://130.80.151.179:2181
# Enables Dubbo All Endpoints
management.endpoint.dubbo.enabled = true
management.endpoint.dubbo-shutdown.enabled = true
management.endpoint.dubbo-configs.enabled = true
management.endpoint.dubbo-services.enabled = true
management.endpoint.dubbo-references.enabled = true
management.endpoint.dubbo-properties.enabled = true
# Dubbo Health
## StatusChecker Name defaults (default : "memory", "load" )
management.health.dubbo.status.defaults = memory
## StatusChecker Name extras (default : empty )
management.health.dubbo.status.extras = load,threadpool
Write a test controller to check that the RPC interface can be called. Note that ${snowFlakeServiceApi.version} must resolve to the provider's version, 1.0.0, so define snowFlakeServiceApi.version=1.0.0 in the consumer's application.properties.
@RestController
public class TestController {
private static final Logger log = LoggerFactory.getLogger(TestController.class);
@Reference(version = "${snowFlakeServiceApi.version}",
application = "${dubbo.application.id}",
interfaceName = "com.hmily.dubbo.common.service.ISnowFlakeServiceApi",
check = false,
timeout = 1000,
retries = 0
)
private ISnowFlakeServiceApi snowFlakeServiceApi;
@GetMapping("/test/longid/rpc")
public String testIdByRPC() {
Long id = snowFlakeServiceApi.getSnowFlakeID();
log.info("id: {}", id);
return id.toString();
}
}
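The controller above only exercises getSnowFlakeID(). If you also want to try the batch method over RPC, a hypothetical extra endpoint in the same TestController (the path and parameter are my own; it needs imports for java.util.Arrays and @RequestParam) could look like this:

// Hypothetical extra endpoint exercising the batch RPC method.
@GetMapping("/test/longids/rpc")
public String testIdsByRPC(@RequestParam(defaultValue = "5") int size) {
    long[] ids = snowFlakeServiceApi.getSnowFlakeIDs(size);
    log.info("ids: {}", Arrays.toString(ids));
    return Arrays.toString(ids);
}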
With that, the SnowFlake global unique ID generation service is complete.
Complete code: https://github.com/hmilyos/common.git https://github.com/hmilyos/snowFlakeDemo.git https://github.com/hmilyos/rabbitmq-common.git (see the available branch of each repository)