Preface
This article mainly looks at the pullFromDefaultCFAndPush method of DDMQ's chronos.
pullFromDefaultCFAndPush
DDMQ/carrera-chronos/src/main/java/com/xiaojukeji/chronos/services/MqPushService.java
public class MqPushService {

    // ......

    public void pullFromDefaultCFAndPush() {
        final long seekTimestamp = MetaService.getSeekTimestamp();
        final long zkSeekTimestamp = MetaService.getZkSeekTimestamp();

        // The seekTimestamp of backup cannot exceed the seekTimestamp of master
        if (MasterElection.isBackup()) {
            if (seekTimestamp >= zkSeekTimestamp) {
                LOGGER.debug("backup's pull from db should stop for seekTimestamp > zkSeekTimestamp, seekTimestamp:{}, zkSeekTimestamp:{}, Thread:{}",
                        seekTimestamp, zkSeekTimestamp, Thread.currentThread().getName());
                try {
                    TimeUnit.SECONDS.sleep(2);
                } catch (InterruptedException e) {
                }
                return;
            }
        }

        // seekTimestamp cannot exceed the current time
        final long now = TsUtils.genTS();
        if (seekTimestamp > now) {
            LOGGER.debug("pull from db should stop for seekTimestamp > now, seekTimestamp:{}, now:{}, round:{}, Thread:{}",
                    seekTimestamp, now, round, Thread.currentThread().getName());
            try {
                TimeUnit.MILLISECONDS.sleep(100);
            } catch (InterruptedException e) {
            }
            return;
        }

        round++;

        final long start = System.currentTimeMillis();
        final long diff = start / 1000 - seekTimestamp;
        LOGGER.info("pull from db start, seekTimestamp:{}, currTimestamp:{}, diff:{} round:{}",
                seekTimestamp, start / 1000, diff, round);
        MetricService.putSeekLatency(MasterElection.getState().toString(), diff + 10); // Because 0 is not displayed after being uploaded to metric

        // Iterate out all data under the current seekTimestamp
        int count = 0;
        try (RocksIterator it = RDB.newIterator(CFManager.CFH_DEFAULT)) {
            for (it.seek(KeyUtils.genSeekKey(seekTimestamp)); it.isValid(); it.next()) {
                final String dMsgId = new String(it.key());
                final InternalKey internalKey = new InternalKey(dMsgId);

                // ......

                boolean needMetricWriteQpsAfterSplit = false;

                // A loop message needs to insert a new message. If it fails, it will not be inserted again
                if (internalKey.getType() == MsgTypes.LOOP_DELAY.getValue()
                        || internalKey.getType() == MsgTypes.LOOP_EXPONENT_DELAY.getValue()) {
                    final InternalKey nextInternalKey = new InternalKey(internalKey).nextUniqDelayMsgId();
                    if (!KeyUtils.isInvalidMsg(nextInternalKey)) {
                        batcher.putToDefaultCF(nextInternalKey.genUniqDelayMsgIdWithSegmentInfoIfHas(), it.value(),
                                null, nextInternalKey, Actions.ADD.getValue());
                        needMetricWriteQpsAfterSplit = true;
                    }
                }

                byte[] bytes = it.value();
                if (internalKey.getSegmentNum() > 0) {
                    try {
                        OUTPUT.write(it.value());
                        LOGGER.info("segment merge, dMsgId:{}, value.len:{}, value.acc.len:{}",
                                internalKey.genUniqDelayMsgIdWithSegmentInfoIfHas(), it.value().length, OUTPUT.size());
                        if (internalKey.getSegmentNum() != (internalKey.getSegmentIndex() - Constants.SEGMENT_INDEX_BASE + 1)) {
                            continue;
                        }
                        bytes = OUTPUT.toByteArray();
                        OUTPUT.reset();
                    } catch (IOException e) {
                        LOGGER.error("error while output.write byte array, msg:{}", e.getMessage(), e);
                    }
                }

                // If it cannot be parsed, there is a problem with the format. Discard the message and do not block on it
                final InternalValue internalValue = JsonUtils.fromJsonString(bytes, InternalValue.class);
                if (internalValue == null) {
                    continue;
                }

                // ......

                count++;
                try {
                    blockingQueue.put(new InternalPair(internalKey, internalValue));
                } catch (InterruptedException e) {
                    LOGGER.error("error while put to blockingQueue, dMsgId:{}", dMsgId);
                }

                if (count % INTERNAL_PAIR_COUNT == 0) {
                    sendConcurrent(blockingQueue, round);
                }
            }
            sendConcurrent(blockingQueue, round);
        }

        needCancelMap.forEach((uniqDelayMsgId, tombstoneKey) -> {
            final InternalKey internalKey = new InternalKey(uniqDelayMsgId);
            final InternalKey tombstoneInternalKey = new InternalKey(tombstoneKey);

            // A remaining loop message needs to be added again if it is cancelled, otherwise it will not be deleted
            if (internalKey.getType() == MsgTypes.LOOP_DELAY.getValue()
                    || internalKey.getType() == MsgTypes.LOOP_EXPONENT_DELAY.getValue()) {
                final InternalKey nextTombstoneKey = tombstoneInternalKey.nextUniqDelayMsgId();
                final InternalKey nextInternalKey = internalKey.nextUniqDelayMsgId();
                if (!KeyUtils.isInvalidMsg(nextTombstoneKey)) {
                    String topic = needCancelTopicMap.get(uniqDelayMsgId);
                    batcher.putToDefaultCF(nextTombstoneKey.genUniqDelayMsgId(),
                            new CancelWrap(nextInternalKey.genUniqDelayMsgId(), topic).toJsonString(),
                            topic, nextInternalKey, Actions.CANCEL.getValue());
                } else {
                    LOGGER.info("pull from db succ cancel message of tombstone key, tombstone dMsgId:{}",
                            nextTombstoneKey.genUniqDelayMsgId());
                }
            }
        });

        batcher.flush();

        needCancelMap.clear();
        needCancelTopicMap.clear();

        // Update offset
        MetaService.nextSeekTimestamp();

        LOGGER.info("pull from db finish push, pushCost:{}ms, count:{}, seekTimestamp:{}, round:{}",
                System.currentTimeMillis() - start, count, seekTimestamp, round);
    }

    // ......
}
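One step in the loop above that is easy to miss is the segment merge: values too large for a single RocksDB entry are stored as numbered segments, and the loop appends them to OUTPUT until the last segment of a message arrives. Below is a minimal, JDK-only sketch of that accumulate-and-flush logic; the SegmentMergeSketch class, the accept helper and SEGMENT_INDEX_BASE = 1 are assumptions for illustration, not the chronos code.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class SegmentMergeSketch {
    private static final ByteArrayOutputStream OUTPUT = new ByteArrayOutputStream();
    private static final int SEGMENT_INDEX_BASE = 1; // assumed value of Constants.SEGMENT_INDEX_BASE

    // Appends one segment; returns the full message once the last segment arrives, else null.
    static byte[] accept(int segmentNum, int segmentIndex, byte[] value) throws IOException {
        OUTPUT.write(value);
        // Mirrors the check in pullFromDefaultCFAndPush: not yet the last segment
        if (segmentNum != (segmentIndex - SEGMENT_INDEX_BASE + 1)) {
            return null;
        }
        byte[] merged = OUTPUT.toByteArray();
        OUTPUT.reset();
        return merged;
    }

    public static void main(String[] args) throws IOException {
        byte[][] segments = { "hel".getBytes(StandardCharsets.UTF_8),
                              "lo".getBytes(StandardCharsets.UTF_8) };
        for (int i = 0; i < segments.length; i++) {
            byte[] merged = accept(segments.length, SEGMENT_INDEX_BASE + i, segments[i]);
            if (merged != null) {
                System.out.println(new String(merged, StandardCharsets.UTF_8)); // prints "hello"
            }
        }
    }
}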
- The pullFromDefaultCFAndPush method first obtains seekTimestamp and zkSeekTimestamp from MetaService and returns early if the node is a backup whose seekTimestamp has caught up with zkSeekTimestamp, or if seekTimestamp exceeds the current time. It then obtains a RocksIterator via RDB.newIterator(CFManager.CFH_DEFAULT) and iterates from the seek key, reading each dMsgId to construct an InternalKey; if its type is LOOP_DELAY or LOOP_EXPONENT_DELAY, the next occurrence of the message is written back to RocksDB through batcher.putToDefaultCF. It then reads it.value() to construct an InternalValue (merging segments first when segmentNum > 0), wraps key and value into an InternalPair and puts it onto blockingQueue, calls sendConcurrent whenever count % INTERNAL_PAIR_COUNT == 0 and once more after the loop ends, and finally advances the offset via MetaService.nextSeekTimestamp(). The seek-then-scan pattern is sketched below.
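For reference, the seek-then-scan access pattern the method relies on looks like this against the plain RocksDB Java API. This is a sketch, not chronos code: the SeekScanSketch class and the "<seconds>-<msgId>" key layout are assumptions standing in for KeyUtils.genSeekKey and the real key format.

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;

public class SeekScanSketch {
    static { RocksDB.loadLibrary(); }

    // Hypothetical key layout: "<10-digit-seconds>-<msgId>"; keys sort by timestamp.
    static byte[] genSeekKey(long seekTimestamp) {
        return String.format("%010d-", seekTimestamp).getBytes();
    }

    public static void main(String[] args) throws RocksDBException {
        try (Options options = new Options().setCreateIfMissing(true);
             RocksDB db = RocksDB.open(options, "/tmp/seek-scan-sketch")) {
            db.put("1700000000-a".getBytes(), "due".getBytes());
            db.put("1700000001-b".getBytes(), "later".getBytes());

            long seekTimestamp = 1700000000L;
            String prefix = new String(genSeekKey(seekTimestamp));
            try (RocksIterator it = db.newIterator()) {
                // seek positions the iterator at the first key >= the seek key
                for (it.seek(prefix.getBytes()); it.isValid(); it.next()) {
                    String key = new String(it.key());
                    // stop once we leave the current second, mirroring
                    // "iterate out all data under the current seekTimestamp"
                    if (!key.startsWith(prefix)) {
                        break;
                    }
                    System.out.println(key + " => " + new String(it.value()));
                }
            }
        }
    }
}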
sendConcurrent
DDMQ/carrera-chronos/src/main/java/com/xiaojukeji/chronos/services/MqPushService.java
public class MqPushService {

    // ......

    private void sendConcurrent(final BlockingQueue<InternalPair> blockingQueue, final long round) {
        if (blockingQueue.size() == 0) {
            LOGGER.info("pull from db sendConcurrent start, return for no message to send, round:{}", round);
            return;
        }

        final long sendCount = blockingQueue.size();
        LOGGER.info("pull from db sendConcurrent start, send count:{}, round:{}", sendCount, round);

        final long start = System.currentTimeMillis();
        final CountDownLatch cdl = new CountDownLatch(blockingQueue.size());

        InternalPair internalPair;
        while ((internalPair = blockingQueue.poll()) != null) {
            final InternalPair immutableInternalPair = internalPair;
            pushThreadPool.execute(() -> {
                while (!send(immutableInternalPair.getInternalValue().getTopic(),
                        immutableInternalPair.getInternalValue().getBody().getBytes(Charsets.UTF_8),
                        immutableInternalPair.getInternalKey(),
                        immutableInternalPair.getInternalValue().getTags(),
                        immutableInternalPair.getInternalValue().getProperties(),
                        false)) {
                    try {
                        TimeUnit.MILLISECONDS.sleep(100);
                    } catch (InterruptedException e) {
                    }
                }
                cdl.countDown();
            });
        }

        try {
            cdl.await();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

        final long cost = System.currentTimeMillis() - start;
        LOGGER.info("pull from db sendConcurrent end, send count:{}, round:{}, cost:{}ms", sendCount, round, cost);
    }

    // ......
}
- The sendConcurrent method drains blockingQueue via poll(), submits each InternalPair to pushThreadPool, and inside each task retries send every 100ms until it returns true; a CountDownLatch sized to the queue makes the method block until every message has been pushed. A JDK-only sketch of this pattern follows.
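The coordination pattern here can be reproduced with the JDK alone: drain with poll(), fan out to a pool, retry each send with a 100ms pause until it succeeds, and block on a latch. In the sketch below, SendConcurrentSketch and trySend are stand-ins for MqPushService and send.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;

public class SendConcurrentSketch {
    private static final ExecutorService POOL = Executors.newFixedThreadPool(4);

    // Stand-in for the real send(...): fails roughly one time in three.
    static boolean trySend(String msg) {
        return ThreadLocalRandom.current().nextInt(3) != 0;
    }

    static void sendConcurrent(BlockingQueue<String> queue) throws InterruptedException {
        final CountDownLatch cdl = new CountDownLatch(queue.size());
        String msg;
        while ((msg = queue.poll()) != null) {
            final String m = msg; // effectively final copy for the lambda
            POOL.execute(() -> {
                while (!trySend(m)) { // retry until success, as sendConcurrent does
                    try {
                        TimeUnit.MILLISECONDS.sleep(100);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                }
                cdl.countDown();
            });
        }
        cdl.await(); // block until every message has been sent
    }

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> q = new LinkedBlockingQueue<>();
        for (int i = 0; i < 10; i++) q.put("msg-" + i);
        sendConcurrent(q);
        POOL.shutdown();
    }
}

Sizing the latch from queue.size() before draining works here for the same reason it does in chronos: nothing else adds to the queue while sendConcurrent runs.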
send
DDMQ/carrera-chronos/src/main/java/com/xiaojukeji/chronos/services/MqPushService.java
public class MqPushService {

    // ......

    private boolean send(final String topic, final byte[] body, final InternalKey internalKey,
                         final String tags, final Map<String, String> properties, final boolean direct) {
        final long start = System.nanoTime();
        final String key = internalKey.genUniqDelayMsgId();

        MetricMsgType metricMsgType;
        if (internalKey.getType() == MsgTypes.DELAY.getValue()) {
            metricMsgType = MetricMsgType.DELAY;
        } else if (internalKey.getType() == MsgTypes.LOOP_DELAY.getValue()) {
            metricMsgType = MetricMsgType.LOOP_DELAY;
        } else {
            metricMsgType = MetricMsgType.UNKNOWN;
        }

        int len = 0;
        if (body != null) {
            len = body.length;
        }

        if (MasterElection.isBackup()) {
            if (direct) {
                LOGGER.info("succ send message(but cancel for backup) directly, topic:{}, dMsgId:{}, len:{}", topic, key, len);
                MetricService.incPushQps(topic, metricMsgType, MetricMsgToOrFrom.SEND, MetricPushMsgResult.BACKUP);
            } else {
                LOGGER.info("succ send message(but cancel for backup) from db, topic:{}, dMsgId:{}, len:{}", topic, key, len);
                MetricService.incPushQps(topic, metricMsgType, MetricMsgToOrFrom.DB, MetricPushMsgResult.BACKUP);
            }
            return true;
        }

        if (ConfigManager.getConfig().isFakeSend()) {
            try {
                TimeUnit.MILLISECONDS.sleep(1);
            } catch (InterruptedException e) {
            }
            if (direct) {
                LOGGER.info("succ send message directly(fakeSend), topic:{}, dMsgId:{}, len:{}", topic, key, len);
            } else {
                LOGGER.info("succ send message from db(fakeSend), topic:{}, dMsgId:{}, len:{}", topic, key, len);
            }
            return true;
        }

        MessageBuilder messageBuilder = producer.messageBuilder()
                .setTopic(topic)
                .setBody(body)
                .setKey(key)
                .setTags(tags)
                .setRandomPartition();
        if (properties != null && properties.size() > 0) {
            for (Map.Entry<String, String> entry : properties.entrySet()) {
                LOGGER.debug("properties, topic:{}, dMsgId:{}, key:{}, value:{}", topic, key, entry.getKey(), entry.getValue());
                // IMPORTANT: If use addProperty for isPressureTraffic, the property will be ignored
                if (PRESSURE_TRAFFIC_KEY.equals(entry.getKey())) {
                    messageBuilder.setPressureTraffic(Boolean.parseBoolean(entry.getValue()));
                } else {
                    messageBuilder.addProperty(entry.getKey(), entry.getValue());
                }
            }
        }
        messageBuilder.addProperty(PROPERTY_KEY_FROM_CHRONOS, PROPERTY_KEY_FROM_CHRONOS);

        final Result result = messageBuilder.send();
        final long cost = (System.nanoTime() - start) / 1000;
        MetricService.putPushLatency(topic, cost);

        if (result.getCode() == CarreraReturnCode.OK) {
            if (direct) {
                LOGGER.info("succ send message directly, topic:{}, dMsgId:{}, len:{}, result:{}, cost:{}us", topic, key, len, result, cost);
                MetricService.incPushQps(topic, metricMsgType, MetricMsgToOrFrom.SEND, MetricPushMsgResult.OK);
            } else {
                LOGGER.info("succ send message from db, topic:{}, dMsgId:{}, len:{}, result:{}, cost:{}us", topic, key, len, result, cost);
                MetricService.incPushQps(topic, metricMsgType, MetricMsgToOrFrom.DB, MetricPushMsgResult.OK);
            }
            return true;
        } else if (result.getCode() == CarreraReturnCode.FAIL_TOPIC_NOT_EXIST
                || result.getCode() == CarreraReturnCode.FAIL_TOPIC_NOT_ALLOWED
                || result.getCode() == CarreraReturnCode.FAIL_ILLEGAL_MSG
                || result.getCode() == CarreraReturnCode.MISSING_PARAMETERS) {
            if (direct) {
                LOGGER.error("fail send message directly, topic:{}, dMsgId:{}, len:{}, result:{}, cost:{}us", topic, key, len, result, cost);
                MetricService.incPushQps(topic, metricMsgType, MetricMsgToOrFrom.SEND, MetricPushMsgResult.FAIL);
            } else {
                LOGGER.error("fail send message from db, topic:{}, dMsgId:{}, len:{}, result:{}, cost:{}us", topic, key, len, result, cost);
                MetricService.incPushQps(topic, metricMsgType, MetricMsgToOrFrom.DB, MetricPushMsgResult.FAIL);
            }
            return true;
        } else {
            if (direct) {
                LOGGER.error("error while send message directly, topic:{}, dMsgId:{}, len:{}, result:{}, cost:{}us", topic, key, len, result, cost);
                MetricService.incPushQps(topic, metricMsgType, MetricMsgToOrFrom.SEND, MetricPushMsgResult.FAIL);
            } else {
                LOGGER.error("error while send message from db, topic:{}, dMsgId:{}, len:{}, result:{}, cost:{}us", topic, key, len, result, cost);
                MetricService.incPushQps(topic, metricMsgType, MetricMsgToOrFrom.DB, MetricPushMsgResult.FAIL);
            }
            return false;
        }
    }

    // ......
}
- The send method maps the message type to a MetricMsgType, short-circuits (returning true) for backup nodes and for fakeSend mode, and otherwise constructs a messageBuilder with topic, body, key, tags and a random partition, copies over the properties (routing PRESSURE_TRAFFIC_KEY through setPressureTraffic), and executes messageBuilder.send(). It returns true on OK and on non-retryable failure codes, and false only on transient errors, so the caller retries; see the sketch below.
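Note the return-value contract: send returns true both on OK and on permanent failures (FAIL_TOPIC_NOT_EXIST, FAIL_TOPIC_NOT_ALLOWED, FAIL_ILLEGAL_MSG, MISSING_PARAMETERS), so the retry loop in sendConcurrent stops in both cases; only transient errors return false. A minimal sketch of that contract, with an assumed Code enum standing in for CarreraReturnCode:

public class SendResultSketch {
    enum Code { OK, FAIL_TOPIC_NOT_EXIST, FAIL_ILLEGAL_MSG, TIMEOUT, UNKNOWN_ERROR }

    // true means "done, don't retry"; false means "transient error, retry".
    static boolean send(Code resultCode) {
        switch (resultCode) {
            case OK:
                return true;  // delivered
            case FAIL_TOPIC_NOT_EXIST:
            case FAIL_ILLEGAL_MSG:
                return true;  // permanent failure: retrying cannot help, drop it
            default:
                return false; // transient failure: the caller retries after 100ms
        }
    }

    public static void main(String[] args) {
        for (Code c : Code.values()) {
            System.out.println(c + " -> retry=" + !send(c));
        }
    }
}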
Summary
The pullFromDefaultCFAndPush method obtains seekTimestamp and zkSeekTimestamp from MetaService and returns early if seekTimestamp exceeds the current time (or, on a backup, if it has caught up with zkSeekTimestamp); it then iterates RocksDB via RDB.newIterator(CFManager.CFH_DEFAULT), parsing each dMsgId into an InternalKey, writing the next occurrence of LOOP_DELAY/LOOP_EXPONENT_DELAY messages back through batcher.putToDefaultCF, parsing it.value() into an InternalValue, queueing InternalPairs onto blockingQueue, and calling sendConcurrent whenever count % INTERNAL_PAIR_COUNT == 0 as well as once after the loop; finally it advances the offset via MetaService.nextSeekTimestamp(). sendConcurrent drains the queue into pushThreadPool, retries send until it succeeds, and waits on a CountDownLatch; send builds the message with messageBuilder, executes messageBuilder.send(), and reports metrics according to the result code.