try {
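    // Kafka's LogManager expects a broker state machine and a scheduler for its
    // background tasks (flushing, retention); set both up before constructing it.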
    final BrokerState brokerState = new BrokerState();
    brokerState.newState(RunningAsBroker.state());
    kafkaScheduler = new KafkaScheduler(2, "kafka-journal-scheduler-", false); // TODO make thread count configurable
    kafkaScheduler.startup();
    // The log manager configuration is derived from Graylog's configuration file.
    logManager = new LogManager(
            new File[]{journalDirectory.toFile()},
            Map$.MODULE$.<String, LogConfig>empty(),
            defaultConfig,
            cleanerConfig,
            NUM_IO_THREADS,
            SECONDS.toMillis(60L), // flush check interval
            SECONDS.toMillis(60L), // flush checkpoint interval
            SECONDS.toMillis(60L), // retention check interval
            kafkaScheduler,
            brokerState,
            JODA_TIME);
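    // Fetch the journal's backing log ("messagejournal", partition 0),
    // creating it with the default config on first start.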
    final TopicAndPartition topicAndPartition = new TopicAndPartition("messagejournal", 0);
    final Option<Log> messageLog = logManager.getLog(topicAndPartition);
    if (messageLog.isEmpty()) {
        kafkaLog = logManager.createLog(topicAndPartition, logManager.defaultConfig());
    } else {
        kafkaLog = messageLog.get();
    }
    // Set up more metrics
    this.metricRegistry.register(name(metricPrefix, METRIC_NAME_SIZE), (Gauge<Long>) kafkaLog::size);
    this.metricRegistry.register(name(metricPrefix, METRIC_NAME_LOG_END_OFFSET), (Gauge<Long>) kafkaLog::logEndOffset);
    this.metricRegistry.register(name(metricPrefix, METRIC_NAME_NUMBER_OF_SEGMENTS), (Gauge<Integer>) kafkaLog::numberOfSegments);
    this.metricRegistry.register(name(metricPrefix, METRIC_NAME_UNFLUSHED_MESSAGES), (Gauge<Long>) kafkaLog::unflushedMessages);
    this.metricRegistry.register(name(metricPrefix, METRIC_NAME_RECOVERY_POINT), (Gauge<Long>) kafkaLog::recoveryPoint);
    this.metricRegistry.register(name(metricPrefix, METRIC_NAME_LAST_FLUSH_TIME), (Gauge<Long>) kafkaLog::lastFlushTime);
    // must not be a lambda, because the serialization cannot determine the proper Metric type :(
    this.metricRegistry.register(getOldestSegmentMetricName(), (Gauge<Date>) new Gauge<Date>() {
        @Override
        public Date getValue() {
            long oldestSegment = Long.MAX_VALUE;
            for (final LogSegment segment : LocalKafkaJournal.this.getSegments()) {
                oldestSegment = Math.min(oldestSegment, segment.created());
            }
            return new Date(oldestSegment);
        }
    });
    LOG.info("Initialized Kafka-based journal at {}", journalDirectory);
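    // Housekeeping runnables; they are only constructed here and scheduled
    // elsewhere (not shown in this excerpt).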
    offsetFlusher = new OffsetFileFlusher();
    dirtyLogFlusher = new DirtyLogFlusher();
    recoveryCheckpointFlusher = new RecoveryCheckpointFlusher();
    logRetentionCleaner = new LogRetentionCleaner();
} catch (KafkaException e) {
    // most likely failed to grab the lock on the journal directory
    LOG.error("Unable to start LogManager.", e);
    throw new RuntimeException(e);
}