Kafka version: 2.3.0

This article gives a brief introduction to connecting to and using Kafka from Java.


Maven dependencies (key configuration)

<dependencies>
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>2.3.0</version>
    </dependency>
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>4.11</version>
        <scope>test</scope>
    </dependency>
</dependencies>
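
The examples below write to and read from the topics test-001 and test. If automatic topic creation is disabled on the broker, they can be created with the AdminClient that ships in the same kafka-clients artifact. A minimal sketch, assuming a single-node test broker; the package name, class name, partition count and replication factor are illustrative, not part of the original example:

package com.julong.admin;

import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

/**
 * Sketch: create the topics used by the producer/consumer examples (assumed settings).
 */
public class TopicAdminMain {

    public static void main(String[] args) throws Exception {
        Properties properties = new Properties();
        properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.10.222:9092");
        try (AdminClient adminClient = AdminClient.create(properties)) {
            // 1 partition / replication factor 1 are illustrative values for a single-node test broker
            NewTopic topic1 = new NewTopic("test-001", 1, (short) 1);
            NewTopic topic2 = new NewTopic("test", 1, (short) 1);
            adminClient.createTopics(Arrays.asList(topic1, topic2)).all().get();
            System.out.println("topics created");
        }
    }
}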

Producer

package com.julong.producer;

import java.util.Properties;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

/**
 * Kafka producer
 * @author julong
 * @date 2021-11-17 22:17:24
 */
public class ProducerMain {

    public static void main(String[] args) {
        Properties properties = new Properties();
        // Broker address
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.10.222:9092");
        properties.put(ProducerConfig.CLIENT_ID_CONFIG, "julong001");
        // Acknowledgement mode:
        // acks=0       the producer does not wait for any acknowledgement
        // acks=1       only the partition leader has to acknowledge the write (data may be lost if the leader fails)
        // acks=all(-1) all replicas in the ISR must acknowledge; the safest option
        properties.put(ProducerConfig.ACKS_CONFIG, "-1");
        // Batch size in bytes: records for the same partition are batched up to this size before being sent
        //properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        // How long (ms) to wait for more records before sending a batch
        //properties.put(ProducerConfig.LINGER_MS_CONFIG, 1000);
        // Maximum request size in bytes
        //properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 1048576);

        // Key/value serializers
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.IntegerSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        Producer<Integer, String> kafkaProducer = new KafkaProducer<Integer, String>(properties);
        System.out.println("connected");
        for (int i = 0; i < 100; i++) {
            final int index = i;
            // First record
            ProducerRecord<Integer, String> producerRecord = new ProducerRecord<Integer, String>("test-001", i, "test data_" + i);
            // Second record
            ProducerRecord<Integer, String> producerRecord1 = new ProducerRecord<Integer, String>("test", i, "test data_test" + i);
            // send() is asynchronous by default; the callback is invoked when the send completes
            kafkaProducer.send(producerRecord, new Callback() {
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception == null) {
                        System.out.println("sent: " + index + "," + metadata.partition() + "," + metadata.offset());
                    } else {
                        exception.printStackTrace();
                    }
                }
            });
            kafkaProducer.send(producerRecord1, new Callback() {
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception == null) {
                        System.out.println("sent: " + index + "," + metadata.partition() + "," + metadata.offset());
                    } else {
                        exception.printStackTrace();
                    }
                }
            });
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
        kafkaProducer.close();
    }

}
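
send() returns a Future<RecordMetadata>, so when a blocking, per-record confirmation is needed instead of a callback, the returned future can simply be waited on. A minimal sketch along the lines of the producer above; the class name and record contents are illustrative assumptions:

package com.julong.producer;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

/**
 * Sketch: synchronous send by blocking on the Future returned by send().
 */
public class SyncProducerMain {

    public static void main(String[] args) throws Exception {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.10.222:9092");
        properties.put(ProducerConfig.ACKS_CONFIG, "all");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.IntegerSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");

        try (Producer<Integer, String> producer = new KafkaProducer<Integer, String>(properties)) {
            ProducerRecord<Integer, String> record = new ProducerRecord<Integer, String>("test-001", 1, "sync test message");
            // send() is asynchronous; get() blocks until the broker acknowledges the record or the send fails
            RecordMetadata metadata = producer.send(record).get();
            System.out.println("sent: " + metadata.topic() + "," + metadata.partition() + "," + metadata.offset());
        }
    }
}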

Consumer

package com.julong.consumer;

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

/**
 * Kafka consumer
 * @author julong
 * @date 2021-11-17 22:18:17
 */
public class ConsumerMain {

    public static void main(String[] args) {
        Properties properties = new Properties();
        // Broker address
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.10.222:9092");
        // Consumer group id
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "julong001");
        // Offset commit mode: true = auto commit, false = manual commit
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Auto-commit interval (ms); only takes effect when auto commit is enabled
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Key/value deserializers
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.IntegerDeserializer");
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

        // Where to start when there is no committed offset:
        // earliest: start from the earliest available offset
        // latest:   start from the latest offset (only new records)
        // none:     throw an exception if no committed offset exists
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        Consumer<Integer, String> kafkaConsumer = new KafkaConsumer<Integer, String>(properties);
        // Topics to subscribe to
        List<String> listArrays = new ArrayList<String>();
        listArrays.add("test-001");
        listArrays.add("test");
        kafkaConsumer.subscribe(listArrays);
        System.out.println("connected");
        while (true) {
            try {
                ConsumerRecords<Integer, String> consumerRecords = kafkaConsumer.poll(Duration.ofMillis(1000));
                System.out.println("consumerRecords:" + consumerRecords.count());
                for (ConsumerRecord<Integer, String> consumerRecord : consumerRecords) {
                    System.out.println(consumerRecord.topic() + " received: " + consumerRecord.value());
                    // With enable.auto.commit=false, offsets must be committed manually;
                    // commitAsync() commits the offsets returned by the last poll()
                    kafkaConsumer.commitAsync();
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }

}
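
Since enable.auto.commit is set to false above, every offset commit is explicit. Instead of calling commitAsync() once per record, a common alternative is to commit synchronously once per poll() batch, so the commit blocks until the broker has stored the offsets. A minimal sketch of that variant; the class name and group id are illustrative assumptions:

package com.julong.consumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

/**
 * Sketch: manual per-batch offset commit with commitSync().
 */
public class BatchCommitConsumerMain {

    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.10.222:9092");
        // Hypothetical group id for this sketch
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "julong002");
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.IntegerDeserializer");
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");

        Consumer<Integer, String> consumer = new KafkaConsumer<Integer, String>(properties);
        consumer.subscribe(Arrays.asList("test-001", "test"));
        while (true) {
            ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<Integer, String> record : records) {
                System.out.println(record.topic() + " received: " + record.value());
            }
            if (!records.isEmpty()) {
                // Blocking commit of the offsets returned by the last poll()
                consumer.commitSync();
            }
        }
    }
}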