Spark 2.4, works with Kafka 0.10.0 and above.
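A minimal build sketch of the dependencies the two examples below assume. The artifact names are the standard Spark/Kafka ones; the exact versions are assumptions chosen to match Spark 2.4, so align them with your cluster:

// build.sbt -- a minimal sketch; versions are assumptions
libraryDependencies ++= Seq(
  "org.apache.spark" %% "spark-streaming"             % "2.4.0" % "provided",
  "org.apache.spark" %% "spark-streaming-kafka-0-10"  % "2.4.0",
  "org.apache.kafka" %% "kafka"                       % "0.10.2.2", // kafka.utils.ZKGroupTopicDirs (example 1)
  "com.101tec"       %  "zkclient"                    % "0.10"      // org.I0Itec.zkclient.ZkClient (example 1)
)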
1. Storing offsets in ZooKeeper
import kafka.utils.ZKGroupTopicDirs
import org.I0Itec.zkclient.ZkClient
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object KafkaDirectWordCount_zookeeper {
  def main(args: Array[String]): Unit = {
    val group = "g001"
    val topic = "wordcount"
    val topics = Array(topic)
    // Create the SparkConf; when submitting to a cluster, remove .setMaster("local[2]")
    val conf = new SparkConf().setAppName("KafkaDirectWordCount_zookeeper").setMaster("local[2]")
    // Create a StreamingContext, which contains a SparkContext
    val streamingContext = new StreamingContext(conf, Seconds(5))
    // Kafka broker addresses (the Spark Streaming tasks connect directly to the Kafka
    // partitions and consume through the lower-level API, which is more efficient)
    val brokerList = "node01:9092,node02:9092,node03:9092"
    // ZooKeeper quorum, used later to update the consumed offsets
    // (Redis or MySQL could serve as the offset store instead)
    val zkQuorum = "node01:2181,node02:2181,node03:2181"
    // A ZKGroupTopicDirs object names the ZooKeeper directory where offsets are saved
    val topicDirs = new ZKGroupTopicDirs(group, topic)
    // The ZooKeeper path, e.g. "/g001/offsets/wordcount/"
    val zkTopicPath = s"${topicDirs.consumerOffsetDir}"
    // Kafka consumer parameters
    val kafkaParams = Map[String, Object](
      // Broker locations
      "bootstrap.servers" -> brokerList,
      // Deserializers for keys and values
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      "auto.offset.reset" -> "earliest", // or "latest"
      // Kafka's automatic offset tracking: before Kafka 0.10 offsets were kept in
      // ZooKeeper; from 0.10 on they live in the internal topic __consumer_offsets.
      // Auto-commit is disabled here because we maintain offsets ourselves.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )
    // Create a ZooKeeper client from the quorum; it reads saved offsets
    // from ZooKeeper and later updates them
    val zkClient = new ZkClient(zkQuorum)
    // Check whether the path has child nodes (child nodes exist if we previously
    // saved an offset for each partition), e.g.:
    // /g001/offsets/wordcount/0/10001
    // /g001/offsets/wordcount/1/30001
    val children = zkClient.countChildren(zkTopicPath)
    var kafkaStream: InputDStream[ConsumerRecord[String, String]] = null
    // Offsets have been saved before
    if (children > 0) {
      // ZooKeeper holds saved offsets, so use them as the starting position of kafkaStream
      val offsets: collection.mutable.Map[TopicPartition, Long] = collection.mutable.Map[TopicPartition, Long]()
      // Offsets were maintained manually:
      // 1. read them back from the store (ZooKeeper, Redis or MySQL)
      for (i <- 0 until children) {
        // /g001/offsets/wordcount/0
        val partitionOffset = zkClient.readData[Long](s"$zkTopicPath/$i")
        // wordcount/0
        val tp = new TopicPartition(topic, i)
        // Add each partition's offset to the fromOffsets map
        // wordcount/0 -> 10001
        offsets.put(tp, partitionOffset)
      }
      // Create the direct DStream through KafkaUtils (the offsets parameter makes
      // consumption resume from the offsets collected above)
      kafkaStream = KafkaUtils.createDirectStream[String, String](
        streamingContext,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](topics, kafkaParams, offsets)
      )
    } else {
      // Nothing saved yet: start from the latest or earliest offset,
      // as configured by auto.offset.reset in kafkaParams
      kafkaStream = KafkaUtils.createDirectStream[String, String](
        streamingContext,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
      )
    }
    // Record the offsets.
    // With the direct approach, offsets are only available on the KafkaRDD itself,
    // so no DStream transformation may run first: call foreachRDD on kafkaStream,
    // grab each RDD's offset ranges, and only then operate on the RDD.
    // Iterate over the KafkaRDDs in the KafkaDStream one by one
    kafkaStream.foreachRDD(rdd => {
      // Get the offset ranges carried by this RDD.
      // Only a KafkaRDD can be cast to HasOffsetRanges to expose its offsets
      val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      // Business logic
      val lines: RDD[String] = rdd.map(_.value())
      // Operate on the RDD and trigger an action
      lines.foreachPartition(partition =>
        partition.foreach(x => {
          println(x)
        })
      )
      // Save the offsets
      for (osr <- ranges) {
        // println(osr.topic + " " + osr.partition + " " + osr.fromOffset + " " + osr.untilOffset)
        // /g001/offsets/wordcount/0
        val zkPath = s"${topicDirs.consumerOffsetDir}/${osr.partition}"
        // Save this partition's offset to ZooKeeper
        // /g001/offsets/wordcount/0/20000
        // Create the node first if it does not exist
        if (!zkClient.exists(zkPath)) {
          zkClient.createPersistent(zkPath, true)
        }
        // Write the offset
        zkClient.writeData(zkPath, osr.untilOffset)
      }
    })
    streamingContext.start()
    streamingContext.awaitTermination()
  }
}
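The comments above note that Redis or MySQL could serve as the offset store instead of ZooKeeper. Below is a minimal sketch of a MySQL-backed variant, under assumptions: a hypothetical table kafka_offsets(grp, topic, `partition`, untilOffset) with a unique key on (grp, topic, `partition`), and placeholder JDBC URL and credentials. readOffsets would replace the ZooKeeper read loop, and saveOffsets the writeData loop inside foreachRDD.

import java.sql.DriverManager
import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.kafka010.OffsetRange

object MysqlOffsetStore {
  // Hypothetical connection settings; replace with your own
  private val url = "jdbc:mysql://node01:3306/kafka_meta"
  private val user = "root"
  private val password = "123456"

  // Read previously saved offsets for a consumer group and topic
  def readOffsets(group: String, topic: String): Map[TopicPartition, Long] = {
    val conn = DriverManager.getConnection(url, user, password)
    try {
      val ps = conn.prepareStatement(
        "SELECT `partition`, untilOffset FROM kafka_offsets WHERE grp = ? AND topic = ?")
      ps.setString(1, group)
      ps.setString(2, topic)
      val rs = ps.executeQuery()
      val offsets = collection.mutable.Map[TopicPartition, Long]()
      while (rs.next()) {
        offsets.put(new TopicPartition(topic, rs.getInt(1)), rs.getLong(2))
      }
      offsets.toMap
    } finally conn.close()
  }

  // Upsert the end offset of every range after the batch's output has completed
  def saveOffsets(group: String, ranges: Array[OffsetRange]): Unit = {
    val conn = DriverManager.getConnection(url, user, password)
    try {
      val ps = conn.prepareStatement(
        """INSERT INTO kafka_offsets (grp, topic, `partition`, untilOffset)
          |VALUES (?, ?, ?, ?)
          |ON DUPLICATE KEY UPDATE untilOffset = VALUES(untilOffset)""".stripMargin)
      for (osr <- ranges) {
        ps.setString(1, group)
        ps.setString(2, osr.topic)
        ps.setInt(3, osr.partition)
        ps.setLong(4, osr.untilOffset)
        ps.addBatch()
      }
      ps.executeBatch()
    } finally conn.close()
  }
}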
2. Storing offsets in Kafka
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges, KafkaUtils}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object KafkaDirectWordCount_kafka {
  def main(args: Array[String]): Unit = {
    val group = "g001"
    val topic = "my-orders"
    // Create the SparkConf; when submitting to a cluster, remove .setMaster("local[2]")
    val conf = new SparkConf().setAppName("DirectStream").setMaster("local[2]")
    // Create a StreamingContext, which contains a SparkContext
    val streamingContext = new StreamingContext(conf, Seconds(5))
    // Kafka broker addresses (the Spark Streaming tasks connect directly to the Kafka
    // partitions and consume through the lower-level API, which is more efficient)
    val brokerList = "node01:9092,node02:9092,node03:9092"
    // Kafka consumer parameters
    val kafkaParams = Map[String, Object](
      // Broker locations
      "bootstrap.servers" -> brokerList,
      // Deserializers for keys and values
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      "auto.offset.reset" -> "earliest", // or "latest"
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )
    val topics = Array(topic)
    // Read from Kafka with the direct approach; offsets are recorded in Kafka itself
    val stream = KafkaUtils.createDirectStream[String, String](
      streamingContext,
      // Location strategy (when Kafka and the Spark executors are co-located,
      // this picks the preferred host)
      PreferConsistent,
      // Subscription strategy (topics can also be subscribed by regex,
      // e.g. "my-orders-.*"; see the sketch after this listing)
      Subscribe[String, String](topics, kafkaParams)
    )
    // Iterate over the RDDs in the DStream, one per batch interval
    stream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        // Get the offset ranges of this RDD
        val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        // Process the data; foreach is an action
        rdd.foreach { line =>
          println(line.key() + " " + line.value())
        }
        // Commit the offsets
        // some time later, after outputs have completed
        stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
      }
    }
    streamingContext.start()
    streamingContext.awaitTermination()
  }
}
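The subscription-strategy comment above mentions reading topics by regex. A minimal sketch of that variant, reusing streamingContext and kafkaParams from the listing above; SubscribePattern takes a java.util.regex.Pattern instead of a topic list, and the "my-orders-.*" pattern is an illustrative assumption:

import java.util.regex.Pattern
import org.apache.spark.streaming.kafka010.ConsumerStrategies.SubscribePattern
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent

// Matches every topic whose name starts with "my-orders-"
val patternStream = KafkaUtils.createDirectStream[String, String](
  streamingContext,
  PreferConsistent,
  SubscribePattern[String, String](Pattern.compile("my-orders-.*"), kafkaParams)
)

Note that commitAsync only records offsets after the batch's output has run, which gives at-least-once delivery: if the job fails between the output and the commit, the batch is reprocessed on restart.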