1 简介
2 创建 Maven 项目
<!-- Centralized version properties referenced by the dependency declarations. -->
<properties>
    <flink.version>1.10.0</flink.version>
    <scala.binary.version>2.11</scala.binary.version>
    <kafka.version>2.2.0</kafka.version>
</properties>
<dependencies>
    <!-- Flink Scala batch API -->
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-scala_${scala.binary.version}</artifactId>
        <version>${flink.version}</version>
    </dependency>
    <!-- Flink Scala streaming API -->
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-streaming-scala_${scala.binary.version}</artifactId>
        <version>${flink.version}</version>
    </dependency>
    <!-- Kafka client/broker libraries -->
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka_${scala.binary.version}</artifactId>
        <version>${kafka.version}</version>
    </dependency>
    <!-- Flink Kafka connector (source/sink) -->
    <dependency>
        <groupId>org.apache.flink</groupId>
        <artifactId>flink-connector-kafka_${scala.binary.version}</artifactId>
        <version>${flink.version}</version>
    </dependency>
</dependencies>
<build>
    <plugins>
        <!-- scala-maven-plugin compiles Scala sources into class files. -->
        <plugin>
            <groupId>net.alchim31.maven</groupId>
            <artifactId>scala-maven-plugin</artifactId>
            <version>3.4.6</version>
            <executions>
                <execution>
                    <!-- Bind to Maven's compile and test-compile phases.
                         NOTE: the original declared only the testCompile goal,
                         so main Scala sources were never compiled by this
                         execution; the compile goal is added to fix that. -->
                    <goals>
                        <goal>compile</goal>
                        <goal>testCompile</goal>
                    </goals>
                </execution>
            </executions>
        </plugin>
        <!-- maven-assembly-plugin packages a fat jar including all dependencies. -->
        <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-assembly-plugin</artifactId>
            <version>3.0.0</version>
            <configuration>
                <descriptorRefs>
                    <descriptorRef>
                        jar-with-dependencies
                    </descriptorRef>
                </descriptorRefs>
            </configuration>
            <executions>
                <execution>
                    <id>make-assembly</id>
                    <phase>package</phase>
                    <goals>
                        <goal>single</goal>
                    </goals>
                </execution>
            </executions>
        </plugin>
    </plugins>
</build>
3 模块代码实现
package com.atguigu.hotitems_analysis

import java.sql.Timestamp
import java.util.Properties

import org.apache.flink.api.common.functions.AggregateFunction
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.api.common.state.{ListState, ListStateDescriptor}
import org.apache.flink.api.java.tuple.{Tuple, Tuple1}
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.KeyedProcessFunction
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.api.scala.function.WindowFunction
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
import org.apache.flink.util.Collector

import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer

// One user-behavior record parsed from a CSV line of the Kafka topic:
// userId,itemId,categoryId,behavior,timestamp (timestamp is in seconds).
case class UserBehavior(userId: Long, itemId: Long, categoryId: Int, behavior: String, timestamp: Long)

// Aggregated view count of one item within one sliding window,
// identified by the window's end timestamp (milliseconds).
case class ItemViewCount(itemID: Long, windowEnd: Long, count: Long)

/**
 * Hot-items job: reads user behavior events from Kafka, counts "pv"
 * events per item in 1-hour windows sliding every 5 minutes, and prints
 * the Top-5 items of each window.
 */
object HotItems {
  def main(args: Array[String]): Unit = {
    // Streaming environment with event-time semantics; parallelism 1 keeps
    // the printed Top-N output in order for demonstration purposes.
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)

    // Alternative file source kept for local testing:
    // val inputStream: DataStream[String] = env.readTextFile("C:\\Users\\DELL\\IdeaProjects\\UserBehaviorAnalysis\\HotItemAnalysis\\src\\main\\resources\\UserBehavior.csv")

    // Kafka consumer configuration.
    val properties = new Properties()
    properties.setProperty("bootstrap.servers", "192.168.1.122:9092,192.168.1.133:9092,192.168.1.144:9092")
    properties.setProperty("group.id", "consumer-group")
    properties.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    properties.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
    properties.setProperty("auto.offset.reset", "latest")

    val inputStream: DataStream[String] =
      env.addSource(new FlinkKafkaConsumer[String]("hotitems", new SimpleStringSchema(), properties))

    // Parse each CSV line into a UserBehavior and assign ascending
    // event-time timestamps (source seconds converted to milliseconds).
    val dataStream: DataStream[UserBehavior] = inputStream
      .map(data => {
        val dataArray = data.split(",")
        UserBehavior(dataArray(0).toLong, dataArray(1).toLong, dataArray(2).toInt, dataArray(3), dataArray(4).toLong)
      })
      .assignAscendingTimestamps(_.timestamp * 1000L)

    // Keep only page-view events, then count per item over
    // 1-hour windows sliding every 5 minutes.
    val aggStream: DataStream[ItemViewCount] = dataStream
      .filter(_.behavior == "pv")
      .keyBy("itemId")
      .timeWindow(Time.hours(1), Time.minutes(5))
      .aggregate(new CountAgg(), new ItemCountWindowResult())

    // Regroup window results by window end and emit the Top-5 per window.
    val resultStream: DataStream[String] = aggStream
      .keyBy("windowEnd")
      .process(new TopNHotItem(5))

    resultStream.print()

    env.execute("hot items job")
  }
}

// Incremental pre-aggregation: accumulator is a running count of
// elements in the window; merge supports session/merging windows.
class CountAgg() extends AggregateFunction[UserBehavior, Long, Long] {
  override def createAccumulator(): Long = 0L
  override def add(value: UserBehavior, accumulator: Long): Long = accumulator + 1
  override def getResult(accumulator: Long): Long = accumulator
  override def merge(a: Long, b: Long): Long = a + b
}

// Wraps the pre-aggregated count together with the item id (the key)
// and the window end into an ItemViewCount.
class ItemCountWindowResult() extends WindowFunction[Long, ItemViewCount, Tuple, TimeWindow] {
  override def apply(key: Tuple, window: TimeWindow, input: Iterable[Long], out: Collector[ItemViewCount]): Unit = {
    // keyBy("itemId") yields a Tuple1 key; unwrap the item id.
    val itemId = key.asInstanceOf[Tuple1[Long]].f0
    val windEnd = window.getEnd
    // `input` holds exactly one element: the pre-aggregated count.
    val count = input.iterator.next()
    out.collect(ItemViewCount(itemId, windEnd, count))
  }
}

/**
 * Collects all ItemViewCount results of one window (keyed by windowEnd),
 * and when the window is complete (event-time timer fires), sorts them
 * by count and emits a formatted Top-`n` report.
 */
class TopNHotItem(n: Int) extends KeyedProcessFunction[Tuple, ItemViewCount, String] {

  // ListState buffering all per-item counts of the current window.
  lazy val itemCountListState: ListState[ItemViewCount] =
    getRuntimeContext.getListState(new ListStateDescriptor[ItemViewCount]("itemcount-list", classOf[ItemViewCount]))

  override def processElement(value: ItemViewCount, ctx: KeyedProcessFunction[Tuple, ItemViewCount, String]#Context, collector: Collector[String]): Unit = {
    // Buffer every incoming window result in state.
    itemCountListState.add(value)
    // Register an event-time timer slightly after the window end; when it
    // fires, all results for this windowEnd have arrived.
    ctx.timerService().registerEventTimeTimer(value.windowEnd + 100)
  }

  // Timer callback: sort the buffered counts and emit the Top-N report.
  override def onTimer(timestamp: Long, ctx: KeyedProcessFunction[Tuple, ItemViewCount, String]#OnTimerContext, out: Collector[String]): Unit = {
    // Drain the state into a local buffer (Java Iterable -> Scala via asScala;
    // the deprecated implicit JavaConversions import was replaced).
    val allItemCountList: ListBuffer[ItemViewCount] = ListBuffer()
    for (itemCount <- itemCountListState.get().asScala) {
      allItemCountList += itemCount
    }

    // Sort descending by count and keep the top n entries.
    val sortedItemCountList = allItemCountList.sortBy(_.count)(Ordering.Long.reverse).take(n)

    // State for this window is no longer needed.
    itemCountListState.clear()

    // Format the ranking for console/monitoring output. The timer fired at
    // windowEnd + 100, so subtract 100 to display the actual window end.
    val result: StringBuilder = new StringBuilder
    result.append("时间: ").append(new Timestamp(timestamp - 100)).append("\n")
    for (i <- sortedItemCountList.indices) {
      val currentItemCount = sortedItemCountList(i)
      result.append("Top").append(i + 1).append(":")
        .append(" 商品ID=").append(currentItemCount.itemID)
        .append(" 访问量=").append(currentItemCount.count)
        .append("\n")
    }
    result.append("====================================\n\n")

    // Throttle output for readability in this demo job (not for coordination).
    Thread.sleep(1000)
    out.collect(result.toString())
  }
}