Flink: Reading Data from Kafka and Configuring Checkpoints
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;

import java.util.Properties;

public class KafkaSourceStream {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(3);
        // Take a checkpoint every 5 seconds.
        env.enableCheckpointing(5000);
        // File-system state backend; "/" is a placeholder, point it at a real checkpoint directory.
        env.setStateBackend(new FsStateBackend("/"));
        // Exactly-once processing semantics.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Fail a checkpoint if it does not complete within 50 seconds.
        env.getCheckpointConfig().setCheckpointTimeout(50000);
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // Periodically write checkpoint metadata to an external system; checkpoints are
        // retained (not cleaned up) when the job fails or is cancelled.
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // On recovery, prefer the latest checkpoint, even if a more recent savepoint exists.
        env.getCheckpointConfig().setPreferCheckpointForRecovery(true);
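        // Assumed addition, not in the original: without a restart strategy the job simply
        // fails instead of restoring from the latest checkpoint; fixed-delay is a common choice.
        env.setRestartStrategy(
                org.apache.flink.api.common.restartstrategy.RestartStrategies.fixedDelayRestart(3, 10000L));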
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.3.160:9092");
        props.put("group.id", "test");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // If a partition has a committed offset, consume from it; otherwise start from
        // the earliest available offset.
        props.put("auto.offset.reset", "earliest");
        // Disable Kafka's automatic offset commits; let Flink manage offsets instead.
        props.put("enable.auto.commit", "false");
        FlinkKafkaConsumer010<String> dwdConsumer =
                new FlinkKafkaConsumer010<>("dwd", new SimpleStringSchema(), props);
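        // With checkpointing enabled, the consumer commits offsets back to Kafka only when a
        // checkpoint completes (this is the default; the explicit call below is an assumed
        // addition for clarity). The committed offsets are only used for monitoring and as a
        // start position; recovery uses the offsets stored in the checkpoint itself.
        dwdConsumer.setCommitOffsetsOnCheckpoints(true);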
        DataStreamSource<String> source = env.addSource(dwdConsumer);
        source.print();
        env.execute();
    }
}
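FlinkKafkaConsumer010 belongs to the legacy connector API. On newer Flink releases (1.14+), the unified KafkaSource replaces it. The sketch below is an assumed equivalent of the source above (same broker address, topic, group id, and start-from-committed-offsets behaviour), not part of the original program; it requires the flink-connector-kafka dependency.

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;

public class KafkaSourceStreamModern {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(5000);

        // Start from committed offsets, falling back to earliest when none exist;
        // the same behaviour as auto.offset.reset=earliest with the legacy consumer.
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers("192.168.3.160:9092")
                .setTopics("dwd")
                .setGroupId("test")
                .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.EARLIEST))
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source").print();
        env.execute();
    }
}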