# Start the Flume agent (point --conf-file at whichever configuration file below you want to run):
bin/flume-ng agent --conf conf --conf-file conf/hbase.conf --name a1 -Dflume.root.logger=INFO,console
# example.conf: A single-node Flume configuration
# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1
# Describe/configure the source
a1.sources.r1.type = netcat
a1.sources.r1.bind = localhost
a1.sources.r1.port = 12345
# Describe the sink
a1.sinks.k1.type = logger
# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
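A quick way to smoke-test this configuration (assuming it is saved as conf/example.conf; the exact logger output varies by Flume version):
bin/flume-ng agent --conf conf --conf-file conf/example.conf --name a1 -Dflume.root.logger=INFO,console
# In a second terminal, connect and type a line of text, e.g. "hello":
telnet localhost 12345
# The agent log should then show something like:
# Event: { headers:{} body: 68 65 6C 6C 6F    hello }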
#HDFS sink
a1.channels = c1
a1.sinks = k1
a1.sinks.k1.type = hdfs
a1.sinks.k1.channel = c1
a1.sinks.k1.hdfs.path = /flume/%y-%m-%d/%H
a1.sinks.k1.hdfs.filePrefix = events-
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundValue = 10
a1.sinks.k1.hdfs.roundUnit = minute
a1.sinks.k1.hdfs.useLocalTimeStamp = true
# The sink here is HDFS and the path uses automatic directory generation (%y-%m-%d/%H).
# For that to work, the official docs say every event must carry a timestamp in its
# headers, but producing well-formed timestamps upstream can be awkward. Setting
# hdfs.useLocalTimeStamp = true makes the sink use the local time instead -- for
# example, to roll into one directory per hour, configure the path as above.
## This resolves the error:
# java.lang.NullPointerException: Expected timestamp in the Flume event headers, but it was null
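An alternative to hdfs.useLocalTimeStamp is to stamp every event at the source with Flume's built-in timestamp interceptor, which adds the timestamp header the HDFS sink expects (a minimal sketch for the netcat source r1 above):
a1.sources.r1.interceptors = i1
a1.sources.r1.interceptors.i1.type = timestamp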
#HBASE
a1.channels = c1
a1.sinks = k1
a1.sinks.k1.type = hbase
a1.sinks.k1.table = flume
a1.sinks.k1.columnFamily = f1
a1.sinks.k1.serializer = org.apache.flume.sink.hbase.RegexHbaseEventSerializer
a1.sinks.k1.channel = c1
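The HBase sink does not create the table for you; it must already exist with the configured column family before the agent starts. For example, in the HBase shell:
create 'flume', 'f1'
# After some events have flowed through, verify with:
scan 'flume'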
Ways a source can read incoming data:
netcat
a1.sources.r1.type = netcat
a1.sources.r1.bind = localhost
a1.sources.r1.port = 12345
Connect to the configured port and send data: telnet localhost 12345
avro
agent1.sources.source1.type = avro
agent1.sources.source1.bind = localhost
agent1.sources.source1.port = 44444
Receives serialized Avro data.
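Flume ships with a built-in Avro client that is convenient for testing this source (assuming the agent above is running; replace the file path with a real one):
bin/flume-ng avro-client --conf conf -H localhost -p 44444 -F /path/to/some.log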
exec
a1.sources = r1
a1.channels = c1
a1.sources.r1.type = exec
a1.sources.r1.command = tail -F /var/log/secure
a1.sources.r1.channels = c1
Runs a shell command and ingests its output line by line.
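The snippet above declares the source and its channel but never defines the channel type or a sink, so on its own it will not run; completing it might look like this (a sketch reusing the memory channel and logger sink from earlier; tailing /var/log/secure usually requires root):
a1.sinks = k1
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
a1.sinks.k1.type = logger
a1.sinks.k1.channel = c1
Note that the exec source offers no delivery guarantee if the agent dies mid-read; the spooling directory source is the usual recommendation for reliable file collection.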
Check that the port is listening:
netstat -tnl | grep 23
tcp 0 0 0.0.0.0:36232 0.0.0.0:* LISTEN
tcp 0 0 :::23 :::* LISTEN
Connect to the port:
telnet localhost 23
Find related processes (note that ps matches the command line, not the port):
ps -ef|grep 23
Check which process is occupying the port:
lsof -i:23