魏长东

weichangdong

fluentd同时同步两个文件

我们的6台前端机用fluentd同步数据到AWS的S3，感觉这个工具很强大。

同时同步两个文件，配置如下：

 

# Print any event tagged debug.** to stdout (handy for ad-hoc debugging).
<match debug.**>
  type stdout
</match>
# Accept events from other fluentd processes / fluent-cat via the forward
# protocol (no port given, so the plugin's default is used).
<source>
  type forward
</source>
# Accept events via HTTP POST on port 8888.
<source>
  type http
  port 8888
</source>
# Debug agent for attaching fluent-debug, bound to localhost only.
<source>
  type debug_agent
  bind 127.0.0.1
  port 24230
</source>

# Tail the nginx access log, parsing each line with an nginx/combined-style
# regex into remote, user, time, method, path, code, size, referer, agent,
# request_length and request_time fields.
<source>
  type tail
  format /^(?<remote>[^ ]*) - (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^\"]*) +\S*)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)" (?<request_length>[^ ]*) (?<request_time>[^ ]*))?$/
  time_format %d/%b/%Y:%H:%M:%S %z
  # NOTE(review): relative paths resolve against the daemon's working
  # directory — confirm this is intended (absolute paths are safer).
  path wcd/nginx/feedback.m.360.cn_access.log
  pos_file wcd/nginx/feedback.m.360.cn_access.log.pos
  tag s3.nginx.access
</source>

# Tail every file under wcd/partner_appsflyer/ as raw, unparsed lines.
<source>
  type tail
  # 'none' applies no parsing; each line is forwarded as-is.
  format none
  path wcd/partner_appsflyer/*
  # NOTE(review): the pos file lives under wcd/nginx/ while the logs live
  # under wcd/partner_appsflyer/ — presumably a copy-paste leftover; verify
  # the directory choice is intentional before moving it.
  pos_file wcd/nginx/partner_appsflyer.log.pos
  tag s3.logs.access
</source>

# Upload s3.logs.* events (the appsflyer tail) to the 'wcd' bucket as JSON.
<match s3.logs.*>
  type s3

  aws_key_id ****
  aws_sec_key ***
  s3_bucket wcd
  s3_region ap-southeast-1
  # %{index} increments per chunk within a time slice, so multiple uploads
  # in the same hour get distinct keys instead of colliding.
  s3_object_key_format %{path}/%{time_slice}_s1_%{index}.%{file_extension}
  path logs
  buffer_path wcd/td-agent/buffer/s3test
  flush_interval 1m

  # Hourly slices; wait 10 extra minutes for late-arriving events before upload.
  time_slice_format %Y%m%d/%H
  time_slice_wait 10m
  utc

  format json
  include_time_key true
  include_tag_key false

  # Was 2g: a 2 GiB chunk is staged on disk and read back in full for each
  # S3 upload, delaying flushes and spiking memory. 256m is a safer ceiling;
  # with flush_interval 1m chunks rarely approach the limit anyway.
  buffer_chunk_limit 256m
</match>

# Upload s3.nginx.* events (the nginx access-log tail) to S3 as JSON.
<match s3.nginx.*>
  type s3

  aws_key_id *
  aws_sec_key *
  s3_bucket web-server-log
  s3_region ap-southeast-1
  # BUG FIX: the key format previously had no %{index}, so every chunk
  # flushed within the same hourly time slice was written to the SAME key,
  # overwriting the data uploaded earlier in that hour. %{index} makes each
  # chunk's key unique within the slice.
  s3_object_key_format %{path}/%{time_slice}_s1_%{index}.%{file_extension}
  path log
  buffer_path wcd/td-agent/buffer/s3
  #flush_interval 1m

  # Hourly slices; wait 10 extra minutes for late-arriving events before upload.
  time_slice_format %Y%m%d/%H
  time_slice_wait 10m
  utc

  format json
  include_time_key true
  include_tag_key false

  # Was 2g: oversized chunks delay flushes and spike memory during the S3
  # upload; 256m keeps individual uploads bounded (matches the s3.logs match).
  buffer_chunk_limit 256m
</match>