# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
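#
# For example, assuming INFLUX_URL and BATCH_SIZE are exported in Telegraf's
# environment (illustrative variable names):
#   urls = ["${INFLUX_URL}"]            # string: keep the surrounding quotes
#   metric_batch_size = ${BATCH_SIZE}   # integer: plain, no quotes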
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Collection offset is used to shift the collection by the given amount.
## This can be used to avoid many plugins querying constrained devices
## at the same time by manually scheduling them in time.
# collection_offset = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## Collected metrics are rounded to the precision specified. Precision is
## specified as an interval with an integer + unit (e.g. 0s, 10ms, 2us, 4s).
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
##
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s:
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
##
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
precision = "0s"
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log target controls the destination for logs and can be one of "file",
## "stderr" or, on Windows, "eventlog". When set to "file", the output file
## is determined by the "logfile" setting.
# logtarget = "file"
## Name of the file to be logged to when using the "file" logtarget. If set to
## the empty string then logs are written to stderr.
# logfile = ""
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0h"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Pick a timezone to use when logging or type 'local' for local time.
## Example: America/Chicago
# log_with_timezone = ""
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
## Method of translating SNMP objects. Can be "netsnmp" (deprecated) which
## translates by calling external programs snmptranslate and snmptable,
## or "gosmi" which translates using the built-in gosmi library.
# snmp_translator = "netsnmp"
## Name of the file to load the state of plugins from and store the state to.
## If uncommented and not empty, this file will be used to save the state of
## stateful plugins on termination of Telegraf. If the file exists on start,
## the state in the file will be restored for the plugins.
# statefile = ""
## Flag to skip running processors after aggregators
## By default, processors are run a second time after aggregators. Changing
## this setting to true will skip the second run of processors.
# skip_processors_after_aggregators = false
###############################################################################
# SECRETSTORE PLUGINS #
###############################################################################
# # Secret-store to access Docker Secrets
# [[secretstores.docker]]
# ## Unique identifier for the secretstore.
# ## This id can later be used in plugins to reference the secrets
# ## in this secret-store via @{<id>:<secret_key>} (mandatory)
# id = "docker_secretstore"
#
# ## Default Path to directory where docker stores the secrets file
# ## Current implementation in docker compose v2 only allows the following
# ## value for the path where the secrets are mounted at runtime
# # path = "/run/secrets"
#
# ## Allow dynamic secrets that are updated during runtime of telegraf
# ## Dynamic Secrets work only with `file` or `external` configuration
# ## in `secrets` section of the `docker-compose.yml` file
# # dynamic = false
# # Read secrets from a HTTP endpoint
# [[secretstores.http]]
# ## Unique identifier for the secret-store.
# ## This id can later be used in plugins to reference the secrets
# ## in this secret-store via @{<id>:<secret_key>} (mandatory)
# id = "secretstore"
#
# ## URLs from which to read the secrets
# url = "http://localhost/secrets"
#
# ## Optional HTTP headers
# # headers = {"X-Special-Header" = "Special-Value"}
#
# ## Optional Token for Bearer Authentication via
# ## "Authorization: Bearer <token>" header
# # token = "your-token"
#
# ## Optional Credentials for HTTP Basic Authentication
# # username = "username"
# # password = "pa$$word"
#
# ## OAuth2 Client Credentials. The options 'client_id', 'client_secret', and 'token_url' are required to use OAuth2.
# # client_id = "clientid"
# # client_secret = "secret"
# # token_url = "https://identityprovider/oauth2/v1/token"
# # scopes = ["urn:opc:idm:__myscopes__"]
#
# ## HTTP Proxy support
# # use_system_proxy = false
# # http_proxy_url = ""
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Minimal TLS version accepted by the client
# # tls_min_version = "TLS12"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Optional Cookie authentication
# # cookie_auth_url = "https://localhost/authMe"
# # cookie_auth_method = "POST"
# # cookie_auth_username = "username"
# # cookie_auth_password = "pa$$word"
# # cookie_auth_headers = { Content-Type = "application/json", X-MY-HEADER = "hello" }
# # cookie_auth_body = '{"username": "user", "password": "pa$$word", "authenticate": "me"}'
# ## When unset or set to zero the authentication will only happen once
# ## and will never renew the cookie. Set to a suitable duration if you
# ## require cookie renewal!
# # cookie_auth_renewal = "0s"
#
# ## Amount of time allowed to complete the HTTP request
# # timeout = "5s"
#
# ## List of success status codes
# # success_status_codes = [200]
#
# ## JSONata expression to transform the server response into a
# ## { "secret name": "secret value", ... }
# ## form. See https://jsonata.org for more information and a playground.
# # transformation = ''
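# ## For example, if the endpoint wrapped the secrets in a top-level
# ## "secrets" object (hypothetical response shape), a plain JSONata path
# ## expression would unwrap it:
# # transformation = 'secrets'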
#
# ## Cipher used to decrypt the secrets.
# ## In case your secrets are transmitted in an encrypted form, you need
# ## to specify the cipher used and provide the corresponding configuration.
# ## Please refer to https://github.com/influxdata/telegraf/blob/master/plugins/secretstores/http/README.md
# ## for supported values.
# # cipher = "none"
#
# ## AES cipher parameters
# # [secretstores.http.aes]
# # ## Key (hex-encoded) and initialization-vector (IV) for the decryption.
# # ## In case the key (and IV) is derived from a password, the values can
# # ## be omitted.
# # key = ""
# # init_vector = ""
# #
# # ## Parameters for password-based-key derivation.
# # ## These parameters must match the encryption side to derive the same
# # ## key on both sides!
# # # kdf_algorithm = "PBKDF2-HMAC-SHA256"
# # # password = ""
# # # salt = ""
# # # iterations = 0
# # File based Javascript Object Signing and Encryption based secret-store
# [[secretstores.jose]]
# ## Unique identifier for the secret-store.
# ## This id can later be used in plugins to reference the secrets
# ## in this secret-store via @{<id>:<secret_key>} (mandatory)
# id = "secretstore"
#
# ## Directory for storing the secrets
# path = "/etc/telegraf/secrets"
#
# ## Password to access the secrets.
# ## If no password is specified here, Telegraf will prompt for it at startup time.
# # password = ""
# # Secret-store to retrieve and maintain tokens from various OAuth2 services
# [[secretstores.oauth2]]
# ## Unique identifier for the secret-store.
# ## This id can later be used in plugins to reference the secrets
# ## in this secret-store via @{<id>:<secret_key>} (mandatory)
# id = "secretstore"
#
# ## Service to retrieve the token(s) from
# ## Currently supported services are "custom", "auth0" and "AzureAD"
# # service = "custom"
#
# ## Setting to overwrite the queried token-endpoint
# ## This setting is optional for some services but mandatory for others such
# ## as "custom" or "auth0". Please check the documentation at
# ## https://github.com/influxdata/telegraf/blob/master/plugins/secretstores/oauth2/README.md
# # token_endpoint = ""
#
# ## Tenant ID for the AzureAD service
# # tenant_id = ""
#
# ## Minimal remaining time until the token expires
# ## If a token expires less than the set duration in the future, the token is
# ## renewed. This is useful to avoid race-condition issues where a token
# ## is still valid when checked, but no longer valid by the time the
# ## request reaches the API endpoint of your service using the token.
# # token_expiry_margin = "1s"
#
# ## Section for defining a token secret
# [[secretstores.oauth2.token]]
# ## Unique secret-key used for referencing the token via @{<id>:<secret_key>}
# key = ""
# ## Client-ID and secret for the 2-legged OAuth flow
# client_id = ""
# client_secret = ""
# ## Scopes to send in the request
# # scopes = []
#
# ## Additional (optional) parameters to include in the token request
# ## This might for example include the "audience" parameter required for
# ## auth0.
# # [secretstores.oauth2.token.parameters]
# # audience = ""
# # Operating System native secret-store
# [[secretstores.os]]
# ## Unique identifier for the secret-store.
# ## This id can later be used in plugins to reference the secrets
# ## in this secret-store via @{<id>:<secret_key>} (mandatory)
# id = "secretstore"
#
# ## Keyring Name & Collection
# ## * Linux: keyring name used for the secrets, collection is unused
# ## * macOS: keyring specifies the macOS Keychain name and collection is an
# ## optional Keychain service name
# ## * Windows: keys follow a fixed pattern in the form
# ## `<collection>:<keyring>:<key_name>`. Please keep this in mind when
# ## creating secrets with the Windows credential tool.
# # keyring = "telegraf"
# # collection = ""
#
# ## macOS Keychain password
# ## If no password is specified here, Telegraf will prompt for it at startup
# ## time.
# # password = ""
#
# ## Allow dynamic secrets that are updated during runtime of telegraf
# # dynamic = false
# # Secret-store to access systemd secrets
# [[secretstores.systemd]]
# ## Unique identifier for the secretstore.
# ## This id can later be used in plugins to reference the secrets
# ## in this secret-store via @{<id>:<secret_key>} (mandatory)
# id = "systemd"
#
# ## Path to systemd credentials directory
# ## This should not be required as systemd indicates this directory
# ## via the CREDENTIALS_DIRECTORY environment variable.
# # path = "${CREDENTIALS_DIRECTORY}"
#
# ## Prefix to remove from systemd credential-filenames to derive secret names
# # prefix = "telegraf."
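# ## e.g. with the default prefix, a credential file named
# ## "telegraf.api_token" (illustrative name) would be exposed as the
# ## secret "api_token"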
#
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
# # Configuration for sending metrics to InfluxDB 2.0
# [[outputs.influxdb_v2]]
# ## The URLs of the InfluxDB cluster nodes.
# ##
# ## Multiple URLs can be specified for a single cluster; only ONE of the
# ## urls will be written to in each interval.
# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"]
# urls = ["http://127.0.0.1:8086"]
#
# ## Local address to bind when connecting to the server
# ## If empty or not set, the local address is automatically chosen.
# # local_address = ""
#
# ## Token for authentication.
# token = ""
#
# ## Organization is the name of the organization you wish to write to.
# organization = ""
#
# ## Destination bucket to write into.
# bucket = ""
#
# ## The value of this tag will be used to determine the bucket. If this
# ## tag is not set the 'bucket' option is used as the default.
# # bucket_tag = ""
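# ## ex: with bucket_tag = "bucket", a metric carrying the (hypothetical)
# ## tag bucket=telegraf_db would be routed to the "telegraf_db" bucket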
#
# ## If true, the bucket tag will not be added to the metric.
# # exclude_bucket_tag = false
#
# ## Timeout for HTTP messages.
# # timeout = "5s"
#
# ## Additional HTTP headers
# # http_headers = {"X-Special-Header" = "Special-Value"}
#
# ## HTTP Proxy override. If unset, the standard proxy environment
# ## variables are consulted to determine which proxy, if any, should be used.
# # http_proxy = "http://corporate.proxy:3128"
#
# ## HTTP User-Agent
# # user_agent = "telegraf"
#
# ## Content-Encoding for write request body, can be set to "gzip" to
# ## compress body or "identity" to apply no encoding.
# # content_encoding = "gzip"
#
# ## Enable or disable uint support for writing uints to InfluxDB 2.0.
# # influx_uint_support = false
#
# ## When true, Telegraf will omit the timestamp on data to allow InfluxDB
# ## to set the timestamp of the data during ingestion. This is generally NOT
# ## what you want as it can lead to data points captured at different times
# ## getting omitted due to similar data.
# # influx_omit_timestamp = false
#
# ## HTTP/2 Timeouts
# ## The following values control the HTTP/2 client's timeouts. These settings
# ## are generally not required unless a user is seeing issues with client
# ## disconnects. If a user does see issues, then it is suggested to set these
# ## values to "15s" for ping timeout and "30s" for read idle timeout and
# ## retry.
# ##
# ## Note that the timer for read_idle_timeout begins at the end of the last
# ## successful write and not at the beginning of the next write.
# # ping_timeout = "0s"
# # read_idle_timeout = "0s"
#
# ## Optional TLS Config for use on HTTP connections.
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
# ## Amon Server Key
# server_key = "my-server-key" # required.
#
# ## Amon Instance URL
# amon_instance = "https://youramoninstance" # required
#
# ## Connection timeout.
# # timeout = "5s"
# # Publishes metrics to an AMQP broker
# [[outputs.amqp]]
# ## Broker to publish to.
# ## deprecated in 1.7; use the brokers option
# # url = "amqp://localhost:5672/influxdb"
#
# ## Brokers to publish to. If multiple brokers are specified a random broker
# ## will be selected anytime a connection is established. This can be
# ## helpful for load balancing when not using a dedicated load balancer.
# brokers = ["amqp://localhost:5672/influxdb"]
#
# ## Maximum messages to send over a connection. Once this is reached, the
# ## connection is closed and a new connection is made. This can be helpful for
# ## load balancing when not using a dedicated load balancer.
# # max_messages = 0
#
# ## Exchange to declare and publish to.
# exchange = "telegraf"
#
# ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash".
# # exchange_type = "topic"
#
# ## If true, exchange will be passively declared.
# # exchange_passive = false
#
# ## Exchange durability can be either "transient" or "durable".
# # exchange_durability = "durable"
#
# ## Additional exchange arguments.
# # exchange_arguments = { }
# # exchange_arguments = {"hash_property" = "timestamp"}
#
# ## Authentication credentials for the PLAIN auth_method.
# # username = ""
# # password = ""
#
# ## Auth method. PLAIN and EXTERNAL are supported
# ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as
# ## described here: https://www.rabbitmq.com/plugins.html
# # auth_method = "PLAIN"
#
# ## Metric tag to use as a routing key.
# ## ie, if this tag exists, its value will be used as the routing key
# # routing_tag = "host"
#
# ## Static routing key. Used when no routing_tag is set or as a fallback
# ## when the tag specified in routing tag is not found.
# # routing_key = ""
# # routing_key = "telegraf"
#
# ## Delivery Mode controls if a published message is persistent.
# ## One of "transient" or "persistent".
# # delivery_mode = "transient"
#
# ## InfluxDB database added as a message header.
# ## deprecated in 1.7; use the headers option
# # database = "telegraf"
#
# ## InfluxDB retention policy added as a message header
# ## deprecated in 1.7; use the headers option
# # retention_policy = "default"
#
# ## Static headers added to each published message.
# # headers = { }
# # headers = {"database" = "telegraf", "retention_policy" = "default"}
#
# ## Connection timeout. If not provided, will default to 5s. 0s means no
# ## timeout (not recommended).
# # timeout = "5s"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Optional Proxy Configuration
# # use_proxy = false
# # proxy_url = "localhost:8888"
#
# ## If true use batch serialization format instead of line based delimiting.
# ## Only applies to data formats which are not line based such as JSON.
# ## Recommended to set to true.
# # use_batch_format = false
#
# ## Content encoding for message payloads, can be set to "gzip" to
# ## compress the payload or "identity" to apply no encoding.
# ##
# ## Please note that when use_batch_format = false each amqp message
# ## contains only a single metric; it is recommended to use compression
# ## with batch format for best results.
# # content_encoding = "identity"
#
# ## Data format to output.
# ## Each data format has its own unique set of configuration options, read
# ## more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# # data_format = "influx"
# # Send metrics to Azure Application Insights
# [[outputs.application_insights]]
# ## Instrumentation key of the Application Insights resource.
# instrumentation_key = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxx"
#
# ## Regions that require endpoint modification https://docs.microsoft.com/en-us/azure/azure-monitor/app/custom-endpoints
# # endpoint_url = "https://dc.services.visualstudio.com/v2/track"
#
# ## Timeout for closing (default: 5s).
# # timeout = "5s"
#
# ## Enable additional diagnostic logging.
# # enable_diagnostic_logging = false
#
# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
# ## plugin definition, otherwise additional config options are read as part of
# ## the table
#
# ## Context Tag Sources add Application Insights context tags to a tag value.
# ##
# ## For list of allowed context tag keys see:
# ## https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go
# # [outputs.application_insights.context_tag_sources]
# # "ai.cloud.role" = "kubernetes_container_name"
# # "ai.cloud.roleInstance" = "kubernetes_pod_name"
# # Sends metrics to Azure Data Explorer
# [[outputs.azure_data_explorer]]
# ## The URI property of the Azure Data Explorer resource on Azure
# ## ex: endpoint_url = https://myadxresource.australiasoutheast.kusto.windows.net
# endpoint_url = ""
#
# ## The Azure Data Explorer database that the metrics will be ingested into.
# ## The plugin will NOT generate this database automatically; it's expected that this database already exists before ingestion.
# ## ex: "exampledatabase"
# database = ""
#
# ## Timeout for Azure Data Explorer operations
# # timeout = "20s"
#
# ## Type of metrics grouping used when pushing to Azure Data Explorer.
# ## Default is "TablePerMetric" for one table per different metric.
# ## For more information, please check the plugin README.
# # metrics_grouping_type = "TablePerMetric"
#
# ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable").
# # table_name = ""
#
# ## Creates tables and relevant mapping if set to true (default).
# ## Skips table and mapping creation if set to false; this is useful for
# ## running Telegraf with the lowest possible permissions, i.e. the table
# ## ingestor role.
# # create_tables = true
#
# ## Ingestion method to use.
# ## Available options are
# ## - managed -- streaming ingestion with fallback to batched ingestion or the "queued" method below
# ## - queued -- queue up metrics data and process sequentially
# # ingestion_type = "queued"
# # Send aggregate metrics to Azure Monitor
# [[outputs.azure_monitor]]
# ## Timeout for HTTP writes.
# # timeout = "20s"
#
# ## Set the namespace prefix, defaults to "Telegraf/<input-name>".
# # namespace_prefix = "Telegraf/"
#
# ## Azure Monitor doesn't have a string value type, so convert string
# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows
# ## a maximum of 10 dimensions so Telegraf will only send the first 10
# ## alphanumeric dimensions.
# # strings_as_dimensions = false
#
# ## Both region and resource_id must be set or be available via the
# ## Instance Metadata service on Azure Virtual Machines.
# #
# ## Azure Region to publish metrics against.
# ## ex: region = "southcentralus"
# # region = ""
# #
# ## The Azure Resource ID against which metrics will be logged.
# ## ex: resource_id = "/subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Compute/virtualMachines/<vm_name>"
# # resource_id = ""
#
# ## Optionally, if in Azure US Government, China, or other sovereign
# ## cloud environment, set the appropriate REST endpoint for receiving
# ## metrics. (Note: region may be unused in this context)
# # endpoint_url = "https://monitoring.core.usgovcloudapi.net"
# # Configuration for Google Cloud BigQuery to send entries
# [[outputs.bigquery]]
# ## Credentials File
# credentials_file = "/path/to/service/account/key.json"
#
# ## Google Cloud Platform Project
# # project = ""
#
# ## The namespace for the metric descriptor
# dataset = "telegraf"
#
# ## Timeout for BigQuery operations.
# # timeout = "5s"
#
# ## Character to replace hyphens in metric names
# # replace_hyphen_to = "_"
#
# ## Write all metrics in a single compact table
# # compact_table = ""
# # Configuration to publish Telegraf metrics to Clarify
# [[outputs.clarify]]
# ## Credentials File (Oauth 2.0 from Clarify integration)
# credentials_file = "/path/to/clarify/credentials.json"
#
# ## Clarify username and password (Basic Auth from Clarify integration)
# username = "i-am-bob"
# password = "secret-password"
#
# ## Timeout for Clarify operations
# # timeout = "20s"
#
# ## Optional tags to be included when generating the unique ID for a signal in Clarify
# # id_tags = []
# # clarify_id_tag = 'clarify_input_id'
# # Publish Telegraf metrics to a Google Cloud PubSub topic
# [[outputs.cloud_pubsub]]
# ## Required. Name of Google Cloud Platform (GCP) Project that owns
# ## the given PubSub topic.
# project = "my-project"
#
# ## Required. Name of PubSub topic to publish metrics to.
# topic = "my-topic"
#
# ## Content encoding for message payloads, can be set to "gzip" to
# ## compress the payload or "identity" to apply no encoding.
# # content_encoding = "identity"
#
# ## Required. Data format to output.
# ## Each data format has its own unique set of configuration options.
# ## Read more about them here:
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
# data_format = "influx"
#
# ## Optional. Filepath for GCP credentials JSON file to authorize calls to
# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use
# ## Application Default Credentials, which is preferred.
# # credentials_file = "path/to/my/creds.json"
#
# ## Optional. If true, will send all metrics per write in one PubSub message.
# # send_batched = true
#
# ## The following publish_* parameters specifically configure batching
# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library.
# ## Read more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings
#
# ## Optional. Send a request to PubSub (i.e. actually publish a batch)
# ## when it has this many PubSub messages. If send_batched is true,
# ## this is ignored and treated as if it were 1.
# # publish_count_threshold = 1000
#
# ## Optional. Send a request to PubSub (i.e. actually publish a batch)
# ## when the queued messages reach this many bytes in total. If
# ## send_batched is true, this is ignored and treated as if it were 1
# # publish_byte_threshold = 1000000
#
# ## Optional. Specifically configures requests made to the PubSub API.
# # publish_num_go_routines = 2
#
# ## Optional. Specifies a timeout for requests to the PubSub API.
# # publish_timeout = "30s"
#
# ## Optional. If true, published PubSub message data will be base64-encoded.
# # base64_data = false
#
# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
# ## plugin definition, otherwise additional config options are read as part of
# ## the table
#
# ## Optional. PubSub attributes to add to metrics.
# # [outputs.cloud_pubsub.attributes]
# # my_attr = "tag_value"
# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
# ## Amazon REGION
# region = "us-east-1"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
# ## 2) Assumed credentials via STS if role_arn is specified
# ## 3) explicit credentials from 'access_key' and 'secret_key'
# ## 4) shared profile from 'profile'
# ## 5) environment variables
# ## 6) shared credentials file
# ## 7) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
# #token = ""
# #role_arn = ""
# #web_identity_token_file = ""
# #role_session_name = ""
# #profile = ""
# #shared_credential_file = ""
#
# ## Endpoint to make request against, the correct endpoint is automatically
# ## determined and this option should only be set if you wish to override the
# ## default.
# ## ex: endpoint_url = "http://localhost:8000"
# # endpoint_url = ""
#
# ## Set http_proxy
# # use_system_proxy = false
# # http_proxy_url = "http://localhost:8888"
#
# ## Namespace for the CloudWatch MetricDatums
# namespace = "InfluxData/Telegraf"
#
# ## If you have a large amount of metrics, you should consider sending
# ## statistic values instead of raw metrics, which can not only improve
# ## performance but also save on AWS API cost. If this flag is enabled,
# ## the plugin parses the required CloudWatch statistic fields (count,
# ## min, max, and sum) and sends them to CloudWatch. You can use the
# ## basicstats aggregator to calculate those fields (see the sketch after
# ## this option). If not all statistic fields are available, all fields
# ## are still sent as raw metrics.
# # write_statistics = false
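#
# ## A minimal sketch of a matching aggregator (a separate top-level
# ## section, not part of this plugin; the period value is illustrative):
# # [[aggregators.basicstats]]
# #   period = "60s"
# #   stats = ["count", "min", "max", "sum"]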
#
# ## Enable high resolution metrics of 1 second (if not enabled, standard resolution metrics have a precision of 60 seconds)
# # high_resolution_metrics = false
# # Configuration for AWS CloudWatchLogs output.
# [[outputs.cloudwatch_logs]]
# ## The region is the Amazon region that you wish to connect to.
# ## Examples include but are not limited to:
# ## - us-west-1
# ## - us-west-2
# ## - us-east-1
# ## - ap-southeast-1
# ## - ap-southeast-2
# ## ...
# region = "us-east-1"
#
# ## Amazon Credentials
# ## Credentials are loaded in the following order
# ## 1) Web identity provider credentials via STS if role_arn and web_identity_token_file are specified
# ## 2) Assumed credentials via STS if role_arn is specified
# ## 3) explicit credentials from 'access_key' and 'secret_key'
# ## 4) shared profile from 'profile'
# ## 5) environment variables
# ## 6) shared credentials file
# ## 7) EC2 Instance Profile
# #access_key = ""
# #secret_key = ""
# #token = ""
# #role_arn = ""
# #web_identity_token_file = ""
# #role_session_name = ""
# #profile = ""
# #shared_credential_file = ""
#
# ## Endpoint to make request against, the correct endpoint is automatically
# ## determined and this option should only be set if you wish to override the
# ## default.
# ## ex: endpoint_url = "http://localhost:8000"
# # endpoint_url = ""
#
# ## CloudWatch log group. Must be created in AWS CloudWatch Logs upfront!
# ## For example, you can specify the name of the k8s cluster here to
# ## group logs from the whole cluster in one place
# log_group = "my-group-name"
#
# ## Log stream in log group
# ## Either a log stream name or a reference to a metric attribute from
# ## which it can be parsed: tag:<TAG_NAME> or field:<FIELD_NAME>. If the
# ## log stream does not exist, it will be created.
# ## Since AWS does not automatically delete log streams with expired log
# ## entries (i.e. empty log streams), you need to put appropriate
# ## house-keeping in place (https://forums.aws.amazon.com/thread.jspa?threadID=178855)
# log_stream = "tag:location"
#
# ## Source of log data - metric name
# ## Specify the name of the metric from which the log data should be
# ## retrieved. E.g., if you are using the docker_log plugin to stream logs
# ## from containers, then specify log_data_metric_name = "docker_log"
# log_data_metric_name = "docker_log"
#
# ## Specify from which metric attribute the log data should be retrieved:
# ## tag:<TAG_NAME> or field:<FIELD_NAME>.
# ## E.g., if you are using the docker_log plugin to stream logs from
# ## containers, then specify log_data_source = "field:message"
# log_data_source = "field:message"
# # Configuration for CrateDB to send metrics to.
# [[outputs.cratedb]]
# ## Connection parameters for accessing the database see
# ## https://pkg.go.dev/github.com/jackc/pgx/v4#ParseConfig
# ## for available options
# url = "postgres://user:password@localhost/schema?sslmode=disable"
#
# ## Timeout for all CrateDB queries.
# # timeout = "5s"
#
# ## Name of the table to store metrics in.
# # table = "metrics"
#
# ## If true, and the metrics table does not exist, create it automatically.
# # table_create = false
#
# ## The character(s) to replace any '.' in an object key with
# # key_separator = "_"
# # Configuration for DataDog API to send metrics to.
# [[outputs.datadog]]
# ## Datadog API key
# apikey = "my-secret-key"
#
# ## Connection timeout.
# # timeout = "5s"
#
# ## Write URL override; useful for debugging.
# ## This plugin only supports the v1 API currently due to the authentication
# ## method used.
# # url = "https://app.datadoghq.com/api/v1/series"
#
# ## Set http_proxy
# # use_system_proxy = false
# # http_proxy_url = "http://localhost:8888"
#
# ## Override the default (none) compression used to send data.
# ## Supports: "zlib", "none"
# # compression = "none"
# # Send metrics to nowhere at all
# [[outputs.discard]]
# # no configuration
# # Send telegraf metrics to a Dynatrace environment
# [[outputs.dynatrace]]
# ## For usage with the Dynatrace OneAgent you can omit any configuration;
# ## the only requirement is that the OneAgent is running on the same host.
# ## Only set up the environment URL and token if you want to monitor a
# ## host without the OneAgent present.
# ##
# ## Your Dynatrace environment URL.
# ## For Dynatrace OneAgent you can leave this empty or set it to "http://127.0.0.1:14499/metrics/ingest" (default)
# ## For Dynatrace SaaS environments the URL scheme is "https://{your-environment-id}.live.dynatrace.com/api/v2/metrics/ingest"
# ## For Dynatrace Managed environments the URL scheme is "https://{your-domain}/e/{your-environment-id}/api/v2/metrics/ingest"
# url = ""
#
# ## Your Dynatrace API token.
# ## Create an API token within your Dynatrace environment, by navigating to Settings > Integration > Dynatrace API
# ## The API token needs data ingest scope permission. When using OneAgent, no API token is required.
# api_token = ""
#
# ## Optional prefix for metric names (e.g.: "telegraf")
# prefix = "telegraf"
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Optional flag for ignoring tls certificate check
# # insecure_skip_verify = false
#
# ## Connection timeout, defaults to "5s" if not set.
# timeout = "5s"
#
# ## If you want metrics to be treated and reported as delta counters, add the metric names here
# additional_counters = [ ]
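# ## ex: additional_counters = ["requests_total"] (illustrative metric name)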
#
# ## NOTE: Due to the way TOML is parsed, tables must be at the END of the
# ## plugin definition, otherwise additional config options are read as part of
# ## the table
#
# ## Optional dimensions to be added to every metric
# # [outputs.dynatrace.default_dimensions]
# # default_key = "default value"
# # Configuration for Elasticsearch to send metrics to.
# [[outputs.elasticsearch]]
# ## The full HTTP endpoint URL for your Elasticsearch instance
# ## Multiple urls can be specified as part of the same cluster;
# ## this means that only ONE of the urls will be written to in each interval
# urls = [ "http://node1.es.example.com:9200" ] # required.
# ## Elasticsearch client timeout, defaults to "5s" if not set.
# timeout = "5s"
# ## Set to true to ask Elasticsearch for a list of all cluster nodes,
# ## making it unnecessary to list all nodes in the urls config option
# enable_sniffer = false
# ## Set to true to enable gzip compression
# enable_gzip = false
# ## Set the interval to check if the Elasticsearch nodes are available
# ## Setting to "0s" will disable the health check (not recommended in production)
# health_check_interval = "10s"
# ## Set the timeout for periodic health checks.
# # health_check_timeout = "1s"
# ## HTTP basic authentication details
# # username = "telegraf"
# # password = "mypassword"
# ## HTTP bearer token authentication details
# # auth_bearer_token = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"
#
# ## Index Config
# ## The target index for metrics (Elasticsearch will create it if it does not exist).
# ## You can use the date specifiers below to create indexes per time frame.
# ## The metric timestamp will be used to decide the destination index name
# # %Y - year (2016)
# # %y - last two digits of year (00..99)
# # %m - month (01..12)
# # %d - day of month (e.g., 01)
# # %H - hour (00..23)
# # %V - week of the year (ISO week) (01..53)
# ## Additionally, you can specify a tag name using the notation {{tag_name}}
# ## which will be used as part of the index name. If the tag does not exist,
# ## the default tag value will be used.
# # index_name = "telegraf-{{host}}-%Y.%m.%d"
# # default_tag_value = "none"
# index_name = "telegraf-%Y.%m.%d" # required.
#
# ## Optional TLS Config
# # tls_ca = "/etc/telegraf/ca.pem"
# # tls_cert = "/etc/telegraf/cert.pem"
# # tls_key = "/etc/telegraf/key.pem"
# ## Use TLS but skip chain & host verification
# # insecure_skip_verify = false
#
# ## Template Config
# ## Set to true if you want telegraf to manage its index template.
# ## If enabled it will create a recommended index template for telegraf indexes
# manage_template = true
# ## The template name used for telegraf indexes
# template_name = "telegraf"
# ## Set to true if you want telegraf to overwrite an existing template
# overwrite_template = false
# ## If set to true a unique ID hash will be sent as a
# ## sha256(concat(timestamp,measurement,series-hash)) string. This enables
# ## data resends and metric point updates, avoiding duplicate metrics
# ## with different IDs
# force_document_id = false
#
# ## Specifies the handling of NaN and Inf values.
# ## This option can have the following values:
# ## none -- do not modify field-values (default); will produce an error if NaNs or infs are encountered
# ## drop -- drop fields containing NaNs or infs
# ## replace -- replace with the value in "float_replacement_value" (default: 0.0)
# ## NaNs and inf will be replaced with the given number, -inf with the negative of that number
# # float_handling = "none"
# # float_replacement_value = 0.0
#
# ## Pipeline Config
# ## To use an ingest pipeline, set this to the name of the pipeline you want to use.
# # use_pipeline = "my_pipeline"
# ## Additionally, you can specify a tag name using the notation {{tag_name}}
# ## which will be used as part of the pipeline name. If the tag does not exist,
# ## the default pipeline will be used as the pipeline. If no default pipeline is set,
# ## no pipeline is used for the metric.
# # use_pipeline = "{{es_pipeline}}"
# # default_pipeline = "my_pipeline"
# #
# # Custom HTTP headers
# # To pass custom HTTP headers, define them in the section below
# # [outputs.elasticsearch.headers]
# # "X-Custom-Header" = "custom-value"