# This is an example configuration file for the LVM2 system.
# It contains the default settings that would be used if there was no
# @DEFAULT_SYS_DIR@/lvm.conf file.
#
# Refer to 'man lvm.conf' for further information including the file layout.
#
# To put this file in a different directory and override @DEFAULT_SYS_DIR@ set
# the environment variable LVM_SYSTEM_DIR before running the tools.
#
# N.B. Take care that each setting only appears once if uncommenting
# example settings in this file.

# This section allows you to set the way the configuration settings are handled.
config {

    # If enabled, any LVM2 configuration mismatch is reported.
    # This implies checking that the configuration key is understood
    # by LVM2 and that the value of the key is of a proper type.
    # If disabled, any configuration mismatch is ignored and default
    # value is used instead without any warning (a message about the
    # configuration key not being found is issued in verbose mode only).
    checks = 1

    # If enabled, any configuration mismatch aborts the LVM2 process.
    abort_on_errors = 0

    # Directory where LVM looks for configuration profiles.
    profile_dir = "@DEFAULT_SYS_DIR@/@DEFAULT_PROFILE_SUBDIR@"
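
    # Profiles placed in this directory can be selected on the command line,
    # for example with '--profile <name>' (the profile name is illustrative).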
}

# This section allows you to configure which block devices should
# be used by the LVM system.
devices {

    # Where do you want your volume groups to appear ?
    dir = "/dev"

    # An array of directories that contain the device nodes you wish
    # to use with LVM2.
    scan = [ "/dev" ]

    # Select external device information source to use for further and more
    # detailed device determination. Some information may already be available
    # in the system and LVM2 can use this information to determine the exact
    # type or use of the device it processes. Using an existing external device
    # information source can speed up device processing as LVM2 does not need
    # to run its own native routines to acquire this information. For example,
    # such information is used to drive LVM2 filtering like MD component
    # detection, multipath component detection, partition detection and others.
    # Possible options are:
    # "none"        - No external device information source is used.
    #
    # "udev"        - Reuse existing udev database records. Applicable
    #                 only if LVM is compiled with udev support.
    #
    external_device_info_source = "none"

    # If set, the cache of block device nodes with all associated symlinks
    # will be constructed out of the existing udev database content.
    # This avoids using and opening any inapplicable non-block devices or
    # subdirectories found in the device directory. This setting is applied
    # to the udev-managed device directory only; other directories will be
    # scanned fully. LVM2 needs to be compiled with udev support for this
    # setting to take effect. N.B. Any device node or symlink not managed by
    # udev in the udev directory will be ignored with this setting on.
    obtain_device_list_from_udev = 1

    # If several entries in the scanned directories correspond to the
    # same block device and the tools need to display a name for the device,
    # all the pathnames are matched against each item in the following
    # list of regular expressions in turn and the first match is used.

    # By default no preferred names are defined.
    # preferred_names = [ ]

    # Try to avoid using undescriptive /dev/dm-N names, if present.
    # preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]

    # In case no preferred name matches or if preferred_names are not
    # defined at all, builtin rules are used to determine the preference.
    #
    # The first builtin rule checks path prefixes and it gives preference
    # based on this ordering (where "dev" depends on devices/dev setting):
    #   /dev/mapper > /dev/disk > /dev/dm-* > /dev/block
    #
    # If the ordering above cannot be applied, the path with fewer slashes
    # gets preference then.
    #
    # If the number of slashes is the same, a symlink gets preference.
    #
    # Finally, if all the rules mentioned above are not applicable,
    # lexicographical order is used over paths and the smallest one
    # of all gets preference.


    # A filter that tells LVM2 to only use a restricted set of devices.
    # The filter consists of an array of regular expressions.  These
    # expressions can be delimited by a character of your choice, and
    # prefixed with either an 'a' (for accept) or 'r' (for reject).
    # The first expression found to match a device name determines if
    # the device will be accepted or rejected (ignored).  Devices that
    # don't match any patterns are accepted.

    # Be careful if there are symbolic links or multiple filesystem
    # entries for the same device as each name is checked separately against
    # the list of patterns.  The effect is that if the first pattern in the 
    # list to match a name is an 'a' pattern for any of the names, the device
    # is accepted; otherwise if the first pattern in the list to match a name
    # is an 'r' pattern for any of the names it is rejected; otherwise it is
    # accepted.

    # Don't have more than one filter line active at once: only one gets used.

    # Run vgscan after you change this parameter to ensure that
    # the cache file gets regenerated (see below).
    # If it doesn't do what you expect, check the output of 'vgscan -vvvv'.

    # If lvmetad is used, then see "A note about device filtering while
    # lvmetad is used" comment that is attached to global/use_lvmetad setting.

    # By default we accept every block device:
    # filter = [ "a/.*/" ]

    # Exclude the cdrom drive
    # filter = [ "r|/dev/cdrom|" ]

    # When testing I like to work with just loopback devices:
    # filter = [ "a/loop/", "r/.*/" ]

    # Or maybe all loops and ide drives except hdc:
    # filter =[ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]

    # Use anchors if you want to be really specific
    # filter = [ "a|^/dev/hda8$|", "r/.*/" ]

    # Since "filter" is often overridden from command line, it is not suitable
    # for system-wide device filtering (udev rules, lvmetad). To hide devices
    # from LVM-specific udev processing and/or from lvmetad, you need to set
    # global_filter. The syntax is the same as for normal "filter"
    # above. Devices that fail the global_filter are not even opened by LVM.

    # global_filter = []
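
    # For example, to hide a whole disk that holds a guest image from all
    # LVM processing (the device name is illustrative only):
    # global_filter = [ "r|^/dev/sdb$|", "a|.*|" ]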

    # The results of the filtering are cached on disk to avoid
    # rescanning dud devices (which can take a very long time).
    # By default this cache is stored in the @DEFAULT_SYS_DIR@/@DEFAULT_CACHE_SUBDIR@ directory
    # in a file called '.cache'.
    # It is safe to delete the contents: the tools regenerate it.
    # (The old setting 'cache' is still respected if neither of
    # these new ones is present.)
    # N.B. If obtain_device_list_from_udev is set to 1 the list of
    # devices is instead obtained from udev and any existing .cache
    # file is removed.
    cache_dir = "@DEFAULT_SYS_DIR@/@DEFAULT_CACHE_SUBDIR@"
    cache_file_prefix = ""

    # You can turn off writing this cache file by setting this to 0.
    write_cache_state = 1

    # Advanced settings.

    # List of pairs of additional acceptable block device types found 
    # in /proc/devices with maximum (non-zero) number of partitions.
    # types = [ "fd", 16 ]

    # If sysfs is mounted (2.6 kernels) restrict device scanning to 
    # the block devices it believes are valid.
    # 1 enables; 0 disables.
    sysfs_scan = 1

    # By default, LVM2 will ignore devices used as component paths
    # of device-mapper multipath devices.
    # 1 enables; 0 disables.
    multipath_component_detection = 1

    # By default, LVM2 will ignore devices used as components of
    # software RAID (md) devices by looking for md superblocks.
    # 1 enables; 0 disables.
    md_component_detection = 1

    # By default, LVM2 will not ignore devices used as components of
    # firmware RAID devices. Set to 1 to enable this detection.
    # N.B. LVM2 itself is not detecting firmware RAID - an
    # external_device_info_source other than "none" must
    # be used for this detection to execute.
    # 1 enables; 0 disables
    fw_raid_component_detection = 0

    # By default, if a PV is placed directly upon an md device, LVM2
    # will align its data blocks with the md device's stripe-width.
    # 1 enables; 0 disables.
    md_chunk_alignment = 1

    # Default alignment of the start of a data area in MB.  If set to 0,
    # a value of 64KB will be used.  Set to 1 for 1MiB, 2 for 2MiB, etc.
    # default_data_alignment = @DEFAULT_DATA_ALIGNMENT@

    # By default, the start of a PV's data area will be a multiple of
    # the 'minimum_io_size' or 'optimal_io_size' exposed in sysfs.
    # - minimum_io_size - the smallest request the device can perform
    #   w/o incurring a read-modify-write penalty (e.g. MD's chunk size)
    # - optimal_io_size - the device's preferred unit of receiving I/O
    #   (e.g. MD's stripe width)
    # minimum_io_size is used if optimal_io_size is undefined (0).
    # If md_chunk_alignment is enabled, that detects the optimal_io_size.
    # This setting takes precedence over md_chunk_alignment.
    # 1 enables; 0 disables.
    data_alignment_detection = 1

    # Alignment (in KB) of start of data area when creating a new PV.
    # md_chunk_alignment and data_alignment_detection are disabled if set.
    # Set to 0 for the default alignment (see: data_alignment_default)
    # or page size, if larger.
    data_alignment = 0

    # By default, the start of the PV's aligned data area will be shifted by
    # the 'alignment_offset' exposed in sysfs.  This offset is often 0 but
    # may be non-zero; e.g.: certain 4KB sector drives that compensate for
    # windows partitioning will have an alignment_offset of 3584 bytes
    # (sector 7 is the lowest aligned logical block, the 4KB sectors start
    # at LBA -1, and consequently sector 63 is aligned on a 4KB boundary).
    # But note that pvcreate --dataalignmentoffset will skip this detection.
    # 1 enables; 0 disables.
    data_alignment_offset_detection = 1

    # If, while scanning the system for PVs, LVM2 encounters a device-mapper
    # device that has its I/O suspended, it waits for it to become accessible.
    # Set this to 1 to skip such devices.  This should only be needed
    # in recovery situations.
    ignore_suspended_devices = 0

    # ignore_lvm_mirrors:  Introduced in version 2.02.104
    # This setting determines whether logical volumes of "mirror" segment
    # type are scanned for LVM labels.  This affects the ability of
    # mirrors to be used as physical volumes.  If 'ignore_lvm_mirrors'
    # is set to '1', it becomes impossible to create volume groups on top
    # of mirror logical volumes - i.e. to stack volume groups on mirrors.
    #
    # Allowing mirror logical volumes to be scanned (setting the value to '0')
    # can potentially cause LVM processes and I/O to the mirror to become
    # blocked.  This is due to the way that the "mirror" segment type handles
    # failures.  In order for the hang to manifest itself, an LVM command must
    # be run just after a failure and before the automatic LVM repair process
    # takes place OR there must be failures in multiple mirrors in the same
    # volume group at the same time with write failures occurring moments
    # before a scan of the mirror's labels.
    #
    # Note that these scanning limitations do not apply to the LVM RAID
    # types, like "raid1".  The RAID segment types handle failures in a
    # different way and are not subject to possible process or I/O blocking.
    #
    # Users of the "mirror" segment type are encouraged to set
    # 'ignore_lvm_mirrors' to 1.  Users that require volume group
    # stacking on mirrored logical volumes should consider using the "raid1"
    # segment type.  The "raid1" segment type is not available for
    # active/active clustered volume groups.
    #
    # Set to 1 to disallow stacking and thereby avoid a possible deadlock.
    ignore_lvm_mirrors = 1

    # During each LVM operation errors received from each device are counted.
    # If the counter of a particular device exceeds the limit set here, no
    # further I/O is sent to that device for the remainder of the respective
    # operation. Setting the parameter to 0 disables the counters altogether.
    disable_after_error_count = 0

    # Allow use of pvcreate --uuid without requiring --restorefile.
    require_restorefile_with_uuid = 1

    # Minimum size (in KB) of block devices which can be used as PVs.
    # In a clustered environment all nodes must use the same value.
    # Any value smaller than 512KB is ignored.

    # Ignore devices smaller than 2MB such as floppy drives.
    pv_min_size = 2048

    # The original built-in setting was 512 up to and including version 2.02.84.
    # pv_min_size = 512

    # Issue discards to a logical volume's underlying physical volume(s) when
    # the logical volume is no longer using the physical volumes' space (e.g.
    # lvremove, lvreduce, etc).  Discards inform the storage that a region is
    # no longer in use.  Storage that supports discards advertises the protocol-
    # specific way discards should be issued by the kernel (TRIM, UNMAP, or
    # WRITE SAME with UNMAP bit set).  Not all storage will support or benefit
    # from discards but SSDs and thinly provisioned LUNs generally do.  If set
    # to 1, discards will only be issued if both the storage and kernel provide
    # support.
    # 1 enables; 0 disables.
    issue_discards = 0
}

# This section allows you to configure the way in which LVM selects
# free space for its Logical Volumes.
allocation {

    # When searching for free space to extend an LV, the "cling"
    # allocation policy will choose space on the same PVs as the last
    # segment of the existing LV.  If there is insufficient space and a
    # list of tags is defined here, it will check whether any of them are
    # attached to the PVs concerned and then seek to match those PV tags
    # between existing extents and new extents.
    # Use the special tag "@*" as a wildcard to match any PV tag.
 
    # Example: LVs are mirrored between two sites within a single VG.
    # PVs are tagged with either @site1 or @site2 to indicate where
    # they are situated.

    # cling_tag_list = [ "@site1", "@site2" ]
    # cling_tag_list = [ "@*" ]

    # Changes made in version 2.02.85 extended the reach of the 'cling'
    # policies to detect more situations where data can be grouped
    # onto the same disks.  Set this to 0 to revert to the previous
    # algorithm.
    maximise_cling = 1

    # Whether to use the blkid library instead of native LVM2 code to detect
    # any existing signatures while creating new Physical Volumes and
    # Logical Volumes. LVM2 needs to be compiled with blkid wiping support
    # for this setting to take effect.
    #
    # LVM2 native detection code is currently able to recognize these signatures:
    #   - MD device signature
    #   - swap signature
    #   - LUKS signature
    # To see the list of signatures recognized by blkid, check the output
    # of the 'blkid -k' command. blkid can recognize more signatures than the
    # LVM2 native detection code, but because more signatures have to be
    # checked, the signature scan can take more time to complete.
    use_blkid_wiping = 1

    # Set to 1 to wipe any signatures found on newly-created Logical Volumes
    # automatically in addition to zeroing of the first KB on the LV
    # (controlled by the -Z/--zero y option).
    # The command line option -W/--wipesignatures takes precedence over this
    # setting.
    # The default is to wipe signatures when zeroing.
    #
    wipe_signatures_when_zeroing_new_lvs = 1

    # Set to 1 to guarantee that mirror logs will always be placed on 
    # different PVs from the mirror images.  This was the default
    # until version 2.02.85.
    mirror_logs_require_separate_pvs = 0

    # Set to 1 to guarantee that cache_pool metadata will always be
    # placed on  different PVs from the cache_pool data.
    cache_pool_metadata_require_separate_pvs = 0

    # Specify the minimal chunk size (in kiB) for cache pool volumes.
    # Using a chunk_size that is too large can result in wasteful use of
    # the cache, where small reads and writes can cause large sections of
    # an LV to be mapped into the cache.  However, choosing a chunk_size
    # that is too small can result in more overhead trying to manage the
    # numerous chunks that become mapped into the cache.  The former is
    # more of a problem than the latter in most cases, so we default to
    # a value that is on the smaller end of the spectrum.  Supported values
    # range from 32(kiB) to 1048576 in multiples of 32.
    # cache_pool_chunk_size = 64

    # Specify the default cache mode used for new cache pools.
    # Possible options are:
    # "writethrough"    - Data blocks are immediately written from
    #                     the cache to disk.
    # "writeback"       - Data blocks are written from the cache
    #                     back to disk after some delay to improve
    #                     performance.
    # cache_pool_cachemode = "writethrough"

    # Set to 1 to guarantee that thin pool metadata will always
    # be placed on different PVs from the pool data.
    thin_pool_metadata_require_separate_pvs = 0

    # Specify chunk size calculation policy for thin pool volumes.
    # Possible options are:
    # "generic"        - if thin_pool_chunk_size is defined, use it.
    #                    Otherwise, calculate the chunk size based on
    #                    estimation and device hints exposed in sysfs:
    #                    the minimum_io_size. The chunk size is always
    #                    at least 64KiB.
    #
    # "performance"    - if thin_pool_chunk_size is defined, use it.
    # 			 Otherwise, calculate the chunk size for
    # 			 performance based on device hints exposed in
    # 			 sysfs: the optimal_io_size. The chunk size is
    # 			 always at least 512KiB.
    # thin_pool_chunk_size_policy = "generic"

    # Specify the minimal chunk size (in KB) for thin pool volumes.
    # Use of a larger chunk size may improve performance for plain
    # thin volumes, however using it for snapshot volumes is less efficient,
    # as it consumes more space and takes extra time for copying.
    # When unset, lvm tries to estimate the chunk size, starting from 64KB.
    # Supported values are in the range from 64 to 1048576.
    # thin_pool_chunk_size = 64

    # Specify discards behaviour of the thin pool volume.
    # Select one of  "ignore", "nopassdown", "passdown"
    # thin_pool_discards = "passdown"

    # Set to 0, to disable zeroing of thin pool data chunks before their
    # first use.
    # N.B. zeroing larger thin pool chunk size degrades performance.
    # thin_pool_zero = 1

    # Default physical extent size to use for newly created VGs (in KB).
    # physical_extent_size = 4096
}

# This section allows you to configure the nature of the
# information that LVM2 reports.
log {

    # Controls the messages sent to stdout or stderr.
    # There are three levels of verbosity, 3 being the most verbose.
    verbose = 0

    # Set to 1 to suppress all non-essential messages from stdout.
    # This has the same effect as -qq.
    # When this is set, the following commands still produce output:
    # dumpconfig, lvdisplay, lvmdiskscan, lvs, pvck, pvdisplay, 
    # pvs, version, vgcfgrestore -l, vgdisplay, vgs.
    # Non-essential messages are shifted from log level 4 to log level 5
    # for syslog and lvm2_log_fn purposes.
    # Any 'yes' or 'no' questions not overridden by other arguments
    # are suppressed and default to 'no'.
    silent = 0

    # Should we send log messages through syslog?
    # 1 is yes; 0 is no.
    syslog = 1

    # Should we log error and debug messages to a file?
    # By default there is no log file.
    #file = "/var/log/lvm2.log"

    # Should we overwrite the log file each time the program is run?
    # By default we append.
    overwrite = 0

    # What level of log messages should we send to the log file and/or syslog?
    # There are 6 syslog-like log levels currently in use - 2 to 7 inclusive.
    # 7 is the most verbose (LOG_DEBUG).
    level = 0

    # Format of output messages
    # Whether or not (1 or 0) to indent messages according to their severity
    indent = 1

    # Whether or not (1 or 0) to display the command name on each line output
    command_names = 0

    # A prefix to use before the message text (but after the command name,
    # if selected).  Default is two spaces, so you can see/grep the severity
    # of each message.
    prefix = "  "

    # To make the messages look similar to the original LVM tools use:
    #   indent = 0
    #   command_names = 1
    #   prefix = " -- "

    # Set this if you want log messages during activation.
    # Don't use this in low memory situations (can deadlock).
    # activation = 0

    # Some debugging messages are assigned to a class and only appear
    # in debug output if the class is listed here.
    # Classes currently available:
    #   memory, devices, activation, allocation, lvmetad, metadata, cache,
    #   locking
    # Use "all" to see everything.
    debug_classes = [ "memory", "devices", "activation", "allocation",
		      "lvmetad", "metadata", "cache", "locking" ]
}

# Configuration of metadata backups and archiving.  In LVM2 when we
# talk about a 'backup' we mean making a copy of the metadata for the
# *current* system.  The 'archive' contains old metadata configurations.
# Backups are stored in a human readable text format.
backup {

    # Should we maintain a backup of the current metadata configuration ?
    # Use 1 for Yes; 0 for No.
    # Think very hard before turning this off!
    backup = 1

    # Where shall we keep it ?
    # Remember to back up this directory regularly!
    backup_dir = "@DEFAULT_SYS_DIR@/@DEFAULT_BACKUP_SUBDIR@"

    # Should we maintain an archive of old metadata configurations ?
    # Use 1 for Yes; 0 for No.
    # On by default.  Think very hard before turning this off.
    archive = 1

    # Where should archived files go ?
    # Remember to back up this directory regularly!
    archive_dir = "@DEFAULT_SYS_DIR@/@DEFAULT_ARCHIVE_SUBDIR@"

    # What is the minimum number of archive files you wish to keep ?
    retain_min = 10

    # What is the minimum time you wish to keep an archive file for ?
    retain_days = 30
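
    # Archived metadata can later be listed with 'vgcfgrestore -l <vgname>'
    # and a chosen archive file restored with 'vgcfgrestore -f <file> <vgname>'.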
}

# Settings for the running LVM2 in shell (readline) mode.
shell {

    # Number of lines of history to store in ~/.lvm_history
    history_size = 100
}


# Miscellaneous global LVM2 settings
global {
    # The file creation mask for any files and directories created.
    # Interpreted as octal if the first digit is zero.
    umask = 077

    # Allow other users to read the files
    #umask = 022

    # Enabling test mode means that no changes to the on disk metadata
    # will be made.  Equivalent to having the -t option on every
    # command.  Defaults to off.
    test = 0

    # Default value for --units argument
    units = "h"

    # Since version 2.02.54, the tools distinguish between powers of
    # 1024 bytes (e.g. KiB, MiB, GiB) and powers of 1000 bytes (e.g.
    # KB, MB, GB).
    # If you have scripts that depend on the old behaviour, set this to 0
    # temporarily until you update them.
    si_unit_consistency = 1
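
    # For example, with this enabled, 'lvs --units m' reports multiples of
    # 1024^2 bytes while 'lvs --units M' reports multiples of 1000^2 bytes.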

    # Whether or not to display unit suffix for sizes. This setting has
    # no effect if the units are in human-readable form (global/units="h")
    # in which case the suffix is always displayed.
    suffix = 1

    # Whether or not to communicate with the kernel device-mapper.
    # Set to 0 if you want to use the tools to manipulate LVM metadata 
    # without activating any logical volumes.
    # If the device-mapper kernel driver is not present in your kernel
    # setting this to 0 should suppress the error messages.
    activation = 1

    # If we can't communicate with device-mapper, should we try running 
    # the LVM1 tools?
    # This option only applies to 2.4 kernels and is provided to help you
    # switch between device-mapper kernels and LVM1 kernels.
    # The LVM1 tools need to be installed with .lvm1 suffices
    # e.g. vgscan.lvm1 and they will stop working after you start using
    # the new lvm2 on-disk metadata format.
    # The default value is set when the tools are built.
    # fallback_to_lvm1 = 0

    # The default metadata format that commands should use - "lvm1" or "lvm2".
    # The command line override is -M1 or -M2.
    # Defaults to "lvm2".
    # format = "lvm2"

    # Location of proc filesystem
    proc = "/proc"

    # Type of locking to use. Defaults to local file-based locking (1).
    # Turn locking off by setting to 0 (dangerous: risks metadata corruption
    # if LVM2 commands get run concurrently).
    # Type 2 uses the external shared library locking_library.
    # Type 3 uses built-in clustered locking.
    # Type 4 uses read-only locking which forbids any operations that might 
    # change metadata.
    # Type 5 offers dummy locking for tools that do not need any locks.
    # You should not need to set this directly: the tools will select when
    # to use it instead of the configured locking_type.  Do not use lvmetad or
    # the kernel device-mapper driver with this locking type.
    # It is used by the --readonly option that offers read-only access to
    # Volume Group metadata that cannot be locked safely because it belongs to
    # an inaccessible domain and might be in use, for example a virtual machine
    # image or a disk that is shared by a clustered machine.  
    #
    # N.B. Don't use lvmetad with locking type 3 as lvmetad is not yet
    # supported in clustered environment. If use_lvmetad=1 and locking_type=3
    # is set at the same time, LVM always issues a warning message about this
    # and then it automatically disables lvmetad use.
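    #
    # The read-only locking (type 4) is normally reached via the --readonly
    # command line option, e.g. 'vgs --readonly' (shown for illustration).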
    locking_type = 1

    # Set to 0 to fail when a lock request cannot be satisfied immediately.
    wait_for_locks = 1

    # If using external locking (type 2) and initialisation fails,
    # with this set to 1 an attempt will be made to use the built-in
    # clustered locking.
    # If you are using a customised locking_library you should set this to 0.
    fallback_to_clustered_locking = 1

    # If an attempt to initialise type 2 or type 3 locking failed, perhaps
    # because cluster components such as clvmd are not running, with this set
    # to 1 an attempt will be made to use local file-based locking (type 1).
    # If this succeeds, only commands against local volume groups will proceed.
    # Volume Groups marked as clustered will be ignored.
    fallback_to_local_locking = 1

    # Local non-LV directory that holds file-based locks while commands are
    # in progress.  A directory like /tmp that may get wiped on reboot is OK.
    locking_dir = "@DEFAULT_LOCK_DIR@"

    # Whenever there are competing read-only and read-write access requests for
    # a volume group's metadata, instead of always granting the read-only
    # requests immediately, delay them to allow the read-write requests to be
    # serviced.  Without this setting, write access may be stalled by a high
    # volume of read-only requests.
    # NB. This option only affects locking_type = 1 viz. local file-based
    # locking.
    prioritise_write_locks = 1

    # Other entries can go here to allow you to load shared libraries
    # e.g. if support for LVM1 metadata was compiled as a shared library use
    #   format_libraries = "liblvm2format1.so" 
    # Full pathnames can be given.

    # Search this directory first for shared libraries.
    #   library_dir = "/lib"

    # The external locking library to load if locking_type is set to 2.
    #   locking_library = "liblvm2clusterlock.so"

    # Treat any internal errors as fatal errors, aborting the process that
    # encountered the internal error. Please only enable for debugging.
    abort_on_internal_errors = 0

    # Check whether CRC is matching when parsed VG is used multiple times.
    # This is useful to catch unexpected internal cached volume group
    # structure modification. Please only enable for debugging.
    detect_internal_vg_cache_corruption = 0

    # If set to 1, no operations that change on-disk metadata will be permitted.
    # Additionally, read-only commands that encounter metadata in need of repair
    # will still be allowed to proceed exactly as if the repair had been 
    # performed (except for the unchanged vg_seqno).
    # Inappropriate use could mess up your system, so seek advice first!
    metadata_read_only = 0

    # 'mirror_segtype_default' defines which segtype will be used when the
    # shorthand '-m' option is used for mirroring.  The possible options are:
    #
    # "mirror" - The original RAID1 implementation provided by LVM2/DM.  It is
    # 	         characterized by a flexible log solution (core, disk, mirrored)
    #		 and by the necessity to block I/O while reconfiguring in the
    #		 event of a failure.
    #
    #		 There is an inherent race in the dmeventd failure handling
    #		 logic with snapshots of devices using this type of RAID1 that
    #		 in the worst case could cause a deadlock.
    #		   Ref: https://bugzilla.redhat.com/show_bug.cgi?id=817130#c10
    #
    # "raid1"  - This implementation leverages MD's RAID1 personality through
    # 	       	 device-mapper.  It is characterized by a lack of log options.
    #		 (A log is always allocated for every device and they are placed
    #		 on the same device as the image - no separate devices are
    #		 required.)  This mirror implementation does not require I/O
    #		 to be blocked in the kernel in the event of a failure.
    #		 This mirror implementation is not cluster-aware and cannot be
    #		 used in a shared (active/active) fashion in a cluster.
    #
    # Specify the '--type <mirror|raid1>' option to override this default
    # setting.
    mirror_segtype_default = "@DEFAULT_MIRROR_SEGTYPE@"
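
    # For example, regardless of this default, 'lvcreate --type raid1 -m 1
    # -L 1G -n lv0 vg0' creates a raid1 mirror (names and size illustrative).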

    # 'raid10_segtype_default' determines the segment types used by default
    # when the '--stripes/-i' and '--mirrors/-m' arguments are both specified
    # during the creation of a logical volume.
    # Possible settings include:
    #
    # "raid10" - This implementation leverages MD's RAID10 personality through
    #            device-mapper.
    #
    # "mirror" - LVM will layer the 'mirror' and 'stripe' segment types.  It
    #            will do this by creating a mirror on top of striped sub-LVs;
    #            effectively creating a RAID 0+1 array.  This is suboptimal
    #            in terms of providing redundancy and performance. Changing to
    #            this setting is not advised.
    # Specify the '--type <raid10|mirror>' option to override this default
    # setting.
    raid10_segtype_default = "@DEFAULT_RAID10_SEGTYPE@"

    # 'sparse_segtype_default' defines which segtype will be used when the
    # shorthand '-V and -L' option is used for sparse volume creation.
    #
    # "snapshot" - The original snapshot implementation provided by LVM2/DM.
    #		   It uses the old snapshot target that mixes data and metadata
    #		   within a single COW storage volume and performs poorly when
    #		   the size of the stored data exceeds hundreds of MB.
    #
    # "thin"     - Newer implementation leverages thin provisioning target.
    #		   It has bigger minimal chunk size (64KiB) and uses separate volume
    #		   for metadata. It has better performance especially in case of
    #		   bigger data uses. This device type has also full snapshot support.
    #
    # Specify the '--type <snapshot|thin>' option to override this default
    # setting.
    sparse_segtype_default = "@DEFAULT_SPARSE_SEGTYPE@"


    # The default format for displaying LV names in lvdisplay was changed 
    # in version 2.02.89 to show the LV name and path separately.
    # Previously this was always shown as /dev/vgname/lvname even when that
    # was never a valid path in the /dev filesystem.
    # Set to 1 to reinstate the previous format.
    #
    # lvdisplay_shows_full_device_path = 0

    # Whether to use (trust) a running instance of lvmetad. If this is set to
    # 0, all commands fall back to the usual scanning mechanisms. When set to 1
    # *and* when lvmetad is running (automatically instantiated by making use of
    # systemd's socket-based service activation or run as an initscripts service
    # or run manually), the volume group metadata and PV state flags are obtained
    # from the lvmetad instance and no scanning is done by the individual
    # commands. In a setup with lvmetad, lvmetad udev rules *must* be set up for
    # LVM to work correctly. Without proper udev rules, all changes in block
    # device configuration will be *ignored* until a manual 'pvscan --cache'
    # is performed. These rules are installed by default.
    #
    # If lvmetad has been running while use_lvmetad was 0, it MUST be stopped
    # before changing use_lvmetad to 1 and started again afterwards.
    #
    # If using lvmetad, volume activation is also switched to automatic
    # event-based mode. In this mode, the volumes are activated based on
    # incoming udev events that automatically inform lvmetad about new PVs that
    # appear in the system. Once a VG is complete (all the PVs are present), it
    # is auto-activated. The activation/auto_activation_volume_list setting
    # controls which volumes are auto-activated (all by default).

    # A note about device filtering while lvmetad is used:

    # When lvmetad is updated (either automatically based on udev events or
    # directly by a pvscan --cache <device> call), devices/filter is ignored and
    # all devices are scanned by default -- lvmetad always keeps unfiltered
    # information which is then provided to LVM commands and then each LVM
    # command does the filtering based on devices/filter setting itself.  This
    # does not apply to non-regexp filters though: component filters such as
    # multipath and MD are checked at pvscan --cache time.

    # In order to completely prevent LVM from scanning a device, even when using
    # lvmetad, devices/global_filter must be used.

    # N.B. Don't use lvmetad with locking type 3 as lvmetad is not yet
    # supported in clustered environment. If use_lvmetad=1 and locking_type=3
    # is set at the same time, LVM always issues a warning message about this
    # and then it automatically disables use_lvmetad.

    use_lvmetad = 0
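
    # If the udev rules are not in place, lvmetad can be refreshed manually,
    # e.g. with 'pvscan --cache' for all devices or 'pvscan --cache /dev/sdb'
    # for a single device (device name for illustration only).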

    # Full path of the utility called to check that a thin metadata device
    # is in a state that allows it to be used.
    # Each time a thin pool needs to be activated or after it is deactivated
    # this utility is executed. The activation will only proceed if the utility
    # has an exit status of 0.
    # Set to "" to skip this check.  (Not recommended.)
    # The thin tools are available as part of the device-mapper-persistent-data
    # package from https://github.com/jthornber/thin-provisioning-tools.
    #
    # thin_check_executable = "@THIN_CHECK_CMD@"

    # Array of string options passed with thin_check command. By default,
    # option "-q" is for quiet output.
    # With thin_check version 2.1 or newer you can add "--ignore-non-fatal-errors"
    # to let it pass through ignorable errors and fix them later.
    # With thin_check version 3.2 or newer you should add
    # "--clear-needs-check-flag".
    #
    # thin_check_options = [ "-q", "--clear-needs-check-flag" ]

    # Full path of the utility called to repair a thin metadata device.
    # Each time a thin pool needs repair this utility is executed.
    # See thin_check_executable for how to obtain the binaries.
    #
    # thin_repair_executable = "@THIN_REPAIR_CMD@"

    # Array of extra string options passed with thin_repair command.
    # thin_repair_options = [ "" ]

    # Full path of the utility called to dump thin metadata content.
    # See thin_check_executable for how to obtain the binaries.
    #
    # thin_dump_executable = "@THIN_DUMP_CMD@"

    # If set, the given features are not used by the thin driver.
    # This can be helpful not only for testing, but also, for example, to
    # avoid using a problematic implementation of some thin feature.
    # Features:
    #   block_size
    #   discards
    #   discards_non_power_2
    #   external_origin
    #   metadata_resize
    #   external_origin_extend
    #   error_if_no_space
    #
    # thin_disabled_features = [ "discards", "block_size" ]

    # Full path of the utility called to check that a cache metadata device
    # is in a state that allows it to be used.
    # Each time a cached LV needs to be used or after it is deactivated
    # this utility is executed. The activation will only proceed if the utility
    # has an exit status of 0.
    # Set to "" to skip this check.  (Not recommended.)
    # The cache tools are available as part of the device-mapper-persistent-data
    # package from https://github.com/jthornber/thin-provisioning-tools.
    #
    # cache_check_executable = "@CACHE_CHECK_CMD@"

    # Array of string options passed with cache_check command. By default,
    # option "-q" is for quiet output.
    #
    # cache_check_options = [ "-q" ]

    # Full path of the utility called to repair a cache metadata device.
    # Each time a cache metadata needs repair this utility is executed.
    # See cache_check_executable for how to obtain the binaries.
    #
    # cache_repair_executable = "@CACHE_REPAIR_CMD@"

    # Array of extra string options passed with cache_repair command.
    # cache_repair_options = [ "" ]

    # Full path of the utility called to dump cache metadata content.
    # See cache_check_executable for how to obtain the binaries.
    #
    # cache_dump_executable = "@CACHE_DUMP_CMD@"

    # Defines the method lvm will use to find the local system_id.
    # The options are: none, machineid, uname, lvmlocal, or file.
    # Unset, or set to an empty string is equivalent to "none".
    # See lvmsystemid(7) for more information.
    #
    # system_id_source = ""

    # Specifies the path to a file containing the local system_id.
    # It is only used when system_id_source = "file".
    # See lvmsystemid(7) for more information.
    #
    # system_id_file = ""
}

activation {
    # Set to 1 to perform internal checks on the operations issued to
    # libdevmapper.  Useful for debugging problems with activation.
    # Some of the checks may be expensive, so it's best to use this
    # only when there seems to be a problem.
    checks = 0

    # Set to 0 to disable udev synchronisation (if compiled into the binaries).
    # Processes will not wait for notification from udev.
    # They will continue irrespective of any possible udev processing
    # in the background.  You should only use this if udev is not running
    # or has rules that ignore the devices LVM2 creates.
    # The command line argument --noudevsync takes precedence over this setting.
    # If set to 1 when udev is not running, and there are LVM2 processes
    # waiting for udev, run 'dmsetup udevcomplete_all' manually to wake them up.
    udev_sync = 1

    # Set to 0 to disable the udev rules installed by LVM2 (if built with
    # --enable-udev_rules). LVM2 will then manage the /dev nodes and symlinks
    # for active logical volumes directly itself.
    # N.B. Manual intervention may be required if this setting is changed
    # while any logical volumes are active.
    udev_rules = 1

    # Set to 1 for LVM2 to verify operations performed by udev. This turns on
    # additional checks (and if necessary, repairs) on entries in the device
    # directory after udev has completed processing its events. 
    # Useful for diagnosing problems with LVM2/udev interactions.
    verify_udev_operations = 0

    # If set to 1 and if deactivation of an LV fails, perhaps because
    # a process run from a quick udev rule temporarily opened the device,
    # retry the operation for a few seconds before failing.
    retry_deactivation = 1

    # How to fill in missing stripes if activating an incomplete volume.
    # Using "error" will make inaccessible parts of the device return
    # I/O errors on access.  You can instead use a device path, in which 
    # case, that device will be used in place of missing stripes.
    # But note that using anything other than "error" with mirrored 
    # or snapshotted volumes is likely to result in data corruption.
    missing_stripe_filler = "error"

    # The linear target is an optimised version of the striped target
    # that only handles a single stripe.  Set this to 0 to disable this
    # optimisation and always use the striped target.
    use_linear_target = 1

    # How much stack (in KB) to reserve for use while devices suspended
    # Prior to version 2.02.89 this used to be set to 256KB
    reserved_stack = 64

    # How much memory (in KB) to reserve for use while devices suspended
    reserved_memory = 8192

    # Nice value used while devices suspended
    process_priority = -18

    # If volume_list is defined, each LV is only activated if there is a
    # match against the list.
    #
    #   "vgname" and "vgname/lvname" are matched exactly.
    #   "@tag" matches any tag set in the LV or VG.
    #   "@*" matches if any tag defined on the host is also set in the LV or VG
    #
    # If any host tags exist but volume_list is not defined, a default
    # single-entry list containing "@*" is assumed.
    #
    # volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]

    # If auto_activation_volume_list is defined, each LV that is to be
    # activated with the autoactivation option (--activate ay/-a ay) is
    # first checked against the list. There are two scenarios in which
    # the autoactivation option is used:
    #
    #   - automatic activation of volumes based on incoming PVs. If all the
    #     PVs making up a VG are present in the system, the autoactivation
    #     is triggered. This requires lvmetad (global/use_lvmetad=1) and udev
    #     to be running. In this case, "pvscan --cache -aay" is called
    #     automatically without any user intervention while processing
    #     udev events. Please, make sure you define auto_activation_volume_list
    #     properly so only the volumes you want and expect are autoactivated.
    #
    #   - direct activation on command line with the autoactivation option.
    #     In this case, the user calls "vgchange --activate ay/-a ay" or
    #     "lvchange --activate ay/-a ay" directly.
    #
    # By default, the auto_activation_volume_list is not defined and all
    # volumes will be activated either automatically or by using --activate ay/-a ay.
    #
    # N.B. The "activation/volume_list" is still honoured in all cases so even
    # if the VG/LV passes the auto_activation_volume_list, it still needs to
    # pass the volume_list for it to be activated in the end.

    # If auto_activation_volume_list is defined but empty, no volumes will be
    # activated automatically and --activate ay/-a ay will do nothing.
    #
    # auto_activation_volume_list = []

    # If auto_activation_volume_list is defined and it's not empty, only matching
    # volumes will be activated either automatically or by using --activate ay/-a ay.
    #
    #   "vgname" and "vgname/lvname" are matched exactly.
    #   "@tag" matches any tag set in the LV or VG.
    #   "@*" matches if any tag defined on the host is also set in the LV or VG
    #
    # auto_activation_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]

    # If read_only_volume_list is defined, each LV that is to be activated 
    # is checked against the list, and if it matches, it is activated
    # in read-only mode.  (This overrides '--permission rw' stored in the
    # metadata.)
    #
    #   "vgname" and "vgname/lvname" are matched exactly.
    #   "@tag" matches any tag set in the LV or VG.
    #   "@*" matches if any tag defined on the host is also set in the LV or VG
    #
    # read_only_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]

    # Each LV can have an 'activation skip' flag stored persistently against it.
    # During activation, this flag is used to decide whether such an LV is skipped.
    # The 'activation skip' flag can be set during LV creation and by default it
    # is automatically set for thin snapshot LVs. The 'auto_set_activation_skip'
    # enables or disables this automatic setting of the flag while LVs are created.
    # auto_set_activation_skip = 1

    # Control error behavior when a provisioned device becomes full.  This
    # determines the default --errorwhenfull setting of new thin pools.
    # The command line option --errorwhenfull takes precedence over this
    # setting.  error_when_full 0 means --errorwhenfull n.
    #
    # error_when_full = 0

    # For RAID or 'mirror' segment types, 'raid_region_size' is the
    # size (in KiB) of each:
    # - synchronization operation when initializing
    # - each copy operation when performing a 'pvmove' (using 'mirror' segtype)
    # This setting has replaced 'mirror_region_size' since version 2.02.99
    raid_region_size = 512

    # Setting to use when there is no readahead value stored in the metadata.
    #
    # "none" - Disable readahead.
    # "auto" - Use default value chosen by kernel.
    readahead = "auto"

    # 'raid_fault_policy' defines how a device failure in a RAID logical
    # volume is handled.  This includes logical volumes that have the following
    # segment types: raid1, raid4, raid5*, and raid6*.
    #
    # In the event of a failure, the following policies will determine what
    # actions are performed during the automated response to failures (when
    # dmeventd is monitoring the RAID logical volume) and when 'lvconvert' is
    # called manually with the options '--repair' and '--use-policies'.
    #
    # "warn"	- Use the system log to warn the user that a device in the RAID
    # 		  logical volume has failed.  It is left to the user to run
    #		  'lvconvert --repair' manually to remove or replace the failed
    #		  device.  As long as the number of failed devices does not
    #		  exceed the redundancy of the logical volume (1 device for
    #		  raid4/5, 2 for raid6, etc) the logical volume will remain
    #		  usable.
    #
    # "allocate" - Attempt to use any extra physical volumes in the volume
    # 		  group as spares and replace faulty devices.
    #
    raid_fault_policy = "warn"

    # 'mirror_image_fault_policy' and 'mirror_log_fault_policy' define
    # how a device failure affecting a mirror (of "mirror" segment type) is
    # handled.  A mirror is composed of mirror images (copies) and a log.
    # A disk log ensures that a mirror does not need to be re-synced
    # (all copies made the same) every time a machine reboots or crashes.
    #
    # In the event of a failure, the specified policy will be used to determine
    # what happens. This applies to automatic repairs (when the mirror is being
    # monitored by dmeventd) and to manual lvconvert --repair when
    # --use-policies is given.
    #
    # "remove" - Simply remove the faulty device and run without it.  If
    #            the log device fails, the mirror would convert to using
    #            an in-memory log.  This means the mirror will not
    #            remember its sync status across crashes/reboots and
    #            the entire mirror will be re-synced.  If a
    #            mirror image fails, the mirror will convert to a
    #            non-mirrored device if there is only one remaining good
    #            copy.
    #
    # "allocate" - Remove the faulty device and try to allocate space on
    #            a new device to be a replacement for the failed device.
    #            Using this policy for the log is fast and maintains the
    #            ability to remember sync state through crashes/reboots.
    #            Using this policy for a mirror device is slow, as it
    #            requires the mirror to resynchronize the devices, but it
    #            will preserve the mirror characteristic of the device.
    #            This policy acts like "remove" if no suitable device and
    #            space can be allocated for the replacement.
    #
    # "allocate_anywhere" - Not yet implemented. Useful to place the log device
    #            temporarily on same physical volume as one of the mirror
    #            images. This policy is not recommended for mirror devices
    #            since it would break the redundant nature of the mirror. This
    #            policy acts like "remove" if no suitable device and space can
    #            be allocated for the replacement.

    mirror_log_fault_policy = "allocate"
    mirror_image_fault_policy = "remove"

    # 'snapshot_autoextend_threshold' and 'snapshot_autoextend_percent' define
    # how to handle automatic snapshot extension. The former defines when the
    # snapshot should be extended: when its space usage exceeds this many
    # percent. The latter defines how much extra space should be allocated for
    # the snapshot, in percent of its current size.
    #
    # For example, if you set snapshot_autoextend_threshold to 70 and
    # snapshot_autoextend_percent to 20, whenever a snapshot exceeds 70% usage,
    # it will be extended by another 20%. For a 1G snapshot, using up 700M will
    # trigger a resize to 1.2G. When the usage exceeds 840M, the snapshot will
    # be extended to 1.44G, and so on.
    #
    # Setting snapshot_autoextend_threshold to 100 disables automatic
    # extensions. The minimum value is 50 (A setting below 50 will be treated
    # as 50).

    snapshot_autoextend_threshold = 100
    snapshot_autoextend_percent = 20

    # 'thin_pool_autoextend_threshold' and 'thin_pool_autoextend_percent' define
    # how to handle automatic pool extension. The former defines when the
    # pool should be extended: when its space usage exceeds this many
    # percent. The latter defines how much extra space should be allocated for
    # the pool, in percent of its current size.
    #
    # For example, if you set thin_pool_autoextend_threshold to 70 and
    # thin_pool_autoextend_percent to 20, whenever a pool exceeds 70% usage,
    # it will be extended by another 20%. For a 1G pool, using up 700M will
    # trigger a resize to 1.2G. When the usage exceeds 840M, the pool will
    # be extended to 1.44G, and so on.
    #
    # Setting thin_pool_autoextend_threshold to 100 disables automatic
    # extensions. The minimum value is 50 (A setting below 50 will be treated
    # as 50).

    thin_pool_autoextend_threshold = 100
    thin_pool_autoextend_percent = 20

    # While activating devices, I/O to devices being (re)configured is
    # suspended, and as a precaution against deadlocks, LVM2 needs to pin
    # any memory it is using so it is not paged out.  Groups of pages that
    # are known not to be accessed during activation need not be pinned
    # into memory.  Each string listed in this setting is compared against
    # each line in /proc/self/maps, and the pages corresponding to any
    # lines that match are not pinned.  On some systems locale-archive was
    # found to make up over 80% of the memory used by the process.
    # mlock_filter = [ "locale/locale-archive", "gconv/gconv-modules.cache" ]
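
    # To find further candidates for this list, the maps of a running LVM2
    # command can be inspected, for example (illustrative; replace PID with a
    # real process id):
    #   grep -E 'locale-archive|gconv' /proc/PID/maps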

    # Set to 1 to revert to the default behaviour prior to version 2.02.62
    # which used mlockall() to pin the whole process's memory while activating
    # devices.
    use_mlockall = 0

    # Monitoring is enabled by default when activating logical volumes.
    # Set to 0 to disable monitoring or use the --ignoremonitoring option.
    monitoring = 1
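
    # Monitoring can also be skipped for a single command without changing
    # this setting, for example (illustrative; vg00 is a placeholder VG name):
    #   vgchange -ay --ignoremonitoring vg00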

    # When pvmove or lvconvert must wait for the kernel to finish
    # synchronising or merging data, they check and report progress
    # at intervals of this number of seconds.  The default is 15 seconds.
    # If this is set to 0 and there is only one thing to wait for, there
    # are no progress reports, but the process is woken as soon as the
    # operation is complete.
    polling_interval = 15
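
    # For example (illustrative), to check and report progress only once per
    # minute:
    #
    # polling_interval = 60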

    # 'activation_mode' determines how Logical Volumes are activated if
    # any devices are missing.  Possible settings are:
    #
    #	"complete" -  Only allow activation of an LV if all of the Physical
    #		      Volumes it uses are present.  Other PVs in the Volume
    #		      Group may be missing.
    #
    #	"degraded" -  Like "complete", but additionally RAID Logical Volumes of
    #		      segment type raid1, raid4, raid5, raid6 and raid10 will
    #		      be activated if there is no data loss, i.e. they have
    #		      sufficient redundancy to present the entire addressable
    #		      range of the Logical Volume.
    #
    #	"partial"  -  Allows the activation of any Logical Volume even if
    #		      a missing or failed PV could cause data loss with a
    #		      portion of the Logical Volume inaccessible.
    #		      This setting should not normally be used, but may
    #		      sometimes assist with data recovery.
    #
    # This setting was introduced in LVM version 2.02.108.  It corresponds
    # with the '--activationmode' option for lvchange and vgchange.
    activation_mode = "degraded"
}

# Report settings.
#
# report {
    # If compact output is enabled, fields which don't have a value
    # set for any of the rows reported are skipped on output. Compact
    # output is applicable only if report is buffered (report/buffered=1).
    # compact_output=0

    # Align columns on report output.
    # aligned=1

    # When buffered reporting is used, the report's content is appended
    # incrementally to include each object being reported until the report
    # is flushed to output which normally happens at the end of command
    # execution. Otherwise, if buffering is not used, each object is
    # reported as soon as its processing is finished.
    # buffered=1

    # Show headings for columns on report.
    # headings=1

    # A separator to use on report after each field.
    # separator=" "

    # A separator to use for list items when reported.
    # list_item_separator=","

    # Use a field name prefix for each field reported.
    # prefixes=0

    # Quote field values when using field name prefixes.
    # quoted=1

    # Output each column as a row. If set, this also implies report/prefixes=1.
    # columns_as_rows=0

    # Use binary values "0" or "1" instead of descriptive literal values for
    # columns that have exactly two valid values to report (not counting the
    # "unknown" value which denotes that the value could not be determined).
    #
    # binary_values_as_numeric = 0

    # Comma separated list of columns to sort by when reporting 'lvm devtypes' command.
    # See 'lvm devtypes -o help' for the list of possible fields.
    # devtypes_sort="devtype_name"

    # Comma separated list of columns to report for 'lvm devtypes' command.
    # See 'lvm devtypes -o help' for the list of possible fields.
    # devtypes_cols="devtype_name,devtype_max_partitions,devtype_description"

    # Comma separated list of columns to report for 'lvm devtypes' command in verbose mode.
    # See 'lvm devtypes -o help' for the list of possible fields.
    # devtypes_cols_verbose="devtype_name,devtype_max_partitions,devtype_description"

    # Comma separated list of columns to sort by when reporting 'lvs' command.
    # See 'lvs -o help' for the list of possible fields.
    # lvs_sort="vg_name,lv_name"

    # Comma separated list of columns to report for 'lvs' command.
    # See 'lvs -o help' for the list of possible fields.
    # lvs_cols="lv_name,vg_name,lv_attr,lv_size,pool_lv,origin,data_percent,metadata_percent,move_pv,mirror_log,copy_percent,convert_lv"

    # Comma separated list of columns to report for 'lvs' command in verbose mode.
    # See 'lvs -o help' for the list of possible fields.
    # lvs_cols_verbose="lv_name,vg_name,seg_count,lv_attr,lv_size,lv_major,lv_minor,lv_kernel_major,lv_kernel_minor,pool_lv,origin,data_percent,metadata_percent,move_pv,copy_percent,mirror_log,convert_lv"

    # Comma separated list of columns to sort by when reporting 'vgs' command.
    # See 'vgs -o help' for the list of possible fields.
    # vgs_sort="vg_name"

    # Comma separated list of columns to report for 'vgs' command.
    # See 'vgs -o help' for the list of possible fields.
    # vgs_cols="vg_name,pv_count,lv_count,snap_count,vg_attr,vg_size,vg_free"

    # Comma separated list of columns to report for 'vgs' command in verbose mode.
    # See 'vgs -o help' for the list of possible fields.
    # vgs_cols_verbose="vg_name,vg_attr,vg_extent_size,pv_count,lv_count,snap_count,vg_size,vg_free,vg_uuid,vg_profile"

    # Comma separated list of columns to sort by when reporting 'pvs' command.
    # See 'pvs -o help' for the list of possible fields.
    # pvs_sort="pv_name"

    # Comma separated list of columns to report for 'pvs' command.
    # See 'pvs -o help' for the list of possible fields.
    # pvs_cols="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free"

    # Comma separated list of columns to report for 'pvs' command in verbose mode.
    # See 'pvs -o help' for the list of possible fields.
    # pvs_cols_verbose="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,dev_size,pv_uuid"

    # Comma separated list of columns to sort by when reporting 'lvs --segments' command.
    # See 'lvs --segments -o help' for the list of possible fields.
    # segs_sort="vg_name,lv_name,seg_start"

    # Comma separated list of columns to report for 'lvs --segments' command.
    # See 'lvs --segments -o help' for the list of possible fields.
    # segs_cols="lv_name,vg_name,lv_attr,stripes,segtype,seg_size"

    # Comma separated list of columns to report for 'lvs --segments' command in verbose mode.
    # See 'lvs --segments -o help' for the list of possible fields.
    # segs_cols_verbose="lv_name,vg_name,lv_attr,seg_start,seg_size,stripes,segtype,stripesize,chunksize"

    # Comma separated list of columns to sort by when reporting 'pvs --segments' command.
    # See 'pvs --segments -o help' for the list of possible fields.
    # pvsegs_sort="pv_name,pvseg_start"

    # Comma separated list of columns to report for 'pvs --segments' command.
    # See 'pvs --segments -o help' for the list of possible fields.
    # pvsegs_cols="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size"

    # Comma separated list of columns to report for 'pvs --segments' command in verbose mode.
    # See 'pvs --segments -o help' for the list of possible fields.
    # pvsegs_cols_verbose="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size,lv_name,seg_start_pe,segtype,seg_pe_ranges"
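
    # As an illustration (values are examples only), a more compact,
    # script-friendly report could be produced by uncommenting and setting:
    #
    # compact_output=1
    # prefixes=1
    # quoted=1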
#}

####################
# Advanced section #
####################

# Metadata settings
#
# metadata {
    # Default number of copies of metadata to hold on each PV.  0, 1 or 2.
    # You might want to override it from the command line with 0 
    # when running pvcreate on new PVs which are to be added to large VGs.

    # pvmetadatacopies = 1
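
    # For example (illustrative; /dev/sdX is a placeholder device), to create
    # a PV that holds no metadata copies of its own:
    #   pvcreate --pvmetadatacopies 0 /dev/sdX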

    # Default number of copies of metadata to maintain for each VG.
    # If set to a non-zero value, LVM automatically chooses which of
    # the available metadata areas to use to achieve the requested
    # number of copies of the VG metadata.  If you set a value larger
    # than the total number of metadata areas available then
    # metadata is stored in them all.
    # The default value of 0 ("unmanaged") disables this automatic
    # management and allows you to control which metadata areas
    # are used at the individual PV level using 'pvchange
    # --metadataignore y/n'.

    # vgmetadatacopies = 0
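
    # For example (illustrative; /dev/sdX is a placeholder device), with the
    # default "unmanaged" setting you can exclude an individual PV's metadata
    # areas by hand:
    #   pvchange --metadataignore y /dev/sdX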

    # Approximate default size of on-disk metadata areas in sectors.
    # You should increase this if you have large volume groups or
    # you want to retain a large on-disk history of your metadata changes.

    # pvmetadatasize = 255
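
    # For example (illustrative), to reserve roughly 1MiB (2048 512-byte
    # sectors) per metadata area:
    #
    # pvmetadatasize = 2048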

    # List of directories holding live copies of text format metadata.
    # These directories must not be on logical volumes!
    # It's possible to use LVM2 with a couple of directories here,
    # preferably on different (non-LV) filesystems, and with no other 
    # on-disk metadata (pvmetadatacopies = 0). Or this can be in
    # addition to on-disk metadata areas.
    # The feature was originally added to simplify testing and is not
    # supported under low memory situations - the machine could lock up.
    #
    # Never edit any files in these directories by hand unless you
    # are absolutely sure you know what you are doing! Use
    # the supplied toolset to make changes (e.g. vgcfgrestore).

    # dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ]
#}

# Event daemon
#
dmeventd {
    # mirror_library is the library used when monitoring a mirror device.
    #
    # "libdevmapper-event-lvm2mirror.so" attempts to recover from
    # failures.  It removes failed devices from a volume group and
    # reconfigures a mirror as necessary. If no mirror library is
    # provided, mirrors are not monitored through dmeventd.

    mirror_library = "libdevmapper-event-lvm2mirror.so"

    # snapshot_library is the library used when monitoring a snapshot device.
    #
    # "libdevmapper-event-lvm2snapshot.so" monitors the filling of
    # snapshots and emits a warning through syslog when the use of
    # the snapshot exceeds 80%. The warning is repeated when 85%, 90% and
    # 95% of the snapshot is filled.

    snapshot_library = "libdevmapper-event-lvm2snapshot.so"

    # thin_library is the library used when monitoring a thin device.
    #
    # "libdevmapper-event-lvm2thin.so" monitors the filling of
    # pool and emits a warning through syslog when the use of
    # the pool exceeds 80%. The warning is repeated when 85%, 90% and
    # 95% of the pool is filled.

    thin_library = "libdevmapper-event-lvm2thin.so"

    # Full path of the dmeventd binary.
    #
    # executable = "@DMEVENTD_PATH@"
}