path: root/daemons/lvmlockd/lvmlockd-sanlock.c
/*
 * Copyright (C) 2014-2015 Red Hat, Inc.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 */

#define _XOPEN_SOURCE 500  /* pthread */
#define _ISOC99_SOURCE

#include "tool.h"

#include "daemon-server.h"
#include "daemon-log.h"
#include "xlate.h"

#include "lvmlockd-internal.h"
#include "lvmlockd-client.h"

#include "sanlock.h"
#include "sanlock_rv.h"
#include "sanlock_admin.h"
#include "sanlock_resource.h"

#include <pthread.h>
#include <stddef.h>
#include <poll.h>
#include <errno.h>
#include <syslog.h>
#include <sys/socket.h>

/*
-------------------------------------------------------------------------------
For each VG, lvmlockd creates a sanlock lockspace that holds the leases for
that VG.  There's a lease for the VG lock, and there's a lease for each active
LV.  sanlock maintains (reads/writes) these leases, which exist on storage.
That storage is a hidden LV within the VG: /dev/vg/lvmlock.  lvmlockd gives the
path of this internal LV to sanlock, which then reads/writes the leases on it.

# lvs -a cc -o+uuid
  LV        VG   Attr       LSize   LV UUID
  lv1       cc   -wi-a-----   2.00g 7xoDtu-yvNM-iwQx-C94t-BbYs-UzBl-o8hAIa
  lv2       cc   -wi-a----- 100.00g exxNPX-wZdO-uCNy-yiGa-aJGT-JKVl-arfcYT
  [lvmlock] cc   -wi-ao---- 256.00m iLpDel-hR0T-hJ3u-rnVo-PcDh-mcjt-sF9egM

# sanlock status
s lvm_cc:1:/dev/mapper/cc-lvmlock:0
r lvm_cc:exxNPX-wZdO-uCNy-yiGa-aJGT-JKVl-arfcYT:/dev/mapper/cc-lvmlock:71303168:13 p 26099
r lvm_cc:7xoDtu-yvNM-iwQx-C94t-BbYs-UzBl-o8hAIa:/dev/mapper/cc-lvmlock:70254592:3 p 26099

This shows that sanlock is maintaining leases on /dev/mapper/cc-lvmlock.

sanlock acquires a lockspace lease when the lockspace is joined, i.e. when the
VG is started by 'vgchange --lock-start cc'.  This lockspace lease exists at
/dev/mapper/cc-lvmlock offset 0, and sanlock regularly writes to it to maintain
ownership of it.  Joining the lockspace (by acquiring the lockspace lease in
it) then allows standard resource leases to be acquired in the lockspace for
whatever the application wants.  lvmlockd uses resource leases for the VG lock
and LV locks.

sanlock acquires a resource lease for each actual lock that lvm commands use.
Above, there are two LV locks that are held because the two LVs are active.
These are on /dev/mapper/cc-lvmlock at offsets 71303168 and 70254592.  sanlock
does not write to these resource leases except when acquiring and releasing
them (e.g. lvchange -ay/-an).  The renewal of the lockspace lease maintains
ownership of all the resource leases in the lockspace.

If the host loses access to the disk that the sanlock lv lives on, then sanlock
can no longer renew its lockspace lease.  The lockspace lease will eventually
expire, at which point the host will lose ownership of it, and of all resource
leases it holds in the lockspace.  Eventually, other hosts will be able to
acquire those leases.  sanlock ensures that another host will not be able to
acquire one of the expired leases until the current host has quit using it.

It is important that the host "quit using" the leases it is holding if the
sanlock storage is lost and they begin expiring.  If the host cannot quit using
the leases and release them within a limited time, then sanlock will use the
local watchdog to forcibly reset the host before any other host can acquire
them.  This is severe, but preferable to possibly corrupting the data protected
by the lease.  It ensures that two nodes will not be using the same lease at
once.  For LV leases, that means one host will not be able to activate an LV
while another host still has it active.

sanlock notifies the application that it cannot renew the lockspace lease.  The
application needs to quit using all leases in the lockspace and release them as
quickly as possible.  In the initial version, lvmlockd ignored this
notification, so sanlock would eventually reach the point where it would use
the local watchdog to reset the host.  However, it's better to attempt a
response.  If that response succeeds, the host can avoid being reset.  If the
response fails, then sanlock will eventually reset the host as the last resort.
sanlock gives the application about 40 seconds to complete its response and
release its leases before resetting the host.

An application can specify the path and args of a program that sanlock should
run to notify it if the lockspace lease cannot be renewed.  This program should
carry out the application's response to the expiring leases: attempt to quit
using the leases and then release them.  lvmlockd gives this command to sanlock
for each VG when that VG is started: 'lvmlockctl --kill vg_name'

If sanlock loses access to lease storage in that VG, it runs lvmlockctl --kill,
which:

1. Uses syslog to explain what is happening.

2. Notifies lvmlockd that the VG is being killed, so lvmlockd can
   immediately return an error for this condition if any new lock
   requests are made.  (This step would not be strictly necessary.)

3. Attempts to quit using the VG.  This is not yet implemented, but
   will eventually use blkdeactivate on the VG (or a more forceful
   equivalent.)

4. If step 3 was successful at terminating all use of the VG, then
   lvmlockd is told to release all the leases for the VG.  If this
   is all done within about 40 seconds, the host can avoid being
   reset.

Until steps 3 and 4 are fully implemented, manual steps can be substituted.
This is primarily for testing since the problem needs to be noticed and
responded to in a very short time.  The manual alternative to step 3 is to kill
any processes using file systems on LVs in the VG, unmount all file systems on
the LVs, and deactivate all the LVs.  Once this is done, the manual alternative
to step 4 is to run 'lvmlockctl --drop vg_name', which tells lvmlockd to
release all the leases for the VG.  (An example is given after this comment block.)
-------------------------------------------------------------------------------
*/
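
/*
 * Illustrative example of the manual response above, for a VG named "cc".
 * This is only a sketch: the mount point and the use of fuser and vgchange
 * are assumptions; 'lvmlockctl --drop' is the command described above.
 *
 * # fuser -km /mnt/lv1     kill processes using a file system on an LV in cc
 * # umount /mnt/lv1        unmount file systems on LVs in cc
 * # vgchange -an cc        deactivate all LVs in cc
 * # lvmlockctl --drop cc   tell lvmlockd to release all leases for cc
 */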


/*
 * Each lockspace thread has its own sanlock daemon connection.
 * If they shared one, sanlock acquire/release calls would be
 * serialized.  Some aspects of sanlock expect a single connection
 * from each pid: signals due to a sanlock_request, and
 * acquire/release/convert/inquire.  The latter can probably be
 * addressed with a flag to indicate that the pid field should be
 * interpreted as 'ci' (which the caller would need to figure
 * out somehow.)
 */

struct lm_sanlock {
	struct sanlk_lockspace ss;
	int align_size;
	int sock; /* sanlock daemon connection */
};

struct rd_sanlock {
	union {
		struct sanlk_resource rs;
		char buf[sizeof(struct sanlk_resource) + sizeof(struct sanlk_disk)];
	};
	struct val_blk *vb;
};

struct sanlk_resourced {
	union {
		struct sanlk_resource rs;
		char buf[sizeof(struct sanlk_resource) + sizeof(struct sanlk_disk)];
	};
};

int lm_data_size_sanlock(void)
{
	return sizeof(struct rd_sanlock);
}

/*
 * lock_args format
 *
 * vg_lock_args format for sanlock is
 * vg_version_string:undefined:lock_lv_name
 *
 * lv_lock_args format for sanlock is
 * lv_version_string:undefined:offset
 *
 * version_string is MAJOR.MINOR.PATCH
 * undefined may contain ":"
 *
 * If a new version of the lock_args string cannot be
 * handled by an old version of lvmlockd, then the
 * new lock_args string should contain a larger major number.
 */
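
/*
 * Illustrative examples, matching the snprintf formats used below
 * (the concrete values are hypothetical):
 *
 *   vg_lock_args: "1.0.0:lvmlock"    version 1.0.0, lock lv name "lvmlock"
 *   lv_lock_args: "1.0.0:70254592"   version 1.0.0, lease offset 70254592
 */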

#define VG_LOCK_ARGS_MAJOR 1
#define VG_LOCK_ARGS_MINOR 0
#define VG_LOCK_ARGS_PATCH 0

#define LV_LOCK_ARGS_MAJOR 1
#define LV_LOCK_ARGS_MINOR 0
#define LV_LOCK_ARGS_PATCH 0

/*
 * offset 0 is lockspace
 * offset align_size * 1 is unused
 * offset align_size * 2 is unused
 * ...
 * offset align_size * 64 is unused
 * offset align_size * 65 is gl lock
 * offset align_size * 66 is vg lock
 * offset align_size * 67 is first lv lock
 * offset align_size * 68 is second lv lock
 * ...
 */

#define LS_BEGIN 0
#define GL_LOCK_BEGIN 65
#define VG_LOCK_BEGIN 66
#define LV_LOCK_BEGIN 67
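
/*
 * Minimal illustration; this helper is not used elsewhere in this file.
 * A lease's byte offset is simply its slot number times align_size.  With
 * the 1MB align_size sanlock commonly reports for 512-byte-sector devices,
 * the two lv leases in the 'sanlock status' example above occupy slots
 * 67 (67 * 1048576 = 70254592) and 68 (68 * 1048576 = 71303168).
 */
static inline uint64_t lease_offset_example(int align_size, int slot)
{
	return (uint64_t)align_size * slot;
}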

static int lock_lv_name_from_args(char *vg_args, char *lock_lv_name)
{
	return last_string_from_args(vg_args, lock_lv_name);
}

static int lock_lv_offset_from_args(char *lv_args, uint64_t *lock_lv_offset)
{
	char offset_str[MAX_ARGS+1];
	int rv;

	memset(offset_str, 0, sizeof(offset_str));

	rv = last_string_from_args(lv_args, offset_str);
	if (rv < 0)
		return rv;

	*lock_lv_offset = strtoull(offset_str, NULL, 10);
	return 0;
}

static int check_args_version(char *args, unsigned int our_major)
{
	unsigned int major = 0;
	int rv;

	rv = version_from_args(args, &major, NULL, NULL);
	if (rv < 0) {
		log_error("check_args_version %s error %d", args, rv);
		return rv;
	}

	if (major > our_major) {
		log_error("check_args_version %s major %u %u", args, major, our_major);
		return -1;
	}

	return 0;
}

#define MAX_LINE 64

static int read_host_id_file(void)
{
	FILE *file;
	char line[MAX_LINE];
	char key_str[MAX_LINE];
	char val_str[MAX_LINE];
	char *key, *val, *sep;
	int host_id = 0;

	file = fopen(daemon_host_id_file, "r");
	if (!file)
		goto out;

	while (fgets(line, MAX_LINE, file)) {
		if (line[0] == '#' || line[0] == '\n')
			continue;

		key = line;
		sep = strstr(line, "=");
		val = sep + 1;

		if (!sep || !val)
			continue;

		*sep = '\0';
		memset(key_str, 0, sizeof(key_str));
		memset(val_str, 0, sizeof(val_str));
		sscanf(key, "%s", key_str);
		sscanf(val, "%s", val_str);

		if (!strcmp(key_str, "host_id")) {
			host_id = atoi(val_str);
			break;
		}
	}
	if (fclose(file))
		log_error("failed to close host id file %s", daemon_host_id_file);
out:
	log_debug("host_id %d from %s", host_id, daemon_host_id_file);
	return host_id;
}

/*
 * vgcreate
 *
 * For init_vg, vgcreate passes the internal lv name as vg_args.
 * This constructs the full/proper vg_args format, containing the
 * version and lv name, and returns the real lock_args in vg_args.
 */

int lm_init_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_args)
{
	struct sanlk_lockspace ss;
	struct sanlk_resourced rd;
	struct sanlk_disk disk;
	char lock_lv_name[MAX_ARGS+1];
	char lock_args_version[MAX_ARGS+1];
	const char *gl_name = NULL;
	uint32_t daemon_version;
	uint32_t daemon_proto;
	uint64_t offset;
	int align_size;
	int i, rv;

	memset(&ss, 0, sizeof(ss));
	memset(&rd, 0, sizeof(rd));
	memset(&disk, 0, sizeof(disk));
	memset(lock_lv_name, 0, sizeof(lock_lv_name));
	memset(lock_args_version, 0, sizeof(lock_args_version));

	if (!vg_args || !vg_args[0] || !strcmp(vg_args, "none")) {
		log_error("S %s init_vg_san vg_args missing", ls_name);
		return -EARGS;
	}

	snprintf(lock_args_version, MAX_ARGS, "%u.%u.%u",
		 VG_LOCK_ARGS_MAJOR, VG_LOCK_ARGS_MINOR, VG_LOCK_ARGS_PATCH);

	/* see comment above about input vg_args being only lock_lv_name */
	snprintf(lock_lv_name, MAX_ARGS, "%s", vg_args);

	if (strlen(lock_lv_name) + strlen(lock_args_version) + 2 > MAX_ARGS)
		return -EARGS;

	snprintf(disk.path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s", vg_name, lock_lv_name);

	log_debug("S %s init_vg_san path %s", ls_name, disk.path);

	if (daemon_test) {
		if (!gl_lsname_sanlock[0])
			strncpy(gl_lsname_sanlock, ls_name, MAX_NAME);
		return 0;
	}

	rv = sanlock_version(0, &daemon_version, &daemon_proto);
	if (rv < 0) {
		log_error("S %s init_vg_san failed to connect to sanlock daemon", ls_name);
		return -EMANAGER;
	}

	log_debug("sanlock daemon version %08x proto %08x",
		  daemon_version, daemon_proto);

	align_size = sanlock_align(&disk);
	if (align_size <= 0) {
		log_error("S %s init_vg_san bad disk align size %d %s",
			  ls_name, align_size, disk.path);
		return -EARGS;
	}

	strncpy(ss.name, ls_name, SANLK_NAME_LEN);
	memcpy(ss.host_id_disk.path, disk.path, SANLK_PATH_LEN);
	ss.host_id_disk.offset = LS_BEGIN * align_size;

	rv = sanlock_write_lockspace(&ss, 0, 0, sanlock_io_timeout);
	if (rv < 0) {
		log_error("S %s init_vg_san write_lockspace error %d %s",
			  ls_name, rv, ss.host_id_disk.path);
		return rv;
	}
	
	/*
	 * We want to create the global lock in the first sanlock vg.
	 * If other sanlock vgs exist, then one of them must contain
	 * the gl.  If gl_lsname_sanlock is not set, then perhaps
	 * the sanlock vg with the gl has been removed or has not yet
	 * been seen. (Would vgcreate get this far in that case?)
	 * If dlm vgs exist, then we choose to use the dlm gl and
	 * not a sanlock gl.
	 */

	if (flags & LD_AF_ENABLE)
		gl_name = R_NAME_GL;
	else if (flags & LD_AF_DISABLE)
		gl_name = R_NAME_GL_DISABLED;
	else if (!gl_use_sanlock || gl_lsname_sanlock[0] || !lockspaces_empty())
		gl_name = R_NAME_GL_DISABLED;
	else
		gl_name = R_NAME_GL;

	memcpy(rd.rs.lockspace_name, ss.name, SANLK_NAME_LEN);
	strncpy(rd.rs.name, gl_name, SANLK_NAME_LEN);
	memcpy(rd.rs.disks[0].path, disk.path, SANLK_PATH_LEN);
	rd.rs.disks[0].offset = align_size * GL_LOCK_BEGIN;
	rd.rs.num_disks = 1;

	rv = sanlock_write_resource(&rd.rs, 0, 0, 0);
	if (rv < 0) {
		log_error("S %s init_vg_san write_resource gl error %d %s",
			  ls_name, rv, rd.rs.disks[0].path);
		return rv;
	}

	memcpy(rd.rs.lockspace_name, ss.name, SANLK_NAME_LEN);
	strncpy(rd.rs.name, R_NAME_VG, SANLK_NAME_LEN);
	memcpy(rd.rs.disks[0].path, disk.path, SANLK_PATH_LEN);
	rd.rs.disks[0].offset = align_size * VG_LOCK_BEGIN;
	rd.rs.num_disks = 1;

	rv = sanlock_write_resource(&rd.rs, 0, 0, 0);
	if (rv < 0) {
		log_error("S %s init_vg_san write_resource vg error %d %s",
			  ls_name, rv, rd.rs.disks[0].path);
		return rv;
	}

	if (!strcmp(gl_name, R_NAME_GL))
		strncpy(gl_lsname_sanlock, ls_name, MAX_NAME);
 
	snprintf(vg_args, MAX_ARGS, "%s:%s", lock_args_version, lock_lv_name);

	log_debug("S %s init_vg_san done vg_args %s", ls_name, vg_args);

	/*
	 * Go through all lv resource slots and initialize them with the
	 * correct lockspace name but a special resource name that indicates
	 * it is unused.
	 */

	memset(&rd, 0, sizeof(rd));
	rd.rs.num_disks = 1;
	memcpy(rd.rs.disks[0].path, disk.path, SANLK_PATH_LEN);
	strncpy(rd.rs.lockspace_name, ls_name, SANLK_NAME_LEN);
	strcpy(rd.rs.name, "#unused");

	offset = align_size * LV_LOCK_BEGIN;

	log_debug("S %s init_vg_san clearing lv lease areas", ls_name);

	for (i = 0; ; i++) {
		rd.rs.disks[0].offset = offset;

		rv = sanlock_write_resource(&rd.rs, 0, 0, 0);
		if (rv == -EMSGSIZE || rv == -ENOSPC) {
			/* This indicates the end of the device is reached. */
			rv = -EMSGSIZE;
			break;
		}

		if (rv) {
			log_error("clear lv resource area %llu error %d",
				  (unsigned long long)offset, rv);
			break;
		}
		offset += align_size;
	}

	return 0;
}

/*
 * lvcreate
 *
 * The offset at which the lv lease is written is passed
 * all the way back to the lvcreate command so that it
 * can be saved in the lv's lock_args in the vg metadata.
 */

int lm_init_lv_sanlock(char *ls_name, char *vg_name, char *lv_name,
		       char *vg_args, char *lv_args, uint64_t free_offset)
{
	struct sanlk_resourced rd;
	char lock_lv_name[MAX_ARGS+1];
	char lock_args_version[MAX_ARGS+1];
	uint64_t offset;
	int align_size;
	int rv;

	memset(&rd, 0, sizeof(rd));
	memset(lock_lv_name, 0, sizeof(lock_lv_name));
	memset(lock_args_version, 0, sizeof(lock_args_version));

	rv = lock_lv_name_from_args(vg_args, lock_lv_name);
	if (rv < 0) {
		log_error("S %s init_lv_san lock_lv_name_from_args error %d %s",
			  ls_name, rv, vg_args);
		return rv;
	}

	snprintf(lock_args_version, MAX_ARGS, "%u.%u.%u",
		 LV_LOCK_ARGS_MAJOR, LV_LOCK_ARGS_MINOR, LV_LOCK_ARGS_PATCH);

	strncpy(rd.rs.lockspace_name, ls_name, SANLK_NAME_LEN);
	rd.rs.num_disks = 1;
	snprintf(rd.rs.disks[0].path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s", vg_name, lock_lv_name);

	align_size = sanlock_align(&rd.rs.disks[0]);
	if (align_size <= 0) {
		log_error("S %s init_lv_san align error %d", ls_name, align_size);
		return -EINVAL;
	}

	if (free_offset)
		offset = free_offset;
	else
		offset = align_size * LV_LOCK_BEGIN;
	rd.rs.disks[0].offset = offset;

	if (daemon_test) {
		snprintf(lv_args, MAX_ARGS, "%s:%llu",
			 lock_args_version, (unsigned long long)1111);
		return 0;
	}

	while (1) {
		rd.rs.disks[0].offset = offset;

		memset(rd.rs.name, 0, SANLK_NAME_LEN);

		rv = sanlock_read_resource(&rd.rs, 0);
		if (rv == -EMSGSIZE || rv == -ENOSPC) {
			/* This indicates the end of the device is reached. */
			log_debug("S %s init_lv_san read limit offset %llu",
				  ls_name, (unsigned long long)offset);
			rv = -EMSGSIZE;
			return rv;
		}

		if (rv && rv != SANLK_LEADER_MAGIC) {
			log_error("S %s init_lv_san read error %d offset %llu",
				  ls_name, rv, (unsigned long long)offset);
			break;
		}

		if (!strncmp(rd.rs.name, lv_name, SANLK_NAME_LEN)) {
			log_error("S %s init_lv_san resource name %s already exists at %llu",
				  ls_name, lv_name, (unsigned long long)offset);
			return -EEXIST;
		}

		/*
		 * If we read newly extended space, it will not be initialized
		 * with an "#unused" resource, but will return SANLK_LEADER_MAGIC
		 * indicating an uninitialized paxos structure on disk.
		 */
		if ((rv == SANLK_LEADER_MAGIC) || !strcmp(rd.rs.name, "#unused")) {
			log_debug("S %s init_lv_san %s found unused area at %llu",
				  ls_name, lv_name, (unsigned long long)offset);

			strncpy(rd.rs.name, lv_name, SANLK_NAME_LEN);

			rv = sanlock_write_resource(&rd.rs, 0, 0, 0);
			if (!rv) {
				snprintf(lv_args, MAX_ARGS, "%s:%llu",
				         lock_args_version, (unsigned long long)offset);
			} else {
				log_error("S %s init_lv_san write error %d offset %llu",
					  ls_name, rv, (unsigned long long)offset);
			}
			break;
		}

		offset += align_size;
	}

	return rv;
}

/*
 * Read the lockspace and each resource, replace the lockspace name,
 * and write it back.
 */

int lm_rename_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_args)
{
	struct sanlk_lockspace ss;
	struct sanlk_resourced rd;
	struct sanlk_disk disk;
	char lock_lv_name[MAX_ARGS+1];
	uint64_t offset;
	uint32_t io_timeout;
	int align_size;
	int i, rv;

	memset(&disk, 0, sizeof(disk));
	memset(lock_lv_name, 0, sizeof(lock_lv_name));

	if (!vg_args || !vg_args[0] || !strcmp(vg_args, "none")) {
		log_error("S %s rename_vg_san vg_args missing", ls_name);
		return -EINVAL;
	}

	rv = lock_lv_name_from_args(vg_args, lock_lv_name);
	if (rv < 0) {
		log_error("S %s rename_vg_san lock_lv_name_from_args error %d %s",
			  ls_name, rv, vg_args);
		return rv;
	}

	snprintf(disk.path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s", vg_name, lock_lv_name);

	log_debug("S %s rename_vg_san path %s", ls_name, disk.path);

	if (daemon_test)
		return 0;

	/* FIXME: device is not always ready for us here */
	sleep(1);

	align_size = sanlock_align(&disk);
	if (align_size <= 0) {
		log_error("S %s rename_vg_san bad align size %d %s",
			  ls_name, align_size, disk.path);
		return -EINVAL;
	}

	/*
	 * Lockspace
	 */

	memset(&ss, 0, sizeof(ss));
	memcpy(ss.host_id_disk.path, disk.path, SANLK_PATH_LEN);
	ss.host_id_disk.offset = LS_BEGIN * align_size;

	rv = sanlock_read_lockspace(&ss, 0, &io_timeout);
	if (rv < 0) {
		log_error("S %s rename_vg_san read_lockspace error %d %s",
			  ls_name, rv, ss.host_id_disk.path);
		return rv;
	}

	strncpy(ss.name, ls_name, SANLK_NAME_LEN);

	rv = sanlock_write_lockspace(&ss, 0, 0, sanlock_io_timeout);
	if (rv < 0) {
		log_error("S %s rename_vg_san write_lockspace error %d %s",
			  ls_name, rv, ss.host_id_disk.path);
		return rv;
	}

	/*
	 * GL resource
	 */

	memset(&rd, 0, sizeof(rd));
	memcpy(rd.rs.disks[0].path, disk.path, SANLK_PATH_LEN);
	rd.rs.disks[0].offset = align_size * GL_LOCK_BEGIN;
	rd.rs.num_disks = 1;

	rv = sanlock_read_resource(&rd.rs, 0);
	if (rv < 0) {
		log_error("S %s rename_vg_san read_resource gl error %d %s",
			  ls_name, rv, rd.rs.disks[0].path);
		return rv;
	}

	strncpy(rd.rs.lockspace_name, ss.name, SANLK_NAME_LEN);

	rv = sanlock_write_resource(&rd.rs, 0, 0, 0);
	if (rv < 0) {
		log_error("S %s rename_vg_san write_resource gl error %d %s",
			  ls_name, rv, rd.rs.disks[0].path);
		return rv;
	}

	/*
	 * VG resource
	 */

	memset(&rd, 0, sizeof(rd));
	memcpy(rd.rs.disks[0].path, disk.path, SANLK_PATH_LEN);
	rd.rs.disks[0].offset = align_size * VG_LOCK_BEGIN;
	rd.rs.num_disks = 1;

	rv = sanlock_read_resource(&rd.rs, 0);
	if (rv < 0) {
		log_error("S %s rename_vg_san read_resource vg error %d %s",
			  ls_name, rv, rd.rs.disks[0].path);
		return rv;
	}

	strncpy(rd.rs.lockspace_name, ss.name, SANLK_NAME_LEN);

	rv = sanlock_write_resource(&rd.rs, 0, 0, 0);
	if (rv < 0) {
		log_error("S %s rename_vg_san write_resource vg error %d %s",
			  ls_name, rv, rd.rs.disks[0].path);
		return rv;
	}

	/*
	 * LV resources
	 */

	offset = align_size * LV_LOCK_BEGIN;

	for (i = 0; ; i++) {
		memset(&rd, 0, sizeof(rd));
		memcpy(rd.rs.disks[0].path, disk.path, SANLK_PATH_LEN);
		rd.rs.disks[0].offset = offset;
		rd.rs.num_disks = 1;

		rv = sanlock_read_resource(&rd.rs, 0);
		if (rv == -EMSGSIZE || rv == -ENOSPC) {
			/* This indicates the end of the device is reached. */
			rv = -EMSGSIZE;
			break;
		}

		if (rv < 0) {
			log_error("S %s rename_vg_san read_resource resource area %llu error %d",
				  ls_name, (unsigned long long)offset, rv);
			break;
		}

		strncpy(rd.rs.lockspace_name, ss.name, SANLK_NAME_LEN);

		rv = sanlock_write_resource(&rd.rs, 0, 0, 0);
		if (rv) {
			log_error("S %s rename_vg_san write_resource resource area %llu error %d",
				  ls_name, (unsigned long long)offset, rv);
			break;
		}
		offset += align_size;
	}

	return 0;
}

/* lvremove */
int lm_free_lv_sanlock(struct lockspace *ls, struct resource *r)
{
	struct rd_sanlock *rds = (struct rd_sanlock *)r->lm_data;
	struct sanlk_resource *rs = &rds->rs;
	int rv;

	log_debug("S %s R %s free_lv_san", ls->name, r->name);

	if (daemon_test)
		return 0;

	strcpy(rs->name, "#unused");

	rv = sanlock_write_resource(rs, 0, 0, 0);
	if (rv < 0) {
		log_error("S %s R %s free_lv_san write error %d",
			  ls->name, r->name, rv);
	}

	return rv;
}

int lm_ex_disable_gl_sanlock(struct lockspace *ls)
{
	struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data;
	struct sanlk_resourced rd1;
	struct sanlk_resourced rd2;
	struct sanlk_resource *rs1;
	struct sanlk_resource *rs2;
	struct sanlk_resource **rs_args;
	int rv;

	rs_args = malloc(2 * sizeof(struct sanlk_resource *));
	if (!rs_args)
		return -ENOMEM;

	rs1 = &rd1.rs;
	rs2 = &rd2.rs;

	memset(&rd1, 0, sizeof(rd1));
	memset(&rd2, 0, sizeof(rd2));

	strncpy(rd1.rs.lockspace_name, ls->name, SANLK_NAME_LEN);
	strncpy(rd1.rs.name, R_NAME_GL, SANLK_NAME_LEN);

	strncpy(rd2.rs.lockspace_name, ls->name, SANLK_NAME_LEN);
	strncpy(rd2.rs.name, R_NAME_GL_DISABLED, SANLK_NAME_LEN);

	rd1.rs.num_disks = 1;
	strncpy(rd1.rs.disks[0].path, lms->ss.host_id_disk.path, SANLK_PATH_LEN-1);
	rd1.rs.disks[0].offset = lms->align_size * GL_LOCK_BEGIN;

	rv = sanlock_acquire(lms->sock, -1, 0, 1, &rs1, NULL);
	if (rv < 0) {
		log_error("S %s ex_disable_gl_san acquire error %d",
			  ls->name, rv);
		goto out;
	}

	rs_args[0] = rs1;
	rs_args[1] = rs2;

	rv = sanlock_release(lms->sock, -1, SANLK_REL_RENAME, 2, rs_args);
	if (rv < 0) {
		log_error("S %s ex_disable_gl_san release_rename error %d",
			  ls->name, rv);
	}

out:
	free(rs_args);
	return rv;
}

/*
 * enable/disable exist because each vg contains a global lock,
 * but we only want to use the gl from one of them.  The first
 * sanlock vg created has its gl enabled, and subsequent
 * sanlock vgs have their gl disabled.  If the vg containing the
 * gl is removed, the gl from another sanlock vg needs to be
 * enabled.  Or, if the gl in multiple vgs is somehow enabled, we
 * want to be able to disable one of them.
 *
 * Disable works by naming/renaming the gl resource to have a
 * name that is different from the predefined name.
 * When a host attempts to acquire the gl with its standard
 * predefined name, it will fail because the resource's name
 * on disk doesn't match.
 */
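
/*
 * For example (based on the handling in lm_lock_sanlock() below): while the
 * gl is disabled, the resource record in the gl slot is named
 * R_NAME_GL_DISABLED, so acquiring a resource named R_NAME_GL there fails
 * with SANLK_LEADER_RESOURCE, which lm_lock_sanlock() reports as -EUNATCH.
 */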

int lm_able_gl_sanlock(struct lockspace *ls, int enable)
{
	struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data;
	struct sanlk_resourced rd;
	const char *gl_name;
	int rv;

	if (enable)
		gl_name = R_NAME_GL;
	else
		gl_name = R_NAME_GL_DISABLED;

	memset(&rd, 0, sizeof(rd));

	strncpy(rd.rs.lockspace_name, ls->name, SANLK_NAME_LEN);
	strncpy(rd.rs.name, gl_name, SANLK_NAME_LEN);

	rd.rs.num_disks = 1;
	strncpy(rd.rs.disks[0].path, lms->ss.host_id_disk.path, SANLK_PATH_LEN-1);
	rd.rs.disks[0].offset = lms->align_size * GL_LOCK_BEGIN;

	rv = sanlock_write_resource(&rd.rs, 0, 0, 0);
	if (rv < 0) {
		log_error("S %s able_gl %d write_resource gl error %d %s",
			  ls->name, enable, rv, rd.rs.disks[0].path);
		return rv;
	}

	log_debug("S %s able_gl %s", ls->name, gl_name);

	ls->sanlock_gl_enabled = enable;

	if (enable)
		strncpy(gl_lsname_sanlock, ls->name, MAX_NAME);

	if (!enable && !strcmp(gl_lsname_sanlock, ls->name))
		memset(gl_lsname_sanlock, 0, sizeof(gl_lsname_sanlock));

	return 0;
}

static int gl_is_enabled(struct lockspace *ls, struct lm_sanlock *lms)
{
	char strname[SANLK_NAME_LEN + 1];
	struct sanlk_resourced rd;
	uint64_t offset;
	int rv;

	memset(&rd, 0, sizeof(rd));

	strncpy(rd.rs.lockspace_name, ls->name, SANLK_NAME_LEN);

	/* leave rs.name empty, it is what we're checking */

	rd.rs.num_disks = 1;
	strncpy(rd.rs.disks[0].path, lms->ss.host_id_disk.path, SANLK_PATH_LEN-1);

	offset = lms->align_size * GL_LOCK_BEGIN;
	rd.rs.disks[0].offset = offset;

	rv = sanlock_read_resource(&rd.rs, 0);
	if (rv < 0) {
		log_error("gl_is_enabled read_resource error %d", rv);
		return rv;
	}

	memset(strname, 0, sizeof(strname));
	memcpy(strname, rd.rs.name, SANLK_NAME_LEN);

	if (!strcmp(strname, R_NAME_GL_DISABLED)) {
		return 0;
	}

	if (!strcmp(strname, R_NAME_GL)) {
		return 1;
	}

	log_error("gl_is_enabled invalid gl name %s", strname);
	return -1;
}

int lm_gl_is_enabled(struct lockspace *ls)
{
	int rv;
	rv = gl_is_enabled(ls, ls->lm_data);
	ls->sanlock_gl_enabled = rv;
	return rv;
}

/*
 * This is called at the beginning of lvcreate to
 * ensure there is free space for a new LV lock.
 * If not, lvcreate will extend the lvmlock lv
 * before continuing with creating the new LV.
 * This way, lm_init_lv_san() should find a free
 * lock (unless the autoextend of lvmlock lv has
 * been disabled.)
 */

int lm_find_free_lock_sanlock(struct lockspace *ls, uint64_t *free_offset)
{
	struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data;
	struct sanlk_resourced rd;
	uint64_t offset;
	int rv;

	if (daemon_test)
		return 0;

	memset(&rd, 0, sizeof(rd));

	strncpy(rd.rs.lockspace_name, ls->name, SANLK_NAME_LEN);
	rd.rs.num_disks = 1;
	strncpy(rd.rs.disks[0].path, lms->ss.host_id_disk.path, SANLK_PATH_LEN-1);

	offset = lms->align_size * LV_LOCK_BEGIN;

	while (1) {
		rd.rs.disks[0].offset = offset;

		memset(rd.rs.name, 0, SANLK_NAME_LEN);

		rv = sanlock_read_resource(&rd.rs, 0);
		if (rv == -EMSGSIZE || rv == -ENOSPC) {
			/* This indicates the end of the device is reached. */
			log_debug("S %s find_free_lock_san read limit offset %llu",
				  ls->name, (unsigned long long)offset);
			return -EMSGSIZE;
		}

		/*
		 * If we read newly extended space, it will not be initialized
		 * with an "#unused" resource, but will return SANLK_LEADER_MAGIC
		 * indicating an uninitialized paxos structure on disk.
		 */
		if (rv == SANLK_LEADER_MAGIC) {
			log_debug("S %s find_free_lock_san found empty area at %llu",
				  ls->name, (unsigned long long)offset);
			*free_offset = offset;
			return 0;
		}

		if (rv) {
			log_error("S %s find_free_lock_san read error %d offset %llu",
				  ls->name, rv, (unsigned long long)offset);
			break;
		}

		if (!strcmp(rd.rs.name, "#unused")) {
			log_debug("S %s find_free_lock_san found unused area at %llu",
				  ls->name, (unsigned long long)offset);
			*free_offset = offset;
			return 0;
		}

		offset += lms->align_size;
	}

	return rv;
}

/*
 * host A: start_vg/add_lockspace
 * host B: vgremove
 *
 * The global lock cannot always be held around start_vg
 * on host A because the gl is in a vg that may not be
 * started yet, or may be in the vg we are starting.
 *
 * If B removes the vg, destroying the delta leases,
 * while A is a lockspace member, it will cause A's
 * sanlock delta lease renewal to fail, and lockspace
 * recovery.
 *
 * I expect this overlap would usually cause a failure
 * in the add_lockspace() on host A when it sees that
 * the lockspace structures have been clobbered by B.
 * Having add_lockspace() fail should be a fine result.
 *
 * If add_lockspace was somehow able to finish, the
 * subsequent renewal would probably fail instead.
 * This should also not create any major problems.
 */

int lm_prepare_lockspace_sanlock(struct lockspace *ls)
{
	struct stat st;
	struct lm_sanlock *lms = NULL;
	char lock_lv_name[MAX_ARGS+1];
	char lsname[SANLK_NAME_LEN + 1];
	char disk_path[SANLK_PATH_LEN];
	char killpath[SANLK_PATH_LEN];
	char killargs[SANLK_PATH_LEN];
	int gl_found;
	int ret, rv;

	memset(disk_path, 0, sizeof(disk_path));
	memset(lock_lv_name, 0, sizeof(lock_lv_name));

	/*
	 * Construct the path to lvmlockctl by using the path to the lvm binary
	 * and appending "lockctl" to get /path/to/lvmlockctl.
	 */
	memset(killpath, 0, sizeof(killpath));
	snprintf(killpath, SANLK_PATH_LEN - 1, "%slockctl", LVM_PATH);

	memset(killargs, 0, sizeof(killargs));
	snprintf(killargs, SANLK_PATH_LEN - 1, "--kill %s", ls->vg_name);

	rv = check_args_version(ls->vg_args, VG_LOCK_ARGS_MAJOR);
	if (rv < 0) {
		ret = -EARGS;
		goto fail;
	}

	rv = lock_lv_name_from_args(ls->vg_args, lock_lv_name);
	if (rv < 0) {
		log_error("S %s prepare_lockspace_san lock_lv_name_from_args error %d %s",
			  ls->name, rv, ls->vg_args);
		ret = -EARGS;
		goto fail;
	}

	snprintf(disk_path, SANLK_PATH_LEN-1, "/dev/mapper/%s-%s",
		 ls->vg_name, lock_lv_name);

	/*
	 * When a vg is started, the internal sanlock lv should be
	 * activated before lvmlockd is asked to add the lockspace.
	 * (sanlock needs to use the lv.)
	 *
	 * In the future we might be able to ask something on the system
	 * to activate the sanlock lv from here, and with that we might be
	 * able to start sanlock VGs without requiring a
	 * vgchange --lock-start command.
	 */

	/* FIXME: device is not always ready for us here */
	sleep(1);

	rv = stat(disk_path, &st);
	if (rv < 0) {
		log_error("S %s prepare_lockspace_san stat error %d disk_path %s",
			  ls->name, errno, disk_path);
		ret = -EARGS;
		goto fail;
	}

	if (!ls->host_id) {
		if (daemon_host_id)
			ls->host_id = daemon_host_id;
		else if (daemon_host_id_file)
			ls->host_id = read_host_id_file();
	}

	if (!ls->host_id || ls->host_id > 2000) {
		log_error("S %s prepare_lockspace_san invalid host_id %llu",
			  ls->name, (unsigned long long)ls->host_id);
		ret = -EHOSTID;
		goto fail;
	}

	lms = malloc(sizeof(struct lm_sanlock));
	if (!lms) {
		ret = -ENOMEM;
		goto fail;
	}

	memset(lsname, 0, sizeof(lsname));
	strncpy(lsname, ls->name, SANLK_NAME_LEN);

	memset(lms, 0, sizeof(struct lm_sanlock));
	memcpy(lms->ss.name, lsname, SANLK_NAME_LEN);
	lms->ss.host_id_disk.offset = 0;
	lms->ss.host_id = ls->host_id;
	strncpy(lms->ss.host_id_disk.path, disk_path, SANLK_PATH_LEN-1);

	if (daemon_test) {
		if (!gl_lsname_sanlock[0]) {
			strncpy(gl_lsname_sanlock, lsname, MAX_NAME);
			log_debug("S %s prepare_lockspace_san use global lock", lsname);
		}
		goto out;
	}

	lms->sock = sanlock_register();
	if (lms->sock < 0) {
		log_error("S %s prepare_lockspace_san register error %d", lsname, lms->sock);
		lms->sock = 0;
		ret = -EMANAGER;
		goto fail;
	}

	log_debug("set killpath to %s %s", killpath, killargs);

	rv = sanlock_killpath(lms->sock, 0, killpath, killargs);
	if (rv < 0) {
		log_error("S %s killpath error %d", lsname, rv);
		ret = -EMANAGER;
		goto fail;
	}

	rv = sanlock_restrict(lms->sock, SANLK_RESTRICT_SIGKILL);
	if (rv < 0) {
		log_error("S %s restrict error %d", lsname, rv);
		ret = -EMANAGER;
		goto fail;
	}

	lms->align_size = sanlock_align(&lms->ss.host_id_disk);
	if (lms->align_size <= 0) {
		log_error("S %s prepare_lockspace_san align error %d", lsname, lms->align_size);
		ret = -EMANAGER;
		goto fail;
	}

	gl_found = gl_is_enabled(ls, lms);
	if (gl_found < 0) {
		log_error("S %s prepare_lockspace_san gl_enabled error %d", lsname, gl_found);
		ret = -EARGS;
		goto fail;
	}

	ls->sanlock_gl_enabled = gl_found;

	if (gl_found) {
		if (gl_use_dlm) {
			log_error("S %s prepare_lockspace_san gl_use_dlm is set", lsname);
		} else if (gl_lsname_sanlock[0] && strcmp(gl_lsname_sanlock, lsname)) {
			log_error("S %s prepare_lockspace_san multiple sanlock global locks current %s",
				  lsname, gl_lsname_sanlock);
		} else {
			strncpy(gl_lsname_sanlock, lsname, MAX_NAME);
			log_debug("S %s prepare_lockspace_san use global lock %s",
				  lsname, gl_lsname_sanlock);
		}
	}

out:
	ls->lm_data = lms;
	log_debug("S %s prepare_lockspace_san done", lsname);
	return 0;

fail:
	if (lms && lms->sock)
		close(lms->sock);
	if (lms)
		free(lms);
	return ret;
}

int lm_add_lockspace_sanlock(struct lockspace *ls, int adopt)
{
	struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data;
	int rv;

	rv = sanlock_add_lockspace_timeout(&lms->ss, 0, sanlock_io_timeout);
	if (rv == -EEXIST && adopt) {
		/* We could alternatively just skip the sanlock call for adopt. */
		log_debug("S %s add_lockspace_san adopt found ls", ls->name);
		goto out;
	}
	if (rv < 0) {
		/* retry for some errors? */
		log_error("S %s add_lockspace_san add_lockspace error %d", ls->name, rv);
		goto fail;
	}

	/*
	 * Don't let the lockspace be cleanly released if orphan locks
	 * exist, because the orphan locks are still protecting resources
	 * that are being used on the host, e.g. active lvs.  If the
	 * lockspace is cleanly released, another host could acquire the
	 * orphan leases.
	 */

	rv = sanlock_set_config(ls->name, 0, SANLK_CONFIG_USED_BY_ORPHANS, NULL);
	if (rv < 0) {
		log_error("S %s add_lockspace_san set_config error %d", ls->name, rv);
		sanlock_rem_lockspace(&lms->ss, 0);
		goto fail;
	}

out:
	log_debug("S %s add_lockspace_san done", ls->name);
	return 0;

fail:
	if (close(lms->sock))
		log_error("failed to close sanlock daemon socket connection");
	free(lms);
	ls->lm_data = NULL;
	return rv;
}

int lm_rem_lockspace_sanlock(struct lockspace *ls, int free_vg)
{
	struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data;
	int rv;

	if (daemon_test)
		goto out;

	rv = sanlock_rem_lockspace(&lms->ss, 0);
	if (rv < 0) {
		log_error("S %s rem_lockspace_san error %d", ls->name, rv);
		return rv;
	}

	if (free_vg) {
		/*
		 * Destroy sanlock lockspace (delta leases).  Forces failure for any
		 * other host that is still using or attempts to use this lockspace.
		 * This shouldn't generally be necessary, but there may be some races
		 * between nodes starting and removing a vg that this could help with.
		 */
		strncpy(lms->ss.name, "#unused", SANLK_NAME_LEN);

		rv = sanlock_write_lockspace(&lms->ss, 0, 0, sanlock_io_timeout);
		if (rv < 0) {
			log_error("S %s rem_lockspace free_vg write_lockspace error %d %s",
				  ls->name, rv, lms->ss.host_id_disk.path);
		}
	}
out:
	if (close(lms->sock))
		log_error("failed to close sanlock daemon socket connection");

	free(lms);
	ls->lm_data = NULL;

	/* FIXME: should we only clear gl_lsname when doing free_vg? */

	if (!strcmp(ls->name, gl_lsname_sanlock))
		memset(gl_lsname_sanlock, 0, sizeof(gl_lsname_sanlock));

	return 0;
}

static int lm_add_resource_sanlock(struct lockspace *ls, struct resource *r)
{
	struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data;
	struct rd_sanlock *rds = (struct rd_sanlock *)r->lm_data;

	strncpy(rds->rs.lockspace_name, ls->name, SANLK_NAME_LEN);
	strncpy(rds->rs.name, r->name, SANLK_NAME_LEN);
	rds->rs.num_disks = 1;
	memcpy(rds->rs.disks[0].path, lms->ss.host_id_disk.path, SANLK_PATH_LEN);

	if (r->type == LD_RT_GL)
		rds->rs.disks[0].offset = GL_LOCK_BEGIN * lms->align_size;
	else if (r->type == LD_RT_VG)
		rds->rs.disks[0].offset = VG_LOCK_BEGIN * lms->align_size;

	/* LD_RT_LV offset is set in each lm_lock call from lv_args. */

	if (r->type == LD_RT_GL || r->type == LD_RT_VG) {
		rds->vb = malloc(sizeof(struct val_blk));
		if (!rds->vb)
			return -ENOMEM;
		memset(rds->vb, 0, sizeof(struct val_blk));
	}

	return 0;
}

int lm_rem_resource_sanlock(struct lockspace *ls, struct resource *r)
{
	struct rd_sanlock *rds = (struct rd_sanlock *)r->lm_data;

	/* FIXME: assert r->mode == UN or unlock if it's not? */

	if (rds->vb)
		free(rds->vb);

	memset(rds, 0, sizeof(struct rd_sanlock));
	r->lm_init = 0;
	return 0;
}

int lm_lock_sanlock(struct lockspace *ls, struct resource *r, int ld_mode,
		    uint32_t *r_version, int *retry, int adopt)
{
	struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data;
	struct rd_sanlock *rds = (struct rd_sanlock *)r->lm_data;
	struct sanlk_resource *rs;
	uint64_t lock_lv_offset;
	uint32_t flags = 0;
	struct val_blk vb;
	uint16_t vb_version;
	int added = 0;
	int rv;

	if (!r->lm_init) {
		rv = lm_add_resource_sanlock(ls, r);
		if (rv < 0)
			return rv;
		r->lm_init = 1;
		added = 1;
	}

	rs = &rds->rs;

	/*
	 * While there are duplicate global locks, keep checking
	 * to see if any have been disabled.
	 */
	if (sanlock_gl_dup && ls->sanlock_gl_enabled &&
	    (r->type == LD_RT_GL || r->type == LD_RT_VG))
		ls->sanlock_gl_enabled = gl_is_enabled(ls, ls->lm_data);

	if (r->type == LD_RT_LV) {
		/*
		 * The lv may have been removed and recreated with a new lease
		 * offset, so we need to get the offset from lv_args each time
		 * instead of reusing the value that we last set in rds->rs.
		 * act->lv_args is copied to r->lv_args before every lm_lock().
		 */

		rv = check_args_version(r->lv_args, LV_LOCK_ARGS_MAJOR);
		if (rv < 0) {
			log_error("S %s R %s lock_san wrong lv_args version %s",
				  ls->name, r->name, r->lv_args);
			return rv;
		}

		rv = lock_lv_offset_from_args(r->lv_args, &lock_lv_offset);
		if (rv < 0) {
			log_error("S %s R %s lock_san lv_offset_from_args error %d %s",
				  ls->name, r->name, rv, r->lv_args);
			return rv;
		}

		if (!added && (rds->rs.disks[0].offset != lock_lv_offset)) {
			log_debug("S %s R %s lock_san offset old %llu new %llu",
				  ls->name, r->name,
				  (unsigned long long)rds->rs.disks[0].offset,
				  (unsigned long long)lock_lv_offset);
		}

		rds->rs.disks[0].offset = lock_lv_offset;
	}

	if (ld_mode == LD_LK_SH) {
		rs->flags |= SANLK_RES_SHARED;
	} else if (ld_mode == LD_LK_EX) {
		rs->flags &= ~SANLK_RES_SHARED;
	} else {
		log_error("lock_san invalid mode %d", ld_mode);
		return -EINVAL;
	}

	/*
	 * Use PERSISTENT because if lvmlockd exits while holding
	 * a lock, it's not safe to simply clear/drop the lock while
	 * a command or lv is using it.
	 */

	rs->flags |= SANLK_RES_PERSISTENT;

	log_debug("S %s R %s lock_san acquire %s:%llu",
		  ls->name, r->name, rs->disks[0].path,
		  (unsigned long long)rs->disks[0].offset);

	if (daemon_test) {
		*r_version = 0;
		return 0;
	}

	if (rds->vb)
		flags |= SANLK_ACQUIRE_LVB;
	if (adopt)
		flags |= SANLK_ACQUIRE_ORPHAN_ONLY;

	rv = sanlock_acquire(lms->sock, -1, flags, 1, &rs, NULL);

	if (rv == -EAGAIN) {
		/*
		 * It appears that sanlock_acquire returns EAGAIN when we request
		 * a shared lock but the lock is held ex by another host.
		 * There's no point in retrying this case, just return an error.
		 */
		log_debug("S %s R %s lock_san acquire mode %d rv EAGAIN", ls->name, r->name, ld_mode);
		*retry = 0;
		return -EAGAIN;
	}

	if ((rv == -EMSGSIZE) && (r->type == LD_RT_LV)) {
		/*
		 * sanlock tried to read beyond the end of the device,
		 * so the offset of the lv lease is beyond the end of the
		 * device, which means that the lease lv was extended, and
		 * the lease for this lv was allocated in the new space.
		 * The lvm command will see this error, refresh the lvmlock
		 * lv, and try again.
		 */
		log_debug("S %s R %s lock_san acquire offset %llu rv EMSGSIZE",
			  ls->name, r->name, (unsigned long long)rs->disks[0].offset);
		*retry = 0;
		return -EMSGSIZE;
	}

	if (adopt && (rv == -EUCLEAN)) {
		/*
		 * The orphan lock exists but in a different mode than we asked
		 * for, so the caller should try again with the other mode.
		 */
		log_debug("S %s R %s lock_san adopt mode %d try other mode",
			  ls->name, r->name, ld_mode);
		*retry = 0;
		return -EUCLEAN;
	}

	if (adopt && (rv == -ENOENT)) {
		/*
		 * No orphan lock exists.
		 */
		log_debug("S %s R %s lock_san adopt mode %d no orphan found",
			  ls->name, r->name, ld_mode);
		*retry = 0;
		return -ENOENT;
	}

	if (rv == SANLK_ACQUIRE_IDLIVE || rv == SANLK_ACQUIRE_OWNED || rv == SANLK_ACQUIRE_OTHER) {
		/*
		 * The lock is held by another host.  These failures can
		 * happen while multiple hosts are concurrently acquiring
		 * shared locks.  We want to retry a couple times in this
		 * case because we'll probably get the sh lock.
		 *
		 * I believe these are also the errors when requesting an
		 * ex lock that another host holds ex.  We want to report
		 * something like: "lock is held by another host" in this case.
		 * Retry is pointless here.
		 *
		 * We can't distinguish between the two cases above,
		 * so if requesting a sh lock, retry a couple times,
		 * otherwise don't.
		 */
		log_debug("S %s R %s lock_san acquire mode %d rv %d", ls->name, r->name, ld_mode, rv);
		*retry = (ld_mode == LD_LK_SH) ? 1 : 0;
		return -EAGAIN;
	}

	if (rv < 0) {
		log_error("S %s R %s lock_san acquire error %d",
			  ls->name, r->name, rv);

		/* if the gl has been disabled, remove and free the gl resource */
		if ((rv == SANLK_LEADER_RESOURCE) && (r->type == LD_RT_GL)) {
			if (!lm_gl_is_enabled(ls)) {
				log_error("S %s R %s lock_san gl has been disabled",
					  ls->name, r->name);
				if (!strcmp(gl_lsname_sanlock, ls->name))
					memset(gl_lsname_sanlock, 0, sizeof(gl_lsname_sanlock));
				return -EUNATCH;
			}
		}

		if (added)
			lm_rem_resource_sanlock(ls, r);

		/* sanlock gets i/o errors trying to read/write the leases. */
		if (rv == -EIO)
			rv = -ELOCKIO;

		/*
		 * The sanlock lockspace can disappear if the lease storage fails:
		 * the delta lease renewals fail, the lockspace enters recovery, and
		 * if lvmlockd holds no leases in the lockspace, sanlock can stop
		 * and free the lockspace.
		 */
		if (rv == -ENOSPC)
			rv = -ELOCKIO;

		return rv;
	}

	if (rds->vb) {
		rv = sanlock_get_lvb(0, rs, (char *)&vb, sizeof(vb));
		if (rv < 0) {
			log_error("S %s R %s lock_san get_lvb error %d", ls->name, r->name, rv);
			*r_version = 0;
			goto out;
		}

		vb_version = le16_to_cpu(vb.version);

		if (vb_version && ((vb_version & 0xFF00) > (VAL_BLK_VERSION & 0xFF00))) {
			log_error("S %s R %s lock_san ignore vb_version %x",
				  ls->name, r->name, vb_version);
			*r_version = 0;
			free(rds->vb);
			rds->vb = NULL;
			goto out;
		}

		*r_version = le32_to_cpu(vb.r_version);
		memcpy(rds->vb, &vb, sizeof(vb)); /* rds->vb saved as le */

		log_debug("S %s R %s lock_san get r_version %u",
			  ls->name, r->name, *r_version);
	}
out:
	return rv;
}

int lm_convert_sanlock(struct lockspace *ls, struct resource *r,
		       int ld_mode, uint32_t r_version)
{
	struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data;
	struct rd_sanlock *rds = (struct rd_sanlock *)r->lm_data;
	struct sanlk_resource *rs = &rds->rs;
	struct val_blk vb;
	uint32_t flags = 0;
	int rv;

	log_debug("S %s R %s convert_san", ls->name, r->name);

	if (daemon_test)
		goto rs_flag;

	if (rds->vb && r_version && (r->mode == LD_LK_EX)) {
		if (!rds->vb->version) {
			/* first time vb has been written */
			rds->vb->version = cpu_to_le16(VAL_BLK_VERSION);
		}
		if (r_version)
			rds->vb->r_version = cpu_to_le32(r_version);
		memcpy(&vb, rds->vb, sizeof(vb));

		log_debug("S %s R %s convert_san set r_version %u",
			  ls->name, r->name, r_version);

		rv = sanlock_set_lvb(0, rs, (char *)&vb, sizeof(vb));
		if (rv < 0) {
			log_error("S %s R %s convert_san set_lvb error %d",
				  ls->name, r->name, rv);
		}
	}

 rs_flag:
	if (ld_mode == LD_LK_SH)
		rs->flags |= SANLK_RES_SHARED;
	else
		rs->flags &= ~SANLK_RES_SHARED;

	if (daemon_test)
		return 0;

	rv = sanlock_convert(lms->sock, -1, flags, rs);
	if (rv == -EAGAIN) {
		/* FIXME: When could this happen?  Should something different be done? */
		log_error("S %s R %s convert_san EAGAIN", ls->name, r->name);
		return -EAGAIN;
	}
	if (rv < 0) {
		log_error("S %s R %s convert_san convert error %d", ls->name, r->name, rv);
	}

	return rv;
}

static int release_rename(struct lockspace *ls, struct resource *r)
{
	struct rd_sanlock rd1;
	struct rd_sanlock rd2;
	struct sanlk_resource *res1;
	struct sanlk_resource *res2;
	struct sanlk_resource **res_args;
	struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data;
	struct rd_sanlock *rds = (struct rd_sanlock *)r->lm_data;
	int rv;

	log_debug("S %s R %s release rename", ls->name, r->name);

	res_args = malloc(2 * sizeof(struct sanlk_resource *));
	if (!res_args)
		return -ENOMEM;

	memcpy(&rd1, rds, sizeof(struct rd_sanlock));
	memcpy(&rd2, rds, sizeof(struct rd_sanlock));

	res1 = (struct sanlk_resource *)&rd1;
	res2 = (struct sanlk_resource *)&rd2;

	strcpy(res2->name, "invalid_removed");

	res_args[0] = res1;
	res_args[1] = res2;

	rv = sanlock_release(lms->sock, -1, SANLK_REL_RENAME, 2, res_args);
	if (rv < 0) {
		log_error("S %s R %s unlock_san release rename error %d", ls->name, r->name, rv);
	}

	free(res_args);

	return rv;
}

/*
 * rds->vb is stored in little-endian form
 * 
 * r_version is r->version
 *
 * for GL locks lvmlockd just increments this value
 * each time the global lock is released from ex.
 *
 * for VG locks it is the seqno from the vg metadata.
 */
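
/*
 * Sketch of the val_blk round trip implemented below (struct val_blk and
 * VAL_BLK_VERSION come from the lvmlockd headers): the ex holder packs
 * r_version little-endian and writes it with sanlock_set_lvb() before
 * releasing, and the next holder reads it back in lm_lock_sanlock():
 *
 *   vb.version   = cpu_to_le16(VAL_BLK_VERSION);
 *   vb.r_version = cpu_to_le32(r_version);
 *   sanlock_set_lvb(0, rs, (char *)&vb, sizeof(vb));
 *   ...
 *   rv = sanlock_get_lvb(0, rs, (char *)&vb, sizeof(vb));
 *   r_version = le32_to_cpu(vb.r_version);
 */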

int lm_unlock_sanlock(struct lockspace *ls, struct resource *r,
		      uint32_t r_version, uint32_t lmu_flags)
{
	struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data;
	struct rd_sanlock *rds = (struct rd_sanlock *)r->lm_data;
	struct sanlk_resource *rs = &rds->rs;
	struct val_blk vb;
	int rv;

	log_debug("S %s R %s unlock_san r_version %u flags %x",
		  ls->name, r->name, r_version, lmu_flags);

	if (daemon_test)
		return 0;

	if (rds->vb && r_version && (r->mode == LD_LK_EX)) {
		if (!rds->vb->version) {
			/* first time vb has been written */
			rds->vb->version = cpu_to_le16(VAL_BLK_VERSION);
		}
		if (r_version)
			rds->vb->r_version = cpu_to_le32(r_version);
		memcpy(&vb, rds->vb, sizeof(vb));

		log_debug("S %s R %s unlock_san set r_version %u",
			  ls->name, r->name, r_version);

		rv = sanlock_set_lvb(0, rs, (char *)&vb, sizeof(vb));
		if (rv < 0) {
			log_error("S %s R %s unlock_san set_lvb error %d",
				  ls->name, r->name, rv);
		}
	}

	/*
	 * For vgremove (FREE_VG) we unlock-rename the vg and gl locks
	 * so they cannot be reacquired.
	 */
	if ((lmu_flags & LMUF_FREE_VG) &&
	    (r->type == LD_RT_GL || r->type == LD_RT_VG)) {
		return release_rename(ls, r);
	}

	rv = sanlock_release(lms->sock, -1, 0, 1, &rs);
	if (rv < 0)
		log_error("S %s R %s unlock_san release error %d", ls->name, r->name, rv);

	if (rv == -EIO)
		rv = -ELOCKIO;

	return rv;
}

int lm_hosts_sanlock(struct lockspace *ls, int notify)
{
	struct sanlk_host *hss = NULL;
	struct sanlk_host *hs;
	uint32_t state;
	int hss_count = 0;
	int found_self = 0;
	int found_others = 0;
	int i, rv;

	rv = sanlock_get_hosts(ls->name, 0, &hss, &hss_count, 0);
	if (rv < 0) {
		log_error("S %s hosts_san get_hosts error %d", ls->name, rv);
		return 0;
	}

	if (!hss || !hss_count) {
		log_error("S %s hosts_san zero hosts", ls->name);
		return 0;
	}

	hs = hss;

	for (i = 0; i < hss_count; i++) {
		log_debug("S %s hosts_san host_id %llu gen %llu flags %x",
			  ls->name,
			  (unsigned long long)hs->host_id,
			  (unsigned long long)hs->generation,
			  hs->flags);

		if (hs->host_id == ls->host_id) {
			found_self = 1;
			hs++;
			continue;
		}

		state = hs->flags & SANLK_HOST_MASK;
		if (state == SANLK_HOST_LIVE)
			found_others++;
		hs++;
	}
	free(hss);

	if (found_others && notify) {
		/*
		 * We could use the sanlock event mechanism to notify lvmlockd
		 * on other hosts to stop this VG.  lvmlockd would need to
		 * register for and listen for sanlock events in the main loop.
		 * The events are slow to propagate.  We'd need to retry for a
		 * while before all the hosts see the event and stop the VG.
		 * sanlock_set_event(ls->name, &he, SANLK_SETEV_ALL_HOSTS);
		 *
		 * Wait to try this until there appears to be real value/interest
		 * in doing it.
		 */
	}

	if (!found_self) {
		log_error("S %s hosts_san self not found others %d", ls->name, found_others);
		return 0;
	}

	return found_others;
}

int lm_get_lockspaces_sanlock(struct list_head *ls_rejoin)
{
	struct sanlk_lockspace *ss_all = NULL;
	struct sanlk_lockspace *ss;
	struct lockspace *ls;
	int ss_count = 0;
	int i, rv;

	rv = sanlock_get_lockspaces(&ss_all, &ss_count, 0);
	if (rv < 0)
		return rv;

	if (!ss_all || !ss_count)
		return 0;

	ss = ss_all;

	for (i = 0; i < ss_count; i++) {

		if (strncmp(ss->name, LVM_LS_PREFIX, strlen(LVM_LS_PREFIX)))
			continue;

		if (!(ls = alloc_lockspace()))
			return -ENOMEM;

		ls->lm_type = LD_LM_SANLOCK;
		ls->host_id = ss->host_id;
		strncpy(ls->name, ss->name, MAX_NAME);
		strncpy(ls->vg_name, ss->name + strlen(LVM_LS_PREFIX), MAX_NAME);
		list_add_tail(&ls->list, ls_rejoin);

		ss++;
	}

	free(ss_all);
	return 0;
}

int lm_is_running_sanlock(void)
{
	uint32_t daemon_version;
	uint32_t daemon_proto;
	int rv;

	rv = sanlock_version(0, &daemon_version, &daemon_proto);
	if (rv < 0)
		return 0;
	return 1;
}