# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""Drivers for volumes."""

import abc
import time

from os_brick import exception as brick_exception
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_config import types
from oslo_log import log as logging
from oslo_utils import excutils

from cinder.common import constants
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import objects
from cinder.objects import fields
from cinder import utils
from cinder.volume import configuration
from cinder.volume import driver_utils
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import throttling
from cinder.volume import volume_utils

# Module-level logger for this driver base module.
LOG = logging.getLogger(__name__)


# Options common to all volume drivers.  Each option's help text documents
# its semantics; they are registered below both globally (for backward
# compatibility) and under the per-backend shared group so they can be set
# in [backend_defaults] or in an individual backend section.
volume_opts = [
    cfg.IntOpt('num_shell_tries',
               default=3,
               help='Number of times to attempt to run flakey shell commands'),
    cfg.IntOpt('reserved_percentage',
               default=0,
               min=0, max=100,
               help='The percentage of backend capacity is reserved'),
    # --- iSCSI / NVMe-oF target options ---
    cfg.StrOpt('target_prefix',
               default='iqn.2010-10.org.openstack:',
               help='Prefix for iSCSI/NVMEoF volumes'),
    cfg.StrOpt('target_ip_address',
               default='$my_ip',
               help='The IP address that the iSCSI/NVMEoF daemon is '
                    'listening on'),
    cfg.ListOpt('iscsi_secondary_ip_addresses',
                default=[],
                help='The list of secondary IP addresses of the '
                     'iSCSI/NVMEoF daemon'),
    cfg.PortOpt('target_port',
                default=3260,
                help='The port that the iSCSI/NVMEoF daemon is listening '
                     'on'),
    cfg.IntOpt('num_volume_device_scan_tries',
               default=3,
               help='The maximum number of times to rescan targets'
                    ' to find volume'),
    cfg.StrOpt('volume_backend_name',
               help='The backend name for a given driver implementation'),
    # --- Volume wipe-on-delete options ---
    cfg.StrOpt('volume_clear',
               default='zero',
               choices=['none', 'zero'],
               help='Method used to wipe old volumes'),
    cfg.IntOpt('volume_clear_size',
               default=0,
               max=1024,
               help='Size in MiB to wipe at start of old volumes. 1024 MiB '
                    'at max. 0 => all'),
    cfg.StrOpt('volume_clear_ionice',
               help='The flag to pass to ionice to alter the i/o priority '
                    'of the process used to zero a volume after deletion, '
                    'for example "-c3" for idle only priority.'),
    cfg.StrOpt('target_helper',
               default='tgtadm',
               choices=['tgtadm', 'lioadm', 'scstadmin', 'iscsictl',
                        'ietadm', 'nvmet', 'spdk-nvmeof', 'fake'],
               help='Target user-land tool to use. tgtadm is default, '
                    'use lioadm for LIO iSCSI support, scstadmin for SCST '
                    'target support, ietadm for iSCSI Enterprise Target, '
                    'iscsictl for Chelsio iSCSI Target, nvmet for NVMEoF '
                    'support, spdk-nvmeof for SPDK NVMe-oF, '
                    'or fake for testing. Note: The IET driver is deprecated '
                    'and will be removed in the V release.'),
    cfg.StrOpt('volumes_dir',
               default='$state_path/volumes',
               help='Volume configuration file storage '
               'directory'),
    cfg.StrOpt('iet_conf',
               default='/etc/iet/ietd.conf',
               deprecated_for_removal=True,
               deprecated_reason='IET target driver is no longer supported.',
               help='DEPRECATED: IET configuration file'),
    cfg.StrOpt('chiscsi_conf',
               default='/etc/chelsio-iscsi/chiscsi.conf',
               help='Chiscsi (CXT) global defaults configuration file'),
    cfg.StrOpt('iscsi_iotype',
               default='fileio',
               choices=['blockio', 'fileio', 'auto'],
               help=('Sets the behavior of the iSCSI target '
                     'to either perform blockio or fileio '
                     'optionally, auto can be set and Cinder '
                     'will autodetect type of backing device')),
    # --- Volume copy options ---
    cfg.StrOpt('volume_dd_blocksize',
               default='1M',
               help='The default block size used when copying/clearing '
                    'volumes'),
    cfg.StrOpt('volume_copy_blkio_cgroup_name',
               default='cinder-volume-copy',
               help='The blkio cgroup name to be used to limit bandwidth '
                    'of volume copy'),
    cfg.IntOpt('volume_copy_bps_limit',
               default=0,
               help='The upper limit of bandwidth of volume copy. '
                    '0 => unlimited'),
    cfg.StrOpt('iscsi_write_cache',
               default='on',
               choices=['on', 'off'],
               help='Sets the behavior of the iSCSI target to either '
                    'perform write-back(on) or write-through(off). '
                    'This parameter is valid if target_helper is set '
                    'to tgtadm.'),
    cfg.StrOpt('iscsi_target_flags',
               default='',
               help='Sets the target-specific flags for the iSCSI target. '
                    'Only used for tgtadm to specify backing device flags '
                    'using bsoflags option. The specified string is passed '
                    'as is to the underlying tool.'),
    cfg.StrOpt('target_protocol',
               default='iscsi',
               choices=['iscsi', 'iser', 'nvmet_rdma', 'nvmet_tcp'],
               help='Determines the target protocol for new volumes, '
                    'created with tgtadm, lioadm and nvmet target helpers. '
                    'In order to enable RDMA, this parameter should be set '
                    'with the value "iser". The supported iSCSI protocol '
                    'values are "iscsi" and "iser", in case of nvmet target '
                    'set to "nvmet_rdma" or "nvmet_tcp".'),
    # --- TLS / certificate options ---
    cfg.StrOpt('driver_client_cert_key',
               help='The path to the client certificate key for verification, '
                    'if the driver supports it.'),
    cfg.StrOpt('driver_client_cert',
               help='The path to the client certificate for verification, '
                    'if the driver supports it.'),
    cfg.BoolOpt('driver_use_ssl',
                default=False,
                help='Tell driver to use SSL for connection to backend '
                     'storage if the driver supports it.'),
    # NOTE: kept as a string (not a float) because 'auto' is a legal value;
    # the regex enforces either 'auto' or a non-negative decimal number.
    cfg.StrOpt('max_over_subscription_ratio',
               default='20.0',
               regex=r'^(auto|\d*\.\d+|\d+)$',
               help='Representation of the over subscription ratio '
                    'when thin provisioning is enabled. Default ratio is '
                    '20.0, meaning provisioned capacity can be 20 times of '
                    'the total physical capacity. If the ratio is 10.5, it '
                    'means provisioned capacity can be 10.5 times of the '
                    'total physical capacity. A ratio of 1.0 means '
                    'provisioned capacity cannot exceed the total physical '
                    'capacity. If ratio is \'auto\', Cinder will '
                    'automatically calculate the ratio based on the '
                    'provisioned capacity and the used space. If not set to '
                    'auto, the ratio has to be a minimum of 1.0.'),
    # --- CHAP authentication options ---
    cfg.BoolOpt('use_chap_auth',
                default=False,
                help='Option to enable/disable CHAP authentication for '
                     'targets.'),
    cfg.StrOpt('chap_username',
               default='',
               help='CHAP user name.'),
    cfg.StrOpt('chap_password',
               default='',
               help='Password for specified CHAP account name.',
               secret=True),
    cfg.StrOpt('driver_data_namespace',
               help='Namespace for driver private data values to be '
                    'saved in.'),
    # --- Scheduler filter/goodness hooks ---
    cfg.StrOpt('filter_function',
               help='String representation for an equation that will be '
                    'used to filter hosts. Only used when the driver '
                    'filter is set to be used by the Cinder scheduler.'),
    cfg.StrOpt('goodness_function',
               help='String representation for an equation that will be '
                    'used to determine the goodness of a host. Only used '
                    'when using the goodness weigher is set to be used by '
                    'the Cinder scheduler.'),
    cfg.BoolOpt('driver_ssl_cert_verify',
                default=False,
                help='If set to True the http client will validate the SSL '
                     'certificate of the backend endpoint.'),
    cfg.StrOpt('driver_ssl_cert_path',
               help='Can be used to specify a non default path to a '
               'CA_BUNDLE file or directory with certificates of '
               'trusted CAs, which will be used to validate the backend'),
    cfg.ListOpt('trace_flags',
                help='List of options that control which trace info '
                     'is written to the DEBUG log level to assist '
                     'developers. Valid values are method and api.'),
    # May be specified multiple times; each occurrence describes one
    # replication target device as a dict.
    cfg.MultiOpt('replication_device',
                 item_type=types.Dict(),
                 secret=True,
                 help="Multi opt of dictionaries to represent a replication "
                      "target device.  This option may be specified multiple "
                      "times in a single config section to specify multiple "
                      "replication target devices.  Each entry takes the "
                      "standard dict config form: replication_device = "
                      "target_device_id:<required>,"
                      "key1:value1,key2:value2..."),
    cfg.BoolOpt('report_discard_supported',
                default=False,
                help='Report to clients of Cinder that the backend supports '
                     'discard (aka. trim/unmap). This will not actually '
                     'change the behavior of the backend or the client '
                     'directly, it will only notify that it can be used.'),
    cfg.StrOpt('storage_protocol',
               ignore_case=True,
               default=constants.ISCSI,
               choices=[constants.ISCSI, constants.FC],
               help='Protocol for transferring data between host and '
                    'storage back-end.'),
    cfg.BoolOpt('enable_unsupported_driver',
                default=False,
                help="Set this to True when you want to allow an unsupported "
                     "driver to start.  Drivers that haven't maintained a "
                     "working CI system and testing are marked as unsupported "
                     "until CI is working again.  This also marks a driver as "
                     "deprecated and may be removed in the next release."),
    cfg.StrOpt('backend_availability_zone',
               default=None,
               help='Availability zone for this volume backend. If not set, '
                    'the storage_availability_zone option value is used as '
                    'the default for all backends.'),
]

# iSER-specific aliases kept for backward compatibility with configurations
# written before the generic target_* option names existed.
iser_opts = [
    cfg.IntOpt('num_iser_scan_tries',
               default=3,
               help='The maximum number of times to rescan iSER target '
                    'to find volume'),
    cfg.StrOpt('iser_target_prefix',
               default='iqn.2010-10.org.openstack:',
               help='Prefix for iSER volumes'),
    cfg.StrOpt('iser_ip_address',
               default='$my_ip',
               help='The IP address that the iSER daemon is listening on'),
    cfg.PortOpt('iser_port',
                default=3260,
                help='The port that the iSER daemon is listening on'),
    cfg.StrOpt('iser_helper',
               default='tgtadm',
               help='The name of the iSER target user-land tool to use'),
]

# Options specific to the nvmet (NVMe-oF) target helper.
nvmet_opts = [
    cfg.PortOpt('nvmet_port_id',
                default=1,
                help='The port that the NVMe target is listening on.'),
    cfg.IntOpt('nvmet_ns_id',
               default=10,
               help='The namespace id associated with the subsystem '
                    'that will be created with the path for the LVM volume.'),
]

# Options specific to the SCST target helper.
scst_opts = [
    cfg.StrOpt('scst_target_iqn_name',
               help='Certain ISCSI targets have predefined target names, '
                    'SCST target driver uses this name.'),
    cfg.StrOpt('scst_target_driver',
               default='iscsi',
               help='SCST target implementation can choose from multiple '
                    'SCST target drivers.'),
]

# Options controlling how volumes are prepared for backup.
backup_opts = [
    cfg.BoolOpt('backup_use_temp_snapshot',
                default=False,
                help='If this is set to True, a temporary snapshot will '
                     'be created for performing non-disruptive backups. '
                     'Otherwise a temporary volume will be cloned '
                     'in order to perform a backup.'),
]

# Options for volume<->image transfers and the image-volume cache.
image_opts = [
    cfg.BoolOpt('image_upload_use_cinder_backend',
                default=False,
                help='If set to True, upload-to-image in raw format will '
                     'create a cloned volume and register its location to '
                     'the image service, instead of uploading the volume '
                     'content. The cinder backend and locations support '
                     'must be enabled in the image service.'),
    cfg.BoolOpt('image_upload_use_internal_tenant',
                default=False,
                help='If set to True, the image volume created by '
                     'upload-to-image will be placed in the internal tenant. '
                     'Otherwise, the image volume is created in the current '
                     'context\'s tenant.'),
    cfg.BoolOpt('image_volume_cache_enabled',
                default=False,
                help='Enable the image volume cache for this backend.'),
    cfg.IntOpt('image_volume_cache_max_size_gb',
               default=0,
               help='Max size of the image volume cache for this backend in '
                    'GB. 0 => unlimited.'),
    cfg.IntOpt('image_volume_cache_max_count',
               default=0,
               help='Max number of entries allowed in the image volume cache. '
                    '0 => unlimited.'),
    cfg.BoolOpt('use_multipath_for_image_xfer',
                default=False,
                help='Do we attach/detach volumes in cinder using multipath '
                     'for volume to image and image to volume transfers? '
                     'This parameter needs to be configured for each backend '
                     'section or in [backend_defaults] section as a common '
                     'configuration for all backends.'),
    cfg.BoolOpt('enforce_multipath_for_image_xfer',
                default=False,
                help='If this is set to True, attachment of volumes for '
                     'image transfer will be aborted when multipathd is not '
                     'running. Otherwise, it will fallback to single path. '
                     'This parameter needs to be configured for each backend '
                     'section or in [backend_defaults] section as a common '
                     'configuration for all backends.'),
]
# Host-registration behavior on networks where initiator FQDNs may not be
# unique (per the help text, honored by the 3PAR drivers).
fqdn_opts = [
    cfg.BoolOpt('unique_fqdn_network',
                default=True,
                help="Whether or not our private network has unique FQDN on "
                     "each initiator or not. For example networks with QA "
                     "systems usually have multiple servers/VMs with the same "
                     "FQDN. When true this will create host entries on 3PAR "
                     "using the FQDN, when false it will use the reversed "
                     "IQN/WWNN."),
]


CONF = cfg.CONF

# Register every driver option list under the per-backend shared group so
# the options can be set in [backend_defaults] or an individual backend
# section, and also register them globally for backward compatibility with
# configurations that set them in [DEFAULT].
CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(iser_opts, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(nvmet_opts, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(scst_opts, group=configuration.SHARED_CONF_GROUP)
# NOTE(review): backup_opts was the only option list missing from the
# shared backend group, which prevented backup_use_temp_snapshot from
# being set per backend / in [backend_defaults]; register it like its
# siblings (double registration of the same opts is harmless).
CONF.register_opts(backup_opts, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(image_opts, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(fqdn_opts, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(volume_opts)
CONF.register_opts(iser_opts)
CONF.register_opts(nvmet_opts)
CONF.register_opts(scst_opts)
CONF.register_opts(backup_opts)
CONF.register_opts(image_opts)
CONF.import_opt('backup_use_same_host', 'cinder.backup.api')


class BaseVD(object, metaclass=abc.ABCMeta):
    """Executes commands relating to Volumes.

    Base Driver for Cinder Volume Control Path,
    This includes supported/required implementation
    for API calls.  Also provides *generic* implementation
    of core features like cloning, copy_image_to_volume etc,
    this way drivers that inherit from this base class and
    don't offer their own impl can fall back on a general
    solution here.

    Key thing to keep in mind with this driver is that it's
    intended that these drivers ONLY implement Control Path
    details (create, delete, extend...), while transport or
    data path related implementation should be a *member object*
    that we call a connector.  The point here is that for example
    don't allow the LVM driver to implement iSCSI methods, instead
    call whatever connector it has configured via conf file
    (iSCSI{LIO, TGT, ET}, FC, etc).

    In the base class and for example the LVM driver we do this via a has-a
    relationship and just provide an interface to the specific connector
    methods.  How you do this in your own driver is of course up to you.
    """
    # Driver version string reported via get_version(); subclasses override.
    VERSION = "N/A"

    # NOTE(geguileo): By default we assume drivers don't support Active-Active
    # configurations.  If driver supports it then they can set this class
    # attribute on the driver, and if support depends on configuration options
    # then they can set it at the instance level on the driver's __init__
    # method since the manager will do the check after that.
    SUPPORTS_ACTIVE_ACTIVE = False

    # If a driver hasn't maintained their CI system, this will get
    # set to False, which prevents the driver from starting.
    # Add enable_unsupported_driver = True in cinder.conf to get
    # the unsupported driver started.
    SUPPORTED = True

    # Methods checked to detect a driver implements a replication feature
    # (see supports_replication_feature); maps feature name -> method name.
    REPLICATION_FEATURE_CHECKERS = {'v2.1': 'failover_host',
                                    'a/a': 'failover_completed'}

    def __init__(self, execute=utils.execute, *args, **kwargs):
        """Initialize common driver state.

        :param execute: callable used to run shell commands (tests may
                        inject a fake)
        :param kwargs: recognizes ``host``, ``cluster_name`` and
                       ``configuration`` (backend configuration wrapper)
        :raises Invalid: when replication is configured on a clustered
                         service but the driver lacks Active-Active
                         replication support
        """
        # TODO(stephenfin): Drop this in favour of using 'db' directly
        self.db = db
        self.host = kwargs.get('host')
        self.cluster_name = kwargs.get('cluster_name')
        self.configuration = kwargs.get('configuration', None)

        if self.configuration:
            # Make all shared option groups resolvable through the backend
            # configuration object.
            self.configuration.append_config_values(volume_opts)
            self.configuration.append_config_values(iser_opts)
            self.configuration.append_config_values(nvmet_opts)
            self.configuration.append_config_values(scst_opts)
            self.configuration.append_config_values(backup_opts)
            self.configuration.append_config_values(image_opts)
            self.configuration.append_config_values(fqdn_opts)
            volume_utils.setup_tracing(
                self.configuration.safe_get('trace_flags'))

            # NOTE(geguileo): Don't allow to start if we are enabling
            # replication on a cluster service with a backend that doesn't
            # support the required mechanism for Active-Active.
            replication_devices = self.configuration.safe_get(
                'replication_device')
            if (self.cluster_name and replication_devices and
                    not self.supports_replication_feature('a/a')):
                raise exception.Invalid(_("Driver doesn't support clustered "
                                          "replication."))

        self.driver_utils = driver_utils.VolumeDriverUtils(
            self._driver_data_namespace(), self.db)

        self._execute = execute
        self._stats = {}
        self._throttle = None

        self.pools = []
        self.capabilities = {}

        # We set these mappings up in the base driver so they
        # can be used by children
        # (intended for LVM, but others could use as well)
        self.target_mapping = {
            'fake': 'cinder.volume.targets.fake.FakeTarget',
            'ietadm': 'cinder.volume.targets.iet.IetAdm',
            'lioadm': 'cinder.volume.targets.lio.LioAdm',
            'tgtadm': 'cinder.volume.targets.tgt.TgtAdm',
            'scstadmin': 'cinder.volume.targets.scst.SCSTAdm',
            'iscsictl': 'cinder.volume.targets.cxt.CxtAdm',
            'nvmet': 'cinder.volume.targets.nvmet.NVMET',
            'spdk-nvmeof': 'cinder.volume.targets.spdknvmf.SpdkNvmf'}

        # set True by manager after successful check_for_setup
        self._initialized = False

    def _driver_data_namespace(self):
        """Return the namespace used for this driver's private data.

        When a configuration is present, prefer ``driver_data_namespace``,
        falling back to ``volume_backend_name`` (even if unset); without a
        configuration, use the driver class name.
        """
        if not self.configuration:
            return self.__class__.__name__
        ns = self.configuration.safe_get('driver_data_namespace')
        if ns:
            return ns
        return self.configuration.safe_get('volume_backend_name')

    def _is_non_recoverable(self, err, non_recoverable_list):
        """Check whether an error message indicates a non-recoverable error.

        :param err: stderr text from a failed command execution
        :param non_recoverable_list: substrings identifying errors that
                                     retrying cannot fix
        :returns: True if any substring appears in ``err``, else False
        """
        # Idiomatic replacement for the original manual loop.
        return any(item in err for item in non_recoverable_list)

    def _try_execute(self, *command, **kwargs):
        """Run *command*, retrying with quadratic backoff on failure.

        Volume commands can partially fail due to timing; running them a
        second time usually recovers.  Substrings passed via the
        ``no_retry_list`` kwarg mark errors that are never retried.
        """
        no_retry = kwargs.pop('no_retry_list', [])

        attempt = 0
        while True:
            try:
                self._execute(*command, **kwargs)
                return True
            except processutils.ProcessExecutionError as ex:
                attempt += 1
                exhausted = attempt >= self.configuration.num_shell_tries
                fatal = self._is_non_recoverable(ex.stderr, no_retry)
                if exhausted or fatal:
                    raise

                LOG.exception("Recovering from a failed execute. "
                              "Try number %s", attempt)
                # Quadratic backoff between retries.
                time.sleep(attempt ** 2)

    def _detach_volume(self, context, attach_info, volume, properties,
                       force=False, remote=False, ignore_errors=False):
        """Disconnect the volume from the host.

        With the force parameter we can indicate if we give more importance to
        cleaning up as much as possible or if data integrity has higher
        priority.  This requires the latests OS-Brick code that adds this
        feature.

        We can also force errors to be ignored using ignore_errors.

        :param attach_info: dict with 'connector', 'conn' and 'device' keys
                            as produced by _connect_device (may be None)
        :param remote: terminate the connection through the remote volume
                       manager instead of the local driver
        :raises RemoveExportException: when local remove_export fails
        """
        # Use Brick's code to do attach/detach.  The chainer accumulates
        # errors from each step so cleanup continues past failures; it is
        # re-raised at the end unless ignore_errors is set.
        exc = brick_exception.ExceptionChainer()
        if attach_info:
            connector = attach_info['connector']
            with exc.context(force, 'Disconnect failed'):
                connector.disconnect_volume(attach_info['conn']['data'],
                                            attach_info['device'], force=force,
                                            ignore_errors=ignore_errors)

        if remote:
            # Call remote manager's terminate_connection which includes
            # driver's terminate_connection and remove export
            rpcapi = volume_rpcapi.VolumeAPI()
            with exc.context(force, 'Remote terminate connection failed'):
                rpcapi.terminate_connection(context, volume, properties,
                                            force=force)
        else:
            # Call local driver's terminate_connection and remove export.
            # NOTE(avishay) This is copied from the manager's code - need to
            # clean this up in the future.
            with exc.context(force,
                             _('Unable to terminate volume connection')):
                try:
                    self.terminate_connection(volume, properties, force=force)
                except Exception as err:
                    err_msg = (
                        _('Unable to terminate volume connection: %(err)s')
                        % {'err': err})
                    LOG.error(err_msg)
                    raise exception.VolumeBackendAPIException(data=err_msg)

            with exc.context(force, _('Unable to remove export')):
                try:
                    LOG.debug("volume %s: removing export", volume['id'])
                    self.remove_export(context, volume)
                except Exception as ex:
                    LOG.exception("Error detaching volume %(volume)s, "
                                  "due to remove export failure.",
                                  {"volume": volume['id']})
                    raise exception.RemoveExportException(volume=volume['id'],
                                                          reason=ex)
        if exc and not ignore_errors:
            raise exc

    def set_initialized(self):
        """Mark the driver as initialized (called by the volume manager)."""
        self._initialized = True

    @property
    def initialized(self):
        """Whether the manager has marked driver setup as complete."""
        return self._initialized

    @property
    def supported(self):
        """Whether this driver is supported (see SUPPORTED class attr)."""
        return self.SUPPORTED

    def set_throttle(self):
        """Install the default volume-copy bandwidth throttle.

        The bps limit and blkio cgroup name come from the backend section
        when available, otherwise from the global configuration.  When no
        limit is configured, or the cgroup cannot be created, throttling
        is disabled (the default throttle becomes None).
        """
        conf = self.configuration
        bps_limit = ((conf and conf.safe_get('volume_copy_bps_limit')) or
                     CONF.volume_copy_bps_limit)
        cgroup_name = ((conf and
                        conf.safe_get('volume_copy_blkio_cgroup_name')) or
                       CONF.volume_copy_blkio_cgroup_name)
        throttle = None
        if bps_limit:
            try:
                throttle = throttling.BlkioCgroup(int(bps_limit), cgroup_name)
            except processutils.ProcessExecutionError as err:
                LOG.warning('Failed to activate volume copy throttling: '
                            '%(err)s', {'err': err})
        self._throttle = throttle
        throttling.Throttle.set_default(throttle)

    def get_version(self):
        """Get the current version of this driver.

        :returns: the driver's VERSION string
        """
        return self.VERSION

    @abc.abstractmethod
    def check_for_setup_error(self):
        """Validate driver setup; raise on fatal misconfiguration.

        Drivers must implement this; an exception raised here keeps the
        driver from being marked initialized.
        """
        return

    @staticmethod
    def get_driver_options():
        """Return the oslo_config options specific to the driver.

        :returns: list of oslo.config Opt objects (the shared volume_opts)
        """
        return volume_opts

    @abc.abstractmethod
    def create_volume(self, volume):
        """Creates a volume.

        Can optionally return a Dictionary of changes to the volume object to
        be persisted.

        If volume_type extra specs includes
        'capabilities:replication <is> True' the driver
        needs to create a volume replica (secondary), and setup replication
        between the newly created volume and the secondary volume.
        Returned dictionary should include:

        .. code-block:: python

            volume['replication_status'] = 'copying'
            volume['replication_extended_status'] = <driver specific value>
            volume['driver_data'] = <driver specific value>

        :param volume: the volume object to create on the backend
        :returns: model update dict to persist, or None
        """
        return

    @abc.abstractmethod
    def delete_volume(self, volume):
        """Deletes a volume.

        If volume_type extra specs includes 'replication: <is> True'
        then the driver needs to delete the volume replica too.

        It is imperative that this operation ensures that the data from the
        deleted volume cannot leak into new volumes when they are created, as
        new volumes are likely to belong to a different tenant/project.

        If the driver uses custom file locks they should be cleaned on success
        using cinder.utils.synchronized_remove

        :param volume: the volume object to delete from the backend
        """
        return

    def secure_file_operations_enabled(self):
        """Determine if driver is running in Secure File Operations mode.

        The Cinder Volume driver needs to query if this driver is running
        in a secure file operations mode. By default, it is False: any driver
        that does support secure file operations should override this method.

        :returns: False (base implementation)
        """
        return False

    def get_volume_stats(self, refresh=False):
        """Return backend volume stats, refreshing the cache when needed.

        :param refresh: when True, recompute the stats before returning;
                        stats are also computed when no cache exists yet
        :returns: the cached stats dictionary
        """
        if refresh or not self._stats:
            self._update_volume_stats()
        return self._stats

    def _set_property(self, properties, entry, title, description,
                      type, **kwargs):
        """Register one capability property under *entry* in *properties*.

        Only the schema keys 'enum', 'default', 'minimum' and 'maximum'
        are honoured from **kwargs; anything else is silently dropped.
        """
        allowed = ('enum', 'default', 'minimum', 'maximum')
        prop = {'title': title, 'description': description, 'type': type}
        prop.update((k, v) for k, v in kwargs.items() if k in allowed)
        properties[entry] = prop

    def _init_standard_capabilities(self):
        """Build the dictionary of Cinder standard capabilities.

        Returns a dict of the standard boolean capabilities; the keys
        carry no vendor prefix or ':' separator.
        """
        standard = (
            ("thin_provisioning", "Thin Provisioning",
             _("Sets thin provisioning.")),
            ("compression", "Compression",
             _("Enables compression.")),
            ("qos", "QoS",
             _("Enables QoS.")),
            ("replication_enabled", "Replication",
             _("Enables replication.")),
        )

        properties = {}
        for entry, title, description in standard:
            self._set_property(properties, entry, title, description,
                               "boolean")
        return properties

    def _init_vendor_properties(self):
        """Create a dictionary of vendor unique properties.

        Returns both the created dictionary and the vendor name; the
        vendor name is used to validate the prefix of vendor unique
        property keys.

        Rules:

        - A vendor name should not contain a colon(:) since that is the
          separator; any colon is automatically replaced by an
          underscore(_), e.g. abc:d -> abc_d.
        - The vendor prefix is the vendor name itself, e.g. abcd.
        - Vendor unique property keys must start with the vendor prefix
          plus ':', e.g. abcd:maxIOPS.

        Backend drivers override this method and register their own
        properties with _set_property(), for example::

            properties = {}
            self._set_property(
                properties,
                "abcd:compression_type",
                "Compression type",
                _("Specifies compression type."),
                "string",
                enum=["lossy", "lossless", "special"])

            self._set_property(
                properties,
                "abcd:minIOPS",
                "Minimum IOPS QoS",
                _("Sets minimum IOPS if QoS is enabled."),
                "integer",
                minimum=10,
                default=100)

            return properties, 'abcd'

        :returns: tuple of (vendor property dict, vendor name); the base
                  implementation returns an empty dict and None
        """
        LOG.info("Driver hasn't implemented _init_vendor_properties()")
        return {}, None

    def init_capabilities(self):
        """Obtain backend volume stats and capabilities list.

        This stores a dictionary which is consisted of two parts.
        First part includes static backend capabilities which are
        obtained by get_volume_stats(). Second part is properties,
        which includes parameters correspond to extra specs.
        This properties part is consisted of cinder standard
        capabilities and vendor unique properties.

        Using this capabilities list, operator can manage/configure
        backend using key/value from capabilities without specific
        knowledge of backend.
        """

        # Set static backend capabilities from get_volume_stats()
        stats = self.get_volume_stats(True)
        if stats:
            self.capabilities = stats.copy()

        # Set cinder standard capabilities
        self.capabilities['properties'] = self._init_standard_capabilities()

        # Set Vendor unique properties
        vendor_prop, vendor_name = self._init_vendor_properties()
        if vendor_name and vendor_prop:
            updated_vendor_prop = {}
            old_name = None
            # Replace colon in vendor name to underscore.
            if ':' in vendor_name:
                old_name = vendor_name
                vendor_name = vendor_name.replace(':', '_')
                # BUGFIX: the message previously contained a stray '"'
                # after the substitution ('%(name)s".').
                LOG.warning('The colon in vendor name was replaced '
                            'by underscore. Updated vendor name is '
                            '%(name)s.', {'name': vendor_name})

            for key in vendor_prop:
                # If key has colon in vendor name field, we replace it to
                # underscore.
                # ex. abc:d:storagetype:provisioning
                #     -> abc_d:storagetype:provisioning
                if old_name and key.startswith(old_name + ':'):
                    new_key = key.replace(old_name, vendor_name, 1)
                    updated_vendor_prop[new_key] = vendor_prop[key]
                    continue
                # Keys that do not carry the vendor prefix are dropped.
                if not key.startswith(vendor_name + ':'):
                    LOG.warning('Vendor unique property "%(property)s" '
                                'must start with vendor prefix with colon '
                                '"%(prefix)s". The property was '
                                'not registered on capabilities list.',
                                {'prefix': vendor_name + ':',
                                 'property': key})
                    continue
                updated_vendor_prop[key] = vendor_prop[key]

            # Update vendor unique properties to the dictionary
            self.capabilities['vendor_prefix'] = vendor_name
            self.capabilities['properties'].update(updated_vendor_prop)

        LOG.debug("Initialized capabilities list: %s.", self.capabilities)

    def _update_pools_and_stats(self, data):
        """Updates data for pools and volume stats based on provided data.

        Appends one entry per configured pool to ``data['pools']`` (or a
        single entry for the whole backend when no pools are configured)
        and caches the result as the driver stats.
        """
        # provisioned_capacity_gb is set to None by default; None won't be
        # used in calculation -- it is overridden by the driver's
        # provisioned_capacity_gb if reported, otherwise it defaults to
        # allocated_capacity_gb in host_manager.py.
        def _pool_entry(name):
            # One stats skeleton per pool, shared shape for both branches.
            return {
                'pool_name': name,
                'total_capacity_gb': 0,
                'free_capacity_gb': 0,
                'provisioned_capacity_gb': None,
                'reserved_percentage': 100,
                'QoS_support': False,
                'filter_function': self.get_filter_function(),
                'goodness_function': self.get_goodness_function(),
            }

        if self.pools:
            data["pools"].extend(_pool_entry(p) for p in self.pools)
        else:
            # No pool configured, the whole backend will be treated as a pool
            data["pools"].append(_pool_entry(data["volume_backend_name"]))
        self._stats = data

    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch image from image_service and write to unencrypted volume.

        This does not attach an encryptor layer when connecting to the volume.

        :param volume: destination volume
        :param image_service: service to fetch the image from
        :param image_id: id of the image to copy
        """
        self._copy_image_data_to_volume(
            context, volume, image_service, image_id, encrypted=False)

    def copy_image_to_encrypted_volume(
            self, context, volume, image_service, image_id):
        """Fetch image from image_service and write to encrypted volume.

        This attaches the encryptor layer when connecting to the volume.

        :param volume: destination (encrypted) volume
        :param image_service: service to fetch the image from
        :param image_id: id of the image to copy
        """
        self._copy_image_data_to_volume(
            context, volume, image_service, image_id, encrypted=True)

    def _copy_image_data_to_volume(self, context, volume, image_service,
                                   image_id, encrypted=False):
        """Fetch the image from image_service and write it to the volume.

        Attaches the volume locally, optionally layers an encryptor over
        the attachment, streams the image onto the device, then always
        detaches (and detaches the encryptor) on the way out.

        :param encrypted: when True, attach the volume's encryptor before
                          writing and detach it afterwards
        """
        LOG.debug('copy_image_to_volume %s.', volume['name'])

        use_multipath = self.configuration.use_multipath_for_image_xfer
        enforce_multipath = self.configuration.enforce_multipath_for_image_xfer
        properties = volume_utils.brick_get_connector_properties(
            use_multipath,
            enforce_multipath)
        attach_info, volume = self._attach_volume(context, volume, properties)
        try:
            if encrypted:
                encryption = self.db.volume_encryption_metadata_get(context,
                                                                    volume.id)
                volume_utils.brick_attach_volume_encryptor(context,
                                                           attach_info,
                                                           encryption)
            try:
                image_utils.fetch_to_raw(
                    context,
                    image_service,
                    image_id,
                    attach_info['device']['path'],
                    self.configuration.volume_dd_blocksize,
                    size=volume['size'])
            except exception.ImageTooBig:
                # Re-raise after logging; caller handles the failure.
                with excutils.save_and_reraise_exception():
                    LOG.exception("Copying image %(image_id)s "
                                  "to volume failed due to "
                                  "insufficient available space.",
                                  {'image_id': image_id})

            finally:
                # Detach the encryptor before the volume itself is detached.
                if encrypted:
                    volume_utils.brick_detach_volume_encryptor(attach_info,
                                                               encryption)
        finally:
            self._detach_volume(context, attach_info, volume, properties,
                                force=True)

    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy the volume to the specified image.

        Attaches the volume locally, uploads its contents through the
        image service, and always detaches afterwards.
        """
        LOG.debug('copy_volume_to_image %s.', volume['name'])

        properties = volume_utils.brick_get_connector_properties(
            self.configuration.use_multipath_for_image_xfer,
            self.configuration.enforce_multipath_for_image_xfer)
        attach_info, volume = self._attach_volume(context, volume, properties)

        try:
            volume_utils.upload_volume(context,
                                       image_service,
                                       image_meta,
                                       attach_info['device']['path'],
                                       volume,
                                       compress=True)
        finally:
            # The attachment was only read from, so force the detach and
            # ignore any errors it raises.
            self._detach_volume(context, attach_info, volume, properties,
                                force=True, ignore_errors=True)

    def before_volume_copy(self, context, src_vol, dest_vol, remote=None):
        """Driver-specific actions before copyvolume data.

        This method will be called before _copy_volume_data during volume
        migration
        """
        # Default implementation is a no-op; drivers override as needed.
        pass

    def after_volume_copy(self, context, src_vol, dest_vol, remote=None):
        """Driver-specific actions after copyvolume data.

        This method will be called after _copy_volume_data during volume
        migration
        """
        # Default implementation is a no-op; drivers override as needed.
        pass

    def get_filter_function(self):
        """Get filter_function string.

        Returns either the string from the driver instance or global section
        in cinder.conf. If nothing is specified in cinder.conf, then try to
        find the default filter_function. When None is returned the scheduler
        will always pass the driver instance.

        :returns: a filter_function string or None
        """
        # Precedence: backend section, then global config, then driver
        # default; falsy values fall through, matching the original chain.
        return (self.configuration.filter_function or
                CONF.filter_function or
                self.get_default_filter_function())

    def get_goodness_function(self):
        """Get good_function string.

        Returns either the string from the driver instance or global section
        in cinder.conf. If nothing is specified in cinder.conf, then try to
        find the default goodness_function. When None is returned the scheduler
        will give the lowest score to the driver instance.

        :returns: a goodness_function string or None
        """
        # Precedence mirrors get_filter_function: backend section, global
        # config, then driver default.
        return (self.configuration.goodness_function or
                CONF.goodness_function or
                self.get_default_goodness_function())

    def get_default_filter_function(self):
        """Get the default filter_function string.

        Each driver could overwrite the method to return a well-known
        default string if it is available.

        :returns: None (base implementation)
        """
        return None

    def get_default_goodness_function(self):
        """Get the default goodness_function string.

        Each driver could overwrite the method to return a well-known
        default string if it is available.

        :returns: None (base implementation)
        """
        return None

    def _attach_volume(self, context, volume, properties, remote=False):
        """Attach the volume.

        :param context: request context
        :param volume: the volume to attach
        :param properties: connector properties for this host
        :param remote: attach through the remote volume manager instead of
                       the local driver
        :returns: tuple of (attach_info dict, possibly-updated volume)
        :raises ExportFailure: when create_export fails after updating the
                               volume model
        :raises VolumeBackendAPIException: when initialize_connection or
                                           its cleanup fails
        """
        if remote:
            # Call remote manager's initialize_connection which includes
            # driver's create_export and initialize_connection
            rpcapi = volume_rpcapi.VolumeAPI()
            try:
                conn = rpcapi.initialize_connection(context, volume,
                                                    properties)
            except Exception:
                with excutils.save_and_reraise_exception():
                    # It is possible that initialize_connection fails due to
                    # timeout. In fact, the volume is already attached after
                    # the timeout error is raised, so the connection worths
                    # a try of terminating.
                    try:
                        rpcapi.terminate_connection(context, volume,
                                                    properties, force=True)
                    except Exception:
                        LOG.warning("Failed terminating the connection "
                                    "of volume %(volume_id)s, but it is "
                                    "acceptable.",
                                    {'volume_id': volume['id']})
        else:
            # Call local driver's create_export and initialize_connection.
            # NOTE(avishay) This is copied from the manager's code - need to
            # clean this up in the future.
            model_update = None
            try:
                LOG.debug("Volume %s: creating export", volume['id'])
                model_update = self.create_export(context, volume, properties)
                if model_update:
                    volume.update(model_update)
                    volume.save()
            except exception.CinderException as ex:
                # NOTE(review): when create_export raises before producing a
                # model update, the exception is swallowed here and we fall
                # through to initialize_connection -- confirm intentional.
                if model_update:
                    LOG.exception("Failed updating model of volume "
                                  "%(volume_id)s with driver provided "
                                  "model %(model)s",
                                  {'volume_id': volume['id'],
                                   'model': model_update})
                    raise exception.ExportFailure(reason=ex)

            try:
                conn = self.initialize_connection(volume, properties)
            except Exception as err:
                try:
                    err_msg = (_('Unable to fetch connection information from '
                                 'backend: %(err)s') % {'err': err})
                    LOG.error(err_msg)
                    LOG.debug("Cleaning up failed connect initialization.")
                    self.remove_export(context, volume)
                except Exception as ex:
                    ex_msg = (_('Error encountered during cleanup '
                                'of a failed attach: %(ex)s') % {'ex': ex})
                    # BUGFIX: log the cleanup failure message; this
                    # previously logged err_msg a second time.
                    LOG.error(ex_msg)
                    raise exception.VolumeBackendAPIException(data=ex_msg)
                raise exception.VolumeBackendAPIException(data=err_msg)

            # Add encrypted flag to connection_info if not set in the driver.
            if conn['data'].get('encrypted') is None:
                encrypted = bool(volume.encryption_key_id)
                conn['data']['encrypted'] = encrypted

        try:
            attach_info = self._connect_device(conn)
        except Exception as exc:
            # We may have reached a point where we have attached the volume,
            # so we have to detach it (do the cleanup).
            attach_info = getattr(exc, 'kwargs', {}).get('attach_info', None)

            try:
                LOG.debug('Device for volume %s is unavailable but did '
                          'attach, detaching it.', volume['id'])
                self._detach_volume(context, attach_info, volume,
                                    properties, force=True,
                                    remote=remote)
            except Exception:
                LOG.exception('Error detaching volume %s',
                              volume['id'])
            raise

        return (attach_info, volume)

    def _attach_snapshot(self, ctxt, snapshot, properties):
        """Attach the snapshot.

        Creates the snapshot export, persists any provider fields the
        driver returns, and initializes the connection.

        :param ctxt: request context
        :param snapshot: the snapshot to attach
        :param properties: connector properties for this host
        :returns: connection info dict from
                  initialize_connection_snapshot
        :raises ExportFailure: when export creation fails after updating
                               the snapshot model
        :raises VolumeBackendAPIException: when connection initialization
                                           or its cleanup fails
        """
        model_update = None
        try:
            LOG.debug("Snapshot %s: creating export.", snapshot.id)
            model_update = self.create_export_snapshot(ctxt, snapshot,
                                                       properties)
            if model_update:
                snapshot.provider_location = model_update.get(
                    'provider_location', None)
                snapshot.provider_auth = model_update.get(
                    'provider_auth', None)
                snapshot.save()
        except exception.CinderException as ex:
            # NOTE(review): mirrors _attach_volume -- the exception is
            # swallowed when no model update was produced; confirm
            # intentional.
            if model_update:
                LOG.exception("Failed updating model of snapshot "
                              "%(snapshot_id)s with driver provided "
                              "model %(model)s.",
                              {'snapshot_id': snapshot.id,
                               'model': model_update})
                raise exception.ExportFailure(reason=ex)

        try:
            conn = self.initialize_connection_snapshot(
                snapshot, properties)
        except Exception as err:
            try:
                err_msg = (_('Unable to fetch connection information from '
                             'backend: %(err)s') % {'err': err})
                LOG.error(err_msg)
                LOG.debug("Cleaning up failed connect initialization.")
                self.remove_export_snapshot(ctxt, snapshot)
            except Exception as ex:
                ex_msg = (_('Error encountered during cleanup '
                            'of a failed attach: %(ex)s') % {'ex': ex})
                # BUGFIX: log the cleanup failure message; this previously
                # logged err_msg a second time.
                LOG.error(ex_msg)
                raise exception.VolumeBackendAPIException(data=ex_msg)
            raise exception.VolumeBackendAPIException(data=err_msg)
        return conn

    def _connect_device(self, conn):
        """Attach the device described by ``conn`` using os-brick.

        :param conn: connection info dict (as returned by an
                     initialize_connection call) with at least
                     'driver_volume_type' and 'data' keys
        :returns: dict with 'conn', 'device' and 'connector' keys
        :raises DeviceUnavailable: if the attached path fails validation
        """
        connector = volume_utils.brick_get_connector(
            conn['driver_volume_type'],
            use_multipath=self.configuration.use_multipath_for_image_xfer,
            device_scan_attempts=(
                self.configuration.num_volume_device_scan_tries),
            conn=conn)
        device = connector.connect_volume(conn['data'])
        host_device = device['path']

        attach_info = {'conn': conn, 'device': device, 'connector': connector}

        # Assume the device is unusable until validation proves otherwise.
        usable = False
        try:
            # Secure network file systems will NOT run as root.
            root_access = not self.secure_file_operations_enabled()
            usable = connector.check_valid_device(host_device, root_access)
        except Exception:
            LOG.exception('Could not validate device %s', host_device)

        if not usable:
            raise exception.DeviceUnavailable(
                path=host_device,
                attach_info=attach_info,
                reason=(_("Unable to access the backend storage via the "
                          "path %(path)s.") % {'path': host_device}))
        return attach_info

    def clone_image(self, context, volume,
                    image_location, image_meta, image_service):
        """Create a volume efficiently from an existing image.

        Refer to
        :obj:`cinder.interface.volume_driver.VolumeDriverCore.clone_image`
        for additional information.
        """
        # Base implementation: no efficient clone path is available.
        return None, False

    def backup_use_temp_snapshot(self):
        """Return the configured setting for backup from snapshot.

        Drivers that do not support this operation should override this
        method to return False and log a warning so the administrator
        knows the configured behavior cannot be honored.
        """
        return self.configuration.safe_get("backup_use_temp_snapshot")

    def snapshot_revert_use_temp_snapshot(self):
        """Whether reverting a snapshot needs a temporary backup snapshot.

        Backends for which this safety snapshot is unnecessary or
        unsupported should override this method to return False.
        """
        return True

    def get_backup_device(self, context, backup):
        """Get a backup device from an existing volume.

        Returns a (device, is_snapshot) pair; the backup service attaches
        the returned volume or snapshot and performs the backup from it.
        """
        if self.backup_use_temp_snapshot():
            # Snapshot-based path: helper already returns the pair.
            return self._get_backup_volume_temp_snapshot(context, backup)
        return (self._get_backup_volume_temp_volume(context, backup), False)

    def _get_backup_volume_temp_volume(self, context, backup):
        """Return the volume object the backup service should back up.

        Backing up a snapshot: clone a temp volume from the snapshot.
        Backing up an 'in-use' volume: clone a temp volume from it.
        Backing up an 'available' volume: use the volume itself.
        """
        volume = objects.Volume.get_by_id(context, backup.volume_id)
        snapshot = (objects.Snapshot.get_by_id(context, backup.snapshot_id)
                    if backup.snapshot_id else None)

        LOG.debug('Creating a new backup for volume %s.', volume['name'])

        # NOTE(xyang): Backup from snapshot materializes the snapshot as a
        # temp volume; the temp volume id is recorded on the backup so it
        # can be cleaned up afterwards.
        if snapshot:
            temp_vol = self._create_temp_volume_from_snapshot(
                context, volume, snapshot,
                status=fields.VolumeStatus.BACKING_UP)
            backup.temp_volume_id = temp_vol.id
            backup.save()
            return temp_vol

        # NOTE(xyang): Not from snapshot: an 'in-use' volume is cloned
        # first so the original stays attached; an 'available' volume is
        # backed up directly.
        if volume.get('previous_status') == "in-use":
            temp_vol = self._create_temp_cloned_volume(
                context, volume, status=fields.VolumeStatus.BACKING_UP)
            backup.temp_volume_id = temp_vol.id
            backup.save()
            return temp_vol

        return volume

    def _get_backup_volume_temp_snapshot(self, context, backup):
        """Return (device, is_snapshot) for the backup service to use.

        Backing up a snapshot: hand the snapshot over directly.
        Backing up an 'in-use' volume: create a temp snapshot of it.
        Backing up an 'available' volume: use the volume itself.
        """
        volume = objects.Volume.get_by_id(context, backup.volume_id)
        snapshot = (objects.Snapshot.get_by_id(context, backup.snapshot_id)
                    if backup.snapshot_id else None)

        LOG.debug('Creating a new backup for volume %s.', volume['name'])

        # NOTE(xyang): A snapshot source is backed up directly; there is
        # nothing to create or clean up.
        if snapshot:
            return (snapshot, True)

        # NOTE(xyang): An 'in-use' volume is snapshotted so the backup
        # sees a stable image; the temp snapshot id is recorded on the
        # backup for later cleanup.
        if volume.get('previous_status') == "in-use":
            temp_snapshot = self._create_temp_snapshot(context, volume)
            backup.temp_snapshot_id = temp_snapshot.id
            backup.save()
            return (temp_snapshot, True)

        # An 'available' volume is backed up as-is.
        return (volume, False)

    def _create_temp_snapshot(self, context, volume):
        """Create a temporary snapshot of *volume* on the backend.

        The snapshot is flagged ``use_quota=False`` so it is not counted
        against the project's quota. If the driver fails to create the
        snapshot, the DB record and its glance metadata are removed
        before the exception is re-raised.

        :param context: security context
        :param volume: source volume (accessed dict-style)
        :returns: the new Snapshot object in AVAILABLE status
        """
        kwargs = {
            'volume_id': volume['id'],
            'cgsnapshot_id': None,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': fields.SnapshotStatus.CREATING,
            'progress': '0%',
            'volume_size': volume['size'],
            'display_name': 'backup-snap-%s' % volume['id'],
            'display_description': None,
            'volume_type_id': volume['volume_type_id'],
            'encryption_key_id': volume['encryption_key_id'],
            'use_quota': False,  # Don't count for quota
            'metadata': {},
        }
        # Create the DB record first so the driver call has an object to
        # work against; roll it back if the driver fails.
        temp_snap_ref = objects.Snapshot(context=context, **kwargs)
        temp_snap_ref.create()
        try:
            model_update = self.create_snapshot(temp_snap_ref)
            if model_update:
                temp_snap_ref.update(model_update)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Clean up the orphaned DB record (requires admin context).
                with temp_snap_ref.obj_as_admin():
                    self.db.volume_glance_metadata_delete_by_snapshot(
                        context, temp_snap_ref.id)
                    temp_snap_ref.destroy()

        temp_snap_ref.status = fields.SnapshotStatus.AVAILABLE
        temp_snap_ref.progress = '100%'
        temp_snap_ref.save()
        return temp_snap_ref

    def _create_temp_volume(self, context, volume, volume_options=None):
        """Create a DB record for a temporary volume modeled on *volume*.

        Only the database object is created; no backend storage is
        allocated here. The volume is flagged as temporary so it is
        excluded from quota accounting.

        :param context: security context
        :param volume: the volume whose attributes are copied
        :param volume_options: optional dict of field overrides
        :returns: the created Volume object in 'creating' status
        """
        vol_kwargs = {
            'size': volume.size,
            'display_name': 'backup-vol-%s' % volume.id,
            'host': volume.host,
            'cluster_name': volume.cluster_name,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': 'creating',
            'attach_status': fields.VolumeAttachStatus.DETACHED,
            'availability_zone': volume.availability_zone,
            'volume_type_id': volume.volume_type_id,
            'use_quota': False,  # Don't count for quota
            # TODO: (Y release) Remove admin_metadata and only use use_quota
            'admin_metadata': {'temporary': 'True'},
        }
        if volume_options:
            vol_kwargs.update(volume_options)
        temp_vol = objects.Volume(context=context.elevated(), **vol_kwargs)
        temp_vol.create()
        return temp_vol

    def _create_temp_cloned_volume(self, context, volume,
                                   status=fields.VolumeStatus.AVAILABLE):
        """Clone *volume* into a temporary volume on the backend.

        :param context: security context
        :param volume: source volume to clone
        :param status: status to set on the clone once it is ready
        :returns: the new temporary Volume object
        """
        clone_ref = self._create_temp_volume(context, volume)
        try:
            updates = self.create_cloned_volume(clone_ref, volume)
            if updates:
                clone_ref.update(updates)
        except Exception:
            # Remove the orphaned DB record, then re-raise the original.
            with excutils.save_and_reraise_exception():
                clone_ref.destroy()

        clone_ref.status = status
        clone_ref.save()
        return clone_ref

    def _create_temp_volume_from_snapshot(
            self, context, volume, snapshot, volume_options=None,
            status=fields.VolumeStatus.AVAILABLE):
        """Create a temporary volume on the backend from *snapshot*.

        :param context: security context
        :param volume: volume whose attributes seed the temp volume
        :param snapshot: snapshot to create the temp volume from
        :param volume_options: optional dict of field overrides
        :param status: status to set on the volume once it is ready
        :returns: the new temporary Volume object
        """
        tmp_ref = self._create_temp_volume(context, volume,
                                           volume_options=volume_options)
        try:
            updates = self.create_volume_from_snapshot(tmp_ref, snapshot)
            if updates:
                tmp_ref.update(updates)
        except Exception:
            # Remove the orphaned DB record, then re-raise the original.
            with excutils.save_and_reraise_exception():
                tmp_ref.destroy()

        tmp_ref.status = status
        tmp_ref.save()
        return tmp_ref

    def clear_download(self, context, volume):
        """Clean up after an interrupted image copy (no-op by default)."""
        return None

    def do_setup(self, context):
        """Run driver initialization at service startup (no-op here)."""
        return None

    def validate_connector(self, connector):
        """Fail if connector lacks data the driver needs (no-op here)."""
        return None

    def update_migrated_volume(self, ctxt, volume, new_volume,
                               original_volume_status):
        """Return a model update for a migrated volume.

        An implementing driver is responsible for the ``_name_id`` and
        ``provider_location`` values. Returning None, or omitting a key,
        means the volume table keeps its current value for that key. The
        expected format is {"_name_id": value, "provider_location": value}.

        :param volume: The original volume that was migrated to this backend
        :param new_volume: The migration volume object that was created on
                           this backend as part of the migration process
        :param original_volume_status: The status of the original volume
        :returns: model_update to update DB with any needed changes
        """
        msg = _("The method update_migrated_volume is not implemented.")
        raise NotImplementedError(msg)

    @staticmethod
    def validate_connector_has_setting(connector, setting):
        pass

    def retype(self, context, volume, new_type, diff, host):
        """Retype stub: report the retype as not handled by the driver."""
        return False, None

    def create_cloned_volume(self, volume, src_vref):
        """Create a clone of the specified volume.

        When the volume_type extra specs include 'replication: <is> True'
        the driver must create a secondary replica volume and set up
        replication between the new volume and that secondary.
        """
        raise NotImplementedError()

    # #######  Interface methods for DataPath (Connector) ########
    @abc.abstractmethod
    def ensure_export(self, context, volume):
        """Synchronously recreate an export for a volume."""
        return None

    @abc.abstractmethod
    def create_export(self, context, volume, connector):
        """Export the volume.

        May return a dictionary of changes to persist on the volume
        object.
        """
        return None

    def create_export_snapshot(self, context, snapshot, connector):
        """Export the snapshot.

        May return a dictionary of changes to persist on the snapshot
        object.
        """
        return None

    @abc.abstractmethod
    def remove_export(self, context, volume):
        """Remove an export for a volume."""
        return None

    def remove_export_snapshot(self, context, snapshot):
        """Remove an export for a snapshot."""
        return None

    @abc.abstractmethod
    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info.

        ..note::
            Whether a volume is 'cacheable' for local caching on the
            hypervisor is normally set via volume-type extra-specs. A
            driver can disable the feature regardless by returning
            "cacheable": False in the conn_info, overriding any
            extra-specs setting.

        :param volume: The volume to be attached
        :param connector: Dictionary containing information about what is being
                          connected to.
        :returns conn_info: A dictionary of connection information.
        """
        return None

    def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
        """Allow connection to connector and return connection info.

        :param snapshot: The snapshot to be attached
        :param connector: Dictionary containing information about what
                          is being connected to.
        :returns conn_info: A dictionary of connection information,
                            optionally including an "initiator_updates"
                            field.

        "initiator_updates", when present, is a dictionary with a
        "set_values" and/or "remove_values" field: "set_values" maps keys
        to values to be set/updated in the db, and "remove_values" lists
        previously-set keys to delete from the db.
        """
        return None

    @abc.abstractmethod
    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector.

        :param volume: The volume to be disconnected.
        :param connector: A dictionary describing the connection with details
                          about the initiator. Can be None.
        """
        return None

    def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
        """Disallow connection from connector."""
        return None

    def get_pool(self, volume):
        """Return the name of the pool hosting the given volume.

        :param volume: The volume hosted by the driver.
        :returns: name of the pool the volume resides in, or None when
                  the driver does not report pools.
        """
        return None

    def update_provider_info(self, volumes, snapshots):
        """Get provider info updates from driver.

        :param volumes: List of Cinder volumes to check for updates
        :param snapshots: List of Cinder snapshots to check for updates
        :returns: tuple (volume_updates, snapshot_updates)

        Each volume update looks like {'id': uuid, provider_id:
        <provider-id>}; snapshot updates use the same shape.
        """
        return None, None

    def migrate_volume(self, context, volume, host):
        """Migrate volume stub.

        Used by drivers that provide no enhanced implementation of this
        operation; signals that the driver did not handle the migration.
        """
        return (False, None)

    def manage_existing(self, volume, existing_ref):
        """Manage existing stub.

        Used by drivers that do not implement manage_existing().
        """
        msg = _("Manage existing volume not implemented.")
        raise NotImplementedError(msg)

    def unmanage(self, volume):
        """Unmanage stub.

        Used by drivers that do not implement unmanage().
        """
        msg = _("Unmanage volume not implemented.")
        raise NotImplementedError(msg)

    def freeze_backend(self, context):
        """Notify the backend that it's frozen.

        Freezing prohibits creation of new resources on the backend and
        modification of existing ones. This is set/enforced by refusing
        to schedule new volumes to the backend and rejecting resource
        modifications at the API layer.

        Most drivers need to do nothing here; this hook exists for those
        that do.

        :param context: security context
        :response: True|False
        """
        return True

    def thaw_backend(self, context):
        """Notify the backend that it's unfrozen/thawed.

        Restores the backend to normal operation after a freeze.

        Most drivers need to do nothing here; this hook exists for those
        that do.

        :param context: security context
        :response: True|False
        """
        return True

    def failover_host(self, context, volumes, secondary_id=None, groups=None):
        """Failover a backend to a secondary replication target.

        Instructs a replication-capable/configured backend to fail over
        to one of its secondary replication targets. A secondary_id of
        None is acceptable; the driver then fails over to its only
        configured target or chooses one itself. All of the host's
        volumes are passed in so the driver can determine which of them
        are replicated, if needed.

        The response is a tuple of the new target backend_id and a list
        of dictionaries with volume_id and updates. Key fields to
        consider (attaching failed-over volumes):
        - provider_location
        - provider_auth
        - provider_id
        - replication_status

        :param context: security context
        :param volumes: list of volume objects, in case the driver needs
                        to take action on them in some way
        :param secondary_id: Specifies rep target backend to fail over to
        :param groups: replication groups
        :returns: ID of the backend that was failed-over to,
                  model update for volumes, and model update for groups
        """
        # Example volume_updates data structure:
        # [{'volume_id': <cinder-uuid>,
        #   'updates': {'provider_id': 8,
        #               'replication_status': 'failed-over',
        #               'replication_extended_status': 'whatever',...}},]
        # Example group_updates data structure:
        # [{'group_id': <cinder-uuid>,
        #   'updates': {'replication_status': 'failed-over',...}},]
        raise NotImplementedError()

    def failover(self, context, volumes, secondary_id=None, groups=None):
        """Like failover_host, but for a clustered host.

        This usually behaves exactly like failover_host, so unless a
        driver overrides it, failover_host is assumed to apply.
        """
        return self.failover_host(context, volumes, secondary_id, groups)

    def failover_completed(self, context, active_backend_id=None):
        """Hook called after failover for clustered backends."""
        raise NotImplementedError()

    @classmethod
    def _is_base_method(cls, method_name):
        """Return True if *method_name* is not overridden from BaseVD."""
        base_impl = getattr(BaseVD, method_name)
        return getattr(cls, method_name).__module__ == base_impl.__module__

    # Replication Group (Tiramisu)
    def enable_replication(self, context, group, volumes):
        """Enable replication for a group and the volumes in it.

        :param group: group object
        :param volumes: list of volume objects in the group
        :returns: model_update - dict of group updates
        :returns: volume_model_updates - list of dicts of volume updates
        """
        raise NotImplementedError()

    # Replication Group (Tiramisu)
    def disable_replication(self, context, group, volumes):
        """Disable replication for a group and the volumes in it.

        :param group: group object
        :param volumes: list of volume objects in the group
        :returns: model_update - dict of group updates
        :returns: volume_model_updates - list of dicts of volume updates
        """
        raise NotImplementedError()

    # Replication Group (Tiramisu)
    def failover_replication(self, context, group, volumes,
                             secondary_backend_id=None):
        """Fail over replication for a group and the volumes in it.

        :param group: group object
        :param volumes: list of volume objects in the group
        :param secondary_backend_id: backend_id of the secondary site
        :returns: model_update - dict of group updates
        :returns: volume_model_updates - list of dicts of volume updates
        """
        raise NotImplementedError()

    def get_replication_error_status(self, context, groups):
        """Return error info for replicated groups and their volumes.

        :returns: group_model_updates - list of dicts of group updates

        when an error occurs. A group's dict may look like:

        .. code:: python

          {'group_id': xxxx,
           'replication_status': fields.ReplicationStatus.ERROR}

        :returns: volume_model_updates - list of dicts of volume updates

        when an error occurs. A volume's dict may look like:

        .. code:: python

          {'volume_id': xxxx,
           'replication_status': fields.ReplicationStatus.ERROR}

        """
        # Base implementation reports no errors.
        return [], []

    @classmethod
    def supports_replication_feature(cls, feature):
        """Check whether this driver class supports a replication feature.

        Feature is a string that must be one of:
            - v2.1
            - a/a
        """
        checker = cls.REPLICATION_FEATURE_CHECKERS.get(feature)
        if checker is None:
            return False
        # Supported iff the driver overrides the base implementation of
        # the method tied to this feature.
        return not cls._is_base_method(checker)

    def create_group(self, context, group):
        """Create a group.

        :param context: the context of the caller.
        :param group: the Group object of the group to be created.
        :returns: model_update

        model_update has the format {'status': xxx, ......}.

        A model_update status of 'error' makes the manager raise, which
        its try-except block catches; a driver-raised exception is caught
        the same way. Either path sets the group's db status to 'error'.

        On success the driver may return a model_update or None; the
        group status is then set to 'available'.
        """
        raise NotImplementedError()

    def delete_group(self, context, group, volumes):
        """Delete a group.

        :param context: the context of the caller.
        :param group: the Group object of the group to be deleted.
        :param volumes: a list of Volume objects in the group.
        :returns: model_update, volumes_model_update

        The *volumes* parameter holds objects from the db and cannot be
        returned as volumes_model_update; the driver must build that as a
        list of dicts, each shaped {'id': xxx, 'status': xxx, ......}.
        model_update is shaped {'status': xxx, ......}.

        The driver should populate both updates and return them. The
        manager applies volumes_model_update to the db per volume, so on
        partial deletion failures the driver should set each volume's
        status accordingly for a correct db update.

        If any volumes_model_update entry has status 'error_deleting' or
        'error', model_update is set to the same unless it already is.

        A model_update status of 'error_deleting' or 'error' makes the
        manager raise and mark the group 'error' in the db; if
        volumes_model_update is not returned, the manager's except block
        marks every volume in the group 'error' as well. A driver-raised
        exception is caught by the manager's try-except and marks the
        group and all its volumes 'error'.

        On success the driver may return both updates or (None, None);
        the group and volumes are marked 'deleted' once removed from the
        db.
        """
        raise NotImplementedError()

    def update_group(self, context, group,
                     add_volumes=None, remove_volumes=None):
        """Update a group.

        :param context: the context of the caller.
        :param group: the Group object of the group to be updated.
        :param add_volumes: a list of Volume objects to be added.
        :param remove_volumes: a list of Volume objects to be removed.
        :returns: model_update, add_volumes_update, remove_volumes_update

        model_update is a dict the driver wants the manager to apply on
        success; returning None makes the manager set status 'available'.

        add_volumes_update and remove_volumes_update are lists of dicts
        the driver wants applied on success; each entry needs an
        {'id': xxx} key so the right volume row is updated. Returning
        None leaves the volumes' statuses unchanged. Note that
        add_volumes/remove_volumes hold volume objects and cannot be
        returned directly as these update lists.

        If the driver raises, the group and the volumes being
        added/removed are all set to 'error'.
        """
        raise NotImplementedError()

    def create_group_from_src(self, context, group, volumes,
                              group_snapshot=None, snapshots=None,
                              source_group=None, source_vols=None):
        """Create a group from a source.

        :param context: the context of the caller.
        :param group: the Group object to be created.
        :param volumes: a list of Volume objects in the group.
        :param group_snapshot: the GroupSnapshot object as source.
        :param snapshots: a list of Snapshot objects in group_snapshot.
        :param source_group: the Group object as source.
        :param source_vols: a list of Volume objects in the source_group.
        :returns: model_update, volumes_model_update

        The source is either a group_snapshot or a source_group.

        The *volumes* parameter holds objects from the db and cannot be
        returned as volumes_model_update; the driver must build that as a
        list of dicts, each shaped {'id': xxx, 'status': xxx, ......}.
        model_update is shaped {'status': xxx, ......}.

        Consistent with other volume operations, the manager treats the
        operation as successful when no exception is raised. On success
        the driver may return both updates or (None, None).
        """
        raise NotImplementedError()

    def create_group_snapshot(self, context, group_snapshot, snapshots):
        """Create a group_snapshot.

        :param context: the context of the caller.
        :param group_snapshot: the GroupSnapshot object to be created.
        :param snapshots: a list of Snapshot objects in the group_snapshot.
        :returns: model_update, snapshots_model_update

        The *snapshots* parameter holds Snapshot objects and cannot be
        returned as snapshots_model_update; the driver must build that as
        a list of dicts, each shaped {'id': xxx, 'status': xxx, ......}.
        model_update is shaped {'status': xxx, ......}.

        The driver should populate both updates and return them. The
        manager applies snapshots_model_update to the db per snapshot, so
        on partial failures the driver should set each snapshot's status
        accordingly for a correct db update.

        If any snapshots_model_update entry has status 'error',
        model_update is set to the same unless it already is.

        A model_update status of 'error' makes the manager raise and mark
        the group_snapshot 'error' in the db; if snapshots_model_update
        is not returned, the manager's except block marks every snapshot
        'error' as well. A driver-raised exception is caught by the
        manager's try-except and marks the group_snapshot and all
        snapshots 'error'.

        On success the driver may return both updates or (None, None);
        the group_snapshot and all snapshots are set to 'available' at
        the end of the manager function.
        """
        raise NotImplementedError()

    def delete_group_snapshot(self, context, group_snapshot, snapshots):
        """Delete a group_snapshot.

        :param context: the context of the caller.
        :param group_snapshot: the GroupSnapshot object to be deleted.
        :param snapshots: a list of Snapshot objects in the group_snapshot.
        :returns: model_update, snapshots_model_update

        The *snapshots* parameter holds objects and cannot be returned as
        snapshots_model_update; the driver must build that as a list of
        dicts, each shaped {'id': xxx, 'status': xxx, ......}.
        model_update is shaped {'status': xxx, ......}.

        The driver should populate both updates and return them. The
        manager applies snapshots_model_update to the db per snapshot, so
        on partial deletion failures the driver should set each
        snapshot's status accordingly for a correct db update.

        If any snapshots_model_update entry has status 'error_deleting'
        or 'error', model_update is set to the same unless it already is.

        A model_update status of 'error_deleting' or 'error' makes the
        manager raise and mark the group_snapshot 'error' in the db; if
        snapshots_model_update is not returned, the manager's except
        block marks every snapshot 'error' as well. A driver-raised
        exception is caught by the manager's try-except and marks the
        group_snapshot and all snapshots 'error'.

        On success the driver may return both updates or (None, None);
        the group_snapshot and all snapshots are marked 'deleted' once
        removed from the db.
        """
        raise NotImplementedError()

    def extend_volume(self, volume, new_size):
        """Grow the volume to new_size; not supported by the base driver."""
        raise NotImplementedError(_("Extend volume not implemented"))

    def accept_transfer(self, context, volume, new_user, new_project):
        """Accept a volume ownership transfer (no-op in the base driver).

        :param context: the context of the caller.
        :param volume: the volume being transferred.
        :param new_user: the user taking ownership.
        :param new_project: the project taking ownership.
        """
        pass

    def create_volume_from_backup(self, volume, backup):
        """Create a volume using a backup as the source.

        May return a dictionary of changes to the volume object for the
        caller to persist.

        :param volume: the volume object to be created.
        :param backup: the backup object as source.
        :returns: volume_model_update
        """
        raise NotImplementedError()

    @staticmethod
    def _get_oslo_driver_opts(*cfg_names):
        """Build a list of oslo driver options from option names."""
        registered = CONF.backend_defaults._group._opts
        return [registered[name]['opt'] for name in cfg_names]

    @classmethod
    def clean_volume_file_locks(cls, volume_id):
        """Remove driver-specific file locks for a volume.

        Invoked once a volume has been removed from Cinder, or when we
        notice that the volume no longer exists.

        Cinder has three lock categories:

        - Process locks: no cleanup required
        - Node locks: remove with cinder.utils.synchronized_remove
        - Global locks: remove with cinder.coordination.synchronized_remove

        cinder.utils.synchronized_remove needs the exact lock name,
        while cinder.coordination.synchronized_remove accepts a glob.

        See clean_volume_file_locks, api_clean_volume_file_locks, and
        clean_snapshot_file_locks in cinder.utils for examples.

        :param volume_id: ID of the volume whose locks should be cleaned.
        """
        pass

    @classmethod
    def clean_snapshot_file_locks(cls, snapshot_id):
        """Clean up driver specific snapshot locks.

        This method will be called when a snapshot has been removed from
        cinder or when we detect that the snapshot doesn't exist.

        There are 3 types of locks in Cinder:

        - Process locks: Don't need cleanup
        - Node locks: Must use cinder.utils.synchronized_remove
        - Global locks: Must use cinder.coordination.synchronized_remove

        When using method cinder.utils.synchronized_remove we must pass the
        exact lock name, whereas method cinder.coordination.synchronized_remove
        accepts a glob.

        Refer to clean_volume_file_locks, api_clean_volume_file_locks, and
        clean_snapshot_file_locks in cinder.utils for examples.

        :param snapshot_id: ID of the snapshot whose locks should be cleaned.
        """
        # NOTE: first parameter renamed from the misleading 'self' to the
        # conventional 'cls' for a @classmethod (PEP 8); callers are
        # unaffected since the class is bound implicitly.
        pass


class CloneableImageVD(object, metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def clone_image(self, context, volume,
                    image_location, image_meta, image_service):
        """Efficiently create a volume from an existing image.

        See
        :obj:`cinder.interface.volume_driver.VolumeDriverCore.clone_image`
        for the full contract.
        """
        return None, False


class MigrateVD(object, metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def migrate_volume(self, context, volume, host):
        """Migrate the volume to the given host.

        :param context: Context
        :param volume: A dictionary describing the volume to migrate
        :param host: A dictionary describing the destination host, where
                     host['host'] is its name, and host['capabilities']
                     is a dictionary of its reported capabilities.
        :returns: a (migrated, model_update) pair — a boolean telling
                  whether the migration occurred, plus a model update.
        """
        return False, None


class ManageableVD(object, metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def manage_existing(self, volume, existing_ref):
        """Take over an existing backend storage object as a Cinder volume.

        existing_ref comes unmodified from the API request's
        manage_existing_ref value; its interpretation is up to the
        driver. It must be enough to identify the backend storage object
        that should be associated with the newly-created cinder volume
        structure, which the driver can do in one of two ways:

        1. Rename the backend object to match volume['name'], the
           traditional mapping between a cinder volume and its backend
           storage object.

        2. Record metadata on the volume, or somewhere in the backend,
           so that later driver calls (e.g. delete, clone, attach,
           detach...) can locate the backend object when required.

        Raise ManageExistingInvalidReference when existing_ref is
        malformed or does not refer to an existing backend storage
        object.

        The volume may carry a volume_type; the driver can compare its
        properties against the referenced backend object and, when they
        are incompatible, raise ManageExistingVolumeTypeMismatch with a
        reason for the failure.

        :param volume:       Cinder volume to manage
        :param existing_ref: Driver-specific information used to identify a
                             volume
        """
        return

    @abc.abstractmethod
    def manage_existing_get_size(self, volume, existing_ref):
        """Return the size of the volume manage_existing would take over.

        Round the computed size up to the next GB.

        :param volume:       Cinder volume to manage
        :param existing_ref: Driver-specific information used to identify a
                             volume
        :returns size:       Volume size in GiB (integer)
        """
        return

    def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
                               sort_keys, sort_dirs):
        """Enumerate backend volumes available for management by Cinder.

        Each element of the returned list is a dictionary with keys:

        - reference (dict): reference usable with "manage_existing".
        - size (int): backend-reported size, rounded up to the nearest
          GB.
        - safe_to_manage (bool): whether the backend considers the
          volume manageable — e.g. is it in use or invalid for any
          reason.
        - reason_not_safe (str): why safe_to_manage is False, if it is.
        - cinder_id (str): the Cinder ID when already managed.
        - extra_info (str): any additional information for the user.

        :param cinder_volumes: volumes on this host that Cinder already
                               manages; used to decide whether a volume
                               is manageable or not.
        :param marker:    last item of the previous page; results resume
                          after this value (after sorting)
        :param limit:     maximum number of items to return
        :param offset:    number of items to skip after marker
        :param sort_keys: keys to sort results by (valid keys are
                          'identifier' and 'size')
        :param sort_dirs: sort directions matching sort_keys (valid
                          directions are 'asc' and 'desc')
        """
        return []

    @abc.abstractmethod
    def unmanage(self, volume):
        """Stop managing the volume without deleting the backend object.

        Most drivers need no work here, but some may use this hook to
        drop any Cinder-specific configuration they have attached to the
        backend storage object.

        :param volume: Cinder volume to unmanage
        """
        pass


class ManageableSnapshotsVD(object, metaclass=abc.ABCMeta):
    # NOTE: Can't use abstractmethod before all drivers implement it
    def manage_existing_snapshot(self, snapshot, existing_ref):
        """Take over an existing backend object as a Cinder snapshot.

        existing_ref comes unmodified from the API request's
        manage_existing_ref value; its interpretation is up to the
        driver. It must be enough to identify the backend storage object
        that should be associated with the newly-created cinder snapshot
        structure, which the driver can do in one of two ways:

        1. Rename the backend object to match snapshot['name'], the
           traditional mapping between a cinder snapshot and its backend
           storage object.

        2. Record metadata on the snapshot, or somewhere in the backend,
           so that later driver calls (e.g. delete) can locate the
           backend object when required.

        Raise ManageExistingInvalidReference when existing_ref is
        malformed or does not refer to an existing backend storage
        object.

        :param snapshot:     Cinder volume snapshot to manage
        :param existing_ref: Driver-specific information used to identify a
                             volume snapshot
        """
        return

    # NOTE: Can't use abstractmethod before all drivers implement it
    def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
        """Return the snapshot size manage_existing would take over.

        Round the computed size up to the next GB.

        :param snapshot:     Cinder volume snapshot to manage
        :param existing_ref: Driver-specific information used to identify a
                             volume snapshot
        :returns size:       Volume snapshot size in GiB (integer)
        """
        return

    def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
                                 sort_keys, sort_dirs):
        """Enumerate backend snapshots available for management by Cinder.

        Each element of the returned list is a dictionary with keys:

        - reference (dict): reference usable with
          "manage_existing_snapshot".
        - size (int): backend-reported size, rounded up to the nearest
          GB.
        - safe_to_manage (bool): whether the backend considers the
          snapshot manageable — e.g. is it in use or invalid for any
          reason.
        - reason_not_safe (str): why safe_to_manage is False, if it is.
        - cinder_id (str): the Cinder ID when already managed.
        - extra_info (str): any additional information for the user.
        - source_reference (str): like "reference", but identifying the
          snapshot's source volume.

        :param cinder_snapshots: snapshots on this host that Cinder
                                 already manages; used to decide whether
                                 a snapshot is manageable or not.
        :param marker:    last item of the previous page; results resume
                          after this value (after sorting)
        :param limit:     maximum number of items to return
        :param offset:    number of items to skip after marker
        :param sort_keys: keys to sort results by (valid keys are
                          'identifier' and 'size')
        :param sort_dirs: sort directions matching sort_keys (valid
                          directions are 'asc' and 'desc')
        """
        return []

    # NOTE: Can't use abstractmethod before all drivers implement it
    def unmanage_snapshot(self, snapshot):
        """Stop managing the snapshot without deleting the backend object.

        Most drivers need no work here, but some may use this hook to
        drop any Cinder-specific configuration they have attached to the
        backend storage object.

        :param snapshot: Cinder volume snapshot to unmanage
        """
        pass


class VolumeDriver(ManageableVD, CloneableImageVD, ManageableSnapshotsVD,
                   MigrateVD, BaseVD):
    """Reference driver combining every optional capability interface."""

    def check_for_setup_error(self):
        raise NotImplementedError()

    def create_volume(self, volume):
        raise NotImplementedError()

    def create_volume_from_snapshot(self, volume, snapshot):
        """Create a volume from a snapshot.

        When the volume_type extra specs include 'replication: <is>
        True', the driver must also create a volume replica (secondary)
        and set up replication between the newly created volume and the
        secondary volume.
        """
        raise NotImplementedError()

    def delete_volume(self, volume):
        raise NotImplementedError()

    def create_snapshot(self, snapshot):
        """Create a snapshot."""
        raise NotImplementedError()

    def delete_snapshot(self, snapshot):
        """Delete a snapshot.

        Drivers using custom file locks should remove them on success
        with cinder.utils.synchronized_remove.
        """
        raise NotImplementedError()

    def local_path(self, volume):
        raise NotImplementedError()

    def clear_download(self, context, volume):
        pass

    def extend_volume(self, volume, new_size):
        raise NotImplementedError(_("Extend volume not implemented"))

    def manage_existing(self, volume, existing_ref):
        raise NotImplementedError(_("Manage existing volume not "
                                    "implemented."))

    def revert_to_snapshot(self, context, volume, snapshot):
        """Revert a volume to one of its snapshots.

        Note: reverting must not change the volume's current size — if
        the driver shrank the volume during the process, it has to
        extend the volume back internally.
        """
        raise NotImplementedError(_("Revert volume to snapshot not "
                                    "implemented."))

    def manage_existing_get_size(self, volume, existing_ref):
        raise NotImplementedError(_("Manage existing volume not "
                                    "implemented."))

    def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
                               sort_keys, sort_dirs):
        raise NotImplementedError(_("Get manageable volumes not "
                                    "implemented."))

    def unmanage(self, volume):
        pass

    def manage_existing_snapshot(self, snapshot, existing_ref):
        raise NotImplementedError(_("Manage existing snapshot not "
                                    "implemented."))

    def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
        raise NotImplementedError(_("Manage existing snapshot not "
                                    "implemented."))

    def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
                                 sort_keys, sort_dirs):
        raise NotImplementedError(_("Get manageable snapshots not "
                                    "implemented."))

    def unmanage_snapshot(self, snapshot):
        """Unmanage the specified snapshot from Cinder management."""

    def retype(self, context, volume, new_type, diff, host):
        return False, None

    # #######  Interface methods for DataPath (Connector) ########
    def ensure_export(self, context, volume):
        raise NotImplementedError()

    def create_export(self, context, volume, connector):
        raise NotImplementedError()

    def create_export_snapshot(self, context, snapshot, connector):
        raise NotImplementedError()

    def remove_export(self, context, volume):
        raise NotImplementedError()

    def remove_export_snapshot(self, context, snapshot):
        raise NotImplementedError()

    def initialize_connection(self, volume, connector, **kwargs):
        raise NotImplementedError()

    def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
        """Allow connection from connector for a snapshot."""

    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector

        :param volume: The volume to be disconnected.
        :param connector: A dictionary describing the connection with
                          details about the initiator. Can be None.
        """

    def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
        """Disallow connection from connector for a snapshot."""

    def create_consistencygroup(self, context, group):
        """Create a consistency group.

        :param context: the context of the caller.
        :param group: the dictionary of the consistency group to be created.
        :returns: model_update

        model_update has the form {'status': xxx, ......}.

        A model_update status of 'error' makes the manager throw an
        exception, which — like any exception thrown by the driver — is
        caught in the manager's try-except block; the group status in
        the db then becomes 'error'.

        On success the driver may return either a populated model_update
        or None; the group status is then set to 'available'.
        """
        raise NotImplementedError()

    def create_consistencygroup_from_src(self, context, group, volumes,
                                         cgsnapshot=None, snapshots=None,
                                         source_cg=None, source_vols=None):
        """Create a consistency group from a source.

        :param context: the context of the caller.
        :param group: the dictionary of the consistency group to be created.
        :param volumes: a list of volume dictionaries in the group.
        :param cgsnapshot: the dictionary of the cgsnapshot as source.
        :param snapshots: a list of snapshot dictionaries in the cgsnapshot.
        :param source_cg: the dictionary of a consistency group as source.
        :param source_vols: a list of volume dictionaries in the source_cg.
        :returns: model_update, volumes_model_update

        The source can be a cgsnapshot or a source cg.

        The volumes argument comes straight from the db (a list of
        cinder.db.sqlalchemy.models.Volume, to be precise) and cannot be
        returned as volumes_model_update, which is a list of
        dictionaries the driver builds itself with entries of the form
        {'id': xxx, 'status': xxx, ......}. model_update has the form
        {'status': xxx, ......}.

        Consistent with other volume operations, the manager assumes the
        operation succeeded unless the driver throws an exception. On
        success the driver may return populated model_update and
        volumes_model_update, or None, None.
        """
        raise NotImplementedError()

    def delete_consistencygroup(self, context, group, volumes):
        """Delete a consistency group.

        :param context: the context of the caller.
        :param group: the dictionary of the consistency group to be deleted.
        :param volumes: a list of volume dictionaries in the group.
        :returns: model_update, volumes_model_update

        The volumes argument comes straight from the db (a list of
        cinder.db.sqlalchemy.models.Volume, to be precise) and cannot be
        returned as volumes_model_update, which is a list of
        dictionaries the driver builds itself with entries of the form
        {'id': xxx, 'status': xxx, ......}. model_update has the form
        {'status': xxx, ......}.

        The driver should fill in and return both updates. The manager
        applies volumes_model_update to the db per volume, so when only
        some deletions succeed the driver must report per-volume
        statuses accordingly.

        When any volumes_model_update entry has status 'error_deleting'
        or 'error', the model_update status is set to match unless it
        already is one of those.

        A model_update status of 'error_deleting' or 'error' makes the
        manager raise an exception and mark the group 'error' in the db;
        if volumes_model_update was not returned by the driver, every
        volume in the group is marked 'error' in the except block.

        An exception raised by the driver is caught by the try-except
        block in the manager; the group and all of its volumes are
        marked 'error'.

        On success the driver may return the populated updates or
        None, None; the group and all volumes are set to 'deleted' after
        the manager removes them from the db.
        """
        raise NotImplementedError()

    def update_consistencygroup(self, context, group,
                                add_volumes=None, remove_volumes=None):
        """Update a consistency group.

        :param context: the context of the caller.
        :param group: the dictionary of the consistency group to be updated.
        :param add_volumes: a list of volume dictionaries to be added.
        :param remove_volumes: a list of volume dictionaries to be removed.
        :returns: model_update, add_volumes_update, remove_volumes_update

        model_update is the dictionary the manager should apply upon a
        successful return; when None is returned the status is set to
        'available'.

        add_volumes_update and remove_volumes_update are lists of
        dictionaries the manager applies upon a successful return; each
        entry requires an {'id': xxx} key so the correct volume row is
        updated. When None is returned the volumes keep their original
        status. Note that add_volumes/remove_volumes are lists of
        cinder.db.sqlalchemy.models.Volume objects and cannot be used
        for the db update directly.

        If the driver throws an exception, the group and the volumes
        being added/removed are all marked 'error'.
        """
        raise NotImplementedError()

    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Create a cgsnapshot.

        :param context: the context of the caller.
        :param cgsnapshot: the dictionary of the cgsnapshot to be created.
        :param snapshots: a list of snapshot dictionaries in the cgsnapshot.
        :returns: model_update, snapshots_model_update

        The snapshots argument comes straight from the db (a list of
        cinder.db.sqlalchemy.models.Snapshot, to be precise) and cannot
        be returned as snapshots_model_update, which is a list of
        dictionaries the driver builds itself with entries of the form
        {'id': xxx, 'status': xxx, ......}. model_update has the form
        {'status': xxx, ......}.

        The driver should fill in and return both updates. The manager
        applies snapshots_model_update to the db per snapshot, so on a
        partial failure the driver must report per-snapshot statuses
        accordingly.

        When any snapshots_model_update entry has status 'error', the
        model_update status is set to 'error' as well, unless it already
        is.

        A model_update status of 'error' makes the manager raise an
        exception and mark the cgsnapshot 'error' in the db; if
        snapshots_model_update was not returned by the driver, every
        snapshot is marked 'error' in the except block.

        An exception raised by the driver is caught by the try-except
        block in the manager; the cgsnapshot and all snapshots are
        marked 'error'.

        On success the driver may return the populated updates or
        None, None; the cgsnapshot and all snapshots end up 'available'
        at the end of the manager function.
        """
        raise NotImplementedError()

    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Delete a cgsnapshot.

        :param context: the context of the caller.
        :param cgsnapshot: the dictionary of the cgsnapshot to be deleted.
        :param snapshots: a list of snapshot dictionaries in the cgsnapshot.
        :returns: model_update, snapshots_model_update

        The snapshots argument comes straight from the db (a list of
        cinder.db.sqlalchemy.models.Snapshot, to be precise) and cannot
        be returned as snapshots_model_update, which is a list of
        dictionaries the driver builds itself with entries of the form
        {'id': xxx, 'status': xxx, ......}. model_update has the form
        {'status': xxx, ......}.

        The driver should fill in and return both updates. The manager
        applies snapshots_model_update to the db per snapshot, so when
        only some deletions succeed the driver must report per-snapshot
        statuses accordingly.

        When any snapshots_model_update entry has status
        'error_deleting' or 'error', the model_update status is set to
        match unless it already is one of those.

        A model_update status of 'error_deleting' or 'error' makes the
        manager raise an exception and mark the cgsnapshot 'error' in
        the db; if snapshots_model_update was not returned by the
        driver, every snapshot is marked 'error' in the except block.

        An exception raised by the driver is caught by the try-except
        block in the manager; the cgsnapshot and all snapshots are
        marked 'error'.

        On success the driver may return the populated updates or
        None, None; the cgsnapshot and all snapshots are set to
        'deleted' after the manager removes them from the db.
        """
        raise NotImplementedError()

    def clone_image(self, context, volume,
                    image_location, image_meta, image_service):
        """Efficiently create a volume from an existing image.

        See
        :obj:`cinder.interface.volume_driver.VolumeDriverCore.clone_image`
        for the full contract.
        """
        return None, False

    def get_pool(self, volume):
        """Return the name of the pool hosting the given volume.

        :param volume: The volume hosted by the driver.
        :returns: name of the pool the volume resides in.
        """
        return None

    def migrate_volume(self, context, volume, host):
        return False, None

    def accept_transfer(self, context, volume, new_user, new_project):
        pass


class ProxyVD(object):
    """Marker class for proxy volume drivers.

    Drivers that wrap another driver through attribute delegation
    (e.g. via __setattr__ and __getattr__) rather than inheriting from
    the base volume driver can subclass this to be recognizable as
    proxies and to expose the actual driver object in use.
    """
    def _get_driver(self):
        """Return the wrapped driver object; proxies may override this."""
        return getattr(self, "driver", None)


class ISCSIDriver(VolumeDriver):
    """Executes commands relating to ISCSI volumes.

    We make use of model provider properties as follows:

    ``provider_location``
      if present, contains the iSCSI target information in the same
      format as an ietadm discovery
      i.e. '<ip>:<port>,<portal> <target IQN>'

    ``provider_auth``
      if present, contains a space-separated triple:
      '<auth method> <auth username> <auth password>'.
      `CHAP` is the only auth_method in use at the moment.
    """

    def __init__(self, *args, **kwargs):
        super(ISCSIDriver, self).__init__(*args, **kwargs)

    def _do_iscsi_discovery(self, volume):
        """Discover the iSCSI target for *volume* via sendtargets.

        Fallback path used when the volume has no stored
        provider_location. Runs 'iscsiadm -m discovery' against the
        volume's host and returns the discovery line matching both the
        configured target IP and the volume name, or None when the
        command fails or no line matches.
        """
        # TODO(justinsb): Deprecate discovery and use stored info
        # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
        LOG.warning("ISCSI provider_location not stored, using discovery")

        volume_name = volume['name']

        try:
            # NOTE(griff) We're doing the split straight away which should be
            # safe since using '@' in hostname is considered invalid

            (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
                                        '-t', 'sendtargets', '-p',
                                        volume['host'].split('@')[0],
                                        run_as_root=True)
        except processutils.ProcessExecutionError as ex:
            LOG.error("ISCSI discovery attempt failed for:%s",
                      volume['host'].split('@')[0])
            LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr)
            return None

        # Pick the discovery line that mentions both the configured
        # target IP address and this volume's name.
        for target in out.splitlines():
            if (self.configuration.target_ip_address in target
                    and volume_name in target):
                return target
        return None

    def _get_iscsi_properties(self, volume, multipath=False):
        """Gets iscsi configuration

        We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in future
        The properties are:

        :target_discovered:    boolean indicating whether discovery was used

        :target_iqn:    the IQN of the iSCSI target

        :target_portal:    the portal of the iSCSI target

        :target_lun:    the lun of the iSCSI target

        :volume_id:    the id of the volume (currently used by xen)

        :auth_method:, :auth_username:, :auth_password:

            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.

        :discard:    boolean indicating if discard is supported

        In some of drivers that support multiple connections (for multipath
        and for single path with failover on connection failure), it returns
        :target_iqns, :target_portals, :target_luns, which contain lists of
        multiple values. The main portal information is also returned in
        :target_iqn, :target_portal, :target_lun for backward compatibility.

        Note that some of drivers don't return :target_portals even if they
        support multipath. Then the connector should use sendtargets discovery
        to find the other portals if it supports multipath.
        """

        properties = {}

        location = volume['provider_location']

        if location:
            # provider_location is the same format as iSCSI discovery output
            properties['target_discovered'] = False
        else:
            location = self._do_iscsi_discovery(volume)

            if not location:
                msg = (_("Could not find iSCSI export for volume %s") %
                        (volume['name']))
                raise exception.InvalidVolume(reason=msg)

            LOG.debug("ISCSI Discovery: Found %s", location)
            properties['target_discovered'] = True

        # Location format: '<portal>[;<portal>...],<tpgt> <IQN> [<LUN>]'.
        # The target portal group tag after the comma is discarded.
        results = location.split(" ")
        portals = results[0].split(",")[0].split(";")
        iqn = results[1]
        nr_portals = len(portals)

        # The LUN may be missing or non-numeric: tgtadm-managed targets
        # number LUNs from 1, every other target helper from 0.
        try:
            lun = int(results[2])
        except (IndexError, ValueError):
            if self.configuration.target_helper == 'tgtadm':
                lun = 1
            else:
                lun = 0

        # With multiple portals expose the full lists; the primary
        # (first-portal) values are always set for backward compatibility.
        if nr_portals > 1:
            properties['target_portals'] = portals
            properties['target_iqns'] = [iqn] * nr_portals
            properties['target_luns'] = [lun] * nr_portals
        properties['target_portal'] = portals[0]
        properties['target_iqn'] = iqn
        properties['target_lun'] = lun

        properties['volume_id'] = volume['id']

        # provider_auth holds '<auth_method> <auth_username> <auth_secret>'
        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()

            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret

        # provider_geometry, if set, holds
        # '<physical_block_size> <logical_block_size>'
        geometry = volume.get('provider_geometry', None)
        if geometry:
            (physical_block_size, logical_block_size) = geometry.split()
            properties['physical_block_size'] = physical_block_size
            properties['logical_block_size'] = logical_block_size

        encryption_key_id = volume.get('encryption_key_id', None)
        properties['encrypted'] = encryption_key_id is not None

        return properties

    def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
        """Run 'iscsiadm -m node' against this volume's target.

        :param iscsi_properties: mapping providing 'target_iqn' and
                                 'target_portal'
        :param iscsi_command: extra iscsiadm arguments, as a sequence
        :param kwargs: may carry 'check_exit_code' (defaults to 0)
        :returns: (stdout, stderr) of the iscsiadm invocation
        """
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
                                   iscsi_properties['target_iqn'],
                                   '-p', iscsi_properties['target_portal'],
                                   *iscsi_command, run_as_root=True,
                                   check_exit_code=check_exit_code)
        LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
                  {'command': iscsi_command, 'out': out, 'err': err})
        return (out, err)

    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info.

        The iscsi driver returns a driver_volume_type of 'iscsi'.
        The format of the driver data is defined in _get_iscsi_properties.
        Example return value::

            {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': True,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                    'target_portal': '127.0.0.0.1:3260',
                    'volume_id': 1,
                    'discard': False,
                }
            }

        If the backend driver supports multiple connections for multipath and
        for single path with failover, "target_portals", "target_iqns",
        "target_luns" are also populated::

            {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': False,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume1',
                    'target_iqns': ['iqn.2010-10.org.openstack:volume1',
                                    'iqn.2010-10.org.openstack:volume1-2'],
                    'target_portal': '10.0.0.1:3260',
                    'target_portals': ['10.0.0.1:3260', '10.0.1.1:3260'],
                    'target_lun': 1,
                    'target_luns': [1, 1],
                    'volume_id': 1,
                    'discard': False,
                }
            }
        """
        # NOTE(jdg): Yes, this is duplicated in the volume/target
        # drivers, for now leaving it as there are 3'rd party
        # drivers that don't use target drivers, but inherit from
        # this base class and use this init data
        iscsi_properties = self._get_iscsi_properties(volume)
        return {
            'driver_volume_type':
                self.configuration.safe_get('target_protocol'),
            'data': iscsi_properties
        }

    def validate_connector(self, connector):
        """Fail unless the connector carries the initiator IQN."""
        # iSCSI drivers require the initiator information
        required = 'initiator'
        if required not in connector:
            LOG.error('The volume driver requires %(data)s '
                      'in the connector.', {'data': required})
            raise exception.InvalidConnectorException(missing=required)

    def terminate_connection(self, volume, connector, **kwargs):
        """Disconnect volume from connector; intentionally a no-op here."""
        pass

    def _update_volume_stats(self):
        """Retrieve stats info from volume group."""

        LOG.debug("Updating volume stats...")
        data = {}
        backend_name = self.configuration.safe_get('volume_backend_name')
        data["volume_backend_name"] = backend_name or 'Generic_iSCSI'
        data["vendor_name"] = 'Open Source'
        data["driver_version"] = '1.0'
        data["storage_protocol"] = constants.ISCSI
        data["pools"] = []
        data["replication_enabled"] = False

        self._update_pools_and_stats(data)


class ISERDriver(ISCSIDriver):
    """Executes commands relating to ISER volumes.

    The model provider properties are used as follows:

    ``provider_location``
      if present, contains the iSER target information in the same
      format as an ietadm discovery
      i.e. '<ip>:<port>,<portal> <target IQN>'

    ``provider_auth``
      if present, contains a space-separated triple:
      '<auth method> <auth username> <auth password>'.
      `CHAP` is the only auth_method in use at the moment.
    """
    def __init__(self, *args, **kwargs):
        super(ISERDriver, self).__init__(*args, **kwargs)
        # For backward compatibility, mirror the iSER-specific options
        # onto the generic target option names used elsewhere.
        conf = self.configuration
        conf.num_volume_device_scan_tries = conf.num_iser_scan_tries
        conf.target_prefix = conf.iser_target_prefix
        conf.target_ip_address = conf.iser_ip_address
        conf.target_port = conf.iser_port

    def initialize_connection(self, volume, connector):
        """Initialize the connection and return the connection info.

        The iser driver always reports a driver_volume_type of 'iser';
        the 'data' payload has the shape built by _get_iscsi_properties.
        Example return value:

        .. code-block:: default

            {
                'driver_volume_type': 'iser',
                'data': {
                    'target_discovered': True,
                    'target_iqn':
                    'iqn.2010-10.org.iser.openstack:volume-00000001',
                    'target_portal': '127.0.0.0.1:3260',
                    'volume_id': 1,
                }
            }

        """
        return {
            'driver_volume_type': 'iser',
            'data': self._get_iscsi_properties(volume),
        }

    def _update_volume_stats(self):
        """Retrieve stats info from volume group."""

        LOG.debug("Updating volume stats...")
        backend_name = self.configuration.safe_get('volume_backend_name')
        data = {
            "volume_backend_name": backend_name or 'Generic_iSER',
            "vendor_name": 'Open Source',
            "driver_version": '1.0',
            "storage_protocol": constants.ISER,
            "pools": [],
        }

        self._update_pools_and_stats(data)


class FibreChannelDriver(VolumeDriver):
    """Executes commands relating to Fibre Channel volumes."""
    def __init__(self, *args, **kwargs):
        super(FibreChannelDriver, self).__init__(*args, **kwargs)

    def initialize_connection(self, volume, connector):
        """Initialize the connection and return the connection info.

        Concrete drivers must override this method; this base class only
        documents the expected return shape and raises
        NotImplementedError. The driver returns a driver_volume_type of
        'fibre_channel'; 'target_wwn' may be a single WWN or a list of
        remote WWNs that export the volume.
        Example return values:

        .. code-block:: default

            {
                'driver_volume_type': 'fibre_channel',
                'data': {
                    'target_discovered': True,
                    'target_lun': 1,
                    'target_wwn': '1234567890123',
                    'discard': False,
                }
            }

        or

        .. code-block:: default

             {
                'driver_volume_type': 'fibre_channel',
                'data': {
                    'target_discovered': True,
                    'target_lun': 1,
                    'target_wwn': ['1234567890123', '0987654321321'],
                    'discard': False,
                }
            }

        """
        msg = _("Driver must implement initialize_connection")
        raise NotImplementedError(msg)

    def validate_connector(self, connector):
        """Fail if connector doesn't contain all the data needed by driver.

        FC drivers need both 'wwpns' and 'wwnns' to be present and
        non-empty in the connector.
        """
        for setting in ('wwpns', 'wwnns'):
            self.validate_connector_has_setting(connector, setting)

    @staticmethod
    def validate_connector_has_setting(connector, setting):
        """Raise InvalidConnectorException unless *setting* is non-empty."""
        if setting in connector and connector[setting]:
            return
        LOG.error(
            "FibreChannelDriver validate_connector failed. "
            "No '%(setting)s'. Make sure HBA state is Online.",
            {'setting': setting})
        raise exception.InvalidConnectorException(missing=setting)

    def _update_volume_stats(self):
        """Retrieve stats info from volume group."""

        LOG.debug("Updating volume stats...")
        backend_name = self.configuration.safe_get('volume_backend_name')
        data = {
            "volume_backend_name": backend_name or 'Generic_FC',
            "vendor_name": 'Open Source',
            "driver_version": '1.0',
            "storage_protocol": constants.FC,
            "pools": [],
        }

        self._update_pools_and_stats(data)