# Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Ceph Backup Service Implementation.

This driver supports backing up volumes of any type to a Ceph object store. It
also detects whether the volume to be backed up is a Ceph RBD volume and, if
so, attempts to perform incremental/differential backups.

When the source volume is itself a Ceph RBD volume, the following are also
supported:

    * backing up within the same Ceph pool (not recommended)
    * backing up between different Ceph pools
    * backing up between different Ceph clusters

At the time of writing, differential backup support in Ceph/librbd was quite
new, so this driver first attempts a differential backup and falls back to a
full backup/copy where a differential backup is not possible. It is
recommended that you upgrade to Ceph Dumpling (v0.67) or later to get the best
results.

If incremental backups are used, multiple backups of the same volume are stored
as snapshots so that minimal space is consumed in the object store, and
restoring the volume takes far less time than a full copy.

Note that Cinder supports restoring to a new volume or the original volume the
backup was taken from. For the latter case, a full copy is enforced since this
was deemed the safest action to take. It is therefore recommended to always
restore to a new volume (default).
"""

import fcntl
import json
import os
import re
import subprocess
import time
from typing import Dict, List, Optional, Tuple  # noqa: H301

import eventlet
from os_brick.initiator import linuxrbd
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units

from cinder.backup import driver
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import objects
from cinder import utils
import cinder.volume.drivers.rbd as rbd_driver

try:
    import rados
    import rbd
except ImportError:
    rados = None
    rbd = None

LOG = logging.getLogger(__name__)

service_opts = [
    cfg.StrOpt('backup_ceph_conf', default='/etc/ceph/ceph.conf',
               help='Ceph configuration file to use.'),
    cfg.StrOpt('backup_ceph_user', default='cinder',
               help='The Ceph user to connect with. Default here is to use '
                    'the same user as for Cinder volumes. If not using cephx '
                    'this should be set to None.'),
    cfg.IntOpt('backup_ceph_chunk_size', default=(units.Mi * 128),
               help='The chunk size, in bytes, that a backup is broken into '
                    'before transfer to the Ceph object store.'),
    cfg.StrOpt('backup_ceph_pool', default='backups',
               help='The Ceph pool where volume backups are stored.'),
    cfg.IntOpt('backup_ceph_stripe_unit', default=0,
               help='RBD stripe unit to use when creating a backup image.'),
    cfg.IntOpt('backup_ceph_stripe_count', default=0,
               help='RBD stripe count to use when creating a backup image.'),
    cfg.BoolOpt('backup_ceph_image_journals', default=False,
                help='If True, apply JOURNALING and EXCLUSIVE_LOCK feature '
                     'bits to the backup RBD objects to allow mirroring'),
    cfg.BoolOpt('restore_discard_excess_bytes', default=True,
                help='If True, always discard excess bytes when restoring '
                     'volumes i.e. pad with zeroes.')
]

CONF = cfg.CONF
CONF.register_opts(service_opts)
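
# A typical backup section in cinder.conf for this driver (illustrative
# values; the option defaults are those defined in service_opts above, and
# the backup_driver path assumes this module lives at
# cinder/backup/drivers/ceph.py):
#
#     [DEFAULT]
#     backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
#     backup_ceph_conf = /etc/ceph/ceph.conf
#     backup_ceph_user = cinder
#     backup_ceph_pool = backups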


class VolumeMetadataBackup(object):
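    """Persist and retrieve a volume's metadata as a rados object.

    The metadata for a given backup is stored as a JSON blob in a rados
    object named "backup.<backup_id>.meta" (see the name property below) in
    the backup pool.
    """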

    def __init__(self, client: 'rados.Rados', backup_id: str):
        self._client: 'rados.Rados' = client
        self._backup_id: str = backup_id

    @property
    def name(self) -> str:
        return "backup.%s.meta" % self._backup_id

    @property
    def exists(self) -> bool:
        meta_obj = eventlet.tpool.Proxy(rados.Object(self._client.ioctx,
                                                     self.name))
        return self._exists(meta_obj)

    def _exists(self, obj) -> bool:
        try:
            obj.stat()
        except rados.ObjectNotFound:
            return False
        else:
            return True

    def set(self, json_meta: str) -> None:
        """Write JSON metadata to a new object.

        This should only be called once per backup. Raises
        VolumeMetadataBackupExists if the object already exists.
        """
        meta_obj = eventlet.tpool.Proxy(rados.Object(self._client.ioctx,
                                                     self.name))
        if self._exists(meta_obj):
            msg = _("Metadata backup object '%s' already exists") % self.name
            raise exception.VolumeMetadataBackupExists(msg)

        meta_obj.write(json_meta.encode('utf-8'))

    def get(self) -> Optional[str]:
        """Get metadata backup object.

        Returns None if the object does not exist.
        """
        meta_obj = eventlet.tpool.Proxy(rados.Object(self._client.ioctx,
                                                     self.name))
        if not self._exists(meta_obj):
            LOG.debug("Metadata backup object %s does not exist", self.name)
            return None

        return meta_obj.read().decode('utf-8')

    def remove_if_exists(self) -> None:
        meta_obj = eventlet.tpool.Proxy(rados.Object(self._client.ioctx,
                                                     self.name))
        try:
            meta_obj.remove()
        except rados.ObjectNotFound:
            LOG.debug("Metadata backup object '%s' not found - ignoring",
                      self.name)


@interface.backupdriver
class CephBackupDriver(driver.BackupDriver):
    """Backup Cinder volumes to Ceph Object Store.

    This class enables backing up Cinder volumes to a Ceph object store.
    Backups may be stored in their own pool or even a separate cluster. The
    store location is defined by the Ceph conf file and the service config
    options supplied.

    If the source volume is itself an RBD volume, the backup will be performed
    using incremental differential backups which *should* give a performance
    gain.
    """

    def __init__(self, context, execute=None):
        super().__init__(context)
        self.rbd = rbd
        self.rados = rados
        self.chunk_size = CONF.backup_ceph_chunk_size
        self._execute = execute or utils.execute

        if self._supports_stripingv2:
            self.rbd_stripe_unit = CONF.backup_ceph_stripe_unit
            self.rbd_stripe_count = CONF.backup_ceph_stripe_count
        else:
            LOG.info("RBD striping not supported - ignoring configuration "
                     "settings for rbd striping.")
            self.rbd_stripe_count = 0
            self.rbd_stripe_unit = 0

        self._ceph_backup_user = CONF.backup_ceph_user
        self._ceph_backup_pool = CONF.backup_ceph_pool
        self._ceph_backup_conf = CONF.backup_ceph_conf

    @staticmethod
    def get_driver_options() -> list:
        return service_opts

    @staticmethod
    def _validate_string_args(*args: str) -> bool:
        """Ensure all args are non-None and non-empty."""
        return all(args)

    @staticmethod
    def _ceph_args(user: str, conf: Optional[str] = None,
                   pool: Optional[str] = None) -> List[str]:
        """Create default ceph args for executing rbd commands.

        If no --conf is provided, rbd will look in the default locations e.g.
        /etc/ceph/ceph.conf
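
        For example::

            _ceph_args('cinder', '/etc/ceph/ceph.conf', pool='backups')
            # -> ['--id', 'cinder', '--conf', '/etc/ceph/ceph.conf',
            #     '--pool', 'backups']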
        """

        # Make sure user arg is valid since rbd command may not fail if
        # invalid/no user provided, resulting in unexpected behaviour.
        if not CephBackupDriver._validate_string_args(user):
            raise exception.BackupInvalidCephArgs(_("invalid user '%s'") %
                                                  user)

        args = ['--id', user]
        if conf:
            args.extend(['--conf', conf])
        if pool:
            args.extend(['--pool', pool])

        return args

    @property
    def _supports_layering(self) -> bool:
        """Determine if copy-on-write is supported by our version of librbd."""
        return hasattr(self.rbd, 'RBD_FEATURE_LAYERING')

    @property
    def _supports_stripingv2(self) -> bool:
        """Determine if striping is supported by our version of librbd."""
        return hasattr(self.rbd, 'RBD_FEATURE_STRIPINGV2')

    @property
    def _supports_exclusive_lock(self) -> bool:
        """Determine if exclusive-lock is supported by librbd."""
        return hasattr(self.rbd, 'RBD_FEATURE_EXCLUSIVE_LOCK')

    @property
    def _supports_journaling(self) -> bool:
        """Determine if journaling is supported by our version of librbd."""
        return hasattr(self.rbd, 'RBD_FEATURE_JOURNALING')

    @property
    def _supports_fast_diff(self) -> bool:
        """Determine if fast-diff is supported by our version of librbd."""
        return hasattr(self.rbd, 'RBD_FEATURE_FAST_DIFF')

    def _get_rbd_support(self) -> Tuple[bool, int]:
        """Determine RBD features supported by our version of librbd."""
        old_format = True
        features = 0
        if self._supports_layering:
            old_format = False
            features |= self.rbd.RBD_FEATURE_LAYERING
        if self._supports_stripingv2:
            old_format = False
            features |= self.rbd.RBD_FEATURE_STRIPINGV2

        if CONF.backup_ceph_image_journals:
            LOG.debug("RBD journaling supported by backend and requested "
                      "via config. Enabling it together with "
                      "exclusive-lock")
            old_format = False
            features |= (self.rbd.RBD_FEATURE_EXCLUSIVE_LOCK |
                         self.rbd.RBD_FEATURE_JOURNALING)

        # NOTE(christian_rohmann): Check for fast-diff support and enable it
        if self._supports_fast_diff:
            LOG.debug("RBD also supports fast-diff, enabling it "
                      "together with exclusive-lock and object-map")
            old_format = False
            features |= (self.rbd.RBD_FEATURE_EXCLUSIVE_LOCK |
                         self.rbd.RBD_FEATURE_OBJECT_MAP |
                         self.rbd.RBD_FEATURE_FAST_DIFF)

        return (old_format, features)

    def check_for_setup_error(self) -> None:
        """Returns an error if prerequisites aren't met."""
        if rados is None or rbd is None:
            msg = _('rados and rbd python libraries not found')
            raise exception.BackupDriverException(reason=msg)

        for attr in ['backup_ceph_user', 'backup_ceph_pool',
                     'backup_ceph_conf']:
            val = getattr(CONF, attr)
            if not val:
                raise exception.InvalidConfigurationValue(option=attr,
                                                          value=val)
        # NOTE: Checking connection to ceph
        # RADOSClient __init__ method invokes _connect_to_rados
        # so no need to check for self.rados.Error here.
        with rbd_driver.RADOSClient(self, self._ceph_backup_pool):
            pass

        # NOTE(christian_rohmann): Check features required for journaling
        if CONF.backup_ceph_image_journals:
            if not (self._supports_exclusive_lock and
                    self._supports_journaling):
                LOG.error("RBD journaling not supported - unable to "
                          "support per image mirroring in backup pool")
                raise exception.BackupInvalidCephArgs(
                    _("Image Journaling set but RBD backend does "
                      "not support journaling")
                )

    def _connect_to_rados(self,
                          pool: Optional[str] = None) -> Tuple['rados.Rados',
                                                               'rados.Ioctx']:
        """Establish connection to the backup Ceph cluster."""
        client = eventlet.tpool.Proxy(self.rados.Rados(
                                      rados_id=self._ceph_backup_user,
                                      conffile=self._ceph_backup_conf))
        try:
            client.connect()
            pool_to_open = pool or self._ceph_backup_pool
            ioctx = client.open_ioctx(pool_to_open)
            return client, ioctx
        except self.rados.Error:
            # shutdown cannot raise an exception
            client.shutdown()
            raise

    @staticmethod
    def _disconnect_from_rados(client: 'rados.Rados',
                               ioctx: 'rados.Ioctx') -> None:
        """Terminate connection with the backup Ceph cluster."""
        # closing an ioctx cannot raise an exception
        ioctx.close()
        client.shutdown()

    @staticmethod
    def _format_base_name(service_metadata: str) -> str:
        base_name = json.loads(service_metadata)["base"]
        return base_name

    @staticmethod
    def _get_backup_base_name(
            volume_id: str,
            backup: Optional['objects.Backup'] = None) -> str:
        """Return name of base image used for backup.

        Incremental backups use a new base name so we support old and new style
        format.
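
        Names generated here take one of the following forms::

            volume-<volume_id>.backup.base         # diff/incremental base
            volume-<volume_id>.backup.<backup_id>  # old-style full backup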
        """
        if not backup:
            return "volume-%s.backup.base" % volume_id

        if backup.service_metadata:
            return CephBackupDriver._format_base_name(backup.service_metadata)

        # 'parent' field will only be present in incremental backups. This is
        # filled by cinder-api
        if backup.parent:
            # Old backups don't have the base name in the service_metadata,
            # so we use the default RBD backup base
            if backup.parent.service_metadata:
                service_metadata = backup.parent.service_metadata
                base_name = CephBackupDriver._format_base_name(
                    service_metadata)
            else:
                base_name = "volume-%s.backup.base" % volume_id

            return base_name

        return "volume-%s.backup.%s" % (volume_id, backup.id)

    def _discard_bytes(self,
                       volume: linuxrbd.RBDVolumeIOWrapper,
                       offset: int,
                       length: int) -> None:
        """Trim length bytes from offset.

        If the volume is an rbd, issue a discard(); otherwise assume it is a
        file and pad with zeroes.
        """
        if length:
            LOG.debug("Discarding %(length)s bytes from offset %(offset)s",
                      {'length': length, 'offset': offset})
            if self._file_is_rbd(volume):
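                # Issue the discard in chunks of at most (2 GiB - 1) bytes;
                # longer trims are split across multiple discard() calls.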
                limit = 2 * units.Gi - 1
                chunks = int(length / limit)
                for chunk in range(0, chunks):
                    eventlet.tpool.Proxy(volume.rbd_image).discard(
                        offset + chunk * limit, limit)
                rem = int(length % limit)
                if rem:
                    eventlet.tpool.Proxy(volume.rbd_image).discard(
                        offset + chunks * limit, rem)
            else:
                zeroes = b'\0' * self.chunk_size
                chunks = int(length / self.chunk_size)
                for chunk in range(0, chunks):
                    LOG.debug("Writing zeroes chunk %d", chunk)
                    volume.write(zeroes)
                    volume.flush()

                rem = int(length % self.chunk_size)
                if rem:
                    zeroes = b'\0' * rem
                    volume.write(zeroes)
                    volume.flush()

    def _transfer_data(self,
                       src: linuxrbd.RBDVolumeIOWrapper,
                       src_name: str,
                       dest: linuxrbd.RBDVolumeIOWrapper,
                       dest_name: str,
                       length: int) -> None:
        """Transfer data between files (Python IO objects)."""
        LOG.debug("Transferring data between '%(src)s' and '%(dest)s'",
                  {'src': src_name, 'dest': dest_name})

        chunks = int(length / self.chunk_size)
        LOG.debug("%(chunks)s chunks of %(bytes)s bytes to be transferred",
                  {'chunks': chunks, 'bytes': self.chunk_size})

        for chunk in range(0, chunks):
            before = time.time()
            data = src.read(self.chunk_size)
            # If we have reached the end of the source, discard any extraneous
            # bytes from the destination volume if trim is enabled, then stop
            # writing.
            if data == b'':
                if CONF.restore_discard_excess_bytes:
                    self._discard_bytes(dest, dest.tell(),
                                        length - dest.tell())

                return

            dest.write(data)
            dest.flush()
            delta = (time.time() - before)
            rate = (self.chunk_size / delta) / 1024
            LOG.debug("Transferred chunk %(chunk)s of %(chunks)s "
                      "(%(rate)dK/s)",
                      {'chunk': chunk + 1,
                       'chunks': chunks,
                       'rate': rate})

        rem = int(length % self.chunk_size)
        if rem:
            LOG.debug("Transferring remaining %s bytes", rem)
            data = src.read(rem)
            if data == b'':
                if CONF.restore_discard_excess_bytes:
                    self._discard_bytes(dest, dest.tell(), rem)
            else:
                dest.write(data)
                dest.flush()

    def _create_base_image(self,
                           name: str,
                           size: int,
                           rados_client: 'rados.Rados') -> None:
        """Create a base backup image.

        This will be the base image used for storing differential exports.
        """
        LOG.debug("Creating base image '%s'", name)
        old_format, features = self._get_rbd_support()
        eventlet.tpool.Proxy(self.rbd.RBD()).create(
            ioctx=rados_client.ioctx,
            name=name,
            size=size,
            old_format=old_format,
            features=features,
            stripe_unit=self.rbd_stripe_unit,
            stripe_count=self.rbd_stripe_count)

    def _delete_backup_snapshot(self,
                                rados_client: 'rados.Rados',
                                base_name: Optional[str],
                                backup_id: str) -> Tuple[Optional[str], int]:
        """Delete snapshot associated with this backup if one exists.

        A backup should have at most ONE associated snapshot.

        This is required before attempting to delete the base image. The
        snapshot on the original volume can be left as it will be purged when
        the volume is deleted.

        Returns tuple(deleted_snap_name, num_of_remaining_snaps).
        """
        remaining_snaps = 0
        base_rbd = eventlet.tpool.Proxy(self.rbd.Image(rados_client.ioctx,
                                                       base_name))
        try:
            snap_name = self._get_backup_snap_name(base_rbd, base_name,
                                                   backup_id)
            if snap_name:
                LOG.debug("Deleting backup snapshot='%s'", snap_name)
                base_rbd.remove_snap(snap_name)
            else:
                LOG.debug("No backup snapshot to delete")

            # Now check whether any snapshots remain on the base image
            backup_snaps = self.get_backup_snaps(base_rbd)
            if backup_snaps:
                remaining_snaps = len(backup_snaps)
        finally:
            base_rbd.close()

        return snap_name, remaining_snaps

    def _try_delete_base_image(self,
                               backup: 'objects.Backup',
                               base_name: Optional[str] = None) -> None:
        """Try to delete backup RBD image.

        If the rbd image is a base image for incremental backups, it may have
        snapshots. Delete the snapshot associated with backup_id and if the
        image has no more snapshots, delete it. Otherwise return.

        If no base name is provided try normal (full) format then diff format
        image name.

        If a base name is provided but does not exist, ImageNotFound will be
        raised.

        If the image is busy, a number of retries will be performed if
        ImageBusy is received, after which the exception will be propagated to
        the caller.
        """
        retries = 3
        delay = 5
        try_diff_format = False
        volume_id = backup.volume_id

        if base_name is None:
            try_diff_format = True

            base_name = self._get_backup_base_name(volume_id, backup=backup)
            LOG.debug("Trying diff format basename='%(basename)s' for "
                      "backup base image of volume %(volume)s.",
                      {'basename': base_name, 'volume': volume_id})

        with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
                                  backup.container)) as client:
            rbd_exists, base_name = \
                self._rbd_image_exists(base_name, volume_id, client,
                                       try_diff_format=try_diff_format)
            if not rbd_exists:
                raise self.rbd.ImageNotFound(_("image %s not found") %
                                             base_name)

            while retries >= 0:
                # First delete associated snapshot from base image (if exists)
                snap, rem = self._delete_backup_snapshot(client, base_name,
                                                         backup.id)
                if rem:
                    LOG.info(
                        "Backup base image of volume %(volume)s still "
                        "has %(snapshots)s snapshots so skipping base "
                        "image delete.",
                        {'snapshots': rem, 'volume': volume_id})
                    return

                LOG.info("Deleting backup base image='%(basename)s' of "
                         "volume %(volume)s.",
                         {'basename': base_name, 'volume': volume_id})
                # Delete base if no more snapshots
                try:
                    eventlet.tpool.Proxy(self.rbd.RBD()).remove(
                        client.ioctx, base_name)
                except self.rbd.ImageBusy:
                    # Allow a retry if the image is busy
                    if retries > 0:
                        LOG.info("Backup image of volume %(volume)s is "
                                 "busy, retrying %(retries)s more time(s) "
                                 "in %(delay)ss.",
                                 {'retries': retries,
                                  'delay': delay,
                                  'volume': volume_id})
                    else:
                        LOG.error("Max retries reached deleting backup "
                                  "%(basename)s image of volume %(volume)s.",
                                  {'volume': volume_id,
                                   'basename': base_name})
                        raise
                else:
                    LOG.debug("Base backup image='%(basename)s' of volume "
                              "%(volume)s deleted.",
                              {'basename': base_name, 'volume': volume_id})
                    retries = 0
                finally:
                    retries -= 1

            # Since we have deleted the base image we can delete the source
            # volume backup snapshot.
            src_name = volume_id
            if src_name in eventlet.tpool.Proxy(
                    self.rbd.RBD()).list(client.ioctx):
                LOG.debug("Deleting source volume snapshot '%(snapshot)s' "
                          "for backup %(basename)s.",
                          {'snapshot': snap, 'basename': base_name})
                src_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx,
                                                              src_name))
                try:
                    src_rbd.remove_snap(snap)
                finally:
                    src_rbd.close()

    def _piped_execute(self, cmd1: list, cmd2: list) -> Tuple[int, bytes]:
        """Pipe output of cmd1 into cmd2."""
        LOG.debug("Piping cmd1='%s' into...", ' '.join(cmd1))
        LOG.debug("cmd2='%s'", ' '.join(cmd2))

        try:
            p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE,
                                  close_fds=True)
        except OSError as e:
            LOG.error("Pipe1 failed - %s ", e)
            raise

        # NOTE(dosaboy): ensure that the pipe is blocking. This is to work
        # around the case where evenlet.green.subprocess is used which seems to
        # use a non-blocking pipe.
        assert p1.stdout is not None
        flags = fcntl.fcntl(p1.stdout, fcntl.F_GETFL) & (~os.O_NONBLOCK)
        fcntl.fcntl(p1.stdout, fcntl.F_SETFL, flags)

        try:
            p2 = subprocess.Popen(cmd2, stdin=p1.stdout,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE,
                                  close_fds=True)
        except OSError as e:
            LOG.error("Pipe2 failed - %s ", e)
            raise

        p1.stdout.close()
        stdout, stderr = p2.communicate()
        return p2.returncode, stderr

    def _rbd_diff_transfer(self, src_name: str, src_pool: str,
                           dest_name: str, dest_pool: str,
                           src_user: str, src_conf: Optional[str],
                           dest_user: str, dest_conf: Optional[str],
                           src_snap: Optional[str] = None,
                           from_snap: Optional[str] = None) -> None:
        """Copy only extents changed between two points.

        If no snapshot is provided, the diff extents will be all those changed
        since the rbd volume/base was created, otherwise it will be those
        changed since the snapshot was created.
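
        The resulting pipeline is equivalent to::

            rbd export-diff <src ceph args> [--from-snap <from_snap>]
                <src_pool>/<src_name>[@<src_snap>] - |
            rbd import-diff <dest ceph args> - <dest_pool>/<dest_name>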
        """
        LOG.debug("Performing differential transfer from '%(src)s' to "
                  "'%(dest)s'",
                  {'src': src_name, 'dest': dest_name})

        # NOTE(dosaboy): Need to be tolerant of clusters/clients that do
        # not support these operations since at the time of writing they
        # were very new.

        src_ceph_args = self._ceph_args(src_user, src_conf, pool=src_pool)
        dest_ceph_args = self._ceph_args(dest_user, dest_conf, pool=dest_pool)

        cmd1 = ['rbd', 'export-diff'] + src_ceph_args
        if from_snap is not None:
            cmd1.extend(['--from-snap', from_snap])
        if src_snap:
            path = "%s/%s@%s" % (src_pool, src_name, src_snap)
        else:
            path = "%s/%s" % (src_pool, src_name)
        cmd1.extend([path, '-'])

        cmd2 = ['rbd', 'import-diff'] + dest_ceph_args
        rbd_path = "%s/%s" % (dest_pool, dest_name)
        cmd2.extend(['-', rbd_path])

        ret, stderr = self._piped_execute(cmd1, cmd2)
        if ret:
            msg = (_("RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)") %
                   {'ret': ret, 'stderr': stderr})
            LOG.info(msg)
            raise exception.BackupRBDOperationFailed(msg)

    def _rbd_image_exists(
            self, name: str, volume_id: str,
            client: 'rados.Rados',
            try_diff_format: bool = False) -> Tuple[bool, str]:
        """Return tuple (exists, name)."""
        rbds = eventlet.tpool.Proxy(self.rbd.RBD()).list(client.ioctx)
        if name not in rbds:
            if try_diff_format:
                LOG.debug("Image '%s' not found - trying diff format name",
                          name)
                name = CephBackupDriver._get_backup_base_name(volume_id)
                if name not in rbds:
                    LOG.debug("Diff format image '%s' not found", name)
                    return False, name
            else:
                LOG.debug("Image '%s' not found", name)
                return False, name

        return True, name

    def _snap_exists(self,
                     base_name: str,
                     snap_name: str,
                     client: 'rados.Rados') -> bool:
        """Return True if snapshot exists in base image."""
        base_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx,
                                        base_name, read_only=True))
        try:
            snaps = base_rbd.list_snaps()

            if snaps is None:
                return False

            for snap in snaps:
                if snap['name'] == snap_name:
                    return True
        finally:
            base_rbd.close()

        return False

    def _full_rbd_backup(self,
                         container: str,
                         base_name: str,
                         length: int) -> Tuple[Optional[str], bool]:
        """Create the base_image for a full RBD backup."""
        with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
                                  container)) as client:
            self._create_base_image(base_name, length, client)
        # Return from_snap=None and image_created=True: if a later exception
        # occurs while taking the backup snapshot, the caller uses
        # image_created to clean up the base image.
        return None, True

    def _incremental_rbd_backup(
            self, backup: 'objects.Backup',
            base_name: str, length: int,
            source_rbd_image, volume_id: str) -> Tuple[Optional[str], bool]:
        """Select the last snapshot for a RBD incremental backup."""

        container = backup.container
        last_incr = backup.parent_id
        LOG.debug("Trying to perform an incremental backup with container: "
                  "%(container)s, base_name: %(base)s, source RBD image: "
                  "%(source)s, volume ID %(volume)s and last incremental "
                  "backup ID: %(incr)s.",
                  {'container': container,
                   'base': base_name,
                   'source': source_rbd_image,
                   'volume': volume_id,
                   'incr': last_incr,
                   })

        with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
                                  container)) as client:
            base_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx,
                                                           base_name,
                                                           read_only=True))
            try:
                from_snap = self._get_backup_snap_name(base_rbd,
                                                       base_name,
                                                       last_incr)
                if from_snap is None:
                    msg = (_(
                        "Can't find snapshot from parent %(incr)s and "
                        "base name image %(base)s.") %
                        {'incr': last_incr, 'base': base_name})
                    LOG.error(msg)
                    raise exception.BackupRBDOperationFailed(msg)
            finally:
                base_rbd.close()

        return from_snap, False

    def _backup_rbd(self,
                    backup: 'objects.Backup',
                    volume_file: linuxrbd.RBDVolumeIOWrapper,
                    volume_name: str, length: int) -> Dict[str, str]:
        """Create an incremental or full backup from an RBD image."""
        rbd_user = volume_file.rbd_user
        rbd_pool = volume_file.rbd_pool
        rbd_conf = volume_file.rbd_conf
        source_rbd_image = eventlet.tpool.Proxy(volume_file.rbd_image)
        volume_id = backup.volume_id
        base_name = None

        # If backup.parent_id is None, perform a full RBD backup
        if backup.parent_id is None:
            base_name = self._get_backup_base_name(volume_id, backup=backup)
            from_snap, image_created = self._full_rbd_backup(backup.container,
                                                             base_name,
                                                             length)
        # Otherwise perform an incremental RBD backup
        else:
            # Find the base name from the parent backup's service_metadata
            base_name = self._get_backup_base_name(volume_id, backup=backup)
            rbd_img = source_rbd_image
            from_snap, image_created = self._incremental_rbd_backup(backup,
                                                                    base_name,
                                                                    length,
                                                                    rbd_img,
                                                                    volume_id)

        LOG.debug("Using --from-snap '%(snap)s' for incremental backup of "
                  "volume %(volume)s.",
                  {'snap': from_snap, 'volume': volume_id})

        # Snapshot source volume so that we have a new point-in-time
        new_snap = self._get_new_snap_name(backup.id)
        LOG.debug("Creating backup snapshot='%s'", new_snap)
        source_rbd_image.create_snap(new_snap)

        # Attempt differential backup. If this fails, perhaps because librbd
        # or the Ceph cluster version does not support it, clean up and let
        # the error propagate to the caller.
        #
        # TODO(dosaboy): find a way to determine if the operation is supported
        #                rather than brute force approach.
        try:
            before = time.time()
            self._rbd_diff_transfer(volume_name, rbd_pool, base_name,
                                    backup.container,
                                    src_user=rbd_user,
                                    src_conf=rbd_conf,
                                    dest_user=self._ceph_backup_user,
                                    dest_conf=self._ceph_backup_conf,
                                    src_snap=new_snap,
                                    from_snap=from_snap)

            LOG.debug("Differential backup transfer completed in %.4fs",
                      (time.time() - before))

        except exception.BackupRBDOperationFailed:
            with excutils.save_and_reraise_exception():
                LOG.debug("Differential backup transfer failed")

                # Clean up if image was created as part of this operation
                if image_created:
                    self._try_delete_base_image(backup, base_name=base_name)

                # Delete snapshot
                LOG.debug("Deleting diff backup snapshot='%(snapshot)s' of "
                          "source volume='%(volume)s'.",
                          {'snapshot': new_snap, 'volume': volume_id})
                source_rbd_image.remove_snap(new_snap)

        return {'service_metadata': '{"base": "%s"}' % base_name}

    @staticmethod
    def _file_is_rbd(volume_file: linuxrbd.RBDVolumeIOWrapper) -> bool:
        """Returns True if the volume_file is actually an RBD image."""
        return hasattr(volume_file, 'rbd_image')

    def _full_backup(self, backup: 'objects.Backup',
                     src_volume: linuxrbd.RBDVolumeIOWrapper,
                     src_name: str, length: int) -> None:
        """Perform a full backup of src volume.

        First creates a base backup image in our backup location, then
        performs a chunked copy of all data from the source volume to a new
        backup rbd image.
        """
        volume_id = backup.volume_id
        if backup.snapshot_id:
            backup_name = self._get_backup_base_name(volume_id)
        else:
            backup_name = self._get_backup_base_name(volume_id, backup=backup)

        with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
                                  backup.container)) as client:
            # First create base backup image
            old_format, features = self._get_rbd_support()
            LOG.debug("Creating backup base image='%(name)s' for volume "
                      "%(volume)s.",
                      {'name': backup_name, 'volume': volume_id})
            eventlet.tpool.Proxy(self.rbd.RBD()).create(
                ioctx=client.ioctx,
                name=backup_name,
                size=length,
                old_format=old_format,
                features=features,
                stripe_unit=self.rbd_stripe_unit,
                stripe_count=self.rbd_stripe_count)

            LOG.debug("Copying data from volume %s.", volume_id)
            dest_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx,
                                            backup_name))
            meta_io_proxy = None
            try:
                rbd_meta = linuxrbd.RBDImageMetadata(dest_rbd,
                                                     backup.container,
                                                     self._ceph_backup_user,
                                                     self._ceph_backup_conf)
                rbd_fd = linuxrbd.RBDVolumeIOWrapper(rbd_meta)
                meta_io_proxy = eventlet.tpool.Proxy(rbd_fd)
                self._transfer_data(src_volume, src_name, meta_io_proxy,
                                    backup_name, length)
            finally:
                # Closing the wrapper will close the image as well
                if meta_io_proxy:
                    meta_io_proxy.close()
                else:
                    dest_rbd.close()

    @staticmethod
    def backup_snapshot_name_pattern() -> str:
        """Returns the pattern used to match backup snapshots.

        It is essential that snapshots created for purposes other than backups
        do not have this name format.
        """
        return r"^backup\.([a-z0-9\-]+?)\.snap\.(.+)$"

    @classmethod
    def get_backup_snaps(cls, rbd_image: 'rbd.Image',
                         sort: bool = False) -> List[dict]:
        """Get all backup snapshots for the given rbd image.

        NOTE: this call is made public since these snapshots must be deleted
              before the base volume can be deleted.
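
        Each returned entry is a dict of the form::

            {'name': <snapshot name>,
             'backup_id': <backup id>,
             'timestamp': <timestamp>}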
        """
        snaps = rbd_image.list_snaps()

        backup_snaps = []
        for snap in snaps:
            search_key = cls.backup_snapshot_name_pattern()
            result = re.search(search_key, snap['name'])
            if result:
                backup_snaps.append({'name': result.group(0),
                                     'backup_id': result.group(1),
                                     'timestamp': result.group(2)})

        if sort:
            # Sort into descending order of timestamp (most recent first)
            backup_snaps.sort(key=lambda x: x['timestamp'], reverse=True)

        return backup_snaps

    def _get_new_snap_name(self, backup_id: str) -> str:
        return "backup.%s.snap.%s" % (backup_id, time.time())

    def _get_backup_snap_name(self, rbd_image: 'rbd.Image',
                              name: Optional[str], backup_id: str):
        """Return the name of the snapshot associated with backup_id.

        The rbd image provided must be the base image used for an incremental
        backup.

        A backup is only allowed ONE associated snapshot. If more are found,
        exception.BackupOperationError is raised.
        """
        snaps = self.get_backup_snaps(rbd_image)

        LOG.debug("Looking for snapshot of backup base '%s'", name)

        if not snaps:
            LOG.debug("Backup base '%s' has no snapshots", name)
            return None

        snaps = [snap['name'] for snap in snaps
                 if snap['backup_id'] == backup_id]

        if not snaps:
            LOG.debug("Backup '%s' has no snapshot", backup_id)
            return None

        if len(snaps) > 1:
            msg = (_("Backup should only have one snapshot but instead has %s")
                   % len(snaps))
            LOG.error(msg)
            raise exception.BackupOperationError(msg)

        LOG.debug("Found snapshot '%s'", snaps[0])
        return snaps[0]

    def _get_volume_size_bytes(self, volume: 'objects.Volume') -> int:
        """Return the size in bytes of the given volume.

        Raises exception.InvalidParameterValue if volume size is 0.
        """
        if int(volume['size']) == 0:
            errmsg = _("Need non-zero volume size")
            raise exception.InvalidParameterValue(errmsg)

        return int(volume['size']) * units.Gi

    def _backup_metadata(self, backup: 'objects.Backup') -> None:
        """Backup volume metadata.

        NOTE(dosaboy): the metadata we are backing up is obtained from a
                       versioned api so we should not alter it in any way here.
                       We must also be sure that the service that will perform
                       the restore is compatible with the version used.
        """
        json_meta = self.get_metadata(backup.volume_id)
        if not json_meta:
            LOG.debug("No metadata to backup for volume %s.", backup.volume_id)
            return

        LOG.debug("Backing up metadata for volume %s.", backup.volume_id)
        try:
            with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
                                      backup.container)) as client:
                vol_meta_backup = VolumeMetadataBackup(client, backup.id)
                vol_meta_backup.set(json_meta)
        except exception.VolumeMetadataBackupExists as e:
            msg = (_("Failed to backup volume metadata - %s") % e)
            raise exception.BackupOperationError(msg)

    def backup(self, backup: 'objects.Backup',
               volume_file: linuxrbd.RBDVolumeIOWrapper,
               backup_metadata: bool = True) -> dict:
        """Backup volume and metadata (if available) to Ceph object store.

        If the source volume is an RBD we will attempt to do an
        incremental/differential backup, otherwise a full copy is performed.
        If the RBD backup fails, the partial backup is deleted and the error
        is propagated to the caller.
        """
        volume = self.db.volume_get(self.context, backup.volume_id)
        updates = {}
        if not backup.container:
            backup.container = self._ceph_backup_pool
            backup.save()

        LOG.debug("Starting backup of volume='%s'.", volume.id)

        # Ensure we are at the beginning of the volume
        volume_file.seek(0)
        length = self._get_volume_size_bytes(volume)

        if backup.snapshot_id:
            do_full_backup = True
        elif self._file_is_rbd(volume_file):
            # If the volume is an RBD, attempt an incremental or full backup.
            do_full_backup = False
            LOG.debug("Volume file is RBD: attempting optimized backup")
            try:
                updates = self._backup_rbd(backup, volume_file, volume.name,
                                           length)
            except exception.BackupRBDOperationFailed:
                with excutils.save_and_reraise_exception():
                    self.delete_backup(backup)
        else:
            if backup.parent_id:
                LOG.debug("Volume file is NOT RBD: can't perform "
                          "incremental backup.")
                raise exception.BackupRBDOperationFailed
            LOG.debug("Volume file is NOT RBD: will do full backup.")
            do_full_backup = True

        if do_full_backup:
            try:
                self._full_backup(backup, volume_file, volume.name, length)
            except exception.BackupOperationError:
                with excutils.save_and_reraise_exception():
                    self.delete_backup(backup)

        if backup_metadata:
            try:
                self._backup_metadata(backup)
            except exception.BackupOperationError:
                with excutils.save_and_reraise_exception():
                    # Cleanup.
                    self.delete_backup(backup)

        LOG.debug("Backup '%(backup_id)s' of volume %(volume_id)s finished.",
                  {'backup_id': backup.id, 'volume_id': volume.id})

        return updates

    def _full_restore(self, backup: 'objects.Backup',
                      dest_file,
                      dest_name: str,
                      length: int,
                      src_snap=None) -> None:
        """Restore volume using full copy i.e. all extents.

        This will result in all extents being copied from source to
        destination.
        """
        with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
                                  backup.container)) as client:
            # If a source snapshot is provided we assume the base is diff
            # format.
            if src_snap:
                backup_name = self._get_backup_base_name(backup.volume_id,
                                                         backup=backup)
            else:
                backup_name = self._get_backup_base_name(backup.volume_id)

            # Retrieve backup volume
            src_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx,
                                                          backup_name,
                                                          snapshot=src_snap,
                                                          read_only=True))
            try:
                rbd_meta = linuxrbd.RBDImageMetadata(src_rbd,
                                                     backup.container,
                                                     self._ceph_backup_user,
                                                     self._ceph_backup_conf)
                rbd_fd = linuxrbd.RBDVolumeIOWrapper(rbd_meta)
                self._transfer_data(eventlet.tpool.Proxy(rbd_fd), backup_name,
                                    dest_file, dest_name, length)
            finally:
                src_rbd.close()

    def _check_restore_vol_size(self, backup: 'objects.Backup',
                                restore_vol, restore_length: int,
                                src_pool) -> None:
        """Ensure that the restore volume is the correct size.

        If the restore volume is bigger than the backup, the diff restore will
        shrink it to the size of the original backup, so we need to
        post-process and resize it back to its expected size.
        """
        backup_base = self._get_backup_base_name(backup.volume_id,
                                                 backup=backup)

        with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
                                  backup.container)) as client:
            adjust_size = 0
            base_image = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx,
                                              backup_base,
                                              read_only=True))
            try:
                if restore_length != base_image.size():
                    adjust_size = restore_length
            finally:
                base_image.close()

        if adjust_size:
            LOG.debug("Adjusting restore vol size")
            restore_vol.rbd_image.resize(adjust_size)

    def _diff_restore_rbd(self, backup: 'objects.Backup',
                          restore_file,
                          restore_name: str,
                          restore_point: Optional[str],
                          restore_length: int) -> None:
        """Attempt restore rbd volume from backup using diff transfer."""
        rbd_user = restore_file.rbd_user
        rbd_pool = restore_file.rbd_pool
        rbd_conf = restore_file.rbd_conf
        base_name = self._get_backup_base_name(backup.volume_id,
                                               backup=backup)

        LOG.debug("Attempting incremental restore from base='%(base)s' "
                  "snap='%(snap)s'",
                  {'base': base_name, 'snap': restore_point})
        before = time.time()
        try:
            self._rbd_diff_transfer(base_name, backup.container,
                                    restore_name, rbd_pool,
                                    src_user=self._ceph_backup_user,
                                    src_conf=self._ceph_backup_conf,
                                    dest_user=rbd_user, dest_conf=rbd_conf,
                                    src_snap=restore_point)
        except exception.BackupRBDOperationFailed:
            LOG.exception("Differential restore failed, trying full restore")
            raise

        # If the volume we are restoring to is larger than the backup volume,
        # we will need to resize it after the diff import since import-diff
        # appears to shrink the target rbd volume to the size of the original
        # backup volume.
        self._check_restore_vol_size(backup, restore_file, restore_length,
                                     rbd_pool)

        LOG.debug("Restore transfer completed in %.4fs",
                  (time.time() - before))

    def _get_restore_point(self,
                           base_name: str,
                           backup_id: str) -> Optional[str]:
        """Get restore point snapshot name for incremental backup.

        If the backup was not incremental (determined by the fact that the
        base has no snapshots/restore points), None is returned. Otherwise, the
        restore point associated with backup_id is returned.
        """
        with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self,
                                  self._ceph_backup_pool)) as client:
            base_rbd = eventlet.tpool.Proxy(self.rbd.Image(client.ioctx,
                                            base_name, read_only=True))
            try:
                restore_point = self._get_backup_snap_name(base_rbd, base_name,
                                                           backup_id)
            finally:
                base_rbd.close()

        return restore_point

    @staticmethod
    def _rbd_has_extents(rbd_volume) -> bool:
        """Check whether the given rbd volume has extents.

        Return True if it has extents, otherwise False.
        """
        extents = []

        def iter_cb(offset, length, exists):
            if exists:
                extents.append(length)

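        # With a from_snapshot of None, diff_iterate() visits every
        # allocated extent in the image, invoking iter_cb for each one.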
        rbd_volume.diff_iterate(0, rbd_volume.size(), None, iter_cb)

        if extents:
            LOG.debug("RBD has %s extents", sum(extents))
            return True

        return False

    def _diff_restore_allowed(self, base_name: str, backup: 'objects.Backup',
                              volume: 'objects.Volume',
                              volume_file: linuxrbd.RBDVolumeIOWrapper,
                              rados_client: 'rados.Rados'
                              ) -> Tuple[bool, Optional[str]]:
        """Determine if differential restore is possible and restore point.

        Determine whether a differential restore is possible/allowed and
        find the restore point if the backup base is diff-format.

        In order for a differential restore to be performed we need:
            * the destination volume must be RBD
            * the destination volume must have zero extents
            * the backup base image must exist
            * the backup must have a restore point
            * the destination volume must differ from the backup's source
              volume

        Returns a tuple: True if a differential restore is allowed (False
        otherwise) and the restore point if the backup base is diff-format.
        """
        # NOTE(dosaboy): base_name here must be diff format.
        rbd_exists, base_name = self._rbd_image_exists(base_name,
                                                       backup.volume_id,
                                                       rados_client)

        if not rbd_exists:
            return False, None

        # Get the restore point. If no restore point is found, we assume
        # that the backup was not performed using diff/incremental methods,
        # so we enforce a full copy.
        restore_point = self._get_restore_point(base_name, backup.id)

        if restore_point:
            if self._file_is_rbd(volume_file):
                LOG.debug("Volume file is RBD.")
                # If the volume we are restoring to is the volume the backup
                # was made from, force a full restore since a diff will not
                # work in this case.
                if volume.id == backup.volume_id:
                    LOG.debug("Destination volume is same as backup source "
                              "volume %s - forcing full copy.", volume.id)
                    return False, restore_point

                # If the destination volume has extents we cannot allow a diff
                # restore.
                if self._rbd_has_extents(volume_file.rbd_image):
                    # We return the restore point so that a full copy is done
                    # from snapshot.
                    LOG.debug("Destination has extents - forcing full copy")
                    return False, restore_point

                return True, restore_point
            else:
                LOG.debug("Volume file is NOT RBD.")
        else:
            LOG.info("No restore point found for backup='%(backup)s' of "
                     "volume %(volume)s although base image is found - "
                     "forcing full copy.",
                     {'backup': backup.id,
                      'volume': backup.volume_id})
        return False, restore_point

    def _restore_volume(self,
                        backup: 'objects.Backup',
                        volume: 'objects.Volume',
                        volume_file: linuxrbd.RBDVolumeIOWrapper) -> None:
        """Restore volume from backup using diff transfer if possible.

        Attempts a differential restore and falls back to a full copy if
        the diff fails.
        """
        length = int(volume.size) * units.Gi

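        # Backups carrying service_metadata are assumed to record their base
        # image name there; older backups use the legacy name derived from
        # the volume id alone.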
        if backup.service_metadata:
            base_name = self._get_backup_base_name(backup.volume_id, backup)
        else:
            base_name = self._get_backup_base_name(backup.volume_id)

        with eventlet.tpool.Proxy(rbd_driver.RADOSClient(
                                  self, backup.container)) as client:
            diff_allowed, restore_point = \
                self._diff_restore_allowed(base_name, backup, volume,
                                           volume_file, client)

        do_full_restore = True
        if diff_allowed:
            # Attempt diff
            try:
                LOG.debug("Attempting differential restore.")
                self._diff_restore_rbd(backup, volume_file, volume.name,
                                       restore_point, length)
                do_full_restore = False
            except exception.BackupRBDOperationFailed:
                LOG.debug("Forcing full restore to volume %s.",
                          volume.id)

        if do_full_restore:
            # Otherwise full copy
            LOG.debug("Running full restore.")
            self._full_restore(backup, volume_file, volume.name,
                               length, src_snap=restore_point)

    def _restore_metadata(self,
                          backup: 'objects.Backup',
                          volume_id: str) -> None:
        """Restore volume metadata from backup.

        If this backup has associated metadata, save it to the restore
        target; otherwise do nothing.
        """
        try:
            with eventlet.tpool.Proxy(rbd_driver.RADOSClient(self)) as client:
                meta_bak = VolumeMetadataBackup(client, backup.id)
                meta = meta_bak.get()
                if meta is not None:
                    self.put_metadata(volume_id, meta)
                else:
                    LOG.debug("Volume %s has no backed up metadata.",
                              backup.volume_id)
        except exception.BackupMetadataUnsupportedVersion:
            msg = _("Metadata restore failed due to incompatible version")
            LOG.error(msg)
            raise exception.BackupOperationError(msg)

    def restore(self,
                backup: 'objects.Backup',
                volume_id: str,
                volume_file: linuxrbd.RBDVolumeIOWrapper) -> None:
        """Restore volume from backup in Ceph object store.

        If volume metadata is available this will also be restored.
        """
        target_volume = self.db.volume_get(self.context, volume_id)
        LOG.debug('Starting restore from Ceph backup=%(src)s to '
                  'volume=%(dest)s',
                  {'src': backup.id, 'dest': target_volume.name})

        try:
            self._restore_volume(backup, target_volume, volume_file)

            # Be tolerant of IO implementations that do not support fileno()
            try:
                fileno = volume_file.fileno()
            except IOError:
                LOG.debug("Restore target I/O object does not support "
                          "fileno() - skipping call to fsync().")
            else:
                os.fsync(fileno)

            self._restore_metadata(backup, volume_id)

            LOG.debug('Restore to volume %s finished successfully.',
                      volume_id)
        except exception.BackupOperationError as e:
            LOG.error('Restore to volume %(volume)s finished with error - '
                      '%(error)s.', {'error': e, 'volume': volume_id})
            raise

    def delete_backup(self, backup: 'objects.Backup') -> None:
        """Delete the given backup from Ceph object store."""
        LOG.debug('Delete started for backup=%s', backup.id)

        delete_failed = False
        has_pool = True
        try:
            self._try_delete_base_image(backup)
        except self.rbd.ImageNotFound:
            LOG.warning(
                "RBD image for backup %(backup)s of volume %(volume)s "
                "not found. Deleting backup metadata.",
                {'backup': backup.id, 'volume': backup.volume_id})
            delete_failed = True
        except self.rados.ObjectNotFound:
            LOG.warning("The pool %(pool)s doesn't exist.",
                        {'pool': backup.container})
            delete_failed = True
            has_pool = False

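        # Remove any backed up volume metadata stored alongside the backup,
        # but only if the pool it lives in still exists.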
        if has_pool:
            with eventlet.tpool.Proxy(rbd_driver.RADOSClient(
                                      self, backup.container)) as client:
                VolumeMetadataBackup(client, backup.id).remove_if_exists()

        if delete_failed:
            LOG.info("Delete of backup '%(backup)s' for volume '%(volume)s' "
                     "finished with warning.",
                     {'backup': backup.id, 'volume': backup.volume_id})
        else:
            LOG.debug("Delete of backup '%(backup)s' for volume "
                      "'%(volume)s' finished.",
                      {'backup': backup.id, 'volume': backup.volume_id})