-rw-r--r--  .gitmodules                                                7
-rw-r--r--  .gitreview                                                 1
-rw-r--r--  .zuul.yaml                                                32
-rwxr-xr-x  bin/swift-losf-volume-check                              440
-rwxr-xr-x  bin/swift-mount-losf                                     511
-rwxr-xr-x  bin/swift-object-rpcmanager                               26
-rw-r--r--  doc/saio/swift/object-server/1.conf                        4
-rw-r--r--  doc/saio/swift/object-server/2.conf                        4
-rw-r--r--  doc/saio/swift/object-server/3.conf                        4
-rw-r--r--  doc/saio/swift/object-server/4.conf                        4
-rw-r--r--  etc/object-server.conf-sample                              8
-rw-r--r--  go/swift-rpc-losf/Makefile                                49
-rw-r--r--  go/swift-rpc-losf/README.md                               20
-rw-r--r--  go/swift-rpc-losf/codes/codes.go                          17
-rw-r--r--  go/swift-rpc-losf/db.go                                  121
-rw-r--r--  go/swift-rpc-losf/db_goleveldb.go                        230
-rw-r--r--  go/swift-rpc-losf/db_leveldb.go                          239
-rw-r--r--  go/swift-rpc-losf/encoding.go                            198
-rw-r--r--  go/swift-rpc-losf/encoding_test.go                       225
-rw-r--r--  go/swift-rpc-losf/go.mod                                  27
-rw-r--r--  go/swift-rpc-losf/go.sum                                  76
m---------  go/swift-rpc-losf/leveldb (submodule)                      0
-rw-r--r--  go/swift-rpc-losf/logging.go                              43
-rw-r--r--  go/swift-rpc-losf/main.go                                174
-rw-r--r--  go/swift-rpc-losf/proto/fmgr.pb.go                      2213
-rw-r--r--  go/swift-rpc-losf/rpc.go                                1642
-rw-r--r--  go/swift-rpc-losf/rpc_test.go                           1014
m---------  go/swift-rpc-losf/snappy (submodule)                       0
-rw-r--r--  go/swift-rpc-losf/stats.go                                41
-rw-r--r--  go/swift-rpc-losf/status/status.go                        27
-rw-r--r--  go/swift-rpc-losf/swift.go                                66
-rw-r--r--  go/swift-rpc-losf/swift_test.go                          130
-rw-r--r--  go/swift-rpc-losf/utils.go                               102
-rw-r--r--  requirements.txt                                           2
-rw-r--r--  setup.cfg                                                  6
-rw-r--r--  swift/common/manager.py                                    3
-rw-r--r--  swift/obj/diskfile.py                                    136
-rw-r--r--  swift/obj/fmgr.proto                                     225
-rw-r--r--  swift/obj/fmgr_pb2.py                                   2119
-rw-r--r--  swift/obj/header.py                                      394
-rw-r--r--  swift/obj/kvfile.py                                     1260
-rw-r--r--  swift/obj/meta.proto                                      14
-rw-r--r--  swift/obj/meta_pb2.py                                    115
-rw-r--r--  swift/obj/objectrpcmanager.py                            157
-rw-r--r--  swift/obj/reconstructor.py                                29
-rw-r--r--  swift/obj/replicator.py                                   36
-rw-r--r--  swift/obj/rpc_http.py                                    370
-rw-r--r--  swift/obj/vfile.py                                      1201
-rw-r--r--  swift/obj/vfile_utils.py                                 228
-rw-r--r--  test/functional/__init__.py                               34
-rw-r--r--  test/unit/obj/test_reconstructor.py                        4
-rw-r--r--  test/unit/obj/test_replicator.py                           2
-rw-r--r--  test/unit/obj/test_rpc_http.py                            43
-rw-r--r--  test/unit/obj/test_vfile.py                             1232
-rw-r--r--  test/unit/obj/test_vfile_utils.py                        328
-rw-r--r--  tools/playbooks/common/install_dependencies.yaml           2
-rw-r--r--  tools/playbooks/common/install_losf_dependencies.yaml     59
-rw-r--r--  tools/playbooks/losf_setup/pre.yaml                       47
-rw-r--r--  tools/playbooks/losf_setup/run.yaml                       46
-rw-r--r--  tools/playbooks/losf_setup/templates/swift.conf.j2        11
-rw-r--r--  tools/playbooks/probetests/run.yaml                        2
-rw-r--r--  tools/playbooks/saio_single_node_setup/make_rings.yaml     3
-rw-r--r--  tools/playbooks/saio_single_node_setup/setup_saio.yaml    54
-rw-r--r--  tox.ini                                                    7
64 files changed, 15772 insertions, 92 deletions
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 000000000..b0c3a9e17
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,7 @@
+[submodule "snappy"]
+ path = go/swift-rpc-losf/snappy
+ url = https://github.com/google/snappy.git
+ ignore = dirty
+[submodule "leveldb"]
+ path = go/swift-rpc-losf/leveldb
+ url = https://github.com/google/leveldb.git
diff --git a/.gitreview b/.gitreview
index 529e7ec93..2b49e07fb 100644
--- a/.gitreview
+++ b/.gitreview
@@ -2,3 +2,4 @@
host=review.opendev.org
port=29418
project=openstack/swift.git
+defaultbranch=feature/losf
diff --git a/.zuul.yaml b/.zuul.yaml
index 99b035469..4ae44ec20 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -174,6 +174,23 @@
tox_envlist: func-ec
- job:
+ name: swift-tox-func-losf-py27
+ parent: unittests
+ voting: false
+ nodeset: ubuntu-xenial
+ description: |
+ Run losf functional tests for swift under cPython version 2.7.
+ branches: feature/losf
+ pre-run:
+ - tools/playbooks/common/install_dependencies.yaml
+ - tools/playbooks/common/install_losf_dependencies.yaml
+ - tools/playbooks/losf_setup/pre.yaml
+ - tools/playbooks/saio_single_node_setup/setup_saio.yaml
+ - tools/playbooks/saio_single_node_setup/make_rings.yaml
+ run: tools/playbooks/losf_setup/run.yaml
+ post-run: tools/playbooks/probetests/post.yaml
+
+- job:
name: swift-tox-func-ec-py27-centos-7
parent: swift-tox-func-ec-py27
nodeset: centos-7
@@ -184,9 +201,13 @@
description: |
Setup a Swift/Keystone environment and run Swift's func tests.
required-projects:
- - opendev.org/openstack/requirements
+ - name: opendev.org/openstack/requirements
+ override-checkout: master
- opendev.org/openstack/swift
- - opendev.org/openstack/keystone
+ - name: opendev.org/openstack/keystone
+ override-checkout: master
+ - name: opendev.org/openstack/devstack
+ override-checkout: master
timeout: 3600
vars:
tox_constraints_file: '{{ ansible_user_dir }}/src/opendev.org/openstack/requirements/upper-constraints.txt'
@@ -484,6 +505,11 @@
- ^(api-ref|doc|releasenotes)/.*$
- ^test/probe/.*$
- ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
+ - swift-tox-func-losf-py27:
+ irrelevant-files:
+ - ^(api-ref|doc|releasenotes)/.*$
+ - ^test/probe/.*$
+ - ^(.gitreview|.mailmap|AUTHORS|CHANGELOG|.*\.rst)$
# py3 functional tests
- swift-tox-func-py37:
@@ -574,6 +600,7 @@
- swift-tox-func-py27
- swift-tox-func-encryption-py27
- swift-tox-func-ec-py27
+ - swift-tox-func-losf-py27
- swift-tox-func-py37
- swift-tox-func-encryption
- swift-tox-func-ec-py37
@@ -638,3 +665,4 @@
jobs:
- swift-promote-image
- swift-promote-image-py3
+
diff --git a/bin/swift-losf-volume-check b/bin/swift-losf-volume-check
new file mode 100755
index 000000000..0d25b238c
--- /dev/null
+++ b/bin/swift-losf-volume-check
@@ -0,0 +1,440 @@
+#!/usr/bin/env python
+# Copyright (c) 2010-2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Consistency checks between the volumes' actual content and the KV.
+This needs work.
+"""
+import argparse
+import logging.handlers
+import os
+from os.path import basename, dirname, normpath
+
+from swift.obj.rpc_http import RpcError, StatusCode
+import glob
+import sys
+
+import time
+
+from swift.common.storage_policy import get_policy_string
+from swift.obj import vfile
+from swift.obj.fmgr_pb2 import STATE_RW
+from swift.obj.header import read_volume_header, HeaderException, \
+ read_object_header
+from swift.obj.vfile_utils import get_socket_path_from_volume_path, \
+ next_aligned_offset, change_user, get_mountpoint_from_volume_path
+from swift.common.utils import ismount
+from swift.obj import rpc_http as rpc
+
+# Not available in this python version
+SEEK_DATA = 3
+SEEK_HOLE = 4
+
+
+class VolumeCheckException(Exception):
+ def __init__(self, msg, volume_path, *args):
+ self.msg = msg
+ self.volume_path = volume_path
+ super(VolumeCheckException, self).__init__(msg, volume_path, *args)
+
+
+def check_volume_header(volume_path, header, socket_path):
+ if header.state != STATE_RW:
+ err_txt = "volume {} not in state RW. ({})".format(volume_path,
+ header.state)
+ raise VolumeCheckException(err_txt, volume_path)
+
+
+class Vfiles(object):
+ """
+    Yields (header_offset, header) pairs for each vfile header until the
+    end of the file is reached. It expects to find a valid header at the
+    starting offset, and locates the next vfile from the previous header's
+    length field. If that fails (the next file has been deleted), it scans
+    forward for the next object header.
+
+    Starting at the provided offset:
+      - attempt to read a header; yield it if successful, then seek to the
+        current position + object length
+      - if no header is found, first attempt to skip the hole (SEEK_DATA)
+        and try to read a header again
+      - if still no header, seek a further 4k and retry, until a header is
+        found or EOF is reached.
+    Headers are expected to be aligned on 4k boundaries, so we never search
+    for a header where there could be user data. (See the sketch after this
+    class for the alignment and hole-skipping details.)
+ """
+
+ def __init__(self, volume_file, offset):
+ volume_file.seek(0, os.SEEK_END)
+ volume_size = volume_file.tell()
+ self.offset = next_aligned_offset(offset, 4096)
+ self.volume_file = volume_file
+ self.volume_size = volume_size
+ self.next_offset = offset
+ if self.offset >= volume_size:
+ return
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ vh = None
+ seek_data_done = False
+ if self.next_offset >= self.volume_size:
+ raise StopIteration
+
+ self.volume_file.seek(self.next_offset)
+
+ while True:
+ try:
+ header_offset = self.volume_file.tell()
+ vh = read_object_header(self.volume_file)
+ except HeaderException:
+ logger.debug("no vfile at offset {}".format(self.next_offset))
+
+ # found a header
+ if vh:
+ self.next_offset += vh.total_size
+ logger.debug("found a header, set next offset to: {}".format(
+ self.next_offset))
+ # should not happen
+ aligned_next_offset = next_aligned_offset(self.next_offset,
+ 4096)
+ if aligned_next_offset != self.next_offset:
+ logger.warn(
+ "total_size of header not aligned on 4k ({})".format(
+ vh))
+ self.next_offset = aligned_next_offset
+ return header_offset, vh
+
+ if not seek_data_done:
+                # That's ugly: our Python version (2.7.12) does not expose
+                # SEEK_HOLE/SEEK_DATA. Skip holes via lseek() on the
+                # underlying file descriptor, then seek() on the Python file
+                # object so its buffered position stays in sync.
+ logger.debug("SEEK_DATA")
+ try:
+ self.next_offset = next_aligned_offset(
+ os.lseek(self.volume_file.fileno(), self.next_offset,
+ SEEK_DATA), 4096)
+ except OSError:
+                    # lseek() with SEEK_DATA sometimes fails with ENXIO even
+                    # though the offset is greater than zero and smaller than
+                    # the file size. If that happens, fall back to 4k skips.
+ seek_data_done = True
+ continue
+ if self.next_offset >= self.volume_size:
+ raise StopIteration
+ self.volume_file.seek(self.next_offset)
+ seek_data_done = True
+ continue
+
+ logger.debug("SEEK+4k")
+ self.next_offset += 4096
+ if self.next_offset >= self.volume_size:
+ raise StopIteration
+ self.volume_file.seek(self.next_offset)
+ continue
+
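A condensed sketch of the two low-level details the iterator above relies on: 4k alignment of candidate offsets (what `next_aligned_offset` from `swift.obj.vfile_utils` is assumed to do) and skipping holes through the raw file descriptor:

```python
import os

SEEK_DATA = 3  # not exposed by the os module in this Python version


def next_aligned(offset, alignment=4096):
    """Round offset up to the next alignment boundary (no-op if aligned)."""
    remainder = offset % alignment
    return offset + (alignment - remainder) if remainder else offset


def skip_hole(volume_file, offset):
    """Jump past a hole with lseek() on the raw fd, then re-seek the
    Python file object so its buffered position matches the fd."""
    new_offset = next_aligned(
        os.lseek(volume_file.fileno(), offset, SEEK_DATA))
    volume_file.seek(new_offset)
    return new_offset
```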
+
+def register_volume(volume_path, socket_path):
+ with open(volume_path, "rb") as f:
+ vh = read_volume_header(f)
+
+    logger.info(
+        "Registering volume {} in KV. Partition: {} Type: {} "
+        "State: {}".format(volume_path, vh.partition, vh.type, vh.state))
+
+ # Get the next usable offset in the volume
+ f.seek(0, os.SEEK_END)
+ offset = next_aligned_offset(f.tell(), 4096)
+
+ rpc.register_volume(socket_path, vh.partition, vh.type, vh.volume_idx,
+ offset, vh.state, repair_tool=True)
+
+
+def check_volume(volume_path, socket_path=None, force_full_check=False):
+ if not socket_path:
+ socket_path = get_socket_path_from_volume_path(volume_path)
+
+ missing_in_kv = False
+
+ # read volume header
+ with open(volume_path, "rb") as f:
+ vh = read_volume_header(f)
+ # check header
+ check_volume_header(volume_path, vh, socket_path)
+ # get file size
+ # TODO: check the volume size with the header value
+ # curpos = f.tell()
+ f.seek(0, os.SEEK_END)
+ # vol_size = f.tell()
+
+ # check that volume exists
+ try:
+ rpc.get_volume(socket_path, vh.volume_idx, repair_tool=True)
+ except RpcError as e:
+ if e.code == StatusCode.NotFound:
+ txt = "Missing volume: {} in the KV"
+ logger.warn(txt.format(vh.volume_idx))
+ missing_in_kv = True
+ else:
+ logger.exception(e)
+ logger.warn('Error while checking volume entry in KV, exiting')
+ return False
+
+ if missing_in_kv:
+ if args.repair:
+ if not args.no_prompt:
+ if confirm_action("Add missing volume {} to the KV?".format(
+ volume_path)):
+ register_volume(volume_path, socket_path)
+ else:
+ register_volume(volume_path, socket_path)
+ else:
+ raise VolumeCheckException("Volume not in KV", volume_path)
+
+ # TODO: add check for volume state (header vs KV)
+
+ if force_full_check:
+ start_offset = vh.first_obj_offset
+ else:
+ start_offset = rpc.get_next_offset(socket_path, vh.volume_idx,
+ repair_tool=True)
+
+ with open(volume_path, "rb") as f:
+ for offset, header in Vfiles(f, start_offset):
+ logger.debug("start check: {} {}".format(offset, header))
+ objname = "{}{}".format(header.ohash, header.filename)
+ # Get object information from the KV
+ try:
+ obj = rpc.get_object(socket_path, "{}".format(objname),
+ repair_tool=True)
+ except RpcError as e:
+ if e.code == StatusCode.NotFound:
+ handle_obj_missing_in_kv(socket_path, volume_path, header,
+ offset, args)
+ continue
+ else:
+ # TODO: handle this
+ logger.exception(e)
+ except Exception as e:
+ logger.exception(e)
+ continue
+
+ # check header and kv consistency
+ check_header_vs_obj(offset, obj, header, volume_path)
+
+ # check that vfile can be opened and metadata deserialized
+ check_open_vfile(objname, dirname(volume_path), socket_path)
+
+
+def check_open_vfile(name, volume_dir, socket_path):
+ vf = vfile.VFileReader._get_vfile(name, volume_dir, socket_path, logger,
+ repair_tool=True)
+ vf.close()
+
+
+def check_header_vs_obj(file_offset, obj, header, volume_path):
+ # Check offset in file
+ if file_offset != obj.offset:
+ err_txt = "Header/KV inconsistency. Name: {} File offset: {} \
+Position from RPC: {}".format(obj.name, file_offset, obj.offset)
+ logger.warn(err_txt)
+ logger.warn("header: {}".format(header))
+ logger.warn("rpc obj: {}".format(obj))
+        raise VolumeCheckException(err_txt, volume_path)
+
+ volume_file_index = vfile.get_volume_index(basename(volume_path))
+ if volume_file_index != obj.volume_index:
+ txt = "Volume index error, KV volume index: {}, actual index: {}"
+ err_txt = txt.format(obj.volume_index, volume_file_index)
+        raise VolumeCheckException(err_txt, volume_path)
+
+ # Check volume index, Todo
+
+
+def handle_obj_missing_in_kv(socket_path, volume_path, header, offset, args):
+ objname = "{}{}".format(header.ohash, header.filename)
+ txt = "Missing file in the KV. Volume: {}. full name: {}"
+ logger.warn(txt.format(volume_path, objname))
+ txt = "Offset: {}, total length: {}"
+ logger.warn(txt.format(offset, header.total_size))
+ if args.repair:
+ if args.no_prompt:
+ register_object(objname, header, volume_path, offset, socket_path)
+ else:
+ if confirm_action("Add object to the KV?"):
+ register_object(objname, header, volume_path, offset,
+ socket_path)
+
+
+def register_object(objname, header, volume_path, offset, socket_path=None):
+ logger.debug("Registering {}".format(objname))
+ if not socket_path:
+ socket_path = get_socket_path_from_volume_path(volume_path)
+
+ # Notes about register :
+ # - because there is no end marker, we have to trust the header about the
+ # end of the file.
+
+    # TODO: get partition from volume path and check it against the obj hash
+ volume_index = vfile.get_volume_index(basename(volume_path))
+ # absolute object_end (next_offset)
+ object_end = offset + header.total_size
+ try:
+ rpc.register_object(socket_path, objname, volume_index, offset,
+ object_end, repair_tool=True)
+ except RpcError as e:
+ logger.warn("Failed to register object {}".format(objname))
+ logger.exception(e)
+
+
+def confirm_action(message):
+ response = raw_input("{} (y/n)".format(message))
+ if response == "y":
+ return True
+ else:
+ return False
+
+
+log_levels = {
+ "critical": logging.CRITICAL,
+ "error": logging.ERROR,
+ "warning": logging.WARNING,
+ "info": logging.INFO,
+ "debug": logging.DEBUG
+}
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+
+ # log level
+ parser.add_argument("--log_level", help="logging level, defaults to info")
+
+ # check one volume
+ parser.add_argument("--volume", help="path to volume")
+
+ # check all volumes on the disk
+ parser.add_argument("--disk_path", help="/srv/node/disk-xx")
+ parser.add_argument("--policy_idx", help="policy index")
+
+ # by default, we will only check the volume, repair is to create missing
+ # entries
+ help_txt = "creates missing files in the KV"
+ parser.add_argument("--repair", action="store_true", help=help_txt)
+
+ help_txt = "No prompt. In repair mode, do not prompt and take " \
+ "automatic action"
+ parser.add_argument("--no_prompt", action="store_true", help=help_txt)
+
+ # force full check
+ help_txt = "Force check of the whole volume"
+ parser.add_argument("--force_full", action="store_true", default=False,
+ help=help_txt)
+
+ parser.add_argument("--keepuser", action='store_true', default=True,
+ help="Do not attempt to switch to swift user")
+ parser.add_argument("--mount_check", action='store_true', default=False,
+ help="Wait until disk is mounted")
+
+ args = parser.parse_args()
+
+ log_level = "info"
+ if args.log_level:
+ log_level = args.log_level
+
+ logger = logging.getLogger(__name__)
+ logger.setLevel(log_levels[log_level])
+ handler = logging.handlers.SysLogHandler(address='/dev/log')
+ formatter = logging.Formatter('losf.volcheck: %(message)s')
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+
+ if (not args.volume and not args.disk_path) or (args.volume and
+ args.disk_path):
+ parser.print_help()
+ sys.exit(0)
+
+ if not args.keepuser:
+ change_user("swift")
+
+ if args.volume:
+ if args.mount_check:
+ mountpoint = get_mountpoint_from_volume_path(args.volume)
+ while not ismount(mountpoint):
+ logger.info(
+ "Waiting for disk {} to be mounted".format(mountpoint))
+ time.sleep(5)
+
+ socket_path = get_socket_path_from_volume_path(args.volume)
+ if not args.force_full:
+            resp = rpc.get_kv_state(socket_path)
+            if resp.isClean:
+                logger.info(
+                    "LOSF DB {} is clean, skipping".format(socket_path))
+                sys.exit(0)
+ check_volume(args.volume, force_full_check=args.force_full)
+
+ if args.policy_idx and args.disk_path:
+ losf_dir = get_policy_string('losf', args.policy_idx)
+ volume_topdir = os.path.join(args.disk_path, losf_dir, 'volumes')
+ if args.mount_check:
+ mountpoint = dirname(dirname(os.path.normpath(volume_topdir)))
+ while not ismount(mountpoint):
+ logger.debug(
+ "Waiting for disk {} to be mounted".format(mountpoint))
+ time.sleep(1)
+ socket_path = os.path.join(dirname(normpath(volume_topdir)),
+ "rpc.socket")
+ if not args.force_full:
+ resp = rpc.get_kv_state(socket_path)
+ if resp.isClean:
+ logger.info(
+ "LOSF DB {} is clean, skipping".format(socket_path))
+ sys.exit(0)
+
+ lock_pattern = "{}/*.writelock"
+ failed_at_least_once = False
+ for lock_path in glob.iglob(lock_pattern.format(volume_topdir)):
+ volume_path = lock_path.replace(".writelock", "")
+ logger.info("Checking volume {}".format(volume_path))
+ if not os.path.exists(volume_path):
+ logger.warn(
+ "writelock file found but volume does not exist, "
+ "remove it")
+ os.remove(lock_path)
+ continue
+ try:
+ check_volume(volume_path, force_full_check=args.force_full)
+ except Exception as e:
+ logger.warn("check_volume failed on {}".format(volume_path))
+ failed_at_least_once = True
+
+ # Mark kv as clean
+ # FIXME: check failed_at_least_once, and don't mark KV clean if True.
+    # However, if we do that, a single IO error on the drive preventing the
+    # check of one volume makes the whole drive unavailable. This should
+    # probably be more fine-grained (KV available, but keeping a list of
+    # unchecked volumes)
+ if args.repair:
+ socket_path = os.path.join(dirname(normpath(volume_topdir)),
+ "rpc.socket")
+        # TODO: this runs every time, even when no volume needed repair;
+        # change this
+ logger.info("Marking KV as clean ({})".format(socket_path))
+ rpc.set_kv_state(socket_path, True)
diff --git a/bin/swift-mount-losf b/bin/swift-mount-losf
new file mode 100755
index 000000000..f6d2a5633
--- /dev/null
+++ b/bin/swift-mount-losf
@@ -0,0 +1,511 @@
+#!/usr/bin/env python
+# Copyright (c) 2010-2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+View the vfiles as a filesystem, using FUSE.
+Access goes through RPC, so there is no need to stop a running object server
+to inspect files.
+This is meant as a debugging tool.
+"""
+
+from __future__ import with_statement
+
+import os
+import sys
+import errno
+import argparse
+import logging
+import six
+
+from fuse import FUSE, FuseOSError, Operations
+from stat import S_IFDIR, S_IFREG, S_IRUSR, S_IFLNK
+
+from swift.common.storage_policy import POLICIES
+from swift.obj import vfile
+import pickle
+from threading import Lock
+from swift.obj.diskfile import METADATA_KEY, PICKLE_PROTOCOL, get_data_dir
+
+from swift.obj.vfile_utils import SwiftPathInfo
+from swift.obj import rpc_http as rpc
+from swift.common import utils
+from swift.common import manager
+
+
+# Decorator for all operations
+def op(func):
+ def fwrap(*args, **kwargs):
+        logging.debug(
+            "func: {} args: {} kwargs: {}".format(
+                func.func_name, args, kwargs))
+ return func(*args, **kwargs)
+
+ return fwrap
+
+
+def dummy_getattr(path):
+ st_mode = S_IFDIR | S_IRUSR
+ st_size = 4096
+
+ stats = {"st_mode": st_mode, "st_ino": 6434658, "st_dev": 64515L,
+ "st_nlink": 2, "st_uid": 3014, "st_gid": 3014,
+ "st_size": st_size, "st_atime": 1494423623,
+ "st_mtime": 1494423609, "st_ctime": 1494423609}
+ return stats
+
+
+def vfile_getattr(path):
+ st_mode = S_IFREG | S_IRUSR
+ try:
+ vf = vfile.VFileReader.get_vfile(path, logging)
+ st_size = vf._header.data_size
+ vf.close()
+    except vfile.VIOError:
+        # a broken vfile on the volume is signaled as a symlink
+        st_mode = S_IFLNK | S_IRUSR
+        st_size = 0
+
+ stats = {"st_mode": st_mode, "st_ino": 6434658, "st_dev": 64515L,
+ "st_nlink": 2, "st_uid": 3014, "st_gid": 3014,
+ "st_size": st_size, "st_atime": 1494423623,
+ "st_mtime": 1494423609, "st_ctime": 1494423609}
+ return stats
+
+
+def quarantined_vfile_getattr(path):
+ st_mode = S_IFREG | S_IRUSR
+ try:
+ vf = vfile.VFileReader.get_quarantined_vfile(path, logging)
+ st_size = vf._header.data_size
+ vf.close()
+    except vfile.VIOError:
+        # a broken vfile on the volume is signaled as a symlink
+        st_mode = S_IFLNK | S_IRUSR
+        st_size = 0
+
+ stats = {"st_mode": st_mode, "st_ino": 6434658, "st_dev": 64515L,
+ "st_nlink": 2, "st_uid": 3014, "st_gid": 3014,
+ "st_size": st_size, "st_atime": 1494423623,
+ "st_mtime": 1494423609, "st_ctime": 1494423609}
+ return stats
+
+
+class Vfiles(Operations):
+ def __init__(self):
+ self.fd = 0
+ self.file_lock = Lock()
+ # open files
+ self.files = {}
+
+ # known devices
+ self.devices = []
+
+ self.type2functions = {
+ 'abovedeviceroot': {
+ 'listdir': os.listdir,
+ 'getattr': dummy_getattr
+ },
+ 'deviceroot': {
+ 'listdir': self._get_device_root_entries,
+ 'getattr': dummy_getattr,
+ },
+ 'objdir': {
+ 'listdir': vfile.listdir,
+ 'getattr': dummy_getattr,
+ },
+ 'objpartitions': {
+ 'listdir': vfile.listdir,
+ 'getattr': dummy_getattr,
+ },
+ 'objsuffixes': {
+ 'listdir': vfile.listdir,
+ 'getattr': dummy_getattr,
+ },
+ 'objohashes': {
+ 'listdir': vfile.listdir,
+ 'getattr': dummy_getattr,
+ },
+ 'objfiles': {
+ 'listdir': None,
+ 'getattr': vfile_getattr,
+ },
+ 'quardir': {
+ 'listdir': self.quardir_listdir,
+ 'getattr': dummy_getattr,
+ },
+ 'quarobjects': {
+ 'listdir': self.quarobjects_listdir,
+ 'getattr': dummy_getattr,
+ },
+ 'quarohashes': {
+ 'listdir': self.quarohashes_listdir,
+ 'getattr': dummy_getattr,
+ },
+ 'quarfiles': {
+ 'listdir': None,
+ 'getattr': quarantined_vfile_getattr
+ },
+ }
+
+ # read object-server(s) configuration
+ s = manager.Server('object')
+ conf_files = s.conf_files()
+ for conf_file in conf_files:
+ conf = utils.readconf(conf_file)
+ self.devices.append(conf['app:object-server']['devices'])
+ # quick and ugly way to deal with SAIO
+ self.device_depth = len(self.devices[0].split(os.sep))
+
+ self.policies = POLICIES
+
+ def quardir_listdir(self, path):
+ return [get_data_dir(policy) for policy in self.policies]
+
+ def quarobjects_listdir(self, path):
+ # we get a stream from the rpc server, we may have a very large
+ # count of quarantined objects
+ response = vfile.list_quarantine(path)
+ for item in response:
+ yield item.name
+
+ def quarohashes_listdir(self, path):
+ return vfile.list_quarantined_ohash(path)
+
+ def _getdirtype(self, path):
+ path = os.path.normpath(path)
+ ldir = path.split(os.sep)
+
+ # First check for a device root
+ # ugly to deal with SAIO
+ if path.startswith('/srv/node') and \
+ len(ldir) == self.device_depth + 1:
+ return 'deviceroot'
+
+ obj_idx = quar_idx = None
+
+ try:
+ obj_idx = \
+ [i for i, elem in enumerate(ldir) if
+ elem.startswith('objects')][0]
+ except (ValueError, IndexError):
+ pass
+
+ try:
+ quar_idx = \
+ [i for i, elem in enumerate(ldir) if
+ elem == 'quarantined'][0]
+ except (ValueError, IndexError):
+ pass
+
+ if quar_idx:
+ quar_types = {
+ 1: 'quardir',
+ 2: 'quarobjects',
+ 3: 'quarohashes',
+ 4: 'quarfiles'
+ }
+ try:
+ return quar_types[len(ldir[quar_idx:])]
+ except KeyError:
+ return 'unknown'
+ elif obj_idx:
+ obj_types = {
+ 1: 'objdir',
+ 2: 'objpartitions',
+ 3: 'objsuffixes',
+ 4: 'objohashes',
+ 5: 'objfiles'
+ }
+ try:
+ return obj_types[len(ldir[obj_idx:])]
+ except KeyError:
+ return 'unknown'
+ else:
+ return 'abovedeviceroot'
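For reference, here is how the dispatch above is expected to classify paths, assuming a device root at `/srv/node/sda` (all paths, hashes and partition numbers are illustrative):

```python
HASH = 'd41d8cd98f00b204e9800998ecf8427e'  # example object hash

EXPECTED_TYPES = {
    '/srv/node': 'abovedeviceroot',
    '/srv/node/sda': 'deviceroot',
    '/srv/node/sda/objects': 'objdir',
    '/srv/node/sda/objects/1024': 'objpartitions',
    '/srv/node/sda/objects/1024/f00': 'objsuffixes',
    '/srv/node/sda/objects/1024/f00/' + HASH: 'objohashes',
    '/srv/node/sda/objects/1024/f00/' + HASH + '/1557.data': 'objfiles',
    '/srv/node/sda/quarantined': 'quardir',
    '/srv/node/sda/quarantined/objects': 'quarobjects',
    '/srv/node/sda/quarantined/objects/' + HASH: 'quarohashes',
    '/srv/node/sda/quarantined/objects/' + HASH + '/1557.data': 'quarfiles',
}
```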
+
+ def _isinkv(self, path):
+ """
+        We look up in the KV if we're in an "objects*" directory, directly
+        below the device dir
+ """
+ path = os.path.normpath(path)
+ ldir = path.split(os.sep)
+ if len(ldir) > 4 and \
+ any([p for p in self.devices if path.startswith(p)]) and \
+ ldir[4].startswith('objects'):
+ return True
+ return False
+
+ def _isindeviceroot(self, path):
+ """
+ Are we in /srv/node/disk*
+ """
+ path = os.path.normpath(path)
+ ldir = path.split(os.sep)
+ if len(ldir) == 4 and \
+ any([p for p in self.devices if path.startswith(p)]):
+ return True
+ return False
+
+ def _get_device_root_entries(self, path):
+ """
+ Removes "losf*" path, replaces them with "objects*", if an rpc.socket
+ is found
+ """
+ dirents = os.listdir(path)
+ dirents_ret = ['.', '..']
+ for entry in dirents:
+ if entry.startswith('losf'):
+ socket = os.path.join(path, entry, 'rpc.socket')
+ if os.path.exists(socket):
+ dirents_ret.append(entry.replace('losf', 'objects'))
+ else:
+ dirents_ret.append(entry)
+
+ dirents_ret.append('quarantined')
+ return dirents_ret
+
+ @op
+ def access(self, path, mode):
+ return
+ # if not os.access(full_path, mode):
+ # raise FuseOSError(errno.EACCES)
+
+ @op
+ def getattr(self, path, fh=None):
+ """
+ Very basic getattr. Everything above the object hash directory is set
+ as a directory. For vfiles, the size is set correctly from the header.
+ This must succeed even if get_vfile fails (for example, bad header in
+ volume). Otherwise we can't delete the file from the KV.
+
+ For now, set size to 0 if we fail.
+ """
+ path = os.path.normpath(path)
+
+ etype = self._getdirtype(path)
+ return self.type2functions[etype]['getattr'](path)
+
+ @op
+ def readdir(self, path, fh):
+ path = os.path.normpath(path)
+
+ etype = self._getdirtype(path)
+        logging.debug(etype)
+ dirents = ['.', '..']
+ dirents.extend(self.type2functions[etype]['listdir'](path))
+
+ for r in dirents:
+ yield r
+
+ @op
+ def readlink(self, path):
+ raise FuseOSError(errno.ENOTSUP)
+
+ @op
+ def mknod(self, path, mode, dev):
+ raise FuseOSError(errno.ENOTSUP)
+
+ @op
+ def rmdir(self, path):
+ raise FuseOSError(errno.ENOTSUP)
+
+ @op
+ def mkdir(self, path, mode):
+ raise FuseOSError(errno.ENOTSUP)
+
+ @op
+ def statfs(self, path):
+ stv = os.statvfs('/')
+ return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
+ 'f_blocks', 'f_bsize',
+ 'f_favail', 'f_ffree',
+ 'f_files', 'f_flag',
+ 'f_frsize',
+ 'f_namemax'))
+
+ @op
+ def unlink(self, path):
+ if unlink_func:
+ unlink_func(path)
+ else:
+ raise FuseOSError(errno.ENOTSUP)
+
+ @op
+ def symlink(self, name, target):
+ raise FuseOSError(errno.ENOTSUP)
+
+ @op
+ def rename(self, old, new):
+ raise FuseOSError(errno.ENOTSUP)
+
+ @op
+ def link(self, target, name):
+ raise FuseOSError(errno.ENOTSUP)
+
+ @op
+ def utimens(self, path, times=None):
+ raise FuseOSError(errno.ENOTSUP)
+
+ # provide read-only access exclusively, for now.
+ @op
+ def open(self, path, flags):
+ with self.file_lock:
+ self.fd += 1
+ if 'quarantined' in path:
+ vf = vfile.VFileReader.get_quarantined_vfile(path, logging)
+ else:
+ vf = vfile.VFileReader.get_vfile(path, logging)
+        self.files[self.fd] = vf
+ return self.fd
+
+ @op
+ def create(self, path, mode, fi=None):
+ print("create unsupported")
+ raise FuseOSError(errno.ENOTSUP)
+
+ @op
+ def read(self, path, length, offset, fh):
+ vf = self.files[fh]
+        vf.seek(offset)
+ return vf.read(length)
+
+ @op
+ def write(self, path, buf, offset, fh):
+ print("write unsupported")
+ raise FuseOSError(errno.ENOTSUP)
+
+ @op
+ def truncate(self, path, length, fh=None):
+ print("truncate unsupported")
+ raise FuseOSError(errno.ENOTSUP)
+
+ @op
+ def release(self, path, fh):
+ self.files[fh].close()
+ del self.files[fh]
+ return
+
+ @op
+ def listxattr(self, path):
+ # swift tools expect to find attributes in a pickled string,
+ # replicate that
+ return [METADATA_KEY]
+
+ @op
+ def getxattr(self, path, name, position=0):
+ # see comment in listxattr
+ if name != METADATA_KEY:
+ raise FuseOSError(errno.ENODATA)
+
+ vf = vfile.VFileReader.get_vfile(path, logging)
+ metastr = pickle.dumps(_encode_metadata(vf.metadata), PICKLE_PROTOCOL)
+ return metastr
+
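Because `getxattr` replicates Swift's pickled-metadata xattr, existing tools can read object metadata straight off the mount. A hedged sketch, assuming the third-party `xattr` package and an illustrative mount path:

```python
import pickle
import xattr  # third-party package, assumed installed

# METADATA_KEY is 'user.swift.metadata' in swift.obj.diskfile
path = '/mnt/losf/srv/node/sda/objects/0/f00/hash/1557.data'  # illustrative
raw = xattr.getxattr(path, 'user.swift.metadata')
metadata = pickle.loads(raw)
print(metadata.get('name'), metadata.get('Content-Length'))
```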
+
+def _encode_metadata(metadata):
+ """
+ UTF8 encode any unicode keys or values in given metadata dict.
+
+ :param metadata: a dict
+ """
+
+ def encode_str(item):
+ if isinstance(item, six.text_type):
+ return item.encode('utf8')
+ return item
+
+ return dict(((encode_str(k), encode_str(v)) for k, v in metadata.items()))
+
+
+def delete_vfile_from_kv(path):
+ """
+ Deletes a vfile *from the KV only*.
+ This bypasses the vfile module and calls the RPC directly.
+ """
+ logging.info("delete vfile from KV")
+ si = SwiftPathInfo.from_path(path)
+ full_name = si.ohash + si.filename
+ # obj = rpc.get_object(si.socket_path, full_name)
+ rpc.unregister_object(si.socket_path, full_name)
+
+
+def delete_vfile(path):
+ """
+ Deletes a vfile from the volume and KV
+ :param path: path to the vfile
+ :return:
+ """
+ logging.info("delete vfile")
+ vfile.delete_vfile_from_path(path)
+
+
+def main(mountpoint):
+ FUSE(Vfiles(), mountpoint, nothreads=True, foreground=True, debug=False)
+
+
+log_levels = {
+ "critical": logging.CRITICAL,
+ "error": logging.ERROR,
+ "warning": logging.WARNING,
+ "info": logging.INFO,
+ "debug": logging.DEBUG
+}
+
+# Dangerous callbacks
+unlink_func = None
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+
+ # log level
+ parser.add_argument("--log_level", help="logging level, defaults to info")
+
+ # mount dir
+ parser.add_argument("--mount_dir",
+ help="directory on which to mount the filesystem")
+
+ # By default, only read access is provided. Options below will let you
+ # modify the KV content
+ help_txt = "DANGEROUS - enable unlinking of files."
+ unlink_funcs = {"delete_vfile": delete_vfile,
+ "delete_vfile_from_kv": delete_vfile_from_kv}
+ unlink_choices = unlink_funcs.keys()
+ parser.add_argument("--unlink_function", choices=unlink_choices,
+ help=help_txt)
+
+ args = parser.parse_args()
+
+ log_level = "info"
+ if args.log_level:
+ log_level = args.log_level
+
+ logging.basicConfig(level=log_levels[log_level])
+
+ if not args.mount_dir:
+ parser.print_help()
+ sys.exit(0)
+
+ if args.unlink_function:
+ unlink_func = unlink_funcs[args.unlink_function]
+ logging.critical(
+ "Enabled vfile deletion ({})".format(args.unlink_function))
+
+ main(args.mount_dir)
diff --git a/bin/swift-object-rpcmanager b/bin/swift-object-rpcmanager
new file mode 100755
index 000000000..b1b13d545
--- /dev/null
+++ b/bin/swift-object-rpcmanager
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# Copyright (c) 2010-2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from swift.obj.objectrpcmanager import ObjectRpcManager
+from swift.common.utils import parse_options
+from swift.common.daemon import run_daemon
+from optparse import OptionParser
+
+if __name__ == '__main__':
+ parser = OptionParser("%prog CONFIG [options]")
+ conf_file, options = parse_options(parser=parser, once=True)
+ run_daemon(ObjectRpcManager, conf_file, section_name='object-rpcmanager',
+ **options)
diff --git a/doc/saio/swift/object-server/1.conf b/doc/saio/swift/object-server/1.conf
index 243722394..e869dd4b7 100644
--- a/doc/saio/swift/object-server/1.conf
+++ b/doc/saio/swift/object-server/1.conf
@@ -30,3 +30,7 @@ rsync_module = {replication_ip}::object{replication_port}
[object-updater]
[object-auditor]
+
+[object-rpcmanager]
+volcheck = /usr/local/bin/swift-losf-volume-check
+losf_bin = /usr/local/bin/swift-rpc-losf
diff --git a/doc/saio/swift/object-server/2.conf b/doc/saio/swift/object-server/2.conf
index 647afda41..c7b0c5054 100644
--- a/doc/saio/swift/object-server/2.conf
+++ b/doc/saio/swift/object-server/2.conf
@@ -30,3 +30,7 @@ rsync_module = {replication_ip}::object{replication_port}
[object-updater]
[object-auditor]
+
+[object-rpcmanager]
+volcheck = /usr/local/bin/swift-losf-volume-check
+losf_bin = /usr/local/bin/swift-rpc-losf
diff --git a/doc/saio/swift/object-server/3.conf b/doc/saio/swift/object-server/3.conf
index 563025761..0720ac876 100644
--- a/doc/saio/swift/object-server/3.conf
+++ b/doc/saio/swift/object-server/3.conf
@@ -30,3 +30,7 @@ rsync_module = {replication_ip}::object{replication_port}
[object-updater]
[object-auditor]
+
+[object-rpcmanager]
+volcheck = /usr/local/bin/swift-losf-volume-check
+losf_bin = /usr/local/bin/swift-rpc-losf
diff --git a/doc/saio/swift/object-server/4.conf b/doc/saio/swift/object-server/4.conf
index 0ab17690d..af5a57456 100644
--- a/doc/saio/swift/object-server/4.conf
+++ b/doc/saio/swift/object-server/4.conf
@@ -30,3 +30,7 @@ rsync_module = {replication_ip}::object{replication_port}
[object-updater]
[object-auditor]
+
+[object-rpcmanager]
+volcheck = /usr/local/bin/swift-losf-volume-check
+losf_bin = /usr/local/bin/swift-rpc-losf
diff --git a/etc/object-server.conf-sample b/etc/object-server.conf-sample
index cbef1be1b..330cd4b5e 100644
--- a/etc/object-server.conf-sample
+++ b/etc/object-server.conf-sample
@@ -538,6 +538,14 @@ use = egg:swift#recon
#
# Note: Put it at the beginning of the pipeline to profile all middleware. But
# it is safer to put this after healthcheck.
+
+# TODO: think about the daemon name; perhaps we want to call it
+# index-server?
+[object-rpcmanager]
+# TODO: check whether we can configure logging options (e.g. log_name)
+volcheck = /usr/local/bin/swift-losf-volume-check
+losf_bin = /usr/local/bin/swift-rpc-losf
+
[filter:xprofile]
use = egg:swift#xprofile
# This option enable you to switch profilers which should inherit from python
diff --git a/go/swift-rpc-losf/Makefile b/go/swift-rpc-losf/Makefile
new file mode 100644
index 000000000..b3c365c48
--- /dev/null
+++ b/go/swift-rpc-losf/Makefile
@@ -0,0 +1,49 @@
+# Copyright (c) 2010-2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.PHONY: all build-snappy install-snappy build-leveldb install-leveldb install-go-leveldb build-losf
+all: build-snappy install-snappy build-leveldb install-leveldb install-go-leveldb build-losf
+
+build-losf:
+	# this installs protoc-gen-go in $HOME/go/bin, and requires PATH to be set accordingly
+ go get -u github.com/golang/protobuf/protoc-gen-go
+ protoc -I ../../swift/obj fmgr.proto --go_out=proto
+ go get
+ go build -o ../../bin/swift-rpc-losf
+
+# TODO: installation will be handled by setup.py once swift-rpc-losf is
+# included in the bin/ directory
+
+build-snappy:
+ git submodule update --init snappy
+ sed -i 's/\(BUILD_SHARED_LIBS "Build.*\) OFF/\1 ON/' snappy/CMakeLists.txt
+ mkdir -p snappy/build
+ cmake -S snappy -B snappy/build
+ $(MAKE) -C snappy/build all
+
+install-snappy:
+ sudo $(MAKE) -C snappy/build install
+
+build-leveldb:
+ git submodule update --init leveldb
+ mkdir -p leveldb/build
+ cmake -DBUILD_SHARED_LIBS=ON -S leveldb -B leveldb/build
+ cmake --build leveldb/build
+
+install-leveldb:
+ sudo $(MAKE) -C leveldb/build install
+
+install-go-leveldb:
+ CGO_CFLAGS=/usr/local/include CGO_LDFLAGS="-L/usr/local/lib -Wl,-rpath=/usr/local/lib" go get github.com/jmhodges/levigo
diff --git a/go/swift-rpc-losf/README.md b/go/swift-rpc-losf/README.md
new file mode 100644
index 000000000..3cb8d5a28
--- /dev/null
+++ b/go/swift-rpc-losf/README.md
@@ -0,0 +1,20 @@
+This is the RPC server part of the "LOSF" (Lots Of Small Files) work.
+
+Setup
+=====
+You will need a working Go environment, gcc and build tools (build-essential), and CMake >= 3.9 (Ubuntu 16.04 ships a version that is too old; get a newer one from cmake.org).
+
+Go >= 1.11 is required to support Go modules (https://github.com/golang/go/wiki/Modules).
+
+Run the `make` command in this directory:
+```
+make
+sudo make install
+```
+
+Usage (OUTDATED)
+=====
+Currently it does not read the ring; you need to start one process per disk and per policy on your object server.
+For example: `swift-rpc-losf -diskPath=/srv/node/sda -policyIdx=0 -waitForMount=false`
+
+Note that a new database is marked dirty, because there may already be data on disk (the original DB may have been removed or corrupted).
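On the Python side, Swift reaches this server through a per-disk unix socket (`<disk>/losf/rpc.socket`). A minimal sketch using the client module from this change (the socket path is illustrative):

```python
from swift.obj import rpc_http as rpc

socket_path = '/srv/node/sda/losf/rpc.socket'  # per-disk, policy 0
state = rpc.get_kv_state(socket_path)
if not state.isClean:
    print('KV is dirty; run swift-losf-volume-check before trusting it')
```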
diff --git a/go/swift-rpc-losf/codes/codes.go b/go/swift-rpc-losf/codes/codes.go
new file mode 100644
index 000000000..c7d6aaed9
--- /dev/null
+++ b/go/swift-rpc-losf/codes/codes.go
@@ -0,0 +1,17 @@
+package codes
+
+type StatusCode int
+
+//go:generate stringer -type=StatusCode
+const (
+ Ok StatusCode = 200
+ Cancelled StatusCode = 299
+ InvalidArgument StatusCode = 400
+ NotFound StatusCode = 404
+ AlreadyExists StatusCode = 409
+ PermissionDenied StatusCode = 403
+ FailedPrecondition StatusCode = 412
+ Unimplemented StatusCode = 501
+ Internal StatusCode = 500
+ Unavailable StatusCode = 503
+)
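These codes deliberately mirror HTTP status numbers, and the Python client surfaces them via `RpcError.code`. A short sketch of typical handling on the client side, following the pattern used by `swift-losf-volume-check` (the socket path and object name below are illustrative):

```python
from swift.obj.rpc_http import RpcError, StatusCode
from swift.obj import rpc_http as rpc

socket_path = '/srv/node/sda/losf/rpc.socket'          # illustrative
objname = 'd41d8cd98f00b204e9800998ecf8427e1557.data'  # hash + filename

try:
    obj = rpc.get_object(socket_path, objname, repair_tool=True)
except RpcError as e:
    if e.code == StatusCode.NotFound:
        # missing from the KV: a candidate for re-registration
        obj = None
    else:
        raise
```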
diff --git a/go/swift-rpc-losf/db.go b/go/swift-rpc-losf/db.go
new file mode 100644
index 000000000..1a2832c06
--- /dev/null
+++ b/go/swift-rpc-losf/db.go
@@ -0,0 +1,121 @@
+// Copyright (c) 2010-2012 OpenStack Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This is the definition of the key-value interface.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// KV is the interface for operations that must be supported on the key-value store.
+// The namespace is a single byte used as a key prefix for the different types of objects
+// represented in the key-value store (volume, vfile, ...)
+type KV interface {
+ Get(namespace byte, key []byte) ([]byte, error)
+ Put(namespace byte, key, value []byte) error
+ PutSync(namespace byte, key, value []byte) error
+ Delete(namespace byte, key []byte) error
+ NewWriteBatch() WriteBatch
+ NewIterator(namespace byte) Iterator
+ Close()
+}
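Every operation above takes the namespace as a separate byte, and both backends implement it the same way: prepend that byte to the user key. The same transform, sketched in Python (`STATS_PREFIX` is a hypothetical stand-in for the Go-side `statsPrefix` constant, which is defined elsewhere in this server):

```python
def ns_key(namespace, key):
    # Mirror of the Go pattern: buf[0] = namespace; copy(buf[1:], key)
    return bytes(bytearray([namespace])) + key

STATS_PREFIX = 0x73  # hypothetical value
db_state_key = ns_key(STATS_PREFIX, b'dbstate')
```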
+
+// Iterator is the interface for operations that must be supported on the key-value iterator.
+type Iterator interface {
+ SeekToFirst()
+ Seek(key []byte)
+ Next()
+ Key() []byte
+ Value() []byte
+ Valid() bool
+ Close()
+}
+
+// WriteBatch is the interface for operations that must be supported on a "WriteBatch".
+// The key-value used must support a write batch (atomic write of multiple entries)
+type WriteBatch interface {
+ // Put places a key-value pair into the WriteBatch for writing later.
+ Put(namespace byte, key, value []byte)
+ Delete(namespace byte, key []byte)
+
+ // Commit the WriteBatch atomically
+ Commit() error
+ Close()
+}
+
+// Key for the state of the DB. If it has shut down cleanly, the value should be "closed"
+const dbStateKey = "dbstate"
+const closeState = "closed"
+const openState = "opened"
+
+// setKvState will be called on startup and check whether the kv was closed cleanly.
+// It will then mark the db as "opened".
+// This is needed because we write asynchronously to the key-value. After a crash/power loss/OOM kill, the db
+// may not be in sync with the actual state of the volumes.
+func setKvState(kv KV) (isClean bool, err error) {
+ // Check if we stopped cleanly
+ isClean, err = IsDbClean(kv)
+ if err != nil {
+ log.Warn("Could not check if DB is clean")
+ return
+ }
+
+ if isClean {
+ log.Info("DB is clean, set db state to open")
+ err = MarkDbOpened(kv)
+ if err != nil {
+ log.Warn("Failed to mark db as opened when starting")
+ return
+ }
+ }
+
+ return
+}
+
+// IsDbClean will return true if the db has been previously closed properly.
+// This is determined from a specific key in the database that should be set before closing.
+func IsDbClean(kv KV) (isClean bool, err error) {
+ value, err := kv.Get(statsPrefix, []byte(dbStateKey))
+ if err != nil {
+ log.Warn("failed to check kv state")
+ return
+ }
+
+ // if the state is "closed", consider it clean
+ // if the key is missing (new db) consider it dirty. It may have been deleted after a
+ // corruption and we want to rebuild the DB with the existing volumes, not let the cluster
+ // restart from scratch. If it's an actual new machine, the check will do nothing (no existing volumes)
+ if bytes.Equal(value, []byte(closeState)) {
+ isClean = true
+ } else {
+ log.Info(fmt.Sprintf("DB was not closed cleanly, state: %s", value))
+ }
+ return
+}
+
+// MarkDbClosed marks the DB as clean by setting the value of the db state key
+func MarkDbClosed(kv KV) (err error) {
+ err = kv.PutSync(statsPrefix, []byte(dbStateKey), []byte(closeState))
+ return
+}
+
+// MarkDbOpened marks the DB as opened by setting the value of the db state key
+func MarkDbOpened(kv KV) (err error) {
+ err = kv.PutSync(statsPrefix, []byte(dbStateKey), []byte(openState))
+ return
+}
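Taken together, the functions above implement a small open/close protocol. A Python sketch of the same logic, against a hypothetical kv object exposing `get`/`put_sync`:

```python
DB_STATE_KEY = 'dbstate'


def startup(kv):
    # "closed" means the KV matched the volumes at the last shutdown;
    # anything else (including a brand-new DB) is treated as dirty, so
    # that swift-losf-volume-check reconciles the KV with the volumes.
    is_clean = kv.get(DB_STATE_KEY) == 'closed'
    kv.put_sync(DB_STATE_KEY, 'opened')  # synced: must survive a crash
    return is_clean


def shutdown(kv):
    kv.put_sync(DB_STATE_KEY, 'closed')
```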
diff --git a/go/swift-rpc-losf/db_goleveldb.go b/go/swift-rpc-losf/db_goleveldb.go
new file mode 100644
index 000000000..454e5b29c
--- /dev/null
+++ b/go/swift-rpc-losf/db_goleveldb.go
@@ -0,0 +1,230 @@
+// Copyright (c) 2010-2012 OpenStack Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This implements the KV interface using goleveldb, a native golang leveldb package.
+// Its behavior has been adapted to match the levigo behavior.
+
+package main
+
+import (
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+)
+
+type goLevelDB struct {
+ db *leveldb.DB
+ ro *opt.ReadOptions
+ wo *opt.WriteOptions
+}
+
+type levelDBIterator struct {
+ it iterator.Iterator
+ namespace byte
+}
+
+type levelDBWriteBatch struct {
+ wb *leveldb.Batch
+ ldb *goLevelDB
+}
+
+// openGoLevelDb opens or creates the DB.
+// (should use an interface?)
+func openGoLevelDb(path string) (*goLevelDB, error) {
+
+ // TODO check options
+ db, err := leveldb.OpenFile(path, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ ro := &opt.ReadOptions{}
+ wo := &opt.WriteOptions{}
+
+ ldb := goLevelDB{db, ro, wo}
+
+ return &ldb, nil
+}
+
+// Key value operations
+//
+// All operations take a namespace byte to denote the type of object the entry refers to.
+func (ldb *goLevelDB) Get(namespace byte, key []byte) (value []byte, err error) {
+ db := ldb.db
+ ro := ldb.ro
+
+ // Prefix the key with a single byte (namespace)
+ buf := make([]byte, len(key)+1)
+ buf[0] = namespace
+ copy(buf[1:], key)
+
+ value, err = db.Get(buf, ro)
+ // Behave similarly to levigo
+ if err == leveldb.ErrNotFound {
+ value = nil
+ err = nil
+ }
+ return
+}
+
+func (ldb *goLevelDB) Put(namespace byte, key, value []byte) error {
+ db := ldb.db
+ wo := ldb.wo
+
+ // Prefix the key with a single byte (namespace)
+ buf := make([]byte, len(key)+1)
+ buf[0] = namespace
+ copy(buf[1:], key)
+
+ return db.Put(buf, value, wo)
+}
+
+// PutSync will write an entry with the "Sync" option set
+func (ldb *goLevelDB) PutSync(namespace byte, key, value []byte) error {
+ db := ldb.db
+ wo := &opt.WriteOptions{Sync: true}
+
+ // Prefix the key with a single byte (namespace)
+ buf := make([]byte, len(key)+1)
+ buf[0] = namespace
+ copy(buf[1:], key)
+
+ return db.Put(buf, value, wo)
+}
+
+func (ldb *goLevelDB) Close() {
+ ldb.db.Close()
+}
+
+func (ldb *goLevelDB) Delete(namespace byte, key []byte) error {
+ db := ldb.db
+ wo := ldb.wo
+
+ // Prefix the key with a single byte (namespace)
+ buf := make([]byte, len(key)+1)
+ buf[0] = namespace
+ copy(buf[1:], key)
+
+ return db.Delete(buf, wo)
+}
+
+func (ldb *goLevelDB) NewWriteBatch() WriteBatch {
+ lwb := &levelDBWriteBatch{}
+ lwb.wb = new(leveldb.Batch)
+ lwb.ldb = ldb
+ return lwb
+}
+
+// Put on a WriteBatch
+func (lwb *levelDBWriteBatch) Put(namespace byte, key, value []byte) {
+ buf := make([]byte, len(key)+1)
+ buf[0] = namespace
+ copy(buf[1:], key)
+
+ lwb.wb.Put(buf, value)
+ return
+}
+
+// Delete on a WriteBatch
+func (lwb *levelDBWriteBatch) Delete(namespace byte, key []byte) {
+ buf := make([]byte, len(key)+1)
+ buf[0] = namespace
+ copy(buf[1:], key)
+
+ lwb.wb.Delete(buf)
+ return
+}
+
+// Commit a WriteBatch
+func (lwb *levelDBWriteBatch) Commit() (err error) {
+ db := lwb.ldb.db
+ wo := lwb.ldb.wo
+ wb := lwb.wb
+
+ err = db.Write(wb, wo)
+
+ return
+}
+
+// Close a WriteBatch
+func (lwb *levelDBWriteBatch) Close() {
+ // TODO: check if there really is nothing to do
+}
+
+// Iterator functions
+//
+// NewIterator creates a new iterator for the given object type (namespace)
+func (ldb *goLevelDB) NewIterator(namespace byte) Iterator {
+ db := ldb.db
+ ro := ldb.ro
+
+ // Could use the "range" thing in this library
+ lit := &levelDBIterator{}
+ lit.it = db.NewIterator(nil, ro)
+ lit.namespace = namespace
+ return lit
+}
+
+// SeekToFirst will seek to the first object of the given type
+func (lit *levelDBIterator) SeekToFirst() {
+ // The "first key" is the first one in the iterator's namespace
+ buf := make([]byte, 1)
+ buf[0] = lit.namespace
+
+ lit.it.Seek(buf)
+ return
+}
+
+// Seek moves the iterator to the position of the key
+func (lit *levelDBIterator) Seek(key []byte) {
+ // Prefix the key with a single byte (namespace)
+ buf := make([]byte, len(key)+1)
+ buf[0] = lit.namespace
+ copy(buf[1:], key)
+
+ lit.it.Seek(buf)
+ return
+}
+
+// Next moves the iterator to the next key
+func (lit *levelDBIterator) Next() {
+ lit.it.Next()
+ return
+}
+
+// Key returns the key (without the leading namespace byte)
+func (lit *levelDBIterator) Key() (key []byte) {
+ return lit.it.Key()[1:]
+}
+
+// Value returns the value at the current iterator position
+func (lit *levelDBIterator) Value() (key []byte) {
+ return lit.it.Value()
+}
+
+// Valid returns false once the iterator has moved outside its namespace
+func (lit *levelDBIterator) Valid() bool {
+	key := lit.it.Key()
+	return len(key) > 0 && key[0] == lit.namespace
+}
+
+// Close the iterator
+func (lit *levelDBIterator) Close() {
+ lit.it.Release()
+ return
+}
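The iterator contract is identical in the levigo backend below: seek to the one-byte namespace prefix, then stop as soon as a key no longer starts with it. In Python terms (a sketch over a hypothetical iterator exposing `seek`/`valid`/`key`/`value`/`next`):

```python
def iter_namespace(it, namespace):
    # Mirrors SeekToFirst() plus the Valid() namespace check above.
    prefix = bytes(bytearray([namespace]))
    it.seek(prefix)
    while it.valid() and it.key()[:1] == prefix:
        yield it.key()[1:], it.value()  # strip the namespace byte
        it.next()
```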
diff --git a/go/swift-rpc-losf/db_leveldb.go b/go/swift-rpc-losf/db_leveldb.go
new file mode 100644
index 000000000..4d3a35a82
--- /dev/null
+++ b/go/swift-rpc-losf/db_leveldb.go
@@ -0,0 +1,239 @@
+// Copyright (c) 2010-2012 OpenStack Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This implements the KV interface using levigo, which is a golang wrapper around the leveldb C++ library.
+
+package main
+
+import (
+ "github.com/jmhodges/levigo"
+)
+
+// levigoDB holds the leveldb handle and options
+type levigoDB struct {
+ db *levigo.DB
+ ro *levigo.ReadOptions
+ wo *levigo.WriteOptions
+}
+
+// levigoIterator wraps a levelDB iterator. The namespace byte is used to specify which type of
+// entry (volume, vfile..) it will iterate on.
+type levigoIterator struct {
+ it *levigo.Iterator
+ namespace byte
+}
+
+// levigoWriteBatch wraps a levigoDB WriteBatch
+type levigoWriteBatch struct {
+ wb *levigo.WriteBatch
+ ldb *levigoDB
+}
+
+// openLevigoDB opens or creates the DB.
+// (should use an interface?)
+func openLevigoDB(path string) (*levigoDB, error) {
+
+ opts := levigo.NewOptions()
+ // filter := levigo.NewBloomFilter(10)
+ // opts.SetFilterPolicy(filter)
+	// This may be useless, since the working set is supposed to fit in memory; 10MB for now
+ opts.SetCache(levigo.NewLRUCache(10 * 1048576))
+ opts.SetCreateIfMissing(true)
+
+ // This will open or create the DB. A new DB is not marked as clean. It
+ // may have been lost or deleted, while there is data in volumes on-disk.
+ // A new DB will have to be checked and marked as clean.
+ db, err := levigo.Open(path, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ ro := levigo.NewReadOptions()
+ wo := levigo.NewWriteOptions()
+
+ ldb := levigoDB{db, ro, wo}
+
+ return &ldb, nil
+}
+
+// Key value operations
+//
+// All operations take a namespace byte to denote the type of object the entry refers to.
+// Get wraps levigoDB Get
+func (ldb *levigoDB) Get(namespace byte, key []byte) (value []byte, err error) {
+ db := ldb.db
+ ro := ldb.ro
+
+ // Prefix the key with a single byte (namespace)
+ buf := make([]byte, len(key)+1)
+ buf[0] = namespace
+ copy(buf[1:], key)
+
+ value, err = db.Get(ro, buf)
+ return
+}
+
+// Put wraps levigoDB Put
+func (ldb *levigoDB) Put(namespace byte, key, value []byte) error {
+ db := ldb.db
+ wo := ldb.wo
+
+ // Prefix the key with a single byte (namespace)
+ buf := make([]byte, len(key)+1)
+ buf[0] = namespace
+ copy(buf[1:], key)
+
+ return db.Put(wo, buf, value)
+}
+
+// PutSync will write an entry with the "Sync" option set
+func (ldb *levigoDB) PutSync(namespace byte, key, value []byte) error {
+ db := ldb.db
+ wo := levigo.NewWriteOptions()
+ wo.SetSync(true)
+
+ // Prefix the key with a single byte (namespace)
+ buf := make([]byte, len(key)+1)
+ buf[0] = namespace
+ copy(buf[1:], key)
+
+ return db.Put(wo, buf, value)
+}
+
+// Close wraps levigoDB Close
+func (ldb *levigoDB) Close() {
+ ldb.db.Close()
+}
+
+// Delete wraps levigoDB Delete
+func (ldb *levigoDB) Delete(namespace byte, key []byte) error {
+ db := ldb.db
+ wo := ldb.wo
+
+ // Prefix the key with a single byte (namespace)
+ buf := make([]byte, len(key)+1)
+ buf[0] = namespace
+ copy(buf[1:], key)
+
+ return db.Delete(wo, buf)
+}
+
+// NewWriteBatch creates a new WriteBatch
+func (ldb *levigoDB) NewWriteBatch() WriteBatch {
+ lwb := &levigoWriteBatch{}
+ lwb.wb = levigo.NewWriteBatch()
+ lwb.ldb = ldb
+ return lwb
+}
+
+// Put on a WriteBatch
+func (lwb *levigoWriteBatch) Put(namespace byte, key, value []byte) {
+ buf := make([]byte, len(key)+1)
+ buf[0] = namespace
+ copy(buf[1:], key)
+
+ lwb.wb.Put(buf, value)
+ return
+}
+
+// Delete on a WriteBatch
+func (lwb *levigoWriteBatch) Delete(namespace byte, key []byte) {
+ buf := make([]byte, len(key)+1)
+ buf[0] = namespace
+ copy(buf[1:], key)
+
+ lwb.wb.Delete(buf)
+ return
+}
+
+// Commit a WriteBatch
+func (lwb *levigoWriteBatch) Commit() (err error) {
+ db := lwb.ldb.db
+ wo := lwb.ldb.wo
+ wb := lwb.wb
+
+ err = db.Write(wo, wb)
+
+ return
+}
+
+// Close a WriteBatch
+func (lwb *levigoWriteBatch) Close() {
+ wb := lwb.wb
+
+ wb.Close()
+}
+
+// Iterator functions
+//
+// NewIterator creates a new iterator for the given object type (namespace)
+func (ldb *levigoDB) NewIterator(namespace byte) Iterator {
+ lit := &levigoIterator{}
+ lit.it = ldb.db.NewIterator(ldb.ro)
+ lit.namespace = namespace
+ return lit
+}
+
+// SeekToFirst will seek to the first object of the given type
+func (lit *levigoIterator) SeekToFirst() {
+ // The "first key" is the first one in the iterator's namespace
+ buf := make([]byte, 1)
+ buf[0] = lit.namespace
+
+ lit.it.Seek(buf)
+ return
+}
+
+// Seek moves the iterator to the position of the key
+func (lit *levigoIterator) Seek(key []byte) {
+ // Prefix the key with a single byte (namespace)
+ buf := make([]byte, len(key)+1)
+ buf[0] = lit.namespace
+ copy(buf[1:], key)
+
+ lit.it.Seek(buf)
+ return
+}
+
+// Next moves the iterator to the next key
+func (lit *levigoIterator) Next() {
+ lit.it.Next()
+ return
+}
+
+// Key returns the key (without the leading namespace byte)
+func (lit *levigoIterator) Key() (key []byte) {
+ return lit.it.Key()[1:]
+}
+
+// Value returns the value at the current iterator position
+func (lit *levigoIterator) Value() (key []byte) {
+ return lit.it.Value()
+}
+
+// Valid returns false once the iterator has moved outside its namespace
+func (lit *levigoIterator) Valid() bool {
+	return lit.it.Valid() && lit.it.Key()[0] == lit.namespace
+}
+
+// Close the iterator
+func (lit *levigoIterator) Close() {
+ lit.it.Close()
+ return
+}
diff --git a/go/swift-rpc-losf/encoding.go b/go/swift-rpc-losf/encoding.go
new file mode 100644
index 000000000..99c4384b1
--- /dev/null
+++ b/go/swift-rpc-losf/encoding.go
@@ -0,0 +1,198 @@
+// Copyright (c) 2010-2012 OpenStack Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "github.com/sirupsen/logrus"
+)
+
+// EncodeVolumeKey encodes a volume key as an unsigned varint.
+func EncodeVolumeKey(index uint32) (val []byte) {
+ buf := make([]byte, binary.MaxVarintLen32)
+
+ n := binary.PutUvarint(buf, uint64(index))
+
+ val = buf[:n]
+ log.WithFields(logrus.Fields{"value": val}).Debug("encoded volume key")
+ return
+}
+
+func DecodeVolumeKey(val []byte) (index uint32, err error) {
+ index32, n := binary.Uvarint(val)
+ if n <= 0 {
+ err = errors.New("failed to decode index")
+ return
+ }
+
+ index = uint32(index32)
+ return
+}
+
+// EncodeVolumeValue encodes a volume value.
+// volumeType is an int32 to match the type generated by protobuf for enums
+func EncodeVolumeValue(partition int64, volumeType int32, nextOffset, usedSpace, state int64) (val []byte) {
+ buf := make([]byte, binary.MaxVarintLen64*5)
+ bufLen := 0
+
+ n := binary.PutUvarint(buf, uint64(partition))
+ bufLen += n
+
+ n = binary.PutUvarint(buf[bufLen:], uint64(volumeType))
+ bufLen += n
+
+ n = binary.PutUvarint(buf[bufLen:], uint64(nextOffset))
+ bufLen += n
+
+ n = binary.PutUvarint(buf[bufLen:], uint64(usedSpace))
+ bufLen += n
+
+ n = binary.PutUvarint(buf[bufLen:], uint64(state))
+ bufLen += n
+
+ val = buf[:bufLen]
+ log.WithFields(logrus.Fields{"value": val}).Debug("encoded volume value")
+ return
+}
+
+func DecodeVolumeValue(val []byte) (partition int64, volumeType int32, nextOffset, usedSpace, state int64, err error) {
+ position := 0
+
+	partition64, n := binary.Uvarint(val)
+	if n <= 0 {
+		err = errors.New("failed to decode partition")
+		return
+	}
+	position += n
+
+	volumeType64, n := binary.Uvarint(val[position:])
+	if n <= 0 {
+		err = errors.New("failed to decode volumeType")
+		return
+	}
+	position += n
+
+	nextOffset64, n := binary.Uvarint(val[position:])
+	if n <= 0 {
+		err = errors.New("failed to decode nextOffset")
+		return
+	}
+	position += n
+
+	usedSpace64, n := binary.Uvarint(val[position:])
+	if n <= 0 {
+		err = errors.New("failed to decode usedSpace")
+		return
+	}
+	position += n
+
+	state64, n := binary.Uvarint(val[position:])
+	if n <= 0 {
+		err = errors.New("failed to decode state")
+		return
+	}
+
+ partition = int64(partition64)
+ volumeType = int32(volumeType64)
+ nextOffset = int64(nextOffset64)
+ usedSpace = int64(usedSpace64)
+ state = int64(state64)
+ return
+}
+
+// Encodes an object key. The key is the md5 hash string followed by the filename.
+// The 32-character hex hash is packed into a 16-byte array; the filename is left
+// as is for now. We could save more space by encoding the filename (varint timestamp
+// + encoded file extension), but there are special cases to handle (the "delta")
+func EncodeObjectKey(key []byte) ([]byte, error) {
+ var err error
+
+ if len(key) < 32 {
+ err = errors.New("object key len < 32, cannot encode")
+ return nil, err
+ }
+
+ dst := make([]byte, 16+len(key[32:]))
+ n, err := hex.Decode(dst, key[:32])
+ if err != nil {
+ err = errors.New("failed to encode object hash")
+ return dst, err
+ }
+
+ if n != 16 {
+ err = errors.New("encoded object hash is not 16 bytes long")
+ return dst, err
+ }
+
+ // copy the filename
+ copy(dst[16:], key[32:])
+
+ return dst, err
+}
+
+// DecodeObjectKey decodes an object key into a caller-supplied buffer.
+// This is the most frequently called function in the project. Profiling showed
+// it did the most heap allocations (after cgo, which deserves a separate look),
+// so the caller now provides the output buffer.
+// decodedKey must be at least 32+len(encodedKey[16:]) bytes, because the 16-byte
+// hash is decoded to a 32-byte hex string, with the rest copied unchanged.
+func DecodeObjectKey(encodedKey []byte, decodedKey []byte) error {
+ if len(encodedKey) < 16 {
+ err := errors.New("DecodeObjectKey called with encodedKey of len < 16")
+ return err
+ }
+ if len(decodedKey) < 32+len(encodedKey[16:]) {
+ err := errors.New("DecodeObjectKey called with decodedKey too small")
+ return err
+ }
+
+ hex.Encode(decodedKey, encodedKey[:16])
+ copy(decodedKey[32:], encodedKey[16:])
+
+ return nil
+}
+
+// Encodes an object file value.
+func EncodeObjectValue(volumeIndex uint32, offset uint64) (val []byte) {
+ buf := make([]byte, binary.MaxVarintLen64*2)
+ bufLen := 0
+
+ n := binary.PutUvarint(buf, uint64(volumeIndex))
+ bufLen += n
+
+	n = binary.PutUvarint(buf[bufLen:], offset)
+ bufLen += n
+
+ val = buf[:bufLen]
+ log.WithFields(logrus.Fields{"value": val}).Debug("encoded object value")
+ return
+}
+
+func DecodeObjectValue(val []byte) (volumeIndex uint32, offset uint64, err error) {
+ log.WithFields(logrus.Fields{"value": val}).Debug("Decode object value")
+ volumeIndex64, n := binary.Uvarint(val)
+	if n <= 0 {
+		log.WithFields(logrus.Fields{"index": n}).Debug("failed to decode volumeIndex")
+		err = errors.New("failed to decode volumeIndex")
+		return
+	}
+
+ offset64, n := binary.Uvarint(val[n:])
+ if n <= 0 {
+ log.WithFields(logrus.Fields{"offset": n}).Debug("failed to decode offset")
+ err = errors.New("failed to decode offset")
+ return
+ }
+
+ volumeIndex = uint32(volumeIndex64)
+ offset = uint64(offset64)
+ return
+}
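
Taken together, the object encoders round-trip as follows. A minimal sketch assuming it lives in this package; exampleRoundTrip, the key literal, and the index/offset values are illustrative only:

    func exampleRoundTrip() {
        // md5 hex hash + filename, the same shape the object server uses
        key := []byte("b80362143ac3221d15a75f4bd1af3fac1484213329.64315.data")

        encKey, err := EncodeObjectKey(key) // 16 raw md5 bytes + filename
        if err != nil {
            panic(err)
        }
        decKey := make([]byte, 32+len(encKey[16:])) // caller supplies the buffer
        if err := DecodeObjectKey(encKey, decKey); err != nil {
            panic(err)
        }

        val := EncodeObjectValue(42, 8192) // volume index, offset within the volume
        idx, off, err := DecodeObjectValue(val)
        if err != nil || idx != 42 || off != 8192 || string(decKey) != string(key) {
            panic("round trip failed")
        }
    }
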
diff --git a/go/swift-rpc-losf/encoding_test.go b/go/swift-rpc-losf/encoding_test.go
new file mode 100644
index 000000000..986a1c903
--- /dev/null
+++ b/go/swift-rpc-losf/encoding_test.go
@@ -0,0 +1,225 @@
+// Copyright (c) 2010-2012 OpenStack Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "testing"
+)
+
+type dfKeyTest struct {
+ index uint32
+ value []byte
+}
+
+func TestVolumeKey(t *testing.T) {
+ var dFKeyTests = []dfKeyTest{
+ {0, []byte("\x00")},
+ {1, []byte("\x01")},
+ {123, []byte("\x7b")},
+ {863523, []byte("\xa3\xda\x34")},
+ {1<<32 - 1, []byte("\xff\xff\xff\xff\x0f")},
+ }
+
+ for _, tt := range dFKeyTests {
+ // Test encoding
+ val := EncodeVolumeKey(tt.index)
+ if !bytes.Equal(val, tt.value) {
+ t.Errorf("For index: %d, got %x, expected %x", tt.index, val, tt.value)
+ }
+
+ // Test decoding
+ index, err := DecodeVolumeKey(val)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if index != tt.index {
+ t.Errorf("For value: %x, got %d, expected %d", val, index, tt.index)
+ }
+
+ }
+
+ // Test overflow
+ m1 := []byte{0x80, 0x80, 0x80, 0x80}
+ _, err := DecodeVolumeKey(m1)
+ if err == nil {
+ t.Errorf("We should fail to decode %x", m1)
+ }
+}
+
+type dfValueTest struct {
+ partition int64
+ volumeType int32
+ nextOffset int64
+ usedSpace int64
+ state int64
+ value []byte
+}
+
+func TestVolumeValue(t *testing.T) {
+ var dfValueTests = []dfValueTest{
+ {0, 0, 0, 0, 0, []byte("\x00\x00\x00\x00\x00")},
+ {1343, 12, 3345314, 9821637, 2, []byte("\xbf\x0a\x0c\xa2\x97\xcc\x01\xc5\xbb\xd7\x04\x02")},
+ {^int64(0), ^int32(0), ^int64(0), ^int64(0), ^int64(0), bytes.Repeat([]byte("\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01"), 5)},
+		// Negative values make no sense and should be caught by the RPC layer.
+		// Test them anyway; they are cast to uint64.
+ {-3572, 12, -1977878, 66666, -999999,
+ []byte("\x8c\xe4\xff\xff\xff\xff\xff\xff\xff\x01\x0c\xea\xa3\x87\xff\xff\xff" +
+ "\xff\xff\xff\x01\xea\x88\x04\xc1\xfb\xc2\xff\xff\xff\xff\xff\xff\x01")},
+ }
+
+ for _, tt := range dfValueTests {
+ // Test encoding
+ val := EncodeVolumeValue(tt.partition, tt.volumeType, tt.nextOffset, tt.usedSpace, tt.state)
+ if !bytes.Equal(val, tt.value) {
+ t.Errorf("For partition: %d, volumeType: %d, nextOffset: %d, usedSpace: %d, state: %d "+
+ "got: %x, expected: %x",
+ tt.partition, tt.volumeType, tt.nextOffset, tt.usedSpace, tt.state, val, tt.value)
+ }
+
+ // Test decoding
+ partition, volumeType, nextOffset, usedSpace, state, err := DecodeVolumeValue(tt.value)
+ if err != nil {
+ t.Error(err)
+ }
+ if partition != tt.partition {
+ t.Errorf("Decoding value: %x, expected: %d, got: %d", tt.value, tt.partition, partition)
+ }
+ if volumeType != tt.volumeType {
+ t.Errorf("Decoding value: %x, expected: %d, got: %d", tt.value, tt.volumeType, volumeType)
+ }
+ if nextOffset != tt.nextOffset {
+ t.Errorf("Decoding value: %x, expected: %d, got: %d", tt.value, tt.nextOffset, nextOffset)
+ }
+ if usedSpace != tt.usedSpace {
+ t.Errorf("Decoding value: %x, expected: %d, got: %d", tt.value, tt.usedSpace, usedSpace)
+ }
+ if state != tt.state {
+ t.Errorf("Decoding value: %x, expected: %d, got: %d", tt.value, tt.state, state)
+ }
+ }
+ // Test overflow
+ m1 := []byte{0x80, 0x80, 0x80, 0x80}
+ _, _, _, _, _, err := DecodeVolumeValue(m1)
+ if err == nil {
+ t.Errorf("We should fail to decode %x", m1)
+ }
+}
+
+type objectKeyTest struct {
+ key []byte
+ value []byte
+}
+
+func TestObjectKey(t *testing.T) {
+ var objectKeyTests = []objectKeyTest{
+ {[]byte("b80362143ac3221d15a75f4bd1af3fac1484213329.64315.data"),
+ []byte("\xb8\x03\x62\x14\x3a\xc3\x22\x1d\x15\xa7\x5f\x4b\xd1\xaf" +
+ "\x3f\xac\x31\x34\x38\x34\x32\x31\x33\x33\x32\x39\x2e\x36\x34" +
+ "\x33\x31\x35\x2e\x64\x61\x74\x61")},
+ {[]byte("a2b98cd26a070c2e5200be1f950813d51494323929.64315.ts"),
+ []byte("\xa2\xb9\x8c\xd2\x6a\x07\x0c\x2e\x52\x00\xbe\x1f\x95\x08" +
+ "\x13\xd5\x31\x34\x39\x34\x33\x32\x33\x39\x32\x39\x2e\x36\x34" +
+ "\x33\x31\x35\x2e\x74\x73")},
+ }
+
+ for _, tt := range objectKeyTests {
+ // Test encoding
+ val, err := EncodeObjectKey(tt.key)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(val, tt.value) {
+ t.Errorf("For key: %x, got %x, expected %x", tt.key, val, tt.value)
+ }
+
+ // Test decoding
+ key := make([]byte, 32+len(val[16:]))
+ err = DecodeObjectKey(val, key)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(key, tt.key) {
+			t.Errorf("For value: %x, got %x, expected %x", val, key, tt.key)
+ }
+
+ }
+
+ // Test encoding invalid object name, too short
+ invalidName := []byte("tooshort")
+ _, err := EncodeObjectKey(invalidName)
+ if err == nil {
+ t.Fatalf("should fail to encode object key: %x", invalidName)
+ }
+
+ // Test encoding invalid object name, bad char
+ invalidName = []byte("badchar\xff6a070c2e5200be1f950813d51494323929.64315.ts")
+ _, err = EncodeObjectKey(invalidName)
+ if err == nil {
+ t.Fatalf("should fail to encode: %x as object key", invalidName)
+ }
+
+ // Test decoding invalid data
+ invalidData := []byte("tooshort")
+ key := make([]byte, 12)
+ err = DecodeObjectKey(invalidData, key)
+ if err == nil {
+ t.Fatalf("should fail to decode: %x as object key", invalidData)
+ }
+
+}
+
+type objectValueTest struct {
+ volumeIndex uint32
+ offset uint64
+ value []byte
+}
+
+func TestObjectValue(t *testing.T) {
+ var objectValueTests = []objectValueTest{
+ {0, 0, []byte("\x00\x00")},
+ {1, 16384, []byte("\x01\x80\x80\x01")},
+ {823762, 61 * 1024 * 1024 * 1024, []byte("\xd2\xa3\x32\x80\x80\x80\x80\xf4\x01")},
+ {1<<32 - 1, 1<<64 - 1, []byte("\xff\xff\xff\xff\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01")},
+ }
+
+ for _, tt := range objectValueTests {
+ // Test encoding
+ val := EncodeObjectValue(tt.volumeIndex, tt.offset)
+ if !bytes.Equal(val, tt.value) {
+			t.Errorf("For volumeIndex: %d, offset: %d, got %x, expected %x", tt.volumeIndex, tt.offset, val, tt.value)
+ }
+
+ // Test decoding
+ volumeIndex, offset, err := DecodeObjectValue(val)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if volumeIndex != tt.volumeIndex {
+ t.Errorf("Decoding value: %x, expected: %d, got: %d", tt.value, tt.volumeIndex, volumeIndex)
+ }
+ if offset != tt.offset {
+ t.Errorf("Decoding value: %x, expected: %d, got: %d", tt.value, tt.offset, offset)
+ }
+ }
+
+ // Test decoding invalid data
+ invalidData := []byte("\xff")
+ _, _, err := DecodeObjectValue(invalidData)
+ if err == nil {
+ t.Fatalf("should fail to decode: %x as object value", invalidData)
+ }
+}
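
The expected byte strings in the tables above are plain Go unsigned varints; one way to regenerate an entry by hand (a standalone illustration, not part of this patch):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        buf := make([]byte, binary.MaxVarintLen64)
        n := binary.PutUvarint(buf, 863523)
        fmt.Printf("%x\n", buf[:n]) // prints a3da34, matching the TestVolumeKey table
    }
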
diff --git a/go/swift-rpc-losf/go.mod b/go/swift-rpc-losf/go.mod
new file mode 100644
index 000000000..b76dcc723
--- /dev/null
+++ b/go/swift-rpc-losf/go.mod
@@ -0,0 +1,27 @@
+module github.com/openstack/swift-rpc-losf
+
+go 1.14
+
+// This file is auto-generated with the following commands:
+// GO111MODULE=on go mod init // must be run under GOPATH
+// GO111MODULE=on go get // can be run anywhere
+
+// TODO: decide whether we need to pin the versions
+
+require (
+ github.com/alecuyer/statsd/v2 v2.0.6
+ github.com/golang/protobuf v1.3.5
+ github.com/golang/snappy v0.0.1 // indirect
+ github.com/jmhodges/levigo v1.0.0
+ github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
+ github.com/kr/pretty v0.1.0 // indirect
+ github.com/onsi/ginkgo v1.10.1 // indirect
+ github.com/onsi/gomega v1.7.0 // indirect
+ github.com/sirupsen/logrus v1.4.2
+ github.com/stretchr/testify v1.4.0 // indirect
+ github.com/syndtr/goleveldb v1.0.0
+ golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 // indirect
+ golang.org/x/sys v0.0.0-20190904005037-43c01164e931
+ golang.org/x/text v0.3.2 // indirect
+ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
+)
diff --git a/go/swift-rpc-losf/go.sum b/go/swift-rpc-losf/go.sum
new file mode 100644
index 000000000..b1a8747d6
--- /dev/null
+++ b/go/swift-rpc-losf/go.sum
@@ -0,0 +1,76 @@
+github.com/alecuyer/statsd/v2 v2.0.6 h1:Zw7MkTocpUgJiiyGK/4r99qV6rFcq3COJJYkyFVvKpo=
+github.com/alecuyer/statsd/v2 v2.0.6/go.mod h1:qNFEkL4GN8jGsZpfrn9P6Q0FfdJf1b1FEGpWnesU4dc=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U=
+github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904005037-43c01164e931 h1:+WYfosiOJzB4BjsISl1Rv4ZLUy+VYcF+u+0Y9jcerv8=
+golang.org/x/sys v0.0.0-20190904005037-43c01164e931/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/go/swift-rpc-losf/leveldb b/go/swift-rpc-losf/leveldb
new file mode 160000
+Subproject commit fe4494804f5e3a2e25485d32aeb0eb7d2f25732
diff --git a/go/swift-rpc-losf/logging.go b/go/swift-rpc-losf/logging.go
new file mode 100644
index 000000000..d30e764db
--- /dev/null
+++ b/go/swift-rpc-losf/logging.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2010-2012 OpenStack Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "github.com/sirupsen/logrus"
+ lSyslog "github.com/sirupsen/logrus/hooks/syslog"
+ "io/ioutil"
+ "log/syslog"
+)
+
+// global logger
+var log = logrus.New()
+
+func setupLogging() {
+ formatter := new(logrus.TextFormatter)
+ formatter.DisableTimestamp = true
+ log.Formatter = formatter
+
+ hook, err := lSyslog.NewSyslogHook("", "", syslog.LOG_INFO, "swift-kv")
+
+ if err != nil {
+ panic("cannot create syslog hook")
+ }
+
+ log.Hooks.Add(hook)
+
+	// Disable default logging; we only want to log through the syslog hook
+ log.Out = ioutil.Discard
+}
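
The rest of the service logs through this package-level logger. A minimal sketch of typical use, mirroring the WithFields calls in encoding.go; exampleLogging and its field names are illustrative only:

    func exampleLogging() {
        setupLogging()
        log.SetLevel(logrus.DebugLevel)
        // Structured fields end up in syslog via the hook installed above.
        log.WithFields(logrus.Fields{"volume": 12, "offset": 4096}).Debug("registered object")
    }
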
diff --git a/go/swift-rpc-losf/main.go b/go/swift-rpc-losf/main.go
new file mode 100644
index 000000000..9bf6489d3
--- /dev/null
+++ b/go/swift-rpc-losf/main.go
@@ -0,0 +1,174 @@
+// Copyright (c) 2010-2012 OpenStack Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// KV store for LOSF
+package main
+
+import (
+ "flag"
+ "fmt"
+ "github.com/sirupsen/logrus"
+ "net/http"
+ _ "net/http/pprof"
+ "os"
+ "os/signal"
+ "path"
+ "syscall"
+ "time"
+)
+
+// Name of the base losf root directory, relative to the swift disk
+const rootDirBase = "losf"
+
+// diskSetup runs checks and creates rootDir if needed.
+// It returns the path to rootDir.
+func diskSetup(diskPath string, policyIdx int, waitForMount bool) (string, error) {
+
+ if waitForMount {
+ log.Debugf("waitForMount is set, if %s is not mounted, will wait until it is", diskPath)
+ }
+ for waitForMount {
+ nowMounted, err := isMounted(diskPath)
+ if err != nil {
+ return "", err
+ }
+ if nowMounted {
+ break
+ }
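+		// Poll every 100ms until the disk is mounted.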
+ time.Sleep(time.Second / 10)
+ }
+
+ // OVH patch to match a similar mechanism in the python code.
+	// If a ".offline" file exists next to the disk directory (/srv/node/disk-XX.offline), do not start
+ offlineFile := fmt.Sprintf("%s%s", diskPath, ".offline")
+ offlineFileExists := false
+ if _, err := os.Stat(offlineFile); err == nil {
+ offlineFileExists = true
+ log.Debugf("offline file exists: %s", offlineFile)
+ }
+	for offlineFileExists {
+		if _, err := os.Stat(offlineFile); os.IsNotExist(err) {
+			offlineFileExists = false
+			continue
+		}
+		time.Sleep(time.Second * 10)
+	}
+
+ rootDir := path.Join(diskPath, getBaseDirName(rootDirBase, policyIdx))
+ log.Debug(rootDir)
+
+ rootDirExists, err := dirExists(rootDir)
+ if err != nil {
+ return "", err
+ }
+ if !rootDirExists {
+ err := os.Mkdir(rootDir, os.FileMode(0700))
+ if err != nil {
+ return "", err
+ }
+ }
+ return rootDir, nil
+}
+
+// main parses options and starts the RPC server.
+func main() {
+ var dbDir, socketPath string
+ var kv KV
+
+ setupLogging()
+
+ debugLevels := map[string]logrus.Level{
+ "panic": logrus.PanicLevel,
+ "fatal": logrus.FatalLevel,
+ "error": logrus.ErrorLevel,
+ "warn": logrus.WarnLevel,
+ "info": logrus.InfoLevel,
+ "debug": logrus.DebugLevel,
+ }
+
+ var diskPath = flag.String("diskPath", "", "Swift disk path (/srv/node/disk-xyz)")
+ var policyIdx = flag.Int("policyIdx", 0, "Policy index")
+ var waitForMount = flag.Bool("waitForMount", true, "Wait for diskPath to be mounted. If diskPath exists but is not a mount, it will wait")
+ var profilerAddr = flag.String("profilerAddr", "", "Start profiler and make it available at this address (127.0.0.1:8081)")
+ var debugLevel = flag.String("debug", "info", "Debug level (error, warn, info, debug)")
+ var allowRoot = flag.Bool("allowRoot", false, "Allow process to run as root")
+ var useGoLevelDB = flag.Bool("useGoLevelDB", false, "Use native golang levelDB package")
+
+ flag.Parse()
+
+	level, ok := debugLevels[*debugLevel]
+	if !ok {
+		log.Fatalf("invalid debug level: %s", *debugLevel)
+	}
+	log.SetLevel(level)
+
+ if !*allowRoot && os.Getuid() == 0 {
+ log.Fatal("running as root, and allowRoot is false")
+ }
+
+ if *diskPath == "" {
+ log.Fatal("diskPath not specified")
+ }
+
+ rootDir, err := diskSetup(*diskPath, *policyIdx, *waitForMount)
+ if err != nil {
+ panic(err)
+ }
+
+ dbDir = path.Join(rootDir, "db")
+ socketPath = path.Join(rootDir, "rpc.socket")
+ rlog := log.WithFields(logrus.Fields{"socket": socketPath})
+
+ // install signal handler
+ stopChan := make(chan os.Signal, 1)
+ signal.Notify(stopChan, os.Interrupt, syscall.SIGTERM)
+
+ // start http server for profiling
+ if *profilerAddr != "" {
+ go func() {
+ rlog.Debug(http.ListenAndServe(*profilerAddr, nil))
+ }()
+ }
+
+ // Acquire lock to protect socket
+ rlog.Debug("Locking socket")
+ err = lockSocket(socketPath)
+ if err != nil {
+ rlog.Fatalf("Failed to lock RPC socket: %s", err)
+ }
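+	// Removing any pre-existing socket file is safe here: the lock above ensures no live process owns it.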
+ os.Remove(socketPath)
+
+ // Open the database
+ if *useGoLevelDB {
+ kv, err = openGoLevelDb(dbDir)
+ } else {
+ kv, err = openLevigoDB(dbDir)
+ }
+
+ if err != nil {
+ rlog.Fatal(err)
+ }
+
+	// Check whether the kv was stopped properly
+ isClean, err := setKvState(kv)
+ if err != nil {
+ rlog.Fatal(err)
+ }
+ log.Infof("kv is clean: %v", isClean)
+
+ // Start the RPC server
+ rlog.Info("Starting RPC server")
+ err = runServer(kv, *diskPath, socketPath, stopChan, isClean)
+ if err != nil {
+ rlog.Fatal(err)
+ }
+}
diff --git a/go/swift-rpc-losf/proto/fmgr.pb.go b/go/swift-rpc-losf/proto/fmgr.pb.go
new file mode 100644
index 000000000..1cb26dc7b
--- /dev/null
+++ b/go/swift-rpc-losf/proto/fmgr.pb.go
@@ -0,0 +1,2213 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: fmgr.proto
+
+package filemgr
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// Enums
+type VolumeType int32
+
+const (
+ VolumeType_VOLUME_DEFAULT VolumeType = 0
+ VolumeType_VOLUME_TOMBSTONE VolumeType = 1
+ VolumeType_VOLUME_X_DELETE_AT VolumeType = 2
+)
+
+var VolumeType_name = map[int32]string{
+ 0: "VOLUME_DEFAULT",
+ 1: "VOLUME_TOMBSTONE",
+ 2: "VOLUME_X_DELETE_AT",
+}
+
+var VolumeType_value = map[string]int32{
+ "VOLUME_DEFAULT": 0,
+ "VOLUME_TOMBSTONE": 1,
+ "VOLUME_X_DELETE_AT": 2,
+}
+
+func (x VolumeType) String() string {
+ return proto.EnumName(VolumeType_name, int32(x))
+}
+
+func (VolumeType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{0}
+}
+
+type VolumeState int32
+
+const (
+ // Default state, volume can be read from and written to
+ VolumeState_STATE_RW VolumeState = 0
+ // Volume is being compacted (source). New objects cannot be appended
+ VolumeState_STATE_COMPACTION_SRC VolumeState = 1
+ // Volume is a compaction target. New objects cannot be appended
+ VolumeState_STATE_COMPACTION_TARGET VolumeState = 2
+)
+
+var VolumeState_name = map[int32]string{
+ 0: "STATE_RW",
+ 1: "STATE_COMPACTION_SRC",
+ 2: "STATE_COMPACTION_TARGET",
+}
+
+var VolumeState_value = map[string]int32{
+ "STATE_RW": 0,
+ "STATE_COMPACTION_SRC": 1,
+ "STATE_COMPACTION_TARGET": 2,
+}
+
+func (x VolumeState) String() string {
+ return proto.EnumName(VolumeState_name, int32(x))
+}
+
+func (VolumeState) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{1}
+}
+
+type RegisterVolumeRequest struct {
+ Partition uint32 `protobuf:"varint,1,opt,name=partition,proto3" json:"partition,omitempty"`
+ Type VolumeType `protobuf:"varint,2,opt,name=type,proto3,enum=filemgr.VolumeType" json:"type,omitempty"`
+ VolumeIndex uint32 `protobuf:"varint,3,opt,name=volume_index,json=volumeIndex,proto3" json:"volume_index,omitempty"`
+ Offset uint64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
+ State VolumeState `protobuf:"varint,5,opt,name=state,proto3,enum=filemgr.VolumeState" json:"state,omitempty"`
+ RepairTool bool `protobuf:"varint,6,opt,name=repair_tool,json=repairTool,proto3" json:"repair_tool,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RegisterVolumeRequest) Reset() { *m = RegisterVolumeRequest{} }
+func (m *RegisterVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*RegisterVolumeRequest) ProtoMessage() {}
+func (*RegisterVolumeRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{0}
+}
+
+func (m *RegisterVolumeRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RegisterVolumeRequest.Unmarshal(m, b)
+}
+func (m *RegisterVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RegisterVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *RegisterVolumeRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RegisterVolumeRequest.Merge(m, src)
+}
+func (m *RegisterVolumeRequest) XXX_Size() int {
+ return xxx_messageInfo_RegisterVolumeRequest.Size(m)
+}
+func (m *RegisterVolumeRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_RegisterVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RegisterVolumeRequest proto.InternalMessageInfo
+
+func (m *RegisterVolumeRequest) GetPartition() uint32 {
+ if m != nil {
+ return m.Partition
+ }
+ return 0
+}
+
+func (m *RegisterVolumeRequest) GetType() VolumeType {
+ if m != nil {
+ return m.Type
+ }
+ return VolumeType_VOLUME_DEFAULT
+}
+
+func (m *RegisterVolumeRequest) GetVolumeIndex() uint32 {
+ if m != nil {
+ return m.VolumeIndex
+ }
+ return 0
+}
+
+func (m *RegisterVolumeRequest) GetOffset() uint64 {
+ if m != nil {
+ return m.Offset
+ }
+ return 0
+}
+
+func (m *RegisterVolumeRequest) GetState() VolumeState {
+ if m != nil {
+ return m.State
+ }
+ return VolumeState_STATE_RW
+}
+
+func (m *RegisterVolumeRequest) GetRepairTool() bool {
+ if m != nil {
+ return m.RepairTool
+ }
+ return false
+}
+
+type RegisterVolumeReply struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RegisterVolumeReply) Reset() { *m = RegisterVolumeReply{} }
+func (m *RegisterVolumeReply) String() string { return proto.CompactTextString(m) }
+func (*RegisterVolumeReply) ProtoMessage() {}
+func (*RegisterVolumeReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{1}
+}
+
+func (m *RegisterVolumeReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RegisterVolumeReply.Unmarshal(m, b)
+}
+func (m *RegisterVolumeReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RegisterVolumeReply.Marshal(b, m, deterministic)
+}
+func (m *RegisterVolumeReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RegisterVolumeReply.Merge(m, src)
+}
+func (m *RegisterVolumeReply) XXX_Size() int {
+ return xxx_messageInfo_RegisterVolumeReply.Size(m)
+}
+func (m *RegisterVolumeReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_RegisterVolumeReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RegisterVolumeReply proto.InternalMessageInfo
+
+type UnregisterVolumeRequest struct {
+ Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
+ RepairTool bool `protobuf:"varint,2,opt,name=repair_tool,json=repairTool,proto3" json:"repair_tool,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UnregisterVolumeRequest) Reset() { *m = UnregisterVolumeRequest{} }
+func (m *UnregisterVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*UnregisterVolumeRequest) ProtoMessage() {}
+func (*UnregisterVolumeRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{2}
+}
+
+func (m *UnregisterVolumeRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UnregisterVolumeRequest.Unmarshal(m, b)
+}
+func (m *UnregisterVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UnregisterVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *UnregisterVolumeRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UnregisterVolumeRequest.Merge(m, src)
+}
+func (m *UnregisterVolumeRequest) XXX_Size() int {
+ return xxx_messageInfo_UnregisterVolumeRequest.Size(m)
+}
+func (m *UnregisterVolumeRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_UnregisterVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UnregisterVolumeRequest proto.InternalMessageInfo
+
+func (m *UnregisterVolumeRequest) GetIndex() uint32 {
+ if m != nil {
+ return m.Index
+ }
+ return 0
+}
+
+func (m *UnregisterVolumeRequest) GetRepairTool() bool {
+ if m != nil {
+ return m.RepairTool
+ }
+ return false
+}
+
+type UnregisterVolumeReply struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UnregisterVolumeReply) Reset() { *m = UnregisterVolumeReply{} }
+func (m *UnregisterVolumeReply) String() string { return proto.CompactTextString(m) }
+func (*UnregisterVolumeReply) ProtoMessage() {}
+func (*UnregisterVolumeReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{3}
+}
+
+func (m *UnregisterVolumeReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UnregisterVolumeReply.Unmarshal(m, b)
+}
+func (m *UnregisterVolumeReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UnregisterVolumeReply.Marshal(b, m, deterministic)
+}
+func (m *UnregisterVolumeReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UnregisterVolumeReply.Merge(m, src)
+}
+func (m *UnregisterVolumeReply) XXX_Size() int {
+ return xxx_messageInfo_UnregisterVolumeReply.Size(m)
+}
+func (m *UnregisterVolumeReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_UnregisterVolumeReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UnregisterVolumeReply proto.InternalMessageInfo
+
+type UpdateVolumeStateRequest struct {
+ VolumeIndex uint32 `protobuf:"varint,1,opt,name=volume_index,json=volumeIndex,proto3" json:"volume_index,omitempty"`
+ State VolumeState `protobuf:"varint,2,opt,name=state,proto3,enum=filemgr.VolumeState" json:"state,omitempty"`
+ RepairTool bool `protobuf:"varint,3,opt,name=repair_tool,json=repairTool,proto3" json:"repair_tool,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UpdateVolumeStateRequest) Reset() { *m = UpdateVolumeStateRequest{} }
+func (m *UpdateVolumeStateRequest) String() string { return proto.CompactTextString(m) }
+func (*UpdateVolumeStateRequest) ProtoMessage() {}
+func (*UpdateVolumeStateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{4}
+}
+
+func (m *UpdateVolumeStateRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UpdateVolumeStateRequest.Unmarshal(m, b)
+}
+func (m *UpdateVolumeStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UpdateVolumeStateRequest.Marshal(b, m, deterministic)
+}
+func (m *UpdateVolumeStateRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UpdateVolumeStateRequest.Merge(m, src)
+}
+func (m *UpdateVolumeStateRequest) XXX_Size() int {
+ return xxx_messageInfo_UpdateVolumeStateRequest.Size(m)
+}
+func (m *UpdateVolumeStateRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_UpdateVolumeStateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateVolumeStateRequest proto.InternalMessageInfo
+
+func (m *UpdateVolumeStateRequest) GetVolumeIndex() uint32 {
+ if m != nil {
+ return m.VolumeIndex
+ }
+ return 0
+}
+
+func (m *UpdateVolumeStateRequest) GetState() VolumeState {
+ if m != nil {
+ return m.State
+ }
+ return VolumeState_STATE_RW
+}
+
+func (m *UpdateVolumeStateRequest) GetRepairTool() bool {
+ if m != nil {
+ return m.RepairTool
+ }
+ return false
+}
+
+type UpdateVolumeStateReply struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UpdateVolumeStateReply) Reset() { *m = UpdateVolumeStateReply{} }
+func (m *UpdateVolumeStateReply) String() string { return proto.CompactTextString(m) }
+func (*UpdateVolumeStateReply) ProtoMessage() {}
+func (*UpdateVolumeStateReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{5}
+}
+
+func (m *UpdateVolumeStateReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UpdateVolumeStateReply.Unmarshal(m, b)
+}
+func (m *UpdateVolumeStateReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UpdateVolumeStateReply.Marshal(b, m, deterministic)
+}
+func (m *UpdateVolumeStateReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UpdateVolumeStateReply.Merge(m, src)
+}
+func (m *UpdateVolumeStateReply) XXX_Size() int {
+ return xxx_messageInfo_UpdateVolumeStateReply.Size(m)
+}
+func (m *UpdateVolumeStateReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_UpdateVolumeStateReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdateVolumeStateReply proto.InternalMessageInfo
+
+type GetVolumeRequest struct {
+ Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
+ RepairTool bool `protobuf:"varint,2,opt,name=repair_tool,json=repairTool,proto3" json:"repair_tool,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetVolumeRequest) Reset() { *m = GetVolumeRequest{} }
+func (m *GetVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*GetVolumeRequest) ProtoMessage() {}
+func (*GetVolumeRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{6}
+}
+
+func (m *GetVolumeRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetVolumeRequest.Unmarshal(m, b)
+}
+func (m *GetVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *GetVolumeRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetVolumeRequest.Merge(m, src)
+}
+func (m *GetVolumeRequest) XXX_Size() int {
+ return xxx_messageInfo_GetVolumeRequest.Size(m)
+}
+func (m *GetVolumeRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetVolumeRequest proto.InternalMessageInfo
+
+func (m *GetVolumeRequest) GetIndex() uint32 {
+ if m != nil {
+ return m.Index
+ }
+ return 0
+}
+
+func (m *GetVolumeRequest) GetRepairTool() bool {
+ if m != nil {
+ return m.RepairTool
+ }
+ return false
+}
+
+type GetVolumeReply struct {
+ VolumeIndex uint32 `protobuf:"varint,1,opt,name=volume_index,json=volumeIndex,proto3" json:"volume_index,omitempty"`
+ VolumeType VolumeType `protobuf:"varint,2,opt,name=volume_type,json=volumeType,proto3,enum=filemgr.VolumeType" json:"volume_type,omitempty"`
+ VolumeState uint32 `protobuf:"varint,3,opt,name=volume_state,json=volumeState,proto3" json:"volume_state,omitempty"`
+ Partition uint32 `protobuf:"varint,4,opt,name=partition,proto3" json:"partition,omitempty"`
+ NextOffset uint64 `protobuf:"varint,5,opt,name=next_offset,json=nextOffset,proto3" json:"next_offset,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetVolumeReply) Reset() { *m = GetVolumeReply{} }
+func (m *GetVolumeReply) String() string { return proto.CompactTextString(m) }
+func (*GetVolumeReply) ProtoMessage() {}
+func (*GetVolumeReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{7}
+}
+
+func (m *GetVolumeReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetVolumeReply.Unmarshal(m, b)
+}
+func (m *GetVolumeReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetVolumeReply.Marshal(b, m, deterministic)
+}
+func (m *GetVolumeReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetVolumeReply.Merge(m, src)
+}
+func (m *GetVolumeReply) XXX_Size() int {
+ return xxx_messageInfo_GetVolumeReply.Size(m)
+}
+func (m *GetVolumeReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetVolumeReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetVolumeReply proto.InternalMessageInfo
+
+func (m *GetVolumeReply) GetVolumeIndex() uint32 {
+ if m != nil {
+ return m.VolumeIndex
+ }
+ return 0
+}
+
+func (m *GetVolumeReply) GetVolumeType() VolumeType {
+ if m != nil {
+ return m.VolumeType
+ }
+ return VolumeType_VOLUME_DEFAULT
+}
+
+func (m *GetVolumeReply) GetVolumeState() uint32 {
+ if m != nil {
+ return m.VolumeState
+ }
+ return 0
+}
+
+func (m *GetVolumeReply) GetPartition() uint32 {
+ if m != nil {
+ return m.Partition
+ }
+ return 0
+}
+
+func (m *GetVolumeReply) GetNextOffset() uint64 {
+ if m != nil {
+ return m.NextOffset
+ }
+ return 0
+}
+
+type ListVolumesRequest struct {
+ Partition uint32 `protobuf:"varint,1,opt,name=partition,proto3" json:"partition,omitempty"`
+ Type VolumeType `protobuf:"varint,2,opt,name=type,proto3,enum=filemgr.VolumeType" json:"type,omitempty"`
+ RepairTool bool `protobuf:"varint,3,opt,name=repair_tool,json=repairTool,proto3" json:"repair_tool,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListVolumesRequest) Reset() { *m = ListVolumesRequest{} }
+func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) }
+func (*ListVolumesRequest) ProtoMessage() {}
+func (*ListVolumesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{8}
+}
+
+func (m *ListVolumesRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListVolumesRequest.Unmarshal(m, b)
+}
+func (m *ListVolumesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListVolumesRequest.Marshal(b, m, deterministic)
+}
+func (m *ListVolumesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListVolumesRequest.Merge(m, src)
+}
+func (m *ListVolumesRequest) XXX_Size() int {
+ return xxx_messageInfo_ListVolumesRequest.Size(m)
+}
+func (m *ListVolumesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListVolumesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListVolumesRequest proto.InternalMessageInfo
+
+func (m *ListVolumesRequest) GetPartition() uint32 {
+ if m != nil {
+ return m.Partition
+ }
+ return 0
+}
+
+func (m *ListVolumesRequest) GetType() VolumeType {
+ if m != nil {
+ return m.Type
+ }
+ return VolumeType_VOLUME_DEFAULT
+}
+
+func (m *ListVolumesRequest) GetRepairTool() bool {
+ if m != nil {
+ return m.RepairTool
+ }
+ return false
+}
+
+type ListVolumesReply struct {
+ Volumes []*Volume `protobuf:"bytes,1,rep,name=volumes,proto3" json:"volumes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListVolumesReply) Reset() { *m = ListVolumesReply{} }
+func (m *ListVolumesReply) String() string { return proto.CompactTextString(m) }
+func (*ListVolumesReply) ProtoMessage() {}
+func (*ListVolumesReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{9}
+}
+
+func (m *ListVolumesReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListVolumesReply.Unmarshal(m, b)
+}
+func (m *ListVolumesReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListVolumesReply.Marshal(b, m, deterministic)
+}
+func (m *ListVolumesReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListVolumesReply.Merge(m, src)
+}
+func (m *ListVolumesReply) XXX_Size() int {
+ return xxx_messageInfo_ListVolumesReply.Size(m)
+}
+func (m *ListVolumesReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListVolumesReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListVolumesReply proto.InternalMessageInfo
+
+func (m *ListVolumesReply) GetVolumes() []*Volume {
+ if m != nil {
+ return m.Volumes
+ }
+ return nil
+}
+
+type RegisterObjectRequest struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ VolumeIndex uint32 `protobuf:"varint,2,opt,name=volume_index,json=volumeIndex,proto3" json:"volume_index,omitempty"`
+ Offset uint64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"`
+ NextOffset uint64 `protobuf:"varint,4,opt,name=next_offset,json=nextOffset,proto3" json:"next_offset,omitempty"`
+ RepairTool bool `protobuf:"varint,5,opt,name=repair_tool,json=repairTool,proto3" json:"repair_tool,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RegisterObjectRequest) Reset() { *m = RegisterObjectRequest{} }
+func (m *RegisterObjectRequest) String() string { return proto.CompactTextString(m) }
+func (*RegisterObjectRequest) ProtoMessage() {}
+func (*RegisterObjectRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{10}
+}
+
+func (m *RegisterObjectRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RegisterObjectRequest.Unmarshal(m, b)
+}
+func (m *RegisterObjectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RegisterObjectRequest.Marshal(b, m, deterministic)
+}
+func (m *RegisterObjectRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RegisterObjectRequest.Merge(m, src)
+}
+func (m *RegisterObjectRequest) XXX_Size() int {
+ return xxx_messageInfo_RegisterObjectRequest.Size(m)
+}
+func (m *RegisterObjectRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_RegisterObjectRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RegisterObjectRequest proto.InternalMessageInfo
+
+func (m *RegisterObjectRequest) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *RegisterObjectRequest) GetVolumeIndex() uint32 {
+ if m != nil {
+ return m.VolumeIndex
+ }
+ return 0
+}
+
+func (m *RegisterObjectRequest) GetOffset() uint64 {
+ if m != nil {
+ return m.Offset
+ }
+ return 0
+}
+
+func (m *RegisterObjectRequest) GetNextOffset() uint64 {
+ if m != nil {
+ return m.NextOffset
+ }
+ return 0
+}
+
+func (m *RegisterObjectRequest) GetRepairTool() bool {
+ if m != nil {
+ return m.RepairTool
+ }
+ return false
+}
+
+type RegisterObjectReply struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RegisterObjectReply) Reset() { *m = RegisterObjectReply{} }
+func (m *RegisterObjectReply) String() string { return proto.CompactTextString(m) }
+func (*RegisterObjectReply) ProtoMessage() {}
+func (*RegisterObjectReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{11}
+}
+
+func (m *RegisterObjectReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RegisterObjectReply.Unmarshal(m, b)
+}
+func (m *RegisterObjectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RegisterObjectReply.Marshal(b, m, deterministic)
+}
+func (m *RegisterObjectReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RegisterObjectReply.Merge(m, src)
+}
+func (m *RegisterObjectReply) XXX_Size() int {
+ return xxx_messageInfo_RegisterObjectReply.Size(m)
+}
+func (m *RegisterObjectReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_RegisterObjectReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RegisterObjectReply proto.InternalMessageInfo
+
+type UnregisterObjectRequest struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ RepairTool bool `protobuf:"varint,2,opt,name=repair_tool,json=repairTool,proto3" json:"repair_tool,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UnregisterObjectRequest) Reset() { *m = UnregisterObjectRequest{} }
+func (m *UnregisterObjectRequest) String() string { return proto.CompactTextString(m) }
+func (*UnregisterObjectRequest) ProtoMessage() {}
+func (*UnregisterObjectRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{12}
+}
+
+func (m *UnregisterObjectRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UnregisterObjectRequest.Unmarshal(m, b)
+}
+func (m *UnregisterObjectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UnregisterObjectRequest.Marshal(b, m, deterministic)
+}
+func (m *UnregisterObjectRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UnregisterObjectRequest.Merge(m, src)
+}
+func (m *UnregisterObjectRequest) XXX_Size() int {
+ return xxx_messageInfo_UnregisterObjectRequest.Size(m)
+}
+func (m *UnregisterObjectRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_UnregisterObjectRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UnregisterObjectRequest proto.InternalMessageInfo
+
+func (m *UnregisterObjectRequest) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *UnregisterObjectRequest) GetRepairTool() bool {
+ if m != nil {
+ return m.RepairTool
+ }
+ return false
+}
+
+type UnregisterObjectReply struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UnregisterObjectReply) Reset() { *m = UnregisterObjectReply{} }
+func (m *UnregisterObjectReply) String() string { return proto.CompactTextString(m) }
+func (*UnregisterObjectReply) ProtoMessage() {}
+func (*UnregisterObjectReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{13}
+}
+
+func (m *UnregisterObjectReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UnregisterObjectReply.Unmarshal(m, b)
+}
+func (m *UnregisterObjectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UnregisterObjectReply.Marshal(b, m, deterministic)
+}
+func (m *UnregisterObjectReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UnregisterObjectReply.Merge(m, src)
+}
+func (m *UnregisterObjectReply) XXX_Size() int {
+ return xxx_messageInfo_UnregisterObjectReply.Size(m)
+}
+func (m *UnregisterObjectReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_UnregisterObjectReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UnregisterObjectReply proto.InternalMessageInfo
+
+type RenameObjectRequest struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ NewName []byte `protobuf:"bytes,2,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"`
+ RepairTool bool `protobuf:"varint,3,opt,name=repair_tool,json=repairTool,proto3" json:"repair_tool,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RenameObjectRequest) Reset() { *m = RenameObjectRequest{} }
+func (m *RenameObjectRequest) String() string { return proto.CompactTextString(m) }
+func (*RenameObjectRequest) ProtoMessage() {}
+func (*RenameObjectRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{14}
+}
+
+func (m *RenameObjectRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RenameObjectRequest.Unmarshal(m, b)
+}
+func (m *RenameObjectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RenameObjectRequest.Marshal(b, m, deterministic)
+}
+func (m *RenameObjectRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RenameObjectRequest.Merge(m, src)
+}
+func (m *RenameObjectRequest) XXX_Size() int {
+ return xxx_messageInfo_RenameObjectRequest.Size(m)
+}
+func (m *RenameObjectRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_RenameObjectRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RenameObjectRequest proto.InternalMessageInfo
+
+func (m *RenameObjectRequest) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *RenameObjectRequest) GetNewName() []byte {
+ if m != nil {
+ return m.NewName
+ }
+ return nil
+}
+
+func (m *RenameObjectRequest) GetRepairTool() bool {
+ if m != nil {
+ return m.RepairTool
+ }
+ return false
+}
+
+type RenameObjectReply struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RenameObjectReply) Reset() { *m = RenameObjectReply{} }
+func (m *RenameObjectReply) String() string { return proto.CompactTextString(m) }
+func (*RenameObjectReply) ProtoMessage() {}
+func (*RenameObjectReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{15}
+}
+
+func (m *RenameObjectReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RenameObjectReply.Unmarshal(m, b)
+}
+func (m *RenameObjectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RenameObjectReply.Marshal(b, m, deterministic)
+}
+func (m *RenameObjectReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RenameObjectReply.Merge(m, src)
+}
+func (m *RenameObjectReply) XXX_Size() int {
+ return xxx_messageInfo_RenameObjectReply.Size(m)
+}
+func (m *RenameObjectReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_RenameObjectReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RenameObjectReply proto.InternalMessageInfo
+
+type LoadObjectRequest struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ IsQuarantined bool `protobuf:"varint,2,opt,name=is_quarantined,json=isQuarantined,proto3" json:"is_quarantined,omitempty"`
+ RepairTool bool `protobuf:"varint,3,opt,name=repair_tool,json=repairTool,proto3" json:"repair_tool,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LoadObjectRequest) Reset() { *m = LoadObjectRequest{} }
+func (m *LoadObjectRequest) String() string { return proto.CompactTextString(m) }
+func (*LoadObjectRequest) ProtoMessage() {}
+func (*LoadObjectRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{16}
+}
+
+func (m *LoadObjectRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LoadObjectRequest.Unmarshal(m, b)
+}
+func (m *LoadObjectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LoadObjectRequest.Marshal(b, m, deterministic)
+}
+func (m *LoadObjectRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LoadObjectRequest.Merge(m, src)
+}
+func (m *LoadObjectRequest) XXX_Size() int {
+ return xxx_messageInfo_LoadObjectRequest.Size(m)
+}
+func (m *LoadObjectRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LoadObjectRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LoadObjectRequest proto.InternalMessageInfo
+
+func (m *LoadObjectRequest) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *LoadObjectRequest) GetIsQuarantined() bool {
+ if m != nil {
+ return m.IsQuarantined
+ }
+ return false
+}
+
+func (m *LoadObjectRequest) GetRepairTool() bool {
+ if m != nil {
+ return m.RepairTool
+ }
+ return false
+}
+
+type LoadObjectReply struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ VolumeIndex uint32 `protobuf:"varint,2,opt,name=volume_index,json=volumeIndex,proto3" json:"volume_index,omitempty"`
+ Offset uint64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LoadObjectReply) Reset() { *m = LoadObjectReply{} }
+func (m *LoadObjectReply) String() string { return proto.CompactTextString(m) }
+func (*LoadObjectReply) ProtoMessage() {}
+func (*LoadObjectReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{17}
+}
+
+func (m *LoadObjectReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LoadObjectReply.Unmarshal(m, b)
+}
+func (m *LoadObjectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LoadObjectReply.Marshal(b, m, deterministic)
+}
+func (m *LoadObjectReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LoadObjectReply.Merge(m, src)
+}
+func (m *LoadObjectReply) XXX_Size() int {
+ return xxx_messageInfo_LoadObjectReply.Size(m)
+}
+func (m *LoadObjectReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_LoadObjectReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LoadObjectReply proto.InternalMessageInfo
+
+func (m *LoadObjectReply) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *LoadObjectReply) GetVolumeIndex() uint32 {
+ if m != nil {
+ return m.VolumeIndex
+ }
+ return 0
+}
+
+func (m *LoadObjectReply) GetOffset() uint64 {
+ if m != nil {
+ return m.Offset
+ }
+ return 0
+}
+
+type QuarantineObjectRequest struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ RepairTool bool `protobuf:"varint,2,opt,name=repair_tool,json=repairTool,proto3" json:"repair_tool,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *QuarantineObjectRequest) Reset() { *m = QuarantineObjectRequest{} }
+func (m *QuarantineObjectRequest) String() string { return proto.CompactTextString(m) }
+func (*QuarantineObjectRequest) ProtoMessage() {}
+func (*QuarantineObjectRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{18}
+}
+
+func (m *QuarantineObjectRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_QuarantineObjectRequest.Unmarshal(m, b)
+}
+func (m *QuarantineObjectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_QuarantineObjectRequest.Marshal(b, m, deterministic)
+}
+func (m *QuarantineObjectRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QuarantineObjectRequest.Merge(m, src)
+}
+func (m *QuarantineObjectRequest) XXX_Size() int {
+ return xxx_messageInfo_QuarantineObjectRequest.Size(m)
+}
+func (m *QuarantineObjectRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_QuarantineObjectRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QuarantineObjectRequest proto.InternalMessageInfo
+
+func (m *QuarantineObjectRequest) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *QuarantineObjectRequest) GetRepairTool() bool {
+ if m != nil {
+ return m.RepairTool
+ }
+ return false
+}
+
+type QuarantineObjectReply struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *QuarantineObjectReply) Reset() { *m = QuarantineObjectReply{} }
+func (m *QuarantineObjectReply) String() string { return proto.CompactTextString(m) }
+func (*QuarantineObjectReply) ProtoMessage() {}
+func (*QuarantineObjectReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{19}
+}
+
+func (m *QuarantineObjectReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_QuarantineObjectReply.Unmarshal(m, b)
+}
+func (m *QuarantineObjectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_QuarantineObjectReply.Marshal(b, m, deterministic)
+}
+func (m *QuarantineObjectReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QuarantineObjectReply.Merge(m, src)
+}
+func (m *QuarantineObjectReply) XXX_Size() int {
+ return xxx_messageInfo_QuarantineObjectReply.Size(m)
+}
+func (m *QuarantineObjectReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_QuarantineObjectReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QuarantineObjectReply proto.InternalMessageInfo
+
+type UnquarantineObjectRequest struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ RepairTool bool `protobuf:"varint,2,opt,name=repair_tool,json=repairTool,proto3" json:"repair_tool,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UnquarantineObjectRequest) Reset() { *m = UnquarantineObjectRequest{} }
+func (m *UnquarantineObjectRequest) String() string { return proto.CompactTextString(m) }
+func (*UnquarantineObjectRequest) ProtoMessage() {}
+func (*UnquarantineObjectRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{20}
+}
+
+func (m *UnquarantineObjectRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UnquarantineObjectRequest.Unmarshal(m, b)
+}
+func (m *UnquarantineObjectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UnquarantineObjectRequest.Marshal(b, m, deterministic)
+}
+func (m *UnquarantineObjectRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UnquarantineObjectRequest.Merge(m, src)
+}
+func (m *UnquarantineObjectRequest) XXX_Size() int {
+ return xxx_messageInfo_UnquarantineObjectRequest.Size(m)
+}
+func (m *UnquarantineObjectRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_UnquarantineObjectRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UnquarantineObjectRequest proto.InternalMessageInfo
+
+func (m *UnquarantineObjectRequest) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *UnquarantineObjectRequest) GetRepairTool() bool {
+ if m != nil {
+ return m.RepairTool
+ }
+ return false
+}
+
+type UnquarantineObjectReply struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UnquarantineObjectReply) Reset() { *m = UnquarantineObjectReply{} }
+func (m *UnquarantineObjectReply) String() string { return proto.CompactTextString(m) }
+func (*UnquarantineObjectReply) ProtoMessage() {}
+func (*UnquarantineObjectReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{21}
+}
+
+func (m *UnquarantineObjectReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UnquarantineObjectReply.Unmarshal(m, b)
+}
+func (m *UnquarantineObjectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UnquarantineObjectReply.Marshal(b, m, deterministic)
+}
+func (m *UnquarantineObjectReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UnquarantineObjectReply.Merge(m, src)
+}
+func (m *UnquarantineObjectReply) XXX_Size() int {
+ return xxx_messageInfo_UnquarantineObjectReply.Size(m)
+}
+func (m *UnquarantineObjectReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_UnquarantineObjectReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UnquarantineObjectReply proto.InternalMessageInfo
+
+type LoadObjectsByPrefixRequest struct {
+ Prefix []byte `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
+ RepairTool bool `protobuf:"varint,2,opt,name=repair_tool,json=repairTool,proto3" json:"repair_tool,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LoadObjectsByPrefixRequest) Reset() { *m = LoadObjectsByPrefixRequest{} }
+func (m *LoadObjectsByPrefixRequest) String() string { return proto.CompactTextString(m) }
+func (*LoadObjectsByPrefixRequest) ProtoMessage() {}
+func (*LoadObjectsByPrefixRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{22}
+}
+
+func (m *LoadObjectsByPrefixRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LoadObjectsByPrefixRequest.Unmarshal(m, b)
+}
+func (m *LoadObjectsByPrefixRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LoadObjectsByPrefixRequest.Marshal(b, m, deterministic)
+}
+func (m *LoadObjectsByPrefixRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LoadObjectsByPrefixRequest.Merge(m, src)
+}
+func (m *LoadObjectsByPrefixRequest) XXX_Size() int {
+ return xxx_messageInfo_LoadObjectsByPrefixRequest.Size(m)
+}
+func (m *LoadObjectsByPrefixRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LoadObjectsByPrefixRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LoadObjectsByPrefixRequest proto.InternalMessageInfo
+
+func (m *LoadObjectsByPrefixRequest) GetPrefix() []byte {
+ if m != nil {
+ return m.Prefix
+ }
+ return nil
+}
+
+func (m *LoadObjectsByPrefixRequest) GetRepairTool() bool {
+ if m != nil {
+ return m.RepairTool
+ }
+ return false
+}
+
+type LoadObjectsByPrefixReply struct {
+ Objects []*Object `protobuf:"bytes,1,rep,name=objects,proto3" json:"objects,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LoadObjectsByPrefixReply) Reset() { *m = LoadObjectsByPrefixReply{} }
+func (m *LoadObjectsByPrefixReply) String() string { return proto.CompactTextString(m) }
+func (*LoadObjectsByPrefixReply) ProtoMessage() {}
+func (*LoadObjectsByPrefixReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{23}
+}
+
+func (m *LoadObjectsByPrefixReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LoadObjectsByPrefixReply.Unmarshal(m, b)
+}
+func (m *LoadObjectsByPrefixReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LoadObjectsByPrefixReply.Marshal(b, m, deterministic)
+}
+func (m *LoadObjectsByPrefixReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LoadObjectsByPrefixReply.Merge(m, src)
+}
+func (m *LoadObjectsByPrefixReply) XXX_Size() int {
+ return xxx_messageInfo_LoadObjectsByPrefixReply.Size(m)
+}
+func (m *LoadObjectsByPrefixReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_LoadObjectsByPrefixReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LoadObjectsByPrefixReply proto.InternalMessageInfo
+
+func (m *LoadObjectsByPrefixReply) GetObjects() []*Object {
+ if m != nil {
+ return m.Objects
+ }
+ return nil
+}
+
+type LoadObjectsByVolumeRequest struct {
+ Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
+ Quarantined bool `protobuf:"varint,2,opt,name=quarantined,proto3" json:"quarantined,omitempty"`
+ PageToken []byte `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ PageSize uint32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ RepairTool bool `protobuf:"varint,5,opt,name=repair_tool,json=repairTool,proto3" json:"repair_tool,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LoadObjectsByVolumeRequest) Reset() { *m = LoadObjectsByVolumeRequest{} }
+func (m *LoadObjectsByVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*LoadObjectsByVolumeRequest) ProtoMessage() {}
+func (*LoadObjectsByVolumeRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{24}
+}
+
+func (m *LoadObjectsByVolumeRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LoadObjectsByVolumeRequest.Unmarshal(m, b)
+}
+func (m *LoadObjectsByVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LoadObjectsByVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *LoadObjectsByVolumeRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LoadObjectsByVolumeRequest.Merge(m, src)
+}
+func (m *LoadObjectsByVolumeRequest) XXX_Size() int {
+ return xxx_messageInfo_LoadObjectsByVolumeRequest.Size(m)
+}
+func (m *LoadObjectsByVolumeRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LoadObjectsByVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LoadObjectsByVolumeRequest proto.InternalMessageInfo
+
+func (m *LoadObjectsByVolumeRequest) GetIndex() uint32 {
+ if m != nil {
+ return m.Index
+ }
+ return 0
+}
+
+func (m *LoadObjectsByVolumeRequest) GetQuarantined() bool {
+ if m != nil {
+ return m.Quarantined
+ }
+ return false
+}
+
+func (m *LoadObjectsByVolumeRequest) GetPageToken() []byte {
+ if m != nil {
+ return m.PageToken
+ }
+ return nil
+}
+
+func (m *LoadObjectsByVolumeRequest) GetPageSize() uint32 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+func (m *LoadObjectsByVolumeRequest) GetRepairTool() bool {
+ if m != nil {
+ return m.RepairTool
+ }
+ return false
+}
+
+type LoadObjectsByVolumeReply struct {
+ Objects []*Object `protobuf:"bytes,1,rep,name=objects,proto3" json:"objects,omitempty"`
+ NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LoadObjectsByVolumeReply) Reset() { *m = LoadObjectsByVolumeReply{} }
+func (m *LoadObjectsByVolumeReply) String() string { return proto.CompactTextString(m) }
+func (*LoadObjectsByVolumeReply) ProtoMessage() {}
+func (*LoadObjectsByVolumeReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{25}
+}
+
+func (m *LoadObjectsByVolumeReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LoadObjectsByVolumeReply.Unmarshal(m, b)
+}
+func (m *LoadObjectsByVolumeReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LoadObjectsByVolumeReply.Marshal(b, m, deterministic)
+}
+func (m *LoadObjectsByVolumeReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LoadObjectsByVolumeReply.Merge(m, src)
+}
+func (m *LoadObjectsByVolumeReply) XXX_Size() int {
+ return xxx_messageInfo_LoadObjectsByVolumeReply.Size(m)
+}
+func (m *LoadObjectsByVolumeReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_LoadObjectsByVolumeReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LoadObjectsByVolumeReply proto.InternalMessageInfo
+
+func (m *LoadObjectsByVolumeReply) GetObjects() []*Object {
+ if m != nil {
+ return m.Objects
+ }
+ return nil
+}
+
+func (m *LoadObjectsByVolumeReply) GetNextPageToken() []byte {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return nil
+}
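+
+// Illustrative pagination sketch (not part of the generated code): a caller of
+// LoadObjectsByVolume is expected to feed NextPageToken back as PageToken until
+// it comes back empty. Assuming a hypothetical callLoadObjectsByVolume helper
+// that performs the RPC round trip:
+//
+//   req := &LoadObjectsByVolumeRequest{Index: 3, PageSize: 1000}
+//   for {
+//       reply := callLoadObjectsByVolume(req) // hypothetical helper
+//       process(reply.Objects)                // hypothetical consumer
+//       if len(reply.NextPageToken) == 0 {
+//           break
+//       }
+//       req.PageToken = reply.NextPageToken
+//   }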
+
+type ListPartitionsRequest struct {
+ PartitionBits uint32 `protobuf:"varint,1,opt,name=partition_bits,json=partitionBits,proto3" json:"partition_bits,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListPartitionsRequest) Reset() { *m = ListPartitionsRequest{} }
+func (m *ListPartitionsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListPartitionsRequest) ProtoMessage() {}
+func (*ListPartitionsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{26}
+}
+
+func (m *ListPartitionsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListPartitionsRequest.Unmarshal(m, b)
+}
+func (m *ListPartitionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListPartitionsRequest.Marshal(b, m, deterministic)
+}
+func (m *ListPartitionsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListPartitionsRequest.Merge(m, src)
+}
+func (m *ListPartitionsRequest) XXX_Size() int {
+ return xxx_messageInfo_ListPartitionsRequest.Size(m)
+}
+func (m *ListPartitionsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListPartitionsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListPartitionsRequest proto.InternalMessageInfo
+
+func (m *ListPartitionsRequest) GetPartitionBits() uint32 {
+ if m != nil {
+ return m.PartitionBits
+ }
+ return 0
+}
+
+type ListPartitionRequest struct {
+ Partition uint32 `protobuf:"varint,1,opt,name=partition,proto3" json:"partition,omitempty"`
+ PartitionBits uint32 `protobuf:"varint,2,opt,name=partition_bits,json=partitionBits,proto3" json:"partition_bits,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListPartitionRequest) Reset() { *m = ListPartitionRequest{} }
+func (m *ListPartitionRequest) String() string { return proto.CompactTextString(m) }
+func (*ListPartitionRequest) ProtoMessage() {}
+func (*ListPartitionRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{27}
+}
+
+func (m *ListPartitionRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListPartitionRequest.Unmarshal(m, b)
+}
+func (m *ListPartitionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListPartitionRequest.Marshal(b, m, deterministic)
+}
+func (m *ListPartitionRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListPartitionRequest.Merge(m, src)
+}
+func (m *ListPartitionRequest) XXX_Size() int {
+ return xxx_messageInfo_ListPartitionRequest.Size(m)
+}
+func (m *ListPartitionRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListPartitionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListPartitionRequest proto.InternalMessageInfo
+
+func (m *ListPartitionRequest) GetPartition() uint32 {
+ if m != nil {
+ return m.Partition
+ }
+ return 0
+}
+
+func (m *ListPartitionRequest) GetPartitionBits() uint32 {
+ if m != nil {
+ return m.PartitionBits
+ }
+ return 0
+}
+
+type ListSuffixRequest struct {
+ Partition uint32 `protobuf:"varint,1,opt,name=partition,proto3" json:"partition,omitempty"`
+ Suffix []byte `protobuf:"bytes,2,opt,name=suffix,proto3" json:"suffix,omitempty"`
+ PartitionBits uint32 `protobuf:"varint,3,opt,name=partition_bits,json=partitionBits,proto3" json:"partition_bits,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListSuffixRequest) Reset() { *m = ListSuffixRequest{} }
+func (m *ListSuffixRequest) String() string { return proto.CompactTextString(m) }
+func (*ListSuffixRequest) ProtoMessage() {}
+func (*ListSuffixRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{28}
+}
+
+func (m *ListSuffixRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListSuffixRequest.Unmarshal(m, b)
+}
+func (m *ListSuffixRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListSuffixRequest.Marshal(b, m, deterministic)
+}
+func (m *ListSuffixRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListSuffixRequest.Merge(m, src)
+}
+func (m *ListSuffixRequest) XXX_Size() int {
+ return xxx_messageInfo_ListSuffixRequest.Size(m)
+}
+func (m *ListSuffixRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListSuffixRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListSuffixRequest proto.InternalMessageInfo
+
+func (m *ListSuffixRequest) GetPartition() uint32 {
+ if m != nil {
+ return m.Partition
+ }
+ return 0
+}
+
+func (m *ListSuffixRequest) GetSuffix() []byte {
+ if m != nil {
+ return m.Suffix
+ }
+ return nil
+}
+
+func (m *ListSuffixRequest) GetPartitionBits() uint32 {
+ if m != nil {
+ return m.PartitionBits
+ }
+ return 0
+}
+
+type ListQuarantinedOHashesRequest struct {
+ PageToken []byte `protobuf:"bytes,1,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ PageSize uint32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListQuarantinedOHashesRequest) Reset() { *m = ListQuarantinedOHashesRequest{} }
+func (m *ListQuarantinedOHashesRequest) String() string { return proto.CompactTextString(m) }
+func (*ListQuarantinedOHashesRequest) ProtoMessage() {}
+func (*ListQuarantinedOHashesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{29}
+}
+
+func (m *ListQuarantinedOHashesRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListQuarantinedOHashesRequest.Unmarshal(m, b)
+}
+func (m *ListQuarantinedOHashesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListQuarantinedOHashesRequest.Marshal(b, m, deterministic)
+}
+func (m *ListQuarantinedOHashesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListQuarantinedOHashesRequest.Merge(m, src)
+}
+func (m *ListQuarantinedOHashesRequest) XXX_Size() int {
+ return xxx_messageInfo_ListQuarantinedOHashesRequest.Size(m)
+}
+func (m *ListQuarantinedOHashesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListQuarantinedOHashesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListQuarantinedOHashesRequest proto.InternalMessageInfo
+
+func (m *ListQuarantinedOHashesRequest) GetPageToken() []byte {
+ if m != nil {
+ return m.PageToken
+ }
+ return nil
+}
+
+func (m *ListQuarantinedOHashesRequest) GetPageSize() uint32 {
+ if m != nil {
+ return m.PageSize
+ }
+ return 0
+}
+
+type ListQuarantinedOHashesReply struct {
+ Objects []*QuarantinedObjectName `protobuf:"bytes,1,rep,name=objects,proto3" json:"objects,omitempty"`
+ NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListQuarantinedOHashesReply) Reset() { *m = ListQuarantinedOHashesReply{} }
+func (m *ListQuarantinedOHashesReply) String() string { return proto.CompactTextString(m) }
+func (*ListQuarantinedOHashesReply) ProtoMessage() {}
+func (*ListQuarantinedOHashesReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{30}
+}
+
+func (m *ListQuarantinedOHashesReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListQuarantinedOHashesReply.Unmarshal(m, b)
+}
+func (m *ListQuarantinedOHashesReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListQuarantinedOHashesReply.Marshal(b, m, deterministic)
+}
+func (m *ListQuarantinedOHashesReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListQuarantinedOHashesReply.Merge(m, src)
+}
+func (m *ListQuarantinedOHashesReply) XXX_Size() int {
+ return xxx_messageInfo_ListQuarantinedOHashesReply.Size(m)
+}
+func (m *ListQuarantinedOHashesReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListQuarantinedOHashesReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListQuarantinedOHashesReply proto.InternalMessageInfo
+
+func (m *ListQuarantinedOHashesReply) GetObjects() []*QuarantinedObjectName {
+ if m != nil {
+ return m.Objects
+ }
+ return nil
+}
+
+func (m *ListQuarantinedOHashesReply) GetNextPageToken() []byte {
+ if m != nil {
+ return m.NextPageToken
+ }
+ return nil
+}
+
+type ListQuarantinedOHashRequest struct {
+ Prefix []byte `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
+ RepairTool bool `protobuf:"varint,2,opt,name=repair_tool,json=repairTool,proto3" json:"repair_tool,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListQuarantinedOHashRequest) Reset() { *m = ListQuarantinedOHashRequest{} }
+func (m *ListQuarantinedOHashRequest) String() string { return proto.CompactTextString(m) }
+func (*ListQuarantinedOHashRequest) ProtoMessage() {}
+func (*ListQuarantinedOHashRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{31}
+}
+
+func (m *ListQuarantinedOHashRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListQuarantinedOHashRequest.Unmarshal(m, b)
+}
+func (m *ListQuarantinedOHashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListQuarantinedOHashRequest.Marshal(b, m, deterministic)
+}
+func (m *ListQuarantinedOHashRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListQuarantinedOHashRequest.Merge(m, src)
+}
+func (m *ListQuarantinedOHashRequest) XXX_Size() int {
+ return xxx_messageInfo_ListQuarantinedOHashRequest.Size(m)
+}
+func (m *ListQuarantinedOHashRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListQuarantinedOHashRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListQuarantinedOHashRequest proto.InternalMessageInfo
+
+func (m *ListQuarantinedOHashRequest) GetPrefix() []byte {
+ if m != nil {
+ return m.Prefix
+ }
+ return nil
+}
+
+func (m *ListQuarantinedOHashRequest) GetRepairTool() bool {
+ if m != nil {
+ return m.RepairTool
+ }
+ return false
+}
+
+type ListQuarantinedOHashReply struct {
+ Objects []*Object `protobuf:"bytes,1,rep,name=objects,proto3" json:"objects,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListQuarantinedOHashReply) Reset() { *m = ListQuarantinedOHashReply{} }
+func (m *ListQuarantinedOHashReply) String() string { return proto.CompactTextString(m) }
+func (*ListQuarantinedOHashReply) ProtoMessage() {}
+func (*ListQuarantinedOHashReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{32}
+}
+
+func (m *ListQuarantinedOHashReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListQuarantinedOHashReply.Unmarshal(m, b)
+}
+func (m *ListQuarantinedOHashReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListQuarantinedOHashReply.Marshal(b, m, deterministic)
+}
+func (m *ListQuarantinedOHashReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListQuarantinedOHashReply.Merge(m, src)
+}
+func (m *ListQuarantinedOHashReply) XXX_Size() int {
+ return xxx_messageInfo_ListQuarantinedOHashReply.Size(m)
+}
+func (m *ListQuarantinedOHashReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListQuarantinedOHashReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListQuarantinedOHashReply proto.InternalMessageInfo
+
+func (m *ListQuarantinedOHashReply) GetObjects() []*Object {
+ if m != nil {
+ return m.Objects
+ }
+ return nil
+}
+
+type GetNextOffsetRequest struct {
+ VolumeIndex uint32 `protobuf:"varint,1,opt,name=volume_index,json=volumeIndex,proto3" json:"volume_index,omitempty"`
+ RepairTool bool `protobuf:"varint,2,opt,name=repair_tool,json=repairTool,proto3" json:"repair_tool,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetNextOffsetRequest) Reset() { *m = GetNextOffsetRequest{} }
+func (m *GetNextOffsetRequest) String() string { return proto.CompactTextString(m) }
+func (*GetNextOffsetRequest) ProtoMessage() {}
+func (*GetNextOffsetRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{33}
+}
+
+func (m *GetNextOffsetRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetNextOffsetRequest.Unmarshal(m, b)
+}
+func (m *GetNextOffsetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetNextOffsetRequest.Marshal(b, m, deterministic)
+}
+func (m *GetNextOffsetRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetNextOffsetRequest.Merge(m, src)
+}
+func (m *GetNextOffsetRequest) XXX_Size() int {
+ return xxx_messageInfo_GetNextOffsetRequest.Size(m)
+}
+func (m *GetNextOffsetRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetNextOffsetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetNextOffsetRequest proto.InternalMessageInfo
+
+func (m *GetNextOffsetRequest) GetVolumeIndex() uint32 {
+ if m != nil {
+ return m.VolumeIndex
+ }
+ return 0
+}
+
+func (m *GetNextOffsetRequest) GetRepairTool() bool {
+ if m != nil {
+ return m.RepairTool
+ }
+ return false
+}
+
+type GetNextOffsetReply struct {
+ Offset uint64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetNextOffsetReply) Reset() { *m = GetNextOffsetReply{} }
+func (m *GetNextOffsetReply) String() string { return proto.CompactTextString(m) }
+func (*GetNextOffsetReply) ProtoMessage() {}
+func (*GetNextOffsetReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{34}
+}
+
+func (m *GetNextOffsetReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetNextOffsetReply.Unmarshal(m, b)
+}
+func (m *GetNextOffsetReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetNextOffsetReply.Marshal(b, m, deterministic)
+}
+func (m *GetNextOffsetReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetNextOffsetReply.Merge(m, src)
+}
+func (m *GetNextOffsetReply) XXX_Size() int {
+ return xxx_messageInfo_GetNextOffsetReply.Size(m)
+}
+func (m *GetNextOffsetReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetNextOffsetReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetNextOffsetReply proto.InternalMessageInfo
+
+func (m *GetNextOffsetReply) GetOffset() uint64 {
+ if m != nil {
+ return m.Offset
+ }
+ return 0
+}
+
+type GetStatsRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetStatsRequest) Reset() { *m = GetStatsRequest{} }
+func (m *GetStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetStatsRequest) ProtoMessage() {}
+func (*GetStatsRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{35}
+}
+
+func (m *GetStatsRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetStatsRequest.Unmarshal(m, b)
+}
+func (m *GetStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetStatsRequest.Marshal(b, m, deterministic)
+}
+func (m *GetStatsRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetStatsRequest.Merge(m, src)
+}
+func (m *GetStatsRequest) XXX_Size() int {
+ return xxx_messageInfo_GetStatsRequest.Size(m)
+}
+func (m *GetStatsRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetStatsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetStatsRequest proto.InternalMessageInfo
+
+type GetStatsReply struct {
+ Stats map[string]uint64 `protobuf:"bytes,1,rep,name=stats,proto3" json:"stats,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetStatsReply) Reset() { *m = GetStatsReply{} }
+func (m *GetStatsReply) String() string { return proto.CompactTextString(m) }
+func (*GetStatsReply) ProtoMessage() {}
+func (*GetStatsReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{36}
+}
+
+func (m *GetStatsReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetStatsReply.Unmarshal(m, b)
+}
+func (m *GetStatsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetStatsReply.Marshal(b, m, deterministic)
+}
+func (m *GetStatsReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetStatsReply.Merge(m, src)
+}
+func (m *GetStatsReply) XXX_Size() int {
+ return xxx_messageInfo_GetStatsReply.Size(m)
+}
+func (m *GetStatsReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetStatsReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetStatsReply proto.InternalMessageInfo
+
+func (m *GetStatsReply) GetStats() map[string]uint64 {
+ if m != nil {
+ return m.Stats
+ }
+ return nil
+}
+
+type SetKvStateReply struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SetKvStateReply) Reset() { *m = SetKvStateReply{} }
+func (m *SetKvStateReply) String() string { return proto.CompactTextString(m) }
+func (*SetKvStateReply) ProtoMessage() {}
+func (*SetKvStateReply) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{37}
+}
+
+func (m *SetKvStateReply) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SetKvStateReply.Unmarshal(m, b)
+}
+func (m *SetKvStateReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SetKvStateReply.Marshal(b, m, deterministic)
+}
+func (m *SetKvStateReply) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SetKvStateReply.Merge(m, src)
+}
+func (m *SetKvStateReply) XXX_Size() int {
+ return xxx_messageInfo_SetKvStateReply.Size(m)
+}
+func (m *SetKvStateReply) XXX_DiscardUnknown() {
+ xxx_messageInfo_SetKvStateReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetKvStateReply proto.InternalMessageInfo
+
+type GetKvStateRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GetKvStateRequest) Reset() { *m = GetKvStateRequest{} }
+func (m *GetKvStateRequest) String() string { return proto.CompactTextString(m) }
+func (*GetKvStateRequest) ProtoMessage() {}
+func (*GetKvStateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{38}
+}
+
+func (m *GetKvStateRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GetKvStateRequest.Unmarshal(m, b)
+}
+func (m *GetKvStateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GetKvStateRequest.Marshal(b, m, deterministic)
+}
+func (m *GetKvStateRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GetKvStateRequest.Merge(m, src)
+}
+func (m *GetKvStateRequest) XXX_Size() int {
+ return xxx_messageInfo_GetKvStateRequest.Size(m)
+}
+func (m *GetKvStateRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_GetKvStateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetKvStateRequest proto.InternalMessageInfo
+
+type KvState struct {
+ IsClean bool `protobuf:"varint,1,opt,name=isClean,proto3" json:"isClean,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *KvState) Reset() { *m = KvState{} }
+func (m *KvState) String() string { return proto.CompactTextString(m) }
+func (*KvState) ProtoMessage() {}
+func (*KvState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{39}
+}
+
+func (m *KvState) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_KvState.Unmarshal(m, b)
+}
+func (m *KvState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_KvState.Marshal(b, m, deterministic)
+}
+func (m *KvState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KvState.Merge(m, src)
+}
+func (m *KvState) XXX_Size() int {
+ return xxx_messageInfo_KvState.Size(m)
+}
+func (m *KvState) XXX_DiscardUnknown() {
+ xxx_messageInfo_KvState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KvState proto.InternalMessageInfo
+
+func (m *KvState) GetIsClean() bool {
+ if m != nil {
+ return m.IsClean
+ }
+ return false
+}
+
+// Generic messages
+type Volume struct {
+ VolumeIndex uint32 `protobuf:"varint,1,opt,name=volume_index,json=volumeIndex,proto3" json:"volume_index,omitempty"`
+ VolumeType VolumeType `protobuf:"varint,2,opt,name=volume_type,json=volumeType,proto3,enum=filemgr.VolumeType" json:"volume_type,omitempty"`
+ VolumeState uint32 `protobuf:"varint,3,opt,name=volume_state,json=volumeState,proto3" json:"volume_state,omitempty"`
+ Partition uint32 `protobuf:"varint,4,opt,name=partition,proto3" json:"partition,omitempty"`
+ NextOffset uint64 `protobuf:"varint,5,opt,name=next_offset,json=nextOffset,proto3" json:"next_offset,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Volume) Reset() { *m = Volume{} }
+func (m *Volume) String() string { return proto.CompactTextString(m) }
+func (*Volume) ProtoMessage() {}
+func (*Volume) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{40}
+}
+
+func (m *Volume) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Volume.Unmarshal(m, b)
+}
+func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Volume.Marshal(b, m, deterministic)
+}
+func (m *Volume) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Volume.Merge(m, src)
+}
+func (m *Volume) XXX_Size() int {
+ return xxx_messageInfo_Volume.Size(m)
+}
+func (m *Volume) XXX_DiscardUnknown() {
+ xxx_messageInfo_Volume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Volume proto.InternalMessageInfo
+
+func (m *Volume) GetVolumeIndex() uint32 {
+ if m != nil {
+ return m.VolumeIndex
+ }
+ return 0
+}
+
+func (m *Volume) GetVolumeType() VolumeType {
+ if m != nil {
+ return m.VolumeType
+ }
+ return VolumeType_VOLUME_DEFAULT
+}
+
+func (m *Volume) GetVolumeState() uint32 {
+ if m != nil {
+ return m.VolumeState
+ }
+ return 0
+}
+
+func (m *Volume) GetPartition() uint32 {
+ if m != nil {
+ return m.Partition
+ }
+ return 0
+}
+
+func (m *Volume) GetNextOffset() uint64 {
+ if m != nil {
+ return m.NextOffset
+ }
+ return 0
+}
+
+type Object struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ VolumeIndex uint32 `protobuf:"varint,2,opt,name=volume_index,json=volumeIndex,proto3" json:"volume_index,omitempty"`
+ Offset uint64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Object) Reset() { *m = Object{} }
+func (m *Object) String() string { return proto.CompactTextString(m) }
+func (*Object) ProtoMessage() {}
+func (*Object) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{41}
+}
+
+func (m *Object) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Object.Unmarshal(m, b)
+}
+func (m *Object) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Object.Marshal(b, m, deterministic)
+}
+func (m *Object) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Object.Merge(m, src)
+}
+func (m *Object) XXX_Size() int {
+ return xxx_messageInfo_Object.Size(m)
+}
+func (m *Object) XXX_DiscardUnknown() {
+ xxx_messageInfo_Object.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Object proto.InternalMessageInfo
+
+func (m *Object) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *Object) GetVolumeIndex() uint32 {
+ if m != nil {
+ return m.VolumeIndex
+ }
+ return 0
+}
+
+func (m *Object) GetOffset() uint64 {
+ if m != nil {
+ return m.Offset
+ }
+ return 0
+}
+
+type QuarantinedObjectName struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *QuarantinedObjectName) Reset() { *m = QuarantinedObjectName{} }
+func (m *QuarantinedObjectName) String() string { return proto.CompactTextString(m) }
+func (*QuarantinedObjectName) ProtoMessage() {}
+func (*QuarantinedObjectName) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{42}
+}
+
+func (m *QuarantinedObjectName) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_QuarantinedObjectName.Unmarshal(m, b)
+}
+func (m *QuarantinedObjectName) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_QuarantinedObjectName.Marshal(b, m, deterministic)
+}
+func (m *QuarantinedObjectName) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QuarantinedObjectName.Merge(m, src)
+}
+func (m *QuarantinedObjectName) XXX_Size() int {
+ return xxx_messageInfo_QuarantinedObjectName.Size(m)
+}
+func (m *QuarantinedObjectName) XXX_DiscardUnknown() {
+ xxx_messageInfo_QuarantinedObjectName.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QuarantinedObjectName proto.InternalMessageInfo
+
+func (m *QuarantinedObjectName) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+// For listdir()-like functions
+type DirEntries struct {
+ Entry []string `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DirEntries) Reset() { *m = DirEntries{} }
+func (m *DirEntries) String() string { return proto.CompactTextString(m) }
+func (*DirEntries) ProtoMessage() {}
+func (*DirEntries) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1fcd0776e05e82a6, []int{43}
+}
+
+func (m *DirEntries) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DirEntries.Unmarshal(m, b)
+}
+func (m *DirEntries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DirEntries.Marshal(b, m, deterministic)
+}
+func (m *DirEntries) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DirEntries.Merge(m, src)
+}
+func (m *DirEntries) XXX_Size() int {
+ return xxx_messageInfo_DirEntries.Size(m)
+}
+func (m *DirEntries) XXX_DiscardUnknown() {
+ xxx_messageInfo_DirEntries.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DirEntries proto.InternalMessageInfo
+
+func (m *DirEntries) GetEntry() []string {
+ if m != nil {
+ return m.Entry
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("filemgr.VolumeType", VolumeType_name, VolumeType_value)
+ proto.RegisterEnum("filemgr.VolumeState", VolumeState_name, VolumeState_value)
+ proto.RegisterType((*RegisterVolumeRequest)(nil), "filemgr.RegisterVolumeRequest")
+ proto.RegisterType((*RegisterVolumeReply)(nil), "filemgr.RegisterVolumeReply")
+ proto.RegisterType((*UnregisterVolumeRequest)(nil), "filemgr.UnregisterVolumeRequest")
+ proto.RegisterType((*UnregisterVolumeReply)(nil), "filemgr.UnregisterVolumeReply")
+ proto.RegisterType((*UpdateVolumeStateRequest)(nil), "filemgr.UpdateVolumeStateRequest")
+ proto.RegisterType((*UpdateVolumeStateReply)(nil), "filemgr.UpdateVolumeStateReply")
+ proto.RegisterType((*GetVolumeRequest)(nil), "filemgr.GetVolumeRequest")
+ proto.RegisterType((*GetVolumeReply)(nil), "filemgr.GetVolumeReply")
+ proto.RegisterType((*ListVolumesRequest)(nil), "filemgr.ListVolumesRequest")
+ proto.RegisterType((*ListVolumesReply)(nil), "filemgr.ListVolumesReply")
+ proto.RegisterType((*RegisterObjectRequest)(nil), "filemgr.RegisterObjectRequest")
+ proto.RegisterType((*RegisterObjectReply)(nil), "filemgr.RegisterObjectReply")
+ proto.RegisterType((*UnregisterObjectRequest)(nil), "filemgr.UnregisterObjectRequest")
+ proto.RegisterType((*UnregisterObjectReply)(nil), "filemgr.UnregisterObjectReply")
+ proto.RegisterType((*RenameObjectRequest)(nil), "filemgr.RenameObjectRequest")
+ proto.RegisterType((*RenameObjectReply)(nil), "filemgr.RenameObjectReply")
+ proto.RegisterType((*LoadObjectRequest)(nil), "filemgr.LoadObjectRequest")
+ proto.RegisterType((*LoadObjectReply)(nil), "filemgr.LoadObjectReply")
+ proto.RegisterType((*QuarantineObjectRequest)(nil), "filemgr.QuarantineObjectRequest")
+ proto.RegisterType((*QuarantineObjectReply)(nil), "filemgr.QuarantineObjectReply")
+ proto.RegisterType((*UnquarantineObjectRequest)(nil), "filemgr.UnquarantineObjectRequest")
+ proto.RegisterType((*UnquarantineObjectReply)(nil), "filemgr.UnquarantineObjectReply")
+ proto.RegisterType((*LoadObjectsByPrefixRequest)(nil), "filemgr.LoadObjectsByPrefixRequest")
+ proto.RegisterType((*LoadObjectsByPrefixReply)(nil), "filemgr.LoadObjectsByPrefixReply")
+ proto.RegisterType((*LoadObjectsByVolumeRequest)(nil), "filemgr.LoadObjectsByVolumeRequest")
+ proto.RegisterType((*LoadObjectsByVolumeReply)(nil), "filemgr.LoadObjectsByVolumeReply")
+ proto.RegisterType((*ListPartitionsRequest)(nil), "filemgr.ListPartitionsRequest")
+ proto.RegisterType((*ListPartitionRequest)(nil), "filemgr.ListPartitionRequest")
+ proto.RegisterType((*ListSuffixRequest)(nil), "filemgr.ListSuffixRequest")
+ proto.RegisterType((*ListQuarantinedOHashesRequest)(nil), "filemgr.ListQuarantinedOHashesRequest")
+ proto.RegisterType((*ListQuarantinedOHashesReply)(nil), "filemgr.ListQuarantinedOHashesReply")
+ proto.RegisterType((*ListQuarantinedOHashRequest)(nil), "filemgr.ListQuarantinedOHashRequest")
+ proto.RegisterType((*ListQuarantinedOHashReply)(nil), "filemgr.ListQuarantinedOHashReply")
+ proto.RegisterType((*GetNextOffsetRequest)(nil), "filemgr.GetNextOffsetRequest")
+ proto.RegisterType((*GetNextOffsetReply)(nil), "filemgr.GetNextOffsetReply")
+ proto.RegisterType((*GetStatsRequest)(nil), "filemgr.GetStatsRequest")
+ proto.RegisterType((*GetStatsReply)(nil), "filemgr.GetStatsReply")
+ proto.RegisterMapType((map[string]uint64)(nil), "filemgr.GetStatsReply.StatsEntry")
+ proto.RegisterType((*SetKvStateReply)(nil), "filemgr.SetKvStateReply")
+ proto.RegisterType((*GetKvStateRequest)(nil), "filemgr.GetKvStateRequest")
+ proto.RegisterType((*KvState)(nil), "filemgr.KvState")
+ proto.RegisterType((*Volume)(nil), "filemgr.Volume")
+ proto.RegisterType((*Object)(nil), "filemgr.Object")
+ proto.RegisterType((*QuarantinedObjectName)(nil), "filemgr.QuarantinedObjectName")
+ proto.RegisterType((*DirEntries)(nil), "filemgr.DirEntries")
+}
+
+func init() {
+ proto.RegisterFile("fmgr.proto", fileDescriptor_1fcd0776e05e82a6)
+}
+
+var fileDescriptor_1fcd0776e05e82a6 = []byte{
+ // 1083 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x57, 0xdd, 0x6e, 0xe3, 0x44,
+ 0x1b, 0x5e, 0x3b, 0x69, 0xda, 0xbe, 0xe9, 0x8f, 0x33, 0x4d, 0x53, 0x77, 0xfb, 0xed, 0xb7, 0x59,
+ 0xa3, 0x85, 0x50, 0x50, 0x0f, 0x16, 0x24, 0x2a, 0x24, 0x90, 0xd2, 0xd6, 0x1b, 0x2a, 0xd2, 0x24,
+ 0x38, 0x4e, 0x17, 0xc1, 0x81, 0x71, 0xe9, 0x24, 0x98, 0x4d, 0x6c, 0xd7, 0x9e, 0x74, 0x9b, 0x15,
+ 0x12, 0x12, 0x87, 0x5c, 0x09, 0xa7, 0x5c, 0x03, 0x87, 0xdc, 0x07, 0xd7, 0x81, 0x66, 0xc6, 0x4e,
+ 0xfc, 0x47, 0xd3, 0x88, 0x9c, 0x70, 0xe6, 0xf7, 0xb1, 0xf3, 0x3e, 0xcf, 0xfb, 0x3b, 0x13, 0x80,
+ 0xfe, 0x68, 0xe0, 0x1d, 0xb9, 0x9e, 0x43, 0x1c, 0xb4, 0xda, 0xb7, 0x86, 0x78, 0x34, 0xf0, 0x94,
+ 0xbf, 0x04, 0xd8, 0xd5, 0xf0, 0xc0, 0xf2, 0x09, 0xf6, 0x2e, 0x9d, 0xe1, 0x78, 0x84, 0x35, 0x7c,
+ 0x33, 0xc6, 0x3e, 0x41, 0xff, 0x83, 0x75, 0xd7, 0xf4, 0x88, 0x45, 0x2c, 0xc7, 0x96, 0x85, 0xaa,
+ 0x50, 0xdb, 0xd4, 0x66, 0x00, 0x7a, 0x0f, 0xf2, 0x64, 0xe2, 0x62, 0x59, 0xac, 0x0a, 0xb5, 0xad,
+ 0x17, 0x3b, 0x47, 0x81, 0xbf, 0x23, 0xee, 0x43, 0x9f, 0xb8, 0x58, 0x63, 0x1f, 0xa0, 0x67, 0xb0,
+ 0x71, 0xcb, 0x30, 0xc3, 0xb2, 0xaf, 0xf1, 0x9d, 0x9c, 0x63, 0x9e, 0x8a, 0x1c, 0x3b, 0xa7, 0x10,
+ 0xaa, 0x40, 0xc1, 0xe9, 0xf7, 0x7d, 0x4c, 0xe4, 0x7c, 0x55, 0xa8, 0xe5, 0xb5, 0xc0, 0x42, 0x87,
+ 0xb0, 0xe2, 0x13, 0x93, 0x60, 0x79, 0x85, 0x91, 0x94, 0x13, 0x24, 0x5d, 0xfa, 0x4e, 0xe3, 0x9f,
+ 0xa0, 0xa7, 0x50, 0xf4, 0xb0, 0x6b, 0x5a, 0x9e, 0x41, 0x1c, 0x67, 0x28, 0x17, 0xaa, 0x42, 0x6d,
+ 0x4d, 0x03, 0x0e, 0xe9, 0x8e, 0x33, 0x54, 0x76, 0x61, 0x27, 0x19, 0xa7, 0x3b, 0x9c, 0x28, 0x1d,
+ 0xd8, 0xeb, 0xd9, 0x5e, 0x66, 0x02, 0xca, 0xb0, 0xc2, 0x25, 0xf3, 0xe0, 0xb9, 0x91, 0x24, 0x12,
+ 0x53, 0x44, 0x7b, 0xb0, 0x9b, 0xf6, 0x48, 0xa9, 0x7e, 0x15, 0x40, 0xee, 0xb9, 0xd7, 0x26, 0xc1,
+ 0x51, 0xfd, 0x01, 0x59, 0x32, 0x4d, 0x42, 0x3a, 0x4d, 0xd3, 0x74, 0x88, 0x0b, 0xa7, 0x23, 0x97,
+ 0x52, 0x29, 0x43, 0x25, 0x43, 0x0b, 0x95, 0x79, 0x0e, 0x52, 0x03, 0x93, 0xa5, 0xa4, 0xe2, 0x4f,
+ 0x01, 0xb6, 0x22, 0xbe, 0xdc, 0xe1, 0xe4, 0x21, 0x71, 0x7e, 0x0c, 0x81, 0x69, 0xcc, 0xeb, 0x30,
+ 0xb8, 0x9d, 0x3e, 0x47, 0x1c, 0xf3, 0x24, 0xc5, 0xfa, 0x8c, 0x85, 0x17, 0xef, 0xe8, 0x7c, 0xb2,
+ 0xa3, 0x9f, 0x42, 0xd1, 0xc6, 0x77, 0xc4, 0x08, 0x5a, 0x71, 0x85, 0xb5, 0x22, 0x50, 0xa8, 0xcd,
+ 0x10, 0xe5, 0x27, 0x40, 0x4d, 0xcb, 0x0f, 0xa2, 0xf1, 0x97, 0x3c, 0x26, 0x73, 0x0b, 0xf6, 0x19,
+ 0x48, 0x31, 0x76, 0x9a, 0xcc, 0xf7, 0x61, 0x95, 0xc7, 0xe7, 0xcb, 0x42, 0x35, 0x57, 0x2b, 0xbe,
+ 0xd8, 0x4e, 0x10, 0x68, 0xe1, 0x7b, 0xe5, 0xb7, 0xc8, 0x9c, 0xb7, 0xaf, 0x7e, 0xc4, 0xdf, 0x93,
+ 0x30, 0x00, 0x04, 0x79, 0xdb, 0x1c, 0x61, 0xa6, 0x7d, 0x43, 0x63, 0xcf, 0xa9, 0x2a, 0x89, 0xf7,
+ 0x0d, 0x6d, 0x2e, 0x36, 0xb4, 0x89, 0x34, 0xe6, 0x93, 0x69, 0x4c, 0x46, 0xba, 0x72, 0xdf, 0xa4,
+ 0x86, 0x4a, 0x69, 0x5f, 0xb6, 0xa2, 0x93, 0x3a, 0x3f, 0x84, 0xc5, 0xe6, 0x34, 0x4a, 0x84, 0x29,
+ 0x3f, 0xf5, 0x31, 0x9f, 0x64, 0x1f, 0xd6, 0x6c, 0xfc, 0xc6, 0x60, 0xb8, 0xc8, 0xf0, 0x55, 0x1b,
+ 0xbf, 0x69, 0x65, 0xf0, 0xa7, 0x0b, 0xba, 0x03, 0xa5, 0x38, 0x0d, 0xe5, 0x76, 0xa0, 0xd4, 0x74,
+ 0xcc, 0xeb, 0xf9, 0xcc, 0xcf, 0x61, 0xcb, 0xf2, 0x8d, 0x9b, 0xb1, 0xe9, 0x99, 0x36, 0xb1, 0x6c,
+ 0x7c, 0x1d, 0x44, 0xb8, 0x69, 0xf9, 0x5f, 0xcd, 0xc0, 0xf9, 0x2a, 0xbe, 0x83, 0xed, 0x28, 0x21,
+ 0xed, 0xaa, 0xe5, 0x36, 0x04, 0xad, 0xdb, 0x4c, 0xd1, 0x72, 0xea, 0x96, 0xf6, 0xc7, 0x57, 0xf9,
+ 0x7e, 0xcf, 0xbe, 0x59, 0x26, 0xd5, 0x3e, 0x6d, 0xb9, 0x9b, 0x4c, 0xb2, 0x1e, 0x3c, 0x9e, 0xe5,
+ 0xcd, 0x3f, 0x99, 0x74, 0x3c, 0xdc, 0xb7, 0xee, 0x42, 0xb6, 0x0a, 0x14, 0x5c, 0x06, 0x04, 0x7c,
+ 0x81, 0x35, 0x9f, 0x51, 0x05, 0x39, 0xd3, 0x6d, 0x30, 0xed, 0x0e, 0xc7, 0x53, 0xd3, 0x1e, 0x28,
+ 0x0b, 0xdf, 0x2b, 0xbf, 0x0b, 0x09, 0x79, 0x0f, 0x59, 0xe7, 0x55, 0x28, 0xa6, 0xfb, 0x29, 0x0a,
+ 0xa1, 0x27, 0x00, 0xae, 0x39, 0xc0, 0x06, 0x71, 0x5e, 0x63, 0x9b, 0x95, 0x79, 0x83, 0x2e, 0xbb,
+ 0x01, 0xd6, 0x29, 0x80, 0x0e, 0x80, 0x19, 0x86, 0x6f, 0xbd, 0xc5, 0xc1, 0x7e, 0x5d, 0xa3, 0x40,
+ 0xd7, 0x7a, 0x8b, 0xe7, 0x8f, 0xfd, 0x28, 0x11, 0x7a, 0xf4, 0xd4, 0x78, 0x78, 0xe8, 0xe8, 0x5d,
+ 0xd8, 0x66, 0xfb, 0x27, 0x22, 0x94, 0x4f, 0xe6, 0x26, 0x85, 0x3b, 0xa1, 0x58, 0xe5, 0x73, 0xd8,
+ 0xa5, 0xfb, 0xb4, 0x13, 0xae, 0xea, 0xe9, 0x42, 0x7f, 0x0e, 0x5b, 0xd3, 0xfd, 0x6d, 0x5c, 0x59,
+ 0x8c, 0x92, 0x86, 0xb2, 0x39, 0x45, 0x4f, 0x2c, 0xe2, 0x2b, 0xdf, 0x42, 0x39, 0xf6, 0xfb, 0x87,
+ 0x9d, 0x07, 0x69, 0xe7, 0x62, 0x96, 0x73, 0x17, 0x4a, 0xd4, 0x79, 0x77, 0xdc, 0x8f, 0x34, 0xd5,
+ 0xfd, 0x9e, 0x2b, 0x50, 0xf0, 0xd9, 0xe7, 0x41, 0xb8, 0x81, 0x95, 0xc1, 0x98, 0xcb, 0x0e, 0xe7,
+ 0x09, 0x65, 0x8c, 0xec, 0x8e, 0xf6, 0x17, 0xa6, 0xff, 0xc3, 0xec, 0x9c, 0x8b, 0xd7, 0x5e, 0xb8,
+ 0xb7, 0xf6, 0x62, 0xbc, 0xf6, 0xca, 0xcf, 0x70, 0xf0, 0x4f, 0xce, 0x69, 0x75, 0x8f, 0x93, 0xd5,
+ 0xfd, 0xff, 0xb4, 0xba, 0xd1, 0x9f, 0xb0, 0x4f, 0xe8, 0x6e, 0x5d, 0xbc, 0xd8, 0x97, 0xd9, 0x02,
+ 0xfe, 0xf5, 0xb8, 0xbe, 0x84, 0xfd, 0x6c, 0xbf, 0x0b, 0xce, 0xeb, 0x37, 0x50, 0x6e, 0x60, 0xd2,
+ 0x9a, 0x1e, 0x92, 0x0b, 0xdc, 0x0a, 0xe7, 0x6a, 0xfc, 0x10, 0x50, 0xc2, 0x37, 0x15, 0x37, 0xdb,
+ 0xd6, 0x42, 0x6c, 0x5b, 0x97, 0x60, 0xbb, 0x81, 0x09, 0xbd, 0x2f, 0x85, 0x95, 0x57, 0x7e, 0x11,
+ 0x60, 0x73, 0x86, 0xd1, 0x1f, 0x7f, 0xc2, 0x6f, 0xa2, 0x61, 0x5c, 0xcf, 0xa6, 0x71, 0xc5, 0x3e,
+ 0x3b, 0x62, 0x8f, 0xaa, 0x4d, 0xbc, 0x09, 0xbf, 0x96, 0xfa, 0x8f, 0x8f, 0x01, 0x66, 0x20, 0x92,
+ 0x20, 0xf7, 0x1a, 0x4f, 0x98, 0x80, 0x75, 0x8d, 0x3e, 0xd2, 0xc5, 0x74, 0x6b, 0x0e, 0xc7, 0xbc,
+ 0x83, 0xf2, 0x1a, 0x37, 0x3e, 0x15, 0x8f, 0x05, 0xaa, 0xab, 0x8b, 0xc9, 0x97, 0xb7, 0x91, 0x8b,
+ 0xea, 0x0e, 0x94, 0x1a, 0x11, 0x88, 0x8b, 0x7d, 0x07, 0x56, 0x03, 0x04, 0xc9, 0xb0, 0x6a, 0xf9,
+ 0xa7, 0x43, 0x6c, 0xf2, 0x76, 0x5d, 0xd3, 0x42, 0x53, 0xf9, 0x43, 0x80, 0x02, 0x5f, 0x2f, 0xff,
+ 0xe5, 0xfb, 0xe8, 0x2b, 0x28, 0xf0, 0x3e, 0x5a, 0xf6, 0x89, 0xfd, 0x41, 0xf4, 0x84, 0x8d, 0xcc,
+ 0x5d, 0x16, 0x8f, 0xa2, 0x00, 0x9c, 0x59, 0x1e, 0x2d, 0xa8, 0x85, 0x7d, 0x5a, 0x40, 0x4c, 0x6b,
+ 0xcb, 0x3a, 0x63, 0x5d, 0xe3, 0xc6, 0x61, 0x0b, 0x60, 0x96, 0x25, 0x84, 0x60, 0xeb, 0xb2, 0xdd,
+ 0xec, 0x5d, 0xa8, 0xc6, 0x99, 0xfa, 0xb2, 0xde, 0x6b, 0xea, 0xd2, 0x23, 0x54, 0x06, 0x29, 0xc0,
+ 0xf4, 0xf6, 0xc5, 0x49, 0x57, 0x6f, 0xb7, 0x54, 0x49, 0x40, 0x15, 0x40, 0x01, 0xfa, 0xb5, 0x71,
+ 0xa6, 0x36, 0x55, 0x5d, 0x35, 0xea, 0xba, 0x24, 0x1e, 0x6a, 0x50, 0x8c, 0xfc, 0x6d, 0x41, 0x1b,
+ 0xb0, 0xd6, 0xd5, 0xeb, 0xba, 0x6a, 0x68, 0xaf, 0xa4, 0x47, 0x48, 0x86, 0x32, 0xb7, 0x4e, 0xdb,
+ 0x17, 0x9d, 0xfa, 0xa9, 0x7e, 0xde, 0x6e, 0x19, 0x5d, 0xed, 0x54, 0x12, 0xd0, 0x01, 0xec, 0xa5,
+ 0xde, 0xe8, 0x75, 0xad, 0xa1, 0xea, 0x92, 0x78, 0x55, 0x60, 0x7f, 0x8c, 0x3f, 0xfa, 0x3b, 0x00,
+ 0x00, 0xff, 0xff, 0x2f, 0xb4, 0x5f, 0x6a, 0x26, 0x0f, 0x00, 0x00,
+}
diff --git a/go/swift-rpc-losf/rpc.go b/go/swift-rpc-losf/rpc.go
new file mode 100644
index 000000000..622ad8c0e
--- /dev/null
+++ b/go/swift-rpc-losf/rpc.go
@@ -0,0 +1,1642 @@
+// Copyright (c) 2010-2012 OpenStack Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// RPC functions
+//
+// TODO: the naming here is not consistent with the Python code.
+
+package main
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/alecuyer/statsd/v2"
+ "github.com/golang/protobuf/proto"
+ "github.com/openstack/swift-rpc-losf/codes"
+ pb "github.com/openstack/swift-rpc-losf/proto"
+ "github.com/openstack/swift-rpc-losf/status"
+ "github.com/sirupsen/logrus"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "os"
+ "path"
+ "strings"
+ "sync"
+ "time"
+)
+
+type server struct {
+ kv KV
+ httpServer *http.Server
+
+ // DB state (whether the KV is in sync with the volumes on disk)
+ isClean bool
+
+ diskPath string // full path to mountpoint
+ diskName string // disk name, without the path
+ socketPath string // full path to the socket
+
+ // statsd client, used as in Swift
+ statsd_c *statsd.Client
+
+ // channel used to signal that the server should stop
+ stopChan chan os.Signal
+}
+
+// The following consts are used as key prefixes for the different record types in the KV
+
+// prefix for "volumes" (large file to which we write objects)
+const volumePrefix = 'd'
+
+// prefix for "objects" ("vfile" in the python code, would be a POSIX file on a regular backend)
+const objectPrefix = 'o'
+
+// Used when a new file is created with the same name as an existing file.
+// TODO: deprecate this. As discussed in https://review.openstack.org/#/c/162243, overwriting an
+// existing file never seemed like a good idea and was only done to mimic the existing renamer()
+// behavior: we have no way to know whether the new file is "better" than the existing one.
+const deleteQueuePrefix = 'q'
+
+// Quarantined objects
+const quarantinePrefix = 'r'
+
+// stats stored in the KV
+const statsPrefix = 's'
+
+// max object key length, in ASCII format.
+const maxObjKeyLen = 96
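+
+// Illustrative sketch of how these prefixes are used (assumes the KV interface
+// and the encoding helpers defined elsewhere in this package, e.g.
+// EncodeVolumeKey in encoding.go):
+//
+//   key := EncodeVolumeKey(3)                 // binary key for volume index 3
+//   value, err := s.kv.Get(volumePrefix, key) // lives in the 'd' keyspace
+//
+// Object entries live under objectPrefix ('o'), quarantined objects under
+// quarantinePrefix ('r'), and so on; the one-byte prefix keeps each record
+// type in its own contiguous key range.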
+
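+// rpcFunc is the signature shared by the RPC handlers below: each takes the
+// serialized protobuf request bytes and returns serialized reply bytes,
+// leaving message (de)serialization to the handler itself.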
+type rpcFunc func(*server, context.Context, *[]byte) (*[]byte, error)
+
+// RegisterVolume registers a new volume in the KV, given its index number and starting offset.
+// It returns an error if the volume index already exists.
+func RegisterVolume(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.RegisterVolumeRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("RegisterVolume failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{"Function": "RegisterVolume", "Partition": in.Partition, "Type": in.Type,
+ "VolumeIndex": in.VolumeIndex, "Offset": in.Offset, "State": in.State})
+ reqlog.Debug("RPC Call")
+
+ if !in.RepairTool && !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ key := EncodeVolumeKey(in.VolumeIndex)
+
+ // Does the volume already exist?
+ value, err := s.kv.Get(volumePrefix, key)
+ if err != nil {
+ reqlog.Errorf("unable to check for existing volume key: %v", err)
+ s.statsd_c.Increment("register_volume.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to check for existing volume key")
+ }
+
+ if value != nil {
+ reqlog.Info("volume index already exists in db")
+ s.statsd_c.Increment("register_volume.ok")
+ return nil, status.Errorf(codes.AlreadyExists, "volume index already exists in db")
+ }
+
+ // Register the volume
+ usedSpace := int64(0)
+ value = EncodeVolumeValue(int64(in.Partition), int32(in.Type), int64(in.Offset), usedSpace, int64(in.State))
+
+ err = s.kv.Put(volumePrefix, key, value)
+ if err != nil {
+ reqlog.Errorf("failed to Put new volume entry: %v", err)
+ s.statsd_c.Increment("register_volume.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to register new volume")
+ }
+ s.statsd_c.Increment("register_volume.ok")
+
+ out, err := proto.Marshal(&pb.RegisterVolumeReply{})
+ if err != nil {
+ reqlog.Errorf("failed to serialize reply for new volume entry: %v", err)
+ return nil, status.Errorf(codes.Unavailable, "unable to serialize reply for new volume entry: %v", err)
+ }
+ return &out, nil
+}
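+
+// Illustrative round trip for the handler above (a sketch, not code from this
+// package; assumes a *server s wired to an open KV and a statsd client):
+//
+//   req, _ := proto.Marshal(&pb.RegisterVolumeRequest{
+//       Partition: 42, Type: pb.VolumeType_VOLUME_DEFAULT, VolumeIndex: 3,
+//       Offset: 4096,
+//   })
+//   out, err := RegisterVolume(s, context.Background(), &req)
+//   if err == nil {
+//       reply := &pb.RegisterVolumeReply{}
+//       err = proto.Unmarshal(*out, reply)
+//   }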
+
+// UnregisterVolume deletes a volume entry from the KV.
+func UnregisterVolume(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.UnregisterVolumeRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("UnregisterVolume failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{"Function": "UnregisterVolume", "VolumeIndex": in.Index})
+ reqlog.Debug("RPC Call")
+
+ if !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ key := EncodeVolumeKey(in.Index)
+
+ // Check whether the key exists
+ value, err := s.kv.Get(volumePrefix, key)
+ if err != nil {
+ reqlog.Errorf("unable to check for volume key: %v", err)
+ s.statsd_c.Increment("unregister_volume.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to check for volume key")
+ }
+
+ if value == nil {
+ reqlog.Info("volume index does not exist in db")
+ s.statsd_c.Increment("unregister_volume.ok")
+ return nil, status.Errorf(codes.NotFound, "volume index does not exist in db")
+ }
+
+ // Key exists, delete it
+ err = s.kv.Delete(volumePrefix, key)
+ if err != nil {
+ reqlog.Errorf("failed to Delete volume entry: %v", err)
+ s.statsd_c.Increment("unregister_volume.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to delete volume entry")
+ }
+
+ s.statsd_c.Increment("unregister_volume.ok")
+ return serializePb(&pb.UnregisterVolumeReply{})
+}
+
+// UpdateVolumeState will modify an existing volume state
+func UpdateVolumeState(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.UpdateVolumeStateRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("UpdateVolumeState failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{"Function": "UpdateVolumeState", "VolumeIndex": in.VolumeIndex, "State": in.State})
+ reqlog.Debug("RPC Call")
+
+ if !in.RepairTool && !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ key := EncodeVolumeKey(in.VolumeIndex)
+ value, err := s.kv.Get(volumePrefix, key)
+ if err != nil {
+ reqlog.Errorf("unable to retrieve volume key: %v", err)
+ s.statsd_c.Increment("update_volume_state.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to retrieve volume key")
+ }
+
+ if value == nil {
+ reqlog.Info("volume index does not exist in db")
+ s.statsd_c.Increment("update_volume_state.ok")
+ return nil, status.Errorf(codes.NotFound, "volume index does not exist in db")
+ }
+
+	partition, dfType, offset, usedSpace, state, err := DecodeVolumeValue(value)
+	if err != nil {
+		reqlog.Errorf("failed to decode volume value: %v", err)
+		s.statsd_c.Increment("update_volume_state.fail")
+		return nil, status.Errorf(codes.Internal, "failed to decode volume value")
+	}
+	reqlog.WithFields(logrus.Fields{"current_state": state}).Info("updating state")
+
+ value = EncodeVolumeValue(partition, dfType, offset, usedSpace, int64(in.State))
+ err = s.kv.Put(volumePrefix, key, value)
+ if err != nil {
+ reqlog.Errorf("failed to Put updated volume entry: %v", err)
+ s.statsd_c.Increment("update_volume_state.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to update volume state")
+ }
+ s.statsd_c.Increment("update_volume_state.ok")
+
+ out, err := proto.Marshal(&pb.UpdateVolumeStateReply{})
+ if err != nil {
+ reqlog.Errorf("failed to serialize reply for update volume: %v", err)
+ return nil, status.Errorf(codes.Unavailable, "unable to serialize reply for update volume: %v", err)
+ }
+ return &out, nil
+}
+
+// GetVolume returns information about a volume
+func GetVolume(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.GetVolumeRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("GetVolume failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+	reqlog := log.WithFields(logrus.Fields{"Function": "GetVolume", "VolumeIndex": in.Index})
+ reqlog.Debug("RPC Call")
+
+ if !in.RepairTool && !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ key := EncodeVolumeKey(in.Index)
+ value, err := s.kv.Get(volumePrefix, key)
+ if err != nil {
+ reqlog.Errorf("Failed to get volume key %d in KV: %v", key, err)
+ s.statsd_c.Increment("get_volume.fail")
+ return nil, status.Errorf(codes.Internal, "Failed to get volume key in KV")
+ }
+
+ if value == nil {
+ reqlog.Info("No such Volume")
+ s.statsd_c.Increment("get_volume.ok")
+ return nil, status.Errorf(codes.NotFound, "No such Volume")
+ }
+
+ partition, dfType, nextOffset, _, state, err := DecodeVolumeValue(value)
+ if err != nil {
+ reqlog.Errorf("Failed to decode Volume value: %v", err)
+ s.statsd_c.Increment("get_volume.fail")
+ return nil, status.Errorf(codes.Internal, "Failed to decode Volume value")
+ }
+
+ s.statsd_c.Increment("get_volume.ok")
+
+ pb_volume := pb.GetVolumeReply{VolumeIndex: in.Index, VolumeType: pb.VolumeType(dfType), VolumeState: uint32(state),
+ Partition: uint32(partition), NextOffset: uint64(nextOffset)}
+ out, err := proto.Marshal(&pb_volume)
+ if err != nil {
+ reqlog.Errorf("failed to serialize reply for get volume: %v", err)
+ return nil, status.Errorf(codes.Unavailable, "unable to serialize reply for get volume: %v", err)
+ }
+ return &out, nil
+}
+
+// ListVolumes will return all volumes of the given type, for the given partition.
+// If GetAllVolumes is true, all volumes are listed (all types, all partitions).
+// Currently this scans all volumes in the KV. Likely fast enough as long as the KV is cached.
+// If it becomes a performance issue, we may want to add an in-memory cache indexed by partition.
+func ListVolumes(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.ListVolumesRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("ListVolumes failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{"Function": "ListVolumes", "Partition": in.Partition, "Type": in.Type})
+ reqlog.Debug("RPC Call")
+
+ if !in.RepairTool && !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ response := &pb.ListVolumesReply{}
+
+ // Iterate over volumes and return the ones that match the request
+ it := s.kv.NewIterator(volumePrefix)
+ defer it.Close()
+
+ for it.SeekToFirst(); it.Valid(); it.Next() {
+ idx, err := DecodeVolumeKey(it.Key())
+ if err != nil {
+ reqlog.Errorf("failed to decode volume key: %v", err)
+ s.statsd_c.Increment("list_volumes.fail")
+ return nil, status.Errorf(codes.Internal, "unable to decode volume value")
+ }
+
+ partition, dfType, nextOffset, _, state, err := DecodeVolumeValue(it.Value())
+ if err != nil {
+ reqlog.Errorf("failed to decode volume value: %v", err)
+ s.statsd_c.Increment("list_volumes.fail")
+ return nil, status.Errorf(codes.Internal, "unable to decode volume value")
+ }
+ if uint32(partition) == in.Partition && pb.VolumeType(dfType) == in.Type {
+ response.Volumes = append(response.Volumes, &pb.Volume{VolumeIndex: idx,
+ VolumeType: pb.VolumeType(in.Type), VolumeState: uint32(state),
+ Partition: uint32(partition), NextOffset: uint64(nextOffset)})
+ }
+ }
+
+ s.statsd_c.Increment("list_volumes.ok")
+ out, err := proto.Marshal(response)
+ if err != nil {
+ reqlog.Errorf("failed to serialize reply for list volumes: %v", err)
+ return nil, status.Errorf(codes.Unavailable, "unable to serialize reply for list volumes: %v", err)
+ }
+ return &out, nil
+}
+
+// RegisterObject registers a new object to the kv.
+func RegisterObject(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.RegisterObjectRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("RegisterObject failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{
+ "Function": "RegisterObject",
+ "Name": fmt.Sprintf("%s", in.Name),
+ "DiskPath": s.diskPath,
+ "VolumeIndex": in.VolumeIndex,
+ "Offset": in.Offset,
+ "NextOffset": in.NextOffset,
+ "Length": in.NextOffset - in.Offset, // debug
+ })
+ reqlog.Debug("RPC Call")
+
+ if !in.RepairTool && !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ // Check if volume exists
+ volumeKey := EncodeVolumeKey(in.VolumeIndex)
+ volumeValue, err := s.kv.Get(volumePrefix, volumeKey)
+ if err != nil {
+ reqlog.Errorf("unable to check for existing volume key: %v", err)
+ s.statsd_c.Increment("register_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to check for existing volume key")
+ }
+
+ if volumeValue == nil {
+ reqlog.Info("volume index does not exist in db")
+ s.statsd_c.Increment("register_object.ok")
+ return nil, status.Errorf(codes.FailedPrecondition, "volume index does not exist in db")
+ }
+
+	partition, volumeType, _, currentUsedSpace, state, err := DecodeVolumeValue(volumeValue)
+	if err != nil {
+		reqlog.Errorf("failed to decode volume value: %v", err)
+		s.statsd_c.Increment("register_object.fail")
+		return nil, status.Errorf(codes.Internal, "failed to decode volume value")
+	}
+
+ objectKey, err := EncodeObjectKey(in.Name)
+ if err != nil {
+ reqlog.Errorf("unable to encode object key: %v", err)
+ s.statsd_c.Increment("register_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to encode object key")
+ }
+
+ objectValue := EncodeObjectValue(in.VolumeIndex, in.Offset)
+
+	// If an object already exists with the same name, refuse to overwrite it. On the regular
+	// file backend, a rename() would implicitly reclaim the old file's space; here, overwriting
+	// the key would leak the space used by the previous copy until compaction.
+	// Note that this check-then-commit sequence is not atomic across concurrent requests, so a
+	// racing pair of registrations for the same name could still leak space until compaction.
+
+ existingValue, err := s.kv.Get(objectPrefix, objectKey)
+ if err != nil {
+ reqlog.Errorf("unable to check for existing object: %v", err)
+ s.statsd_c.Increment("register_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to retrieve object")
+ }
+
+ if existingValue != nil {
+ reqlog.Info("object already exists")
+ s.statsd_c.Increment("register_object.ok")
+ return nil, status.Errorf(codes.AlreadyExists, "object already exists")
+ }
+
+ // Update volume offset
+ volumeNewValue := EncodeVolumeValue(int64(partition), volumeType, int64(in.NextOffset), int64(currentUsedSpace), state)
+ wb := s.kv.NewWriteBatch()
+ defer wb.Close()
+ wb.Put(volumePrefix, volumeKey, volumeNewValue)
+ wb.Put(objectPrefix, objectKey, objectValue)
+
+ err = wb.Commit()
+ if err != nil {
+ reqlog.Errorf("failed to Put new volume value and new object entry: %v", err)
+ s.statsd_c.Increment("register_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to update volume and register new object")
+ }
+
+ s.statsd_c.Increment("register_object.ok")
+
+ out, err := proto.Marshal(&pb.RegisterObjectReply{})
+ if err != nil {
+ reqlog.Errorf("failed to serialize reply: %v", err)
+ return nil, status.Errorf(codes.Unavailable, "unable to serialize reply: %v", err)
+ }
+ return &out, nil
+}
+
+// UnregisterObject removes an object entry from the kv.
+func UnregisterObject(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.UnregisterObjectRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("UnregisterObject failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+ reqlog := log.WithFields(logrus.Fields{
+ "Function": "UnregisterObject",
+ "Name": fmt.Sprintf("%s", in.Name),
+ "DiskPath": s.diskPath,
+ })
+ reqlog.Debug("RPC Call")
+
+ if !in.RepairTool && !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ objectKey, err := EncodeObjectKey(in.Name)
+ if err != nil {
+ reqlog.Errorf("unable to encode object key: %v", err)
+ s.statsd_c.Increment("unregister_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to encode object key")
+ }
+
+ value, err := s.kv.Get(objectPrefix, objectKey)
+ if err != nil {
+ reqlog.Errorf("unable to retrieve object: %v", err)
+ s.statsd_c.Increment("unregister_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to retrieve object")
+ }
+
+ if value == nil {
+ reqlog.Debug("object does not exist")
+ s.statsd_c.Increment("unregister_object.ok")
+ return nil, status.Errorf(codes.NotFound, "%s", in.Name)
+ }
+
+ // Delete key
+ err = s.kv.Delete(objectPrefix, objectKey)
+ if err != nil {
+ reqlog.Errorf("failed to Delete key: %v", err)
+ s.statsd_c.Increment("unregister_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to unregister object")
+ }
+
+ s.statsd_c.Increment("unregister_object.ok")
+ out, err := proto.Marshal(&pb.UnregisterObjectReply{})
+ if err != nil {
+		reqlog.Errorf("failed to serialize reply for unregister object: %v", err)
+		return nil, status.Errorf(codes.Unavailable, "unable to serialize reply for unregister object: %v", err)
+ }
+ return &out, nil
+}
+
+// RenameObject changes an object key in the kv. (used for erasure code)
+func RenameObject(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.RenameObjectRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{
+ "Function": "RenameObject",
+ "Name": fmt.Sprintf("%s", in.Name),
+ "NewName": fmt.Sprintf("%s", in.NewName),
+ })
+ reqlog.Debug("RPC Call")
+
+ if !in.RepairTool && !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ objectKey, err := EncodeObjectKey(in.Name)
+ if err != nil {
+ reqlog.Errorf("unable to encode object key: %v", err)
+ s.statsd_c.Increment("rename_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to encode object key")
+ }
+
+ objectNewKey, err := EncodeObjectKey(in.NewName)
+ if err != nil {
+ reqlog.Errorf("unable to encode new object key: %v", err)
+ s.statsd_c.Increment("rename_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to encode object key")
+ }
+
+ value, err := s.kv.Get(objectPrefix, objectKey)
+ if err != nil {
+ reqlog.Errorf("unable to retrieve object: %v", err)
+ s.statsd_c.Increment("rename_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to retrieve object")
+ }
+
+ if value == nil {
+ reqlog.Debug("object does not exist")
+ s.statsd_c.Increment("rename_object.ok")
+ return nil, status.Errorf(codes.NotFound, "%s", in.Name)
+ }
+
+ // Delete old entry and create a new one
+ wb := s.kv.NewWriteBatch()
+ defer wb.Close()
+ wb.Delete(objectPrefix, objectKey)
+ wb.Put(objectPrefix, objectNewKey, value)
+
+ err = wb.Commit()
+ if err != nil {
+ reqlog.Errorf("failed to commit WriteBatch for rename: %v", err)
+ s.statsd_c.Increment("rename_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "failed to commit WriteBatch for rename")
+ }
+
+ s.statsd_c.Increment("rename_object.ok")
+
+ out, err := proto.Marshal(&pb.RenameObjectReply{})
+ if err != nil {
+ reqlog.Errorf("failed to serialize reply: %v", err)
+ return nil, status.Errorf(codes.Unavailable, "unable to serialize reply: %v", err)
+ }
+ return &out, nil
+}
+
+// LoadObject returns an object information
+func LoadObject(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.LoadObjectRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{
+ "Function": "LoadObject",
+ "Name": fmt.Sprintf("%s", in.Name),
+ "IsQuarantined": fmt.Sprintf("%t", in.IsQuarantined),
+ })
+ reqlog.Debug("RPC Call")
+
+ var prefix byte
+
+ if !in.RepairTool && !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ objectKey, err := EncodeObjectKey(in.Name)
+ if err != nil {
+ reqlog.Errorf("unable to encode object key: %v", err)
+ s.statsd_c.Increment("load_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to encode object key")
+ }
+
+ if in.IsQuarantined {
+ prefix = quarantinePrefix
+ } else {
+ prefix = objectPrefix
+ }
+ reqlog.Debugf("is quarantined: %v", in.IsQuarantined)
+ value, err := s.kv.Get(prefix, objectKey)
+ if err != nil {
+ reqlog.Errorf("unable to retrieve object: %v", err)
+ s.statsd_c.Increment("load_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to retrieve object")
+ }
+
+ if value == nil {
+ reqlog.Debug("object does not exist")
+ s.statsd_c.Increment("load_object.ok")
+ return nil, status.Errorf(codes.NotFound, "%s", in.Name)
+ }
+
+ volumeIndex, offset, err := DecodeObjectValue(value)
+ if err != nil {
+ reqlog.Errorf("failed to decode object value: %v", err)
+ s.statsd_c.Increment("load_object.fail")
+ return nil, status.Errorf(codes.Internal, "unable to read object")
+ }
+
+ s.statsd_c.Increment("load_object.ok")
+
+ out, err := proto.Marshal(&pb.LoadObjectReply{Name: in.Name, VolumeIndex: volumeIndex, Offset: offset})
+ if err != nil {
+ reqlog.Errorf("failed to serialize reply: %v", err)
+ return nil, status.Errorf(codes.Unavailable, "unable to serialize reply: %v", err)
+ }
+ return &out, nil
+}
+
+// QuarantineObject moves an object entry from the object namespace to the quarantine namespace.
+func QuarantineObject(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.QuarantineObjectRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{
+ "Function": "QuarantineObject",
+ "Name": fmt.Sprintf("%s", in.Name),
+ })
+ reqlog.Debug("RPC Call")
+
+ if !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ objectKey, err := EncodeObjectKey(in.Name)
+ if err != nil {
+ reqlog.Errorf("unable to encode object key: %v", err)
+ s.statsd_c.Increment("quarantine_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to encode object key")
+ }
+
+ value, err := s.kv.Get(objectPrefix, objectKey)
+ if err != nil {
+ reqlog.Errorf("unable to retrieve object: %v", err)
+ s.statsd_c.Increment("quarantine_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to retrieve object")
+ }
+
+ if value == nil {
+ reqlog.Debug("object does not exist")
+ s.statsd_c.Increment("quarantine_object.ok")
+ return nil, status.Errorf(codes.NotFound, "%s", in.Name)
+ }
+
+ // Add quarantine key, delete obj key
+ wb := s.kv.NewWriteBatch()
+ defer wb.Close()
+ // TODO: check here if an ohash already exists with the same name. Put files in the same dir, or make a new one ? (current swift code
+ // appears to add an extension in that case. This will require a new format (encode/decode) in the KV)
+ // Also check if full key already exists.
+ wb.Put(quarantinePrefix, objectKey, value)
+ wb.Delete(objectPrefix, objectKey)
+ err = wb.Commit()
+ if err != nil {
+ reqlog.Errorf("failed to quarantine object: %v", err)
+ s.statsd_c.Increment("quarantine_object.fail")
+ return nil, status.Error(codes.Unavailable, "unable to quarantine object")
+ }
+
+ s.statsd_c.Increment("quarantine_object.ok")
+
+ out, err := proto.Marshal(&pb.QuarantineObjectReply{})
+ if err != nil {
+ reqlog.Errorf("failed to serialize reply: %v", err)
+ return nil, status.Errorf(codes.Unavailable, "unable to serialize reply: %v", err)
+ }
+ return &out, nil
+}
+
+// UnquarantineObject moves an object entry back from the quarantine namespace to the object namespace.
+func UnquarantineObject(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.UnquarantineObjectRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{
+ "Function": "UnquarantineObject",
+ "Name": fmt.Sprintf("%s", in.Name),
+ })
+ reqlog.Debug("RPC Call")
+
+ if !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ objectKey, err := EncodeObjectKey(in.Name)
+ if err != nil {
+ reqlog.Errorf("unable to encode object key: %v", err)
+ s.statsd_c.Increment("unquarantine_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to encode object key")
+ }
+
+ value, err := s.kv.Get(quarantinePrefix, objectKey)
+ if err != nil {
+ reqlog.Errorf("unable to retrieve object: %v", err)
+ s.statsd_c.Increment("unquarantine_object.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to retrieve object")
+ }
+
+ if value == nil {
+ reqlog.Debug("object does not exist")
+ s.statsd_c.Increment("unquarantine_object.ok")
+ return nil, status.Errorf(codes.NotFound, "%s", in.Name)
+ }
+
+ // Add object key, delete quarantine key
+ wb := s.kv.NewWriteBatch()
+ defer wb.Close()
+ wb.Put(objectPrefix, objectKey, value)
+ wb.Delete(quarantinePrefix, objectKey)
+ err = wb.Commit()
+ if err != nil {
+ reqlog.Errorf("failed to unquarantine object: %v", err)
+ s.statsd_c.Increment("unquarantine_object.fail")
+ return nil, status.Error(codes.Unavailable, "unable to unquarantine object")
+ }
+
+ s.statsd_c.Increment("unquarantine_object.ok")
+
+ out, err := proto.Marshal(&pb.UnquarantineObjectReply{})
+ if err != nil {
+ reqlog.Errorf("failed to serialize reply: %v", err)
+ return nil, status.Errorf(codes.Unavailable, "unable to serialize reply: %v", err)
+ }
+ return &out, nil
+}
+
+// LoadObjectsByPrefix returns the list of objects with the given prefix.
+// In practice this is used to emulate the object hash directory that swift
+// would create with the regular diskfile backend.
+func LoadObjectsByPrefix(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.LoadObjectsByPrefixRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{
+ "Function": "LoadObjectsByPrefix",
+ "Prefix": fmt.Sprintf("%s", in.Prefix),
+ })
+ reqlog.Debug("RPC Call")
+
+ if !in.RepairTool && !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+	// The prefix must be 32 characters for this to work (because we now encode the md5 hash,
+	// see EncodeObjectKey in encoding.go).
+ if len(in.Prefix) != 32 {
+ reqlog.Error("prefix len != 32")
+ s.statsd_c.Increment("load_objects_by_prefix.fail")
+ return nil, status.Errorf(codes.Internal, "prefix len != 32")
+ }
+
+ prefix, err := EncodeObjectKey(in.Prefix)
+ if err != nil {
+ reqlog.Errorf("unable to encode object prefix: %v", err)
+ s.statsd_c.Increment("load_objects_by_prefix.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to encode object prefix")
+ }
+
+ it := s.kv.NewIterator(objectPrefix)
+ defer it.Close()
+
+ response := &pb.LoadObjectsByPrefixReply{}
+
+ // adds one byte because of prefix. Otherwise len(prefix) would be len(prefix)-1
+ for it.Seek(prefix); it.Valid() && len(prefix) <= len(it.Key()) && bytes.Equal(prefix, it.Key()[:len(prefix)]); it.Next() {
+
+ // Decode value
+ volumeIndex, offset, err := DecodeObjectValue(it.Value())
+ if err != nil {
+ reqlog.Errorf("failed to decode object value: %v", err)
+ s.statsd_c.Increment("load_objects_by_prefix.fail")
+ return nil, status.Errorf(codes.Internal, "unable to read object")
+ }
+
+ key := make([]byte, 32+len(it.Key()[16:]))
+ err = DecodeObjectKey(it.Key(), key)
+ if err != nil {
+ reqlog.Errorf("failed to decode object key: %v", err)
+ s.statsd_c.Increment("load_objects_by_prefix.fail")
+ return nil, status.Errorf(codes.Internal, "unable to decode object key")
+ }
+ response.Objects = append(response.Objects, &pb.Object{Name: key, VolumeIndex: volumeIndex, Offset: offset})
+ }
+
+ s.statsd_c.Increment("load_objects_by_prefix.ok")
+
+ return serializePb(response)
+}
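+
+// For illustration (names are hypothetical): a request with prefix
+// "d41d8cd98f00b204e9800998ecf8427e" would return entries such as
+//
+//	d41d8cd98f00b204e9800998ecf8427e1515750801.05135#4#d.data
+//	d41d8cd98f00b204e9800998ecf8427e1515750802.02341.meta
+//
+// that is, what a listdir() of the object hash directory would show with the
+// regular diskfile backend.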
+
+// LoadObjectsByVolume returns a list of all objects within a volume, with pagination.
+// Quarantined, if true, will return only quarantined objects; if false, non-quarantined objects.
+// PageToken is the object name to start from, as returned from a previous call in the
+// NextPageToken field. If empty, the iterator will start from the first object in the volume.
+// PageSize is the maximum count of items to return. If zero, the server will pick a reasonable limit.
+func LoadObjectsByVolume(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.LoadObjectsByVolumeRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{
+ "Function": "LoadObjectsByVolume",
+ "VolumeIndex": in.Index,
+ "PageToken": in.PageToken,
+ "PageSize": in.PageSize,
+ })
+ reqlog.Debug("RPC Call")
+
+ if !in.RepairTool && !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ limit := in.PageSize
+ if limit == 0 {
+ reqlog.Debug("page_size was not specified, set it to 10000")
+ limit = 10000
+ }
+
+	pageToken := in.PageToken
+	if len(pageToken) == 0 {
+		pageToken = []byte(strings.Repeat("0", 32))
+	}
+
+ prefix, err := EncodeObjectKey(pageToken)
+ if err != nil {
+ reqlog.Errorf("unable to encode object prefix: %v", err)
+ s.statsd_c.Increment("load_objects_by_volume.fail")
+ return nil, status.Errorf(codes.Internal, "unable to encode object prefix")
+ }
+
+ // Return either quarantined or "regular" objects
+ var it Iterator
+ if in.Quarantined {
+ it = s.kv.NewIterator(quarantinePrefix)
+ } else {
+ it = s.kv.NewIterator(objectPrefix)
+ }
+ defer it.Close()
+
+ response := &pb.LoadObjectsByVolumeReply{}
+
+ // Objects are not indexed by volume. We have to scan the whole KV and examine each value.
+ // It shouldn't matter as this is only used for compaction, and each object will have to be copied.
+ // Disk activity dwarfs CPU usage. (for spinning rust anyway, but SSDs?)
+ count := uint32(0)
+ for it.Seek(prefix); it.Valid() && count < limit; it.Next() {
+ volumeIndex, offset, err := DecodeObjectValue(it.Value())
+ if err != nil {
+ reqlog.Errorf("failed to decode object value: %v", err)
+ s.statsd_c.Increment("load_objects_by_volume.fail")
+ return nil, status.Errorf(codes.Internal, "unable to read object")
+ }
+
+ if volumeIndex == in.Index {
+ key := make([]byte, 32+len(it.Key()[16:]))
+ err = DecodeObjectKey(it.Key(), key)
+ if err != nil {
+ reqlog.Errorf("failed to decode object key: %v", err)
+				s.statsd_c.Increment("load_objects_by_volume.fail")
+ return nil, status.Errorf(codes.Internal, "unable to decode object key")
+ }
+ response.Objects = append(response.Objects, &pb.Object{Name: key, VolumeIndex: volumeIndex, Offset: offset})
+ count++
+ }
+ }
+
+ // Set NextPageToken if there is at least one ohash found in the same volume
+ for ; it.Valid(); it.Next() {
+ volumeIndex, _, err := DecodeObjectValue(it.Value())
+ if err != nil {
+ reqlog.Errorf("failed to decode object value: %v", err)
+ s.statsd_c.Increment("load_objects_by_volume.fail")
+ return nil, status.Errorf(codes.Internal, "unable to read object")
+ }
+
+		if volumeIndex == in.Index {
+			key := make([]byte, 32+len(it.Key()[16:]))
+			err = DecodeObjectKey(it.Key(), key)
+			if err != nil {
+				reqlog.Errorf("failed to decode object key: %v", err)
+				s.statsd_c.Increment("load_objects_by_volume.fail")
+				return nil, status.Errorf(codes.Internal, "unable to decode object key")
+			}
+			// key is freshly allocated on each iteration, so it is safe to hand off directly
+			response.NextPageToken = key
+			break
+		}
+
+ }
+ s.statsd_c.Increment("load_objects_by_volume.ok")
+ return serializePb(response)
+}
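+
+// Client-side pagination sketch (an illustration, not part of this commit): feed
+// NextPageToken back in as PageToken until it comes back empty.
+//
+//	req := &pb.LoadObjectsByVolumeRequest{Index: volIndex, PageSize: 1000}
+//	for {
+//		reply := loadObjectsByVolume(req) // hypothetical helper wrapping POST /load_objects_by_volume
+//		process(reply.Objects)
+//		if len(reply.NextPageToken) == 0 {
+//			break
+//		}
+//		req.PageToken = reply.NextPageToken
+//	}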
+
+// ListPartitions returns a list of partitions for which we have objects.
+// This is used to emulate a listdir() of partitions below the "objects" directory.
+func ListPartitions(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.ListPartitionsRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{
+ "Function": "ListPartitions",
+ "PartitionBits": in.PartitionBits,
+ })
+ reqlog.Debug("RPC Call")
+
+ if !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ var currentPartition uint64
+ var err error
+ var ohash []byte
+
+ // Partition bits
+ pBits := int(in.PartitionBits)
+
+ response := &pb.DirEntries{}
+
+ // Seek to first object key
+ it := s.kv.NewIterator(objectPrefix)
+ defer it.Close()
+ it.SeekToFirst()
+
+ // No object in the KV.
+ if !it.Valid() {
+ s.statsd_c.Increment("list_partitions.ok")
+ return serializePb(response)
+ }
+
+	// Extract the md5 hash
+	if len(it.Key()) < 16 {
+		reqlog.WithFields(logrus.Fields{"key": it.Key()}).Error("object key < 16")
+	} else {
+		ohash = make([]byte, 32+len(it.Key()[16:]))
+		err = DecodeObjectKey(it.Key()[:16], ohash)
+		if err != nil {
+			reqlog.Errorf("failed to decode object key: %v", err)
+			s.statsd_c.Increment("list_partitions.fail")
+			return nil, status.Errorf(codes.Internal, "unable to decode object key")
+		}
+		currentPartition, err = getPartitionFromOhash(ohash, pBits)
+		if err != nil {
+			s.statsd_c.Increment("list_partitions.fail")
+			return nil, err
+		}
+		response.Entry = append(response.Entry, fmt.Sprintf("%d", currentPartition))
+	}
+
+	maxPartition, err := getLastPartition(pBits)
+	if err != nil {
+		s.statsd_c.Increment("list_partitions.fail")
+		return nil, err
+	}
+
+ for currentPartition < maxPartition {
+ currentPartition++
+ firstKey, err := getEncodedObjPrefixFromPartition(currentPartition, pBits)
+ if err != nil {
+ s.statsd_c.Increment("list_partitions.fail")
+ return nil, err
+ }
+ nextFirstKey, err := getEncodedObjPrefixFromPartition(currentPartition+1, pBits)
+ if err != nil {
+ s.statsd_c.Increment("list_partitions.fail")
+ return nil, err
+ }
+
+		// Note: keys are logged here in their encoded (binary) form, not the ASCII form
+ reqlog.WithFields(logrus.Fields{"currentPartition": currentPartition,
+ "maxPartition": maxPartition,
+ "firstKey": firstKey,
+ "ohash": ohash,
+ "nextFirstKey": nextFirstKey}).Debug("In loop")
+
+ it.Seek(firstKey)
+ if !it.Valid() {
+ s.statsd_c.Increment("list_partitions.ok")
+ return serializePb(response)
+ }
+
+ if len(it.Key()) < 16 {
+ reqlog.WithFields(logrus.Fields{"key": it.Key()}).Error("object key < 16")
+ } else {
+ ohash = make([]byte, 32+len(it.Key()[16:]))
+ err = DecodeObjectKey(it.Key()[:16], ohash)
+ if err != nil {
+ reqlog.Errorf("failed to decode object key: %v", err)
+				s.statsd_c.Increment("list_partitions.fail")
+ return nil, status.Errorf(codes.Internal, "unable to decode object key")
+ }
+ // nextFirstKey is encoded, compare with encoded hash (16 first bits of the key)
+ if bytes.Compare(it.Key()[:16], nextFirstKey) > 0 {
+ // There was no key in currentPartition, find in which partition we are
+ currentPartition, err = getPartitionFromOhash(ohash, pBits)
+ if err != nil {
+ s.statsd_c.Increment("list_partitions.fail")
+ return nil, err
+ }
+ }
+ response.Entry = append(response.Entry, fmt.Sprintf("%d", currentPartition))
+ }
+ }
+
+ s.statsd_c.Increment("list_partitions.ok")
+ return serializePb(response)
+}
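+
+// Partition derivation sketch (an assumption; the actual logic lives in
+// getPartitionFromOhash): the partition is the top PartitionBits bits of the
+// 128-bit md5 object hash, e.g.:
+//
+//	h, _ := strconv.ParseUint(ohash[:16], 16, 64) // top 64 bits of the hash
+//	partition := h >> (64 - uint(pBits))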
+
+// ListPartition returns a list of suffixes within a partition
+func ListPartition(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.ListPartitionRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{
+ "Function": "ListPartition",
+ "Partition": in.Partition,
+ "PartitionBits": in.PartitionBits,
+ })
+ reqlog.Debug("RPC Call")
+
+ if !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ // Set to hold suffixes within partition
+ suffixSet := make(map[[3]byte]bool)
+ var suffix [3]byte
+
+ // Partition bits
+ pBits := int(in.PartitionBits)
+ partition := uint64(in.Partition)
+
+ response := &pb.DirEntries{}
+
+ firstKey, err := getEncodedObjPrefixFromPartition(partition, pBits)
+ if err != nil {
+ s.statsd_c.Increment("list_partition.fail")
+ return nil, err
+ }
+
+ // Seek to first key in partition, if any
+ it := s.kv.NewIterator(objectPrefix)
+ defer it.Close()
+
+ it.Seek(firstKey)
+ // No object in the KV
+ if !it.Valid() {
+ s.statsd_c.Increment("list_partition.ok")
+ return serializePb(response)
+ }
+
+ key := make([]byte, 32+len(it.Key()[16:]))
+ err = DecodeObjectKey(it.Key(), key)
+ if err != nil {
+ reqlog.Errorf("failed to decode object key: %v", err)
+		s.statsd_c.Increment("list_partition.fail")
+ return nil, status.Errorf(codes.Internal, "unable to decode object key")
+ }
+ currentPartition, err := getPartitionFromOhash(key, pBits)
+ if err != nil {
+ s.statsd_c.Increment("list_partition.fail")
+ return nil, err
+ }
+
+ // Get all suffixes in the partition
+ for currentPartition == partition {
+ // Suffix is the last three bytes of the object hash
+ copy(suffix[:], key[29:32])
+ suffixSet[suffix] = true
+ it.Next()
+ if !it.Valid() {
+ break
+ }
+ key = make([]byte, 32+len(it.Key()[16:]))
+ err = DecodeObjectKey(it.Key(), key)
+ if err != nil {
+ reqlog.Errorf("failed to decode object key: %v", err)
+			s.statsd_c.Increment("list_partition.fail")
+ return nil, status.Errorf(codes.Internal, "unable to decode object key")
+ }
+		currentPartition, err = getPartitionFromOhash(key, pBits)
+		if err != nil {
+			s.statsd_c.Increment("list_partition.fail")
+			return nil, err
+		}
+	}
+
+	// Build the response from the suffix set
+	for suffix := range suffixSet {
+		// %s on a [3]byte array would print element-wise; convert via a slice instead
+		response.Entry = append(response.Entry, string(suffix[:]))
+	}
+
+ s.statsd_c.Increment("list_partition.ok")
+ return serializePb(response)
+}
+
+// ListSuffix returns a list of object hashes below the partition and suffix
+func ListSuffix(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.ListSuffixRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{
+ "Function": "ListSuffix",
+ "Partition": in.Partition,
+ "Suffix": fmt.Sprintf("%s", in.Suffix),
+ "PartitionBits": in.PartitionBits,
+ })
+
+ if !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ execTimeSerie := fmt.Sprintf("list_suffix.runtime.%s", s.diskName)
+ defer s.statsd_c.NewTiming().Send(execTimeSerie)
+ reqlog.Debug("RPC Call")
+
+ lastOhash := make([]byte, 32)
+
+ pBits := int(in.PartitionBits)
+ partition := uint64(in.Partition)
+ suffix := in.Suffix
+
+ response := &pb.DirEntries{}
+
+ failSerie := fmt.Sprintf("list_suffix.fail.%s", s.diskName)
+ successSerie := fmt.Sprintf("list_suffix.ok.%s", s.diskName)
+ firstKey, err := getEncodedObjPrefixFromPartition(partition, pBits)
+ if err != nil {
+ s.statsd_c.Increment(failSerie)
+ return nil, err
+ }
+
+ // Seek to first key in partition, if any
+ it := s.kv.NewIterator(objectPrefix)
+ defer it.Close()
+
+ it.Seek(firstKey)
+ // No object in the KV
+ if !it.Valid() {
+ s.statsd_c.Increment(successSerie)
+ return serializePb(response)
+ }
+
+ // Allocate the slice with a capacity matching the length of the longest possible key
+ // We can then reuse it in the loop below. (avoid heap allocations, profiling showed it was an issue)
+ curKey := make([]byte, 32+len(firstKey[16:]), maxObjKeyLen)
+ err = DecodeObjectKey(firstKey, curKey)
+ if err != nil {
+ reqlog.Errorf("failed to decode object key: %v", err)
+		s.statsd_c.Increment(failSerie)
+ return nil, status.Errorf(codes.Internal, "unable to decode object key")
+ }
+ currentPartition, err := getPartitionFromOhash(curKey, pBits)
+ if err != nil {
+ s.statsd_c.Increment(failSerie)
+ return nil, err
+ }
+
+ for currentPartition == partition {
+ // Suffix is the last three bytes of the object hash
+ curKey = curKey[:32+len(it.Key()[16:])]
+ err = DecodeObjectKey(it.Key(), curKey)
+ if err != nil {
+ reqlog.Errorf("failed to decode object key: %v", err)
+			s.statsd_c.Increment(failSerie)
+ return nil, status.Errorf(codes.Internal, "unable to decode object key")
+ }
+		if bytes.Equal(curKey[29:32], suffix) {
+			ohash := curKey[:32]
+			// Only add to the list if we have not already done so
+			if !bytes.Equal(ohash, lastOhash) {
+				// string() copies, so this is safe even though ohash aliases the reused curKey buffer
+				response.Entry = append(response.Entry, string(ohash))
+				copy(lastOhash, ohash)
+			}
+		}
+ it.Next()
+ if !it.Valid() {
+ break
+ }
+ curKey = curKey[:32+len(it.Key()[16:])]
+ err = DecodeObjectKey(it.Key(), curKey)
+ if err != nil {
+ reqlog.Errorf("failed to decode object key: %v", err)
+			s.statsd_c.Increment(failSerie)
+ return nil, status.Errorf(codes.Internal, "unable to decode object key")
+ }
+ currentPartition, err = getPartitionFromOhash(curKey, pBits)
+ }
+
+ s.statsd_c.Increment(successSerie)
+ return serializePb(response)
+}
+
+// ListQuarantinedOHashes returns a list of quarantined object hashes, with pagination.
+// PageToken is the ohash to start from, as returned from a previous call in the
+// NextPageToken field. If empty, the iterator will start from the first quarantined
+// object hash. PageSize is the maximum count of items to return. If zero,
+// the server will pick a reasonable limit.
+func ListQuarantinedOHashes(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.ListQuarantinedOHashesRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{
+		"Function":  "ListQuarantinedOHashes",
+ "PageToken": fmt.Sprintf("%s", in.PageToken),
+ "PageSize": fmt.Sprintf("%d", in.PageSize),
+ })
+ reqlog.Debug("RPC Call")
+
+ if !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ limit := in.PageSize
+ if limit == 0 {
+ reqlog.Debug("page_size was not specified, set it to 10000")
+ limit = 10000
+ }
+
+	pageToken := in.PageToken
+	if len(pageToken) == 0 {
+		pageToken = []byte(strings.Repeat("0", 32))
+	}
+	if len(pageToken) != 32 {
+		reqlog.Error("page token len != 32")
+		s.statsd_c.Increment("list_quarantined_ohashes.fail")
+		return nil, status.Errorf(codes.InvalidArgument, "page token length != 32")
+	}
+
+ prefix, err := EncodeObjectKey(pageToken)
+ if err != nil {
+ reqlog.Errorf("unable to encode object prefix: %v", err)
+ s.statsd_c.Increment("list_quarantined_ohashes.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to encode object prefix")
+ }
+
+ it := s.kv.NewIterator(quarantinePrefix)
+ defer it.Close()
+
+ response := &pb.ListQuarantinedOHashesReply{}
+ curKey := make([]byte, maxObjKeyLen)
+ lastOhash := make([]byte, 32)
+
+ count := uint32(0)
+ for it.Seek(prefix); it.Valid() && count < limit; it.Next() {
+ curKey = curKey[:32+len(it.Key()[16:])]
+ err = DecodeObjectKey(it.Key(), curKey)
+ if err != nil {
+ reqlog.Errorf("failed to decode quarantined object key: %v", err)
+ s.statsd_c.Increment("list_quarantined_ohashes.fail")
+			return nil, status.Errorf(codes.Internal, "unable to decode quarantined object key")
+ }
+ if !bytes.Equal(curKey[:32], lastOhash) {
+ ohash := make([]byte, 32)
+ copy(ohash, curKey[:32])
+ response.Objects = append(response.Objects, &pb.QuarantinedObjectName{Name: ohash})
+ copy(lastOhash, curKey[:32])
+ count++
+ }
+ }
+
+ // Set NextPageToken if there is at least one ohash beyond what we have returned
+ for ; it.Valid(); it.Next() {
+ curKey = curKey[:32+len(it.Key()[16:])]
+ err = DecodeObjectKey(it.Key(), curKey)
+ if err != nil {
+ reqlog.Errorf("failed to decode quarantined object key: %v", err)
+ s.statsd_c.Increment("list_quarantined_ohashes.fail")
+			return nil, status.Errorf(codes.Internal, "unable to decode quarantined object key")
+ }
+ if !bytes.Equal(curKey[:32], lastOhash) {
+ nextPageToken := make([]byte, 32)
+ copy(nextPageToken, curKey[:32])
+ response.NextPageToken = nextPageToken
+ break
+ }
+ }
+
+ s.statsd_c.Increment("list_quarantined_ohashes.ok")
+ return serializePb(response)
+}
+
+// ListQuarantinedOHash returns all quarantined entries matching the given object hash prefix.
+func ListQuarantinedOHash(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.ListQuarantinedOHashRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{
+		"Function": "ListQuarantinedOHash",
+ "Prefix": fmt.Sprintf("%s", in.Prefix),
+ })
+ reqlog.Debug("RPC Call")
+
+ if !in.RepairTool && !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ if len(in.Prefix) != 32 {
+ reqlog.Error("prefix len != 32")
+ s.statsd_c.Increment("list_quarantined_ohash.fail")
+ return nil, status.Errorf(codes.Internal, "prefix len != 32")
+ }
+
+ prefix, err := EncodeObjectKey(in.Prefix)
+ if err != nil {
+ reqlog.Errorf("unable to encode object prefix: %v", err)
+ s.statsd_c.Increment("list_quarantined_ohash.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to encode object prefix")
+ }
+
+ it := s.kv.NewIterator(quarantinePrefix)
+ defer it.Close()
+
+ response := &pb.ListQuarantinedOHashReply{}
+
+ // adds one byte because of prefix. Otherwise len(prefix) would be len(prefix)-1
+ for it.Seek(prefix); it.Valid() && len(prefix) <= len(it.Key()) && bytes.Equal(prefix, it.Key()[:len(prefix)]); it.Next() {
+
+ // Decode value
+ volumeIndex, offset, err := DecodeObjectValue(it.Value())
+ if err != nil {
+ reqlog.Errorf("failed to decode object value: %v", err)
+ s.statsd_c.Increment("list_quarantined_ohash.fail")
+ return nil, status.Errorf(codes.Internal, "unable to read object")
+ }
+
+ key := make([]byte, 32+len(it.Key()[16:]))
+ err = DecodeObjectKey(it.Key(), key)
+ if err != nil {
+ reqlog.Errorf("failed to decode object key: %v", err)
+ s.statsd_c.Increment("list_quarantined_ohash.fail")
+ return nil, status.Errorf(codes.Internal, "unable to decode object key")
+ }
+ response.Objects = append(response.Objects, &pb.Object{Name: key, VolumeIndex: volumeIndex, Offset: offset})
+ }
+
+ s.statsd_c.Increment("list_quarantined_ohash.ok")
+ return serializePb(response)
+}
+
+// GetNextOffset returns the next write offset for the given volume.
+func GetNextOffset(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.GetNextOffsetRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{"Function": "GetNextOffset", "VolumeIndex": in.VolumeIndex})
+ reqlog.Debug("RPC Call")
+
+ if !in.RepairTool && !s.isClean {
+ reqlog.Debug("KV out of sync with volumes")
+ return nil, status.Errorf(codes.FailedPrecondition, "KV out of sync with volumes")
+ }
+
+ key := EncodeVolumeKey(in.VolumeIndex)
+
+ value, err := s.kv.Get(volumePrefix, key)
+ if err != nil {
+ reqlog.Errorf("unable to retrieve volume key: %v", err)
+ s.statsd_c.Increment("get_next_offset.fail")
+ return nil, status.Errorf(codes.Unavailable, "unable to retrieve volume key")
+ }
+
+ if value == nil {
+ reqlog.Info("volume index does not exist in db")
+ s.statsd_c.Increment("get_next_offset.fail")
+ return nil, status.Errorf(codes.FailedPrecondition, "volume index does not exist in db")
+ }
+
+ _, _, nextOffset, _, _, err := DecodeVolumeValue(value)
+ if err != nil {
+ reqlog.WithFields(logrus.Fields{"value": value}).Errorf("failed to decode volume value: %v", err)
+ s.statsd_c.Increment("get_next_offset.fail")
+ return nil, status.Errorf(codes.Internal, "failed to decode volume value")
+ }
+
+ s.statsd_c.Increment("get_next_offset.ok")
+ return serializePb(&pb.GetNextOffsetReply{Offset: uint64(nextOffset)})
+}
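+
+// Typical write path sketch (inferred from the request fields, not prescribed here):
+// a writer reads the volume's next offset, appends the object to the volume file at
+// that offset, then registers it, which advances the next offset to NextOffset:
+//
+//	off := getNextOffset(vol)                    // POST /get_next_offset
+//	end := appendToVolume(vol, off, objBytes)    // hypothetical helper
+//	registerObject(name, vol, off, end)          // POST /register_object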
+
+// GetStats returns stats for the KV. Used for initial debugging; may be removed later.
+func GetStats(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.GetStatsRequest{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ response := new(pb.GetStatsReply)
+
+ m := CollectStats(s)
+ response.Stats = m
+
+ return serializePb(response)
+}
+
+// SetKvState sets the KV state (whether or not it is in sync with the volumes)
+func SetKvState(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.KvState{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{"Function": "SetClean", "IsClean": in.IsClean})
+ reqlog.Debug("RPC Call")
+
+ s.isClean = in.IsClean
+ return serializePb(&pb.SetKvStateReply{})
+}
+
+// GetKvState returns the KV state (whether or not it is in sync with the volumes)
+func GetKvState(s *server, ctx context.Context, pbIn *[]byte) (*[]byte, error) {
+ in := &pb.KvState{}
+ if err := proto.Unmarshal(*pbIn, in); err != nil {
+ logrus.Errorf("failed to unmarshal input: %v", err)
+ return nil, status.Errorf(codes.InvalidArgument, "unable to deserialize protobuf")
+ }
+
+ reqlog := log.WithFields(logrus.Fields{"Function": "GetKvState"})
+ reqlog.Debug("RPC Call")
+ state := new(pb.KvState)
+ state.IsClean = s.isClean
+ return serializePb(state)
+}
+
+// shutdownHandler stops serving RPC requests and closes the KV when SIGTERM/SIGINT is received
+func shutdownHandler(s *server, wg *sync.WaitGroup) {
+ <-s.stopChan
+ rlog := log.WithFields(logrus.Fields{"socket": s.socketPath})
+ rlog.Info("Shutting down")
+
+ // Stop serving RPC requests
+ // Give it a 5s delay to finish serving active requests, then force close
+ rlog.Debug("Stopping RPC")
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ if err := s.httpServer.Shutdown(ctx); err != nil {
+ // Error or timeout
+ rlog.Infof("HTTP server Shutdown: %v", err)
+ if err = s.httpServer.Close(); err != nil {
+ rlog.Infof("HTTP server Close: %v", err)
+ }
+ }
+
+ // Mark DB as clean
+	if s.isClean {
+ rlog.Debug("Mark DB as closed")
+ err := MarkDbClosed(s.kv)
+ if err != nil {
+ rlog.Warn("Failed to mark db as clean when shutting down")
+ }
+ } else {
+ rlog.Warn("State is not clean, not marking DB as closed (still out of sync with volumes)")
+ }
+
+ // Close KV
+ rlog.Debug("Closing KV")
+ s.kv.Close()
+ wg.Done()
+}
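+
+// The stop channel is expected to be wired to SIGTERM/SIGINT by the caller
+// (presumably in main.go), along these lines:
+//
+//	stopChan := make(chan os.Signal, 1)
+//	signal.Notify(stopChan, syscall.SIGTERM, syscall.SIGINT)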
+
+func runServer(kv KV, diskPath string, socketPath string, stopChan chan os.Signal, isClean bool) (err error) {
+	var wg sync.WaitGroup
+
+	_, diskName := path.Split(path.Clean(diskPath))
+	fs := &server{kv: kv, diskPath: diskPath, diskName: diskName, socketPath: socketPath,
+		isClean: isClean, stopChan: stopChan}
+
+	go func() {
+		unixListener, err := net.Listen("unix", fs.socketPath)
+		if err != nil {
+			log.Fatalf("Cannot serve on %s: %v", fs.socketPath, err)
+		}
+		// The socket must exist before its permissions can be restricted
+		if err = os.Chmod(fs.socketPath, 0660); err != nil {
+			log.Warnf("Cannot chmod %s: %v", fs.socketPath, err)
+		}
+		server := http.Server{Handler: fs}
+		fs.httpServer = &server
+		server.Serve(unixListener)
+	}()
+
+ // Initialize statsd client
+ statsdPrefix := "kv"
+ fs.statsd_c, err = statsd.New(statsd.Prefix(statsdPrefix))
+ if err != nil {
+ return
+ }
+
+ // Start shutdown handler
+ wg.Add(1)
+ go shutdownHandler(fs, &wg)
+ wg.Wait()
+
+ return
+}
+
+var strToFunc = map[string]rpcFunc{
+ "/register_volume": RegisterVolume,
+ "/unregister_volume": UnregisterVolume,
+ "/update_volume_state": UpdateVolumeState,
+ "/get_volume": GetVolume,
+ "/list_volumes": ListVolumes,
+ "/register_object": RegisterObject,
+ "/unregister_object": UnregisterObject,
+ "/rename_object": RenameObject,
+ "/load_object": LoadObject,
+ "/quarantine_object": QuarantineObject,
+ "/unquarantine_object": UnquarantineObject,
+ "/load_objects_by_prefix": LoadObjectsByPrefix,
+ "/load_objects_by_volume": LoadObjectsByVolume,
+ "/list_partitions": ListPartitions,
+ "/list_partition": ListPartition,
+ "/list_suffix": ListSuffix,
+ "/list_quarantined_ohashes": ListQuarantinedOHashes,
+ "/list_quarantined_ohash": ListQuarantinedOHash,
+ "/get_next_offset": GetNextOffset,
+ "/get_stats": GetStats,
+ "/set_kv_state": SetKvState,
+ "/get_kv_state": GetKvState,
+}
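+
+// Example client call over the unix socket (a sketch, not part of the server):
+// marshal the request protobuf, POST it to the matching path, and unmarshal the
+// reply. Error replies carry the RPC code as the HTTP status and a plain-text
+// message as the body.
+//
+//	client := &http.Client{Transport: &http.Transport{
+//		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+//			return net.Dial("unix", "/srv/node/sda/losf/rpc.socket") // hypothetical socket path
+//		},
+//	}}
+//	body, _ := proto.Marshal(&pb.GetNextOffsetRequest{VolumeIndex: 12})
+//	resp, _ := client.Post("http://unix/get_next_offset", "application/octet-stream", bytes.NewReader(body))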
+
+func serializePb(msg proto.Message) (*[]byte, error) {
+ out, err := proto.Marshal(msg)
+ if err != nil {
+ log.Errorf("failed to serialize reply: %v", err)
+ return nil, status.Errorf(codes.Unavailable, "unable to serialize reply: %v", err)
+ }
+ return &out, nil
+}
+
+func sendError(w http.ResponseWriter, rpcErr error) (err error) {
+	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+	// Not every code path wraps its error in a *status.RpcError; default plain errors to 500
+	httpCode := http.StatusInternalServerError
+	if st, ok := rpcErr.(*status.RpcError); ok {
+		httpCode = int(st.Code())
+	}
+	w.WriteHeader(httpCode)
+	errorMsg := []byte(rpcErr.Error())
+	_, err = w.Write(errorMsg)
+	return
+}
+
+func sendReply(w http.ResponseWriter, serializedPb []byte) (err error) {
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.WriteHeader(200)
+ _, err = w.Write(serializedPb)
+ return
+}
+
+func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ var err error
+	log.Debug(r.URL.Path)
+
+ // Match URL to RPC function
+ fn, ok := strToFunc[r.URL.Path]
+ if !ok {
+ log.Printf("No match for URL Path %s", r.URL.Path)
+ if err = sendError(w, status.Errorf(codes.Unimplemented, "Unimplemented RPC function")); err != nil {
+ log.Printf("Error sending reply: %v", err)
+ }
+ return
+ }
+
+ // Read request (body should be serialized protobuf)
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading body: %v", err)
+ if err = sendError(w, status.Errorf(codes.Internal, "Failed to read request body")); err != nil {
+ log.Printf("Error sending reply: %v", err)
+ }
+ return
+ }
+
+ // Call RPC function and send reply
+ resp, err := fn(s, r.Context(), &body)
+ if err != nil {
+ log.Println(err)
+ if err = sendError(w, err); err != nil {
+ log.Printf("Error sending reply: %v", err)
+ }
+ return
+ }
+
+ if err = sendReply(w, *resp); err != nil {
+ log.Printf("Error sending reply: %v", err)
+ }
+}
diff --git a/go/swift-rpc-losf/rpc_test.go b/go/swift-rpc-losf/rpc_test.go
new file mode 100644
index 000000000..0da9b5e6a
--- /dev/null
+++ b/go/swift-rpc-losf/rpc_test.go
@@ -0,0 +1,1014 @@
+// Copyright (c) 2010-2012 OpenStack Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/alecuyer/statsd/v2"
+ "github.com/golang/protobuf/proto"
+ "github.com/openstack/swift-rpc-losf/codes"
+ pb "github.com/openstack/swift-rpc-losf/proto"
+ "github.com/sirupsen/logrus"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "os"
+ "path"
+ "strings"
+ "testing"
+)
+
+func runTestServer(kv KV, diskPath string, socketPath string, listening chan bool) (err error) {
+ _, diskName := path.Split(path.Clean(diskPath))
+ fs := &server{kv: kv, diskPath: diskPath, diskName: diskName, socketPath: socketPath, isClean: true}
+
+ statsdPrefix := "kv"
+ fs.statsd_c, err = statsd.New(statsd.Prefix(statsdPrefix))
+ if err != nil {
+ return
+ }
+
+ go func() {
+ os.Remove(fs.socketPath)
+ unixListener, err := net.Listen("unix", fs.socketPath)
+ if err != nil {
+ log.Fatalf("Cannot serve: %v", err)
+ }
+ listening <- true
+ server := http.Server{Handler: fs}
+ fs.httpServer = &server
+ log.Debug("Start serving")
+ server.Serve(unixListener)
+
+ }()
+ return
+}
+
+func teardown(tempdir string) {
+ if strings.HasPrefix(tempdir, "/tmp/") {
+ os.RemoveAll(tempdir)
+ }
+}
+
+var client http.Client
+
+// check_200 returns err if it is non-nil, an error if the HTTP status code is
+// not 200, and nil if err is nil and the status code is 200.
+func check_200(response *http.Response, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if response.StatusCode != 200 {
+ return fmt.Errorf("HTTP status code is not 200: %v", response.StatusCode)
+ }
+
+ return nil
+}
+
+func populateKV() (err error) {
+ volumes := []pb.RegisterVolumeRequest{
+ {Partition: 9, Type: 0, VolumeIndex: 20, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 10, Type: 0, VolumeIndex: 35, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 40, Type: 0, VolumeIndex: 24, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 63, Type: 0, VolumeIndex: 27, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 65, Type: 0, VolumeIndex: 33, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 71, Type: 0, VolumeIndex: 19, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 111, Type: 0, VolumeIndex: 47, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 127, Type: 0, VolumeIndex: 43, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 139, Type: 0, VolumeIndex: 50, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 171, Type: 0, VolumeIndex: 49, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 195, Type: 0, VolumeIndex: 12, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 211, Type: 0, VolumeIndex: 16, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 213, Type: 0, VolumeIndex: 14, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 243, Type: 0, VolumeIndex: 17, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 271, Type: 0, VolumeIndex: 8, Offset: 24576, State: 0, RepairTool: false},
+ {Partition: 295, Type: 0, VolumeIndex: 28, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 327, Type: 0, VolumeIndex: 48, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 360, Type: 0, VolumeIndex: 15, Offset: 12288, State: 0, RepairTool: false},
+ {Partition: 379, Type: 0, VolumeIndex: 25, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 417, Type: 0, VolumeIndex: 22, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 420, Type: 0, VolumeIndex: 32, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 421, Type: 0, VolumeIndex: 46, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 428, Type: 0, VolumeIndex: 21, Offset: 12288, State: 0, RepairTool: false},
+ {Partition: 439, Type: 0, VolumeIndex: 38, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 453, Type: 0, VolumeIndex: 44, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 466, Type: 0, VolumeIndex: 40, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 500, Type: 0, VolumeIndex: 39, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 513, Type: 0, VolumeIndex: 26, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 530, Type: 0, VolumeIndex: 4, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 530, Type: 1, VolumeIndex: 5, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 535, Type: 0, VolumeIndex: 1, Offset: 20480, State: 0, RepairTool: false},
+ {Partition: 535, Type: 0, VolumeIndex: 2, Offset: 4096, State: 0, RepairTool: false},
+ {Partition: 535, Type: 1, VolumeIndex: 3, Offset: 12288, State: 0, RepairTool: false},
+ {Partition: 559, Type: 0, VolumeIndex: 30, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 602, Type: 0, VolumeIndex: 41, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 604, Type: 0, VolumeIndex: 29, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 673, Type: 0, VolumeIndex: 11, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 675, Type: 0, VolumeIndex: 42, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 710, Type: 0, VolumeIndex: 37, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 765, Type: 0, VolumeIndex: 36, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 766, Type: 0, VolumeIndex: 45, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 786, Type: 0, VolumeIndex: 23, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 809, Type: 0, VolumeIndex: 31, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 810, Type: 0, VolumeIndex: 13, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 855, Type: 0, VolumeIndex: 18, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 974, Type: 0, VolumeIndex: 9, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 977, Type: 0, VolumeIndex: 6, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 977, Type: 1, VolumeIndex: 7, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 1009, Type: 0, VolumeIndex: 34, Offset: 8192, State: 0, RepairTool: false},
+ {Partition: 1019, Type: 0, VolumeIndex: 10, Offset: 8192, State: 0, RepairTool: false},
+ }
+
+ objects := []pb.RegisterObjectRequest{
+ {Name: []byte("85fd12f8961e33cbf7229a94118524fa1515589781.45671.ts"), VolumeIndex: 3, Offset: 8192, NextOffset: 12288, RepairTool: false},
+ {Name: []byte("84afc1659c7e8271951fe370d6eee0f81515590332.51834.ts"), VolumeIndex: 5, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("f45bf9000f39092b9de5a74256e3eebe1515590648.06511.ts"), VolumeIndex: 7, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("43c8adc53dbb40d27add4f614fc49e5e1515595691.35618#0#d.data"), VolumeIndex: 8, Offset: 20480, NextOffset: 24576, RepairTool: false},
+ {Name: []byte("f3804523d91d294dab1500145b43395b1515596136.42189#4#d.data"), VolumeIndex: 9, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("fefe1ba1120cd6cd501927401d6b2ecc1515750800.13517#2#d.data"), VolumeIndex: 10, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("a8766d2608b77dc6cb0bfe3fe6782c731515750800.18975#0#d.data"), VolumeIndex: 11, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("30f12368ca25d11fb1a80d10e64b15431515750800.19224#4#d.data"), VolumeIndex: 12, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("ca9576ada218f74cb8f11648ecec439c1515750800.21553#2#d.data"), VolumeIndex: 13, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("3549df7ef11006af6852587bf16d82971515750800.22096#2#d.data"), VolumeIndex: 14, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("5a0a70e36a057a9982d1dc9188069b511515750803.50544#0#d.data"), VolumeIndex: 15, Offset: 8192, NextOffset: 12288, RepairTool: false},
+ {Name: []byte("5a1801fea97614f8c5f58511905773d01515750800.40035#0#d.data"), VolumeIndex: 15, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("34c46ce96897a24374d126d7d7eab2fb1515750800.42545#0#d.data"), VolumeIndex: 16, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("3cf60143ea488c84da9e1603158203a11515750800.93160#0#d.data"), VolumeIndex: 17, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("d5c64e9cb0b093441fb6b500141aa0531515750800.94069#2#d.data"), VolumeIndex: 18, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("11f5db768b6f9a37cf894af99b15c0d11515750801.05135#4#d.data"), VolumeIndex: 19, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("02573d31b770cda8e0effd7762e8a0751515750801.09785#2#d.data"), VolumeIndex: 20, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("6b08eabf5667557c72dc6570aa1fb8451515750801.08639#4#d.data"), VolumeIndex: 21, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("6b08eabf5667557c72dc6570aa1fb8451515750856.77219.meta"), VolumeIndex: 21, Offset: 8192, NextOffset: 12288, RepairTool: false},
+ {Name: []byte("6b08eabf5667557c72dc6570abcfb8451515643210.72429#4#d.data"), VolumeIndex: 22, Offset: 8192, NextOffset: 12288, RepairTool: false},
+ {Name: []byte("687ba0410f4323c66397a85292077b101515750801.10244#0#d.data"), VolumeIndex: 22, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("c4aaea9b28c425f45eb64d4d5b0b3f621515750801.19478#2#d.data"), VolumeIndex: 23, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("0a0898eb861579d1240adbb1c9f0c92b1515750801.20636#2#d.data"), VolumeIndex: 24, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("5efd43142db5913180ba865ef529eccd1515750801.64704#4#d.data"), VolumeIndex: 25, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("806a35f1e974f93161b2da51760f22701515750801.68309#2#d.data"), VolumeIndex: 26, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("0fdceb7af49cdd0cb1262acbdc88ae881515750801.93565#0#d.data"), VolumeIndex: 27, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("49d4fa294d2c97f08596148bf4615bfa1515750801.93739#4#d.data"), VolumeIndex: 28, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("971b4d05733f475d447d7f8b050bb0071515750802.09721#2#d.data"), VolumeIndex: 29, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("8bc66b3ae033db15ceb3729d89a07ece1515750802.51062#0#d.data"), VolumeIndex: 30, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("ca53beae1aeb4deacd17409e32305a2c1515750802.63996#2#d.data"), VolumeIndex: 31, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("69375433763d9d511114e8ac869c916c1515750802.63846#0#d.data"), VolumeIndex: 32, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("105de5f388ab4b72e56bc93f36ad388a1515750802.73393#2#d.data"), VolumeIndex: 33, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("105de5f388ab4b72e56bc93f36ad388a1515873948.27383#2#d.meta"), VolumeIndex: 33, Offset: 8192, NextOffset: 12288, RepairTool: false},
+ {Name: []byte("fc6916fd1e6a0267afac88c395b876ac1515750802.83459#2#d.data"), VolumeIndex: 34, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("02b10d6bfb205fe0f34f9bd82336dc711515750802.93662#2#d.data"), VolumeIndex: 35, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("bf43763a98208f15da803e76bf52e7d11515750803.01357#0#d.data"), VolumeIndex: 36, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("b1abadfed91b1cb4392dd2ec29e171ac1515750803.07767#4#d.data"), VolumeIndex: 37, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("6de30d74634d088f1f5923336af2b3ae1515750803.36199#4#d.data"), VolumeIndex: 38, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("7d234bbd1137d509105245ac78427b9f1515750803.49022#4#d.data"), VolumeIndex: 39, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("749057975c1bac830360530bdcd741591515750803.49647#0#d.data"), VolumeIndex: 40, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("9692991e77c9742cbc24469391d499981515750803.56295#0#d.data"), VolumeIndex: 41, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("a8dbd473e360787caff0b97aca33373f1515750803.68428#2#d.data"), VolumeIndex: 42, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("1ff88cb2b6b64f1fd3b6097f20203ee01515750803.73746#4#d.data"), VolumeIndex: 43, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("71572f46094d7ac440f5e2a3c72da17b1515750803.75628#2#d.data"), VolumeIndex: 44, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("bf8e83d954478d66ac1dba7eaa832c721515750803.81141#4#d.data"), VolumeIndex: 45, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("69724f682fe12b4a4306bceeb75825431515750804.10112#2#d.data"), VolumeIndex: 46, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("1bf38645ccc5f158c96480f1e0861a141515750804.31472#0#d.data"), VolumeIndex: 47, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("51fecf0e0bb30920fd0d83ee8fba29f71515750804.32492#2#d.data"), VolumeIndex: 48, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("2acbf85061e46b3bb3adb8930cb7414d1515750804.46622#2#d.data"), VolumeIndex: 49, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ {Name: []byte("22e4a97f1d4f2b6d4150bb9b481e4c971515750804.51987#0#d.data"), VolumeIndex: 50, Offset: 4096, NextOffset: 8192, RepairTool: false},
+ }
+
+ // Register volumes
+ for _, df := range volumes {
+ out, err := proto.Marshal(&df)
+ if err != nil {
+ log.Error("failed to marshal")
+ return err
+ }
+ body := bytes.NewReader(out)
+ resp, err := client.Post("http://unix/register_volume", "application/octet-stream", body)
+ if err = check_200(resp, err); err != nil {
+ return err
+ }
+		resp.Body.Close() // close in the loop body; a defer here would pile up until populateKV returns
+ }
+
+ // Register objects
+ for _, obj := range objects {
+ out, err := proto.Marshal(&obj)
+ if err != nil {
+ log.Error("failed to marshal")
+ return err
+ }
+ body := bytes.NewReader(out)
+ resp, err := client.Post("http://unix/register_object", "application/octet-stream", body)
+ if err = check_200(resp, err); err != nil {
+ return err
+ }
+		resp.Body.Close() // close in the loop body; a defer here would pile up until populateKV returns
+ }
+ return
+}
+
+func TestMain(m *testing.M) {
+ log.SetLevel(logrus.InfoLevel)
+ diskPath, err := ioutil.TempDir("/tmp", "losf-test")
+ if err != nil {
+ log.Fatal(err)
+ }
+ rootDir := path.Join(diskPath, "losf")
+ dbDir := path.Join(rootDir, "db")
+
+ err = os.MkdirAll(rootDir, 0700)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ kv, err := openLevigoDB(dbDir)
+ if err != nil {
+ log.Fatal("failed to create leveldb")
+ }
+	socketPath := "/tmp/rpc.socket"
+	listening := make(chan bool, 1)
+	go runTestServer(kv, diskPath, socketPath, listening)
+	// Wait for the server to listen on the socket
+	<-listening
+
+	// Route all test HTTP requests through the RPC unix socket
+	client = http.Client{Transport: &http.Transport{
+		DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
+			return net.Dial("unix", socketPath)
+		},
+	}}
+
+ err = populateKV()
+ if err != nil {
+ log.Error(err)
+ log.Fatal("failed to populate test KV")
+ }
+
+ ret := m.Run()
+
+ teardown(diskPath)
+
+ os.Exit(ret)
+}
+
+// TODO, add more tests:
+// - prefix with no objects
+// - single object
+// - first and last elements of the KV
+func TestLoadObjectsByPrefix(t *testing.T) {
+ prefix := &pb.LoadObjectsByPrefixRequest{Prefix: []byte("105de5f388ab4b72e56bc93f36ad388a")}
+
+ out, err := proto.Marshal(prefix)
+ if err != nil {
+ t.Error("failed to marshal")
+ }
+ body := bytes.NewReader(out)
+
+ expectedObjects := []pb.Object{
+ {Name: []byte("105de5f388ab4b72e56bc93f36ad388a1515750802.73393#2#d.data"), VolumeIndex: 33, Offset: 4096},
+ {Name: []byte("105de5f388ab4b72e56bc93f36ad388a1515873948.27383#2#d.meta"), VolumeIndex: 33, Offset: 8192},
+ }
+
+ response, err := client.Post("http://unix/load_objects_by_prefix", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("RPC call failed: %v", err)
+ }
+ defer response.Body.Close()
+
+ r := &pb.LoadObjectsByPrefixReply{}
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(response.Body)
+ if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+ t.Error("failed to unmarshal")
+ }
+
+	if len(r.Objects) != len(expectedObjects) {
+		t.Fatalf("got %d objects, expected %d", len(r.Objects), len(expectedObjects))
+	}
+
+	for i, obj := range r.Objects {
+		expected := expectedObjects[i]
+		if !bytes.Equal(obj.Name, expected.Name) {
+			t.Errorf("\ngot     : %s\nexpected: %s", string(obj.Name), string(expected.Name))
+		}
+	}
+}
+
+func TestListPartitions(t *testing.T) {
+ partPower := uint32(10)
+
+ lpInfo := &pb.ListPartitionsRequest{PartitionBits: partPower}
+ out, err := proto.Marshal(lpInfo)
+ if err != nil {
+ t.Error("failed to marshal")
+ }
+ body := bytes.NewReader(out)
+
+ expectedPartitions := []string{"9", "10", "40", "63", "65", "71", "111", "127", "139", "171", "195", "211", "213", "243", "271", "295", "327", "360", "379", "417", "420", "421", "428", "439", "453", "466", "500", "513", "530", "535", "559", "602", "604", "673", "675", "710", "765", "766", "786", "809", "810", "855", "974", "977", "1009", "1019"}
+
+ response, err := client.Post("http://unix/list_partitions", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("RPC call failed: %v", err)
+ }
+ defer response.Body.Close()
+
+ r := &pb.DirEntries{}
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(response.Body)
+ if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+ t.Error("failed to unmarshal")
+ }
+
+ if len(r.Entry) != len(expectedPartitions) {
+ t.Fatalf("\ngot: %v\nwant: %v", r.Entry, expectedPartitions)
+ }
+
+ for i, obj := range r.Entry {
+ if obj != expectedPartitions[i] {
+ t.Fatalf("checking individual elements\ngot: %v\nwant: %v", r.Entry, expectedPartitions)
+ }
+ }
+}
+
+// TODO: add more tests, have a suffix with multiple entries
+func TestListSuffix(t *testing.T) {
+ partition := uint32(428)
+ partPower := uint32(10)
+ suffix := []byte("845")
+
+ lsInfo := &pb.ListSuffixRequest{Partition: partition, Suffix: suffix, PartitionBits: partPower}
+ out, err := proto.Marshal(lsInfo)
+ if err != nil {
+ t.Error(err)
+ }
+ body := bytes.NewReader(out)
+
+ expectedHashes := []string{"6b08eabf5667557c72dc6570aa1fb845", "6b08eabf5667557c72dc6570abcfb845"}
+
+ response, err := client.Post("http://unix/list_suffix", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("RPC call failed: %v", err)
+ }
+ defer response.Body.Close()
+
+ r := &pb.DirEntries{}
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(response.Body)
+ if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+ t.Error(err)
+ }
+
+ if len(r.Entry) != len(expectedHashes) {
+ t.Fatalf("\ngot: %v\nwant: %v", r.Entry, expectedHashes)
+ }
+
+ for i, obj := range r.Entry {
+ if obj != expectedHashes[i] {
+ t.Fatalf("checking individual elements\ngot: %v\nwant: %v", r.Entry, expectedHashes)
+ }
+ }
+}
+
+func TestState(t *testing.T) {
+ // Mark dirty and check
+ kvstate := &pb.KvState{}
+ out, err := proto.Marshal(kvstate)
+ if err != nil {
+ t.Error(err)
+ }
+ body := bytes.NewReader(out)
+
+ response, err := client.Post("http://unix/set_kv_state", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("Failed to change KV state: %v", err)
+ }
+ response.Body.Close()
+
+ empty := &pb.GetKvStateRequest{}
+	emptySerialized, err := proto.Marshal(empty)
+ if err != nil {
+ t.Error(err)
+ }
+	body = bytes.NewReader(emptySerialized)
+
+ response, err = client.Post("http://unix/get_kv_state", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("RPC call failed: %v", err)
+ }
+ r := &pb.KvState{}
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(response.Body)
+ if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+ t.Error(err)
+ }
+
+	if r.IsClean {
+		t.Fatal("IsClean is true, should be false")
+	}
+
+ // Mark clean and check
+ kvstate = &pb.KvState{IsClean: true}
+ out, err = proto.Marshal(kvstate)
+ if err != nil {
+ t.Error(err)
+ }
+ body = bytes.NewReader(out)
+
+ response, err = client.Post("http://unix/set_kv_state", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("Failed to change KV state: %v", err)
+ }
+ response.Body.Close()
+
+	body = bytes.NewReader(emptySerialized)
+ response, err = client.Post("http://unix/get_kv_state", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("RPC call failed: %v", err)
+ }
+ defer response.Body.Close()
+ buf.Reset()
+ buf.ReadFrom(response.Body)
+
+ if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+ t.Error(err)
+ }
+
+	if !r.IsClean {
+		t.Fatal("IsClean is false, should be true")
+	}
+}
+
+func TestRegisterObject(t *testing.T) {
+ // Register new non-existing object
+ name := []byte("33dea50d391ee52a8ead7cb562a9b4e2/1539791765.84449#5#d.data")
+ obj := &pb.RegisterObjectRequest{Name: name, VolumeIndex: 1, Offset: 4096, NextOffset: 8192, RepairTool: false}
+ out, err := proto.Marshal(obj)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body := bytes.NewReader(out)
+
+ response, err := client.Post("http://unix/register_object", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("failed to register object: %s", err)
+ }
+ response.Body.Close()
+
+ objInfo := &pb.LoadObjectRequest{Name: name, IsQuarantined: false, RepairTool: false}
+ out, err = proto.Marshal(objInfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body = bytes.NewReader(out)
+ response, err = client.Post("http://unix/load_object", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("error getting registered object: %s", err)
+ }
+ r := &pb.Object{}
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(response.Body)
+ if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+ t.Fatal(err)
+ }
+ response.Body.Close()
+
+ if !bytes.Equal(r.Name, name) || r.VolumeIndex != 1 || r.Offset != 4096 {
+		t.Fatalf("object found but name, volume index, or offset is wrong: %v", r)
+ }
+
+ // Register existing object, which should fail
+ obj = &pb.RegisterObjectRequest{Name: name, VolumeIndex: 1, Offset: 4096, NextOffset: 8192, RepairTool: false}
+ out, err = proto.Marshal(obj)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body = bytes.NewReader(out)
+ response, err = client.Post("http://unix/register_object", "application/octet-stream", body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if response.StatusCode != int(codes.AlreadyExists) {
+ t.Fatalf("wrong status code, expected: %d, got: %d", codes.AlreadyExists, response.StatusCode)
+ }
+ response.Body.Close()
+
+ // Remove object
+ unregInfo := &pb.UnregisterObjectRequest{Name: name}
+ out, err = proto.Marshal(unregInfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body = bytes.NewReader(out)
+ response, err = client.Post("http://unix/unregister_object", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("failed to unregister object: %s", err)
+ }
+ response.Body.Close()
+
+ // Attempt to remove again, should fail
+ body = bytes.NewReader(out)
+ response, err = client.Post("http://unix/unregister_object", "application/octet-stream", body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if response.StatusCode != int(codes.NotFound) {
+ t.Fatalf("wrong status code, expected: %d, got: %d", codes.NotFound, response.StatusCode)
+ }
+}
+
+func TestQuarantineObject(t *testing.T) {
+ // Quarantine an existing object
+ name := []byte("bf43763a98208f15da803e76bf52e7d11515750803.01357#0#d.data")
+ objName := &pb.QuarantineObjectRequest{Name: name, RepairTool: false}
+ out, err := proto.Marshal(objName)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body := bytes.NewReader(out)
+	response, err := client.Post("http://unix/quarantine_object", "application/octet-stream", body)
+	if err = check_200(response, err); err != nil {
+		t.Fatalf("Failed to quarantine object: %v", err)
+	}
+ response.Body.Close()
+
+ // We shouldn't be able to find it
+ objInfo := &pb.LoadObjectRequest{Name: name, IsQuarantined: false, RepairTool: false}
+ out, err = proto.Marshal(objInfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body = bytes.NewReader(out)
+	response, err = client.Post("http://unix/load_object", "application/octet-stream", body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if response.StatusCode != int(codes.NotFound) {
+ t.Fatalf("wrong status code, expected: %d, got: %d", codes.NotFound, response.StatusCode)
+ }
+ response.Body.Close()
+
+	// TODO: test that the object now exists in the quarantine namespace,
+	// then try to quarantine a non-existent object
+}
+
+func TestUnquarantineObject(t *testing.T) {
+	// Unquarantine an existing quarantined object and check that it can be loaded again
+ name := []byte("bf43763a98208f15da803e76bf52e7d11515750803.01357#0#d.data")
+ objName := &pb.UnquarantineObjectRequest{Name: name, RepairTool: false}
+ out, err := proto.Marshal(objName)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body := bytes.NewReader(out)
+
+	response, err := client.Post("http://unix/unquarantine_object", "application/octet-stream", body)
+	if err = check_200(response, err); err != nil {
+		t.Fatalf("Failed to unquarantine object: %v", err)
+	}
+ response.Body.Close()
+
+ // We should be able to find it
+ objInfo := &pb.LoadObjectRequest{Name: name, IsQuarantined: false, RepairTool: false}
+ out, err = proto.Marshal(objInfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body = bytes.NewReader(out)
+
+ response, err = client.Post("http://unix/load_object", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+		t.Fatalf("cannot find unquarantined object: %v", err)
+ }
+ response.Body.Close()
+
+	// TODO: try to unquarantine a non-existent object
+}
+
+// This test modifies the DB
+func TestListQuarantinedOHashes(t *testing.T) {
+ // We shouldn't find any quarantined object initially
+ lqInfo := &pb.ListQuarantinedOHashesRequest{PageSize: 100}
+ out, err := proto.Marshal(lqInfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body := bytes.NewReader(out)
+
+ response, err := client.Post("http://unix/list_quarantined_ohashes", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("failed to list quarantined ohashes: %v", err)
+ }
+
+ r := &pb.ListQuarantinedOHashesReply{}
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(response.Body)
+ if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+ t.Fatal(err)
+ }
+
+ if r.Objects != nil {
+ t.Fatalf("Did not expect to find any quarantined objects. Found: %v", r.Objects)
+ }
+ response.Body.Close()
+
+ // Quarantine a few objects and check we can find them
+ objectsToQuarantine := []pb.QuarantineObjectRequest{
+ {Name: []byte("02573d31b770cda8e0effd7762e8a0751515750801.09785#2#d.data"), RepairTool: false},
+ {Name: []byte("6b08eabf5667557c72dc6570aa1fb8451515750801.08639#4#d.data"), RepairTool: false},
+ {Name: []byte("6b08eabf5667557c72dc6570aa1fb8451515750856.77219.meta"), RepairTool: false},
+ {Name: []byte("6b08eabf5667557c72dc6570abcfb8451515643210.72429#4#d.data"), RepairTool: false},
+ {Name: []byte("687ba0410f4323c66397a85292077b101515750801.10244#0#d.data"), RepairTool: false},
+ {Name: []byte("c4aaea9b28c425f45eb64d4d5b0b3f621515750801.19478#2#d.data"), RepairTool: false},
+ {Name: []byte("0a0898eb861579d1240adbb1c9f0c92b1515750801.20636#2#d.data"), RepairTool: false},
+ }
+
+ expectedOhashes := [][]byte{
+ []byte("02573d31b770cda8e0effd7762e8a075"),
+ []byte("0a0898eb861579d1240adbb1c9f0c92b"),
+ []byte("687ba0410f4323c66397a85292077b10"),
+ []byte("6b08eabf5667557c72dc6570aa1fb845"),
+ []byte("6b08eabf5667557c72dc6570abcfb845"),
+ []byte("c4aaea9b28c425f45eb64d4d5b0b3f62"),
+ }
+
+ for _, qObj := range objectsToQuarantine {
+ out, err = proto.Marshal(&qObj)
+ if err != nil {
+ t.Error(err)
+ }
+ body = bytes.NewReader(out)
+
+ response, err = client.Post("http://unix/quarantine_object", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("failed to quarantine object: %s", err)
+ }
+ response.Body.Close()
+ }
+
+ // List quarantined objects
+ lqInfo = &pb.ListQuarantinedOHashesRequest{PageSize: 100}
+ out, err = proto.Marshal(lqInfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body = bytes.NewReader(out)
+
+ response, err = client.Post("http://unix/list_quarantined_ohashes", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("failed to list quarantined ohashes: %v", err)
+ }
+
+ r = &pb.ListQuarantinedOHashesReply{}
+ buf.Reset()
+ buf.ReadFrom(response.Body)
+ if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+ t.Fatal(err)
+ }
+
+ response.Body.Close()
+
+ receivedOhashes := [][]byte{}
+ for _, obj := range r.Objects {
+ receivedOhashes = append(receivedOhashes, obj.Name)
+ }
+
+ if !testEqSliceBytes(receivedOhashes, expectedOhashes) {
+ t.Fatalf("\nexpected %v\ngot %v", expectedOhashes, receivedOhashes)
+ }
+
+ // We got all quarantined objects, so NextPageToken shouldn't be set
+ if !bytes.Equal(r.NextPageToken, []byte("")) {
+		t.Fatalf("\nexpected %v got %v", []byte(""), r.NextPageToken)
+ }
+
+ // List quarantined objects, with a PageSize of 1
+ lqInfo = &pb.ListQuarantinedOHashesRequest{PageSize: 1}
+ out, err = proto.Marshal(lqInfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body = bytes.NewReader(out)
+
+ response, err = client.Post("http://unix/list_quarantined_ohashes", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("failed to list quarantined ohashes: %v", err)
+ }
+
+ r = &pb.ListQuarantinedOHashesReply{}
+ buf.Reset()
+ buf.ReadFrom(response.Body)
+ if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+ t.Fatal(err)
+ }
+
+ response.Body.Close()
+
+ receivedOhashes = [][]byte{}
+ for _, obj := range r.Objects {
+ receivedOhashes = append(receivedOhashes, obj.Name)
+ }
+
+ if !testEqSliceBytes(receivedOhashes, [][]byte{expectedOhashes[0]}) {
+ t.Fatalf("\nexpected %v\ngot %v", [][]byte{expectedOhashes[0]}, receivedOhashes)
+ }
+
+ // We got the first object, expect NextPageToken to be the second quarantined object hash
+ if !bytes.Equal(r.NextPageToken, expectedOhashes[1]) {
+ t.Fatalf("\nexpected %v got %v", expectedOhashes[1], r.NextPageToken)
+ }
+
+ // Get the next two entries
+ lqInfo = &pb.ListQuarantinedOHashesRequest{PageSize: 2, PageToken: r.NextPageToken}
+ out, err = proto.Marshal(lqInfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body = bytes.NewReader(out)
+
+ response, err = client.Post("http://unix/list_quarantined_ohashes", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("failed to list quarantined ohashes: %v", err)
+ }
+
+ r = &pb.ListQuarantinedOHashesReply{}
+ buf.Reset()
+ buf.ReadFrom(response.Body)
+ if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+ t.Fatal(err)
+ }
+
+ response.Body.Close()
+
+ receivedOhashes = [][]byte{}
+ for _, obj := range r.Objects {
+ receivedOhashes = append(receivedOhashes, obj.Name)
+ }
+
+ if !testEqSliceBytes(receivedOhashes, expectedOhashes[1:3]) {
+ t.Fatalf("\nexpected %v\ngot %v", expectedOhashes[1:3], receivedOhashes)
+ }
+
+	// We've read 3; expect NextPageToken to be the 4th quarantined object
+ if !bytes.Equal(r.NextPageToken, expectedOhashes[3]) {
+ t.Fatalf("\nexpected %v got %v", expectedOhashes[3], r.NextPageToken)
+ }
+
+ // Get all remaining entries
+ lqInfo = &pb.ListQuarantinedOHashesRequest{PageSize: 100, PageToken: r.NextPageToken}
+ out, err = proto.Marshal(lqInfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body = bytes.NewReader(out)
+
+ response, err = client.Post("http://unix/list_quarantined_ohashes", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("failed to list quarantined ohashes: %v", err)
+ }
+
+ r = &pb.ListQuarantinedOHashesReply{}
+ buf.Reset()
+ buf.ReadFrom(response.Body)
+ if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+ t.Fatal(err)
+ }
+
+ response.Body.Close()
+
+ receivedOhashes = [][]byte{}
+ for _, obj := range r.Objects {
+ receivedOhashes = append(receivedOhashes, obj.Name)
+ }
+
+ if !testEqSliceBytes(receivedOhashes, expectedOhashes[3:]) {
+		t.Fatalf("\nexpected %v\ngot %v", expectedOhashes[3:], receivedOhashes)
+ }
+
+ // We've read all quarantined objects, NextPageToken should not be set
+ if !bytes.Equal(r.NextPageToken, []byte("")) {
+ t.Fatalf("\nexpected %v got %v", []byte(""), r.NextPageToken)
+ }
+
+}
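+
+// listAllQuarantinedOHashes is an illustrative sketch (not used by the tests
+// above) of how a caller is expected to drive the pagination protocol: POST
+// ListQuarantinedOHashesRequest repeatedly, feeding each reply's
+// NextPageToken back as the next PageToken, until the token comes back empty.
+func listAllQuarantinedOHashes(pageSize uint32) (names [][]byte, err error) {
+	token := []byte("")
+	for {
+		req := &pb.ListQuarantinedOHashesRequest{PageSize: pageSize, PageToken: token}
+		out, err := proto.Marshal(req)
+		if err != nil {
+			return nil, err
+		}
+		resp, err := client.Post("http://unix/list_quarantined_ohashes", "application/octet-stream", bytes.NewReader(out))
+		if err = check_200(resp, err); err != nil {
+			return nil, err
+		}
+		buf := new(bytes.Buffer)
+		buf.ReadFrom(resp.Body)
+		resp.Body.Close()
+		r := &pb.ListQuarantinedOHashesReply{}
+		if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+			return nil, err
+		}
+		for _, obj := range r.Objects {
+			names = append(names, obj.Name)
+		}
+		// An empty NextPageToken means the listing is complete
+		if len(r.NextPageToken) == 0 {
+			return names, nil
+		}
+		token = r.NextPageToken
+	}
+}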
+
+func TestLoadObjectsByVolume(t *testing.T) {
+ // List non quarantined objects from volume 22, we should not find any
+ volIndex := &pb.LoadObjectsByVolumeRequest{Index: 22}
+ out, err := proto.Marshal(volIndex)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body := bytes.NewReader(out)
+
+ response, err := client.Post("http://unix/load_objects_by_volume", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("failed to call LoadObjectsByVolume: %v", err)
+ }
+
+ r := &pb.LoadObjectsByVolumeReply{}
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(response.Body)
+ if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+ t.Fatal(err)
+ }
+
+ if r.Objects != nil {
+ t.Fatalf("did not expect to find objects")
+ }
+
+ // List quarantined objects from volume 22
+ volIndex = &pb.LoadObjectsByVolumeRequest{Index: 22, Quarantined: true}
+ out, err = proto.Marshal(volIndex)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body = bytes.NewReader(out)
+
+ response, err = client.Post("http://unix/load_objects_by_volume", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("failed to call LoadObjectsByVolume: %v", err)
+ }
+
+ r = &pb.LoadObjectsByVolumeReply{}
+ buf = new(bytes.Buffer)
+ buf.ReadFrom(response.Body)
+ if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+ t.Fatal(err)
+ }
+
+ expectedObjects := []pb.Object{
+ {Name: []byte("687ba0410f4323c66397a85292077b101515750801.10244#0#d.data"), VolumeIndex: 22, Offset: 4096},
+ {Name: []byte("6b08eabf5667557c72dc6570abcfb8451515643210.72429#4#d.data"), VolumeIndex: 22, Offset: 8192},
+ }
+
+ // we should have all of them
+ if len(r.Objects) != len(expectedObjects) {
+ t.Fatalf("Expected %d objects, got %d", len(expectedObjects), len(r.Objects))
+ }
+ if r.NextPageToken != nil {
+ t.Fatalf("Expected NextPageToken to be nil, but got: %s", string(r.NextPageToken))
+ }
+
+ for i, obj := range r.Objects {
+ if !bytes.Equal(obj.Name, expectedObjects[i].Name) {
+			t.Fatalf("expected %s, got %s", string(expectedObjects[i].Name), string(obj.Name))
+ }
+ if obj.VolumeIndex != expectedObjects[i].VolumeIndex {
+ t.Fatalf("expected %d, got %d", expectedObjects[i].VolumeIndex, obj.VolumeIndex)
+ }
+ if obj.Offset != expectedObjects[i].Offset {
+ t.Fatalf("expected %d, got %d", expectedObjects[i].Offset, obj.Offset)
+ }
+ }
+
+ // List quarantined objects from volume 22 with pagination
+ volIndex = &pb.LoadObjectsByVolumeRequest{Index: 22, Quarantined: true, PageSize: 1}
+ out, err = proto.Marshal(volIndex)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body = bytes.NewReader(out)
+
+ response, err = client.Post("http://unix/load_objects_by_volume", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("failed to call LoadObjectsByVolume: %v", err)
+ }
+
+ r = &pb.LoadObjectsByVolumeReply{}
+ buf = new(bytes.Buffer)
+ buf.ReadFrom(response.Body)
+ if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+ t.Fatal(err)
+ }
+
+ // we should have one object
+ if len(r.Objects) != 1 {
+		t.Fatalf("Expected 1 object, got %d", len(r.Objects))
+ }
+ if !bytes.Equal(r.NextPageToken, expectedObjects[1].Name) {
+ t.Fatalf("Expected NextPageToken to be %s, but got: %s", string(expectedObjects[1].Name), string(r.NextPageToken))
+ }
+
+	if !bytes.Equal(r.Objects[0].Name, expectedObjects[0].Name) {
+		t.Fatalf("expected %s, got %s", string(expectedObjects[0].Name), string(r.Objects[0].Name))
+	}
+ if r.Objects[0].VolumeIndex != expectedObjects[0].VolumeIndex {
+ t.Fatalf("expected %d, got %d", expectedObjects[0].VolumeIndex, r.Objects[0].VolumeIndex)
+ }
+ if r.Objects[0].Offset != expectedObjects[0].Offset {
+ t.Fatalf("expected %d, got %d", expectedObjects[0].Offset, r.Objects[0].Offset)
+ }
+
+ // Second call with pagination
+ volIndex = &pb.LoadObjectsByVolumeRequest{Index: 22, Quarantined: true, PageSize: 1, PageToken: r.NextPageToken}
+ out, err = proto.Marshal(volIndex)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body = bytes.NewReader(out)
+
+ response, err = client.Post("http://unix/load_objects_by_volume", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("failed to call LoadObjectsByVolume: %v", err)
+ }
+
+ r = &pb.LoadObjectsByVolumeReply{}
+ buf = new(bytes.Buffer)
+ buf.ReadFrom(response.Body)
+ if err = proto.Unmarshal(buf.Bytes(), r); err != nil {
+ t.Fatal(err)
+ }
+
+ // we should have one object
+ if len(r.Objects) != 1 {
+		t.Fatalf("Expected 1 object, got %d", len(r.Objects))
+ }
+ if r.NextPageToken != nil {
+ t.Fatalf("Expected NextPageToken to be nil, but got: %s", string(r.NextPageToken))
+ }
+
+	if !bytes.Equal(r.Objects[0].Name, expectedObjects[1].Name) {
+		t.Fatalf("expected %s, got %s", string(expectedObjects[1].Name), string(r.Objects[0].Name))
+	}
+ if r.Objects[0].VolumeIndex != expectedObjects[1].VolumeIndex {
+ t.Fatalf("expected %d, got %d", expectedObjects[1].VolumeIndex, r.Objects[0].VolumeIndex)
+ }
+ if r.Objects[0].Offset != expectedObjects[1].Offset {
+ t.Fatalf("expected %d, got %d", expectedObjects[1].Offset, r.Objects[0].Offset)
+ }
+}
+
+func TestListQuarantinedOHash(t *testing.T) {
+ ohash := &pb.ListQuarantinedOHashRequest{Prefix: []byte("6b08eabf5667557c72dc6570aa1fb845"), RepairTool: false}
+ out, err := proto.Marshal(ohash)
+ if err != nil {
+ t.Fatal(err)
+ }
+ body := bytes.NewReader(out)
+
+ response, err := client.Post("http://unix/list_quarantined_ohash", "application/octet-stream", body)
+ if err = check_200(response, err); err != nil {
+ t.Fatalf("error listing quarantined object files: %s", err)
+ }
+
+ qList := &pb.ListQuarantinedOHashReply{}
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(response.Body)
+ if err = proto.Unmarshal(buf.Bytes(), qList); err != nil {
+ t.Fatal(err)
+ }
+ response.Body.Close()
+
+ expectedFiles := [][]byte{
+ []byte("6b08eabf5667557c72dc6570aa1fb8451515750801.08639#4#d.data"),
+ []byte("6b08eabf5667557c72dc6570aa1fb8451515750856.77219.meta"),
+ }
+
+ if len(qList.Objects) != len(expectedFiles) {
+ t.Fatalf("got %d objects, expected %d", len(qList.Objects), len(expectedFiles))
+ }
+
+ receivedFiles := make([][]byte, len(qList.Objects))
+ for i, obj := range qList.Objects {
+ receivedFiles[i] = obj.Name
+ }
+
+ if !testEqSliceBytes(receivedFiles, expectedFiles) {
+ t.Fatalf("\nexpected %v\ngot %v", expectedFiles, receivedFiles)
+ }
+
+ // Add test, non existent ohash
+ // Add test, unquarantine one file, list again
+}
+
+func testEqSliceBytes(a, b [][]byte) bool {
+ if a == nil && b == nil {
+ return true
+ }
+ if a == nil || b == nil {
+ return false
+ }
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if !bytes.Equal(a[i], b[i]) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/go/swift-rpc-losf/snappy b/go/swift-rpc-losf/snappy
new file mode 160000
+Subproject 3f194acb57e0487531c96b97af61dcbd025a78a
diff --git a/go/swift-rpc-losf/stats.go b/go/swift-rpc-losf/stats.go
new file mode 100644
index 000000000..cc8ebd740
--- /dev/null
+++ b/go/swift-rpc-losf/stats.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2010-2012 OpenStack Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+// Returns the number of entries within the namespace
+func countItems(kv KV, namespace byte) (itemCount uint64) {
+ it := kv.NewIterator(namespace)
+ defer it.Close()
+
+ for it.SeekToFirst(); it.Valid(); it.Next() {
+ itemCount++
+ }
+
+ return
+}
+
+// Returns KV stats in a map.
+// It walks the whole KV to do so; intended for debugging, not for regular monitoring.
+func CollectStats(s *server) (entriesCount map[string]uint64) {
+ entriesCount = make(map[string]uint64)
+
+ entriesCount["volume_count"] = countItems(s.kv, volumePrefix)
+ entriesCount["object_count"] = countItems(s.kv, objectPrefix)
+ entriesCount["deletequeue_count"] = countItems(s.kv, deleteQueuePrefix)
+ entriesCount["quarantine_count"] = countItems(s.kv, quarantinePrefix)
+
+ return
+}
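+
+// Illustrative usage (a sketch, not called from this file): dump the counts
+// when debugging a KV:
+//
+//	for name, count := range CollectStats(s) {
+//		log.Debugf("%s: %d", name, count)
+//	}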
diff --git a/go/swift-rpc-losf/status/status.go b/go/swift-rpc-losf/status/status.go
new file mode 100644
index 000000000..66b21646c
--- /dev/null
+++ b/go/swift-rpc-losf/status/status.go
@@ -0,0 +1,27 @@
+package status
+
+import (
+ "fmt"
+ "github.com/openstack/swift-rpc-losf/codes"
+)
+
+type RpcError struct {
+ code codes.StatusCode
+ msg string
+}
+
+func (e *RpcError) Error() string {
+ return fmt.Sprintf("rpc error: %d. %s", e.code, e.msg)
+}
+
+func Error(code codes.StatusCode, msg string) error {
+ return &RpcError{code, msg}
+}
+
+func Errorf(code codes.StatusCode, format string, a ...interface{}) error {
+ return Error(code, fmt.Sprintf(format, a...))
+}
+
+func (e *RpcError) Code() codes.StatusCode {
+ return e.code
+}
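+
+// Illustrative usage (a sketch; see the handlers in rpc.go for the real call
+// sites): a handler returns, e.g.,
+//
+//	status.Errorf(codes.AlreadyExists, "object already exists: %s", name)
+//
+// and the HTTP layer surfaces the code as the response status code, which is
+// what rpc_test.go compares against (codes.AlreadyExists, codes.NotFound).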
diff --git a/go/swift-rpc-losf/swift.go b/go/swift-rpc-losf/swift.go
new file mode 100644
index 000000000..c51362dac
--- /dev/null
+++ b/go/swift-rpc-losf/swift.go
@@ -0,0 +1,66 @@
+// Copyright (c) 2010-2012 OpenStack Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+)
+
+// getPartitionFromOhash returns the partition, given an object hash and the partition bit count
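+// Worked example (mirrors the test data in swift_test.go): for ohash
+// "00db344e979c8c8fa4376dc60ba8102e" with 18 partition bits, the first 16 hex
+// characters parse to 0x00db344e979c8c8f; shifting right by 64-18 = 46 bits
+// keeps the top 18 bits, 0x36c = partition 876.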
+func getPartitionFromOhash(ohash []byte, partitionBits int) (partition uint64, err error) {
+ if len(ohash) < 16 {
+		err = errors.New("ohash must be at least 16 hex characters long")
+ return
+ }
+ highHash, err := strconv.ParseUint(string(ohash[0:16]), 16, 64)
+ if err != nil {
+ return
+ }
+
+ // shift to get the partition
+ partition = highHash >> uint64(64-partitionBits)
+ return
+}
+
+// getLastPartition returns the last possible partition given the partition bit count
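+// (equivalently, (1<<partitionBits)-1)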
+func getLastPartition(partitionBits int) (partition uint64, err error) {
+ for i := 0; i < partitionBits; i++ {
+ partition |= 1 << uint64(i)
+ }
+ return
+}
+
+// Returns the first possible object prefix in the KV for the given partition and bit count.
+// Example: partition 876 with 18 bits -> "00db0000000000000000000000000000"
+func getObjPrefixFromPartition(partition uint64, partitionBits int) (prefix []byte, err error) {
+ firstnum := partition << uint64(64-partitionBits)
+ prefix = []byte(fmt.Sprintf("%016x0000000000000000", firstnum))
+ return
+}
+
+// Returns the first possible object prefix in the KV for the given partition
+// and bit count, in its encoded form
+func getEncodedObjPrefixFromPartition(partition uint64, partitionBits int) (prefix []byte, err error) {
+ key, err := getObjPrefixFromPartition(partition, partitionBits)
+ if err != nil {
+ return
+ }
+
+ prefix, err = EncodeObjectKey(key)
+ return
+}
diff --git a/go/swift-rpc-losf/swift_test.go b/go/swift-rpc-losf/swift_test.go
new file mode 100644
index 000000000..5e488558b
--- /dev/null
+++ b/go/swift-rpc-losf/swift_test.go
@@ -0,0 +1,130 @@
+// Copyright (c) 2010-2012 OpenStack Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "testing"
+)
+
+type getPartitionTest struct {
+ ohash string
+ bitCount int
+ expectedPartition uint64
+}
+
+func TestGetPartitionFromOhash(t *testing.T) {
+ var getPartitionTests = []getPartitionTest{
+ {"b80362143ac3221d15a75f4bd1af3fac", 18, 188429},
+ {"00db344e979c8c8fa4376dc60ba8102e", 18, 876},
+ {"01342cbbf02d9b27396ac937b0f049e1", 18, 1232},
+ {"ffffc63ac2fa908fc137e7e0f1c4df97", 18, 262143},
+ }
+
+ for _, tt := range getPartitionTests {
+ partition, err := getPartitionFromOhash([]byte(tt.ohash), tt.bitCount)
+ if err != nil {
+ t.Error(err)
+ }
+ if partition != tt.expectedPartition {
+ t.Errorf("For ohash: %s, got partition %d, expected %d\n", tt.ohash, partition, tt.expectedPartition)
+ }
+ }
+
+ // Test invalid data, too short
+ invalidHash := []byte("abcd")
+ bitCount := 18
+ _, err := getPartitionFromOhash(invalidHash, bitCount)
+ if err == nil {
+ t.Fatalf("Should fail to getPartitionFromOhash for: %x, %d", invalidHash, bitCount)
+ }
+
+ // invalid md5
+ invalidHash = []byte("zzzz2cbbf02d9b27396ac937b0f049e1")
+ _, err = getPartitionFromOhash(invalidHash, bitCount)
+ if err == nil {
+ t.Fatalf("Should fail to getPartitionFromOhash for: %x, %d", invalidHash, bitCount)
+ }
+}
+
+type getLastPartitionTest struct {
+ bitCount int
+ expectedPartition uint64
+}
+
+func TestGetLastPartition(t *testing.T) {
+ var getLastPartitionTests = []getLastPartitionTest{
+ {18, 262143},
+ {17, 131071},
+ {16, 65535},
+ }
+
+ for _, tt := range getLastPartitionTests {
+ partition, err := getLastPartition(tt.bitCount)
+ if err != nil {
+ t.Error(err)
+ }
+ if partition != tt.expectedPartition {
+ t.Errorf("For bitcount: %d, got last partition: %d, expected %d\n", tt.bitCount, partition, tt.expectedPartition)
+ }
+ }
+}
+
+type getObjTest struct {
+ partition uint64
+ bitCount int
+ expectedPrefix []byte
+}
+
+func TestGetObjPrefixFromPartition(t *testing.T) {
+ var getObjTests = []getObjTest{
+ {876, 18, []byte("00db0000000000000000000000000000")},
+ {209827, 18, []byte("cce8c000000000000000000000000000")},
+ {260177, 18, []byte("fe144000000000000000000000000000")},
+ {260179, 18, []byte("fe14c000000000000000000000000000")},
+ {260180, 18, []byte("fe150000000000000000000000000000")},
+ }
+
+ for _, tt := range getObjTests {
+ prefix, err := getObjPrefixFromPartition(tt.partition, tt.bitCount)
+ if err != nil {
+ t.Error(err)
+ }
+		if !bytes.Equal(prefix, tt.expectedPrefix) {
+ t.Errorf("For partition: %d, bitCount: %d, got prefix: %s, expected %s\n", tt.partition, tt.bitCount, prefix, tt.expectedPrefix)
+ }
+ }
+}
+
+func TestGetEncodedObjPrefixFromPartition(t *testing.T) {
+ var getObjTests = []getObjTest{
+ {876, 18, []byte("\x00\xdb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")},
+ {209827, 18, []byte("\xcc\xe8\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")},
+ {260177, 18, []byte("\xfe\x14\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")},
+ {260179, 18, []byte("\xfe\x14\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")},
+ {260180, 18, []byte("\xfe\x15\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")},
+ }
+
+ for _, tt := range getObjTests {
+ prefix, err := getEncodedObjPrefixFromPartition(tt.partition, tt.bitCount)
+ if err != nil {
+ t.Error(err)
+ }
+		if !bytes.Equal(prefix, tt.expectedPrefix) {
+ t.Errorf("For partition: %d, bitCount: %d, got prefix: %x, expected %x\n", tt.partition, tt.bitCount, prefix, tt.expectedPrefix)
+ }
+ }
+}
diff --git a/go/swift-rpc-losf/utils.go b/go/swift-rpc-losf/utils.go
new file mode 100644
index 000000000..c8b9f963a
--- /dev/null
+++ b/go/swift-rpc-losf/utils.go
@@ -0,0 +1,102 @@
+// Copyright (c) 2010-2012 OpenStack Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+// implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "golang.org/x/sys/unix"
+ "os"
+ "strings"
+)
+
+// returns true if dirPath is mounted, false otherwise
+func isMounted(dirPath string) (bool, error) {
+ f, err := os.Open("/proc/mounts")
+ if err != nil {
+ return false, err
+ }
+ defer f.Close()
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ elems := strings.Split(scanner.Text(), " ")
+		if len(elems) > 1 && dirPath == elems[1] {
+ return true, nil
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return false, err
+ }
+
+ return false, nil
+}
+
+// Returns true if path exists and is a directory, false otherwise
+func dirExists(dirPath string) (bool, error) {
+ stat, err := os.Stat(dirPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+ }
+
+ if stat.IsDir() {
+ return true, nil
+ }
+ return false, nil
+}
+
+// Returns the base directory name for the given policy index:
+// rootDir for policy 0, "<rootDir>-<policyIdx>" otherwise
+func getBaseDirName(rootDir string, policyIdx int) string {
+ if policyIdx == 0 {
+ return rootDir
+ }
+
+ return fmt.Sprintf("%s-%d", rootDir, policyIdx)
+}
+
+// Create a file on the filesystem signaling to the object-server that the
+// KV cannot be used (checked along with check_mount)
+func CreateDirtyFile(dirtyFilePath string) (err error) {
+ f, err := os.Create(dirtyFilePath)
+ if err != nil {
+ return
+ }
+ f.Close()
+ return
+}
+
+// Global variable: keep a reference to the lock file, lest it be garbage
+// collected (auto-closing it and releasing the lock) after lockSocket() returns.
+var lockFile *os.File
+
+// Acquire a lock on a file to protect the RPC socket.
+// It does not block; it returns an error if the lock cannot be acquired.
+// There is no explicit unlock; the lock is released when the process exits.
+func lockSocket(socketPath string) (err error) {
+ lockFilePath := fmt.Sprintf("%s.lock", socketPath)
+
+ lockFile, err = os.OpenFile(lockFilePath, os.O_WRONLY|os.O_CREATE, 00600)
+ if err != nil {
+ return
+ }
+
+ err = unix.Flock(int(lockFile.Fd()), unix.LOCK_EX|unix.LOCK_NB)
+ return
+}
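+
+// Illustrative usage (a sketch; see main.go for the actual call site):
+//
+//	if err := lockSocket(socketPath); err != nil {
+//		log.Fatal("RPC socket already locked by another process: ", err)
+//	}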
diff --git a/requirements.txt b/requirements.txt
index 223fc617b..ecfdac0ba 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -12,6 +12,8 @@ six>=1.10.0
xattr>=0.4;sys_platform!='win32' # MIT
PyECLib>=1.3.1 # BSD
cryptography>=2.0.2 # BSD/Apache-2.0
+fusepy>=2.0.4
+protobuf>=3.9.1
# For python 2.7, the following requirements are needed; they are not
# included since the requirments-check check will fail otherwise since
diff --git a/setup.cfg b/setup.cfg
index a8ee4f617..c0526f237 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -67,6 +67,9 @@ scripts =
bin/swift-ring-builder
bin/swift-ring-builder-analyzer
bin/swift-ring-composer
+ bin/swift-object-rpcmanager
+ bin/swift-losf-volume-check
+ bin/swift-mount-losf
[extras]
kms_keymaster =
@@ -132,6 +135,9 @@ paste.filter_factory =
swift.diskfile =
replication.fs = swift.obj.diskfile:DiskFileManager
erasure_coding.fs = swift.obj.diskfile:ECDiskFileManager
+ replication.kv = swift.obj.kvfile:KVFileManager
+ erasure_coding.kv = swift.obj.kvfile:ECKVFileManager
+ erasure_coding.hybrid = swift.obj.hybrid:ECHybridFileManager
[egg_info]
tag_build =
diff --git a/swift/common/manager.py b/swift/common/manager.py
index 3af96e580..9f417757b 100644
--- a/swift/common/manager.py
+++ b/swift/common/manager.py
@@ -39,7 +39,8 @@ ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
'container-updater', 'object-auditor', 'object-server',
'object-expirer', 'object-replicator',
'object-reconstructor', 'object-updater',
- 'proxy-server', 'account-replicator', 'account-reaper']
+ 'proxy-server', 'account-replicator', 'account-reaper',
+ 'object-rpcmanager', 'object-migrator']
MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server',
'object-server']
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py
index 68f23b3e4..9e5fdd93f 100644
--- a/swift/obj/diskfile.py
+++ b/swift/obj/diskfile.py
@@ -37,6 +37,7 @@ import errno
import fcntl
import json
import os
+import shutil
import re
import time
import uuid
@@ -44,7 +45,6 @@ from hashlib import md5
import logging
import traceback
import xattr
-from os.path import basename, dirname, exists, join, splitext
from random import shuffle
from tempfile import mkstemp
from contextlib import contextmanager
@@ -323,11 +323,11 @@ def quarantine_renamer(device_path, corrupted_file_path):
if policy is None:
# TODO: support a quarantine-unknown location
policy = POLICIES.legacy
- from_dir = dirname(corrupted_file_path)
- to_dir = join(device_path, 'quarantined',
- get_data_dir(policy),
- basename(from_dir))
- invalidate_hash(dirname(from_dir))
+ from_dir = os.path.dirname(corrupted_file_path)
+ to_dir = os.path.join(device_path, 'quarantined',
+ get_data_dir(policy),
+ os.path.basename(from_dir))
+ invalidate_hash(os.path.dirname(from_dir))
try:
renamer(from_dir, to_dir, fsync=False)
except OSError as e:
@@ -345,7 +345,7 @@ def read_hashes(partition_dir):
:returns: a dict, the suffix hashes (if any), the key 'valid' will be False
if hashes.pkl is corrupt, cannot be read or does not exist
"""
- hashes_file = join(partition_dir, HASH_FILE)
+ hashes_file = os.path.join(partition_dir, HASH_FILE)
hashes = {'valid': False}
try:
with open(hashes_file, 'rb') as hashes_fp:
@@ -378,7 +378,7 @@ def write_hashes(partition_dir, hashes):
The updated key is added to hashes before it is written.
"""
- hashes_file = join(partition_dir, HASH_FILE)
+ hashes_file = os.path.join(partition_dir, HASH_FILE)
# 'valid' key should always be set by the caller; however, if there's a bug
# setting invalid is most safe
hashes.setdefault('valid', False)
@@ -397,7 +397,7 @@ def consolidate_hashes(partition_dir):
:returns: a dict, the suffix hashes (if any), the key 'valid' will be False
if hashes.pkl is corrupt, cannot be read or does not exist
"""
- invalidations_file = join(partition_dir, HASH_INVALIDATIONS_FILE)
+ invalidations_file = os.path.join(partition_dir, HASH_INVALIDATIONS_FILE)
with lock_path(partition_dir):
hashes = read_hashes(partition_dir)
@@ -431,9 +431,9 @@ def invalidate_hash(suffix_dir):
invalidating
"""
- suffix = basename(suffix_dir)
- partition_dir = dirname(suffix_dir)
- invalidations_file = join(partition_dir, HASH_INVALIDATIONS_FILE)
+ suffix = os.path.basename(suffix_dir)
+ partition_dir = os.path.dirname(suffix_dir)
+ invalidations_file = os.path.join(partition_dir, HASH_INVALIDATIONS_FILE)
if not isinstance(suffix, bytes):
suffix = suffix.encode('utf-8')
with lock_path(partition_dir), open(invalidations_file, 'ab') as inv_fh:
@@ -803,7 +803,7 @@ class BaseDiskFileManager(object):
validated.
"""
ts_ctype = None
- fname, ext = splitext(filename)
+ fname, ext = os.path.splitext(filename)
try:
if ext == '.meta':
timestamp, ts_ctype = decode_timestamps(
@@ -1032,7 +1032,8 @@ class BaseDiskFileManager(object):
for info_key in ('data_info', 'meta_info', 'ts_info', 'ctype_info'):
info = results.get(info_key)
key = info_key[:-5] + '_file'
- results[key] = join(datadir, info['filename']) if info else None
+ results[key] = os.path.join(
+ datadir, info['filename']) if info else None
if verify:
assert self._verify_ondisk_files(
@@ -1074,14 +1075,14 @@ class BaseDiskFileManager(object):
files, hsh_path, verify=False, **kwargs)
if 'ts_info' in results and is_reclaimable(
results['ts_info']['timestamp']):
- remove_file(join(hsh_path, results['ts_info']['filename']))
+ remove_file(os.path.join(hsh_path, results['ts_info']['filename']))
files.remove(results.pop('ts_info')['filename'])
for file_info in results.get('possible_reclaim', []):
# stray files are not deleted until reclaim-age
if is_reclaimable(file_info['timestamp']):
results.setdefault('obsolete', []).append(file_info)
for file_info in results.get('obsolete', []):
- remove_file(join(hsh_path, file_info['filename']))
+ remove_file(os.path.join(hsh_path, file_info['filename']))
files.remove(file_info['filename'])
results['files'] = files
if not files: # everything got unlinked
@@ -1135,23 +1136,23 @@ class BaseDiskFileManager(object):
raise PathNotDir()
raise
for hsh in path_contents:
- hsh_path = join(path, hsh)
+ hsh_path = os.path.join(path, hsh)
try:
ondisk_info = self.cleanup_ondisk_files(
hsh_path, policy=policy)
except OSError as err:
if err.errno == errno.ENOTDIR:
- partition_path = dirname(path)
- objects_path = dirname(partition_path)
- device_path = dirname(objects_path)
+ partition_path = os.path.dirname(path)
+ objects_path = os.path.dirname(partition_path)
+ device_path = os.path.dirname(objects_path)
# The made-up filename is so that the eventual dirpath()
# will result in this object directory that we care about.
# Some failures will result in an object directory
# becoming a file, thus causing the parent directory to
# be qarantined.
- quar_path = quarantine_renamer(device_path,
- join(hsh_path,
- "made-up-filename"))
+ quar_path = quarantine_renamer(
+ device_path, os.path.join(
+ hsh_path, "made-up-filename"))
logging.exception(
_('Quarantined %(hsh_path)s to %(quar_path)s because '
'it is not a directory'), {'hsh_path': hsh_path,
@@ -1236,7 +1237,7 @@ class BaseDiskFileManager(object):
hashed = 0
dev_path = self.get_dev_path(device)
partition_path = get_part_path(dev_path, policy, partition)
- hashes_file = join(partition_path, HASH_FILE)
+ hashes_file = os.path.join(partition_path, HASH_FILE)
modified = False
orig_hashes = {'valid': False}
@@ -1278,7 +1279,7 @@ class BaseDiskFileManager(object):
hashes.update((suffix, None) for suffix in recalculate)
for suffix, hash_ in list(hashes.items()):
if not hash_:
- suffix_dir = join(partition_path, suffix)
+ suffix_dir = os.path.join(partition_path, suffix)
try:
hashes[suffix] = self._hash_suffix(
suffix_dir, policy=policy)
@@ -1322,7 +1323,7 @@ class BaseDiskFileManager(object):
"""
if mount_check is False:
# explicitly forbidden from syscall, just return path
- return join(self.devices, device)
+ return os.path.join(self.devices, device)
# we'll do some kind of check if not explicitly forbidden
try:
return check_drive(self.devices, device,
@@ -1471,9 +1472,9 @@ class BaseDiskFileManager(object):
# Some failures will result in an object directory
# becoming a file, thus causing the parent directory to
# be qarantined.
- quar_path = self.quarantine_renamer(dev_path,
- join(object_path,
- "made-up-filename"))
+ quar_path = self.quarantine_renamer(
+ dev_path, os.path.join(
+ object_path, "made-up-filename"))
logging.exception(
_('Quarantined %(object_path)s to %(quar_path)s because '
'it is not a directory'), {'object_path': object_path,
@@ -1529,6 +1530,63 @@ class BaseDiskFileManager(object):
path, err)
return []
+ def exists(self, path):
+ """
+ :param path: full path to directory
+ """
+ return os.path.exists(path)
+
+ def mkdirs(self, path):
+ """
+ :param path: full path to directory
+ """
+ return mkdirs(path)
+
+ def listdir(self, path):
+ """
+ :param path: full path to directory
+ """
+ return os.listdir(path)
+
+ def rmtree(self, path, ignore_errors=False):
+ """
+ :param path: full path to directory
+ :param ignore_errors: if True, ignore errors from failed removals,
+ else, raise an exception.
+ """
+ return shutil.rmtree(path, ignore_errors)
+
+ def remove_file(self, path):
+ """
+ quiet wrapper around os.unlink. can be merged with remove?
+ :param path: full path to directory
+ """
+ return remove_file(path)
+
+ def remove(self, path):
+ """
+ :param path: full path to directory
+ """
+ return os.remove(path)
+
+ def isdir(self, path):
+ """
+ :param path: full path to directory
+ """
+ return os.path.isdir(path)
+
+ def isfile(self, path):
+ """
+ :param path: full path to directory
+ """
+ return os.path.isfile(path)
+
+ def rmdir(self, path):
+ """
+ :param path: full path to directory
+ """
+ return os.rmdir(path)
+
def yield_suffixes(self, device, partition, policy):
"""
Yields tuples of (full_path, suffix_only) for suffixes stored
@@ -1701,9 +1759,9 @@ class BaseDiskFileWriter(object):
else:
raise
if not self.manager.use_linkat:
- tmpdir = join(self._diskfile._device_path,
- get_tmp_dir(self._diskfile.policy))
- if not exists(tmpdir):
+ tmpdir = os.path.join(self._diskfile._device_path,
+ get_tmp_dir(self._diskfile.policy))
+ if not os.path.exists(tmpdir):
mkdirs(tmpdir)
fd, tmppath = mkstemp(dir=tmpdir)
return fd, tmppath
@@ -1792,7 +1850,7 @@ class BaseDiskFileWriter(object):
# drop_cache() after fsync() to avoid redundant work (pages all
# clean).
drop_buffer_cache(self._fd, 0, self._upload_size)
- self.manager.invalidate_hash(dirname(self._datadir))
+ self.manager.invalidate_hash(os.path.dirname(self._datadir))
# After the rename/linkat completes, this object will be available for
# requests to reference.
if self._tmppath:
@@ -1852,7 +1910,7 @@ class BaseDiskFileWriter(object):
timestamp, self._extension, ctype_timestamp=ctype_timestamp,
*a, **kw)
metadata['name'] = self._name
- target_path = join(self._datadir, filename)
+ target_path = os.path.join(self._datadir, filename)
tpool.execute(self._finalize_put, metadata, target_path, cleanup)
@@ -2294,7 +2352,7 @@ class BaseDiskFile(object):
self._account = None
self._container = None
self._obj = None
- self._tmpdir = join(device_path, get_tmp_dir(policy))
+ self._tmpdir = os.path.join(device_path, get_tmp_dir(policy))
self._ondisk_info = None
self._metadata = None
self._datafile_metadata = None
@@ -2307,7 +2365,7 @@ class BaseDiskFile(object):
self._datadir = _datadir
else:
name_hash = hash_path(account, container, obj)
- self._datadir = join(
+ self._datadir = os.path.join(
device_path, storage_directory(get_data_dir(policy),
partition, name_hash))
@@ -3111,7 +3169,7 @@ class ECDiskFileWriter(BaseDiskFileWriter):
:raises DiskFileError: if the diskfile frag_index has not been set
(either during initialisation or a call to put())
"""
- data_file_path = join(
+ data_file_path = os.path.join(
self._datadir, self.manager.make_on_disk_filename(
timestamp, '.data', self._diskfile._frag_index))
durable_data_file_path = os.path.join(
@@ -3270,7 +3328,7 @@ class ECDiskFile(BaseDiskFile):
timestamp, ext='.data', frag_index=frag_index, durable=True)
remove_file(os.path.join(self._datadir, purge_file))
remove_directory(self._datadir)
- self.manager.invalidate_hash(dirname(self._datadir))
+ self.manager.invalidate_hash(os.path.dirname(self._datadir))
class ECDiskFileManager(BaseDiskFileManager):
@@ -3354,7 +3412,7 @@ class ECDiskFileManager(BaseDiskFileManager):
validated.
"""
frag_index = None
- float_frag, ext = splitext(filename)
+ float_frag, ext = os.path.splitext(filename)
if ext == '.data':
parts = float_frag.split('#')
try:
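[Editor's note: the new wrapper methods above (exists, mkdirs, listdir, rmtree, remove_file, remove, isdir, isfile, rmdir) route namespace operations through the manager instead of calling os/shutil directly, so a key-value backed manager can override them without touching any caller. A minimal sketch of the pattern, assuming a hypothetical KVDiskFileManager and rpc_client — neither name is defined by this patch:

    import os

    class BaseDiskFileManager(object):
        # Trimmed to the two wrappers relevant to the sketch.
        def listdir(self, path):
            return os.listdir(path)

        def isdir(self, path):
            return os.path.isdir(path)

    class KVDiskFileManager(BaseDiskFileManager):
        # Hypothetical subclass: answers namespace queries from the
        # LOSF key-value store instead of the POSIX filesystem.
        def __init__(self, rpc_client):
            self.rpc = rpc_client

        def listdir(self, path):
            # list_dir_entries() is an illustrative stand-in for the
            # real RPC wrappers; callers of self.listdir() need no
            # change either way.
            return self.rpc.list_dir_entries(path)
]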
diff --git a/swift/obj/fmgr.proto b/swift/obj/fmgr.proto
new file mode 100644
index 000000000..2604305bb
--- /dev/null
+++ b/swift/obj/fmgr.proto
@@ -0,0 +1,225 @@
+syntax = "proto3";
+
+package filemgr;
+
+// Python: protoc -I. --python_out=. fmgr.proto
+// Go:     protoc -I proto proto/fmgr.proto --go_out=proto
+
+message RegisterVolumeRequest {
+ uint32 partition = 1; // Swift partition
+ VolumeType type = 2;
+ uint32 volume_index = 3;
+ uint64 offset = 4; // Next available offset to use in the volume.
+ VolumeState state = 5;
+ bool repair_tool = 6; // Request is coming from a repair tool
+}
+
+message RegisterVolumeReply {}
+
+message UnregisterVolumeRequest {
+ uint32 index = 1;
+ bool repair_tool = 2;
+}
+
+message UnregisterVolumeReply {}
+
+message UpdateVolumeStateRequest {
+ uint32 volume_index = 1;
+ VolumeState state = 2;
+ bool repair_tool = 3;
+}
+
+message UpdateVolumeStateReply {}
+
+message GetVolumeRequest {
+ uint32 index = 1;
+ bool repair_tool = 2;
+}
+
+message GetVolumeReply {
+ uint32 volume_index = 1;
+ VolumeType volume_type = 2;
+ uint32 volume_state = 3;
+ uint32 partition = 4;
+ uint64 next_offset = 5;
+}
+
+message ListVolumesRequest {
+ uint32 partition = 1;
+ VolumeType type = 2;
+ bool repair_tool = 3;
+}
+
+message ListVolumesReply {
+ repeated Volume volumes = 1;
+}
+
+message RegisterObjectRequest {
+ bytes name = 1;
+ uint32 volume_index = 2;
+ uint64 offset = 3; // Object offset within volume
+ uint64 next_offset = 4; // Next offset to start from in the volume
+ bool repair_tool = 5;
+}
+
+message RegisterObjectReply {}
+
+message UnregisterObjectRequest {
+ bytes name = 1;
+ bool repair_tool = 2;
+}
+
+message UnregisterObjectReply {}
+
+message RenameObjectRequest {
+ bytes name = 1;
+ bytes new_name = 2;
+ bool repair_tool = 3;
+}
+
+message RenameObjectReply {}
+
+message LoadObjectRequest {
+ bytes name = 1;
+ bool is_quarantined = 2;
+ bool repair_tool = 3;
+}
+
+message LoadObjectReply {
+ bytes name = 1;
+ uint32 volume_index = 2;
+ uint64 offset = 3;
+}
+
+message QuarantineObjectRequest {
+ bytes name = 1;
+ bool repair_tool = 2;
+}
+
+message QuarantineObjectReply {}
+
+message UnquarantineObjectRequest {
+ bytes name = 1;
+ bool repair_tool = 2;
+}
+
+message UnquarantineObjectReply {}
+
+message LoadObjectsByPrefixRequest {
+ bytes prefix = 1;
+ bool repair_tool = 2;
+}
+
+message LoadObjectsByPrefixReply {
+ repeated Object objects = 1;
+}
+
+message LoadObjectsByVolumeRequest {
+ uint32 index = 1;
+ bool quarantined = 2; // If true, list only quarantined files
+ bytes page_token = 3;
+ uint32 page_size = 4;
+ bool repair_tool = 5;
+}
+
+message LoadObjectsByVolumeReply {
+ repeated Object objects = 1;
+ bytes next_page_token = 2;
+}
+
+message ListPartitionsRequest {
+ uint32 partition_bits = 1;
+}
+
+message ListPartitionRequest {
+ uint32 partition = 1;
+ uint32 partition_bits = 2;
+}
+
+message ListSuffixRequest {
+ uint32 partition = 1;
+ bytes suffix = 2;
+ uint32 partition_bits = 3;
+}
+
+message ListQuarantinedOHashesRequest {
+ bytes page_token = 1;
+ uint32 page_size = 2;
+}
+
+message ListQuarantinedOHashesReply {
+ repeated QuarantinedObjectName objects = 1;
+ bytes next_page_token = 2;
+}
+
+message ListQuarantinedOHashRequest {
+ bytes prefix = 1;
+ bool repair_tool = 2;
+}
+
+message ListQuarantinedOHashReply {
+ repeated Object objects = 1;
+}
+
+message GetNextOffsetRequest {
+ uint32 volume_index = 1;
+ bool repair_tool = 2;
+}
+
+message GetNextOffsetReply {
+ uint64 offset = 1;
+}
+
+message GetStatsRequest {}
+
+message GetStatsReply {
+ map<string, uint64> stats = 1;
+}
+
+message SetKvStateReply {}
+
+message GetKvStateRequest {}
+
+message KvState {
+ bool isClean = 1;
+}
+
+// Generic messages
+message Volume {
+ uint32 volume_index = 1;
+ VolumeType volume_type = 2;
+ uint32 volume_state = 3;
+ uint32 partition = 4;
+ uint64 next_offset = 5;
+}
+
+message Object {
+ bytes name = 1;
+ uint32 volume_index = 2;
+ uint64 offset = 3;
+}
+
+message QuarantinedObjectName {
+ bytes name = 1;
+}
+
+// For listdir() like functions
+message DirEntries {
+ repeated string entry = 1;
+}
+
+// Enums
+enum VolumeType {
+ VOLUME_DEFAULT = 0;
+ VOLUME_TOMBSTONE = 1;
+ VOLUME_X_DELETE_AT = 2;
+}
+
+enum VolumeState {
+ // Default state, volume can be read from and written to
+ STATE_RW = 0;
+ // Volume is being compacted (source). New objects cannot be appended
+ STATE_COMPACTION_SRC = 1;
+ // Volume is a compaction target. New objects cannot be appended
+ STATE_COMPACTION_TARGET = 2;
+}
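[Editor's note: the messages above are consumed through the generated fmgr_pb2 module (next diff). A short sketch of building, round-tripping, and paging them using only the standard protobuf message API; send() below is a stand-in for the real transport (the HTTP RPC client lives in swift/obj/rpc_http.py and is not shown here), and all field values are made up:

    from swift.obj import fmgr_pb2

    # Build a request exactly as RegisterObjectRequest is declared above.
    req = fmgr_pb2.RegisterObjectRequest(
        name=b'd41d8cd98f00b204e9800998ecf8427e',  # object key (made-up value)
        volume_index=3,
        offset=4096,         # where the object starts in the volume
        next_offset=8192,    # first free byte after the object
        repair_tool=False,
    )
    wire = req.SerializeToString()                            # bytes on the wire
    parsed = fmgr_pb2.RegisterObjectRequest.FromString(wire)  # ... and back
    assert parsed.next_offset == 8192

    # Paginated listings follow the page_token convention: resend the
    # reply's next_page_token until it comes back empty.
    def send(request):
        # Stand-in for the real RPC call; returns an empty reply so the
        # loop below terminates immediately.
        return fmgr_pb2.ListQuarantinedOHashesReply()

    token = b''
    while True:
        reply = send(fmgr_pb2.ListQuarantinedOHashesRequest(
            page_token=token, page_size=100))
        # ... process reply.objects ...
        if not reply.next_page_token:
            break
        token = reply.next_page_token
]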
diff --git a/swift/obj/fmgr_pb2.py b/swift/obj/fmgr_pb2.py
new file mode 100644
index 000000000..161aa7693
--- /dev/null
+++ b/swift/obj/fmgr_pb2.py
@@ -0,0 +1,2119 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: fmgr.proto
+
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='fmgr.proto',
+ package='filemgr',
+ syntax='proto3',
+ serialized_options=None,
+ serialized_pb=b'\n\nfmgr.proto\x12\x07\x66ilemgr\"\xad\x01\n\x15RegisterVolumeRequest\x12\x11\n\tpartition\x18\x01 \x01(\r\x12!\n\x04type\x18\x02 \x01(\x0e\x32\x13.filemgr.VolumeType\x12\x14\n\x0cvolume_index\x18\x03 \x01(\r\x12\x0e\n\x06offset\x18\x04 \x01(\x04\x12#\n\x05state\x18\x05 \x01(\x0e\x32\x14.filemgr.VolumeState\x12\x13\n\x0brepair_tool\x18\x06 \x01(\x08\"\x15\n\x13RegisterVolumeReply\"=\n\x17UnregisterVolumeRequest\x12\r\n\x05index\x18\x01 \x01(\r\x12\x13\n\x0brepair_tool\x18\x02 \x01(\x08\"\x17\n\x15UnregisterVolumeReply\"j\n\x18UpdateVolumeStateRequest\x12\x14\n\x0cvolume_index\x18\x01 \x01(\r\x12#\n\x05state\x18\x02 \x01(\x0e\x32\x14.filemgr.VolumeState\x12\x13\n\x0brepair_tool\x18\x03 \x01(\x08\"\x18\n\x16UpdateVolumeStateReply\"6\n\x10GetVolumeRequest\x12\r\n\x05index\x18\x01 \x01(\r\x12\x13\n\x0brepair_tool\x18\x02 \x01(\x08\"\x8e\x01\n\x0eGetVolumeReply\x12\x14\n\x0cvolume_index\x18\x01 \x01(\r\x12(\n\x0bvolume_type\x18\x02 \x01(\x0e\x32\x13.filemgr.VolumeType\x12\x14\n\x0cvolume_state\x18\x03 \x01(\r\x12\x11\n\tpartition\x18\x04 \x01(\r\x12\x13\n\x0bnext_offset\x18\x05 \x01(\x04\"_\n\x12ListVolumesRequest\x12\x11\n\tpartition\x18\x01 \x01(\r\x12!\n\x04type\x18\x02 \x01(\x0e\x32\x13.filemgr.VolumeType\x12\x13\n\x0brepair_tool\x18\x03 \x01(\x08\"4\n\x10ListVolumesReply\x12 \n\x07volumes\x18\x01 \x03(\x0b\x32\x0f.filemgr.Volume\"u\n\x15RegisterObjectRequest\x12\x0c\n\x04name\x18\x01 \x01(\x0c\x12\x14\n\x0cvolume_index\x18\x02 \x01(\r\x12\x0e\n\x06offset\x18\x03 \x01(\x04\x12\x13\n\x0bnext_offset\x18\x04 \x01(\x04\x12\x13\n\x0brepair_tool\x18\x05 \x01(\x08\"\x15\n\x13RegisterObjectReply\"<\n\x17UnregisterObjectRequest\x12\x0c\n\x04name\x18\x01 \x01(\x0c\x12\x13\n\x0brepair_tool\x18\x02 \x01(\x08\"\x17\n\x15UnregisterObjectReply\"J\n\x13RenameObjectRequest\x12\x0c\n\x04name\x18\x01 \x01(\x0c\x12\x10\n\x08new_name\x18\x02 \x01(\x0c\x12\x13\n\x0brepair_tool\x18\x03 \x01(\x08\"\x13\n\x11RenameObjectReply\"N\n\x11LoadObjectRequest\x12\x0c\n\x04name\x18\x01 \x01(\x0c\x12\x16\n\x0eis_quarantined\x18\x02 \x01(\x08\x12\x13\n\x0brepair_tool\x18\x03 \x01(\x08\"E\n\x0fLoadObjectReply\x12\x0c\n\x04name\x18\x01 \x01(\x0c\x12\x14\n\x0cvolume_index\x18\x02 \x01(\r\x12\x0e\n\x06offset\x18\x03 \x01(\x04\"<\n\x17QuarantineObjectRequest\x12\x0c\n\x04name\x18\x01 \x01(\x0c\x12\x13\n\x0brepair_tool\x18\x02 \x01(\x08\"\x17\n\x15QuarantineObjectReply\">\n\x19UnquarantineObjectRequest\x12\x0c\n\x04name\x18\x01 \x01(\x0c\x12\x13\n\x0brepair_tool\x18\x02 \x01(\x08\"\x19\n\x17UnquarantineObjectReply\"A\n\x1aLoadObjectsByPrefixRequest\x12\x0e\n\x06prefix\x18\x01 \x01(\x0c\x12\x13\n\x0brepair_tool\x18\x02 \x01(\x08\"<\n\x18LoadObjectsByPrefixReply\x12 \n\x07objects\x18\x01 \x03(\x0b\x32\x0f.filemgr.Object\"|\n\x1aLoadObjectsByVolumeRequest\x12\r\n\x05index\x18\x01 \x01(\r\x12\x13\n\x0bquarantined\x18\x02 \x01(\x08\x12\x12\n\npage_token\x18\x03 \x01(\x0c\x12\x11\n\tpage_size\x18\x04 \x01(\r\x12\x13\n\x0brepair_tool\x18\x05 \x01(\x08\"U\n\x18LoadObjectsByVolumeReply\x12 \n\x07objects\x18\x01 \x03(\x0b\x32\x0f.filemgr.Object\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\x0c\"/\n\x15ListPartitionsRequest\x12\x16\n\x0epartition_bits\x18\x01 \x01(\r\"A\n\x14ListPartitionRequest\x12\x11\n\tpartition\x18\x01 \x01(\r\x12\x16\n\x0epartition_bits\x18\x02 \x01(\r\"N\n\x11ListSuffixRequest\x12\x11\n\tpartition\x18\x01 \x01(\r\x12\x0e\n\x06suffix\x18\x02 \x01(\x0c\x12\x16\n\x0epartition_bits\x18\x03 \x01(\r\"F\n\x1dListQuarantinedOHashesRequest\x12\x12\n\npage_token\x18\x01 \x01(\x0c\x12\x11\n\tpage_size\x18\x02 \x01(\r\"g\n\x1bListQuarantinedOHashesReply\x12/\n\x07objects\x18\x01 \x03(\x0b\x32\x1e.filemgr.QuarantinedObjectName\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\x0c\"%\n\x15QuarantinedObjectName\x12\x0c\n\x04name\x18\x01 \x01(\x0c\"B\n\x1bListQuarantinedOHashRequest\x12\x0e\n\x06prefix\x18\x01 \x01(\x0c\x12\x13\n\x0brepair_tool\x18\x02 \x01(\x08\"=\n\x19ListQuarantinedOHashReply\x12 \n\x07objects\x18\x01 \x03(\x0b\x32\x0f.filemgr.Object\"A\n\x14GetNextOffsetRequest\x12\x14\n\x0cvolume_index\x18\x01 \x01(\r\x12\x13\n\x0brepair_tool\x18\x02 \x01(\x08\"$\n\x12GetNextOffsetReply\x12\x0e\n\x06offset\x18\x01 \x01(\x04\"\x11\n\x0fGetStatsRequest\"o\n\rGetStatsReply\x12\x30\n\x05stats\x18\x01 \x03(\x0b\x32!.filemgr.GetStatsReply.StatsEntry\x1a,\n\nStatsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x04:\x02\x38\x01\"\x11\n\x0fSetKvStateReply\"\x13\n\x11GetKvStateRequest\"\x1a\n\x07KvState\x12\x0f\n\x07isClean\x18\x01 \x01(\x08\"\x86\x01\n\x06Volume\x12\x14\n\x0cvolume_index\x18\x01 \x01(\r\x12(\n\x0bvolume_type\x18\x02 \x01(\x0e\x32\x13.filemgr.VolumeType\x12\x14\n\x0cvolume_state\x18\x03 \x01(\r\x12\x11\n\tpartition\x18\x04 \x01(\r\x12\x13\n\x0bnext_offset\x18\x05 \x01(\x04\"<\n\x06Object\x12\x0c\n\x04name\x18\x01 \x01(\x0c\x12\x14\n\x0cvolume_index\x18\x02 \x01(\r\x12\x0e\n\x06offset\x18\x03 \x01(\x04\"\x1b\n\nDirEntries\x12\r\n\x05\x65ntry\x18\x01 \x03(\t*N\n\nVolumeType\x12\x12\n\x0eVOLUME_DEFAULT\x10\x00\x12\x14\n\x10VOLUME_TOMBSTONE\x10\x01\x12\x16\n\x12VOLUME_X_DELETE_AT\x10\x02*R\n\x0bVolumeState\x12\x0c\n\x08STATE_RW\x10\x00\x12\x18\n\x14STATE_COMPACTION_SRC\x10\x01\x12\x1b\n\x17STATE_COMPACTION_TARGET\x10\x02\x62\x06proto3'
+)
+
+_VOLUMETYPE = _descriptor.EnumDescriptor(
+ name='VolumeType',
+ full_name='filemgr.VolumeType',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='VOLUME_DEFAULT', index=0, number=0,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='VOLUME_TOMBSTONE', index=1, number=1,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='VOLUME_X_DELETE_AT', index=2, number=2,
+ serialized_options=None,
+ type=None),
+ ],
+ containing_type=None,
+ serialized_options=None,
+ serialized_start=2869,
+ serialized_end=2947,
+)
+_sym_db.RegisterEnumDescriptor(_VOLUMETYPE)
+
+VolumeType = enum_type_wrapper.EnumTypeWrapper(_VOLUMETYPE)
+_VOLUMESTATE = _descriptor.EnumDescriptor(
+ name='VolumeState',
+ full_name='filemgr.VolumeState',
+ filename=None,
+ file=DESCRIPTOR,
+ values=[
+ _descriptor.EnumValueDescriptor(
+ name='STATE_RW', index=0, number=0,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='STATE_COMPACTION_SRC', index=1, number=1,
+ serialized_options=None,
+ type=None),
+ _descriptor.EnumValueDescriptor(
+ name='STATE_COMPACTION_TARGET', index=2, number=2,
+ serialized_options=None,
+ type=None),
+ ],
+ containing_type=None,
+ serialized_options=None,
+ serialized_start=2949,
+ serialized_end=3031,
+)
+_sym_db.RegisterEnumDescriptor(_VOLUMESTATE)
+
+VolumeState = enum_type_wrapper.EnumTypeWrapper(_VOLUMESTATE)
+VOLUME_DEFAULT = 0
+VOLUME_TOMBSTONE = 1
+VOLUME_X_DELETE_AT = 2
+STATE_RW = 0
+STATE_COMPACTION_SRC = 1
+STATE_COMPACTION_TARGET = 2
+
+
+
+_REGISTERVOLUMEREQUEST = _descriptor.Descriptor(
+ name='RegisterVolumeRequest',
+ full_name='filemgr.RegisterVolumeRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='partition', full_name='filemgr.RegisterVolumeRequest.partition', index=0,
+ number=1, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='type', full_name='filemgr.RegisterVolumeRequest.type', index=1,
+ number=2, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='volume_index', full_name='filemgr.RegisterVolumeRequest.volume_index', index=2,
+ number=3, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='offset', full_name='filemgr.RegisterVolumeRequest.offset', index=3,
+ number=4, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='state', full_name='filemgr.RegisterVolumeRequest.state', index=4,
+ number=5, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='repair_tool', full_name='filemgr.RegisterVolumeRequest.repair_tool', index=5,
+ number=6, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=24,
+ serialized_end=197,
+)
+
+
+_REGISTERVOLUMEREPLY = _descriptor.Descriptor(
+ name='RegisterVolumeReply',
+ full_name='filemgr.RegisterVolumeReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=199,
+ serialized_end=220,
+)
+
+
+_UNREGISTERVOLUMEREQUEST = _descriptor.Descriptor(
+ name='UnregisterVolumeRequest',
+ full_name='filemgr.UnregisterVolumeRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='index', full_name='filemgr.UnregisterVolumeRequest.index', index=0,
+ number=1, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='repair_tool', full_name='filemgr.UnregisterVolumeRequest.repair_tool', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=222,
+ serialized_end=283,
+)
+
+
+_UNREGISTERVOLUMEREPLY = _descriptor.Descriptor(
+ name='UnregisterVolumeReply',
+ full_name='filemgr.UnregisterVolumeReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=285,
+ serialized_end=308,
+)
+
+
+_UPDATEVOLUMESTATEREQUEST = _descriptor.Descriptor(
+ name='UpdateVolumeStateRequest',
+ full_name='filemgr.UpdateVolumeStateRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='volume_index', full_name='filemgr.UpdateVolumeStateRequest.volume_index', index=0,
+ number=1, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='state', full_name='filemgr.UpdateVolumeStateRequest.state', index=1,
+ number=2, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='repair_tool', full_name='filemgr.UpdateVolumeStateRequest.repair_tool', index=2,
+ number=3, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=310,
+ serialized_end=416,
+)
+
+
+_UPDATEVOLUMESTATEREPLY = _descriptor.Descriptor(
+ name='UpdateVolumeStateReply',
+ full_name='filemgr.UpdateVolumeStateReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=418,
+ serialized_end=442,
+)
+
+
+_GETVOLUMEREQUEST = _descriptor.Descriptor(
+ name='GetVolumeRequest',
+ full_name='filemgr.GetVolumeRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='index', full_name='filemgr.GetVolumeRequest.index', index=0,
+ number=1, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='repair_tool', full_name='filemgr.GetVolumeRequest.repair_tool', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=444,
+ serialized_end=498,
+)
+
+
+_GETVOLUMEREPLY = _descriptor.Descriptor(
+ name='GetVolumeReply',
+ full_name='filemgr.GetVolumeReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='volume_index', full_name='filemgr.GetVolumeReply.volume_index', index=0,
+ number=1, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='volume_type', full_name='filemgr.GetVolumeReply.volume_type', index=1,
+ number=2, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='volume_state', full_name='filemgr.GetVolumeReply.volume_state', index=2,
+ number=3, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='partition', full_name='filemgr.GetVolumeReply.partition', index=3,
+ number=4, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='next_offset', full_name='filemgr.GetVolumeReply.next_offset', index=4,
+ number=5, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=501,
+ serialized_end=643,
+)
+
+
+_LISTVOLUMESREQUEST = _descriptor.Descriptor(
+ name='ListVolumesRequest',
+ full_name='filemgr.ListVolumesRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='partition', full_name='filemgr.ListVolumesRequest.partition', index=0,
+ number=1, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='type', full_name='filemgr.ListVolumesRequest.type', index=1,
+ number=2, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='repair_tool', full_name='filemgr.ListVolumesRequest.repair_tool', index=2,
+ number=3, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=645,
+ serialized_end=740,
+)
+
+
+_LISTVOLUMESREPLY = _descriptor.Descriptor(
+ name='ListVolumesReply',
+ full_name='filemgr.ListVolumesReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='volumes', full_name='filemgr.ListVolumesReply.volumes', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=742,
+ serialized_end=794,
+)
+
+
+_REGISTEROBJECTREQUEST = _descriptor.Descriptor(
+ name='RegisterObjectRequest',
+ full_name='filemgr.RegisterObjectRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='filemgr.RegisterObjectRequest.name', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='volume_index', full_name='filemgr.RegisterObjectRequest.volume_index', index=1,
+ number=2, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='offset', full_name='filemgr.RegisterObjectRequest.offset', index=2,
+ number=3, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='next_offset', full_name='filemgr.RegisterObjectRequest.next_offset', index=3,
+ number=4, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='repair_tool', full_name='filemgr.RegisterObjectRequest.repair_tool', index=4,
+ number=5, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=796,
+ serialized_end=913,
+)
+
+
+_REGISTEROBJECTREPLY = _descriptor.Descriptor(
+ name='RegisterObjectReply',
+ full_name='filemgr.RegisterObjectReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=915,
+ serialized_end=936,
+)
+
+
+_UNREGISTEROBJECTREQUEST = _descriptor.Descriptor(
+ name='UnregisterObjectRequest',
+ full_name='filemgr.UnregisterObjectRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='filemgr.UnregisterObjectRequest.name', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='repair_tool', full_name='filemgr.UnregisterObjectRequest.repair_tool', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=938,
+ serialized_end=998,
+)
+
+
+_UNREGISTEROBJECTREPLY = _descriptor.Descriptor(
+ name='UnregisterObjectReply',
+ full_name='filemgr.UnregisterObjectReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1000,
+ serialized_end=1023,
+)
+
+
+_RENAMEOBJECTREQUEST = _descriptor.Descriptor(
+ name='RenameObjectRequest',
+ full_name='filemgr.RenameObjectRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='filemgr.RenameObjectRequest.name', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='new_name', full_name='filemgr.RenameObjectRequest.new_name', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='repair_tool', full_name='filemgr.RenameObjectRequest.repair_tool', index=2,
+ number=3, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1025,
+ serialized_end=1099,
+)
+
+
+_RENAMEOBJECTREPLY = _descriptor.Descriptor(
+ name='RenameObjectReply',
+ full_name='filemgr.RenameObjectReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1101,
+ serialized_end=1120,
+)
+
+
+_LOADOBJECTREQUEST = _descriptor.Descriptor(
+ name='LoadObjectRequest',
+ full_name='filemgr.LoadObjectRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='filemgr.LoadObjectRequest.name', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='is_quarantined', full_name='filemgr.LoadObjectRequest.is_quarantined', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='repair_tool', full_name='filemgr.LoadObjectRequest.repair_tool', index=2,
+ number=3, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1122,
+ serialized_end=1200,
+)
+
+
+_LOADOBJECTREPLY = _descriptor.Descriptor(
+ name='LoadObjectReply',
+ full_name='filemgr.LoadObjectReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='filemgr.LoadObjectReply.name', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='volume_index', full_name='filemgr.LoadObjectReply.volume_index', index=1,
+ number=2, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='offset', full_name='filemgr.LoadObjectReply.offset', index=2,
+ number=3, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1202,
+ serialized_end=1271,
+)
+
+
+_QUARANTINEOBJECTREQUEST = _descriptor.Descriptor(
+ name='QuarantineObjectRequest',
+ full_name='filemgr.QuarantineObjectRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='filemgr.QuarantineObjectRequest.name', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='repair_tool', full_name='filemgr.QuarantineObjectRequest.repair_tool', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1273,
+ serialized_end=1333,
+)
+
+
+_QUARANTINEOBJECTREPLY = _descriptor.Descriptor(
+ name='QuarantineObjectReply',
+ full_name='filemgr.QuarantineObjectReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1335,
+ serialized_end=1358,
+)
+
+
+_UNQUARANTINEOBJECTREQUEST = _descriptor.Descriptor(
+ name='UnquarantineObjectRequest',
+ full_name='filemgr.UnquarantineObjectRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='filemgr.UnquarantineObjectRequest.name', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='repair_tool', full_name='filemgr.UnquarantineObjectRequest.repair_tool', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1360,
+ serialized_end=1422,
+)
+
+
+_UNQUARANTINEOBJECTREPLY = _descriptor.Descriptor(
+ name='UnquarantineObjectReply',
+ full_name='filemgr.UnquarantineObjectReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1424,
+ serialized_end=1449,
+)
+
+
+_LOADOBJECTSBYPREFIXREQUEST = _descriptor.Descriptor(
+ name='LoadObjectsByPrefixRequest',
+ full_name='filemgr.LoadObjectsByPrefixRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='prefix', full_name='filemgr.LoadObjectsByPrefixRequest.prefix', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='repair_tool', full_name='filemgr.LoadObjectsByPrefixRequest.repair_tool', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1451,
+ serialized_end=1516,
+)
+
+
+_LOADOBJECTSBYPREFIXREPLY = _descriptor.Descriptor(
+ name='LoadObjectsByPrefixReply',
+ full_name='filemgr.LoadObjectsByPrefixReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='objects', full_name='filemgr.LoadObjectsByPrefixReply.objects', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1518,
+ serialized_end=1578,
+)
+
+
+_LOADOBJECTSBYVOLUMEREQUEST = _descriptor.Descriptor(
+ name='LoadObjectsByVolumeRequest',
+ full_name='filemgr.LoadObjectsByVolumeRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='index', full_name='filemgr.LoadObjectsByVolumeRequest.index', index=0,
+ number=1, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='quarantined', full_name='filemgr.LoadObjectsByVolumeRequest.quarantined', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='page_token', full_name='filemgr.LoadObjectsByVolumeRequest.page_token', index=2,
+ number=3, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='page_size', full_name='filemgr.LoadObjectsByVolumeRequest.page_size', index=3,
+ number=4, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='repair_tool', full_name='filemgr.LoadObjectsByVolumeRequest.repair_tool', index=4,
+ number=5, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1580,
+ serialized_end=1704,
+)
+
+
+_LOADOBJECTSBYVOLUMEREPLY = _descriptor.Descriptor(
+ name='LoadObjectsByVolumeReply',
+ full_name='filemgr.LoadObjectsByVolumeReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='objects', full_name='filemgr.LoadObjectsByVolumeReply.objects', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='next_page_token', full_name='filemgr.LoadObjectsByVolumeReply.next_page_token', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1706,
+ serialized_end=1791,
+)
+
+
+_LISTPARTITIONSREQUEST = _descriptor.Descriptor(
+ name='ListPartitionsRequest',
+ full_name='filemgr.ListPartitionsRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='partition_bits', full_name='filemgr.ListPartitionsRequest.partition_bits', index=0,
+ number=1, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1793,
+ serialized_end=1840,
+)
+
+
+_LISTPARTITIONREQUEST = _descriptor.Descriptor(
+ name='ListPartitionRequest',
+ full_name='filemgr.ListPartitionRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='partition', full_name='filemgr.ListPartitionRequest.partition', index=0,
+ number=1, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='partition_bits', full_name='filemgr.ListPartitionRequest.partition_bits', index=1,
+ number=2, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1842,
+ serialized_end=1907,
+)
+
+
+_LISTSUFFIXREQUEST = _descriptor.Descriptor(
+ name='ListSuffixRequest',
+ full_name='filemgr.ListSuffixRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='partition', full_name='filemgr.ListSuffixRequest.partition', index=0,
+ number=1, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='suffix', full_name='filemgr.ListSuffixRequest.suffix', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='partition_bits', full_name='filemgr.ListSuffixRequest.partition_bits', index=2,
+ number=3, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1909,
+ serialized_end=1987,
+)
+
+
+_LISTQUARANTINEDOHASHESREQUEST = _descriptor.Descriptor(
+ name='ListQuarantinedOHashesRequest',
+ full_name='filemgr.ListQuarantinedOHashesRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='page_token', full_name='filemgr.ListQuarantinedOHashesRequest.page_token', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='page_size', full_name='filemgr.ListQuarantinedOHashesRequest.page_size', index=1,
+ number=2, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=1989,
+ serialized_end=2059,
+)
+
+
+_LISTQUARANTINEDOHASHESREPLY = _descriptor.Descriptor(
+ name='ListQuarantinedOHashesReply',
+ full_name='filemgr.ListQuarantinedOHashesReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='objects', full_name='filemgr.ListQuarantinedOHashesReply.objects', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='next_page_token', full_name='filemgr.ListQuarantinedOHashesReply.next_page_token', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2061,
+ serialized_end=2164,
+)
+
+
+_QUARANTINEDOBJECTNAME = _descriptor.Descriptor(
+ name='QuarantinedObjectName',
+ full_name='filemgr.QuarantinedObjectName',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='filemgr.QuarantinedObjectName.name', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2166,
+ serialized_end=2203,
+)
+
+
+_LISTQUARANTINEDOHASHREQUEST = _descriptor.Descriptor(
+ name='ListQuarantinedOHashRequest',
+ full_name='filemgr.ListQuarantinedOHashRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='prefix', full_name='filemgr.ListQuarantinedOHashRequest.prefix', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='repair_tool', full_name='filemgr.ListQuarantinedOHashRequest.repair_tool', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2205,
+ serialized_end=2271,
+)
+
+
+_LISTQUARANTINEDOHASHREPLY = _descriptor.Descriptor(
+ name='ListQuarantinedOHashReply',
+ full_name='filemgr.ListQuarantinedOHashReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='objects', full_name='filemgr.ListQuarantinedOHashReply.objects', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2273,
+ serialized_end=2334,
+)
+
+
+_GETNEXTOFFSETREQUEST = _descriptor.Descriptor(
+ name='GetNextOffsetRequest',
+ full_name='filemgr.GetNextOffsetRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='volume_index', full_name='filemgr.GetNextOffsetRequest.volume_index', index=0,
+ number=1, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='repair_tool', full_name='filemgr.GetNextOffsetRequest.repair_tool', index=1,
+ number=2, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2336,
+ serialized_end=2401,
+)
+
+
+_GETNEXTOFFSETREPLY = _descriptor.Descriptor(
+ name='GetNextOffsetReply',
+ full_name='filemgr.GetNextOffsetReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='offset', full_name='filemgr.GetNextOffsetReply.offset', index=0,
+ number=1, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2403,
+ serialized_end=2439,
+)
+
+
+_GETSTATSREQUEST = _descriptor.Descriptor(
+ name='GetStatsRequest',
+ full_name='filemgr.GetStatsRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2441,
+ serialized_end=2458,
+)
+
+
+_GETSTATSREPLY_STATSENTRY = _descriptor.Descriptor(
+ name='StatsEntry',
+ full_name='filemgr.GetStatsReply.StatsEntry',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='key', full_name='filemgr.GetStatsReply.StatsEntry.key', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='filemgr.GetStatsReply.StatsEntry.value', index=1,
+ number=2, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=b'8\001',
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2527,
+ serialized_end=2571,
+)
+
+_GETSTATSREPLY = _descriptor.Descriptor(
+ name='GetStatsReply',
+ full_name='filemgr.GetStatsReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='stats', full_name='filemgr.GetStatsReply.stats', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[_GETSTATSREPLY_STATSENTRY, ],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2460,
+ serialized_end=2571,
+)
+
+
+_SETKVSTATEREPLY = _descriptor.Descriptor(
+ name='SetKvStateReply',
+ full_name='filemgr.SetKvStateReply',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2573,
+ serialized_end=2590,
+)
+
+
+_GETKVSTATEREQUEST = _descriptor.Descriptor(
+ name='GetKvStateRequest',
+ full_name='filemgr.GetKvStateRequest',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2592,
+ serialized_end=2611,
+)
+
+
+_KVSTATE = _descriptor.Descriptor(
+ name='KvState',
+ full_name='filemgr.KvState',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='isClean', full_name='filemgr.KvState.isClean', index=0,
+ number=1, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2613,
+ serialized_end=2639,
+)
+
+
+_VOLUME = _descriptor.Descriptor(
+ name='Volume',
+ full_name='filemgr.Volume',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='volume_index', full_name='filemgr.Volume.volume_index', index=0,
+ number=1, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='volume_type', full_name='filemgr.Volume.volume_type', index=1,
+ number=2, type=14, cpp_type=8, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='volume_state', full_name='filemgr.Volume.volume_state', index=2,
+ number=3, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='partition', full_name='filemgr.Volume.partition', index=3,
+ number=4, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='next_offset', full_name='filemgr.Volume.next_offset', index=4,
+ number=5, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2642,
+ serialized_end=2776,
+)
+
+
+_OBJECT = _descriptor.Descriptor(
+ name='Object',
+ full_name='filemgr.Object',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='name', full_name='filemgr.Object.name', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='volume_index', full_name='filemgr.Object.volume_index', index=1,
+ number=2, type=13, cpp_type=3, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='offset', full_name='filemgr.Object.offset', index=2,
+ number=3, type=4, cpp_type=4, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2778,
+ serialized_end=2838,
+)
+
+
+_DIRENTRIES = _descriptor.Descriptor(
+ name='DirEntries',
+ full_name='filemgr.DirEntries',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='entry', full_name='filemgr.DirEntries.entry', index=0,
+ number=1, type=9, cpp_type=9, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=2840,
+ serialized_end=2867,
+)
+
+_REGISTERVOLUMEREQUEST.fields_by_name['type'].enum_type = _VOLUMETYPE
+_REGISTERVOLUMEREQUEST.fields_by_name['state'].enum_type = _VOLUMESTATE
+_UPDATEVOLUMESTATEREQUEST.fields_by_name['state'].enum_type = _VOLUMESTATE
+_GETVOLUMEREPLY.fields_by_name['volume_type'].enum_type = _VOLUMETYPE
+_LISTVOLUMESREQUEST.fields_by_name['type'].enum_type = _VOLUMETYPE
+_LISTVOLUMESREPLY.fields_by_name['volumes'].message_type = _VOLUME
+_LOADOBJECTSBYPREFIXREPLY.fields_by_name['objects'].message_type = _OBJECT
+_LOADOBJECTSBYVOLUMEREPLY.fields_by_name['objects'].message_type = _OBJECT
+_LISTQUARANTINEDOHASHESREPLY.fields_by_name['objects'].message_type = _QUARANTINEDOBJECTNAME
+_LISTQUARANTINEDOHASHREPLY.fields_by_name['objects'].message_type = _OBJECT
+_GETSTATSREPLY_STATSENTRY.containing_type = _GETSTATSREPLY
+_GETSTATSREPLY.fields_by_name['stats'].message_type = _GETSTATSREPLY_STATSENTRY
+_VOLUME.fields_by_name['volume_type'].enum_type = _VOLUMETYPE
+DESCRIPTOR.message_types_by_name['RegisterVolumeRequest'] = _REGISTERVOLUMEREQUEST
+DESCRIPTOR.message_types_by_name['RegisterVolumeReply'] = _REGISTERVOLUMEREPLY
+DESCRIPTOR.message_types_by_name['UnregisterVolumeRequest'] = _UNREGISTERVOLUMEREQUEST
+DESCRIPTOR.message_types_by_name['UnregisterVolumeReply'] = _UNREGISTERVOLUMEREPLY
+DESCRIPTOR.message_types_by_name['UpdateVolumeStateRequest'] = _UPDATEVOLUMESTATEREQUEST
+DESCRIPTOR.message_types_by_name['UpdateVolumeStateReply'] = _UPDATEVOLUMESTATEREPLY
+DESCRIPTOR.message_types_by_name['GetVolumeRequest'] = _GETVOLUMEREQUEST
+DESCRIPTOR.message_types_by_name['GetVolumeReply'] = _GETVOLUMEREPLY
+DESCRIPTOR.message_types_by_name['ListVolumesRequest'] = _LISTVOLUMESREQUEST
+DESCRIPTOR.message_types_by_name['ListVolumesReply'] = _LISTVOLUMESREPLY
+DESCRIPTOR.message_types_by_name['RegisterObjectRequest'] = _REGISTEROBJECTREQUEST
+DESCRIPTOR.message_types_by_name['RegisterObjectReply'] = _REGISTEROBJECTREPLY
+DESCRIPTOR.message_types_by_name['UnregisterObjectRequest'] = _UNREGISTEROBJECTREQUEST
+DESCRIPTOR.message_types_by_name['UnregisterObjectReply'] = _UNREGISTEROBJECTREPLY
+DESCRIPTOR.message_types_by_name['RenameObjectRequest'] = _RENAMEOBJECTREQUEST
+DESCRIPTOR.message_types_by_name['RenameObjectReply'] = _RENAMEOBJECTREPLY
+DESCRIPTOR.message_types_by_name['LoadObjectRequest'] = _LOADOBJECTREQUEST
+DESCRIPTOR.message_types_by_name['LoadObjectReply'] = _LOADOBJECTREPLY
+DESCRIPTOR.message_types_by_name['QuarantineObjectRequest'] = _QUARANTINEOBJECTREQUEST
+DESCRIPTOR.message_types_by_name['QuarantineObjectReply'] = _QUARANTINEOBJECTREPLY
+DESCRIPTOR.message_types_by_name['UnquarantineObjectRequest'] = _UNQUARANTINEOBJECTREQUEST
+DESCRIPTOR.message_types_by_name['UnquarantineObjectReply'] = _UNQUARANTINEOBJECTREPLY
+DESCRIPTOR.message_types_by_name['LoadObjectsByPrefixRequest'] = _LOADOBJECTSBYPREFIXREQUEST
+DESCRIPTOR.message_types_by_name['LoadObjectsByPrefixReply'] = _LOADOBJECTSBYPREFIXREPLY
+DESCRIPTOR.message_types_by_name['LoadObjectsByVolumeRequest'] = _LOADOBJECTSBYVOLUMEREQUEST
+DESCRIPTOR.message_types_by_name['LoadObjectsByVolumeReply'] = _LOADOBJECTSBYVOLUMEREPLY
+DESCRIPTOR.message_types_by_name['ListPartitionsRequest'] = _LISTPARTITIONSREQUEST
+DESCRIPTOR.message_types_by_name['ListPartitionRequest'] = _LISTPARTITIONREQUEST
+DESCRIPTOR.message_types_by_name['ListSuffixRequest'] = _LISTSUFFIXREQUEST
+DESCRIPTOR.message_types_by_name['ListQuarantinedOHashesRequest'] = _LISTQUARANTINEDOHASHESREQUEST
+DESCRIPTOR.message_types_by_name['ListQuarantinedOHashesReply'] = _LISTQUARANTINEDOHASHESREPLY
+DESCRIPTOR.message_types_by_name['QuarantinedObjectName'] = _QUARANTINEDOBJECTNAME
+DESCRIPTOR.message_types_by_name['ListQuarantinedOHashRequest'] = _LISTQUARANTINEDOHASHREQUEST
+DESCRIPTOR.message_types_by_name['ListQuarantinedOHashReply'] = _LISTQUARANTINEDOHASHREPLY
+DESCRIPTOR.message_types_by_name['GetNextOffsetRequest'] = _GETNEXTOFFSETREQUEST
+DESCRIPTOR.message_types_by_name['GetNextOffsetReply'] = _GETNEXTOFFSETREPLY
+DESCRIPTOR.message_types_by_name['GetStatsRequest'] = _GETSTATSREQUEST
+DESCRIPTOR.message_types_by_name['GetStatsReply'] = _GETSTATSREPLY
+DESCRIPTOR.message_types_by_name['SetKvStateReply'] = _SETKVSTATEREPLY
+DESCRIPTOR.message_types_by_name['GetKvStateRequest'] = _GETKVSTATEREQUEST
+DESCRIPTOR.message_types_by_name['KvState'] = _KVSTATE
+DESCRIPTOR.message_types_by_name['Volume'] = _VOLUME
+DESCRIPTOR.message_types_by_name['Object'] = _OBJECT
+DESCRIPTOR.message_types_by_name['DirEntries'] = _DIRENTRIES
+DESCRIPTOR.enum_types_by_name['VolumeType'] = _VOLUMETYPE
+DESCRIPTOR.enum_types_by_name['VolumeState'] = _VOLUMESTATE
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+RegisterVolumeRequest = _reflection.GeneratedProtocolMessageType('RegisterVolumeRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _REGISTERVOLUMEREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.RegisterVolumeRequest)
+ })
+_sym_db.RegisterMessage(RegisterVolumeRequest)
+
+RegisterVolumeReply = _reflection.GeneratedProtocolMessageType('RegisterVolumeReply', (_message.Message,), {
+ 'DESCRIPTOR' : _REGISTERVOLUMEREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.RegisterVolumeReply)
+ })
+_sym_db.RegisterMessage(RegisterVolumeReply)
+
+UnregisterVolumeRequest = _reflection.GeneratedProtocolMessageType('UnregisterVolumeRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _UNREGISTERVOLUMEREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.UnregisterVolumeRequest)
+ })
+_sym_db.RegisterMessage(UnregisterVolumeRequest)
+
+UnregisterVolumeReply = _reflection.GeneratedProtocolMessageType('UnregisterVolumeReply', (_message.Message,), {
+ 'DESCRIPTOR' : _UNREGISTERVOLUMEREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.UnregisterVolumeReply)
+ })
+_sym_db.RegisterMessage(UnregisterVolumeReply)
+
+UpdateVolumeStateRequest = _reflection.GeneratedProtocolMessageType('UpdateVolumeStateRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _UPDATEVOLUMESTATEREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.UpdateVolumeStateRequest)
+ })
+_sym_db.RegisterMessage(UpdateVolumeStateRequest)
+
+UpdateVolumeStateReply = _reflection.GeneratedProtocolMessageType('UpdateVolumeStateReply', (_message.Message,), {
+ 'DESCRIPTOR' : _UPDATEVOLUMESTATEREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.UpdateVolumeStateReply)
+ })
+_sym_db.RegisterMessage(UpdateVolumeStateReply)
+
+GetVolumeRequest = _reflection.GeneratedProtocolMessageType('GetVolumeRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _GETVOLUMEREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.GetVolumeRequest)
+ })
+_sym_db.RegisterMessage(GetVolumeRequest)
+
+GetVolumeReply = _reflection.GeneratedProtocolMessageType('GetVolumeReply', (_message.Message,), {
+ 'DESCRIPTOR' : _GETVOLUMEREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.GetVolumeReply)
+ })
+_sym_db.RegisterMessage(GetVolumeReply)
+
+ListVolumesRequest = _reflection.GeneratedProtocolMessageType('ListVolumesRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _LISTVOLUMESREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.ListVolumesRequest)
+ })
+_sym_db.RegisterMessage(ListVolumesRequest)
+
+ListVolumesReply = _reflection.GeneratedProtocolMessageType('ListVolumesReply', (_message.Message,), {
+ 'DESCRIPTOR' : _LISTVOLUMESREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.ListVolumesReply)
+ })
+_sym_db.RegisterMessage(ListVolumesReply)
+
+RegisterObjectRequest = _reflection.GeneratedProtocolMessageType('RegisterObjectRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _REGISTEROBJECTREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.RegisterObjectRequest)
+ })
+_sym_db.RegisterMessage(RegisterObjectRequest)
+
+RegisterObjectReply = _reflection.GeneratedProtocolMessageType('RegisterObjectReply', (_message.Message,), {
+ 'DESCRIPTOR' : _REGISTEROBJECTREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.RegisterObjectReply)
+ })
+_sym_db.RegisterMessage(RegisterObjectReply)
+
+UnregisterObjectRequest = _reflection.GeneratedProtocolMessageType('UnregisterObjectRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _UNREGISTEROBJECTREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.UnregisterObjectRequest)
+ })
+_sym_db.RegisterMessage(UnregisterObjectRequest)
+
+UnregisterObjectReply = _reflection.GeneratedProtocolMessageType('UnregisterObjectReply', (_message.Message,), {
+ 'DESCRIPTOR' : _UNREGISTEROBJECTREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.UnregisterObjectReply)
+ })
+_sym_db.RegisterMessage(UnregisterObjectReply)
+
+RenameObjectRequest = _reflection.GeneratedProtocolMessageType('RenameObjectRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _RENAMEOBJECTREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.RenameObjectRequest)
+ })
+_sym_db.RegisterMessage(RenameObjectRequest)
+
+RenameObjectReply = _reflection.GeneratedProtocolMessageType('RenameObjectReply', (_message.Message,), {
+ 'DESCRIPTOR' : _RENAMEOBJECTREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.RenameObjectReply)
+ })
+_sym_db.RegisterMessage(RenameObjectReply)
+
+LoadObjectRequest = _reflection.GeneratedProtocolMessageType('LoadObjectRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _LOADOBJECTREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.LoadObjectRequest)
+ })
+_sym_db.RegisterMessage(LoadObjectRequest)
+
+LoadObjectReply = _reflection.GeneratedProtocolMessageType('LoadObjectReply', (_message.Message,), {
+ 'DESCRIPTOR' : _LOADOBJECTREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.LoadObjectReply)
+ })
+_sym_db.RegisterMessage(LoadObjectReply)
+
+QuarantineObjectRequest = _reflection.GeneratedProtocolMessageType('QuarantineObjectRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _QUARANTINEOBJECTREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.QuarantineObjectRequest)
+ })
+_sym_db.RegisterMessage(QuarantineObjectRequest)
+
+QuarantineObjectReply = _reflection.GeneratedProtocolMessageType('QuarantineObjectReply', (_message.Message,), {
+ 'DESCRIPTOR' : _QUARANTINEOBJECTREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.QuarantineObjectReply)
+ })
+_sym_db.RegisterMessage(QuarantineObjectReply)
+
+UnquarantineObjectRequest = _reflection.GeneratedProtocolMessageType('UnquarantineObjectRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _UNQUARANTINEOBJECTREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.UnquarantineObjectRequest)
+ })
+_sym_db.RegisterMessage(UnquarantineObjectRequest)
+
+UnquarantineObjectReply = _reflection.GeneratedProtocolMessageType('UnquarantineObjectReply', (_message.Message,), {
+ 'DESCRIPTOR' : _UNQUARANTINEOBJECTREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.UnquarantineObjectReply)
+ })
+_sym_db.RegisterMessage(UnquarantineObjectReply)
+
+LoadObjectsByPrefixRequest = _reflection.GeneratedProtocolMessageType('LoadObjectsByPrefixRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _LOADOBJECTSBYPREFIXREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.LoadObjectsByPrefixRequest)
+ })
+_sym_db.RegisterMessage(LoadObjectsByPrefixRequest)
+
+LoadObjectsByPrefixReply = _reflection.GeneratedProtocolMessageType('LoadObjectsByPrefixReply', (_message.Message,), {
+ 'DESCRIPTOR' : _LOADOBJECTSBYPREFIXREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.LoadObjectsByPrefixReply)
+ })
+_sym_db.RegisterMessage(LoadObjectsByPrefixReply)
+
+LoadObjectsByVolumeRequest = _reflection.GeneratedProtocolMessageType('LoadObjectsByVolumeRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _LOADOBJECTSBYVOLUMEREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.LoadObjectsByVolumeRequest)
+ })
+_sym_db.RegisterMessage(LoadObjectsByVolumeRequest)
+
+LoadObjectsByVolumeReply = _reflection.GeneratedProtocolMessageType('LoadObjectsByVolumeReply', (_message.Message,), {
+ 'DESCRIPTOR' : _LOADOBJECTSBYVOLUMEREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.LoadObjectsByVolumeReply)
+ })
+_sym_db.RegisterMessage(LoadObjectsByVolumeReply)
+
+ListPartitionsRequest = _reflection.GeneratedProtocolMessageType('ListPartitionsRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _LISTPARTITIONSREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.ListPartitionsRequest)
+ })
+_sym_db.RegisterMessage(ListPartitionsRequest)
+
+ListPartitionRequest = _reflection.GeneratedProtocolMessageType('ListPartitionRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _LISTPARTITIONREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.ListPartitionRequest)
+ })
+_sym_db.RegisterMessage(ListPartitionRequest)
+
+ListSuffixRequest = _reflection.GeneratedProtocolMessageType('ListSuffixRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _LISTSUFFIXREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.ListSuffixRequest)
+ })
+_sym_db.RegisterMessage(ListSuffixRequest)
+
+ListQuarantinedOHashesRequest = _reflection.GeneratedProtocolMessageType('ListQuarantinedOHashesRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _LISTQUARANTINEDOHASHESREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.ListQuarantinedOHashesRequest)
+ })
+_sym_db.RegisterMessage(ListQuarantinedOHashesRequest)
+
+ListQuarantinedOHashesReply = _reflection.GeneratedProtocolMessageType('ListQuarantinedOHashesReply', (_message.Message,), {
+ 'DESCRIPTOR' : _LISTQUARANTINEDOHASHESREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.ListQuarantinedOHashesReply)
+ })
+_sym_db.RegisterMessage(ListQuarantinedOHashesReply)
+
+QuarantinedObjectName = _reflection.GeneratedProtocolMessageType('QuarantinedObjectName', (_message.Message,), {
+ 'DESCRIPTOR' : _QUARANTINEDOBJECTNAME,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.QuarantinedObjectName)
+ })
+_sym_db.RegisterMessage(QuarantinedObjectName)
+
+ListQuarantinedOHashRequest = _reflection.GeneratedProtocolMessageType('ListQuarantinedOHashRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _LISTQUARANTINEDOHASHREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.ListQuarantinedOHashRequest)
+ })
+_sym_db.RegisterMessage(ListQuarantinedOHashRequest)
+
+ListQuarantinedOHashReply = _reflection.GeneratedProtocolMessageType('ListQuarantinedOHashReply', (_message.Message,), {
+ 'DESCRIPTOR' : _LISTQUARANTINEDOHASHREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.ListQuarantinedOHashReply)
+ })
+_sym_db.RegisterMessage(ListQuarantinedOHashReply)
+
+GetNextOffsetRequest = _reflection.GeneratedProtocolMessageType('GetNextOffsetRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _GETNEXTOFFSETREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.GetNextOffsetRequest)
+ })
+_sym_db.RegisterMessage(GetNextOffsetRequest)
+
+GetNextOffsetReply = _reflection.GeneratedProtocolMessageType('GetNextOffsetReply', (_message.Message,), {
+ 'DESCRIPTOR' : _GETNEXTOFFSETREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.GetNextOffsetReply)
+ })
+_sym_db.RegisterMessage(GetNextOffsetReply)
+
+GetStatsRequest = _reflection.GeneratedProtocolMessageType('GetStatsRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _GETSTATSREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.GetStatsRequest)
+ })
+_sym_db.RegisterMessage(GetStatsRequest)
+
+GetStatsReply = _reflection.GeneratedProtocolMessageType('GetStatsReply', (_message.Message,), {
+
+ 'StatsEntry' : _reflection.GeneratedProtocolMessageType('StatsEntry', (_message.Message,), {
+ 'DESCRIPTOR' : _GETSTATSREPLY_STATSENTRY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.GetStatsReply.StatsEntry)
+ })
+ ,
+ 'DESCRIPTOR' : _GETSTATSREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.GetStatsReply)
+ })
+_sym_db.RegisterMessage(GetStatsReply)
+_sym_db.RegisterMessage(GetStatsReply.StatsEntry)
+
+SetKvStateReply = _reflection.GeneratedProtocolMessageType('SetKvStateReply', (_message.Message,), {
+ 'DESCRIPTOR' : _SETKVSTATEREPLY,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.SetKvStateReply)
+ })
+_sym_db.RegisterMessage(SetKvStateReply)
+
+GetKvStateRequest = _reflection.GeneratedProtocolMessageType('GetKvStateRequest', (_message.Message,), {
+ 'DESCRIPTOR' : _GETKVSTATEREQUEST,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.GetKvStateRequest)
+ })
+_sym_db.RegisterMessage(GetKvStateRequest)
+
+KvState = _reflection.GeneratedProtocolMessageType('KvState', (_message.Message,), {
+ 'DESCRIPTOR' : _KVSTATE,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.KvState)
+ })
+_sym_db.RegisterMessage(KvState)
+
+Volume = _reflection.GeneratedProtocolMessageType('Volume', (_message.Message,), {
+ 'DESCRIPTOR' : _VOLUME,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.Volume)
+ })
+_sym_db.RegisterMessage(Volume)
+
+Object = _reflection.GeneratedProtocolMessageType('Object', (_message.Message,), {
+ 'DESCRIPTOR' : _OBJECT,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.Object)
+ })
+_sym_db.RegisterMessage(Object)
+
+DirEntries = _reflection.GeneratedProtocolMessageType('DirEntries', (_message.Message,), {
+ 'DESCRIPTOR' : _DIRENTRIES,
+ '__module__' : 'fmgr_pb2'
+ # @@protoc_insertion_point(class_scope:filemgr.DirEntries)
+ })
+_sym_db.RegisterMessage(DirEntries)
+
+
+_GETSTATSREPLY_STATSENTRY._options = None
+# @@protoc_insertion_point(module_scope)
diff --git a/swift/obj/header.py b/swift/obj/header.py
new file mode 100644
index 000000000..fd442ea83
--- /dev/null
+++ b/swift/obj/header.py
@@ -0,0 +1,394 @@
+# Copyright (c) 2010-2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import six
+import os
+import struct
+
+from swift.common.utils import fdatasync
+
+PICKLE_PROTOCOL = 2
+
+# header version to use for new objects
+OBJECT_HEADER_VERSION = 4
+VOLUME_HEADER_VERSION = 1
+
+# maximum serialized header length
+MAX_OBJECT_HEADER_LEN = 512
+MAX_VOLUME_HEADER_LEN = 128
+
+OBJECT_START_MARKER = b"SWIFTOBJ"
+VOLUME_START_MARKER = b"SWIFTVOL"
+
+# object alignment within a volume.
+# this is needed so that FALLOC_FL_PUNCH_HOLE can actually return space back
+# to the filesystem (tested on XFS and ext4)
+# we may not need to align files in volumes dedicated to short-lived files,
+# such as tombstones (.ts extension),
+# but currently we do align for all volume types.
+ALIGNMENT = 4096
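+
+# an illustrative sketch (not part of this module) of the rounding involved:
+#
+#     def aligned_size(size, alignment=ALIGNMENT):
+#         # e.g. aligned_size(5000) == 8192 with 4096-byte alignment
+#         return ((size + alignment - 1) // alignment) * alignment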
+
+# object header "state" field values (header versions >= 3)
+STATE_OBJ_FILE = 0
+STATE_OBJ_QUARANTINED = 1
+
+
+class HeaderException(IOError):
+ def __init__(self, message):
+ self.message = message
+ super(HeaderException, self).__init__(message)
+
+
+object_header_formats = {
+ 1: '8sBQQQ30sQQQQQ',
+ 2: '8sBQQQ64sQQQQQ', # 64 characters for the filename
+ 3: '8sBQQQ64sQQQQQB', # add state field
+ 4: '8sBQQQ64sQQQQQB32s' # add metadata checksum
+}
+
+
+class ObjectHeader(object):
+ """
+ Version 1:
+ Magic string (8 bytes)
+ Header version (1 byte)
+ Policy index (8 bytes)
+    Object hash (16 bytes, packed as two 64-bit integers)
+ Filename (30 chars)
+ Metadata offset (8 bytes)
+ Metadata size (8 bytes)
+ Data offset (8 bytes)
+ Data size (8 bytes)
+ Total object size (8 bytes)
+
+    Version 2: similar, but 64 chars for the filename
+    Version 3: adds a "state" field (unsigned char)
+    Version 4: adds a metadata checksum (32 bytes)
+ """
+
+ def __init__(self, version=OBJECT_HEADER_VERSION):
+ if version not in object_header_formats.keys():
+ raise HeaderException('Unsupported object header version')
+ self.magic_string = OBJECT_START_MARKER
+ self.version = version
+
+ def __eq__(self, other):
+ return self.__dict__ == other.__dict__
+
+ def __len__(self):
+ try:
+ fmt = object_header_formats[self.version]
+ except KeyError:
+ raise HeaderException('Unsupported header version')
+ return struct.calcsize(fmt)
+
+ def pack(self):
+ version_to_pack = {
+ 1: self.__pack_v1,
+ 2: self.__pack_v2,
+ 3: self.__pack_v3,
+ 4: self.__pack_v4
+ }
+ return version_to_pack[self.version]()
+
+ def __pack_v1(self):
+ fmt = object_header_formats[1]
+ ohash_h = int(self.ohash, 16) >> 64
+ ohash_l = int(self.ohash, 16) & 0x0000000000000000ffffffffffffffff
+
+ args = (self.magic_string, self.version,
+ self.policy_idx, ohash_h, ohash_l,
+ str(self.filename).encode('ascii'),
+ self.metadata_offset, self.metadata_size,
+ self.data_offset, self.data_size, self.total_size)
+
+ return struct.pack(fmt, *args)
+
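+    # Note on the hash packing above and below: the 128-bit hex object hash
+    # is split into two unsigned 64-bit integers. As a worked example with a
+    # hypothetical hash, ohash = "ff" * 16 gives
+    # ohash_h == ohash_l == 0xffffffffffffffff, and the unpack side rebuilds
+    # it as (ohash_h << 64) + ohash_l.
+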
+ def __pack_v2(self):
+ fmt = object_header_formats[2]
+ ohash_h = int(self.ohash, 16) >> 64
+ ohash_l = int(self.ohash, 16) & 0x0000000000000000ffffffffffffffff
+
+ args = (self.magic_string, self.version,
+ self.policy_idx, ohash_h, ohash_l,
+ str(self.filename).encode('ascii'),
+ self.metadata_offset, self.metadata_size,
+ self.data_offset, self.data_size, self.total_size)
+
+ return struct.pack(fmt, *args)
+
+ def __pack_v3(self):
+ fmt = object_header_formats[3]
+ ohash_h = int(self.ohash, 16) >> 64
+ ohash_l = int(self.ohash, 16) & 0x0000000000000000ffffffffffffffff
+
+ args = (self.magic_string, self.version,
+ self.policy_idx, ohash_h, ohash_l,
+ str(self.filename).encode('ascii'),
+ self.metadata_offset, self.metadata_size,
+ self.data_offset, self.data_size, self.total_size, self.state)
+
+ return struct.pack(fmt, *args)
+
+ def __pack_v4(self):
+ fmt = object_header_formats[4]
+ ohash_h = int(self.ohash, 16) >> 64
+ ohash_l = int(self.ohash, 16) & 0x0000000000000000ffffffffffffffff
+
+ args = (self.magic_string, self.version,
+ self.policy_idx, ohash_h, ohash_l,
+ str(self.filename).encode('ascii'),
+ self.metadata_offset, self.metadata_size,
+ self.data_offset, self.data_size, self.total_size, self.state,
+ self.metastr_md5)
+
+ return struct.pack(fmt, *args)
+
+ @classmethod
+ def unpack(cls, buf):
+ version_to_unpack = {
+ 1: cls.__unpack_v1,
+ 2: cls.__unpack_v2,
+ 3: cls.__unpack_v3,
+ 4: cls.__unpack_v4
+ }
+
+ if buf[0:8] != OBJECT_START_MARKER:
+ raise HeaderException('Not a header')
+ version = struct.unpack('<B', buf[8:9])[0]
+ if version not in object_header_formats.keys():
+ raise HeaderException('Unsupported header version')
+
+ return version_to_unpack[version](buf)
+
+ @classmethod
+ def __unpack_v1(cls, buf):
+ fmt = object_header_formats[1]
+ raw_header = struct.unpack(fmt, buf[0:struct.calcsize(fmt)])
+ header = cls()
+ header.magic_string = raw_header[0]
+ header.version = raw_header[1]
+ header.policy_idx = raw_header[2]
+ header.ohash = "{:032x}".format((raw_header[3] << 64) + raw_header[4])
+ if six.PY2:
+ header.filename = raw_header[5].rstrip(b'\0')
+ else:
+ header.filename = raw_header[5].rstrip(b'\0').decode('ascii')
+ header.metadata_offset = raw_header[6]
+ header.metadata_size = raw_header[7]
+ header.data_offset = raw_header[8]
+ header.data_size = raw_header[9]
+ # currently, total_size gets padded to the next 4k boundary, so that
+ # fallocate can reclaim the block when hole punching.
+ header.total_size = raw_header[10]
+
+ return header
+
+ @classmethod
+ def __unpack_v2(cls, buf):
+ fmt = object_header_formats[2]
+ raw_header = struct.unpack(fmt, buf[0:struct.calcsize(fmt)])
+ header = cls()
+ header.magic_string = raw_header[0]
+ header.version = raw_header[1]
+ header.policy_idx = raw_header[2]
+ header.ohash = "{:032x}".format((raw_header[3] << 64) + raw_header[4])
+ if six.PY2:
+ header.filename = raw_header[5].rstrip(b'\0')
+ else:
+ header.filename = raw_header[5].rstrip(b'\0').decode('ascii')
+ header.metadata_offset = raw_header[6]
+ header.metadata_size = raw_header[7]
+ header.data_offset = raw_header[8]
+ header.data_size = raw_header[9]
+ # currently, total_size gets padded to the next 4k boundary, so that
+ # fallocate can reclaim the block when hole punching.
+ header.total_size = raw_header[10]
+
+ return header
+
+ @classmethod
+ def __unpack_v3(cls, buf):
+ fmt = object_header_formats[3]
+ raw_header = struct.unpack(fmt, buf[0:struct.calcsize(fmt)])
+ header = cls()
+ header.magic_string = raw_header[0]
+ header.version = raw_header[1]
+ header.policy_idx = raw_header[2]
+ header.ohash = "{:032x}".format((raw_header[3] << 64) + raw_header[4])
+ if six.PY2:
+ header.filename = raw_header[5].rstrip(b'\0')
+ else:
+ header.filename = raw_header[5].rstrip(b'\0').decode('ascii')
+ header.metadata_offset = raw_header[6]
+ header.metadata_size = raw_header[7]
+ header.data_offset = raw_header[8]
+ header.data_size = raw_header[9]
+ # currently, total_size gets padded to the next 4k boundary, so that
+ # fallocate can reclaim the block when hole punching.
+ header.total_size = raw_header[10]
+ header.state = raw_header[11]
+
+ return header
+
+ @classmethod
+ def __unpack_v4(cls, buf):
+ fmt = object_header_formats[4]
+ raw_header = struct.unpack(fmt, buf[0:struct.calcsize(fmt)])
+ header = cls()
+ header.magic_string = raw_header[0]
+ header.version = raw_header[1]
+ header.policy_idx = raw_header[2]
+ header.ohash = "{:032x}".format((raw_header[3] << 64) + raw_header[4])
+ if six.PY2:
+ header.filename = raw_header[5].rstrip(b'\0')
+ else:
+ header.filename = raw_header[5].rstrip(b'\0').decode('ascii')
+ header.metadata_offset = raw_header[6]
+ header.metadata_size = raw_header[7]
+ header.data_offset = raw_header[8]
+ header.data_size = raw_header[9]
+ # currently, total_size gets padded to the next 4k boundary, so that
+ # fallocate can reclaim the block when hole punching.
+ header.total_size = raw_header[10]
+ header.state = raw_header[11]
+ header.metastr_md5 = raw_header[12]
+
+ return header
+
+
+volume_header_formats = {
+ 1: '8sBQQQQLQ'
+}
+
+
+class VolumeHeader(object):
+ """
+ Version 1:
+ Magic string (8 bytes)
+ Header version (1 byte)
+ Volume index (8 bytes)
+ Partition index (8 bytes)
+ Volume type (8 bytes)
+ First object offset (8 bytes)
+ Volume state (4 bytes) (enum from fmgr.proto)
+ Volume compaction target (8 bytes)
+ (only valid if state is STATE_COMPACTION_SRC)
+ """
+ def __init__(self, version=VOLUME_HEADER_VERSION):
+ self.magic_string = VOLUME_START_MARKER
+ self.version = version
+ self.state = 0
+ self.compaction_target = 0
+
+ def __str__(self):
+ prop_list = ['volume_idx', 'partition', 'type',
+ 'state', 'compaction_target']
+ h_str = ""
+ for prop in prop_list:
+ h_str += "{}: {}\n".format(prop, getattr(self, prop))
+ return h_str[:-1]
+
+ def __len__(self):
+ try:
+ fmt = volume_header_formats[self.version]
+ except KeyError:
+ raise HeaderException('Unsupported header version')
+ return struct.calcsize(fmt)
+
+ def pack(self):
+ version_to_pack = {
+ 1: self.__pack_v1,
+ }
+ return version_to_pack[self.version]()
+
+ def __pack_v1(self):
+ fmt = volume_header_formats[1]
+
+ args = (self.magic_string, self.version,
+ self.volume_idx, self.partition, self.type,
+ self.first_obj_offset, self.state,
+ self.compaction_target)
+
+ return struct.pack(fmt, *args)
+
+ @classmethod
+ def unpack(cls, buf):
+ version_to_unpack = {
+ 1: cls.__unpack_v1
+ }
+ if buf[0:8] != VOLUME_START_MARKER:
+ raise HeaderException('Not a header')
+ version = struct.unpack('<B', buf[8:9])[0]
+ if version not in volume_header_formats.keys():
+ raise HeaderException('Unsupported header version')
+
+ return version_to_unpack[version](buf)
+
+ @classmethod
+ def __unpack_v1(cls, buf):
+ fmt = volume_header_formats[1]
+ raw_header = struct.unpack(fmt, buf[0:struct.calcsize(fmt)])
+ header = cls()
+ header.magic_string = raw_header[0]
+ header.version = raw_header[1]
+ header.volume_idx = raw_header[2]
+ header.partition = raw_header[3]
+ header.type = raw_header[4]
+ header.first_obj_offset = raw_header[5]
+ header.state = raw_header[6]
+ header.compaction_target = raw_header[7]
+
+ return header
+
+
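+# A minimal round-trip sketch (illustrative only; the field values are
+# hypothetical):
+#
+#     h = VolumeHeader()
+#     h.volume_idx, h.partition, h.type = 1, 9, 0
+#     h.first_obj_offset = MAX_VOLUME_HEADER_LEN
+#     assert VolumeHeader.unpack(h.pack()).volume_idx == 1
+
+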
+def read_volume_header(fp):
+    """
+    Read volume header
+    :param fp: opened file, positioned at header start
+    :return: a VolumeHeader
+    """
+ buf = fp.read(MAX_VOLUME_HEADER_LEN)
+ header = VolumeHeader.unpack(buf)
+ return header
+
+
+def write_volume_header(header, fd):
+ os.write(fd, header.pack())
+
+
+def read_object_header(fp):
+ """
+ Read object header
+ :param fp: opened file, positioned at header start
+ :return: an ObjectHeader
+ """
+ buf = fp.read(MAX_OBJECT_HEADER_LEN)
+ header = ObjectHeader.unpack(buf)
+ return header
+
+
+def write_object_header(header, fp):
+ """
+ Rewrites header in open file
+ :param header: header to write
+ :param fp: opened volume
+ """
+ fp.write(header.pack())
+ fdatasync(fp.fileno())
+
+
+def erase_object_header(fd, offset):
+ """
+ Erase an object header by writing null bytes over it
+ :param fd: volume file descriptor
+ :param offset: absolute header offset
+ """
+ os.lseek(fd, offset, os.SEEK_SET)
+ os.write(fd, b"\x00" * MAX_OBJECT_HEADER_LEN)
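+
+
+# A minimal pack/unpack round-trip sketch for ObjectHeader (illustrative;
+# every value below is hypothetical):
+#
+#     hdr = ObjectHeader(version=4)
+#     hdr.policy_idx = 0
+#     hdr.ohash = "d41d8cd98f00b204e9800998ecf8427e"
+#     hdr.filename = "1570000000.00000.data"
+#     hdr.metadata_offset, hdr.metadata_size = 512, 300
+#     hdr.data_offset, hdr.data_size = 4096, 1024
+#     hdr.total_size = 8192
+#     hdr.state = STATE_OBJ_FILE
+#     hdr.metastr_md5 = b"d41d8cd98f00b204e9800998ecf8427e"
+#     assert ObjectHeader.unpack(hdr.pack()) == hdr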
diff --git a/swift/obj/kvfile.py b/swift/obj/kvfile.py
new file mode 100644
index 000000000..9a0347329
--- /dev/null
+++ b/swift/obj/kvfile.py
@@ -0,0 +1,1260 @@
+# Copyright (c) 2010-2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import errno
+import os
+import time
+import json
+from hashlib import md5
+import logging
+import traceback
+from os.path import basename, dirname, join
+from random import shuffle
+from contextlib import contextmanager
+from collections import defaultdict
+
+from eventlet import Timeout, tpool
+import six
+
+from swift import gettext_ as _
+from swift.common.constraints import check_drive
+from swift.common.request_helpers import is_sys_meta
+from swift.common.utils import fdatasync, \
+ config_true_value, listdir, split_path, lock_path
+from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
+ DiskFileCollision, DiskFileNoSpace, DiskFileDeviceUnavailable, \
+ DiskFileError, PathNotDir, \
+ DiskFileExpired, DiskFileXattrNotSupported, \
+ DiskFileBadMetadataChecksum
+from swift.common.storage_policy import (
+ split_policy_string, POLICIES,
+ REPL_POLICY, EC_POLICY)
+
+from swift.obj import vfile
+from swift.obj.diskfile import BaseDiskFileManager, DiskFileManager, \
+ ECDiskFileManager, BaseDiskFile, DiskFile, DiskFileReader, DiskFileWriter,\
+ BaseDiskFileReader, BaseDiskFileWriter, ECDiskFile, ECDiskFileReader, \
+ ECDiskFileWriter, AuditLocation, RESERVED_DATAFILE_META, \
+ DATAFILE_SYSTEM_META, strip_self, DEFAULT_RECLAIM_AGE, _encode_metadata, \
+ get_part_path, get_data_dir, update_auditor_status, extract_policy, \
+ HASH_FILE, read_hashes, write_hashes, consolidate_hashes, invalidate_hash
+
+
+def quarantine_vrenamer(device_path, corrupted_file_path):
+ """
+ In the case that a file is corrupted, move it to a quarantined
+ area to allow replication to fix it.
+
+    :param device_path: The path to the device the corrupted file is on.
+    :param corrupted_file_path: The path to the file you want quarantined.
+
+ :returns: path (str) of directory the file was moved to
+ :raises OSError: re-raises non errno.EEXIST / errno.ENOTEMPTY
+ exceptions from rename
+ """
+ policy = extract_policy(corrupted_file_path)
+ if policy is None:
+ # TODO: support a quarantine-unknown location
+ policy = POLICIES.legacy
+
+ # rename key in KV
+ return vfile.quarantine_ohash(dirname(corrupted_file_path), policy)
+
+
+def object_audit_location_generator(devices, datadir, mount_check=True,
+ logger=None, device_dirs=None,
+ auditor_type="ALL"):
+ """
+ Given a devices path (e.g. "/srv/node"), yield an AuditLocation for all
+ objects stored under that directory for the given datadir (policy),
+ if device_dirs isn't set. If device_dirs is set, only yield AuditLocation
+ for the objects under the entries in device_dirs. The AuditLocation only
+ knows the path to the hash directory, not to the .data file therein
+ (if any). This is to avoid a double listdir(hash_dir); the DiskFile object
+ will always do one, so we don't.
+
+ :param devices: parent directory of the devices to be audited
+ :param datadir: objects directory
+ :param mount_check: flag to check if a mount check should be performed
+ on devices
+ :param logger: a logger object
+ :param device_dirs: a list of directories under devices to traverse
+ :param auditor_type: either ALL or ZBF
+ """
+ if not device_dirs:
+ device_dirs = listdir(devices)
+ else:
+ # remove bogus devices and duplicates from device_dirs
+ device_dirs = list(
+ set(listdir(devices)).intersection(set(device_dirs)))
+ # randomize devices in case of process restart before sweep completed
+ shuffle(device_dirs)
+
+ base, policy = split_policy_string(datadir)
+ for device in device_dirs:
+ if not check_drive(devices, device, mount_check):
+ if logger:
+ logger.debug(
+ 'Skipping %s as it is not %s', device,
+ 'mounted' if mount_check else 'a dir')
+ continue
+
+ datadir_path = os.path.join(devices, device, datadir)
+ if not os.path.exists(datadir_path):
+ continue
+
+ partitions = get_auditor_status(datadir_path, logger, auditor_type)
+
+ for pos, partition in enumerate(partitions):
+ update_auditor_status(datadir_path, logger,
+ partitions[pos:], auditor_type)
+ part_path = os.path.join(datadir_path, partition)
+ try:
+ suffixes = vfile.listdir(part_path)
+ except OSError as e:
+ if e.errno != errno.ENOTDIR:
+ raise
+ continue
+ for asuffix in suffixes:
+ suff_path = os.path.join(part_path, asuffix)
+ try:
+ hashes = vfile.listdir(suff_path)
+ except OSError as e:
+ if e.errno != errno.ENOTDIR:
+ raise
+ continue
+ for hsh in hashes:
+ hsh_path = os.path.join(suff_path, hsh)
+ yield AuditLocation(hsh_path, device, partition,
+ policy)
+
+ update_auditor_status(datadir_path, logger, [], auditor_type)
+
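+# e.g. (illustrative; paths and arguments are hypothetical):
+#
+#     for loc in object_audit_location_generator('/srv/node', 'objects',
+#                                                mount_check=False):
+#         ...  # loc is an AuditLocation
+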
+
+def get_auditor_status(datadir_path, logger, auditor_type):
+ auditor_status = os.path.join(
+ datadir_path, "auditor_status_%s.json" % auditor_type)
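+    # the status file is expected to contain JSON such as
+    # {"partitions": ["1024", "512"]} (partition names here are examples)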
+ status = {}
+ try:
+ if six.PY3:
+ statusfile = open(auditor_status, encoding='utf8')
+ else:
+ statusfile = open(auditor_status, 'rb')
+ with statusfile:
+ status = statusfile.read()
+ except (OSError, IOError) as e:
+ if e.errno != errno.ENOENT and logger:
+ logger.warning(_('Cannot read %(auditor_status)s (%(err)s)') %
+ {'auditor_status': auditor_status, 'err': e})
+ return vfile.listdir(datadir_path)
+ try:
+ status = json.loads(status)
+ except ValueError as e:
+ logger.warning(_('Loading JSON from %(auditor_status)s failed'
+ ' (%(err)s)') %
+ {'auditor_status': auditor_status, 'err': e})
+ return vfile.listdir(datadir_path)
+ return status['partitions']
+
+
+class BaseKVFile(BaseDiskFile):
+    # TODO: we may want a separate __init__ to define KV-specific attributes
+ def open(self, modernize=False, current_time=None):
+ """
+ Open the object.
+
+ This implementation opens the data file representing the object, reads
+ the associated metadata in the extended attributes, additionally
+ combining metadata from fast-POST `.meta` files.
+
+ :param modernize: if set, update this diskfile to the latest format.
+ Currently, this means adding metadata checksums if none are
+ present.
+ :param current_time: Unix time used in checking expiration. If not
+ present, the current time will be used.
+
+ .. note::
+
+ An implementation is allowed to raise any of the following
+ exceptions, but is only required to raise `DiskFileNotExist` when
+ the object representation does not exist.
+
+ :raises DiskFileCollision: on name mis-match with metadata
+ :raises DiskFileNotExist: if the object does not exist
+ :raises DiskFileDeleted: if the object was previously deleted
+        :raises DiskFileQuarantined: if while reading metadata of the file
+            some data did not pass cross checks
+ :returns: itself for use as a context manager
+ """
+ try:
+ files = vfile.listdir(self._datadir)
+ except (OSError, vfile.VFileException) as err:
+ raise DiskFileError(
+ "Error listing directory %s: %s" % (self._datadir, err))
+
+ # gather info about the valid files to use to open the DiskFile
+ file_info = self._get_ondisk_files(files)
+
+ self._data_file = file_info.get('data_file')
+ if not self._data_file:
+ raise self._construct_exception_from_ts_file(**file_info)
+ self._vfr = self._construct_from_data_file(
+ current_time=current_time, modernize=modernize, **file_info)
+ # This method must populate the internal _metadata attribute.
+ self._metadata = self._metadata or {}
+ return self
+
+ def _verify_data_file(self, data_file, data_vfr, current_time):
+ """
+ Verify the metadata's name value matches what we think the object is
+ named.
+
+        :param data_file: data file name being considered, used when
+            quarantines occur
+        :param data_vfr: VFileReader for the data file, used to verify the
+            on-disk size against the Content-Length metadata value
+ :param current_time: Unix time used in checking expiration
+ :raises DiskFileCollision: if the metadata stored name does not match
+ the referenced name of the file
+ :raises DiskFileExpired: if the object has expired
+ :raises DiskFileQuarantined: if data inconsistencies were detected
+ between the metadata and the file-system
+ metadata
+ """
+ try:
+ mname = self._metadata['name']
+ except KeyError:
+ raise self._quarantine(data_file, "missing name metadata")
+ else:
+ if mname != self._name:
+ self._logger.error(
+ _('Client path %(client)s does not match '
+ 'path stored in object metadata %(meta)s'),
+ {'client': self._name, 'meta': mname})
+ raise DiskFileCollision('Client path does not match path '
+ 'stored in object metadata')
+ try:
+ x_delete_at = int(self._metadata['X-Delete-At'])
+ except KeyError:
+ pass
+ except ValueError:
+ # Quarantine, the x-delete-at key is present but not an
+ # integer.
+ raise self._quarantine(
+ data_file, "bad metadata x-delete-at value %s" % (
+ self._metadata['X-Delete-At']))
+ else:
+ if current_time is None:
+ current_time = time.time()
+ if x_delete_at <= current_time and not self._open_expired:
+ raise DiskFileExpired(metadata=self._metadata)
+ try:
+ metadata_size = int(self._metadata['Content-Length'])
+ except KeyError:
+ raise self._quarantine(
+ data_file, "missing content-length in metadata")
+ except ValueError:
+ # Quarantine, the content-length key is present but not an
+ # integer.
+ raise self._quarantine(
+ data_file, "bad metadata content-length value %s" % (
+ self._metadata['Content-Length']))
+
+ obj_size = data_vfr.data_size
+ if obj_size != metadata_size:
+ raise self._quarantine(
+ data_file, "metadata content-length %s does"
+ " not match actual object size %s" % (
+ metadata_size, obj_size))
+ self._content_length = obj_size
+ return obj_size
+
+ def _failsafe_read_metadata(self, source, quarantine_filename=None,
+ add_missing_checksum=False):
+ """
+ Read metadata from source object file. In case of failure, quarantine
+ the file.
+
+ Takes source and filename separately so we can read from an open
+ file if we have one.
+
+ :param source: file descriptor or filename to load the metadata from
+        :param quarantine_filename: full path of the file, used if it must
+            be quarantined
+ :param add_missing_checksum: ignored
+ """
+ try:
+ vfr = vfile.VFileReader.get_vfile(source, self._logger)
+ vfr_metadata = vfr.metadata
+ vfr.close()
+ return vfr_metadata
+ except (DiskFileXattrNotSupported, DiskFileNotExist):
+ raise
+ except DiskFileBadMetadataChecksum as err:
+ raise self._quarantine(quarantine_filename, str(err))
+ except Exception as err:
+ raise self._quarantine(
+ quarantine_filename,
+ "Exception reading metadata: %s" % err)
+
+ # This could be left unchanged now that _failsafe_read_metadata() is
+ # patched. Test it.
+ def _merge_content_type_metadata(self, ctype_file):
+ """
+ When a second .meta file is providing the most recent Content-Type
+ metadata then merge it into the metafile_metadata.
+
+ :param ctype_file: An on-disk .meta file
+ """
+ try:
+ ctype_vfr = vfile.VFileReader.get_vfile(ctype_file, self._logger)
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ raise DiskFileNotExist()
+ raise
+ ctypefile_metadata = ctype_vfr.metadata
+ ctype_vfr.close()
+ if ('Content-Type' in ctypefile_metadata
+ and (ctypefile_metadata.get('Content-Type-Timestamp') >
+ self._metafile_metadata.get('Content-Type-Timestamp'))
+ and (ctypefile_metadata.get('Content-Type-Timestamp') >
+ self.data_timestamp)):
+ self._metafile_metadata['Content-Type'] = \
+ ctypefile_metadata['Content-Type']
+ self._metafile_metadata['Content-Type-Timestamp'] = \
+ ctypefile_metadata.get('Content-Type-Timestamp')
+
+ def _construct_from_data_file(self, data_file, meta_file, ctype_file,
+ current_time, modernize=False,
+ **kwargs):
+ """
+ Open the `.data` file to fetch its metadata, and fetch the metadata
+ from fast-POST `.meta` files as well if any exist, merging them
+ properly.
+
+ :param data_file: on-disk `.data` file being considered
+ :param meta_file: on-disk fast-POST `.meta` file being considered
+ :param ctype_file: on-disk fast-POST `.meta` file being considered that
+ contains content-type and content-type timestamp
+ :param current_time: Unix time used in checking expiration
+ :param modernize: ignored
+        :returns: an opened VFileReader for the object's data file
+ :raises DiskFileError: various exceptions from
+ :func:`swift.obj.diskfile.DiskFile._verify_data_file`
+ """
+ # TODO: need to catch exception, check if ENOENT (see in diskfile)
+ try:
+ data_vfr = vfile.VFileReader.get_vfile(data_file, self._logger)
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ raise DiskFileNotExist()
+ raise
+
+ self._datafile_metadata = data_vfr.metadata
+
+ self._metadata = {}
+ if meta_file:
+ self._metafile_metadata = self._failsafe_read_metadata(
+ meta_file, meta_file)
+
+ if ctype_file and ctype_file != meta_file:
+ self._merge_content_type_metadata(ctype_file)
+ sys_metadata = dict(
+ [(key, val) for key, val in self._datafile_metadata.items()
+ if key.lower() in (RESERVED_DATAFILE_META |
+ DATAFILE_SYSTEM_META)
+ or is_sys_meta('object', key)])
+ self._metadata.update(self._metafile_metadata)
+ self._metadata.update(sys_metadata)
+ # diskfile writer added 'name' to metafile, so remove it here
+ self._metafile_metadata.pop('name', None)
+ # TODO: the check for Content-Type is only here for tests that
+ # create .data files without Content-Type
+ if ('Content-Type' in self._datafile_metadata and
+ (self.data_timestamp >
+ self._metafile_metadata.get('Content-Type-Timestamp'))):
+ self._metadata['Content-Type'] = \
+ self._datafile_metadata['Content-Type']
+ self._metadata.pop('Content-Type-Timestamp', None)
+ else:
+ self._metadata.update(self._datafile_metadata)
+ if self._name is None:
+ # If we don't know our name, we were just given a hash dir at
+ # instantiation, so we'd better validate that the name hashes back
+ # to us
+ self._name = self._metadata['name']
+ self._verify_name_matches_hash(data_file)
+ self._verify_data_file(data_file, data_vfr, current_time)
+ return data_vfr
+
+ def reader(self, keep_cache=False,
+ _quarantine_hook=lambda m: None):
+ """
+ Return a :class:`swift.common.swob.Response` class compatible
+ "`app_iter`" object as defined by
+ :class:`swift.obj.diskfile.DiskFileReader`.
+
+ For this implementation, the responsibility of closing the open file
+ is passed to the :class:`swift.obj.diskfile.DiskFileReader` object.
+
+ :param keep_cache: caller's preference for keeping data read in the
+ OS buffer cache
+ :param _quarantine_hook: 1-arg callable called when obj quarantined;
+ the arg is the reason for quarantine.
+ Default is to ignore it.
+ Not needed by the REST layer.
+ :returns: a :class:`swift.obj.diskfile.DiskFileReader` object
+ """
+ dr = self.reader_cls(
+ self._vfr, self._data_file, int(self._metadata['Content-Length']),
+ self._metadata['ETag'], self._disk_chunk_size,
+ self._manager.keep_cache_size, self._device_path, self._logger,
+ use_splice=self._use_splice, quarantine_hook=_quarantine_hook,
+ pipe_size=self._pipe_size, diskfile=self, keep_cache=keep_cache)
+ # At this point the reader object is now responsible for closing
+ # the file pointer.
+ # self._fp = None
+ self._vfr = None
+ return dr
+
+ def writer(self, size=None):
+ return self.writer_cls(self._manager, self._name, self._datadir, size,
+ self._bytes_per_sync, self, self._logger)
+
+ def _get_tempfile(self):
+ raise Exception("_get_tempfile called, shouldn't happen")
+
+ @contextmanager
+ def create(self, size=None, extension=None):
+ """
+ Context manager to create a vfile.
+ It could create separate volumes based on the extension.
+
+        Currently no caller sets the extension. The easiest approach would be
+        to patch server.py: in DELETE(), add an extension=".ts" parameter to
+        the self.get_diskfile() call, as kwargs are passed all the way down
+        to here.
+
+ It's also possible to have the writer_cls handle the volume creation
+ later: at the first write() call, if any, assume it's not a tombstone,
+ and in put(), check self._extension.
+
+ :param size: optional initial size of file to explicitly allocate on
+ disk
+ :param extension: file extension, with dot ('.ts')
+ :raises DiskFileNoSpace: if a size is specified and allocation fails
+ """
+ dfw = self.writer(size)
+ try:
+ yield dfw.open()
+ finally:
+ dfw.close()
+
+
+class BaseKVFileReader(BaseDiskFileReader):
+ def __init__(self, vfr, data_file, obj_size, etag,
+ disk_chunk_size, keep_cache_size, device_path, logger,
+ quarantine_hook, use_splice, pipe_size, diskfile,
+ keep_cache=False):
+ # Parameter tracking
+ self._vfr = vfr
+ self._data_file = data_file
+ self._obj_size = obj_size
+ self._etag = etag
+ self._diskfile = diskfile
+ self._disk_chunk_size = disk_chunk_size
+ self._device_path = device_path
+ self._logger = logger
+ self._quarantine_hook = quarantine_hook
+ self._use_splice = use_splice
+ self._pipe_size = pipe_size
+ if keep_cache:
+ # Caller suggests we keep this in cache, only do it if the
+ # object's size is less than the maximum.
+ self._keep_cache = obj_size < keep_cache_size
+ else:
+ self._keep_cache = False
+
+ # Internal Attributes
+ self._iter_etag = None
+ self._bytes_read = 0
+ self._started_at_0 = False
+ self._read_to_eof = False
+ self._md5_of_sent_bytes = None
+ self._suppress_file_closing = False
+ self._quarantined_dir = None
+
+ def _init_checks(self):
+ if self._vfr.tell() == 0:
+ self._started_at_0 = True
+ self._iter_etag = md5()
+
+ def __iter__(self):
+ """Returns an iterator over the data file."""
+ try:
+ self._bytes_read = 0
+ self._started_at_0 = False
+ self._read_to_eof = False
+ self._init_checks()
+ while True:
+ chunk = self._vfr.read(self._disk_chunk_size)
+ if chunk:
+ self._update_checks(chunk)
+ self._bytes_read += len(chunk)
+ yield chunk
+ else:
+ self._read_to_eof = True
+ break
+ finally:
+ if not self._suppress_file_closing:
+ self.close()
+
+ def can_zero_copy_send(self):
+ return False
+
+ def app_iter_range(self, start, stop):
+ """
+ Returns an iterator over the data file for range (start, stop)
+        """
+ if start or start == 0:
+ self._vfr.seek(start)
+ if stop is not None:
+ length = stop - start
+ else:
+ length = None
+ try:
+ for chunk in self:
+ if length is not None:
+ length -= len(chunk)
+ if length < 0:
+ # Chop off the extra:
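+                        # (length is negative here, so this is a slice from
+                        # the end: e.g. length == -3 yields chunk[:-3])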
+ yield chunk[:length]
+ break
+ yield chunk
+ finally:
+ if not self._suppress_file_closing:
+ self.close()
+
+ def close(self):
+ """
+ Close the open file handle if present.
+
+ For this specific implementation, this method will handle quarantining
+ the file if necessary.
+ """
+ if self._vfr:
+ try:
+ if self._started_at_0 and self._read_to_eof:
+ self._handle_close_quarantine()
+ except DiskFileQuarantined:
+ raise
+ except (Exception, Timeout) as e:
+ self._logger.error(_(
+ 'ERROR DiskFile %(data_file)s'
+ ' close failure: %(exc)s : %(stack)s'),
+ {'exc': e, 'stack': ''.join(traceback.format_stack()),
+ 'data_file': self._data_file})
+ finally:
+ vfr, self._vfr = self._vfr, None
+ vfr.close()
+
+
+class BaseKVFileWriter(BaseDiskFileWriter):
+ def __init__(self, mgr, name, datadir, size, bytes_per_sync, diskfile,
+ logger):
+ # Parameter tracking
+ self._manager = mgr
+ self._name = name
+ self._datadir = datadir
+ self._vfile_writer = None
+ self._tmppath = None
+ self._size = size
+ self._chunks_etag = md5()
+ self._bytes_per_sync = bytes_per_sync
+ self._diskfile = diskfile
+ self._logger = logger
+
+ # Internal attributes
+ self._upload_size = 0
+ self._last_sync = 0
+ self._extension = '.data'
+ self._put_succeeded = False
+
+ def open(self):
+ if self._vfile_writer is not None:
+ raise ValueError('DiskFileWriter is already open')
+
+ try:
+ # TODO: support extension
+ self._vfile_writer = vfile.VFileWriter.create(
+ self._datadir, self._size, self._manager.vfile_conf,
+ self._logger, extension=None)
+ except OSError as err:
+ if err.errno in (errno.ENOSPC, errno.EDQUOT):
+ # No more inodes in filesystem
+ raise DiskFileNoSpace(err.strerror)
+ raise
+ return self
+
+ def close(self):
+ if self._vfile_writer:
+ try:
+ os.close(self._vfile_writer.fd)
+ os.close(self._vfile_writer.lock_fd)
+ except OSError:
+ pass
+ self._vfile_writer = None
+
+ def write(self, chunk):
+ """
+        Write a chunk of data to disk. All invocations of this method must
+        come before invoking :func:`put`.
+
+ :param chunk: the chunk of data to write as a string object
+
+ :returns: the total number of bytes written to an object
+ """
+
+ if not self._vfile_writer:
+ raise ValueError('Writer is not open')
+ self._chunks_etag.update(chunk)
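+        # os.write() may perform a partial write; loop until the whole chunk
+        # has been accepted, retrying with the remaining bytes.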
+ while chunk:
+ written = os.write(self._vfile_writer.fd, chunk)
+ self._upload_size += written
+ chunk = chunk[written:]
+
+ # For large files sync every 512MB (by default) written
+ diff = self._upload_size - self._last_sync
+ if diff >= self._bytes_per_sync:
+ tpool.execute(fdatasync, self._vfile_writer.fd)
+ # drop_buffer_cache(self._vfile_writer.fd, self._last_sync, diff)
+ self._last_sync = self._upload_size
+
+ return self._upload_size
+
+ def _finalize_put(self, metadata, target_path, cleanup):
+ filename = basename(target_path)
+ # write metadata and sync
+ self._vfile_writer.commit(filename, _encode_metadata(metadata))
+ self._put_succeeded = True
+ if cleanup:
+ try:
+ self.manager.cleanup_ondisk_files(self._datadir)['files']
+ except OSError:
+ logging.exception(_('Problem cleaning up %s'), self._datadir)
+
+
+class KVFileReader(BaseKVFileReader, DiskFileReader):
+ pass
+
+
+class KVFileWriter(BaseKVFileWriter, DiskFileWriter):
+ def put(self, metadata):
+ """
+ Finalize writing the file on disk.
+
+ :param metadata: dictionary of metadata to be associated with the
+ object
+ """
+ super(KVFileWriter, self)._put(metadata, True)
+
+
+class KVFile(BaseKVFile, DiskFile):
+ reader_cls = KVFileReader
+ writer_cls = KVFileWriter
+
+
+class BaseKVFileManager(BaseDiskFileManager):
+ diskfile_cls = None # must be set by subclasses
+
+ invalidate_hash = strip_self(invalidate_hash)
+ consolidate_hashes = strip_self(consolidate_hashes)
+ quarantine_renamer = strip_self(quarantine_vrenamer)
+
+ def __init__(self, conf, logger):
+ self.logger = logger
+ self.devices = conf.get('devices', '/srv/node')
+ self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
+ self.keep_cache_size = int(conf.get('keep_cache_size', 5242880))
+ self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024
+ # vfile specific config
+ self.vfile_conf = {}
+ self.vfile_conf['volume_alloc_chunk_size'] = int(
+ conf.get('volume_alloc_chunk_size', 16 * 1024))
+ self.vfile_conf['volume_low_free_space'] = int(
+ conf.get('volume_low_free_space', 8 * 1024))
+ self.vfile_conf['metadata_reserve'] = int(
+ conf.get('metadata_reserve', 500))
+ self.vfile_conf['max_volume_count'] = int(
+ conf.get('max_volume_count', 1000))
+ self.vfile_conf['max_volume_size'] = int(
+ conf.get('max_volume_size', 10 * 1024 * 1024 * 1024))
+ # end vfile specific config
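+        # a hypothetical object-server.conf snippet matching the defaults
+        # read above:
+        #
+        #   [app:object-server]
+        #   volume_alloc_chunk_size = 16384
+        #   volume_low_free_space = 8192
+        #   metadata_reserve = 500
+        #   max_volume_count = 1000
+        #   max_volume_size = 10737418240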
+ self.mount_check = config_true_value(conf.get('mount_check', 'true'))
+
+ self.reclaim_age = int(conf.get('reclaim_age', DEFAULT_RECLAIM_AGE))
+ replication_concurrency_per_device = conf.get(
+ 'replication_concurrency_per_device')
+ replication_one_per_device = conf.get('replication_one_per_device')
+ if replication_concurrency_per_device is None \
+ and replication_one_per_device is not None:
+ self.logger.warning('Option replication_one_per_device is '
+ 'deprecated and will be removed in a future '
+ 'version. Update your configuration to use '
+ 'option replication_concurrency_per_device.')
+ if config_true_value(replication_one_per_device):
+ replication_concurrency_per_device = 1
+ else:
+ replication_concurrency_per_device = 0
+ elif replication_one_per_device is not None:
+ self.logger.warning('Option replication_one_per_device ignored as '
+ 'replication_concurrency_per_device is '
+ 'defined.')
+ if replication_concurrency_per_device is None:
+ self.replication_concurrency_per_device = 1
+ else:
+ self.replication_concurrency_per_device = int(
+ replication_concurrency_per_device)
+ self.replication_lock_timeout = int(conf.get(
+ 'replication_lock_timeout', 15))
+
+ self.use_splice = False
+ self.pipe_size = None
+
+ def cleanup_ondisk_files(self, hsh_path, **kwargs):
+ """
+ Clean up on-disk files that are obsolete and gather the set of valid
+ on-disk files for an object.
+
+ :param hsh_path: object hash path
+ :param frag_index: if set, search for a specific fragment index .data
+ file, otherwise accept the first valid .data file
+ :returns: a dict that may contain: valid on disk files keyed by their
+ filename extension; a list of obsolete files stored under the
+ key 'obsolete'; a list of files remaining in the directory,
+ reverse sorted, stored under the key 'files'.
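+
+        Example (hypothetical), for a hash dir holding a single plain
+        .data file::
+
+            {'data_info': {'timestamp': ..., 'filename': ..., ...},
+             'files': ['1630000000.00000.data']}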
+ """
+
+ def is_reclaimable(timestamp):
+ return (time.time() - float(timestamp)) > self.reclaim_age
+
+ files = vfile.listdir(hsh_path)
+
+ files.sort(reverse=True)
+ results = self.get_ondisk_files(
+ files, hsh_path, verify=False, **kwargs)
+ if 'ts_info' in results and is_reclaimable(
+ results['ts_info']['timestamp']):
+ remove_vfile(join(hsh_path, results['ts_info']['filename']))
+ files.remove(results.pop('ts_info')['filename'])
+ for file_info in results.get('possible_reclaim', []):
+ # stray files are not deleted until reclaim-age
+ if is_reclaimable(file_info['timestamp']):
+ results.setdefault('obsolete', []).append(file_info)
+ for file_info in results.get('obsolete', []):
+ remove_vfile(join(hsh_path, file_info['filename']))
+ files.remove(file_info['filename'])
+ results['files'] = files
+
+ return results
+
+ def object_audit_location_generator(self, policy, device_dirs=None,
+ auditor_type="ALL"):
+ """
+ Yield an AuditLocation for all objects stored under device_dirs.
+
+ :param policy: the StoragePolicy instance
+ :param device_dirs: directory of target device
+ :param auditor_type: either ALL or ZBF
+ """
+ datadir = get_data_dir(policy)
+ return object_audit_location_generator(self.devices, datadir,
+ self.mount_check,
+ self.logger, device_dirs,
+ auditor_type)
+
+ def _hash_suffix_dir(self, path, policy):
+ """
+
+ :param path: full path to directory
+ :param policy: storage policy used
+ """
+ if six.PY2:
+ hashes = defaultdict(md5)
+ else:
+ class shim(object):
+ def __init__(self):
+ self.md5 = md5()
+
+ def update(self, s):
+ if isinstance(s, str):
+ self.md5.update(s.encode('utf-8'))
+ else:
+ self.md5.update(s)
+
+ def hexdigest(self):
+ return self.md5.hexdigest()
+ hashes = defaultdict(shim)
+ try:
+ path_contents = sorted(vfile.listdir(path))
+ except OSError as err:
+ if err.errno in (errno.ENOTDIR, errno.ENOENT):
+ raise PathNotDir()
+ raise
+ for hsh in path_contents:
+ hsh_path = os.path.join(path, hsh)
+ try:
+ ondisk_info = self.cleanup_ondisk_files(
+ hsh_path, policy=policy)
+ except OSError as err:
+ if err.errno == errno.ENOTDIR:
+ partition_path = os.path.dirname(path)
+ objects_path = os.path.dirname(partition_path)
+ device_path = os.path.dirname(objects_path)
+ # The made-up filename is so that the eventual dirpath()
+ # will result in this object directory that we care about.
+ # Some failures will result in an object directory
+ # becoming a file, thus causing the parent directory to
+                    # be quarantined.
+ quar_path = quarantine_vrenamer(
+ device_path, os.path.join(
+ hsh_path, "made-up-filename"))
+ logging.exception(
+ _('Quarantined %(hsh_path)s to %(quar_path)s because '
+ 'it is not a directory'), {'hsh_path': hsh_path,
+ 'quar_path': quar_path})
+ continue
+ raise
+ if not ondisk_info['files']:
+ continue
+
+ # ondisk_info has info dicts containing timestamps for those
+ # files that could determine the state of the diskfile if it were
+ # to be opened. We update the suffix hash with the concatenation of
+ # each file's timestamp and extension. The extension is added to
+ # guarantee distinct hash values from two object dirs that have
+ # different file types at the same timestamp(s).
+ #
+ # Files that may be in the object dir but would have no effect on
+ # the state of the diskfile are not used to update the hash.
+ for key in (k for k in ('meta_info', 'ts_info')
+ if k in ondisk_info):
+ info = ondisk_info[key]
+ hashes[None].update(info['timestamp'].internal + info['ext'])
+
+ # delegate to subclass for data file related updates...
+ self._update_suffix_hashes(hashes, ondisk_info)
+
+ if 'ctype_info' in ondisk_info:
+ # We have a distinct content-type timestamp so update the
+ # hash. As a precaution, append '_ctype' to differentiate this
+                # value from any other timestamp value that might be included
+ # the hash in future. There is no .ctype file so use _ctype to
+ # avoid any confusion.
+ info = ondisk_info['ctype_info']
+ hashes[None].update(info['ctype_timestamp'].internal
+ + '_ctype')
+
+ return hashes
+
+ def _get_hashes(self, *args, **kwargs):
+ hashed, hashes = self.__get_hashes(*args, **kwargs)
+ hashes.pop('updated', None)
+ hashes.pop('valid', None)
+ return hashed, hashes
+
+ def __get_hashes(self, device, partition, policy, recalculate=None,
+ do_listdir=False):
+ """
+ Get hashes for each suffix dir in a partition. do_listdir causes it to
+ mistrust the hash cache for suffix existence at the (unexpectedly high)
+ cost of a listdir.
+
+ :param device: name of target device
+ :param partition: partition on the device in which the object lives
+ :param policy: the StoragePolicy instance
+        :param recalculate: list of suffixes whose hashes should be
+                            recalculated
+ :param do_listdir: force existence check for all hashes in the
+ partition
+
+ :returns: tuple of (number of suffix dirs hashed, dictionary of hashes)
+ """
+ hashed = 0
+ dev_path = self.get_dev_path(device)
+ partition_path = get_part_path(dev_path, policy, partition)
+ hashes_file = os.path.join(partition_path, HASH_FILE)
+ modified = False
+ orig_hashes = {'valid': False}
+
+ if recalculate is None:
+ recalculate = []
+
+ try:
+ orig_hashes = self.consolidate_hashes(partition_path)
+ except Exception:
+ self.logger.warning('Unable to read %r', hashes_file,
+ exc_info=True)
+
+ if not orig_hashes['valid']:
+ # This is the only path to a valid hashes from invalid read (e.g.
+ # does not exist, corrupt, etc.). Moreover, in order to write this
+ # valid hashes we must read *the exact same* invalid state or we'll
+ # trigger race detection.
+ do_listdir = True
+ hashes = {'valid': True}
+ # If the exception handling around consolidate_hashes fired we're
+ # going to do a full rehash regardless; but we need to avoid
+ # needless recursion if the on-disk hashes.pkl is actually readable
+ # (worst case is consolidate_hashes keeps raising exceptions and we
+ # eventually run out of stack).
+            # N.B. orig_hashes invalid only affects new parts and error/edge
+ # conditions - so try not to get overly caught up trying to
+ # optimize it out unless you manage to convince yourself there's a
+ # bad behavior.
+ orig_hashes = read_hashes(partition_path)
+ else:
+ hashes = copy.deepcopy(orig_hashes)
+
+ if do_listdir:
+ for suff in vfile.listdir(partition_path):
+ if len(suff) == 3:
+ hashes.setdefault(suff, None)
+ modified = True
+ hashes.update((suffix, None) for suffix in recalculate)
+ for suffix, hash_ in list(hashes.items()):
+ if not hash_:
+ suffix_dir = os.path.join(partition_path, suffix)
+ try:
+ hashes[suffix] = self._hash_suffix(
+ suffix_dir, policy=policy)
+ hashed += 1
+ except PathNotDir:
+ del hashes[suffix]
+ except OSError:
+ logging.exception(_('Error hashing suffix'))
+ modified = True
+ if modified:
+ with lock_path(partition_path):
+ if read_hashes(partition_path) == orig_hashes:
+ write_hashes(partition_path, hashes)
+ return hashed, hashes
+ return self.__get_hashes(device, partition, policy,
+ recalculate=recalculate,
+ do_listdir=do_listdir)
+ else:
+ return hashed, hashes
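+
+    # Note on the logic above: when state was modified, the recomputed
+    # hashes are only persisted if hashes.pkl is unchanged since it was
+    # read; otherwise another writer won the race and we recurse to retry
+    # against the fresh on-disk state.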
+
+ def get_diskfile_from_hash(self, device, partition, object_hash,
+ policy, **kwargs):
+ """
+ Returns a DiskFile instance for an object at the given
+ object_hash. Just in case someone thinks of refactoring, be
+ sure DiskFileDeleted is *not* raised, but the DiskFile
+ instance representing the tombstoned object is returned
+ instead.
+
+ :param device: name of target device
+ :param partition: partition on the device in which the object lives
+ :param object_hash: the hash of an object path
+ :param policy: the StoragePolicy instance
+ :raises DiskFileNotExist: if the object does not exist
+ :returns: an instance of BaseDiskFile
+ """
+ dev_path = self.get_dev_path(device)
+ if not dev_path:
+ raise DiskFileDeviceUnavailable()
+ object_path = join(
+ dev_path, get_data_dir(policy), str(partition), object_hash[-3:],
+ object_hash)
+ try:
+ filenames = self.cleanup_ondisk_files(object_path)['files']
+ except OSError as err:
+ if err.errno == errno.ENOTDIR:
+ quar_path = self.quarantine_renamer(dev_path, object_path)
+ logging.exception(
+ _('Quarantined %(object_path)s to %(quar_path)s because '
+ 'it is not a directory'), {'object_path': object_path,
+ 'quar_path': quar_path})
+ raise DiskFileNotExist()
+ if err.errno != errno.ENOENT:
+ raise
+ raise DiskFileNotExist()
+ if not filenames:
+ raise DiskFileNotExist()
+
+ try:
+ vf = vfile.VFileReader.get_vfile(join(object_path, filenames[-1]),
+ self.logger)
+ metadata = vf.metadata
+ vf.close()
+ except EOFError:
+ raise DiskFileNotExist()
+ try:
+ account, container, obj = split_path(
+ metadata.get('name', ''), 3, 3, True)
+ except ValueError:
+ raise DiskFileNotExist()
+ return self.diskfile_cls(self, dev_path,
+ partition, account, container, obj,
+ policy=policy, **kwargs)
+
+ def _listdir(self, path):
+ """
+ :param path: full path to directory
+ """
+ try:
+ return vfile.listdir(path)
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ self.logger.error(
+ 'ERROR: Skipping %r due to error with listdir attempt: %s',
+ path, err)
+ return []
+
+ def exists(self, path):
+ """
+ :param path: full path to directory
+ """
+ return vfile.exists(path)
+
+ def mkdirs(self, path):
+ """
+ :param path: full path to directory
+ """
+ return vfile.mkdirs(path)
+
+ def listdir(self, path):
+ """
+ :param path: full path to directory
+ """
+ return vfile.listdir(path)
+
+ def rmtree(self, path, ignore_errors=False):
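+        # note: ignore_errors is accepted for interface compatibility but
+        # is not forwarded to vfile.rmtree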
+ vfile.rmtree(path)
+
+ def remove_file(self, path):
+ """
+        Similar to utils.remove_file: quietly delete a file, ignoring errors.
+
+        :param path: full path to file
+ """
+ try:
+ return vfile.delete_vfile_from_path(path)
+ except (OSError, vfile.VFileException):
+ pass
+
+ def remove(self, path):
+ """
+        :param path: full path to file
+ """
+ return vfile.delete_vfile_from_path(path)
+
+ def isdir(self, path):
+ """
+ :param path: full path to directory
+ """
+ return vfile.isdir(path)
+
+ def isfile(self, path):
+ """
+        :param path: full path to file
+ """
+ return vfile.isfile(path)
+
+ def rmdir(self, path):
+ """
+ :param path: full path to directory
+ """
+ pass
+
+
+class KVFileManager(BaseKVFileManager, DiskFileManager):
+ diskfile_cls = KVFile
+ policy_type = REPL_POLICY
+
+
+class ECKVFileReader(BaseKVFileReader, ECDiskFileReader):
+ def __init__(self, vfr, data_file, obj_size, etag,
+ disk_chunk_size, keep_cache_size, device_path, logger,
+ quarantine_hook, use_splice, pipe_size, diskfile,
+ keep_cache=False):
+ super(ECKVFileReader, self).__init__(
+ vfr, data_file, obj_size, etag,
+ disk_chunk_size, keep_cache_size, device_path, logger,
+ quarantine_hook, use_splice, pipe_size, diskfile, keep_cache)
+ self.frag_buf = None
+ self.frag_offset = 0
+ self.frag_size = self._diskfile.policy.fragment_size
+
+ def _init_checks(self):
+ super(ECKVFileReader, self)._init_checks()
+ # for a multi-range GET this will be called at the start of each range;
+ # only initialise the frag_buf for reads starting at 0.
+ # TODO: reset frag buf to '' if tell() shows that start is on a frag
+ # boundary so that we check frags selected by a range not starting at 0
+        # TODO (EC): check that _started_at_0 is defined correctly
+ if self._started_at_0:
+ self.frag_buf = ''
+ else:
+ self.frag_buf = None
+
+    def _update_checks(self, chunk):
+        # Because of Python's MRO, calling
+        # super(ECKVFileReader, self)._update_checks(chunk) would dispatch to
+        # ECDiskFileReader._update_checks and blow up. Rather than mess with
+        # the class's MRO, explicitly call the implementation we want.
+        BaseDiskFileReader._update_checks(self, chunk)
+ if self.frag_buf is not None:
+ self.frag_buf += chunk
+ cursor = 0
+ while len(self.frag_buf) >= cursor + self.frag_size:
+ self._check_frag(self.frag_buf[cursor:cursor + self.frag_size])
+ cursor += self.frag_size
+ self.frag_offset += self.frag_size
+ if cursor:
+ self.frag_buf = self.frag_buf[cursor:]
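+        # e.g. (hypothetical) with frag_size=4096: a 10000-byte frag_buf
+        # checks two whole fragments and keeps the trailing 1808 bytes
+        # buffered for the next chunk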
+
+ def _handle_close_quarantine(self):
+        # same MRO issue as in _update_checks: call BaseDiskFileReader's
+        # implementation directly instead of super()
+        BaseDiskFileReader._handle_close_quarantine(self)
+ self._check_frag(self.frag_buf)
+
+
+class ECKVFileWriter(BaseKVFileWriter, ECDiskFileWriter):
+ # TODO: this needs to be updated wrt. next_part_power, and other changes
+ # in diskfile.py
+ def _finalize_durable(self, data_file_path, durable_data_file_path,
+ timestamp):
+ exc = None
+ try:
+ try:
+ vfile.rename_vfile(data_file_path, durable_data_file_path,
+ self._diskfile._logger)
+ except (OSError, IOError) as err:
+ if err.errno == errno.ENOENT:
+ files = vfile.listdir(self._datadir)
+ results = self.manager.get_ondisk_files(
+ files, self._datadir,
+ frag_index=self._diskfile._frag_index,
+ policy=self._diskfile.policy)
+ # We "succeeded" if another writer cleaned up our data
+ ts_info = results.get('ts_info')
+ durables = results.get('durable_frag_set', [])
+ if ts_info and ts_info['timestamp'] > timestamp:
+ return
+ elif any(frag_set['timestamp'] > timestamp
+ for frag_set in durables):
+ return
+
+ if err.errno not in (errno.ENOSPC, errno.EDQUOT):
+ # re-raise to catch all handler
+ raise
+ params = {'file': durable_data_file_path, 'err': err}
+ self.manager.logger.exception(
+ _('No space left on device for %(file)s (%(err)s)'),
+ params)
+ exc = DiskFileNoSpace(
+ 'No space left on device for %(file)s (%(err)s)' % params)
+ else:
+ try:
+ self.manager.cleanup_ondisk_files(self._datadir)['files']
+ except OSError as os_err:
+ self.manager.logger.exception(
+ _('Problem cleaning up %(datadir)s (%(err)s)'),
+ {'datadir': self._datadir, 'err': os_err})
+ except Exception as err:
+ params = {'file': durable_data_file_path, 'err': err}
+ self.manager.logger.exception(
+ _('Problem making data file durable %(file)s (%(err)s)'),
+ params)
+ exc = DiskFileError(
+ 'Problem making data file durable %(file)s (%(err)s)' % params)
+ if exc:
+ raise exc
+
+ def put(self, metadata):
+ """
+ The only difference between this method and the replication policy
+ DiskFileWriter method is adding the frag index to the metadata.
+
+ :param metadata: dictionary of metadata to be associated with object
+ """
+ fi = None
+ cleanup = True
+ if self._extension == '.data':
+ # generally we treat the fragment index provided in metadata as
+ # canon, but if it's unavailable (e.g. tests) it's reasonable to
+ # use the frag_index provided at instantiation. Either way make
+ # sure that the fragment index is included in object sysmeta.
+ fi = metadata.setdefault('X-Object-Sysmeta-Ec-Frag-Index',
+ self._diskfile._frag_index)
+ fi = self.manager.validate_fragment_index(fi)
+ self._diskfile._frag_index = fi
+            # defer cleanup until commit() makes the diskfile durable
+ cleanup = False
+ super(ECKVFileWriter, self)._put(metadata, cleanup, frag_index=fi)
+
+
+class ECKVFile(BaseKVFile, ECDiskFile):
+
+ reader_cls = ECKVFileReader
+ writer_cls = ECKVFileWriter
+
+ def purge(self, timestamp, frag_index):
+ """
+ Remove a tombstone file matching the specified timestamp or
+ datafile matching the specified timestamp and fragment index
+ from the object directory.
+
+ This provides the EC reconstructor/ssync process with a way to
+ remove a tombstone or fragment from a handoff node after
+ reverting it to its primary node.
+
+ The hash will be invalidated, and if empty or invalid the
+ hsh_path will be removed on next cleanup_ondisk_files.
+
+ :param timestamp: the object timestamp, an instance of
+ :class:`~swift.common.utils.Timestamp`
+ :param frag_index: fragment archive index, must be
+ a whole number or None.
+ """
+ purge_file = self.manager.make_on_disk_filename(
+ timestamp, ext='.ts')
+ remove_vfile(os.path.join(self._datadir, purge_file))
+ if frag_index is not None:
+ # data file may or may not be durable so try removing both filename
+ # possibilities
+ purge_file = self.manager.make_on_disk_filename(
+ timestamp, ext='.data', frag_index=frag_index)
+ remove_vfile(os.path.join(self._datadir, purge_file))
+ purge_file = self.manager.make_on_disk_filename(
+ timestamp, ext='.data', frag_index=frag_index, durable=True)
+ remove_vfile(os.path.join(self._datadir, purge_file))
+ # we don't use hashes.pkl files
+ # self.manager.invalidate_hash(dirname(self._datadir))
+
+
+class ECKVFileManager(BaseKVFileManager, ECDiskFileManager):
+ diskfile_cls = ECKVFile
+ policy_type = EC_POLICY
+
+
+def remove_vfile(filepath):
+ try:
+ vfile.delete_vfile_from_path(filepath)
+ except OSError:
+ pass
diff --git a/swift/obj/meta.proto b/swift/obj/meta.proto
new file mode 100644
index 000000000..55d8492f3
--- /dev/null
+++ b/swift/obj/meta.proto
@@ -0,0 +1,14 @@
+syntax = "proto3";
+
+package meta;
+
+// Generate module with: protoc -I. --python_out=. meta.proto
+
+message Attr {
+ bytes key = 1;
+ bytes value = 2;
+}
+
+message Metadata {
+ repeated Attr attrs = 1;
+}
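+
+// Example (hypothetical): Swift object metadata such as
+//   {'name': '/AUTH_test/c/o', 'Content-Length': '1024'}
+// becomes a Metadata message with one Attr per key/value pair; see the
+// commit() path in swift/obj/vfile.py, which serializes it into the volume.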
diff --git a/swift/obj/meta_pb2.py b/swift/obj/meta_pb2.py
new file mode 100644
index 000000000..a0cadec66
--- /dev/null
+++ b/swift/obj/meta_pb2.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: meta.proto
+
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+ name='meta.proto',
+ package='meta',
+ syntax='proto3',
+ serialized_options=None,
+ serialized_pb=b'\n\nmeta.proto\x12\x04meta\"\"\n\x04\x41ttr\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12\r\n\x05value\x18\x02 \x01(\x0c\"%\n\x08Metadata\x12\x19\n\x05\x61ttrs\x18\x01 \x03(\x0b\x32\n.meta.Attrb\x06proto3'
+)
+
+
+
+
+_ATTR = _descriptor.Descriptor(
+ name='Attr',
+ full_name='meta.Attr',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='key', full_name='meta.Attr.key', index=0,
+ number=1, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ _descriptor.FieldDescriptor(
+ name='value', full_name='meta.Attr.value', index=1,
+ number=2, type=12, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"",
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=20,
+ serialized_end=54,
+)
+
+
+_METADATA = _descriptor.Descriptor(
+ name='Metadata',
+ full_name='meta.Metadata',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='attrs', full_name='meta.Metadata.attrs', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=56,
+ serialized_end=93,
+)
+
+_METADATA.fields_by_name['attrs'].message_type = _ATTR
+DESCRIPTOR.message_types_by_name['Attr'] = _ATTR
+DESCRIPTOR.message_types_by_name['Metadata'] = _METADATA
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+Attr = _reflection.GeneratedProtocolMessageType('Attr', (_message.Message,), {
+ 'DESCRIPTOR' : _ATTR,
+ '__module__' : 'meta_pb2'
+ # @@protoc_insertion_point(class_scope:meta.Attr)
+ })
+_sym_db.RegisterMessage(Attr)
+
+Metadata = _reflection.GeneratedProtocolMessageType('Metadata', (_message.Message,), {
+ 'DESCRIPTOR' : _METADATA,
+ '__module__' : 'meta_pb2'
+ # @@protoc_insertion_point(class_scope:meta.Metadata)
+ })
+_sym_db.RegisterMessage(Metadata)
+
+
+# @@protoc_insertion_point(module_scope)
diff --git a/swift/obj/objectrpcmanager.py b/swift/obj/objectrpcmanager.py
new file mode 100644
index 000000000..a64dfb49d
--- /dev/null
+++ b/swift/obj/objectrpcmanager.py
@@ -0,0 +1,157 @@
+# Copyright (c) 2010-2015 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import subprocess
+import six
+from eventlet import sleep
+
+from swift.common.daemon import Daemon
+from swift.common.ring.utils import is_local_device
+from swift.common.storage_policy import POLICIES, get_policy_string
+from swift.common.utils import PrefixLoggerAdapter, get_logger, \
+ config_true_value, whataremyips
+from swift.obj import rpc_http as rpc
+
+
+class ObjectRpcManager(Daemon):
+ def __init__(self, conf, logger=None):
+ self.conf = conf
+ self.logger = PrefixLoggerAdapter(
+ logger or get_logger(conf, log_route='object-rpcmanager'), {})
+ self.devices_dir = conf.get('devices', '/srv/node')
+ self.mount_check = config_true_value(conf.get('mount_check', 'true'))
+ # use native golang leveldb implementation
+ self.use_go_leveldb = config_true_value(
+ conf.get('use_go_leveldb', 'false'))
+ self.swift_dir = conf.get('swift_dir', '/etc/swift')
+ self.bind_ip = conf.get('bind_ip', '0.0.0.0')
+ self.servers_per_port = int(conf.get('servers_per_port', '0') or 0)
+ self.port = None if self.servers_per_port else \
+ int(conf.get('bind_port', 6200))
+ self.volcheck = conf.get('volcheck', '')
+ self.losf_bin = conf.get('losf_bin', '')
+ self.healthcheck_interval = int(conf.get('healthcheck_interval', 10))
+
+ # check if the path to LOSF binary and volume checker exist
+ if not os.path.exists(self.volcheck):
+ raise AttributeError(
+ "Invalid or missing volcheck in your config file")
+ if not os.path.exists(self.losf_bin):
+ raise AttributeError(
+ "Invalid or missing losf_bin in your config file")
+
+ # this should select only kv enabled policies
+ # (requires loading policies?)
+ self.policies = POLICIES
+ self.ring_check_interval = int(conf.get('ring_check_interval', 15))
+
+ # add RPC state check interval
+ self.kv_disks = self.get_kv_disks()
+
+ def load_object_ring(self, policy):
+ """
+ Make sure the policy's rings are loaded.
+
+ :param policy: the StoragePolicy instance
+ :returns: appropriate ring object
+ """
+ policy.load_ring(self.swift_dir)
+ return policy.object_ring
+
+ # add filter for KV only
+ def get_policy2devices(self):
+ ips = whataremyips(self.bind_ip)
+ policy2devices = {}
+ for policy in self.policies:
+ self.load_object_ring(policy)
+ local_devices = list(six.moves.filter(
+ lambda dev: dev and is_local_device(
+ ips, self.port,
+ dev['replication_ip'], dev['replication_port']),
+ policy.object_ring.devs))
+ policy2devices[policy] = local_devices
+ return policy2devices
+
+ def get_kv_disks(self):
+ """
+ Returns a dict of KV backed policies to list of devices
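+
+        Example (hypothetical): {0: ['sda', 'sdb'], 2: ['sdc']}
+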
+ :return: dict
+ """
+ policy2devices = self.get_policy2devices()
+ kv_disks = {}
+ for policy, devs in policy2devices.items():
+ if policy.diskfile_module.endswith(('.kv', '.hybrid')):
+ kv_disks[policy.idx] = [d['device'] for d in devs]
+
+ return kv_disks
+
+ def get_worker_args(self, once=False, **kwargs):
+ """
+ Take the set of all local devices for this node from all the KV
+ backed policies rings.
+
+ :param once: False if the worker(s) will be daemonized, True if the
+ worker(s) will be run once
+ :param kwargs: optional overrides from the command line
+ """
+
+        # Note that this gets re-used in is_healthy
+ self.kv_disks = self.get_kv_disks()
+
+        # TODO: what to do in this case?
+ if not self.kv_disks:
+ # we only need a single worker to do nothing until a ring change
+ yield dict(multiprocess_worker_index=0)
+ return
+
+        for policy_idx, devs in self.kv_disks.items():
+ for dev in devs:
+ disk_path = os.path.join(self.devices_dir, dev)
+ losf_dir = get_policy_string('losf', policy_idx)
+ socket_path = os.path.join(disk_path, losf_dir, 'rpc.socket')
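+                # two workers per KV disk: the first execs the LOSF RPC
+                # server (see run_forever), the second periodically checks
+                # the KV state over the socket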
+ yield dict(policy_idx=policy_idx, disk_path=disk_path)
+ yield dict(policy_idx=policy_idx, disk_path=disk_path,
+ socket_path=socket_path, statecheck=True)
+
+ def is_healthy(self):
+ return self.get_kv_disks() == self.kv_disks
+
+ def run_forever(self, policy_idx=None, disk_path=None, socket_path=None,
+ statecheck=None, *args, **kwargs):
+
+ if statecheck:
+ volcheck_args = [self.volcheck, '--disk_path', str(disk_path),
+ '--policy_idx', str(policy_idx),
+ '--keepuser', '--repair', '--no_prompt']
+ # sleep a bit to let the RPC server start. Otherwise it will
+            # time out and take longer to get the checks started.
+ sleep(2)
+ while True:
+ try:
+ state = rpc.get_kv_state(socket_path)
+ if not state.isClean:
+ self.logger.debug(volcheck_args)
+ subprocess.call(volcheck_args)
+ except Exception:
+ self.logger.exception("state check failed, continue")
+ sleep(10)
+ else:
+ losf_args = ['swift-losf-rpc', '-diskPath', str(disk_path),
+ '-debug', 'info',
+ '-policyIdx', str(policy_idx),
+ '-waitForMount={}'.format(str(self.mount_check))]
+ if self.use_go_leveldb:
+ losf_args.append('-useGoLevelDB')
+ os.execv(self.losf_bin, losf_args)
diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py
index 135f9d5f9..ede7ecad7 100644
--- a/swift/obj/reconstructor.py
+++ b/swift/obj/reconstructor.py
@@ -22,7 +22,6 @@ import time
from collections import defaultdict
import six
import six.moves.cPickle as pickle
-import shutil
from eventlet import (GreenPile, GreenPool, Timeout, sleep, tpool, spawn)
from eventlet.support.greenlets import GreenletExit
@@ -30,10 +29,10 @@ from eventlet.support.greenlets import GreenletExit
from swift import gettext_ as _
from swift.common.utils import (
whataremyips, unlink_older_than, compute_eta, get_logger,
- dump_recon_cache, mkdirs, config_true_value,
- GreenAsyncPile, Timestamp, remove_file,
- load_recon_cache, parse_override_options, distribute_evenly,
- PrefixLoggerAdapter, remove_directory)
+ dump_recon_cache, config_true_value,
+ GreenAsyncPile, Timestamp, load_recon_cache,
+ parse_override_options, distribute_evenly, PrefixLoggerAdapter,
+ remove_directory)
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.bufferedhttp import http_connect
from swift.common.daemon import Daemon
@@ -1113,15 +1112,16 @@ class ObjectReconstructor(Daemon):
tmp_path = join(dev_path, get_tmp_dir(int(policy)))
unlink_older_than(tmp_path, time.time() -
df_mgr.reclaim_age)
- if not os.path.exists(obj_path):
+
+ if not df_mgr.exists(obj_path):
try:
- mkdirs(obj_path)
+ df_mgr.mkdirs(obj_path)
except Exception:
self.logger.exception(
'Unable to create %s' % obj_path)
continue
try:
- partitions = os.listdir(obj_path)
+ partitions = df_mgr.listdir(obj_path)
except OSError:
self.logger.exception(
'Unable to list partitions in %r' % obj_path)
@@ -1137,7 +1137,7 @@ class ObjectReconstructor(Daemon):
if not partition.isdigit():
self.logger.warning(
'Unexpected entity in data dir: %r' % part_path)
- self.delete_partition(part_path)
+ self.delete_partition(df_mgr, part_path)
self.reconstruction_part_count += 1
continue
partition = int(partition)
@@ -1194,13 +1194,13 @@ class ObjectReconstructor(Daemon):
self.last_reconstruction_count = -1
self.handoffs_remaining = 0
- def delete_partition(self, path):
- def kill_it(path):
- shutil.rmtree(path, ignore_errors=True)
- remove_file(path)
+ def delete_partition(self, df_mgr, path):
+ def kill_it(df_mgr, path):
+ df_mgr.rmtree(path, ignore_errors=True)
+ df_mgr.remove_file(path)
self.logger.info(_("Removing partition: %s"), path)
- tpool.execute(kill_it, path)
+ tpool.execute(kill_it, df_mgr, path)
def reconstruct(self, **kwargs):
"""Run a reconstruction pass"""
@@ -1230,6 +1230,7 @@ class ObjectReconstructor(Daemon):
# Therefore we know this part a) doesn't belong on
# this node and b) doesn't have any suffixes in it.
self.run_pool.spawn(self.delete_partition,
+ self._df_router[part_info['policy']],
part_info['part_path'])
for job in jobs:
self.run_pool.spawn(self.process_job, job)
diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py
index e3634bb8f..e9d19975a 100644
--- a/swift/obj/replicator.py
+++ b/swift/obj/replicator.py
@@ -16,9 +16,8 @@
from collections import defaultdict
import os
import errno
-from os.path import isdir, isfile, join, dirname
+from os.path import join, dirname
import random
-import shutil
import time
import itertools
from six import viewkeys
@@ -33,7 +32,7 @@ from swift.common.constraints import check_drive
from swift.common.ring.utils import is_local_device
from swift.common.utils import whataremyips, unlink_older_than, \
compute_eta, get_logger, dump_recon_cache, \
- rsync_module_interpolation, mkdirs, config_true_value, \
+ rsync_module_interpolation, config_true_value, \
config_auto_int_value, storage_directory, \
load_recon_cache, PrefixLoggerAdapter, parse_override_options, \
distribute_evenly
@@ -485,9 +484,11 @@ class ObjectReplicator(Daemon):
:param job: a dict containing info about the partition to be replicated
"""
+ df_mgr = self._df_router[job['policy']]
+
def tpool_get_suffixes(path):
- return [suff for suff in os.listdir(path)
- if len(suff) == 3 and isdir(join(path, suff))]
+ return [suff for suff in df_mgr.listdir(path)
+ if len(suff) == 3 and df_mgr.isdir(join(path, suff))]
stats = self.stats_for_dev[job['device']]
stats.attempted += 1
@@ -562,10 +563,10 @@ class ObjectReplicator(Daemon):
failure_dev['device'])
for failure_dev in job['nodes']])
else:
- self.delete_partition(job['path'])
+ self.delete_partition(df_mgr, job['path'])
handoff_partition_deleted = True
elif not suffixes:
- self.delete_partition(job['path'])
+ self.delete_partition(df_mgr, job['path'])
handoff_partition_deleted = True
except (Exception, Timeout):
self.logger.exception(_("Error syncing handoff partition"))
@@ -580,25 +581,26 @@ class ObjectReplicator(Daemon):
self.partition_times.append(time.time() - begin)
self.logger.timing_since('partition.delete.timing', begin)
- def delete_partition(self, path):
+ def delete_partition(self, df_mgr, path):
self.logger.info(_("Removing partition: %s"), path)
try:
- tpool.execute(shutil.rmtree, path)
+ tpool.execute(df_mgr.rmtree, path)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
# If there was a race to create or delete, don't worry
raise
def delete_handoff_objs(self, job, delete_objs):
+ df_mgr = self._df_router[job['policy']]
success_paths = []
error_paths = []
for object_hash in delete_objs:
object_path = storage_directory(job['obj_path'], job['partition'],
object_hash)
- tpool.execute(shutil.rmtree, object_path, ignore_errors=True)
+ tpool.execute(df_mgr.rmtree, object_path, ignore_errors=True)
suffix_dir = dirname(object_path)
try:
- os.rmdir(suffix_dir)
+ df_mgr.rmdir(suffix_dir)
success_paths.append(object_path)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
@@ -816,13 +818,14 @@ class ObjectReplicator(Daemon):
tmp_path = join(dev_path, get_tmp_dir(policy))
unlink_older_than(tmp_path, time.time() -
df_mgr.reclaim_age)
- if not os.path.exists(obj_path):
+ if not df_mgr.exists(obj_path):
try:
- mkdirs(obj_path)
+ df_mgr.mkdirs(obj_path)
except Exception:
self.logger.exception('ERROR creating %s' % obj_path)
continue
- for partition in os.listdir(obj_path):
+
+ for partition in df_mgr.listdir(obj_path):
if (override_partitions is not None and partition.isdigit()
and int(partition) not in override_partitions):
continue
@@ -937,6 +940,7 @@ class ObjectReplicator(Daemon):
override_partitions=override_partitions,
override_policies=override_policies)
for job in jobs:
+ df_mgr = self._df_router[job['policy']]
dev_stats = self.stats_for_dev[job['device']]
num_jobs += 1
current_nodes = job['nodes']
@@ -964,13 +968,13 @@ class ObjectReplicator(Daemon):
return
try:
- if isfile(job['path']):
+ if df_mgr.isfile(job['path']):
# Clean up any (probably zero-byte) files where a
# partition should be.
self.logger.warning(
'Removing partition directory '
'which was a file: %s', job['path'])
- os.remove(job['path'])
+ df_mgr.remove(job['path'])
continue
except OSError:
continue
diff --git a/swift/obj/rpc_http.py b/swift/obj/rpc_http.py
new file mode 100644
index 000000000..df3ff6e94
--- /dev/null
+++ b/swift/obj/rpc_http.py
@@ -0,0 +1,370 @@
+# Copyright (c) 2010-2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Implements RPC: protobuf over a UNIX domain socket
+
+import socket
+from eventlet.green import httplib
+from swift.obj import fmgr_pb2 as pb
+
+
+class UnixHTTPConnection(httplib.HTTPConnection):
+ """Support for unix domain socket with httplib"""
+
+ def __init__(self, path, host='localhost', port=None, strict=None,
+ timeout=None):
+ httplib.HTTPConnection.__init__(self, host, port=port, strict=strict,
+ timeout=timeout)
+ self.path = path
+
+ def connect(self):
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.connect(self.path)
+ self.sock = sock
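+
+    # usage sketch (hypothetical socket path):
+    #   conn = UnixHTTPConnection('/srv/node/sda/losf/rpc.socket')
+    #   conn.request('POST', '/get_kv_state', b'')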
+
+
+class StatusCode(object):
+ Ok = 200
+ Cancelled = 299
+ InvalidArgument = 400
+ NotFound = 404
+ AlreadyExists = 409
+ PermissionDenied = 403
+ FailedPrecondition = 412
+ Unimplemented = 501
+ Internal = 500
+ Unavailable = 503
+
+
+class RpcError(Exception):
+ def __init__(self, message, code):
+ self.code = code
+ super(RpcError, self).__init__(message)
+
+
+def get_rpc_reply(conn, pb_type):
+ """
+ Read the response from the index server over HTTP. If the status is 200,
+ deserialize the body as a protobuf object and return it.
+ If the status is not 200, raise an RpcError exception.
+ :param conn: HTTP connection to the index server
+ :param pb_type: protobuf type we expect in return
+ :return: protobuf object, or raise an exception if HTTP status is not 200
+ """
+ # if buffering is not set, httplib will call recvfrom() for every char
+ http_response = conn.getresponse(buffering=True)
+ if http_response.status != StatusCode.Ok:
+ raise RpcError(http_response.read(), http_response.status)
+
+ pb_obj = pb_type()
+ pb_obj.ParseFromString(http_response.read())
+ return pb_obj
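+
+# Error-handling sketch (hypothetical): callers can map RpcError codes back
+# to errno-style exceptions, e.g.:
+#
+#   try:
+#       obj = get_object(socket_path, name)
+#   except RpcError as e:
+#       if e.code == StatusCode.NotFound:
+#           raise IOError(errno.ENOENT, name)
+#       raise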
+
+
+def get_next_offset(socket_path, volume_index, repair_tool=False):
+ """
+ Returns the next offset to use in the volume
+ """
+ volume = pb.GetNextOffsetRequest(volume_index=int(volume_index),
+ repair_tool=repair_tool)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/get_next_offset', volume.SerializeToString())
+ response = get_rpc_reply(conn, pb.GetNextOffsetReply)
+ return response.offset
+
+
+def register_volume(socket_path, partition, volume_type, volume_index,
+ first_obj_offset, state, repair_tool=False):
+ volume = pb.RegisterVolumeRequest(partition=int(partition),
+ type=int(volume_type),
+ volume_index=int(volume_index),
+ offset=first_obj_offset, state=state,
+ repair_tool=repair_tool)
+
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/register_volume', volume.SerializeToString())
+ response = get_rpc_reply(conn, pb.RegisterVolumeReply)
+ return response
+
+
+def unregister_volume(socket_path, volume_index):
+ index = pb.UnregisterVolumeRequest(index=volume_index)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/unregister_volume', index.SerializeToString())
+ response = get_rpc_reply(conn, pb.UnregisterVolumeReply)
+ return response
+
+
+def update_volume_state(socket_path, volume_index, new_state,
+ repair_tool=False):
+ state_update = pb.UpdateVolumeStateRequest(volume_index=int(volume_index),
+ state=new_state,
+ repair_tool=repair_tool)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/update_volume_state',
+ state_update.SerializeToString())
+ response = get_rpc_reply(conn, pb.UpdateVolumeStateReply)
+ return response
+
+
+def register_object(socket_path, name, volume_index, offset, next_offset,
+ repair_tool=False):
+ """
+ register a vfile
+ """
+ obj = pb.RegisterObjectRequest(name=str(name),
+ volume_index=int(volume_index),
+ offset=int(offset),
+ next_offset=int(next_offset),
+ repair_tool=repair_tool)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/register_object', obj.SerializeToString())
+ response = get_rpc_reply(conn, pb.RegisterObjectReply)
+ return response
+
+
+def unregister_object(socket_path, name, repair_tool=False):
+ obj = pb.UnregisterObjectRequest(name=str(name), repair_tool=repair_tool)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/unregister_object', obj.SerializeToString())
+ response = get_rpc_reply(conn, pb.UnregisterObjectReply)
+ return response
+
+
+def rename_object(socket_path, name, new_name, repair_tool=False):
+ rename_req = pb.RenameObjectRequest(name=str(name), new_name=str(new_name),
+ repair_tool=repair_tool)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/rename_object', rename_req.SerializeToString())
+ response = get_rpc_reply(conn, pb.RenameObjectReply)
+ return response
+
+
+def quarantine_object(socket_path, name, repair_tool=False):
+ objname = pb.QuarantineObjectRequest(name=str(name),
+ repair_tool=repair_tool)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/quarantine_object', objname.SerializeToString())
+ response = get_rpc_reply(conn, pb.QuarantineObjectReply)
+ return response
+
+
+def unquarantine_object(socket_path, name, repair_tool=False):
+ objname = pb.UnquarantineObjectRequest(name=str(name),
+ repair_tool=repair_tool)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/unquarantine_object', objname.SerializeToString())
+ response = get_rpc_reply(conn, pb.UnquarantineObjectReply)
+ return response
+
+
+def _list_quarantined_ohashes(socket_path, page_token, page_size):
+ """
+ Returns quarantined hashes, with pagination (as with the regular diskfile,
+ they are not below partition/suffix directories)
+ :param socket_path: socket_path for index-server
+ :param page_token: where to start for pagination
+ :param page_size: maximum number of results to be returned
+ :return: A list of quarantined object hashes
+ """
+ req_args = pb.ListQuarantinedOHashesRequest(page_token=str(page_token),
+ page_size=page_size)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/list_quarantined_ohashes',
+ req_args.SerializeToString())
+ response = get_rpc_reply(conn, pb.ListQuarantinedOHashesReply)
+ return response
+
+
+def list_quarantined_ohashes(socket_path, page_size=10000):
+ """
+ Returns all quarantined hashes, wraps _list_quarantined_ohashes so caller
+ does not have to deal with pagination
+ :param socket_path: socket_path
+ :param page_size: page_size to pass to wrapped function
+ :return: an iterator for all quarantined objects
+ """
+ page_token = ""
+ while True:
+ response = _list_quarantined_ohashes(socket_path, page_token,
+ page_size)
+ for r in response.objects:
+ yield (r)
+ page_token = response.next_page_token
+ if not page_token:
+ break
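+
+# The server signals the last page with an empty next_page_token, which is
+# what terminates the loop above. E.g. (hypothetical):
+#   for ohash in list_quarantined_ohashes(socket_path):
+#       process(ohash)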
+
+
+def _list_objects_by_volume(socket_path, volume_index, quarantined, page_token,
+ page_size, repair_tool=False):
+ """
+ Returns objects within the volume, either quarantined or not, with
+ pagination.
+ :param socket_path: socket_path for index-server
+ :param volume_index: index of the volume for which to list objects
+ :param quarantined: if true, returns quarantined objects. if false, returns
+ non-quarantined objects.
+ :param page_token: where to start for pagination
+ :param page_size: maximum number of results to be returned
+ :param repair_tool: set to true if caller is a repair tool
+ :return: A list of objects for the volume
+ """
+ req_args = pb.LoadObjectsByVolumeRequest(index=volume_index,
+ quarantined=quarantined,
+ page_token=page_token,
+ page_size=page_size,
+ repair_tool=repair_tool)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/load_objects_by_volume',
+ req_args.SerializeToString())
+ response = get_rpc_reply(conn, pb.LoadObjectsByVolumeReply)
+ return response
+
+
+def list_objects_by_volume(socket_path, volume_index, quarantined=False,
+ page_size=10000, repair_tool=False):
+ page_token = ""
+ while True:
+ response = _list_objects_by_volume(socket_path, volume_index,
+ quarantined, page_token, page_size,
+ repair_tool)
+ for r in response.objects:
+ yield (r)
+ page_token = response.next_page_token
+ if not page_token:
+ break
+
+
+def list_quarantined_ohash(socket_path, prefix, repair_tool=False):
+ len_prefix = len(prefix)
+    req = pb.ListQuarantinedOHashRequest(prefix=str(prefix),
+                                         repair_tool=repair_tool)
+    conn = UnixHTTPConnection(socket_path)
+    conn.request('POST', '/list_quarantined_ohash', req.SerializeToString())
+ response = get_rpc_reply(conn, pb.ListQuarantinedOHashReply)
+
+ # Caller expects object names without the prefix, similar
+ # to os.listdir, not actual objects.
+ objnames = []
+ for obj in response.objects:
+ objnames.append(obj.name[len_prefix:])
+
+ return objnames
+
+
+# listdir-like function for the KV
+def list_prefix(socket_path, prefix, repair_tool=False):
+    len_prefix = len(prefix)
+    req = pb.LoadObjectsByPrefixRequest(prefix=str(prefix),
+                                        repair_tool=repair_tool)
+    conn = UnixHTTPConnection(socket_path)
+    conn.request('POST', '/load_objects_by_prefix', req.SerializeToString())
+ response = get_rpc_reply(conn, pb.LoadObjectsByPrefixReply)
+    # response.objects is an iterable.
+    # TBD: the caller expects object names without the prefix, similar to
+    # os.listdir, not actual objects. Fix this in the RPC server so that we
+    # can return response.objects directly.
+ objnames = []
+ for obj in response.objects:
+ objnames.append(obj.name[len_prefix:])
+
+ return objnames
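+
+# e.g. (hypothetical): with prefix set to an object hash, list_prefix returns
+# the "file names" under that hash, like ['1630000000.00000.data'],
+# mirroring os.listdir() on a regular diskfile hash directory.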
+
+
+def get_object(socket_path, name, is_quarantined=False, repair_tool=False):
+ """
+ returns an object given its whole key
+ """
+ object_name = pb.LoadObjectRequest(name=str(name),
+ is_quarantined=is_quarantined,
+ repair_tool=repair_tool)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/load_object', object_name.SerializeToString())
+ response = get_rpc_reply(conn, pb.LoadObjectReply)
+ return response
+
+
+def list_partitions(socket_path, partition_bits):
+ list_partitions_req = pb.ListPartitionsRequest(
+ partition_bits=partition_bits)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/list_partitions',
+ list_partitions_req.SerializeToString())
+ response = get_rpc_reply(conn, pb.DirEntries)
+ return response.entry
+
+
+def list_partition(socket_path, partition, partition_bits):
+ list_partition_req = pb.ListPartitionRequest(partition=partition,
+ partition_bits=partition_bits)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/list_partition',
+ list_partition_req.SerializeToString())
+ response = get_rpc_reply(conn, pb.DirEntries)
+ return response.entry
+
+
+def list_suffix(socket_path, partition, suffix, partition_bits):
+ suffix = str(suffix)
+ list_suffix_req = pb.ListSuffixRequest(partition=partition,
+ suffix=suffix,
+ partition_bits=partition_bits)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/list_suffix', list_suffix_req.SerializeToString())
+ response = get_rpc_reply(conn, pb.DirEntries)
+ return response.entry
+
+
+def list_volumes(socket_path, partition, type, repair_tool=False):
+ list_req = pb.ListVolumesRequest(partition=int(partition), type=type,
+ repair_tool=repair_tool)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/list_volumes', list_req.SerializeToString())
+ response = get_rpc_reply(conn, pb.ListVolumesReply)
+ return response.volumes
+
+
+def get_volume(socket_path, index, repair_tool=False):
+ volume_idx = pb.GetVolumeRequest(index=index, repair_tool=repair_tool)
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/get_volume', volume_idx.SerializeToString())
+ response = get_rpc_reply(conn, pb.Volume)
+ return response
+
+
+def get_stats(socket_path):
+ stats_req = pb.GetStatsInfo()
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/get_stats', stats_req.SerializeToString())
+ response = get_rpc_reply(conn, pb.GetStatsReply)
+ return response
+
+
+def get_kv_state(socket_path):
+ pb_out = pb.GetKvStateRequest()
+ conn = UnixHTTPConnection(socket_path)
+ conn.request('POST', '/get_kv_state', pb_out.SerializeToString())
+ response = get_rpc_reply(conn, pb.KvState)
+ return response
+
+
+def set_kv_state(socket_path, isClean):
+ conn = UnixHTTPConnection(socket_path)
+ newKvState = pb.KvState(isClean=isClean)
+ conn.request('POST', '/set_kv_state', newKvState.SerializeToString())
+ response = get_rpc_reply(conn, pb.SetKvStateReply)
+ return response
diff --git a/swift/obj/vfile.py b/swift/obj/vfile.py
new file mode 100644
index 000000000..57bacd558
--- /dev/null
+++ b/swift/obj/vfile.py
@@ -0,0 +1,1201 @@
+# Copyright (c) 2010-2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+A "vfile" is a virtual file stored in a "volume".
+A volume is an actual file where vfiles are stored.
+vfile names and metadata (xattr) are also stored in the volume.
+"""
+
+import errno
+import fcntl
+import six
+import hashlib
+import re
+from eventlet.green import os
+from swift.obj.header import ObjectHeader, VolumeHeader, ALIGNMENT, \
+ read_volume_header, HeaderException, STATE_OBJ_QUARANTINED, \
+ STATE_OBJ_FILE, write_object_header, \
+ read_object_header, OBJECT_HEADER_VERSION, write_volume_header, \
+ erase_object_header, MAX_OBJECT_HEADER_LEN
+from swift.common.exceptions import DiskFileNoSpace, \
+ DiskFileBadMetadataChecksum
+from swift.common.storage_policy import POLICIES
+from swift.common.utils import fsync, fdatasync, fsync_dir, \
+ fallocate
+from swift.obj import rpc_http as rpc
+from swift.obj.rpc_http import RpcError, StatusCode
+from swift.obj.fmgr_pb2 import STATE_RW
+from swift.obj.meta_pb2 import Metadata
+from swift.obj.diskfile import _encode_metadata
+from swift.common import utils
+from swift.obj.vfile_utils import SwiftPathInfo, get_volume_index, \
+ get_volume_type, next_aligned_offset, SwiftQuarantinedPathInfo, VOSError, \
+ VIOError, VFileException
+
+VCREATION_LOCK_NAME = "volume_creation.lock"
+
+PICKLE_PROTOCOL = 2
+METADATA_RESERVE = 500
+VOL_AND_LOCKS_RE = re.compile(r'v\d{7}(\.writelock)?')
+
+
+def increment(logger, counter, count=1):
+ if logger is not None:
+ try:
+ logger.update_stats(counter, count)
+ except Exception:
+ pass
+
+
+class VFileReader(object):
+ """
+ Represents a vfile stored in a volume.
+ """
+ def __init__(self, fp, name, offset, header, metadata, logger):
+ self.fp = fp
+ self.name = name
+ self.offset = offset
+ self._header = header
+ self.metadata = metadata
+ self.logger = logger
+
+ @property
+ def data_size(self):
+ return self._header.data_size
+
+ @classmethod
+ def get_vfile(cls, filepath, logger):
+ """
+ Returns a VFileReader instance from the path expected by swift
+ :param filepath: full path to file
+ :param logger: a logger object
+ """
+ si = SwiftPathInfo.from_path(filepath)
+ if si.type != "file":
+ err_msg = "Not a path to a swift file ({})".format(filepath)
+ raise VIOError(errno.EINVAL, err_msg)
+
+ full_name = si.ohash + si.filename
+ return cls._get_vfile(full_name, si.volume_dir, si.socket_path, logger)
+
+ @classmethod
+ def get_quarantined_vfile(cls, filepath, logger):
+ si = SwiftQuarantinedPathInfo.from_path(filepath)
+
+ if si.type != "file":
+ err_msg = "Not a path to a swift file ({})".format(filepath)
+ raise VIOError(errno.EINVAL, err_msg)
+
+ full_name = si.ohash + si.filename
+ return cls._get_vfile(full_name, si.volume_dir, si.socket_path, logger,
+ is_quarantined=True)
+
+ @classmethod
+ def _get_vfile(cls, name, volume_dir, socket_path, logger,
+ is_quarantined=False, repair_tool=False):
+ """
+ Returns a VFileReader instance
+ :param name: full name: object hash+filename
+ :param volume_dir: directory where the volume is stored
+ :param socket_path: full path to KV socket
+ :param logger: logger object
+ :param is_quarantined: object is quarantined
+ :param repair_tool: True if requests comes from a repair tool
+ """
+ # get the object
+ try:
+ obj = rpc.get_object(socket_path, name,
+ is_quarantined=is_quarantined,
+ repair_tool=repair_tool)
+ except RpcError as e:
+ if e.code == StatusCode.NotFound:
+ raise VIOError(errno.ENOENT,
+ "No such file or directory: {}".format(name))
+        # May need to handle more cases?
+ raise (e)
+
+ # get the volume file name from the object
+ volume_filename = get_volume_name(obj.volume_index)
+ volume_filepath = os.path.join(volume_dir, volume_filename)
+
+ fp = open(volume_filepath, 'rb')
+ fp.seek(obj.offset)
+ try:
+ header = read_object_header(fp)
+ except HeaderException:
+ fp.seek(obj.offset)
+ data = fp.read(512)
+            if data == b'\x00' * len(data):
+                # unregister the object here
+                rpc.unregister_object(socket_path, name)
+                msg = ("Zeroed header found for {} at offset {} in volume "
+                       "{}".format(name, obj.offset, volume_filepath))
+                increment(logger, 'vfile.already_punched')
+                raise VFileException(msg)
+            msg = ("Failed to read header for {} at offset {} in volume "
+                   "{}".format(name, obj.offset, volume_filepath))
+            raise VIOError(errno.EIO, msg)
+
+ # check that we have the object we were expecting
+ header_fullname = "{}{}".format(header.ohash, header.filename)
+ if header_fullname != name:
+ # until we journal the renames, after a crash we may not have the
+ # rename in the KV. Handle this here for now
+            non_durable_name = re.sub(r'(#\d+)#d\.', r'\1.', header_fullname)
+ if non_durable_name == name:
+ increment(logger, 'vfile.already_renamed')
+ rpc.rename_object(socket_path, name, header_fullname)
+ else:
+ increment(logger, 'vfile.wrong_object_header_name')
+                raise VIOError(
+                    errno.EIO,
+                    "Wrong object header name. Header: {} Expected: "
+                    "{}".format(header_fullname, name))
+
+ metadata = read_metadata(fp, obj.offset, header)
+
+ # seek to beginning of data
+ fp.seek(obj.offset + header.data_offset)
+
+ return cls(fp, obj.name, obj.offset, header, metadata, logger)
+
+ def read(self, size=None):
+ """
+ Wraps read to prevent reading beyond the vfile content.
+ """
+ curpos = self.fp.tell()
+ data_size = self._header.data_size
+ data_start_offset = self.offset + self._header.data_offset
+ data_end_offset = data_start_offset + data_size
+
+ if curpos >= data_end_offset or size == 0:
+            return b''
+ if size:
+ if size > data_end_offset - curpos:
+ size = data_end_offset - curpos
+ else:
+ size = data_end_offset - curpos
+
+ buf = self.fp.read(size)
+ return buf
+
+ def seek(self, pos):
+ """
+ Wraps seek to bind offset from the vfile start to its end.
+ """
+ real_data_offset = self.offset + self._header.data_offset
+ real_new_pos = real_data_offset + pos
+ if (real_new_pos < real_data_offset or
+ real_new_pos > real_data_offset + self._header.data_size):
+ raise VIOError(errno.EINVAL, "Invalid seek")
+ self.fp.seek(real_new_pos)
+
+ def tell(self):
+ curpos = self.fp.tell()
+ vpos = curpos - (self.offset + self._header.data_offset)
+ return vpos
+
+ def close(self):
+ self.fp.close()
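+
+# Read-path sketch (hypothetical path):
+#   vf = VFileReader.get_vfile(
+#       '/srv/node/sda/objects/1234/ab3/<ohash>/1630000000.00000.data',
+#       logger)
+#   metadata = vf.metadata
+#   body = vf.read()
+#   vf.close()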
+
+
+def _may_grow_volume(volume_fd, volume_offset, obj_size, conf, logger):
+ """
+ Grows a volume if needed.
+    If free_space < obj_size + object header length, allocate obj_size
+    padded to volume_alloc_chunk_size.
+    """
+ volume_alloc_chunk_size = conf['volume_alloc_chunk_size']
+
+ if obj_size is None:
+ obj_size = 0
+
+ volume_size = os.lseek(volume_fd, 0, os.SEEK_END)
+ free_space = volume_size - volume_offset
+
+ obj_header_len = len(ObjectHeader(version=OBJECT_HEADER_VERSION))
+ required_space = obj_header_len + obj_size
+
+ if free_space < required_space:
+ _allocate_volume_space(volume_fd, volume_offset, required_space,
+ volume_alloc_chunk_size, logger)
+
+
+class VFileWriter(object):
+ def __init__(self, datadir, fd, lock_fd, volume_dir,
+ volume_index, header, offset, logger):
+ si = SwiftPathInfo.from_path(datadir)
+
+ self.fd = fd
+ self.lock_fd = lock_fd
+ self.volume_dir = volume_dir
+ self.volume_index = volume_index
+ self.header = header
+ self.offset = offset
+ self.socket_path = si.socket_path
+ self.partition = si.partition
+ # may be used for statsd. Do not use it to log or it will hang the
+ # object-server process. (eventlet)
+ self.logger = logger
+
+ @classmethod
+ def create(cls, datadir, obj_size, conf, logger, extension=None):
+ # parse datadir
+ si = SwiftPathInfo.from_path(datadir)
+
+ if si.type != "ohash":
+ raise VOSError("not a valid object hash path")
+
+ if obj_size is not None:
+ if obj_size < 0:
+ raise VOSError("obj size may not be negative")
+
+ socket_path = os.path.normpath(si.socket_path)
+ volume_dir = os.path.normpath(si.volume_dir)
+
+ # get a writable volume
+        # TODO: check that we fallocate enough if obj_size > volume
+ # chunk alloc size
+ volume_file, lock_file, volume_path = open_or_create_volume(
+ socket_path, si.partition, extension, volume_dir,
+ conf, logger, size=obj_size)
+ volume_index = get_volume_index(volume_path)
+
+ # create object header
+ header = ObjectHeader(version=OBJECT_HEADER_VERSION)
+        header.ohash = si.ohash
+        # TODO: policy_idx is unused, always set to zero.
+        header.policy_idx = 0
+ header.data_offset = len(header) + conf['metadata_reserve']
+ header.data_size = 0
+ # requires header v3
+ header.state = STATE_OBJ_FILE
+
+ try:
+ # get offset at which to start writing
+ offset = rpc.get_next_offset(socket_path, volume_index)
+
+ # pre-allocate space if needed
+ _may_grow_volume(volume_file, offset, obj_size, conf, logger)
+
+ # seek to absolute object offset + relative data offset
+ # (we leave space for the header and some metadata)
+ os.lseek(volume_file, offset + header.data_offset,
+ os.SEEK_SET)
+ except Exception:
+ os.close(volume_file)
+ os.close(lock_file)
+ raise
+
+ return cls(datadir, volume_file, lock_file, volume_dir,
+ volume_index, header, offset, logger)
+
+ def commit(self, filename, metadata):
+ """
+ Write the header, metadata, sync, and register vfile in KV.
+ """
+ if self.fd < 0:
+ raise VIOError(errno.EBADF, "Bad file descriptor")
+
+ if not filename:
+ raise VIOError("filename cannot be empty")
+
+        # how much data has been written?
+ # header.data_offset is relative to the object's offset
+ data_offset = self.offset + self.header.data_offset
+ data_end = os.lseek(self.fd, 0, os.SEEK_CUR)
+ self.header.data_size = data_end - data_offset
+        # FIXME: this debug message is unused; either log it or remove it.
+ # txt = "commit: {} data_end {} data_offset: {}"
+
+ self.header.filename = filename
+        # create and populate the protobuf metadata object
+ meta = Metadata()
+ enc_metadata = _encode_metadata(metadata)
+ for k, v in enc_metadata.items():
+ meta.attrs.add(key=k, value=v)
+
+ metastr = meta.SerializeToString()
+ metastr_md5 = hashlib.md5(metastr).hexdigest().encode('ascii')
+
+ self.header.metadata_size = len(metastr)
+ self.header.metadata_offset = len(self.header)
+ self.header.metastr_md5 = metastr_md5
+
+ # calculate the end object offset (this includes the padding, if any)
+ # start from data_end, and add: metadata remainder if any, footer,
+ # padding
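+        # For example (hypothetical numbers): with metadata_reserve = 500
+        # and a 4000-byte metastr, metadata_remainder is 3500, so
+        # object_end = data_end + 3500, rounded up to the next ALIGNMENT
+        # boundary.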
+
+        # how much reserved metadata space do we have?
+        # (it should be equal to "metadata_reserve")
+ metadata_available_space = (self.header.data_offset -
+ self.header.metadata_offset)
+ metadata_remainder = max(0, self.header.metadata_size -
+ metadata_available_space)
+
+ object_end = data_end + metadata_remainder
+
+ object_end = next_aligned_offset(object_end, ALIGNMENT)
+
+ self.header.total_size = object_end - self.offset
+
+ # write header
+ os.lseek(self.fd, self.offset, os.SEEK_SET)
+ os.write(self.fd, self.header.pack())
+
+ # write metadata, and footer
+ metadata_offset = self.offset + self.header.metadata_offset
+ if self.header.metadata_size > metadata_available_space:
+ os.lseek(self.fd, metadata_offset, os.SEEK_SET)
+ os.write(self.fd, metastr[:metadata_available_space])
+ # metadata does not fit in reserved space,
+ # write the remainder after the data
+ os.lseek(self.fd, data_end, os.SEEK_SET)
+ os.write(self.fd, metastr[metadata_available_space:])
+ else:
+ os.lseek(self.fd, metadata_offset, os.SEEK_SET)
+ os.write(self.fd, metastr)
+
+ # Sanity check, we should not go beyond object_end
+ curpos = os.lseek(self.fd, 0, os.SEEK_CUR)
+ if curpos > object_end:
+ errtxt = "BUG: wrote past object_end! curpos: {} object_end: {}"
+ raise Exception(errtxt.format(curpos, object_end))
+
+ # sync data. fdatasync() is enough, if the volume was just created,
+ # it has been fsync()'ed previously, along with its parent directory.
+ fdatasync(self.fd)
+
+ # register object
+ full_name = "{}{}".format(self.header.ohash, filename)
+ try:
+ rpc.register_object(self.socket_path, full_name, self.volume_index,
+ self.offset, object_end)
+ except RpcError:
+ # If we failed to register the object, erase the header so that it
+ # will not be picked up by the volume checker if there is a crash
+ # or power failure before it gets overwritten by another object.
+ erase_object_header(self.fd, self.offset)
+ raise
+
+ increment(self.logger, 'vfile.vfile_creation')
+ increment(self.logger, 'vfile.total_space_used',
+ self.header.total_size)
+
+
+def open_or_create_volume(socket_path, partition, extension, volume_dir,
+ conf, logger, size=0):
+ """
+ Tries to open or create a volume for writing. If a volume cannot be
+ opened or created, a VOSError exception is raised.
+ :return: volume file descriptor, lock file descriptor, absolute path
+ to volume.
+ """
+ volume_file, lock_file, volume_path = open_writable_volume(socket_path,
+ partition,
+ extension,
+ volume_dir,
+ conf,
+ logger)
+ if not volume_file:
+ # attempt to create new volume for partition
+ try:
+ volume_file, lock_file, volume_path = create_writable_volume(
+ socket_path, partition, extension, volume_dir,
+ conf, logger, size=size)
+ except Exception as err:
+ error_msg = "Failed to open or create a volume for writing: "
+ error_msg += getattr(err, "strerror", "Unknown error")
+ raise VOSError(errno.ENOSPC, error_msg)
+
+ return volume_file, lock_file, volume_path
+
+
+def _create_new_lock_file(volume_dir, logger):
+ creation_lock_path = os.path.join(volume_dir, VCREATION_LOCK_NAME)
+ with open(creation_lock_path, 'w') as creation_lock_file:
+ # this may block
+ fcntl.flock(creation_lock_file, fcntl.LOCK_EX)
+
+ index = get_next_volume_index(volume_dir)
+ next_lock_name = get_lock_file_name(index)
+ next_lock_path = os.path.join(volume_dir, next_lock_name)
+
+ try:
+ lock_file = os.open(next_lock_path,
+ os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600)
+ except OSError:
+ increment(logger, 'vfile.volume_creation.fail_other')
+ raise
+
+ try:
+ fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError:
+ increment(logger, 'vfile.volume_creation.fail_other')
+ os.close(lock_file)
+ os.unlink(next_lock_path)
+ raise
+
+ return index, next_lock_path, lock_file
+
+
+# create a new volume
+def create_writable_volume(socket_path, partition, extension, volume_dir,
+ conf, logger, state=STATE_RW, size=0):
+ """
+    Creates a new writable volume and its associated lock file.
+    Returns a volume file descriptor, a lock file descriptor, and the path
+    of the volume that has been created.
+ If the extension is specified, a specific volume may be used.
+ (Currently, .ts files go to separate volumes as they are short-lived, in
+ order to limit fragmentation)
+ state can be STATE_RW (new RW volume) or STATE_COMPACTION_TARGET (new empty
+ volume which will be used for compaction, and to which new objects cannot
+ be written).
+ size is the space that should be allocated to the volume (in addition to
+ the volume header)
+ """
+
+ if size is None:
+ size = 0
+
+    # Check if we have exceeded the allowed volume count for this partition.
+    # TODO: move this check below, with the lock held? (for now, we may
+    # create a few extra volumes)
+ volume_type = get_volume_type(extension)
+ volumes = rpc.list_volumes(socket_path, partition, volume_type)
+ max_volume_count = conf['max_volume_count']
+ if len(volumes) >= max_volume_count:
+ err_txt = ("Maximum count of volumes reached for partition:"
+ " {} type: {}".format(partition, volume_type))
+ increment(logger, 'vfile.volume_creation.fail_count_exceeded')
+ raise VOSError(errno.EDQUOT, err_txt)
+
+ try:
+ os.makedirs(volume_dir)
+ except OSError as err:
+ if err.errno == errno.EEXIST:
+ pass
+ else:
+ raise
+
+ index, next_lock_path, lock_file = _create_new_lock_file(
+ volume_dir, logger)
+
+ # create the volume
+ next_volume_name = get_volume_name(index)
+ next_volume_path = os.path.join(volume_dir, next_volume_name)
+
+ vol_header = VolumeHeader()
+ vol_header.volume_idx = index
+ vol_header.type = volume_type
+ vol_header.partition = int(partition)
+ # first object alignment
+ vol_header.first_obj_offset = len(vol_header) + (
+ ALIGNMENT - len(vol_header) % ALIGNMENT)
+ vol_header.state = state
+
+    # How much space is needed for the object? (assuming metadata fits in
+    # the reserved space, but we cannot know this in advance)
+ alloc_size = vol_header.first_obj_offset + size
+ volume_alloc_chunk_size = conf['volume_alloc_chunk_size']
+
+    volume_file = None
+    try:
+ volume_file = os.open(next_volume_path,
+ os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600)
+ _allocate_volume_space(volume_file, 0, alloc_size,
+ volume_alloc_chunk_size, logger)
+
+ # Write volume header
+ write_volume_header(vol_header, volume_file)
+
+ # If the uploader is slow to send data to the object server, a crash
+ # may occur before the object is received and a call to fsync() is
+ # issued. We end up with volumes without a header.
+ # Issue a fsync() here, at the cost of performance early on. As
+ # partitions get volumes we switch to open_writable_volume, avoiding
+ # the fsync.
+ fsync(volume_file)
+ fsync_dir(volume_dir)
+
+ # Register volume
+ rpc.register_volume(socket_path, partition, vol_header.type, index,
+ vol_header.first_obj_offset, vol_header.state)
+    except Exception:
+        # volume_file may not have been opened if os.open() itself failed
+        os.close(lock_file)
+        os.unlink(next_lock_path)
+        if volume_file is not None:
+            os.close(volume_file)
+            os.unlink(next_volume_path)
+ increment(logger, 'vfile.volume_creation.fail_other')
+ raise
+
+ increment(logger, 'vfile.volume_creation.ok')
+ return volume_file, lock_file, next_volume_path
+
+
+def _allocate_volume_space(volume_fd, offset, length, volume_alloc_chunk_size,
+ logger, ignore_error=False):
+ """
+    Pre-allocates space for the volume, given the offset and length,
+    aligned to volume_alloc_chunk_size.
+    :param volume_fd: file descriptor of the volume
+    :param offset: offset from which to grow the volume
+    :param length: length to grow, relative to offset
+    :param volume_alloc_chunk_size: pad length to align to this chunk size
+    :param logger: may be used for statsd counters
+    :param ignore_error: if True, ignore any OSError from fallocate()
+ """
+ try:
+ alloc_size = next_aligned_offset(length, volume_alloc_chunk_size)
+ fallocate(volume_fd, alloc_size, offset)
+ increment(logger, 'vfile.volume_alloc_space',
+ alloc_size)
+ except OSError as err:
+ if not ignore_error:
+ if err.errno in (errno.ENOSPC, errno.EDQUOT):
+ raise DiskFileNoSpace()
+ raise
+
+
+def delete_volume(socket_path, volume_path, logger):
+ """
+    Deletes a volume from disk and removes its entry from the KV
+ """
+ index = get_volume_index(volume_path)
+ volume_lock_path = "{}.writelock".format(volume_path)
+
+ # Remove KV entry
+ rpc.unregister_volume(socket_path, index)
+
+ # Remove volume and lock
+ os.unlink(volume_path)
+ os.unlink(volume_lock_path)
+
+
+def open_writable_volume(socket_path, partition, extension, volume_dir, conf,
+ logger):
+ """
+ Opens a volume available for writing.
+    Returns a volume file descriptor, a lock file descriptor, and the
+    volume path.
+    :param socket_path: full path to KV socket
+    :param partition: partition name
+    :param extension: file extension
+    :param volume_dir: directory holding the volume files
+    :param conf: vfile configuration dict
+    :param logger: logger (may be used for statsd)
+ """
+ volume_type = get_volume_type(extension)
+ volume_file = None
+ lock_file = None
+ volume_file_path = None
+ # query the KV for available volumes given the partition and type
+ volumes = rpc.list_volumes(socket_path, partition, volume_type)
+
+ # writable candidates are volumes which are in RW state and not too large
+ volumes = [vol for vol in volumes if vol.volume_state == STATE_RW and
+ vol.next_offset < conf['max_volume_size']]
+ volume_files = [get_volume_name(volume.volume_index) for volume in
+ volumes]
+
+ for volume_file_name in volume_files:
+ volume_file_path = os.path.join(volume_dir, volume_file_name)
+ volume_file, lock_file = open_volume(volume_file_path)
+ if volume_file:
+ break
+
+ return volume_file, lock_file, volume_file_path
+
+
+def open_volume(volume_path):
+ """Locks the volume, and returns a fd to the volume and a fd to its lock
+ file. Returns None, None, if it cannot be locked. Raises for any other
+ error.
+ :param volume_path: full path to volume
+ :return: (volume fd, lock fd)
+ """
+ lock_file_path = "{}.writelock".format(volume_path)
+
+ try:
+ lock_file = os.open(lock_file_path, os.O_WRONLY)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+        # if the volume lock file has been removed, create it
+ lock_file = os.open(lock_file_path,
+ os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600)
+
+ try:
+ fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except IOError as err:
+ if err.errno in (errno.EACCES, errno.EAGAIN):
+ # volume is locked
+ os.close(lock_file)
+ return None, None
+ else:
+ try:
+ os.close(lock_file)
+ except Exception:
+ pass
+ raise
+ except Exception:
+ try:
+ os.close(lock_file)
+ except Exception:
+ pass
+ raise
+
+ volume_file = os.open(volume_path, os.O_WRONLY)
+ return volume_file, lock_file
+
+
+def change_volume_state(volume_file_path, state, compaction_target=None):
+ """
+    Changes the volume's state. The caller must hold the volume lock.
+    TODO: should this handle the RPC as well? (currently done by caller)
+    TODO: take an optional size parameter so we can fallocate() the file
+ :param volume_file_path: path of volume to modify
+ :param state: new state
+ :param compaction_target: ID of the volume compaction target, if applicable
+ """
+    with open(volume_file_path, "rb+") as volume_file:
+        h = read_volume_header(volume_file)
+        h.state = state
+        if compaction_target:
+            h.compaction_target = compaction_target
+        volume_file.seek(0)
+        volume_file.write(h.pack())
+
+
+def get_next_volume_index(volume_dir):
+ """
+ Returns the next volume index to use for the given dir.
+ Caller must hold the volume creation lock.
+ :param volume_dir: volume directory
+ :return: the next volume index to use
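+    Example: entries for indexes {2, 3, 5} yield 1 (the first hole in the
+    sequence); entries for {1, 2, 3} yield 4.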
+ """
+ dir_entries = os.listdir(volume_dir)
+ # Get all volumes and their lock: a volume should always have a lock,
+ # but a fsck may have removed either. If we find such a case, skip the
+ # index.
+ volumes_and_locks_idxs = set([name[1:8] for name in dir_entries if
+ VOL_AND_LOCKS_RE.match(name)])
+ if len(volumes_and_locks_idxs) < 1:
+ return 1
+
+ # This is about 30% faster than calling int() in the list comprehension
+ # above.
+ idxs = sorted(int(i) for i in volumes_and_locks_idxs)
+
+ # find the first "hole" in the indexes
+ for pos, idx in enumerate(idxs, start=1):
+ if pos != idx:
+ return pos
+
+ # no hole found
+ return idx + 1
+
+
+def get_lock_file_name(index):
+ if index <= 0 or index > 9999999:
+ raise VFileException("invalid lock file index")
+ lock_file_name = "v{0:07d}.writelock".format(index)
+ return lock_file_name
+
+
+def get_volume_name(index):
+ if index <= 0 or index > 9999999:
+ raise VFileException("invalid volume file index")
+ volume_file_name = "v{0:07d}".format(index)
+ return volume_file_name
+
+
+def listdir(path):
+ type_to_func = {
+ 'ohash': _list_ohash,
+ 'suffix': _list_suffix,
+ 'partition': _list_partition,
+ 'partitions': _list_partitions
+ }
+
+ path = os.path.normpath(path)
+ si = SwiftPathInfo.from_path(path)
+
+    # get part power from the ring (needed as we generate "directories"
+    # based on it)
+ if not POLICIES[si.policy_idx].object_ring:
+ POLICIES[si.policy_idx].load_ring('/etc/swift')
+ part_power = 32 - POLICIES[si.policy_idx].object_ring._part_shift
+
+ ret = type_to_func[si.type](si, part_power)
+ return [str(e) for e in ret]
+
+
+def exists(path):
+ """
+ Similar to os.path.exists
+    LOSF manages files below the "objects" directory. If the query is about
+    that directory, use os.path.exists; otherwise, check in the KV.
+    This does not really make sense in the LOSF context, as callers will
+    then issue a "mkdir", which is a noop. But having it means we touch less
+    of the existing code (diskfile, reconstructor, replicator).
+    :param path: full path to directory
+    :return: True if path exists, False otherwise
+ """
+ si = SwiftPathInfo.from_path(path)
+ if si.type == 'partitions':
+ return os.path.exists(path)
+
+    # Note: does not handle the "file" type.
+    # if a directory is empty, it does not exist
+    return bool(listdir(path))
+
+
+def isdir(path):
+ """
+ Similar to os.path.isdir
+ :param path: full path to directory
+    :return: True if path behaves as a directory, False otherwise
+ """
+ si = SwiftPathInfo.from_path(path)
+ if si.type == 'partitions':
+ return os.path.isdir(path)
+ if si.type == 'file':
+ return False
+    return bool(listdir(path))
+
+
+def isfile(path):
+ """
+ Similar to os.path.isfile
+    :param path: full path to file
+    :return: True if path behaves as a file, False otherwise
+ """
+ si = SwiftPathInfo.from_path(path)
+ if si.type == 'partitions':
+ return os.path.isfile(path)
+ if si.type == 'file':
+ return True
+ return False
+
+
+def mkdirs(path):
+ """
+ Similar to utils.mkdirs
+ Noop, except if the directory is the "objects" directory
+ :param path: full path to directory
+ """
+ si = SwiftPathInfo.from_path(path)
+ if si.type == 'partitions':
+ return utils.mkdirs(path)
+
+
+def list_quarantine(quarantine_path):
+ """
+ Lists all quarantined object hashes for the disk/policy
+ :param quarantine_path: quarantined path
+ :return: a list of quarantined object hashes
+ """
+ si = SwiftQuarantinedPathInfo.from_path(quarantine_path)
+ if si.type != "ohashes":
+ err_msg = "Not a path to a quarantined file ({})".format(
+ quarantine_path)
+ raise VIOError(errno.EINVAL, err_msg)
+ return rpc.list_quarantined_ohashes(si.socket_path)
+
+
+def list_quarantined_ohash(quarantined_ohash_path):
+ si = SwiftQuarantinedPathInfo.from_path(quarantined_ohash_path)
+ if si.type != "ohash":
+ err_msg = "Not a path to a quarantined file ({})".format(
+ quarantined_ohash_path)
+ raise VIOError(errno.EINVAL, err_msg)
+ return rpc.list_quarantined_ohash(si.socket_path, si.ohash)
+
+
+def _list_ohash(si, part_power):
+ """
+ :param si: SwiftPathInfo object
+ :param part_power:
+ :return: list of files within the object directory
+ """
+ return rpc.list_prefix(si.socket_path, si.ohash)
+
+
+def _list_suffix(si, part_power):
+ """
+ :param si: SwiftPathInfo object
+ :param part_power:
+ :return: list of object hashes directory within the suffix directory
+ """
+ return rpc.list_suffix(si.socket_path, int(si.partition),
+ si.suffix, part_power)
+
+
+def _list_partition(si, part_power):
+ """
+ :param si: SwiftPathInfo object
+ :param part_power:
+ :return: list of suffixes within the partition
+ """
+ return rpc.list_partition(si.socket_path, int(si.partition),
+ part_power)
+
+
+def _list_partitions(si, part_power):
+ """
+ :param si: SwiftPathInfo object
+ :param part_power:
+ :return: list of partitions
+ """
+ return rpc.list_partitions(si.socket_path, part_power)
+
+
+def set_header_state(socket_path, name, quarantine):
+ """
+ Set a vfile header state (quarantined or not)
+    :param socket_path: socket path
+    :param name: full name
+ :param quarantine: True to quarantine, False to unquarantine
+ :return:
+ """
+ try:
+ obj = rpc.get_object(socket_path, name, is_quarantined=not quarantine,
+ repair_tool=False)
+ except RpcError as e:
+ if e.code == StatusCode.NotFound:
+ raise IOError("No such file or directory: {}".format(name))
+        raise
+
+ volume_filename = get_volume_name(obj.volume_index)
+ volume_dir = socket_path.replace("rpc.socket", "volumes")
+ volume_filepath = os.path.join(volume_dir, volume_filename)
+ with open(volume_filepath, 'r+b') as fp:
+ fp.seek(obj.offset)
+ try:
+ header = read_object_header(fp)
+ except HeaderException:
+ # until we journal the deletes, after a crash we may have an entry
+ # for an object that has been "punched" from the volume.
+ # if we find a hole instead of the header, remove entry from
+ # kv and return.
+ fp.seek(obj.offset)
+ data = fp.read(MAX_OBJECT_HEADER_LEN)
+            if data == b'\x00' * len(data):
+ # unregister the object here
+ rpc.unregister_object(socket_path, name)
+ return
+ msg = "Failed to read header for {} at offset {} in volume\
+ {}".format(name, obj.offset, volume_filepath)
+ raise VFileException(msg)
+ if quarantine:
+ header.state = STATE_OBJ_QUARANTINED
+ else:
+ header.state = STATE_OBJ_FILE
+ fp.seek(obj.offset)
+ write_object_header(header, fp)
+
+
+def quarantine_ohash(dirpath, policy):
+ """
+ Quarantine the object (all files below the object hash directory)
+ :param dirpath: path to object directory
+ :param policy: policy
+ :return:
+ """
+ si = SwiftPathInfo.from_path(dirpath)
+ if si.type != 'ohash':
+ raise VFileException("dirpath not an object dir: {}".format(dirpath))
+
+ if policy.policy_type == 'erasure_coding':
+ sort_f = lambda x: utils.Timestamp(x.split('#')[0])
+ else:
+ sort_f = lambda x: utils.Timestamp(os.path.splitext(x)[0])
+
+ final = []
+ vfiles = listdir(dirpath)
+
+ try:
+ for ext in ['.data', '.meta', '.ts']:
+ partial = [v for v in vfiles if os.path.splitext(v)[1] == ext]
+ partial.sort(key=sort_f)
+ final.extend(partial)
+    except Exception:
+        # unexpected file name; fall back to the unsorted list
+        final = vfiles
+
+ for vfile in final:
+ vfilepath = os.path.join(dirpath, vfile)
+ sif = SwiftPathInfo.from_path(vfilepath)
+ full_name = sif.ohash + sif.filename
+ # update header
+ set_header_state(sif.socket_path, full_name, quarantine=True)
+ try:
+ # update KV
+ rpc.quarantine_object(si.socket_path, full_name)
+ except RpcError as e:
+ if e.code == StatusCode.NotFound:
+ errmsg = "No such file or directory: '{}'"
+ raise OSError(2, errmsg.format(vfilepath))
+            raise
+
+
+def unquarantine_ohash(socket_path, ohash):
+ """
+ Unquarantine the object (all files below the object hash directory).
+    Is this needed? It is used by tests but currently not called from
+    anywhere else.
+ :param socket_path: path to KV socket
+ :param ohash: object hash
+ """
+ for objname in rpc.list_quarantined_ohash(socket_path, ohash):
+ full_name = "{}{}".format(ohash, objname)
+ set_header_state(socket_path, full_name, quarantine=False)
+ try:
+ rpc.unquarantine_object(socket_path, full_name)
+ except RpcError as e:
+ if e.code == StatusCode.NotFound:
+ errmsg = "No such file or directory: '{}'"
+ raise OSError(2, errmsg.format(full_name))
+            raise
+
+
+def rmtree(path):
+ """
+    Deletes a directory recursively. (Actually, it only deletes objects,
+    as directories do not exist.)
+    :param path: path to the "directory" to remove
+ """
+ type_to_func = {
+ 'ohash': _rmtree_ohash,
+ 'suffix': _rmtree_suffix,
+ 'partition': _rmtree_partition
+ # 'partitions': _rmtree_partitions
+ }
+
+ path = os.path.normpath(path)
+ si = SwiftPathInfo.from_path(path)
+ type_to_func[si.type](path)
+
+
+def _rmtree_ohash(path):
+ files = listdir(path)
+ for name in files:
+ filepath = os.path.join(path, name)
+ delete_vfile_from_path(filepath)
+
+
+def _rmtree_suffix(path):
+ ohashes = listdir(path)
+ for ohash in ohashes:
+ ohashpath = os.path.join(path, ohash)
+ _rmtree_ohash(ohashpath)
+
+
+def _rmtree_partition(path):
+ suffixes = listdir(path)
+ for suffix in suffixes:
+ suffixpath = os.path.join(path, suffix)
+ _rmtree_suffix(suffixpath)
+
+
+def delete_vfile_from_path(filepath):
+ si = SwiftPathInfo.from_path(filepath)
+ full_name = si.ohash + si.filename
+
+ def _unregister_object(socket_path, name, volume_index, offset, size):
+ try:
+ rpc.unregister_object(socket_path, name)
+ except RpcError as e:
+ if e.code == StatusCode.NotFound:
+                raise VOSError(
+                    errno.ENOENT,
+                    "No such file or directory: '{}'".format(filepath))
+            raise
+
+ try:
+ obj = rpc.get_object(si.socket_path, full_name)
+ except RpcError as e:
+ if e.code == StatusCode.NotFound:
+            raise VOSError(
+                errno.ENOENT,
+                "No such file or directory: '{}'".format(filepath))
+        raise
+ volume_filename = get_volume_name(obj.volume_index)
+ volume_filepath = os.path.join(si.volume_dir, volume_filename)
+
+ with open(volume_filepath, 'r+b') as fp:
+ # get object length
+ fp.seek(obj.offset)
+ try:
+ header = read_object_header(fp)
+ except HeaderException:
+ # until we journal the deletes, after a crash we may have an entry
+ # for an object that has been "punched" from the volume.
+ # if we find a hole instead of the header, remove entry from
+ # kv and return.
+ fp.seek(obj.offset)
+ data = fp.read(MAX_OBJECT_HEADER_LEN)
+            if data == b'\x00' * len(data):
+ # unregister the object here
+ _unregister_object(si.socket_path, full_name,
+ obj.volume_index, obj.offset, 0)
+ return
+
+ msg = "Failed to read header for {} at offset {} in volume\
+ {}".format(full_name, obj.offset, volume_filepath)
+ raise VFileException(msg)
+
+ # check that we have the object we were expecting
+ header_fullname = "{}{}".format(header.ohash, header.filename)
+ if header_fullname != full_name:
+ # until we journal the renames, after a crash we may not have the
+ # rename in the KV. If that's the case, continue.
+        non_durable_name = re.sub(r'(#\d+)#d\.', r'\1.', header_fullname)
+ if non_durable_name != full_name:
+ raise VFileException(
+ "Wrong header name. Header: {} Expected: {}".format(
+ header_fullname, full_name))
+ utils.punch_hole(fp.fileno(), obj.offset, header.total_size)
+
+ _unregister_object(si.socket_path, full_name, obj.volume_index,
+ obj.offset, header.total_size)
+
+
+# delete an object hash directory (a noop: directories do not exist in LOSF)
+def rmtree_ohash(path):
+ pass
+
+
+def read_metadata(fp, offset, header):
+ """
+ Reads vfile metadata
+ :param fp: opened file
+ :param offset: absolute offset to the beginning of the vfile
+ :param header: vfile header
+ :return: metadata dict
+ """
+ metadata_offset = offset + header.metadata_offset
+ metadata_size = header.metadata_size
+ data_offset = offset + header.data_offset
+ data_end = offset + header.data_offset + header.data_size
+ metadata_available_space = data_offset - metadata_offset
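+    # Example (hypothetical numbers): with 500 bytes of reserved metadata
+    # space and metadata_size = 4000, the first 500 bytes are read from
+    # metadata_offset and the remaining 3500 from data_end, mirroring the
+    # split write done in VFileWriter.commit().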
+
+ fp.seek(metadata_offset)
+ if metadata_size > metadata_available_space:
+ metastr = fp.read(metadata_available_space)
+ fp.seek(data_end)
+ metastr += fp.read(metadata_size - metadata_available_space)
+ else:
+ metastr = fp.read(metadata_size)
+
+ # Verify checksum, if any
+ if hasattr(header, 'metastr_md5'):
+ metadata_checksum = header.metastr_md5
+ computed_checksum = hashlib.md5(metastr).hexdigest().encode('ascii')
+ if metadata_checksum != computed_checksum:
+ raise DiskFileBadMetadataChecksum(
+ "Metadata checksum mismatch for %s: "
+ "stored checksum='%s', computed='%s'" % (
+ header.filename, metadata_checksum, computed_checksum))
+ else:
+ # we don't support updating from the older format for now
+ pass
+
+ meta = Metadata()
+ meta.ParseFromString(metastr)
+ metadata = {}
+ for attr in meta.attrs:
+ if attr.key:
+ if six.PY2:
+ metadata[attr.key] = attr.value
+ else:
+ metadata[attr.key.decode('utf8', 'surrogateescape')] = \
+ attr.value.decode('utf8', 'surrogateescape')
+
+ return metadata
+
+
+def rename_vfile(filepath, newfilepath, logger):
+ """
+ Renames a vfile. All writes to the KV are asynchronous. If we were to make
+ a synchronous WriteBatch call, all previous writes would also be synced,
+ killing performance. See:
+ https://github.com/google/leveldb/blob/master/doc/index.md
+    Currently:
+    - update the header in place, synchronously
+    - update the KV asynchronously (delete, put)
+
+    A file can only be renamed within a KV.
+    This is currently only used by the erasure code diskfile manager.
+ """
+ # Get current file info
+ si = SwiftPathInfo.from_path(filepath)
+ full_name = si.ohash + si.filename
+
+ # Get new file info
+ si_new = SwiftPathInfo.from_path(newfilepath)
+ new_full_name = si_new.ohash + si_new.filename
+
+ # Sanity check, same KV
+ if si.socket_path != si_new.socket_path:
+ raise VFileException("attempted to rename a file to a different KV")
+
+ # rename file in place in the header
+ vf_reader = VFileReader._get_vfile(full_name, si.volume_dir,
+ si.socket_path, logger)
+ vf_offset = vf_reader.offset
+ header = vf_reader._header
+ volume_path = vf_reader.fp.name
+ vf_reader.close()
+
+ header.filename = si_new.filename
+
+ vol_fd = os.open(volume_path, os.O_WRONLY)
+ os.lseek(vol_fd, vf_offset, os.SEEK_SET)
+ os.write(vol_fd, header.pack())
+ fdatasync(vol_fd)
+ os.close(vol_fd)
+
+ # Update the KV (async)
+ try:
+ rpc.rename_object(si.socket_path, full_name, new_full_name,
+ si.partition)
+ except RpcError as e:
+ if e.code == StatusCode.NotFound:
+ raise VIOError(errno.ENOENT,
+ "No such file or directory: {}".format(full_name))
+ else:
+ raise
diff --git a/swift/obj/vfile_utils.py b/swift/obj/vfile_utils.py
new file mode 100644
index 000000000..8b9036b6c
--- /dev/null
+++ b/swift/obj/vfile_utils.py
@@ -0,0 +1,228 @@
+# Copyright (c) 2010-2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import os.path
+import pwd
+import re
+
+from swift.common.storage_policy import split_policy_string
+from swift.obj.fmgr_pb2 import VOLUME_DEFAULT, VOLUME_TOMBSTONE
+
+# regex to extract policy from path (one KV per policy)
+# TODO: use split_policy_string or similar, not re
+policy_re = re.compile(r"^objects(-\d+)?$")
+volume_name_re = re.compile(r"^v\d{7}$")
+losf_name_re = re.compile(r"^losf(-\d+)?$")
+
+
+class VIOError(IOError):
+ """
+ Exceptions are part of the interface, subclass IOError to make it easier
+ to interface with diskfile.py
+ """
+
+
+class VOSError(OSError):
+ """
+ Exceptions are part of the interface, subclass OSError to make it easier
+ to interface with diskfile.py
+ """
+
+
+class VFileException(Exception):
+ pass
+
+
+def get_volume_type(extension):
+ ext_map = {
+ ".ts": VOLUME_TOMBSTONE
+ }
+
+ return ext_map.get(extension, VOLUME_DEFAULT)
+
+
+def valid_volume_name(name):
+ """Returns True if name is a valid volume name, False otherwise"""
+    return bool(volume_name_re.match(name))
+
+
+def valid_losf_name(name):
+ """Returns True if name is a valid losf dir name, False otherwise"""
+    return bool(losf_name_re.match(name))
+
+
+# used by "fsck" to get the socket path from the volume path
+def get_socket_path_from_volume_path(volume_path):
+ volume_path = os.path.normpath(volume_path)
+ volume_dir_path, volume_name = os.path.split(volume_path)
+ losf_path, volume_dir = os.path.split(volume_dir_path)
+ mount_path, losf_dir = os.path.split(losf_path)
+ if volume_dir != "volumes" or not valid_volume_name(volume_name) or \
+ not valid_losf_name(losf_dir):
+ raise ValueError("Invalid volume path")
+
+ socket_path = os.path.join(losf_path, "rpc.socket")
+ return socket_path
+
+
+def get_mountpoint_from_volume_path(volume_path):
+ volume_path = os.path.normpath(volume_path)
+ volume_dir_path, volume_name = os.path.split(volume_path)
+ losf_path, volume_dir = os.path.split(volume_dir_path)
+ mount_path, losf_dir = os.path.split(losf_path)
+ if volume_dir != "volumes" or not valid_volume_name(volume_name) or \
+ not valid_losf_name(losf_dir):
+ raise ValueError("Invalid volume path")
+ return mount_path
+
+
+class SwiftPathInfo(object):
+ def __init__(self, type, socket_path=None, volume_dir=None,
+ policy_idx=None, partition=None, suffix=None, ohash=None,
+ filename=None):
+ self.type = type
+ self.socket_path = socket_path
+ self.volume_dir = volume_dir
+ self.policy_idx = policy_idx
+ self.partition = partition
+ self.suffix = suffix
+ self.ohash = ohash
+ self.filename = filename
+
+ # parses a swift path, returns a SwiftPathInfo instance
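+    # e.g. "/sda/objects-1/123/4d8/acbd18db4cc2f85cedef654fccc4a4d8/t.data"
+    # parses to type="file", socket_path="/sda/losf-1/rpc.socket",
+    # volume_dir="/sda/losf-1/volumes", policy_idx=1, partition="123",
+    # suffix="4d8", ohash="acbd18db4cc2f85cedef654fccc4a4d8",
+    # filename="t.data" (hypothetical object hash and file name)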
+ @classmethod
+ def from_path(cls, path):
+ count_to_type = {
+ 4: "file",
+ 3: "ohash",
+ 2: "suffix",
+ 1: "partition",
+ 0: "partitions" # "objects" directory
+ }
+
+ clean_path = os.path.normpath(path)
+ ldir = clean_path.split(os.sep)
+
+ try:
+ obj_idx = [i for i, elem in enumerate(ldir)
+ if elem.startswith("objects")][0]
+ except IndexError:
+ raise VOSError("cannot parse object directory")
+
+ elements = ldir[(obj_idx + 1):]
+ count = len(elements)
+
+ if count > 4:
+ raise VOSError("cannot parse swift file path")
+
+ _, policy = split_policy_string(ldir[obj_idx])
+ policy_idx = policy.idx
+
+ prefix = os.path.join("/", *ldir[0:obj_idx])
+ m = policy_re.match(ldir[obj_idx])
+ if not m:
+ raise VOSError(
+ "cannot parse object element of directory")
+ if m.group(1):
+ sofsdir = "losf{}".format(m.group(1))
+ else:
+ sofsdir = "losf"
+ socket_path = os.path.join(prefix, sofsdir, "rpc.socket")
+ volume_dir = os.path.join(prefix, sofsdir, "volumes")
+
+ type = count_to_type[count]
+ return cls(type, socket_path, volume_dir, policy_idx, *elements)
+
+
+class SwiftQuarantinedPathInfo(object):
+ def __init__(self, type, socket_path=None, volume_dir=None,
+ policy_idx=None, ohash=None, filename=None):
+ self.type = type
+ self.socket_path = socket_path
+ self.volume_dir = volume_dir
+ self.policy_idx = policy_idx
+ self.ohash = ohash
+ self.filename = filename
+
+ # parses a quarantined path (<device>/quarantined/objects-X or below),
+ # returns a SwiftQuarantinedPathInfo instance
+ @classmethod
+ def from_path(cls, path):
+ count_to_type = {
+ 3: "file",
+ 2: "ohash",
+ 1: "ohashes",
+ }
+
+ clean_path = os.path.normpath(path)
+ ldir = clean_path.split(os.sep)
+
+ try:
+ quar_idx = ldir.index("quarantined")
+ except ValueError:
+ raise VOSError("cannot parse quarantined path %s" %
+ path)
+
+ elements = ldir[(quar_idx + 1):]
+ count = len(elements)
+
+ if count < 1 or count > 3 or "objects" not in elements[0]:
+ raise VOSError("cannot parse quarantined path %s" %
+ path)
+
+ _, policy = split_policy_string(elements[0])
+ policy_idx = policy.idx
+
+ prefix = os.path.join("/", *ldir[:quar_idx])
+ prefix = os.path.join(prefix, elements[0].replace("objects", "losf"))
+ socket_path = os.path.join(prefix, "rpc.socket")
+ volume_dir = os.path.join(prefix, "volumes")
+
+ type = count_to_type[count]
+ return cls(type, socket_path, volume_dir, policy_idx, *elements[1:])
+
+
+def get_volume_index(volume_path):
+ """
+    Returns the volume index, given either a volume basename or a full path.
+ """
+ name = os.path.split(volume_path)[1]
+
+ if not valid_volume_name(name):
+ raise ValueError("Invalid volume name")
+
+ index = int(name[1:8])
+ return index
+
+
+# given an offset and alignment, returns the next aligned offset
+def next_aligned_offset(offset, alignment):
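+    # e.g. next_aligned_offset(8193, 4096) == 12288, while
+    # next_aligned_offset(8192, 4096) == 8192 (already aligned)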
+ if offset % alignment != 0:
+ return (offset + (alignment - offset % alignment))
+ else:
+ return offset
+
+
+def change_user(username):
+ pw = pwd.getpwnam(username)
+ uid = pw.pw_uid
+ os.setuid(uid)
diff --git a/test/functional/__init__.py b/test/functional/__init__.py
index ace552610..a4f4c5a6e 100644
--- a/test/functional/__init__.py
+++ b/test/functional/__init__.py
@@ -475,6 +475,39 @@ def _load_s3api(proxy_conf_file, swift_conf_file, **kwargs):
return test_conf_file, swift_conf_file
+def _load_losf_as_default_policy(proxy_conf_file, swift_conf_file, **kwargs):
+ """
+    Override the swift.conf [storage-policy:0] section to use a replicated
+    LOSF policy.
+
+ :param proxy_conf_file: Source proxy conf filename
+ :param swift_conf_file: Source swift conf filename
+ :returns: Tuple of paths to the proxy conf file and swift conf file to use
+ """
+ _debug('Setting configuration for default LOSF policy')
+
+ conf = ConfigParser()
+ conf.read(swift_conf_file)
+ # remove existing policy sections that came with swift.conf-sample
+ for section in list(conf.sections()):
+ if section.startswith('storage-policy'):
+ conf.remove_section(section)
+    # add new policy 0 section for a replicated LOSF policy
+ conf.add_section('storage-policy:0')
+    losf_policy_spec = {
+ 'name': 'losf-test',
+ 'policy_type': 'replication',
+ 'diskfile_module': 'egg:swift#replication.kv',
+ 'default': True
+ }
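+    # The section written to swift.conf will look like:
+    # [storage-policy:0]
+    # name = losf-test
+    # policy_type = replication
+    # diskfile_module = egg:swift#replication.kv
+    # default = True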
+
+    for k, v in losf_policy_spec.items():
+ conf.set('storage-policy:0', k, str(v))
+
+ with open(swift_conf_file, 'w') as fp:
+ conf.write(fp)
+ return proxy_conf_file, swift_conf_file
+
+
# Mapping from possible values of the variable
# SWIFT_TEST_IN_PROCESS_CONF_LOADER
# to the method to call for loading the associated configuration
@@ -483,6 +516,7 @@ def _load_s3api(proxy_conf_file, swift_conf_file, **kwargs):
conf_loaders = {
'encryption': _load_encryption,
'ec': _load_ec_as_default_policy,
+ 'losf': _load_losf_as_default_policy,
}
diff --git a/test/unit/obj/test_reconstructor.py b/test/unit/obj/test_reconstructor.py
index a528741ef..fcb3118c5 100644
--- a/test/unit/obj/test_reconstructor.py
+++ b/test/unit/obj/test_reconstructor.py
@@ -1027,7 +1027,7 @@ class TestGlobalSetupObjectReconstructor(unittest.TestCase):
raise OSError('Ow!')
self.reconstructor._reset_stats()
- with mock.patch.object(object_reconstructor, 'mkdirs', blowup_mkdirs):
+ with mock.patch.object(diskfile, 'mkdirs', blowup_mkdirs):
rmtree(self.objects_1, ignore_errors=1)
parts = []
for part_info in self.reconstructor.collect_parts():
@@ -3197,7 +3197,7 @@ class TestObjectReconstructor(BaseTestObjectReconstructor):
utils.mkdirs(os.path.dirname(datadir_path))
with mock.patch('swift.obj.reconstructor.whataremyips',
return_value=[self.ip]), \
- mock.patch('swift.obj.reconstructor.mkdirs',
+ mock.patch('swift.obj.diskfile.mkdirs',
side_effect=OSError('kaboom!')):
self.assertEqual([], list(self.reconstructor.collect_parts()))
error_lines = self.logger.get_lines_for_level('error')
diff --git a/test/unit/obj/test_replicator.py b/test/unit/obj/test_replicator.py
index b94bc8f01..f04f88321 100644
--- a/test/unit/obj/test_replicator.py
+++ b/test/unit/obj/test_replicator.py
@@ -456,7 +456,7 @@ class TestObjectReplicator(unittest.TestCase):
non_local['path'] = path
raise OSError('Ow!')
- with mock.patch.object(object_replicator, 'mkdirs', blowup_mkdirs):
+ with mock.patch.object(diskfile, 'mkdirs', blowup_mkdirs):
rmtree(self.objects, ignore_errors=1)
object_replicator.mkdirs = blowup_mkdirs
self.replicator.collect_jobs()
diff --git a/test/unit/obj/test_rpc_http.py b/test/unit/obj/test_rpc_http.py
new file mode 100644
index 000000000..08a1a4cf2
--- /dev/null
+++ b/test/unit/obj/test_rpc_http.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2010-2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for swift.obj.rpc_http"""
+
+import unittest
+from swift.obj import rpc_http, fmgr_pb2
+import mock
+
+
+class TestRpcHttp(unittest.TestCase):
+
+ def setUp(self):
+ self.socket_path = "/path/to/rpc.socket"
+ self.part_power = 18
+
+ @mock.patch("swift.obj.rpc_http.get_rpc_reply")
+ def test_vfile_list_partitions(self, m_get_rpc_reply):
+ m_conn = mock.MagicMock()
+ with mock.patch("swift.obj.rpc_http.UnixHTTPConnection",
+ return_value=m_conn):
+ rpc_http.list_partitions(self.socket_path, self.part_power)
+ arg = fmgr_pb2.ListPartitionsRequest(
+ partition_bits=self.part_power)
+ serialized_arg = arg.SerializeToString()
+ m_conn.request.assert_called_once_with('POST', '/list_partitions',
+ serialized_arg)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unit/obj/test_vfile.py b/test/unit/obj/test_vfile.py
new file mode 100644
index 000000000..66c1125a5
--- /dev/null
+++ b/test/unit/obj/test_vfile.py
@@ -0,0 +1,1232 @@
+# Copyright (c) 2010-2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for swift.obj.vfile"""
+import errno
+import fcntl
+import shutil
+import unittest
+from random import randint
+
+import six
+
+from swift.common.storage_policy import StoragePolicy
+from swift.obj.header import ObjectHeader, STATE_OBJ_FILE, \
+ MAX_OBJECT_HEADER_LEN
+from swift.obj.meta_pb2 import Metadata
+from swift.obj.vfile import VFileWriter
+from swift.obj.vfile_utils import VOSError, next_aligned_offset
+from swift.obj.rpc_http import RpcError, StatusCode
+from test.unit import patch_policies, FakeRing
+import os.path
+from swift.common import utils
+from shutil import rmtree
+from swift.obj import vfile, header, fmgr_pb2
+import tempfile
+import mock
+
+
+class TestVFileModuleMethods(unittest.TestCase):
+
+ def setUp(self):
+ self.testdir = tempfile.mkdtemp()
+ self.object_dir = os.path.join(self.testdir, 'objects')
+ utils.mkdirs(self.object_dir)
+ self.ring = FakeRing(part_power=18)
+
+ def tearDown(self):
+ rmtree(self.testdir, ignore_errors=1)
+
+ @patch_policies([StoragePolicy(0, 'zero', False,
+ object_ring=FakeRing(part_power=18)),
+ StoragePolicy(1, 'one', True,
+ object_ring=FakeRing(part_power=20))])
+ def test_vfile_listdir_partitions(self):
+
+ tests = [
+ {"path": "/srv/node/sda/objects",
+ "expected": ["/srv/node/sda/losf/rpc.socket", 18]},
+ {"path": "/sdb/objects",
+ "expected": ["/sdb/losf/rpc.socket", 18]},
+ {"path": "/sdc/objects-1",
+ "expected": ["/sdc/losf-1/rpc.socket", 20]},
+ ]
+ with mock.patch(
+ "swift.obj.vfile.rpc.list_partitions") as m_list_partitions:
+ for test in tests:
+ vfile.listdir(test["path"])
+ m_list_partitions.assert_called_once_with(*test["expected"])
+ m_list_partitions.reset_mock()
+
+ @patch_policies([StoragePolicy(0, 'zero', False,
+ object_ring=FakeRing(part_power=18)),
+ StoragePolicy(1, 'one', True,
+ object_ring=FakeRing(part_power=20))])
+ def test_vfile_listdir_partition(self):
+
+ tests = [
+ {"path": "/srv/node/sda/objects/123",
+ "expected": ["/srv/node/sda/losf/rpc.socket", 123, 18]},
+ {"path": "/sdb/objects/124",
+ "expected": ["/sdb/losf/rpc.socket", 124, 18]},
+ {"path": "/sdc/objects-1/789",
+ "expected": ["/sdc/losf-1/rpc.socket", 789, 20]},
+ ]
+ with mock.patch(
+ "swift.obj.vfile.rpc.list_partition") as m_list_partition:
+ for test in tests:
+ vfile.listdir(test["path"])
+ m_list_partition.assert_called_once_with(*test["expected"])
+ m_list_partition.reset_mock()
+
+ @patch_policies([StoragePolicy(0, 'zero', False,
+ object_ring=FakeRing(part_power=18)),
+ StoragePolicy(1, 'one', True,
+ object_ring=FakeRing(part_power=20))])
+ def test_vfile_listdir_suffix(self):
+
+ tests = [
+ {"path": "/srv/node/sda/objects/123/abc",
+ "expected": ["/srv/node/sda/losf/rpc.socket", 123, "abc", 18]},
+ {"path": "/sdb/objects/124/bcd",
+ "expected": ["/sdb/losf/rpc.socket", 124, "bcd", 18]},
+ {"path": "/sdc/objects-1/789/def",
+ "expected": ["/sdc/losf-1/rpc.socket", 789, "def", 20]},
+ ]
+ with mock.patch(
+ "swift.obj.vfile.rpc.list_suffix") as m_list_suffix:
+ for test in tests:
+ vfile.listdir(test["path"])
+ m_list_suffix.assert_called_once_with(*test["expected"])
+ m_list_suffix.reset_mock()
+
+ def test_vfile_listdir_with_invalid_path(self):
+ self.assertRaises(VOSError, vfile.listdir, "/test/no-obj/")
+
+
+@patch_policies([StoragePolicy(0, 'zero', False),
+ StoragePolicy(1, 'one', True)])
+class TestVFileWriter(unittest.TestCase):
+ @mock.patch("swift.obj.vfile.open_or_create_volume")
+ @mock.patch("swift.obj.rpc_http.get_next_offset")
+ @mock.patch("swift.obj.vfile._may_grow_volume")
+ @mock.patch("swift.obj.vfile.os.lseek")
+ @mock.patch("swift.obj.vfile.VFileWriter.__new__")
+ def test_create(self, m_cls, m_lseek, m_grow_vol, m_get_next_offset,
+ m_open_or_create_vol):
+ vfile_conf = {
+ 'volume_alloc_chunk_size': 16 * 1024,
+ 'volume_low_free_space': 8 * 1024,
+ 'metadata_reserve': 500,
+ 'max_volume_count': 1000,
+ 'max_volume_size': 10 * 1024 * 1024 * 1024,
+ }
+
+ test_sets = [
+ # small file with policy 0
+ {
+ "datadir": "/sda/objects/123/4d8/"
+ "acbd18db4cc2f85cedef654fccc4a4d8",
+ "obj_size": 421,
+
+ "volume_file": 101, # file descriptor
+ "lock_file": 102, # file descriptor
+ "volume_path": "/sda/losf/volumes/v0000001",
+ "volume_next_offset": 8192,
+
+ "expected_socket": "/sda/losf/rpc.socket",
+ "expected_ohash": "acbd18db4cc2f85cedef654fccc4a4d8",
+ "expected_partition": "123",
+ "expected_extension": None,
+ "expected_vol_dir": "/sda/losf/volumes",
+ "expected_vol_index": 1,
+ },
+ # large file with policy 1
+ {
+ "datadir": "/sdb/objects-1/456/289/"
+ "48e2e79fec9bc01d9a00e0a8fa68b289",
+ "obj_size": 2 * 1024 * 1024 * 1024,
+
+ "volume_file": 201,
+ "lock_file": 202,
+ "volume_path": "/sdb/losf-1/volumes/v0012345",
+ "volume_next_offset": 8 * 1024 * 1024 * 1024,
+
+ "expected_socket": "/sdb/losf-1/rpc.socket",
+ "expected_ohash": "48e2e79fec9bc01d9a00e0a8fa68b289",
+ "expected_partition": "456",
+ "expected_extension": None,
+ "expected_vol_dir": "/sdb/losf-1/volumes",
+ "expected_vol_index": 12345,
+ },
+ # file of unknown size, policy 1
+ {
+ "datadir": "/sdb/objects-1/789/45d/"
+ "b2f5ff47436671b6e533d8dc3614845d",
+ "obj_size": None,
+
+ "volume_file": 301,
+ "lock_file": 302,
+ "volume_path": "/sdb/losf/volumes/v9999999",
+ "volume_next_offset": 8 * 1024 * 1024 * 1024,
+
+ "expected_socket": "/sdb/losf-1/rpc.socket",
+ "expected_ohash": "b2f5ff47436671b6e533d8dc3614845d",
+ "expected_partition": "789",
+ "expected_extension": None,
+ "expected_vol_dir": "/sdb/losf-1/volumes",
+ "expected_vol_index": 9999999,
+ },
+ # empty file is ok (zero length)
+ {
+ "datadir": "/sdb/objects-1/789/45d/"
+ "b2f5ff47436671b6e533d8dc3614845d",
+ "obj_size": 0,
+
+ "volume_file": 301,
+ "lock_file": 302,
+ "volume_path": "/sdb/losf/volumes/v9999999",
+ "volume_next_offset": 8 * 1024 * 1024 * 1024,
+
+ "expected_socket": "/sdb/losf-1/rpc.socket",
+ "expected_ohash": "b2f5ff47436671b6e533d8dc3614845d",
+ "expected_partition": "789",
+ "expected_extension": None,
+ "expected_vol_dir": "/sdb/losf-1/volumes",
+ "expected_vol_index": 9999999,
+ },
+ ]
+
+ for t in test_sets:
+ # When creating a new "vfile", expect the writer to seek to the
+ # offset where we can start writing the file content.
+ # The layout of a single vfile within a volume is :
+ # |header | metadata(*) | file content | (more metadata, optional)|
+ #
+ # The first reserved metadata field size is defined in the
+ # configuration by the "metadata_reserve" parameter.
+ #
+ # So the absolute location is:
+ # next_offset for the volume + len(header) + len(metadata_reserve)
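+            # e.g. with next_offset = 8192, len(header) = 512 and
+            # metadata_reserve = 500 (hypothetical numbers), the content
+            # would start at absolute offset 8192 + 512 + 500 = 9204.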
+ expected_absolute_offset = (t["volume_next_offset"] +
+ len(ObjectHeader()) +
+ vfile_conf["metadata_reserve"])
+ expected_relative_offset = (len(ObjectHeader()) +
+ vfile_conf["metadata_reserve"])
+
+ m_open_or_create_vol.return_value = (t["volume_file"],
+ t["lock_file"],
+ t["volume_path"])
+
+ m_get_next_offset.return_value = t["volume_next_offset"]
+
+ VFileWriter.create(t["datadir"], t["obj_size"], vfile_conf, None)
+ ordered_args = m_open_or_create_vol.call_args[0]
+ named_args = m_open_or_create_vol.call_args[1]
+ self.assertEqual(ordered_args[0], t["expected_socket"])
+ self.assertEqual(ordered_args[1], t["expected_partition"])
+ self.assertEqual(ordered_args[2], t["expected_extension"])
+ self.assertEqual(ordered_args[3], t["expected_vol_dir"])
+ self.assertEqual(named_args["size"], t["obj_size"])
+
+ m_get_next_offset.assert_called_once_with(t["expected_socket"],
+ t["expected_vol_index"])
+
+ ordered_args = m_grow_vol.call_args[0]
+ self.assertEqual(ordered_args[0], t["volume_file"])
+ self.assertEqual(ordered_args[1], t["volume_next_offset"])
+ self.assertEqual(ordered_args[2], t["obj_size"])
+
+ m_lseek.assert_called_with(t["volume_file"],
+ expected_absolute_offset, os.SEEK_SET)
+
+ args = m_cls.call_args[0]
+ self.assertEqual(args[1], t["datadir"])
+ self.assertEqual(args[2], t["volume_file"])
+ self.assertEqual(args[3], t["lock_file"])
+ self.assertEqual(args[4], t["expected_vol_dir"])
+ self.assertEqual(args[5], t["expected_vol_index"])
+ header = args[6]
+ self.assertEqual(header.ohash, t["expected_ohash"])
+ self.assertEqual(header.data_offset, expected_relative_offset)
+ # we have not written anything yet so data_size should be zero
+ self.assertEqual(header.data_size, 0)
+ # state should be STATE_OBJ_FILE (not quarantined)
+ self.assertEqual(header.state, STATE_OBJ_FILE)
+
+ for test_m in [m_lseek, m_grow_vol, m_get_next_offset,
+ m_open_or_create_vol]:
+ test_m.reset_mock()
+
+ with self.assertRaises(VOSError):
+ VFileWriter.create("/foo", 123, vfile_conf, None)
+
+ with self.assertRaises(VOSError):
+ VFileWriter.create("/mnt/objects/"
+ "b2f5ff47436671b6e533d8dc3614845d", 123,
+ vfile_conf, None)
+
+ with self.assertRaises(VOSError):
+ VFileWriter.create("/mnt/objects/123/"
+ "b2f5ff47436671b6e533d8dc3614845d", 123,
+ vfile_conf, None)
+
+ # negative size
+ with self.assertRaises(VOSError):
+ VFileWriter.create("/mnt/objects/123/abc/"
+ "b2f5ff47436671b6e533d8dc3614845d", -1,
+ vfile_conf, None)
+
+ @mock.patch("swift.obj.vfile.open_or_create_volume")
+ @mock.patch("swift.obj.rpc_http.get_next_offset")
+ @mock.patch("swift.obj.vfile.os.close")
+ def test_create_rpc_error(self, m_os_close, m_get_next_offset,
+ m_open_or_create_vol):
+ vfile_conf = {
+ 'volume_alloc_chunk_size': 16 * 1024,
+ 'volume_low_free_space': 8 * 1024,
+ 'metadata_reserve': 500,
+ 'max_volume_count': 1000,
+ 'max_volume_size': 10 * 1024 * 1024 * 1024,
+ }
+
+ datadir = "/sda/objects/123/4d8/acbd18db4cc2f85cedef654fccc4a4d8"
+ obj_size = 0
+ volume_file = 101
+ lock_file = 102
+ volume_path = "/sda/losf/volumes/v0000001"
+
+ m_open_or_create_vol.return_value = (volume_file, lock_file,
+ volume_path)
+ m_get_next_offset.side_effect = RpcError(StatusCode.Unavailable,
+ "Unavailable")
+
+        with self.assertRaises(RpcError):
+            VFileWriter.create(datadir, obj_size, vfile_conf, None)
+        close_args = m_os_close.call_args_list
+        expected_args = [mock.call(volume_file), mock.call(lock_file)]
+        self.assertEqual(close_args, expected_args)
+
+ def _get_vfile_writer(self, offset=0, metadata_reserve=500,
+ ohash="d41d8cd98f00b204e9800998ecf8427e"):
+ """
+ returns a fake VFileWriter, backed by a TemporaryFile
+ :param offset: absolute offset in volume where the file starts
+ :param metadata_reserve: space to reserve for metadata between header
+ and file content. (if insufficient, the remaining serialized metadata
+ will be written after the file content)
+ :param ohash: object hash
+ :return: a VFileWriter instance
+ """
+ volume_file = tempfile.TemporaryFile()
+ lock_file = 102 # dummy fd
+ volume_dir = "/sda/losf/volumes"
+ volume_index = randint(1, 9999999)
+ datadir = "/sda/objects/123/{}/{}".format(ohash[-3:], ohash)
+ header = ObjectHeader(version=vfile.OBJECT_HEADER_VERSION)
+ header.ohash = ohash
+ header.policy_idx = 0
+ header.data_offset = len(header) + metadata_reserve
+ header.data_size = 0
+ header.state = STATE_OBJ_FILE
+ logger = None
+
+ volume_fd = volume_file.fileno()
+ # seek to where file data would start as expected by caller
+ os.lseek(volume_fd, offset + header.data_offset, os.SEEK_SET)
+
+ return (vfile.VFileWriter(datadir, volume_fd, lock_file, volume_dir,
+ volume_index, header, offset, logger),
+ volume_file)
+
+ @mock.patch("swift.obj.vfile.fdatasync")
+ @mock.patch("swift.obj.vfile.rpc.register_object")
+ def test_commit(self, m_register, m_fdatasync):
+ default_metadata = {"Content-Length": "92",
+ "name": "/AUTH_test/foo/truc",
+ "Content-Type": "application/octet-stream",
+ "ETag": "89408008f2585c957c031716600d5a80",
+ "X-Timestamp": "1560866451.10093",
+ "X-Object-Meta-Mtime": "1523272228.000000"}
+ test_sets = [
+ # empty file, volume offset 0
+ {
+ "offset": 0,
+ "metadata_reserve": 500,
+ "ohash": "d41d8cd98f00b204e9800998ecf8427e",
+ "filename": "1560866451.10093.data",
+ "metadata": default_metadata,
+ "data_bytes": 0, # file content length
+ },
+ # non-zero random file size with random offset
+ {
+ "offset": 4096 * randint(0, 2621440), # up to 10GB
+ "metadata_reserve": 500,
+ "ohash": "d41d8cd98f00b204e9800998ecf8427e",
+ "filename": "1560866451.10093.data",
+ "metadata": default_metadata,
+ "data_bytes": randint(1, 1024 * 1024) # up to 1MB
+ },
+ {
+ "offset": 4096 * randint(0, 2621440), # up to 10GB
+ "metadata_reserve": 10,
+ "ohash": "d41d8cd98f00b204e9800998ecf8427e",
+ "filename": "1560866451.10093.data",
+ "metadata": default_metadata,
+ "data_bytes": randint(1, 1024 * 1024) # up to 1MB
+ },
+ ]
+
+ for t in test_sets:
+ vfile_writer, vol_file = self._get_vfile_writer(
+ offset=t["offset"],
+ metadata_reserve=t["metadata_reserve"],
+ )
+ if t["data_bytes"]:
+ os.write(vfile_writer.fd, b"x" * t["data_bytes"])
+ vfile_writer.commit(t["filename"], t["metadata"])
+
+ # check header
+ vol_file.seek(t["offset"])
+ serialized_header = vol_file.read(MAX_OBJECT_HEADER_LEN)
+ header = ObjectHeader.unpack(serialized_header)
+ self.assertEqual(header.version, vfile.OBJECT_HEADER_VERSION)
+ self.assertEqual(header.ohash, "d41d8cd98f00b204e9800998ecf8427e")
+ self.assertEqual(header.filename, t["filename"])
+ self.assertEqual(header.data_size, t["data_bytes"])
+ self.assertEqual(header.state, STATE_OBJ_FILE)
+
+ # check swift metadata
+ vol_file.seek(t["offset"] + header.metadata_offset)
+ # if metadata couldn't fit in the reserved space, we should find
+ # the rest after the file content.
+ if header.metadata_size <= t["metadata_reserve"]:
+ serialized_metadata = vol_file.read(header.metadata_size)
+ else:
+ reserved_bytes = header.data_offset - header.metadata_offset
+ serialized_metadata = vol_file.read(reserved_bytes)
+ vol_file.seek(
+ t["offset"] + header.data_offset + header.data_size)
+ serialized_metadata += vol_file.read(header.metadata_size -
+ t["metadata_reserve"])
+ ondisk_meta = Metadata()
+ ondisk_meta.ParseFromString(serialized_metadata)
+            # Metadata() is a protobuf message instance with a single
+            # field, a repeated Attr; an Attr has a key and a value.
+ self.assertEqual(len(ondisk_meta.attrs), len(t["metadata"]))
+ for attr in ondisk_meta.attrs:
+ if six.PY2:
+ self.assertEqual(attr.value, t["metadata"][attr.key])
+ else:
+ self.assertEqual(
+ attr.value.decode("utf8", "surrogateescape"),
+ t["metadata"][attr.key.decode("utf8",
+ "surrogateescape")])
+
+ # check data has been flushed to disk
+ m_fdatasync.assert_called_once_with(vol_file.fileno())
+
+ # check call to register the file to the index server
+ m_register.assert_called_once()
+ socket_path, full_name, vol_idx, offset, end_offset = \
+ m_register.call_args_list[0][0]
+
+ expected_full_name = "{}{}".format(t["ohash"], t["filename"])
+ self.assertEqual(socket_path, vfile_writer.socket_path)
+ self.assertEqual(full_name, expected_full_name)
+ self.assertEqual(vol_idx, vfile_writer.volume_index)
+ # end offset should be the next 4k aligned offset
+ volume_length = os.lseek(vol_file.fileno(), 0, os.SEEK_END)
+ expected_end_offset = next_aligned_offset(volume_length, 4096)
+ self.assertEqual(end_offset, expected_end_offset)
+
+ m_fdatasync.reset_mock()
+ m_register.reset_mock()
+
+ def test_commit_bad_file(self):
+ vfile_writer, _ = self._get_vfile_writer()
+ vfile_writer.fd = -1
+ self.assertRaises(vfile.VIOError, vfile_writer.commit, "foo", {})
+
+ def test_commit_no_name(self):
+ vfile_writer, _ = self._get_vfile_writer()
+ self.assertRaises(vfile.VIOError, vfile_writer.commit, "", {})
+
+ @mock.patch("swift.obj.rpc_http.register_object")
+ def test_commit_register_fail(self, m_register_object):
+ """
+        Check that the object header is erased if commit() fails to register
+ the object on the index server.
+ """
+ m_register_object.side_effect = RpcError("failed to register object",
+ StatusCode.Unavailable)
+ offset = 4096
+ metadata_reserve = 500
+ vfile_writer, vol_file = self._get_vfile_writer(
+ offset=offset, metadata_reserve=metadata_reserve)
+ content = b"dummy data"
+ os.write(vfile_writer.fd, content)
+
+ filename = "dummy-filename"
+ metadata = {"dummy": "metadata"}
+
+ self.assertRaises(RpcError, vfile_writer.commit, filename, metadata)
+
+ # check the header was erased
+ vol_file.seek(offset)
+ serialized_header = vol_file.read(MAX_OBJECT_HEADER_LEN)
+ self.assertEqual(serialized_header, b"\x00" * MAX_OBJECT_HEADER_LEN)
+
+ # check we did not write past the header by checking the file data
+ data_offset = (offset +
+ len(ObjectHeader(
+ version=header.OBJECT_HEADER_VERSION)) +
+ metadata_reserve)
+ vol_file.seek(data_offset)
+ data = vol_file.read(len(content))
+ self.assertEqual(data, content)
+
+ @mock.patch("swift.obj.vfile.open", new_callable=mock.mock_open)
+ @mock.patch("swift.obj.vfile.fcntl.flock")
+ @mock.patch("swift.obj.vfile.get_next_volume_index")
+ @mock.patch("swift.obj.vfile.os.open")
+ def test__create_new_lock_file(self, m_os_open, m_get_next_v_idx,
+ m_flock, m_open):
+ volume_dir = "/sda/losf/volumes"
+ logger = None
+ m_get_next_v_idx.return_value = 1
+
+ expected_vol_creation_lock = os.path.join(volume_dir,
+ "volume_creation.lock")
+
+ m_creation_lock = mock.mock_open().return_value
+ m_open.side_effect = [m_creation_lock]
+
+ m_volume_lock = mock.Mock()
+ m_os_open.return_value = m_volume_lock
+
+ index, next_lock_path, lock_file = vfile._create_new_lock_file(
+ volume_dir, logger)
+ m_open.assert_called_once_with(expected_vol_creation_lock, "w")
+ m_os_open.assert_called_once_with(
+ "/sda/losf/volumes/v0000001.writelock",
+ os.O_CREAT | os.O_EXCL | os.O_WRONLY,
+ 0o600)
+
+ # Check locking order
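+        # (the creation lock is taken blocking with LOCK_EX, the
+        # per-volume lock non-blocking with LOCK_EX | LOCK_NB: a freshly
+        # created volume is not expected to be locked by anyone else)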
+ calls = m_flock.call_args_list
+ self.assertEqual(calls[0], mock.call(m_creation_lock, fcntl.LOCK_EX))
+ self.assertEqual(calls[1], mock.call(m_volume_lock,
+ fcntl.LOCK_EX | fcntl.LOCK_NB))
+
+ self.assertEqual(index, 1)
+ self.assertEqual(next_lock_path,
+ "/sda/losf/volumes/v0000001.writelock")
+ self.assertEqual(lock_file, m_volume_lock)
+
+ def test_get_next_volume_index(self):
+ tests = [
+ {
+ "dir_entries": [],
+ "expected_next_index": 1,
+ },
+ {
+ "dir_entries": ["invalid-file"],
+ "expected_next_index": 1,
+ },
+ {
+ "dir_entries": [
+ "v0000001",
+ "v0000001.writelock",
+ "v0000002",
+ "v0000002.writelock",
+ ],
+ "expected_next_index": 3,
+ },
+ # the volume is gone. shouldn't reuse the index anyway
+ {
+ "dir_entries": [
+ "v0000001",
+ "v0000001.writelock",
+ "v0000002.writelock",
+ ],
+ "expected_next_index": 3,
+ },
+ {
+ "dir_entries": [
+ "v0000002",
+ "v0000002.writelock",
+ "v0000003",
+ "v0000003.writelock",
+ "v0000005",
+ "v0000005.writelock",
+ ],
+ "expected_next_index": 1,
+ },
+ {
+ "dir_entries": ["invalid.txt"],
+ "expected_next_index": 1,
+ },
+ {
+ "dir_entries": [
+ "v0000001",
+ "v0000001.writelock",
+ "invalid-name",
+ "v0000003",
+ "v0000003.writelock",
+ ],
+ "expected_next_index": 2,
+ },
+ # the lock file is gone. shouldn't reuse the index anyway
+ {
+ "dir_entries": [
+ "v0000001",
+ ],
+ "expected_next_index": 2,
+ },
+ ]
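+        # expected behaviour: return the smallest index for which
+        # neither the volume file nor its .writelock exists, ignoring
+        # entries that do not parse as volume or lock file names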
+
+ for t in tests:
+ tempdir = tempfile.mkdtemp()
+ try:
+ for filename in t["dir_entries"]:
+ filepath = os.path.join(tempdir, filename)
+ open(filepath, "w").close()
+ result = vfile.get_next_volume_index(tempdir)
+ self.assertEqual(result, t["expected_next_index"])
+ finally:
+ shutil.rmtree(tempdir)
+
+ @mock.patch("swift.obj.vfile.open_writable_volume")
+ @mock.patch("swift.obj.vfile.create_writable_volume")
+ @mock.patch("swift.obj.vfile.os.makedirs")
+ def test_open_or_create_volume_available(self, m_makedirs, m_create_vol,
+ m_open_vol):
+ socket_path = "/sda/losf/rpc.socket"
+ partition = "123"
+ extension = ".data"
+ volume_dir = "/sda/losf/volumes"
+ conf = {"dummy": "conf"}
+ logger = None
+
+ # open_writable_volume finds an available volume
+ m_open_vol.return_value = 321, 322, "/path/to/vol"
+
+ vol_file, lock_file, vol_path = vfile.open_or_create_volume(
+ socket_path, partition, extension, volume_dir, conf, logger)
+
+ m_open_vol.assert_called_once_with(socket_path, partition, extension,
+ volume_dir, conf, logger)
+ m_create_vol.assert_not_called()
+
+ self.assertEqual((vol_file, lock_file, vol_path),
+ (321, 322, "/path/to/vol"))
+
+ @mock.patch("swift.obj.vfile.open_writable_volume")
+ @mock.patch("swift.obj.vfile.create_writable_volume")
+ @mock.patch("swift.obj.vfile.os.makedirs")
+ def test_open_or_create_volume_create(self, m_makedirs, m_create_vol,
+ m_open_vol):
+ socket_path = "/sda/losf/rpc.socket"
+ partition = "123"
+ extension = None
+ volume_dir = "/sda/losf/volumes"
+ conf = {}
+ logger = None
+
+        # open_writable_volume does not return a volume, but
+        # create_writable_volume does.
+ m_open_vol.return_value = None, None, None
+ m_create_vol.return_value = 543, 544, "/path/to/vol"
+
+ vol_file, lock_file, vol_path = vfile.open_or_create_volume(
+ socket_path, partition, extension, volume_dir, conf, logger)
+
+ m_open_vol.assert_called_once_with(socket_path, partition, extension,
+ volume_dir, conf, logger)
+ self.assertEqual((vol_file, lock_file, vol_path),
+ (543, 544, "/path/to/vol"))
+
+ @mock.patch("swift.obj.vfile.open_writable_volume")
+ @mock.patch("swift.obj.vfile.create_writable_volume")
+ @mock.patch("swift.obj.vfile.os.makedirs")
+ def test_open_or_create_volume_fail(self, m_makedirs, m_create_vol,
+ m_open_vol):
+ socket_path = "/sda/losf/rpc.socket"
+ partition = "123"
+ extension = None
+ volume_dir = "/sda/losf/volumes"
+ conf = {}
+ logger = None
+
+ # Cannot find an existing volume, then creating a new one fails
+ # because we have exceeded the volume count.
+ m_open_vol.return_value = None, None, None
+ m_create_vol.side_effect = OSError(errno.EDQUOT, "max vol count")
+
+ try:
+ vfile.open_or_create_volume(socket_path, partition, extension,
+ volume_dir, conf, logger)
+ except vfile.VOSError as exc:
+ self.assertEqual(str(exc), "[Errno 28] Failed to open or create"
+ " a volume for writing: max vol count")
+ m_open_vol.assert_called_once()
+ m_create_vol.assert_called_once()
+
+ # Similar failure but with an exception that has no strerror
+ m_open_vol.reset_mock()
+ m_create_vol.reset_mock()
+ m_create_vol.side_effect = Exception("Dummy exception")
+ try:
+ vfile.open_or_create_volume(socket_path, partition, extension,
+ volume_dir, conf, logger)
+ except vfile.VOSError as exc:
+ self.assertEqual(str(exc), "[Errno 28] Failed to open or create"
+ " a volume for writing: Unknown error")
+ m_open_vol.assert_called_once()
+ m_create_vol.assert_called_once()
+
+ @mock.patch("swift.obj.vfile.rpc.list_volumes")
+ def test_create_writable_volume_max_count_exceeded(self, m_list_volumes):
+ socket_path = "/sda/losf/rpc.socket"
+ partition = "123"
+ extension = None
+ volume_dir = "/sda/losf/volumes"
+ conf = {"max_volume_count": 1}
+ logger = None
+
+ m_list_volumes.return_value = ["v1"]
+ try:
+ vfile.create_writable_volume(socket_path, partition, extension,
+ volume_dir, conf, logger)
+ except vfile.VOSError as exc:
+ self.assertEqual(str(exc),
+ "[Errno 122] Maximum count of volumes reached for"
+ " partition: 123 type: 0")
+
+ @mock.patch("swift.obj.vfile.rpc.list_volumes")
+ @mock.patch("swift.obj.vfile.os.makedirs")
+ def test_create_writable_volume_makedirs_exceptions(self, m_os_makedirs,
+ m_list_volumes):
+ socket_path = "/sda/losf/rpc.socket"
+ partition = "123"
+ extension = None
+ volume_dir = "/sda/losf/volumes"
+ conf = {"max_volume_count": 1}
+ logger = None
+
+ m_list_volumes.return_value = ["v1"]
+ m_os_makedirs.side_effect = OSError(errno.ENOSPC, "No space")
+
+ self.assertRaises(OSError, vfile.create_writable_volume, socket_path,
+ partition, extension, volume_dir, conf, logger)
+
+ m_os_makedirs.side_effect = vfile.VFileException("test error")
+ self.assertRaises(VOSError, vfile.create_writable_volume, socket_path,
+ partition, extension, volume_dir, conf, logger)
+
+ @mock.patch("swift.obj.vfile.get_next_volume_index")
+ @mock.patch("swift.obj.vfile.rpc.list_volumes")
+ @mock.patch("swift.obj.vfile.os.makedirs")
+ @mock.patch("swift.obj.vfile.fcntl.flock")
+ @mock.patch("swift.obj.vfile._allocate_volume_space")
+ @mock.patch("swift.obj.vfile.fsync")
+ @mock.patch("swift.obj.vfile.fsync_dir")
+ @mock.patch("swift.obj.vfile.rpc.register_volume")
+ def test_create_writable_volume(self, m_register_volume,
+ m_fsync_dir, m_fsync,
+ m_allocate_volume_space,
+ m_flock, m_os_makedirs,
+ m_list_volumes,
+ m_get_next_volume_index):
+ socket_path = "/sda/losf/rpc.socket"
+ partition = "123"
+ extension = None
+ conf = {"max_volume_count": 1, "volume_alloc_chunk_size": 50}
+ logger = None
+
+ m_list_volumes.return_value = []
+ next_vol_idx = 1
+ m_get_next_volume_index.return_value = next_vol_idx
+
+ tempdir = tempfile.mkdtemp()
+ volume_dir = tempdir
+
+ try:
+ vfile.create_writable_volume(socket_path, partition, extension,
+ volume_dir, conf, logger)
+
+ # are the expected files here?
+ expected = ["v0000001", "v0000001.writelock",
+ "volume_creation.lock"]
+ files = os.listdir(tempdir)
+ files.sort()
+ self.assertEqual(files, expected)
+
+ # have the locks been taken?
+ # TODO: how to properly assert both locks? (one for the global
+ # volume creation lock, another for the volume itself)
+ self.assertEqual(m_flock.call_count, 2)
+
+ # check the volume header is correct
+ with open(os.path.join(tempdir, "v0000001"), 'rb') as vol:
+ vol_header = header.read_volume_header(vol)
+ self.assertEqual(vol_header.volume_idx, next_vol_idx)
+ self.assertEqual(vol_header.partition, int(partition))
+ self.assertEqual(vol_header.first_obj_offset, 4096)
+ self.assertEqual(vol_header.type,
+ fmgr_pb2.VOLUME_DEFAULT) # extension was None
+ self.assertEqual(vol_header.state, fmgr_pb2.STATE_RW)
+
+ # check volume registration to the index server
+ m_register_volume.assert_called_once_with(socket_path,
+ str(partition),
+ fmgr_pb2.VOLUME_DEFAULT,
+ next_vol_idx, 4096,
+ fmgr_pb2.STATE_RW)
+ finally:
+ shutil.rmtree(tempdir)
+ m_flock.reset_mock()
+ m_register_volume.reset_mock()
+
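+        # second pass: a ".ts" extension should create a tombstone
+        # volume, and with 100 volumes out of a maximum of 101, creation
+        # is still allowed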
+ partition = "456"
+ extension = ".ts"
+ conf = {"max_volume_count": 101, "volume_alloc_chunk_size": 50}
+ logger = None
+ m_list_volumes.return_value = ["x"] * 100
+ next_vol_idx = 101
+ m_get_next_volume_index.return_value = next_vol_idx
+
+ tempdir = tempfile.mkdtemp()
+ volume_dir = tempdir
+ try:
+ vfile.create_writable_volume(socket_path, partition, extension,
+ volume_dir, conf, logger)
+
+ # are the expected files here?
+ expected = ["v0000101", "v0000101.writelock",
+ "volume_creation.lock"]
+ files = os.listdir(tempdir)
+ files.sort()
+ self.assertEqual(files, expected)
+
+ # have the locks been taken?
+ # TODO: how to properly assert both locks? (one for the global
+ # volume creation lock, another for the volume itself)
+ self.assertEqual(m_flock.call_count, 2)
+
+ # check the volume header is correct
+ with open(os.path.join(tempdir, "v0000101"), 'rb') as vol:
+ vol_header = header.read_volume_header(vol)
+ self.assertEqual(vol_header.volume_idx, next_vol_idx)
+ self.assertEqual(vol_header.partition, int(partition))
+ self.assertEqual(vol_header.first_obj_offset, 4096)
+ self.assertEqual(vol_header.type, fmgr_pb2.VOLUME_TOMBSTONE)
+ self.assertEqual(vol_header.state, fmgr_pb2.STATE_RW)
+
+ # check volume registration to the index server
+ m_register_volume.assert_called_once_with(
+ socket_path, str(partition), fmgr_pb2.VOLUME_TOMBSTONE,
+ next_vol_idx, 4096, fmgr_pb2.STATE_RW)
+ finally:
+ shutil.rmtree(tempdir)
+ m_flock.reset_mock()
+ m_register_volume.reset_mock()
+
+ @mock.patch("swift.obj.vfile.get_next_volume_index")
+ @mock.patch("swift.obj.vfile.rpc.list_volumes")
+ @mock.patch("swift.obj.vfile.os.makedirs")
+ @mock.patch("swift.obj.vfile.fcntl.flock")
+ @mock.patch("swift.obj.vfile._allocate_volume_space")
+ @mock.patch("swift.obj.vfile.fsync")
+ @mock.patch("swift.obj.vfile.fsync_dir")
+ @mock.patch("swift.obj.vfile.rpc.register_volume")
+ @mock.patch("swift.obj.vfile.os.close")
+ def test_create_writable_volume_flock_error(self, m_os_close,
+ m_register_volume,
+ m_fsync_dir, m_fsync,
+ m_allocate_volume_space,
+ m_flock, m_os_makedirs,
+ m_list_volumes,
+ m_get_next_volume_index):
+ socket_path = "/sda/losf/rpc.socket"
+ partition = 123
+ extension = None
+ conf = {"max_volume_count": 1, "volume_alloc_chunk_size": 50}
+ logger = None
+
+ m_list_volumes.return_value = []
+ next_vol_idx = 1
+ m_get_next_volume_index.return_value = next_vol_idx
+
+ tempdir = tempfile.mkdtemp()
+ volume_dir = tempdir
+
+ m_flock.side_effect = [True, IOError(errno.EACCES, "cannot lock")]
+
+ with self.assertRaises(IOError):
+ vfile.create_writable_volume(socket_path, partition, extension,
+ volume_dir, conf, logger)
+
+ try:
+            # how to assert close() arguments?
+ self.assertEqual(m_os_close.call_count, 1)
+ # check that the volume and its lock file have been removed
+ self.assertEqual(os.listdir(tempdir), ['volume_creation.lock'])
+ m_register_volume.assert_not_called()
+ finally:
+ shutil.rmtree(tempdir)
+
+ @mock.patch("swift.obj.vfile.get_next_volume_index")
+ @mock.patch("swift.obj.vfile.rpc.list_volumes")
+ @mock.patch("swift.obj.vfile.os.makedirs")
+ @mock.patch("swift.obj.vfile.fcntl.flock")
+ @mock.patch("swift.obj.vfile._allocate_volume_space")
+ @mock.patch("swift.obj.vfile.fsync")
+ @mock.patch("swift.obj.vfile.fsync_dir")
+ @mock.patch("swift.obj.vfile.rpc.register_volume")
+ @mock.patch("swift.obj.vfile.os.close")
+ def test_create_writable_volume_rpc_error(self, m_os_close,
+ m_register_volume,
+ m_fsync_dir, m_fsync,
+ m_allocate_volume_space,
+ m_flock, m_os_makedirs,
+ m_list_volumes,
+ m_get_next_volume_index):
+ socket_path = "/sda/losf/rpc.socket"
+ partition = 123
+ extension = None
+ conf = {"max_volume_count": 1, "volume_alloc_chunk_size": 50}
+ logger = None
+
+ m_list_volumes.return_value = []
+ next_vol_idx = 1
+ m_get_next_volume_index.return_value = next_vol_idx
+
+ tempdir = tempfile.mkdtemp()
+ volume_dir = tempdir
+
+ m_register_volume.side_effect = RpcError(StatusCode.InvalidArgument,
+ "volume exists")
+
+ try:
+ vfile.create_writable_volume(socket_path, partition, extension,
+ volume_dir, conf, logger)
+ except RpcError:
+ pass
+
+ try:
+            # how to assert close() arguments?
+ self.assertEqual(m_os_close.call_count, 2)
+ # check that the volume and its lock file have been removed
+ self.assertEqual(os.listdir(tempdir), ['volume_creation.lock'])
+ finally:
+ shutil.rmtree(tempdir)
+
+ @mock.patch("swift.obj.vfile.rpc.list_volumes")
+ def test_open_writable_volume_no_volume(self, m_list_volumes):
+ socket_path = "/path/to/rpc.socket"
+ volume_dir = "/path/to/volumes"
+        partition = 123
+ extension = None
+ conf = {}
+ logger = None
+
+        # the index server returns no volume at all for this partition
+        m_list_volumes.return_value = []
+
+ vol_file, lock_file, volume_file_path = vfile.open_writable_volume(
+ socket_path, partition, extension, volume_dir, conf, logger)
+ self.assertEqual((vol_file, lock_file, volume_file_path),
+ (None, None, None))
+
+ @mock.patch("swift.obj.vfile.rpc.list_volumes")
+ def test_open_writable_volume_no_rw_volume(self, m_list_volumes):
+ socket_path = "/path/to/rpc.socket"
+ volume_dir = "/path/to/volumes"
+        partition = 123
+ extension = None
+ conf = {}
+ logger = None
+
+        # the index server returns a volume for the partition, but it is
+        # not in a writable state
+        rpc_reply = fmgr_pb2.ListVolumesReply()
+        volume = fmgr_pb2.Volume(volume_index=1,
+                                 volume_type=fmgr_pb2.VOLUME_DEFAULT,
+                                 volume_state=fmgr_pb2.STATE_COMPACTION_TARGET,
+                                 partition=123)
+        rpc_reply.volumes.append(volume)
+        m_list_volumes.return_value = rpc_reply.volumes
+
+ vol_file, lock_file, volume_file_path = vfile.open_writable_volume(
+ socket_path, partition, extension, volume_dir, conf, logger)
+ self.assertEqual((vol_file, lock_file, volume_file_path),
+ (None, None, None))
+
+ @mock.patch("swift.obj.vfile.open_volume")
+ @mock.patch("swift.obj.vfile.rpc.list_volumes")
+ def test_open_writable_volume(self, m_list_volumes, m_open_volume):
+ socket_path = "/path/to/rpc.socket"
+ volume_dir = "/path/to/volumes"
+ conf = {"max_volume_size": 100 * 1024 * 1024}
+ logger = None
+
+ test_sets = [
+ {
+ # partition and extension have no impact on the test as we
+ # mock the RPC call that returns a list of volumes based on
+ # partition, and extension.
+ "partition": 123,
+ "extension": None,
+ "volumes": [
+ {
+ "index": 1,
+ "partition": 123,
+ "type": fmgr_pb2.VOLUME_DEFAULT,
+ "state": fmgr_pb2.STATE_RW,
+ },
+ ],
+ "expected_vol_path": "/path/to/volumes/v0000001"
+
+ },
+ {
+ "partition": 123,
+ "extension": None,
+ "volumes": [
+ {
+ "index": 1,
+ "partition": 123,
+ "type": fmgr_pb2.VOLUME_DEFAULT,
+ "state": fmgr_pb2.STATE_COMPACTION_SRC,
+ },
+ {
+ "index": 2,
+ "partition": 123,
+ "type": fmgr_pb2.VOLUME_DEFAULT,
+ "state": fmgr_pb2.STATE_RW,
+ }
+ ],
+ "expected_vol_path": "/path/to/volumes/v0000002"
+
+ },
+ {
+ "partition": 123,
+ "extension": None,
+ "volumes": [
+ {
+ "index": 1,
+ "partition": 123,
+ "type": fmgr_pb2.VOLUME_DEFAULT,
+ "state": fmgr_pb2.STATE_COMPACTION_SRC,
+ },
+ {
+ "index": 2,
+ "partition": 123,
+ "type": fmgr_pb2.VOLUME_DEFAULT,
+ "state": fmgr_pb2.STATE_COMPACTION_TARGET,
+ },
+ {
+ "index": 999,
+ "partition": 123,
+ "type": fmgr_pb2.VOLUME_DEFAULT,
+ "state": fmgr_pb2.STATE_RW,
+ },
+ {
+ "index": 1234,
+ "partition": 123,
+ "type": fmgr_pb2.VOLUME_DEFAULT,
+ "state": fmgr_pb2.STATE_COMPACTION_SRC,
+ },
+ ],
+ "expected_vol_path": "/path/to/volumes/v0000999"
+
+ },
+ ]
+
+ for t in test_sets:
+ # build the RPC reply
+ rpc_reply = fmgr_pb2.ListVolumesReply()
+ for vol in t["volumes"]:
+ volume = fmgr_pb2.Volume(volume_index=vol["index"],
+ volume_type=vol["type"],
+ volume_state=vol["state"],
+ partition=vol["partition"])
+ rpc_reply.volumes.append(volume)
+
+ m_list_volumes.return_value = rpc_reply.volumes
+
+ # files that would be returned by open_volume
+ m_vol_file = mock.Mock()
+ m_lock_file = mock.Mock()
+ m_open_volume.return_value = (m_vol_file, m_lock_file)
+
+ vol_file, lock_file, volume_file_path = vfile.open_writable_volume(
+ socket_path, t["partition"], t["extension"], volume_dir, conf,
+ logger)
+ m_open_volume.assert_called_once_with(t["expected_vol_path"])
+ self.assertEqual((vol_file, lock_file, volume_file_path),
+ (m_vol_file, m_lock_file, t["expected_vol_path"]))
+ m_open_volume.reset_mock()
+
+ @mock.patch("swift.obj.vfile.os.open")
+ @mock.patch("swift.obj.vfile.fcntl.flock")
+ def test_open_volume(self, m_flock, m_os_open):
+ volume_path = "/path/to/volumes/v0000001"
+ expected_lock_path = "/path/to/volumes/v0000001.writelock"
+
+ m_lock_file = mock.Mock()
+ m_volume_file = mock.Mock()
+ m_os_open.side_effect = [m_lock_file, m_volume_file]
+ vol_fd, lock_fd = vfile.open_volume(volume_path)
+ # assert the volume lock file has been opened and locked
+ args = m_os_open.call_args_list[0]
+ self.assertEqual(args, mock.call(expected_lock_path, os.O_WRONLY))
+ m_flock.assert_called_once_with(m_lock_file, fcntl.LOCK_EX |
+ fcntl.LOCK_NB)
+ # expect the second call to open to be for the volume itself
+ args = m_os_open.call_args_list[1]
+ self.assertEqual(args, mock.call(volume_path, os.O_WRONLY))
+
+ self.assertEqual((vol_fd, lock_fd), (m_volume_file, m_lock_file))
+
+ @mock.patch("swift.obj.vfile.os.open")
+ @mock.patch("swift.obj.vfile.fcntl.flock")
+ @mock.patch("swift.obj.vfile.os.close")
+ def test_open_volume_missing_lock_file(self, m_os_close, m_flock,
+ m_os_open):
+ """If the lock file is missing, it should be created"""
+ def fake_os_open(path, flags, mode=None):
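+            # the volume itself always opens; a plain open of the lock
+            # file fails with ENOENT, and only the retry with
+            # O_CREAT | O_EXCL | O_WRONLY succeeds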
+ if path == "/path/to/volumes/v0000001":
+ return 123
+ if flags != (os.O_CREAT | os.O_EXCL | os.O_WRONLY):
+ raise OSError(errno.ENOENT,
+ "No such file or directory: {}".format(path))
+ else:
+ return 456
+
+ volume_path = "/path/to/volumes/v0000001"
+ expected_lock_path = "/path/to/volumes/v0000001.writelock"
+
+ m_os_open.side_effect = fake_os_open
+ vfile.open_volume(volume_path)
+ second_open_args = m_os_open.call_args_list[1]
+ self.assertEqual(
+ second_open_args,
+ mock.call(expected_lock_path,
+ os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600)
+ )
+
+ @mock.patch("swift.obj.vfile.os.open")
+ @mock.patch("swift.obj.vfile.fcntl.flock")
+ @mock.patch("swift.obj.vfile.os.close")
+ def test_open_volume_cannot_open_lock_file(self, m_os_close, m_flock,
+ m_os_open):
+ """The lock file cannot be opened"""
+ def fake_os_open(path, flags, mode=None):
+ if path == "/path/to/volumes/v0000001":
+ return 123
+ raise OSError(errno.EPERM, "Permission denied")
+
+ volume_path = "/path/to/volumes/v0000001"
+
+ m_os_open.side_effect = fake_os_open
+ self.assertRaises(OSError, vfile.open_volume, volume_path)
+
+ @mock.patch("swift.obj.vfile.os.open")
+ @mock.patch("swift.obj.vfile.fcntl.flock")
+ def test_open_volume_missing_volume(self, m_flock, m_os_open):
+ volume_path = "/path/to/volumes/v0000001"
+ expected_lock_path = "/path/to/volumes/v0000001.writelock"
+
+ m_lock_file = mock.Mock()
+ m_os_open.side_effect = [m_lock_file,
+ OSError(2, "No such file or directory")]
+ self.assertRaises(OSError, vfile.open_volume, volume_path)
+ args_lock = m_os_open.call_args_list[0]
+ self.assertEqual(args_lock, mock.call(expected_lock_path, os.O_WRONLY))
+ m_flock.assert_called_once_with(m_lock_file, fcntl.LOCK_EX |
+ fcntl.LOCK_NB)
+ args_volume = m_os_open.call_args_list[1]
+ self.assertEqual(args_volume, mock.call(volume_path, os.O_WRONLY))
+
+ @mock.patch("swift.obj.vfile.os.open")
+ @mock.patch("swift.obj.vfile.os.close")
+ @mock.patch("swift.obj.vfile.fcntl.flock")
+ def test_open_volume_cannot_lock(self, m_flock, m_os_close, m_os_open):
+ volume_path = "/path/to/volumes/v0000001"
+ expected_lock_path = "/path/to/volumes/v0000001.writelock"
+
+ # Test we get (None, None) and not an exception if we get an IOError
+ # with EACCES when attempting to get a lock
+ m_lock_file = mock.Mock()
+ m_os_open.return_value = m_lock_file
+ m_flock.side_effect = IOError(errno.EACCES, "cannot lock")
+ vol_fd, lock_fd = vfile.open_volume(volume_path)
+ self.assertEqual((vol_fd, lock_fd), (None, None))
+ m_os_open.assert_called_once_with(expected_lock_path, os.O_WRONLY)
+ m_flock.assert_called_once_with(m_lock_file, fcntl.LOCK_EX |
+ fcntl.LOCK_NB)
+ m_os_close.assert_called_once_with(m_lock_file)
+
+ # Same test with EAGAIN
+ m_os_open.reset_mock()
+ m_flock.reset_mock()
+ m_os_close.reset_mock()
+ m_flock.side_effect = IOError(errno.EAGAIN, "cannot lock")
+ vol_fd, lock_fd = vfile.open_volume(volume_path)
+ self.assertEqual((vol_fd, lock_fd), (None, None))
+ m_os_open.assert_called_once_with(expected_lock_path, os.O_WRONLY)
+ m_flock.assert_called_once_with(m_lock_file, fcntl.LOCK_EX |
+ fcntl.LOCK_NB)
+ m_os_close.assert_called_once_with(m_lock_file)
+
+ # Same test with EBADF, this should raise
+ m_os_open.reset_mock()
+ m_flock.reset_mock()
+ m_os_close.reset_mock()
+ m_flock.side_effect = IOError(errno.EBADF, "cannot lock")
+ self.assertRaises(IOError, vfile.open_volume, volume_path)
+ m_os_open.assert_called_once_with(expected_lock_path, os.O_WRONLY)
+ m_flock.assert_called_once_with(m_lock_file, fcntl.LOCK_EX |
+ fcntl.LOCK_NB)
+ m_os_close.assert_called_once_with(m_lock_file)
+
+ def test_get_lock_file_name(self):
+ name = vfile.get_lock_file_name(1)
+ self.assertEqual(name, "v0000001.writelock")
+
+ name = vfile.get_lock_file_name(9999999)
+ self.assertEqual(name, "v9999999.writelock")
+
+ self.assertRaises(vfile.VFileException, vfile.get_lock_file_name, 0)
+ self.assertRaises(vfile.VFileException, vfile.get_lock_file_name,
+ 10000000)
+
+ def test_get_volume_name(self):
+ name = vfile.get_volume_name(1)
+ self.assertEqual(name, "v0000001")
+
+ name = vfile.get_volume_name(9999999)
+ self.assertEqual(name, "v9999999")
+
+ self.assertRaises(vfile.VFileException, vfile.get_volume_name, 0)
+ self.assertRaises(vfile.VFileException, vfile.get_volume_name,
+ 10000000)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/unit/obj/test_vfile_utils.py b/test/unit/obj/test_vfile_utils.py
new file mode 100644
index 000000000..b77509c4e
--- /dev/null
+++ b/test/unit/obj/test_vfile_utils.py
@@ -0,0 +1,328 @@
+# Copyright (c) 2010-2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for swift.obj.vfile_utils"""
+
+import unittest
+from itertools import chain
+
+import mock
+
+from swift.common.storage_policy import StoragePolicy
+from swift.obj import vfile_utils
+from test.unit import patch_policies
+
+
+class TestVfileUtils(unittest.TestCase):
+
+ def test_get_socket_path_from_volume_path(self):
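+        # a volume path of the form <mount>/losf[-policy]/volumes/vNNNNNNN
+        # is expected to map to <mount>/losf[-policy]/rpc.socket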
+ volume_path = "/srv/node/sdb1/losf/volumes/v0000001"
+ expected = "/srv/node/sdb1/losf/rpc.socket"
+ socket_path = vfile_utils.get_socket_path_from_volume_path(volume_path)
+ self.assertEqual(socket_path, expected)
+
+ volume_path = "/sdc1/losf-2/volumes/v0000234"
+ expected = "/sdc1/losf-2/rpc.socket"
+ socket_path = vfile_utils.get_socket_path_from_volume_path(volume_path)
+ self.assertEqual(expected, socket_path)
+
+ volume_path = "/losf-99/volumes/v0000001"
+ expected = "/losf-99/rpc.socket"
+ socket_path = vfile_utils.get_socket_path_from_volume_path(volume_path)
+ self.assertEqual(expected, socket_path)
+
+ volume_path = "/volumes/v0000001"
+ with self.assertRaises(ValueError):
+ vfile_utils.get_socket_path_from_volume_path(volume_path)
+
+ volume_path = "/srv/node/sdb1/losf-notAnInt/volumes/v0000001"
+ with self.assertRaises(ValueError):
+ vfile_utils.get_socket_path_from_volume_path(volume_path)
+
+ volume_path = "/srv/node/sdb1/losf/not-volumes/v0000001"
+ with self.assertRaises(ValueError):
+ vfile_utils.get_socket_path_from_volume_path(volume_path)
+
+ volume_path = "/srv/node/sdb1/losf/volumes/not-a-volume"
+ with self.assertRaises(ValueError):
+ vfile_utils.get_socket_path_from_volume_path(volume_path)
+
+ def test_get_mountpoint_from_volume_path(self):
+ volume_path = "/srv/node/sdb1/losf/volumes/v0000001"
+ expected = "/srv/node/sdb1"
+ mountpoint = vfile_utils.get_mountpoint_from_volume_path(volume_path)
+ self.assertEqual(expected, mountpoint)
+
+ volume_path = "/sdb1/losf-2/volumes/v0000234"
+ expected = "/sdb1"
+ mountpoint = vfile_utils.get_mountpoint_from_volume_path(volume_path)
+ self.assertEqual(expected, mountpoint)
+
+ volume_path = "/losf-99/volumes/v0000001"
+ expected = "/"
+        mountpoint = vfile_utils.get_mountpoint_from_volume_path(volume_path)
+        self.assertEqual(expected, mountpoint)
+
+ volume_path = "/volumes/v0000001"
+ with self.assertRaises(ValueError):
+ vfile_utils.get_mountpoint_from_volume_path(volume_path)
+
+ volume_path = "/srv/node/sdb1/losf-notAnInt/volumes/v0000001"
+ with self.assertRaises(ValueError):
+ vfile_utils.get_mountpoint_from_volume_path(volume_path)
+
+ volume_path = "/srv/node/sdb1/losf/not-volumes/v0000001"
+ with self.assertRaises(ValueError):
+ vfile_utils.get_mountpoint_from_volume_path(volume_path)
+
+ volume_path = "/srv/node/sdb1/losf/volumes/not-a-volume"
+ with self.assertRaises(ValueError):
+ vfile_utils.get_mountpoint_from_volume_path(volume_path)
+
+ def test_get_volume_index(self):
+ v_index = vfile_utils.get_volume_index("v0000001")
+ self.assertEqual(v_index, 1)
+ v_index = vfile_utils.get_volume_index("/sda/losf/volumes/v0000001")
+ self.assertEqual(v_index, 1)
+ v_index = vfile_utils.get_volume_index("v9876543")
+ self.assertEqual(v_index, 9876543)
+ with self.assertRaises(ValueError):
+ vfile_utils.get_volume_index("v1")
+ with self.assertRaises(ValueError):
+ vfile_utils.get_volume_index("v12345678")
+ with self.assertRaises(ValueError):
+ vfile_utils.get_volume_index("x0000001")
+ with self.assertRaises(ValueError):
+ vfile_utils.get_volume_index("12345678")
+ with self.assertRaises(ValueError):
+ vfile_utils.get_volume_index("vz000001")
+
+ def test_next_aligned_offset(self):
+ test_data = [
+ {"args": [0, 4096], "expected": 0},
+ {"args": [4096, 4096], "expected": 4096},
+ {"args": [4095, 4096], "expected": 4096},
+ {"args": [4097, 4096], "expected": 8192},
+ {"args": [4095, 8192], "expected": 8192},
+ ]
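+        # i.e. next_aligned_offset(offset, alignment) should round
+        # offset up to the next multiple of alignment:
+        # ((offset + alignment - 1) // alignment) * alignment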
+ for test in test_data:
+ aligned_offset = vfile_utils.next_aligned_offset(*test["args"])
+ self.assertEqual(aligned_offset, test["expected"])
+
+ def test_change_user(self):
+ with mock.patch("swift.obj.vfile_utils.pwd.getpwnam") as m_getpwnam, \
+ mock.patch("swift.obj.vfile_utils.os.setuid") as m_setuid:
+ pw = mock.MagicMock()
+ pw.pw_uid = 123
+ m_getpwnam.return_value = pw
+
+ vfile_utils.change_user("dummy")
+ m_getpwnam.assert_called_once_with("dummy")
+ m_setuid.assert_called_once_with(123)
+
+
+@patch_policies([StoragePolicy(0, 'zero', False),
+ StoragePolicy(1, 'one', True)])
+class TestSwiftPathInfo(unittest.TestCase):
+
+ def test_swift_path_info(self):
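+        # object paths decompose as
+        # <mount>/objects[-policy]/<partition>/<suffix>/<ohash>/<filename>;
+        # each trailing component is optional, yielding the types
+        # "partitions", "partition", "suffix", "ohash" and "file"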
+ test_data = [
+ {"path": "/sda/objects/1234/def/d41d8cd98f00b204e9800998ecf8427e/"
+ "1522913866.16520#12#d.data",
+ "type": "file",
+ "socket_path": "/sda/losf/rpc.socket",
+ "volume_dir": "/sda/losf/volumes",
+ "policy_idx": 0,
+ "partition": "1234",
+ "suffix": "def",
+ "ohash": "d41d8cd98f00b204e9800998ecf8427e",
+ "filename": "1522913866.16520#12#d.data"},
+ {"path": "/sda/objects/1234/def/d41d8cd98f00b204e9800998ecf8427e",
+ "type": "ohash",
+ "socket_path": "/sda/losf/rpc.socket",
+ "volume_dir": "/sda/losf/volumes",
+ "policy_idx": 0,
+ "partition": "1234",
+ "suffix": "def",
+ "ohash": "d41d8cd98f00b204e9800998ecf8427e",
+ "filename": None},
+ {"path": "/sda/objects/1234/def",
+ "type": "suffix",
+ "socket_path": "/sda/losf/rpc.socket",
+ "volume_dir": "/sda/losf/volumes",
+ "policy_idx": 0,
+ "partition": "1234",
+ "suffix": "def",
+ "ohash": None,
+ "filename": None},
+ {"path": "/sda/objects/1234",
+ "type": "partition",
+ "socket_path": "/sda/losf/rpc.socket",
+ "volume_dir": "/sda/losf/volumes",
+ "policy_idx": 0,
+ "partition": "1234",
+ "suffix": None,
+ "ohash": None,
+ "filename": None},
+ {"path": "/sda/objects",
+ "type": "partitions",
+ "socket_path": "/sda/losf/rpc.socket",
+ "volume_dir": "/sda/losf/volumes",
+ "policy_idx": 0,
+ "partition": None,
+ "suffix": None,
+ "ohash": None,
+ "filename": None},
+ ]
+
+ test_data_others = [
+ # extra slashes
+ {"path": "//sda/objects/1234/def/d41d8cd98f00b204e9800998ecf8427e/"
+ "1522913866.16520#12#d.data/",
+ "type": "file",
+ "socket_path": "/sda/losf/rpc.socket",
+ "volume_dir": "/sda/losf/volumes",
+ "policy_idx": 0,
+ "partition": "1234",
+ "suffix": "def",
+ "ohash": "d41d8cd98f00b204e9800998ecf8427e",
+ "filename": "1522913866.16520#12#d.data"},
+ # Longer mountpoint
+ {"path": "/srv/node1/sda1/objects",
+ "type": "partitions",
+ "socket_path": "/srv/node1/sda1/losf/rpc.socket",
+ "volume_dir": "/srv/node1/sda1/losf/volumes",
+ "policy_idx": 0,
+ "partition": None,
+ "suffix": None,
+ "ohash": None,
+ "filename": None},
+ # Policy 1
+ {"path": "/srv/node1/sda1/objects-1",
+ "type": "partitions",
+ "socket_path": "/srv/node1/sda1/losf-1/rpc.socket",
+ "volume_dir": "/srv/node1/sda1/losf-1/volumes",
+ "policy_idx": 1,
+ "partition": None,
+ "suffix": None,
+ "ohash": None,
+ "filename": None},
+ ]
+
+ for test in chain(test_data, test_data_others):
+ si = vfile_utils.SwiftPathInfo.from_path(test["path"])
+ self.assertEqual(si.type, test["type"])
+ self.assertEqual(si.socket_path, test["socket_path"])
+ self.assertEqual(si.volume_dir, test["volume_dir"])
+ self.assertEqual(si.policy_idx, test["policy_idx"])
+ self.assertEqual(si.partition, test["partition"])
+ self.assertEqual(si.suffix, test["suffix"])
+ self.assertEqual(si.ohash, test["ohash"])
+ self.assertEqual(si.filename, test["filename"])
+
+ def test_swift_path_info_error(self):
+ with self.assertRaises(vfile_utils.VOSError):
+ vfile_utils.SwiftPathInfo.from_path("/invalid")
+ with self.assertRaises(vfile_utils.VOSError):
+ vfile_utils.SwiftPathInfo.from_path("/srv/node/sda2")
+ with self.assertRaises(vfile_utils.VOSError):
+ invalid_path = "/sda/objects/1234/def" \
+ "/d41d8cd98f00b204e9800998ecf8427e/" \
+ "1522913866.16520#12#d.data/extra_dir"
+ vfile_utils.SwiftPathInfo.from_path(invalid_path)
+
+
+@patch_policies([StoragePolicy(0, 'zero', False),
+ StoragePolicy(1, 'one', True)])
+class TestSwiftQuarantinedPathInfo(unittest.TestCase):
+
+ def test_swift_path_info(self):
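+        # quarantined paths decompose as
+        # <mount>/quarantined/objects[-policy]/<ohash>/<filename>; trailing
+        # components are optional, yielding "ohashes", "ohash" and "file"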
+ test_data = [
+ {"path": "/sda/quarantined/objects/"
+ "d41d8cd98f00b204e9800998ecf8427e/"
+ "1522913866.16520#12#d.data",
+ "type": "file",
+ "socket_path": "/sda/losf/rpc.socket",
+ "volume_dir": "/sda/losf/volumes",
+ "policy_idx": 0,
+ "ohash": "d41d8cd98f00b204e9800998ecf8427e",
+ "filename": "1522913866.16520#12#d.data"},
+ {"path": "/sda/quarantined/objects/"
+ "d41d8cd98f00b204e9800998ecf8427e/",
+ "type": "ohash",
+ "socket_path": "/sda/losf/rpc.socket",
+ "volume_dir": "/sda/losf/volumes",
+ "policy_idx": 0,
+ "ohash": "d41d8cd98f00b204e9800998ecf8427e",
+ "filename": None},
+ {"path": "/sda/quarantined/objects",
+ "type": "ohashes",
+ "socket_path": "/sda/losf/rpc.socket",
+ "volume_dir": "/sda/losf/volumes",
+ "policy_idx": 0,
+ "partition": None,
+ "suffix": None,
+ "ohash": None,
+ "filename": None},
+ ]
+
+ test_data_others = [
+ # extra slashes
+ {"path": "//sda/quarantined/objects/"
+ "d41d8cd98f00b204e9800998ecf8427e//"
+ "1522913866.16520#12#d.data/",
+ "type": "file",
+ "socket_path": "/sda/losf/rpc.socket",
+ "volume_dir": "/sda/losf/volumes",
+ "policy_idx": 0,
+ "ohash": "d41d8cd98f00b204e9800998ecf8427e",
+ "filename": "1522913866.16520#12#d.data"},
+ # Longer mountpoint
+ {"path": "/srv/node1/sda1/quarantined/objects",
+ "type": "ohashes",
+ "socket_path": "/srv/node1/sda1/losf/rpc.socket",
+ "volume_dir": "/srv/node1/sda1/losf/volumes",
+ "policy_idx": 0,
+ "ohash": None,
+ "filename": None},
+ # Policy 1
+ {"path": "/srv/node1/sda1/quarantined/objects-1",
+ "type": "ohashes",
+ "socket_path": "/srv/node1/sda1/losf-1/rpc.socket",
+ "volume_dir": "/srv/node1/sda1/losf-1/volumes",
+ "policy_idx": 1,
+ "ohash": None,
+ "filename": None},
+ ]
+
+ for test in chain(test_data, test_data_others):
+ si = vfile_utils.SwiftQuarantinedPathInfo.from_path(test["path"])
+ self.assertEqual(si.type, test["type"])
+ self.assertEqual(si.socket_path, test["socket_path"])
+ self.assertEqual(si.volume_dir, test["volume_dir"])
+ self.assertEqual(si.policy_idx, test["policy_idx"])
+ self.assertEqual(si.ohash, test["ohash"])
+ self.assertEqual(si.filename, test["filename"])
+
+ def test_swift_path_info_error(self):
+        with self.assertRaises(vfile_utils.VOSError):
+            vfile_utils.SwiftQuarantinedPathInfo.from_path("/invalid")
+        with self.assertRaises(vfile_utils.VOSError):
+            vfile_utils.SwiftQuarantinedPathInfo.from_path("/srv/node/sda2")
+        with self.assertRaises(vfile_utils.VOSError):
+            invalid_path = "/sdb/quarantined/objects" \
+                           "/d41d8cd98f00b204e9800998ecf8427e/" \
+                           "1522913866.16520#12#d.data/extra_dir"
+            vfile_utils.SwiftQuarantinedPathInfo.from_path(invalid_path)
diff --git a/tools/playbooks/common/install_dependencies.yaml b/tools/playbooks/common/install_dependencies.yaml
index 67dbcf6a9..d48905aab 100644
--- a/tools/playbooks/common/install_dependencies.yaml
+++ b/tools/playbooks/common/install_dependencies.yaml
@@ -18,7 +18,7 @@
- ensure-pip
tasks:
- name: installing dependencies
- yum: name={{ item }} state=present
+ package: name={{ item }} state=present
with_items:
- python-pyeclib
- python-nose
diff --git a/tools/playbooks/common/install_losf_dependencies.yaml b/tools/playbooks/common/install_losf_dependencies.yaml
new file mode 100644
index 000000000..ef55c0d1e
--- /dev/null
+++ b/tools/playbooks/common/install_losf_dependencies.yaml
@@ -0,0 +1,59 @@
+# Copyright (c) 2018 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This might be just temporary; we may want to remove this and take
+# another approach for some items, but it is needed to test the
+# makefile and dependency installation.
+- hosts: all
+ tasks:
+    # install cmake 3.14: the losf build requires cmake > 3.9, but the
+    # version shipped with xenial is too old. This section could go away
+    # once the environment moves to bionic.
+ - name: download cmake
+ unarchive:
+ src: https://github.com/Kitware/CMake/releases/download/v3.14.3/cmake-3.14.3-Linux-x86_64.tar.gz
+ dest: "{{ ansible_env.HOME }}"
+ remote_src: yes
+
+ # setup golang
+    # To pin dependency versions with go modules we need at least
+    # go 1.11; 1.12 or later is better because it requires no extra
+    # environment settings.
+ - name: download golang package and unarchive
+ unarchive:
+ src: https://dl.google.com/go/go1.14.1.linux-amd64.tar.gz
+ dest: /usr/local
+ remote_src: yes
+ become: true
+
+ # install protoc (protobuf-compiler)
+    # TODO: decide which protoc version we want; at least the 2.6.1
+    # shipped with xenial doesn't work well
+ - name: install unzip to extract the pre-compiled binary
+ package:
+ name: unzip
+ state: present
+ become: true
+
+ - name: create protoc dir to extract the binary
+ file:
+ path: "{{ ansible_env.HOME }}/protoc"
+ state: directory
+ mode: 0755
+
+ - name: download protoc
+ unarchive:
+ src: https://github.com/protocolbuffers/protobuf/releases/download/v3.11.4/protoc-3.11.4-linux-x86_64.zip
+ dest: "{{ ansible_env.HOME }}/protoc"
+ remote_src: yes
diff --git a/tools/playbooks/losf_setup/pre.yaml b/tools/playbooks/losf_setup/pre.yaml
new file mode 100644
index 000000000..c2b2d93e1
--- /dev/null
+++ b/tools/playbooks/losf_setup/pre.yaml
@@ -0,0 +1,47 @@
+# Copyright (c) 2018 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This might be just temporary; we may want to remove this and take
+# another approach for some items, but it is needed to test the
+# makefile and dependency installation.
+- hosts: all
+ environment:
+ PATH: "{{ ansible_env.PATH }}:/usr/local/go/bin:{{ ansible_env.HOME }}/protoc/bin:{{ ansible_env.HOME }}/go/bin:{{ ansible_env.HOME }}/cmake-3.14.3-Linux-x86_64/bin"
+ tasks:
+ # losf golang package installation
+ - name: build losf golang package
+ make:
+ chdir: "{{ zuul.project.src_dir }}/go/swift-rpc-losf"
+
+ # sanity check the swift-rpc-losf exists in the go binary path
+ - name: check if swift-rpc-losf exists
+ stat:
+ path: "{{ ansible_env.HOME }}/go/bin/swift-rpc-losf"
+ register: stat_result
+
+ - name: Fail if swift-rpc-losf doesn't exist
+ fail:
+ msg: "swift-rpc-losf was not found"
+      when: not stat_result.stat.exists
+
+    # TBD: hopefully we won't need this trick once we either
+    # - copy the binary into the /usr/local/bin directory, or
+    # - guarantee that the makefile is called even when not needed
+ - name: create symbolic link to the built binary
+ file:
+ state: link
+ src: "{{ ansible_env.HOME }}/go/bin/swift-rpc-losf"
+ path: /usr/local/bin/swift-rpc-losf
+ become: true
diff --git a/tools/playbooks/losf_setup/run.yaml b/tools/playbooks/losf_setup/run.yaml
new file mode 100644
index 000000000..eb0093aa2
--- /dev/null
+++ b/tools/playbooks/losf_setup/run.yaml
@@ -0,0 +1,46 @@
+# Copyright (c) 2018 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+- hosts: all
+ environment:
+ SWIFT_TEST_CONFIG_FILE: "/etc/swift/test.conf"
+ SAIO_BLOCK_DEVICE: "/srv/swift-disk"
+ tasks:
+ - name: install test requirements
+ pip:
+ requirements: '{{ ansible_env.HOME }}/{{ zuul.project.src_dir }}/test-requirements.txt'
+ become: true
+
+    - name: create swift.conf file from template to overwrite default policy
+ template: src=swift.conf.j2 dest=/etc/swift/swift.conf
+ become: true
+
+    - name: Start main swift servers
+ shell: "swift-init main start"
+ become: true
+
+    - name: Start swift object RPC servers
+ shell: "swift-init object-rpcmanager start"
+ environment:
+ LD_LIBRARY_PATH: /usr/local/lib
+ become: true
+
+    # the common SAIO setup is expected to have created test.conf
+ - name: run functional tests
+ shell:
+ cmd: |
+ source ~/.bashrc
+ nosetests test/functional
+ executable: /bin/bash
+ chdir: '{{ ansible_env.HOME }}/{{ zuul.project.src_dir }}'
diff --git a/tools/playbooks/losf_setup/templates/swift.conf.j2 b/tools/playbooks/losf_setup/templates/swift.conf.j2
new file mode 100644
index 000000000..cee5d6f37
--- /dev/null
+++ b/tools/playbooks/losf_setup/templates/swift.conf.j2
@@ -0,0 +1,11 @@
+[swift-hash]
+swift_hash_path_suffix = changeme
+swift_hash_path_prefix = changeme
+
+[storage-policy:0]
+name = losf-test
+default = yes
+policy_type = replication
+diskfile_module = egg:swift#replication.kv
+
+[swift-constraints]
diff --git a/tools/playbooks/probetests/run.yaml b/tools/playbooks/probetests/run.yaml
index 912b19aaa..5615f5788 100644
--- a/tools/playbooks/probetests/run.yaml
+++ b/tools/playbooks/probetests/run.yaml
@@ -15,8 +15,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: all
-
tasks:
+
- name: run probe tests
shell:
cmd: |
diff --git a/tools/playbooks/saio_single_node_setup/make_rings.yaml b/tools/playbooks/saio_single_node_setup/make_rings.yaml
index 65c469a23..b13f524f6 100644
--- a/tools/playbooks/saio_single_node_setup/make_rings.yaml
+++ b/tools/playbooks/saio_single_node_setup/make_rings.yaml
@@ -14,6 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: all
+ environment:
+ PATH: "{{ ansible_env.PATH }}:{{ ansible_env.HOME }}/bin"
+ SAIO_BLOCK_DEVICE: "/srv/swift-disk"
tasks:
- name: install swift
become: true
diff --git a/tools/playbooks/saio_single_node_setup/setup_saio.yaml b/tools/playbooks/saio_single_node_setup/setup_saio.yaml
index c78cefda7..c6ba25b72 100644
--- a/tools/playbooks/saio_single_node_setup/setup_saio.yaml
+++ b/tools/playbooks/saio_single_node_setup/setup_saio.yaml
@@ -92,26 +92,52 @@
- 3
- 4
- - name: create rc.local from template
- template: src=rc.local.j2 dest=/etc/rc.d/rc.local owner=root group=root mode=0755
+ - name: setup rsync for centos
+ block:
+ - name: create rc.local from template
+ template: src=rc.local.j2 dest=/etc/rc.d/rc.local owner=root group=root mode=0755
- - name: create /etc/rsyncd.conf
- command: cp {{ zuul.project.src_dir }}/doc/saio/rsyncd.conf /etc/
+ - name: create /etc/rsyncd.conf
+ command: cp {{ zuul.project.src_dir }}/doc/saio/rsyncd.conf /etc/
- - name: update rsyncd.conf with correct username
- replace: dest=/etc/rsyncd.conf regexp=<your-user-name> replace={{ ansible_user_id }}
+ - name: update rsyncd.conf with correct username
+ replace: dest=/etc/rsyncd.conf regexp=<your-user-name> replace={{ ansible_user_id }}
- - name: enable rsync
- lineinfile: dest=/etc/xinetd.d/rsync line="disable = no" create=yes
+ - name: enable rsync
+ lineinfile: dest=/etc/xinetd.d/rsync line="disable = no" create=yes
- - name: set selinux to permissive
- selinux: policy=targeted state=disabled
+ - name: set selinux to permissive
+ selinux: policy=targeted state=disabled
- - name: restart rsync
- service: name=rsyncd state=restarted enabled=yes
+ - name: restart rsync
+ systemd: name=rsyncd state=restarted enabled=yes
+
+ when: ansible_os_family == 'RedHat'
+
+ - name: setup rsync for ubuntu
+ block:
+      # TODO: check whether this is still safe; /etc/rc.local does not
+      # exist by default on ubuntu xenial/bionic
+ - name: create rc.local from template
+ template: src=rc.local.j2 dest=/etc/rc.local owner=root group=root mode=0755
+
+ - name: create /etc/rsyncd.conf
+ command: cp {{ zuul.project.src_dir }}/doc/saio/rsyncd.conf /etc/
+
+ - name: update rsyncd.conf with correct username
+ replace: dest=/etc/rsyncd.conf regexp=<your-user-name> replace={{ ansible_user_id }}
+
+ - name: enable rsync
+ systemd:
+ name: rsync
+ state: restarted
+ daemon_reload: yes
+ enabled: true
+
+ when: ansible_os_family == 'Debian'
- name: start memcache
- service: name=memcached state=started enabled=yes
+ systemd: name=memcached state=started enabled=yes
- name: configure rsyslog
command: cp {{ zuul.project.src_dir }}/doc/saio/rsyslog.d/10-swift.conf /etc/rsyslog.d/
@@ -130,7 +156,7 @@
mode="g+w"
- name: restart rsyslog
- service: name=rsyslog state=restarted enabled=yes
+ systemd: name=rsyslog state=restarted enabled=yes
- name: clean up /etc/swift directory
file: path=/etc/swift state=absent
diff --git a/tox.ini b/tox.ini
index f73886f58..ed5c24503 100644
--- a/tox.ini
+++ b/tox.ini
@@ -83,6 +83,11 @@ commands = ./.functests {posargs}
setenv = SWIFT_TEST_IN_PROCESS=1
SWIFT_TEST_IN_PROCESS_CONF_LOADER=ec
+[testenv:func-losf]
+commands = ./.functests {posargs}
+setenv = SWIFT_TEST_IN_PROCESS=1
+ SWIFT_TEST_IN_PROCESS_CONF_LOADER=losf
+
[testenv:venv]
commands = {posargs}
@@ -122,7 +127,7 @@ commands = bandit -c bandit.yaml -r swift -n 5
# W503: line break before binary operator
# W504: line break after binary operator
ignore = H101,H202,H301,H306,H404,H405,H501,W503,W504,E402,E731,E741
-exclude = .venv,.tox,dist,*egg
+exclude = .venv,.tox,dist,*egg,*pb2.py
filename = *.py,bin/*
show-source = True