author      David Teigland <teigland@redhat.com>   2015-06-03 09:41:02 -0500
committer   David Teigland <teigland@redhat.com>   2015-06-03 14:00:48 -0500
commit      094da33bd33b87c38aee8105b3fe7a46068604ca (patch)
tree        00ccd06ba0273892539f7def82b9cdafb3f56d33
parent      6698d1b3186ef7b5ccaf823b588249ba28a56960 (diff)
download    lvm2-094da33bd33b87c38aee8105b3fe7a46068604ca.tar.gz
check and extend sanlock lv earlier
at the start of lvcreate
-rw-r--r--   daemons/lvmlockd/lvmlockd-core.c        62
-rw-r--r--   daemons/lvmlockd/lvmlockd-internal.h     5
-rw-r--r--   daemons/lvmlockd/lvmlockd-sanlock.c     88
-rw-r--r--   lib/config/config_settings.h            10
-rw-r--r--   lib/config/defaults.h                    2
-rw-r--r--   lib/locking/lvmlockd.c                 109
-rw-r--r--   lib/locking/lvmlockd.h                   7
-rw-r--r--   tools/lvcreate.c                         7
8 files changed, 250 insertions, 40 deletions
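
The net effect of the patch: lvcreate in a sanlock VG now asks lvmlockd up front whether the internal lvmlock LV still has room for another LV lease, and extends it by global/sanlock_lv_extend MiB if it does not, rather than discovering the shortage later inside init_lv. The following is a minimal, self-contained C sketch of that ordering; every function, constant, and size below is a hypothetical stand-in for illustration, not the code added by the patch.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN_SIZE    (1024 * 1024)  /* illustrative lease slot size (1 MiB) */
#define LV_LOCK_BEGIN 66             /* illustrative first LV lease slot */

static uint64_t lvmlock_lv_size = (uint64_t)70 * ALIGN_SIZE;  /* simulated lvmlock LV */
static int used_slots = 4;           /* leases already allocated for existing LVs */

/* Stand-in for lm_find_free_lock_sanlock(): scan slots for a free lease. */
static int find_free_lock(uint64_t *free_offset)
{
	uint64_t offset = (uint64_t)ALIGN_SIZE * LV_LOCK_BEGIN;
	int slot = 0;

	for (;;) {
		if (offset + ALIGN_SIZE > lvmlock_lv_size)
			return -EMSGSIZE;            /* scanned past the end of the LV */
		if (slot >= used_slots) {
			*free_offset = offset;       /* unused or never-initialized slot */
			return 0;
		}
		offset += ALIGN_SIZE;
		slot++;
	}
}

/* Stand-in for _extend_sanlock_lv(): grow the lvmlock LV by extend_mb MiB. */
static void extend_sanlock_lv(int extend_mb)
{
	lvmlock_lv_size += (uint64_t)extend_mb * 1024 * 1024;
}

int main(void)
{
	uint64_t free_offset = 0;
	int extend_mb = 256;                     /* DEFAULT_SANLOCK_LV_EXTEND_MB */
	int rv;

	/* What handle_sanlock_lv() now does at the start of lvcreate: */
	rv = find_free_lock(&free_offset);
	if (rv == -EMSGSIZE) {                   /* no room for another lease */
		extend_sanlock_lv(extend_mb);
		rv = find_free_lock(&free_offset);
	}

	/* init_lv then places the new lease at the offset found above. */
	if (!rv)
		printf("new LV lock lease at offset %llu\n",
		       (unsigned long long)free_offset);
	return rv ? 1 : 0;
}

The real search is done by lvmlockd against the sanlock leases on disk; the stub above only mimics the extend-then-retry shape of handle_sanlock_lv().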
diff --git a/daemons/lvmlockd/lvmlockd-core.c b/daemons/lvmlockd/lvmlockd-core.c
index ba99b2825..d7e98abb2 100644
--- a/daemons/lvmlockd/lvmlockd-core.c
+++ b/daemons/lvmlockd/lvmlockd-core.c
@@ -742,6 +742,8 @@ static const char *op_str(int x)
return "rename_final";
case LD_OP_RUNNING_LM:
return "running_lm";
+ case LD_OP_FIND_FREE_LOCK:
+ return "find_free_lock";
default:
return "op_unknown";
};
@@ -945,6 +947,15 @@ static void lm_rem_resource(struct lockspace *ls, struct resource *r)
lm_rem_resource_sanlock(ls, r);
}
+static int lm_find_free_lock(struct lockspace *ls, uint64_t *free_offset)
+{
+ if (ls->lm_type == LD_LM_DLM)
+ return 0;
+ else if (ls->lm_type == LD_LM_SANLOCK)
+ return lm_find_free_lock_sanlock(ls, free_offset);
+ return -1;
+}
+
/*
* While adopting locks, actions originate from the adopt_locks()
* function, not from a client. So, these actions (flagged ADOPT),
@@ -2040,8 +2051,18 @@ static void *lockspace_thread_main(void *arg_in)
pthread_cond_wait(&ls->cond, &ls->mutex);
}
- /* client thread queues actions on ls->actions, we move
- ls->actions to r->actions, then process the resources */
+ /*
+ * Process all the actions queued for this lockspace.
+ * The client thread queues actions on ls->actions.
+ *
+ * Here, take all the actions off of ls->actions, and:
+ *
+ * - For lock operations, move the act to r->actions.
+ * These lock actions/operations are processed by res_process().
+ *
+ * - For non-lock operations, e.g. related to managing
+ * the lockspace, process them in this loop.
+ */
while (1) {
if (list_empty(&ls->actions)) {
@@ -2098,6 +2119,19 @@ static void *lockspace_thread_main(void *arg_in)
break;
}
+ if (act->op == LD_OP_FIND_FREE_LOCK && act->rt == LD_RT_VG) {
+ uint64_t free_offset = 0;
+ log_debug("S %s find free lock", ls->name);
+ rv = lm_find_free_lock(ls, &free_offset);
+ log_debug("S %s find free lock %d offset %llu",
+ ls->name, rv, (unsigned long long)free_offset);
+ ls->free_lock_offset = free_offset;
+ list_del(&act->list);
+ act->result = rv;
+ add_client_result(act);
+ continue;
+ }
+
list_del(&act->list);
/* applies to all resources */
@@ -2107,8 +2141,12 @@ static void *lockspace_thread_main(void *arg_in)
}
/*
- * Find the specific resource this action refers to;
- * creates resource if not found.
+ * All the other ops are for locking.
+ * Find the specific resource that the lock op is for,
+ * and add the act to the resource's list of lock ops.
+ *
+ * (This creates a new resource if the one named in
+ * the act is not found.)
*/
r = find_resource_act(ls, act, (act->op == LD_OP_FREE) ? 1 : 0);
@@ -2125,6 +2163,11 @@ static void *lockspace_thread_main(void *arg_in)
}
pthread_mutex_unlock(&ls->mutex);
+ /*
+ * Process the lock operations that have been queued for each
+ * resource.
+ */
+
retry = 0;
list_for_each_entry_safe(r, r2, &ls->resources, list)
@@ -2915,6 +2958,7 @@ static int work_init_lv(struct action *act)
char ls_name[MAX_NAME+1];
char vg_args[MAX_ARGS];
char lv_args[MAX_ARGS];
+ uint64_t free_offset = 0;
int lm_type = 0;
int rv = 0;
@@ -2929,6 +2973,8 @@ static int work_init_lv(struct action *act)
if (ls) {
lm_type = ls->lm_type;
memcpy(vg_args, ls->vg_args, MAX_ARGS);
+ free_offset = ls->free_lock_offset;
+ ls->free_lock_offset = 0;
}
pthread_mutex_unlock(&lockspaces_mutex);
@@ -2945,7 +2991,7 @@ static int work_init_lv(struct action *act)
if (lm_type == LD_LM_SANLOCK) {
rv = lm_init_lv_sanlock(ls_name, act->vg_name, act->lv_uuid,
- vg_args, lv_args);
+ vg_args, lv_args, free_offset);
memcpy(act->lv_args, lv_args, MAX_ARGS);
return rv;
@@ -3588,6 +3634,11 @@ static int str_to_op_rt(const char *req_name, int *op, int *rt)
*rt = 0;
return 0;
}
+ if (!strcmp(req_name, "find_free_lock")) {
+ *op = LD_OP_FIND_FREE_LOCK;
+ *rt = LD_RT_VG;
+ return 0;
+ }
out:
return -1;
}
@@ -4137,6 +4188,7 @@ static void client_recv_action(struct client *cl)
case LD_OP_DISABLE:
case LD_OP_FREE:
case LD_OP_RENAME_BEFORE:
+ case LD_OP_FIND_FREE_LOCK:
rv = add_lock_action(act);
break;
default:
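
Inside the daemon, the new request reuses the existing action machinery: "find_free_lock" is parsed into LD_OP_FIND_FREE_LOCK, queued like a lock action, answered by the lockspace thread via lm_find_free_lock(), and the resulting offset is cached in ls->free_lock_offset for the next init_lv, which clears it after use. A small self-contained sketch of that single-use hand-off, with hypothetical helper names standing in for the lockspace-thread and work-queue code:

#include <stdint.h>
#include <stdio.h>

struct lockspace {
	uint64_t free_lock_offset;   /* where the next lease search/init should start */
};

/* Stand-in for the LD_OP_FIND_FREE_LOCK handling in lockspace_thread_main(). */
static void handle_find_free_lock(struct lockspace *ls, uint64_t found_offset)
{
	ls->free_lock_offset = found_offset;   /* remember the hint for init_lv */
}

/* Stand-in for work_init_lv(): consume the hint once, then fall back to the
 * default starting offset when no hint is cached. */
static uint64_t take_init_offset(struct lockspace *ls, uint64_t default_offset)
{
	uint64_t offset = ls->free_lock_offset ? ls->free_lock_offset : default_offset;

	ls->free_lock_offset = 0;              /* the hint is single-use */
	return offset;
}

int main(void)
{
	struct lockspace ls = { 0 };
	uint64_t default_offset = 66ULL * 1024 * 1024;   /* illustrative */

	handle_find_free_lock(&ls, 70ULL * 1024 * 1024);
	printf("first init_lv uses offset %llu\n",
	       (unsigned long long)take_init_offset(&ls, default_offset));
	printf("second init_lv falls back to %llu\n",
	       (unsigned long long)take_init_offset(&ls, default_offset));
	return 0;
}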
diff --git a/daemons/lvmlockd/lvmlockd-internal.h b/daemons/lvmlockd/lvmlockd-internal.h
index 81e0ff9c5..f5f7a399c 100644
--- a/daemons/lvmlockd/lvmlockd-internal.h
+++ b/daemons/lvmlockd/lvmlockd-internal.h
@@ -50,6 +50,7 @@ enum {
LD_OP_RENAME_BEFORE,
LD_OP_RENAME_FINAL,
LD_OP_RUNNING_LM,
+ LD_OP_FIND_FREE_LOCK,
};
/* resource types */
@@ -167,6 +168,7 @@ struct lockspace {
int8_t lm_type; /* lock manager: LM_DLM, LM_SANLOCK */
void *lm_data;
uint64_t host_id;
+ uint64_t free_lock_offset; /* start search for free lock here */
uint32_t start_client_id; /* client_id that started the lockspace */
pthread_t thread; /* makes synchronous lock requests */
@@ -217,7 +219,7 @@ int lm_data_size_dlm(void);
int lm_is_running_dlm(void);
int lm_init_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_args);
-int lm_init_lv_sanlock(char *ls_name, char *vg_name, char *lv_name, char *vg_args, char *lv_args);
+int lm_init_lv_sanlock(char *ls_name, char *vg_name, char *lv_name, char *vg_args, char *lv_args, uint64_t free_offset);
int lm_free_lv_sanlock(struct lockspace *ls, struct resource *r);
int lm_rename_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_args);
int lm_prepare_lockspace_sanlock(struct lockspace *ls);
@@ -237,6 +239,7 @@ int lm_gl_is_enabled(struct lockspace *ls);
int lm_get_lockspaces_sanlock(struct list_head *ls_rejoin);
int lm_data_size_sanlock(void);
int lm_is_running_sanlock(void);
+int lm_find_free_lock_sanlock(struct lockspace *ls, uint64_t *free_offset);
#define container_of(ptr, type, member) ({ \
const typeof( ((type *)0)->member ) *__mptr = (ptr); \
diff --git a/daemons/lvmlockd/lvmlockd-sanlock.c b/daemons/lvmlockd/lvmlockd-sanlock.c
index cd3954d94..80f5dc082 100644
--- a/daemons/lvmlockd/lvmlockd-sanlock.c
+++ b/daemons/lvmlockd/lvmlockd-sanlock.c
@@ -426,7 +426,7 @@ int lm_init_vg_sanlock(char *ls_name, char *vg_name, uint32_t flags, char *vg_ar
*/
int lm_init_lv_sanlock(char *ls_name, char *vg_name, char *lv_name,
- char *vg_args, char *lv_args)
+ char *vg_args, char *lv_args, uint64_t free_offset)
{
struct sanlk_resourced rd;
char lock_lv_name[MAX_ARGS];
@@ -459,7 +459,10 @@ int lm_init_lv_sanlock(char *ls_name, char *vg_name, char *lv_name,
return -EINVAL;
}
- offset = align_size * LV_LOCK_BEGIN;
+ if (free_offset)
+ offset = free_offset;
+ else
+ offset = align_size * LV_LOCK_BEGIN;
rd.rs.disks[0].offset = offset;
if (daemon_test) {
@@ -482,7 +485,7 @@ int lm_init_lv_sanlock(char *ls_name, char *vg_name, char *lv_name,
return rv;
}
- if (rv) {
+ if (rv && rv != SANLK_LEADER_MAGIC) {
log_error("S %s init_lv_san read error %d offset %llu",
ls_name, rv, (unsigned long long)offset);
break;
@@ -494,7 +497,12 @@ int lm_init_lv_sanlock(char *ls_name, char *vg_name, char *lv_name,
return -EEXIST;
}
- if (!strcmp(rd.rs.name, "#unused")) {
+ /*
+ * If we read newly extended space, it will not be initialized
+ * with an "#unused" resource, but will return SANLK_LEADER_MAGIC
+ * indicating an uninitialized paxos structure on disk.
+ */
+ if ((rv == SANLK_LEADER_MAGIC) || !strcmp(rd.rs.name, "#unused")) {
log_debug("S %s init_lv_san %s found unused area at %llu",
ls_name, lv_name, (unsigned long long)offset);
@@ -860,6 +868,78 @@ int lm_gl_is_enabled(struct lockspace *ls)
}
/*
+ * This is called at the beginning of lvcreate to
+ * ensure there is free space for a new LV lock.
+ * If not, lvcreate will extend the lvmlock lv
+ * before continuing with creating the new LV.
+ * This way, lm_init_lv_san() should find a free
+ * lock (unless the autoextend of lvmlock lv has
+ * been disabled).
+ */
+
+int lm_find_free_lock_sanlock(struct lockspace *ls, uint64_t *free_offset)
+{
+ struct lm_sanlock *lms = (struct lm_sanlock *)ls->lm_data;
+ struct sanlk_resourced rd;
+ uint64_t offset;
+ int rv;
+
+ if (daemon_test)
+ return 0;
+
+ memset(&rd, 0, sizeof(rd));
+
+ strncpy(rd.rs.lockspace_name, ls->name, SANLK_NAME_LEN);
+ rd.rs.num_disks = 1;
+ strncpy(rd.rs.disks[0].path, lms->ss.host_id_disk.path, SANLK_PATH_LEN);
+
+ offset = lms->align_size * LV_LOCK_BEGIN;
+
+ while (1) {
+ rd.rs.disks[0].offset = offset;
+
+ memset(rd.rs.name, 0, SANLK_NAME_LEN);
+
+ rv = sanlock_read_resource(&rd.rs, 0);
+ if (rv == -EMSGSIZE || rv == -ENOSPC) {
+ /* This indicates the end of the device is reached. */
+ log_debug("S %s find_free_lock_san read limit offset %llu",
+ ls->name, (unsigned long long)offset);
+ return -EMSGSIZE;
+ }
+
+ /*
+ * If we read newly extended space, it will not be initialized
+ * with an "#unused" resource, but will return an error about
+ * an invalid paxos structure on disk.
+ */
+ if (rv == SANLK_LEADER_MAGIC) {
+ log_debug("S %s find_free_lock_san found empty area at %llu",
+ ls->name, (unsigned long long)offset);
+ *free_offset = offset;
+ return 0;
+ }
+
+ if (rv) {
+ log_error("S %s find_free_lock_san read error %d offset %llu",
+ ls->name, rv, (unsigned long long)offset);
+ break;
+ }
+
+ if (!strcmp(rd.rs.name, "#unused")) {
+ log_debug("S %s find_free_lock_san found unused area at %llu",
+ ls->name, (unsigned long long)offset);
+ *free_offset = offset;
+ return 0;
+ }
+
+ offset += lms->align_size;
+ }
+
+ return rv;
+}
+
+/*
* host A: start_vg/add_lockspace
* host B: vgremove
*
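
The scan loop in lm_find_free_lock_sanlock() (and the matching relaxation in lm_init_lv_sanlock()) distinguishes four outcomes per lease slot. The same classification, restated as a tiny self-contained helper; the enum, the helper name, and the placeholder value for SANLK_LEADER_MAGIC are illustrative only (real code takes the constant from sanlock's headers):

#include <errno.h>
#include <string.h>
#include <stdio.h>

#define SANLK_LEADER_MAGIC (-1000)   /* placeholder; the real value comes from sanlock's headers */

enum slot_state { SLOT_FREE, SLOT_USED, SLOT_END, SLOT_ERROR };

/* Classify one sanlock_read_resource() result the way the scan above does. */
static enum slot_state classify_slot(int rv, const char *res_name)
{
	if (rv == -EMSGSIZE || rv == -ENOSPC)
		return SLOT_END;     /* read ran past the end of the lvmlock LV */
	if (rv == SANLK_LEADER_MAGIC)
		return SLOT_FREE;    /* newly extended, never-initialized space */
	if (rv)
		return SLOT_ERROR;   /* any other read error aborts the scan */
	if (!strcmp(res_name, "#unused"))
		return SLOT_FREE;    /* lease slot released by a previous lvremove */
	return SLOT_USED;            /* live lease belonging to an existing LV */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       classify_slot(0, "#unused"),           /* SLOT_FREE  */
	       classify_slot(SANLK_LEADER_MAGIC, ""), /* SLOT_FREE  */
	       classify_slot(0, "some-lv-uuid"),      /* SLOT_USED  */
	       classify_slot(-EMSGSIZE, ""));         /* SLOT_END   */
	return 0;
}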
diff --git a/lib/config/config_settings.h b/lib/config/config_settings.h
index adb803843..f2f550f68 100644
--- a/lib/config/config_settings.h
+++ b/lib/config/config_settings.h
@@ -833,9 +833,17 @@ cfg(global_use_lvmetad_CFG, "use_lvmetad", global_CFG_SECTION, 0, CFG_TYPE_BOOL,
cfg(global_use_lvmlockd_CFG, "use_lvmlockd", global_CFG_SECTION, 0, CFG_TYPE_BOOL, 0, vsn(2, 2, 120), NULL, 0, NULL,
"Use lvmlockd for locking among hosts using LVM on shared storage.\n")
-cfg(global_lock_retries_CFG, "lock_retries", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_INT, DEFAULT_LOCK_RETRIES, vsn(2, 2, 12), NULL, 0, NULL,
+cfg(global_lock_retries_CFG, "lock_retries", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_INT, DEFAULT_LOCK_RETRIES, vsn(2, 2, 120), NULL, 0, NULL,
"Retry lvmlockd lock requests this many times.\n")
+cfg(global_sanlock_lv_extend_CFG, "sanlock_lv_extend", global_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_INT, DEFAULT_SANLOCK_LV_EXTEND_MB, vsn(2, 2, 120), NULL, 0, NULL,
+ "Size in MiB to extend the internal LV holding sanlock locks.\n"
+ "The internal LV holds locks for each LV in the VG, and after\n"
+ "enough LVs have been created, the internal LV needs to be extended.\n"
+ "lvcreate will automatically extend the internal LV when needed by\n"
+ "the amount specified here. Setting this to 0 disables the\n"
+ "automatic extension and can cause lvcreate to fail.\n")
+
cfg(global_allow_override_lock_modes_CFG, "allow_override_lock_modes", global_CFG_SECTION, 0, CFG_TYPE_BOOL, 0, vsn(2, 2, 120), NULL, 0, NULL,
"Allow command options to override normal locking.\n")
diff --git a/lib/config/defaults.h b/lib/config/defaults.h
index b5b14331c..4dd9bef88 100644
--- a/lib/config/defaults.h
+++ b/lib/config/defaults.h
@@ -57,6 +57,8 @@
#define DEFAULT_METADATA_READ_ONLY 0
#define DEFAULT_LVDISPLAY_SHOWS_FULL_DEVICE_PATH 0
+#define DEFAULT_SANLOCK_LV_EXTEND_MB 256
+
#define DEFAULT_MIRRORLOG MIRROR_LOG_DISK
#define DEFAULT_MIRROR_LOG_FAULT_POLICY "allocate"
#define DEFAULT_MIRROR_IMAGE_FAULT_POLICY "remove"
diff --git a/lib/locking/lvmlockd.c b/lib/locking/lvmlockd.c
index 103558bc2..d40f1576b 100644
--- a/lib/locking/lvmlockd.c
+++ b/lib/locking/lvmlockd.c
@@ -328,8 +328,6 @@ static int _lockd_request(struct cmd_context *cmd,
* whenever it runs out of space.
*/
-#define LVMLOCKD_SANLOCK_LV_EXTEND (512 * 1024 * 1024)
-
static struct logical_volume *_find_sanlock_lv(struct volume_group *vg,
const char *lock_lv_name)
{
@@ -347,13 +345,13 @@ static struct logical_volume *_find_sanlock_lv(struct volume_group *vg,
*/
static int _create_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg,
- const char *lock_lv_name)
+ const char *lock_lv_name, int extend_mb)
{
struct logical_volume *lv;
struct lvcreate_params lp = {
.activate = CHANGE_ALY,
.alloc = ALLOC_INHERIT,
- .extents = LVMLOCKD_SANLOCK_LV_EXTEND / (vg->extent_size * SECTOR_SIZE),
+ .extents = (extend_mb * 1024 * 1024) / (vg->extent_size * SECTOR_SIZE),
.major = -1,
.minor = -1,
.permission = LVM_READ | LVM_WRITE,
@@ -399,7 +397,7 @@ static int _remove_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg,
return 1;
}
-static int _extend_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg)
+static int _extend_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg, int extend_mb)
{
const char *lock_lv_name = LVMLOCKD_SANLOCK_LV_NAME;
struct logical_volume *lv;
@@ -417,7 +415,7 @@ static int _extend_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg)
return 0;
}
- lp.size = lv->size + (LVMLOCKD_SANLOCK_LV_EXTEND / SECTOR_SIZE);
+ lp.size = lv->size + ((extend_mb * 1024 * 1024) / SECTOR_SIZE);
if (!lv_resize_prepare(cmd, lv, &lp, &vg->pvs) ||
!lv_resize(cmd, lv, &lp, &vg->pvs)) {
@@ -449,6 +447,64 @@ static int _refresh_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg)
return 1;
}
+/*
+ * Called at the beginning of lvcreate in a sanlock VG to ensure
+ * that there is space in the sanlock LV for a new lock. If it's
+ * full, then this extends it.
+ */
+
+int handle_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg)
+{
+ daemon_reply reply;
+ int extend_mb;
+ int result;
+ int ret;
+
+ if (!_use_lvmlockd)
+ return 1;
+ if (!_lvmlockd_connected)
+ return 0;
+
+ extend_mb = find_config_tree_int(cmd, global_sanlock_lv_extend_CFG, NULL);
+
+ /*
+ * User can choose to not automatically extend the lvmlock LV
+ * so they can manually extend it.
+ */
+ if (!extend_mb)
+ return 1;
+
+ /*
+ * Another host may have extended the lvmlock LV already.
+ * Refresh so that we'll find the new space they added
+ * when we search for new space.
+ */
+ if (!_refresh_sanlock_lv(cmd, vg))
+ return 0;
+
+ /*
+ * Ask lvmlockd/sanlock to look for an unused lock.
+ */
+ reply = _lockd_send("find_free_lock",
+ "pid = %d", getpid(),
+ "vg_name = %s", vg->name,
+ NULL);
+
+ if (!_lockd_result(reply, &result, NULL)) {
+ ret = 0;
+ } else {
+ ret = (result < 0) ? 0 : 1;
+ }
+
+ /* No space on the lvmlock lv for a new lease. */
+ if (result == -EMSGSIZE)
+ ret = _extend_sanlock_lv(cmd, vg, extend_mb);
+
+ daemon_reply_destroy(reply);
+
+ return ret;
+}
+
static int _activate_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg)
{
struct logical_volume *lv;
@@ -564,6 +620,7 @@ static int _init_vg_sanlock(struct cmd_context *cmd, struct volume_group *vg)
const char *vg_lock_args = NULL;
const char *lock_lv_name = LVMLOCKD_SANLOCK_LV_NAME;
const char *opts = NULL;
+ int extend_mb;
int result;
int ret;
@@ -572,7 +629,16 @@ static int _init_vg_sanlock(struct cmd_context *cmd, struct volume_group *vg)
if (!_lvmlockd_connected)
return 0;
- if (!_create_sanlock_lv(cmd, vg, lock_lv_name)) {
+ /*
+ * Automatic extension of the sanlock lv is disabled by
+ * setting sanlock_lv_extend to 0. Zero won't work as
+ * an initial size, so in this case, use the default as
+ * the initial size.
+ */
+ if (!(extend_mb = find_config_tree_int(cmd, global_sanlock_lv_extend_CFG, NULL)))
+ extend_mb = DEFAULT_SANLOCK_LV_EXTEND_MB;
+
+ if (!_create_sanlock_lv(cmd, vg, lock_lv_name, extend_mb)) {
log_error("Failed to create internal lv.");
return 0;
}
@@ -1938,8 +2004,6 @@ static int _init_lv_sanlock(struct cmd_context *cmd, struct volume_group *vg,
daemon_reply reply;
const char *reply_str;
const char *lv_lock_args = NULL;
- int refreshed = 0;
- int extended = 0;
int result;
int ret;
@@ -1949,7 +2013,7 @@ static int _init_lv_sanlock(struct cmd_context *cmd, struct volume_group *vg,
return 0;
id_write_format(lv_id, lv_uuid, sizeof(lv_uuid));
- retry:
+
reply = _lockd_send("init_lv",
"pid = %d", getpid(),
"vg_name = %s", vg->name,
@@ -1972,20 +2036,10 @@ static int _init_lv_sanlock(struct cmd_context *cmd, struct volume_group *vg,
if (result == -EMSGSIZE) {
/*
- * No space on the lvmlock lv for a new lease.
- * Check if another host has extended lvmlock,
- * and extend lvmlock if needed.
+ * No space on the lvmlock lv for a new lease; this should have been
+ * detected earlier by handle_sanlock_lv().
*/
- if (!refreshed++) {
- log_debug("Refresh lvmlock");
- _refresh_sanlock_lv(cmd, vg);
- goto retry;
- }
- if (!extended++) {
- log_debug("Extend lvmlock");
- _extend_sanlock_lv(cmd, vg);
- goto retry;
- }
+ log_error("No sanlock space for lock for LV %s/%s", vg->name, lv_name);
goto out;
}
@@ -1994,15 +2048,13 @@ static int _init_lv_sanlock(struct cmd_context *cmd, struct volume_group *vg,
goto out;
}
- reply_str = daemon_reply_str(reply, "lv_lock_args", NULL);
- if (!reply_str) {
+ if (!(reply_str = daemon_reply_str(reply, "lv_lock_args", NULL))) {
log_error("lv_lock_args not returned");
ret = 0;
goto out;
}
- lv_lock_args = dm_pool_strdup(cmd->mem, reply_str);
- if (!lv_lock_args) {
+ if (!(lv_lock_args = dm_pool_strdup(cmd->mem, reply_str))) {
log_error("lv_lock_args allocation failed");
ret = 0;
}
@@ -2044,9 +2096,8 @@ static int _free_lv_sanlock(struct cmd_context *cmd, struct volume_group *vg,
ret = (result < 0) ? 0 : 1;
}
- if (!ret) {
+ if (!ret)
log_error("_free_lv_sanlock lvmlockd result %d", result);
- }
daemon_reply_destroy(reply);
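
The size arithmetic in _create_sanlock_lv() and _extend_sanlock_lv() converts the configured MiB value into extents and sectors respectively. A self-contained check of that conversion; the 4 MiB extent size and the starting LV size are illustrative values, and SECTOR_SIZE is 512 bytes as LVM assumes:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE 512

int main(void)
{
	int extend_mb = 256;                 /* DEFAULT_SANLOCK_LV_EXTEND_MB */
	uint64_t extent_size = 8192;         /* VG extent size in sectors (4 MiB), illustrative */
	uint64_t lv_size = 524288;           /* current lvmlock LV size in sectors (256 MiB) */

	/* _create_sanlock_lv(): lp.extents = (extend_mb * 1024 * 1024) / (vg->extent_size * SECTOR_SIZE) */
	uint64_t extents = ((uint64_t)extend_mb * 1024 * 1024) / (extent_size * SECTOR_SIZE);

	/* _extend_sanlock_lv(): lp.size = lv->size + ((extend_mb * 1024 * 1024) / SECTOR_SIZE) */
	uint64_t new_size = lv_size + ((uint64_t)extend_mb * 1024 * 1024) / SECTOR_SIZE;

	printf("initial lvmlock LV: %llu extents\n", (unsigned long long)extents);    /* 64 */
	printf("after one extension: %llu sectors\n", (unsigned long long)new_size);  /* 1048576 */
	return 0;
}

With the 256 MiB default this yields 64 extents of 4 MiB for the initial lvmlock LV and grows it by 524288 sectors per extension.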
diff --git a/lib/locking/lvmlockd.h b/lib/locking/lvmlockd.h
index aea4f20b2..88d4d49dd 100644
--- a/lib/locking/lvmlockd.h
+++ b/lib/locking/lvmlockd.h
@@ -145,6 +145,8 @@ int lockd_init_lv_args(struct cmd_context *cmd, struct volume_group *vg,
const char *lockd_running_lock_type(struct cmd_context *cmd);
+int handle_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg);
+
#else /* LVMLOCKD_SUPPORT */
static inline void lvmlockd_set_socket(const char *sock)
@@ -269,6 +271,11 @@ static inline const char *lockd_running_lock_type(struct cmd_context *cmd)
return NULL;
}
+static inline int handle_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg)
+{
+ return 0;
+}
+
#endif /* LVMLOCKD_SUPPORT */
#endif
diff --git a/tools/lvcreate.c b/tools/lvcreate.c
index d1b95a6cf..ec9b312b1 100644
--- a/tools/lvcreate.c
+++ b/tools/lvcreate.c
@@ -1514,6 +1514,13 @@ int lvcreate(struct cmd_context *cmd, int argc, char **argv)
lp.pool_name ? : "with generated name", lp.vg_name, lp.segtype->name);
}
+ if (vg->lock_type && !strcmp(vg->lock_type, "sanlock")) {
+ if (!handle_sanlock_lv(cmd, vg)) {
+ log_error("No space for sanlock lock, extend the internal lvmlock LV.");
+ goto_out;
+ }
+ }
+
if (seg_is_thin_volume(&lp))
log_verbose("Making thin LV %s in pool %s in VG %s%s%s using segtype %s",
lp.lv_name ? : "with generated name",