author     Thomas Haller <thaller@redhat.com>    2022-10-05 12:22:51 +0200
committer  Thomas Haller <thaller@redhat.com>    2022-10-14 17:48:24 +0200
commit     88724ff169a1796d4cdfbd4f116e08769de1c12f (patch)
tree       7b54a1f723a446c2004018bedc819a214282fcc6
parent     2f5a2dc7327f969003e4c526ce3a0c9e83d75204 (diff)
download   NetworkManager-88724ff169a1796d4cdfbd4f116e08769de1c12f.tar.gz
libnm: add nm_client_wait_shutdown() function for cleaning up NMClient
Add a fire-and-forget function to wait for shutdown to be complete.

It's not entirely trivial to ensure all resources of NMClient are
cleaned up. That matters only if NMClient uses a temporary GMainContext
that the user wants to release while the application continues. For
example, to do some short-lived operations on a worker thread.

It's also not trivial because glib provides no convenient API to
integrate a GMainContext in another GMainContext. We have that code as
nm_utils_g_main_context_create_integrate_source(), so add a helper
function to allow the user to do this.

The function allows the caller to omit the callback, in which case the
caller wouldn't know when shutdown is complete. That is still useful,
however, when integrating the client's context into the caller's
context, so that the client's context gets automatically iterated until
completion.

The following test script will run out of file descriptors when
wait_shutdown() is not used:

    #!/bin/python

    import gi

    gi.require_version("NM", "1.0")
    from gi.repository import NM, GLib

    for i in range(1200):
        print(f">>>{i}")

        ctx = GLib.MainContext()
        ctx.push_thread_default()
        nmc = NM.Client.new()
        ctx.pop_thread_default()

        def cb(unused, result, i):
            try:
                NM.Client.wait_shutdown_finish(result)
            except Exception:
                # cannot happen
                assert False
            else:
                print(f">>>>> {i} complete")

        nmc.wait_shutdown(True, None, cb, i)

        while GLib.MainContext.default().iteration(False):
            pass
-rwxr-xr-x  examples/python/gi/gmaincontext.py            115
-rw-r--r--  src/libnm-client-impl/libnm.ver                 2
-rw-r--r--  src/libnm-client-impl/nm-client.c             293
-rw-r--r--  src/libnm-client-impl/tests/test-nm-client.c  238
-rw-r--r--  src/libnm-client-public/nm-client.h            12
5 files changed, 618 insertions(+), 42 deletions(-)
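
The commit message above describes the intended usage pattern: an NMClient on a (possibly temporary) GMainContext, where the caller asks to be notified once all of the client's resources are wrapped up. The following C sketch is an editor's illustration and not part of this commit; the helper names (teardown_client(), wait_shutdown_cb()) are made up, and it assumes it runs on a thread whose thread-default context differs from the client's context and keeps being iterated (for example the default main context).

    /* Illustrative sketch (not part of this commit): request a callback once
     * the NMClient has fully shut down, integrating its context into the
     * current thread-default context. */
    #include <NetworkManager.h>

    static void
    wait_shutdown_cb(GObject *source, GAsyncResult *result, gpointer user_data)
    {
        GError *error = NULL;

        /* "source" is NULL for this operation; the result carries the outcome. */
        if (!nm_client_wait_shutdown_finish(result, &error)) {
            /* This only fails due to cancellation. */
            g_printerr("wait_shutdown: %s\n", error->message);
            g_clear_error(&error);
        } else
            g_print("NMClient shutdown complete\n");
    }

    static void
    teardown_client(NMClient *client)
    {
        /* Hook the client's context into the current thread-default context
         * and drop our reference; the callback fires (on the thread-default
         * context) once all internal resources are released. */
        nm_client_wait_shutdown(client,
                                TRUE /* integrate_maincontext */,
                                NULL /* cancellable */,
                                wait_shutdown_cb,
                                NULL);
        g_object_unref(client);
    }

After that, the caller simply keeps iterating its own context, just as the script in the commit message does with GLib.MainContext.default().iteration().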
diff --git a/examples/python/gi/gmaincontext.py b/examples/python/gi/gmaincontext.py
index 90a9fa2542..de5d78ee3e 100755
--- a/examples/python/gi/gmaincontext.py
+++ b/examples/python/gi/gmaincontext.py
@@ -329,7 +329,7 @@ def make_call(nmc):
###############################################################################
-def destroy_nmc(nmc_holder):
+def destroy_nmc(nmc_holder, destroy_mode):
# The way to shutdown an NMClient is just by unrefing it.
#
# At any moment, an NMClient instance can have pending async operations.
@@ -362,53 +362,77 @@ def destroy_nmc(nmc_holder):
(nmc,) = nmc_holder
nmc_holder.clear()
- if not nmc:
- log(f"[destroy_nmc]: nothing to destroy")
- return
-
log(
- f"[destroy_nmc]: destroying NMClient {nmc}: pyref={sys.getrefcount(nmc)}, ref_count={nmc.ref_count}"
+ f"[destroy_nmc]: destroying NMClient {nmc}: pyref={sys.getrefcount(nmc)}, ref_count={nmc.ref_count}, destroy_mode={destroy_mode}"
)
- ctx = nmc.get_main_context()
+ if destroy_mode == 0:
+ ctx = nmc.get_main_context()
- finished = []
+ finished = []
- def _weak_ref_cb():
- log(f"[destroy_nmc]: context busy watcher is gone")
- finished.clear()
- finished.append(True)
+ def _weak_ref_cb():
+ log(f"[destroy_nmc]: context busy watcher is gone")
+ finished.clear()
+ finished.append(True)
- # We take a weak ref on the context-busy-watcher object and give up
- # our reference on nmc. This must be the last reference, which initiates
- # the shutdown of the NMClient.
- weak_ref = nmc.get_context_busy_watcher().weak_ref(_weak_ref_cb)
- del nmc
+ # We take a weak ref on the context-busy-watcher object and give up
+ # our reference on nmc. This must be the last reference, which initiates
+ # the shutdown of the NMClient.
+ weak_ref = nmc.get_context_busy_watcher().weak_ref(_weak_ref_cb)
+ del nmc
- def _timeout_cb(unused):
- if not finished:
- # Somebody else holds a reference to the NMClient and keeps
- # it alive. We cannot properly clean up.
- log(
- f"[destroy_nmc]: ERROR: timeout waiting for context busy watcher to be gone"
- )
- finished.append(False)
- return False
+ def _timeout_cb(unused):
+ if not finished:
+ # Somebody else holds a reference to the NMClient and keeps
+ # it alive. We cannot properly clean up.
+ log(
+ f"[destroy_nmc]: ERROR: timeout waiting for context busy watcher to be gone"
+ )
+ finished.append(False)
+ return False
- timeout_source = GLib.timeout_source_new(1000)
- timeout_source.set_callback(_timeout_cb)
- timeout_source.attach(ctx)
+ timeout_source = GLib.timeout_source_new(1000)
+ timeout_source.set_callback(_timeout_cb)
+ timeout_source.attach(ctx)
- while not finished:
- log(f"[destroy_nmc]: iterating main context")
- ctx.iteration(True)
+ while not finished:
+ log(f"[destroy_nmc]: iterating main context")
+ ctx.iteration(True)
- timeout_source.destroy()
+ timeout_source.destroy()
- log(f"[destroy_nmc]: done: {finished[0]}")
- if not finished[0]:
- weak_ref.unref()
- raise Exception("Failure to destroy NMClient: something keeps it alive")
+ log(f"[destroy_nmc]: done: {finished[0]}")
+ if not finished[0]:
+ weak_ref.unref()
+ raise Exception("Failure to destroy NMClient: something keeps it alive")
+
+ else:
+
+ if destroy_mode == 1:
+ ctx = GLib.MainContext.default()
+ else:
+ # Run the maincontext of the NMClient.
+ ctx = nmc.get_main_context()
+ with MainLoopRun("destroy_nmc", ctx, 2) as r:
+
+ def _wait_shutdown_cb(source_unused, result, r):
+ try:
+ NM.Client.wait_shutdown_finish(result)
+ except Exception as e:
+ if error_is_cancelled(e):
+ log(
+ f"[destroy_nmc]: wait_shutdown() completed with cancellation after timeout"
+ )
+ else:
+ log(f"[destroy_nmc]: wait_shutdown() completed with error: {e}")
+ else:
+ log(f"[destroy_nmc]: wait_shutdown() completed with success")
+
+ r.quit()
+
+ nmc.wait_shutdown(True, r.cancellable, _wait_shutdown_cb, r)
+ del nmc
###############################################################################
@@ -425,11 +449,18 @@ def run1():
make_call(nmc)
log()
- # To cleanup the NMClient, we need to give up the reference. Move
- # it to a list, and destroy_nmc() will take care of it.
- nmc_holder = [nmc]
- del nmc
- destroy_nmc(nmc_holder)
+ if not nmc:
+ log(f"[destroy_nmc]: nothing to destroy")
+ else:
+ # To cleanup the NMClient, we need to give up the reference. Move
+ # it to a list, and destroy_nmc() will take care of it.
+ nmc_holder = [nmc]
+ del nmc
+
+ # In the example, there are three modes for how the destroy is
+ # implemented.
+ destroy_nmc(nmc_holder, destroy_mode=1)
+
log()
log("done")
except Exception as e:
diff --git a/src/libnm-client-impl/libnm.ver b/src/libnm-client-impl/libnm.ver
index 4a5455ede0..4d0c2241ad 100644
--- a/src/libnm-client-impl/libnm.ver
+++ b/src/libnm-client-impl/libnm.ver
@@ -1847,5 +1847,7 @@ global:
libnm_1_42_0 {
global:
+ nm_client_wait_shutdown;
+ nm_client_wait_shutdown_finish;
nm_setting_ovs_interface_get_ofport_request;
} libnm_1_40_0;
diff --git a/src/libnm-client-impl/nm-client.c b/src/libnm-client-impl/nm-client.c
index a16f9873db..cd5621d5cc 100644
--- a/src/libnm-client-impl/nm-client.c
+++ b/src/libnm-client-impl/nm-client.c
@@ -8832,6 +8832,299 @@ nm_client_async_initable_iface_init(GAsyncInitableIface *iface)
iface->init_finish = init_finish;
}
+/*****************************************************************************/
+
+typedef struct {
+ GCancellable *cancellable;
+ GSource *integration_source;
+ GTask *task;
+ GSource *idle_source;
+
+ /* A weakref to nm_client_get_context_busy_watcher() */
+ GWeakRef weak_ref;
+
+ gulong cancellable_id;
+
+ guint64 log_ptr;
+
+ int result;
+} WaitShutdownData;
+
+G_LOCK_DEFINE_STATIC(wait_shutdown_mutex);
+
+static NM_CACHED_QUARK_FCN("nm.client.wait-shutdown", _wait_shutdown_get_quark);
+
+static void
+_wait_shutdown_data_free(gpointer user_data)
+{
+ WaitShutdownData *data = user_data;
+
+ nm_g_slice_free(data);
+}
+
+static gboolean
+_wait_shutdown_idle_cb(gpointer user_data)
+{
+ WaitShutdownData *data = user_data;
+ gs_unref_object GTask *task = NULL;
+ int result;
+
+ nm_clear_g_source_inst(&data->idle_source);
+
+ task = g_steal_pointer(&data->task);
+
+ result = g_atomic_int_get(&data->result);
+ nm_assert(NM_IN_SET(result, 0, 1));
+
+ NML_DBUS_LOG(_NML_NMCLIENT_LOG_LEVEL_COERCE(NML_DBUS_LOG_LEVEL_TRACE),
+ "nmclient[" NM_HASH_OBFUSCATE_PTR_FMT
+ "]: wait-shutdown (" NM_HASH_OBFUSCATE_PTR_FMT ")"
+ "%s",
+ data->log_ptr,
+ NM_HASH_OBFUSCATE_PTR(data),
+ !result ? " cancelled" : " completed");
+
+ if (!result) {
+ GError *error = NULL;
+
+ nm_utils_error_set_cancelled(&error, FALSE, NULL);
+ g_task_return_error(task, error);
+ } else
+ g_task_return_boolean(task, TRUE);
+
+ return G_SOURCE_CONTINUE;
+}
+
+static void
+_wait_shutdown_data_clear(WaitShutdownData *data, gboolean result)
+{
+ gs_unref_object GObject *context_busy_watcher = NULL;
+
+ if (!g_atomic_int_compare_and_exchange(&data->result, -1, !!result)) {
+ /* There was a race and the result is already provided. Nothing to
+ * do, except, if "result" indicates cancellation (FALSE), set data->result
+ * to FALSE too. Aside from that, the completion is already in progress. */
+ if (!result)
+ g_atomic_int_compare_and_exchange(&data->result, TRUE, FALSE);
+ return;
+ }
+
+ nm_clear_g_signal_handler(data->cancellable, &data->cancellable_id);
+ nm_clear_g_source_inst(&data->integration_source);
+ g_clear_object(&data->cancellable);
+
+ if (!result) {
+ /* This was a cancellation. We likely still have the qdata tracked.
+ * We need to remove it. */
+
+ context_busy_watcher = g_weak_ref_get(&data->weak_ref);
+ if (context_busy_watcher) {
+ GPtrArray *qdata_arr;
+
+ G_LOCK(wait_shutdown_mutex);
+ qdata_arr = g_object_get_qdata(context_busy_watcher, _wait_shutdown_get_quark());
+ if (qdata_arr && g_ptr_array_remove_fast(qdata_arr, data)) {
+ /* data->task had an additional reference, we return it now. */
+ g_object_unref(data->task);
+ }
+ G_UNLOCK(wait_shutdown_mutex);
+ }
+ }
+
+ g_weak_ref_clear(&data->weak_ref);
+
+ /* We don't complete right away, instead always schedule an idle action
+ * on the caller's context. */
+ data->idle_source = nm_g_source_attach(
+ nm_g_idle_source_new(G_PRIORITY_DEFAULT_IDLE, _wait_shutdown_idle_cb, data, NULL),
+ g_task_get_context(data->task));
+}
+
+static void
+_wait_shutdown_qdata_cb(gpointer user_data)
+{
+ gs_unref_ptrarray GPtrArray *qdata_arr = user_data;
+
+ while (qdata_arr->len > 0) {
+ WaitShutdownData *data;
+
+ data = g_ptr_array_remove_index_fast(qdata_arr, qdata_arr->len - 1);
+ _wait_shutdown_data_clear(data, TRUE);
+
+ /* data->task had an additional reference, we return it now. */
+ g_object_unref(data->task);
+ }
+}
+
+static void
+_wait_shutdown_cancelled_cb(GCancellable *cancellable, gpointer user_data)
+{
+ _wait_shutdown_data_clear(g_task_get_task_data(user_data), FALSE);
+}
+
+/**
+ * nm_client_wait_shutdown:
+ * @client: the #NMClient to shutdown.
+ * @integrate_maincontext: whether to hook the client's maincontext
+ * in the current thread default. Otherwise, you must ensure
+ * that the client's maincontext gets iterated so that it can complete.
+ * By integrating the maincontext in the current thread default, you
+ * may instead only iterate the latter.
+ * @cancellable: (allow-none): the #GCancellable to abort the shutdown.
+ * @callback: (nullable): a #GAsyncReadyCallback to call when the request
+ * is satisfied or %NULL if you don't care about the result of the
+ * method invocation.
+ * @user_data: the data to pass to @callback
+ *
+ * The way to stop #NMClient is by unrefing it. That will cancel all
+ * internally pending async operations. However, as async operations in
+ * NMClient use GTask, hence they cannot complete right away. Instead,
+ * their (internal) result callback still needs to be dispatched by iterating
+ * the client's main context.
+ *
+ * You thus cannot stop iterating the client's main context until
+ * everything is wrapped up. nm_client_get_context_busy_watcher()
+ * helps to watch how long that will be.
+ *
+ * This function automates that waiting. Like all glib async operations
+ * this honors the current g_main_context_get_thread_default().
+ *
+ * In any case, to complete the shutdown, nm_client_get_main_context()
+ * must be iterated. If the current g_main_context_get_thread_default() is
+ * the same as nm_client_get_main_context(), then @integrate_maincontext
+ * is ignored. In that case, the caller is required to iterate the context
+ * for shutdown to complete. Otherwise, if g_main_context_get_thread_default()
+ * differs from nm_client_get_main_context() and @integrate_maincontext
+ * is %FALSE, the caller must make sure that both contexts are iterated
+ * until completion. Otherwise, if @integrate_maincontext is %TRUE, then
+ * nm_client_get_main_context() will be integrated in g_main_context_get_thread_default().
+ * This means the caller gives up nm_client_get_main_context() until the waiting
+ * completes; the function will acquire the context and hook it into
+ * g_main_context_get_thread_default().
+ * It is a bug to request @integrate_maincontext while having nm_client_get_main_context()
+ * acquired or otherwise iterated, because a context can only be acquired once
+ * at a time.
+ *
+ * Shutdown can only complete after all references to @client were released.
+ *
+ * It is possible to call this function multiple times for the same client.
+ * But note that with @integrate_maincontext the client's context is acquired,
+ * which can only be done once at a time.
+ *
+ * It is permissible to start waiting before the object is fully initialized.
+ *
+ * The function really allows two separate things: to get a notification (callback) when
+ * shutdown is complete, and to integrate the client's context into another context.
+ * The latter case is useful if the client has a separate context and you hand it
+ * over to another GMainContext to wrap up.
+ *
+ * The main use is to have an NMClient and a separate GMainContext on a worker
+ * thread. When being done, you can hand over the cleanup of the context
+ * to g_main_context_default(), assuming that the main thread iterates
+ * the default context. In that case, you don't need to care about passing
+ * a callback to know when shutdown completed.
+ *
+ * Since: 1.42
+ */
+void
+nm_client_wait_shutdown(NMClient *client,
+ gboolean integrate_maincontext,
+ GCancellable *cancellable,
+ GAsyncReadyCallback callback,
+ gpointer user_data)
+{
+ NMClientPrivate *priv;
+ WaitShutdownData *data;
+ gs_unref_object GTask *task = NULL;
+ GQuark quark = _wait_shutdown_get_quark();
+ GPtrArray *qdata_arr;
+ GSource *integration_source = NULL;
+
+ g_return_if_fail(NM_IS_CLIENT(client));
+ g_return_if_fail(!cancellable || G_IS_CANCELLABLE(cancellable));
+
+ priv = NM_CLIENT_GET_PRIVATE(client);
+
+ task = nm_g_task_new(NULL, cancellable, nm_client_wait_shutdown, callback, user_data);
+
+ if (integrate_maincontext && g_task_get_context(task) != priv->main_context) {
+ integration_source = nm_utils_g_main_context_create_integrate_source(priv->main_context);
+ g_return_if_fail(integration_source);
+ g_source_attach(integration_source, g_task_get_context(task));
+ }
+
+ data = g_slice_new(WaitShutdownData);
+ *data = (WaitShutdownData){
+ .cancellable = nm_g_object_ref(cancellable),
+ .task = g_object_ref(task),
+ .result = -1,
+ .integration_source = integration_source,
+ .log_ptr = NM_HASH_OBFUSCATE_PTR(client),
+ };
+ /* The "data" itself stays alive as long as the task lives. That's important, because
+ * the callbacks _wait_shutdown_qdata_cb and _wait_shutdown_cancelled_cb
+ * rely on accessing "data", which must live long enough. */
+ g_task_set_task_data(task, data, _wait_shutdown_data_free);
+
+ g_weak_ref_init(&data->weak_ref, priv->context_busy_watcher);
+
+ NML_DBUS_LOG(_NML_NMCLIENT_LOG_LEVEL_COERCE(NML_DBUS_LOG_LEVEL_TRACE),
+ "nmclient[" NM_HASH_OBFUSCATE_PTR_FMT
+ "]: wait-shutdown (" NM_HASH_OBFUSCATE_PTR_FMT ")"
+ "%s",
+ data->log_ptr,
+ NM_HASH_OBFUSCATE_PTR(data),
+ integration_source ? " (integrated main source)" : "");
+
+ /* I don't think g_object_weak_ref() + GWeakRef can actually be used in
+ * a race-free way here, because g_object_weak_ref() has no GDestroyNotify
+ * and cannot keep task alive to avoid races. Instead, implement a weak pointer
+ * notification via the qdata.
+ *
+ * Yes, getting cancellation thread-safe is rather complicated here! I think
+ * the code is correct though, and you can cancel the operation from
+ * any thread without races. */
+ G_LOCK(wait_shutdown_mutex);
+ qdata_arr = g_object_get_qdata(priv->context_busy_watcher, quark);
+ if (!qdata_arr) {
+ qdata_arr = g_ptr_array_new();
+ g_object_set_qdata_full(priv->context_busy_watcher,
+ quark,
+ qdata_arr,
+ _wait_shutdown_qdata_cb);
+ }
+ /* data->task gets an additional reference, take it now. */
+ g_object_ref(data->task);
+ g_ptr_array_add(qdata_arr, data);
+ G_UNLOCK(wait_shutdown_mutex);
+
+ if (data->cancellable) {
+ /* Take an additional reference on task. */
+ data->cancellable_id = g_cancellable_connect(data->cancellable,
+ G_CALLBACK(_wait_shutdown_cancelled_cb),
+ g_object_ref(task),
+ g_object_unref);
+ }
+}
+
+/**
+ * nm_client_wait_shutdown_finish:
+ * @result: a #GAsyncResult obtained from the #GAsyncReadyCallback passed to nm_client_wait_shutdown()
+ * @error: return location for error or %NULL
+ *
+ * Returns: %TRUE if waiting completed successfully. In that case, all resources of the
+ * NMClient are wrapped up and released. This can only fail due to user cancellation.
+ *
+ * Since: 1.42
+ */
+gboolean
+nm_client_wait_shutdown_finish(GAsyncResult *result, GError **error)
+{
+ g_return_val_if_fail(nm_g_task_is_valid(result, NULL, nm_client_wait_shutdown), FALSE);
+
+ return g_task_propagate_boolean(G_TASK(result), error);
+}
+
/*****************************************************************************
* Backported symbols. Usually, new API is only added in new major versions
* of NetworkManager (that is, on "master" branch). Sometimes however, we might
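
The documentation block above also describes a fire-and-forget variant: no callback, with the client's context handed over from a worker thread to the default main context. A sketch of that pattern, again an editor's illustration rather than part of this patch and with a hypothetical worker_done() helper, might look like this; it assumes the worker previously pushed its private context as thread-default and that the main thread keeps iterating g_main_context_default().

    /* Illustrative sketch (not part of this patch): a worker thread hands the
     * remaining NMClient cleanup over to the default main context. */
    static void
    worker_done(NMClient *client, GMainContext *worker_context)
    {
        /* Pop the worker's thread-default context first: integration requires
         * that the client's context is not acquired elsewhere, and popping
         * makes the global default context the thread-default again. */
        g_main_context_pop_thread_default(worker_context);

        /* No callback: the integration source attached to the default context
         * keeps the client's context iterated (by the main thread) until
         * shutdown completes, then removes itself. */
        nm_client_wait_shutdown(client, TRUE, NULL, NULL, NULL);

        g_object_unref(client);
        g_main_context_unref(worker_context);
    }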
diff --git a/src/libnm-client-impl/tests/test-nm-client.c b/src/libnm-client-impl/tests/test-nm-client.c
index a43e98cd18..07b0789b55 100644
--- a/src/libnm-client-impl/tests/test-nm-client.c
+++ b/src/libnm-client-impl/tests/test-nm-client.c
@@ -8,7 +8,10 @@
#include <sys/types.h>
#include <signal.h>
+#include "libnm-client-impl/nm-dbus-helpers.h"
+
#include "libnm-client-test/nm-test-libnm-utils.h"
+#include "libnm-glib-aux/nm-time-utils.h"
static struct {
GMainLoop *loop;
@@ -1349,6 +1352,240 @@ test_connection_invalid(void)
/*****************************************************************************/
+typedef struct {
+ NMClient *nmc;
+ NMOptionBool completed;
+} WaitShutdownNmcInitData;
+
+static void
+_wait_shutdown_cb(GObject *source, GAsyncResult *result, gpointer user_data)
+{
+ gs_free_error GError *error = NULL;
+ gboolean b;
+ guint *p_pending_ops = user_data;
+
+ g_assert(!source);
+
+ if (nmtst_get_rand_bool()) {
+ b = nm_client_wait_shutdown_finish(result, &error);
+ g_assert(b == !error);
+ }
+
+ g_assert_cmpint(*p_pending_ops, >, 0);
+ (*p_pending_ops)--;
+}
+
+static void
+_wait_shutdown_nmc_init_cb(GObject *source, GAsyncResult *result, gpointer user_data)
+{
+ WaitShutdownNmcInitData *data = user_data;
+ gs_free_error GError *error = NULL;
+ gboolean b;
+
+ g_assert(data->nmc == NM_CLIENT(source));
+ g_assert(data->completed == NM_OPTION_BOOL_DEFAULT);
+
+ b = g_async_initable_init_finish(G_ASYNC_INITABLE(source), result, &error);
+ nmtst_assert_success(b, error);
+ g_assert(b == TRUE);
+
+ g_assert(NM_FLAGS_HAS(nm_client_get_instance_flags(data->nmc),
+ NM_CLIENT_INSTANCE_FLAGS_INITIALIZED_GOOD));
+
+ data->completed = TRUE;
+}
+
+static void
+test_client_wait_shutdown(void)
+{
+ gs_unref_ptrarray GPtrArray *contexts =
+ g_ptr_array_new_with_free_func((GDestroyNotify) g_main_context_unref);
+ nmtstc_auto_service_cleanup NMTstcServiceInfo *sinfo = NULL;
+ int i_run;
+ const int N_RUN = 50;
+ gs_free_error GError *error = NULL;
+ gs_unref_object GDBusConnection *dbus_connection = NULL;
+ guint pending_ops = 0;
+ int i;
+ gint64 until_msec;
+
+ /* The test creates NMClient instances in various ways (with different
+ * context of g_main_context_default()) and with sync/async/no initialization.
+ *
+ * Then it calls nm_client_wait_shutdown() and checks that all callbacks
+ * got invoked.
+ *
+ * You can also manually bump N_RUN and convince yourself that there are no
+ * leaks.
+ */
+
+ sinfo = nmtstc_service_init();
+ if (!nmtstc_service_available(sinfo))
+ return;
+
+ /* Have 5 contexts for creating clients (+ the default one). */
+ g_ptr_array_add(contexts, g_main_context_ref(g_main_context_default()));
+ for (i = 0; i < 5; i++)
+ g_ptr_array_add(contexts, g_main_context_new());
+
+ dbus_connection = g_bus_get_sync(_nm_dbus_bus_type(), NULL, &error);
+ nmtst_assert_success(dbus_connection, error);
+
+ for (i_run = 0; i_run < N_RUN; i_run++) {
+ gs_unref_object GCancellable *init_cancellable = g_cancellable_new();
+ gs_unref_object NMClient *nmc = NULL;
+ nm_auto_pop_gmaincontext GMainContext *client_context = NULL;
+ gboolean b;
+ gboolean context_integrated = FALSE;
+ gs_unref_object GCancellable *cancellable_1 = NULL;
+ GMainContext *ctx;
+
+ /* Choose a random context for the client. */
+ ctx = contexts->pdata[nmtst_get_rand_uint32() % contexts->len];
+ if (ctx == g_main_context_default()) {
+ /* don't use the main context explicitly. */
+ } else if (!g_main_context_acquire(ctx)) {
+ /* the context is in use, we cannot use it. */
+ } else {
+ g_main_context_release(ctx);
+ client_context = g_main_context_ref(ctx);
+ g_main_context_push_thread_default(client_context);
+ }
+
+ nmc = g_object_new(
+ NM_TYPE_CLIENT,
+ NM_CLIENT_INSTANCE_FLAGS,
+ nmtst_get_rand_one_case_in(5) ? NM_CLIENT_INSTANCE_FLAGS_NO_AUTO_FETCH_PERMISSIONS : 0,
+ NM_CLIENT_DBUS_CONNECTION,
+ nmtst_get_rand_one_case_in(3) ? NULL : dbus_connection,
+ NULL);
+
+ if (nmtst_get_rand_bool()) {
+ /* randomly already pop the context before initializing nmc. */
+ nm_clear_pointer(&client_context, nm_g_main_context_pop_and_unref);
+ }
+
+ if (nmtst_get_rand_bool()) {
+ nm_auto_pop_gmaincontext GMainContext *context =
+ nm_g_main_context_push_thread_default(NULL);
+
+ /* It is also allowed to start waiting before initialization started. Randomly do that. */
+ pending_ops++;
+ nm_client_wait_shutdown(
+ nmc,
+ nmtst_true_once(&context_integrated, nmtst_get_rand_bool()),
+ (cancellable_1 = nmtst_get_rand_bool() ? g_cancellable_new() : NULL),
+ _wait_shutdown_cb,
+ &pending_ops);
+ }
+
+ /* randomly do some initialization... */
+ switch (nmtst_get_rand_uint32() % 4) {
+ case 0:
+ /* Don't start initialization. */
+ break;
+ case 1:
+ /* Do sync initialization. */
+ b = g_initable_init(G_INITABLE(nmc),
+ nmtst_get_rand_bool() ? init_cancellable : NULL,
+ &error);
+ nmtst_assert_success(b, error);
+ break;
+ case 2:
+ /* Start async initialization in the background, but don't wait for it complete. */
+ g_async_initable_init_async(G_ASYNC_INITABLE(nmc),
+ G_PRIORITY_DEFAULT,
+ nmtst_get_rand_bool() ? init_cancellable : NULL,
+ NULL,
+ NULL);
+ break;
+ default:
+ {
+ /* Do async initialization, and iterate the main context until done. */
+ WaitShutdownNmcInitData data = {
+ .completed = NM_OPTION_BOOL_DEFAULT,
+ .nmc = nmc,
+ };
+
+ g_async_initable_init_async(G_ASYNC_INITABLE(nmc),
+ G_PRIORITY_DEFAULT,
+ nmtst_get_rand_bool() ? init_cancellable : NULL,
+ _wait_shutdown_nmc_init_cb,
+ &data);
+
+ while (data.completed != TRUE) {
+ g_main_context_iteration(context_integrated ? g_main_context_default()
+ : nm_client_get_main_context(nmc),
+ TRUE);
+ }
+ break;
+ }
+ }
+
+ if (!context_integrated && nmtst_get_rand_bool()) {
+ nm_clear_g_cancellable(&cancellable_1);
+ }
+
+ if (nmtst_get_rand_bool())
+ nm_clear_pointer(&client_context, nm_g_main_context_pop_and_unref);
+
+ {
+ gs_unref_object GCancellable *cancellable = NULL;
+
+ pending_ops++;
+ nm_client_wait_shutdown(
+ nmc,
+ nmtst_true_once(&context_integrated, nmtst_get_rand_bool()),
+ (cancellable = nmtst_get_rand_bool() ? g_cancellable_new() : NULL),
+ _wait_shutdown_cb,
+ &pending_ops);
+ }
+
+ if (nmtst_get_rand_bool())
+ g_clear_object(&nmc);
+
+ for (i = 0; i < (int) contexts->len; i++) {
+ GMainContext *context = contexts->pdata[i];
+
+ if (nmtst_get_rand_bool()) {
+ /* iterate next time. */
+ continue;
+ }
+
+ if (!g_main_context_acquire(context)) {
+ /* It's integrated, skip. */
+ continue;
+ }
+ g_main_context_release(context);
+
+ while (g_main_context_iteration(context, FALSE)) {
+ /* pass */
+ }
+ }
+ }
+
+ until_msec = nm_utils_get_monotonic_timestamp_msec() + 10000;
+ while (pending_ops > 0 && nm_utils_get_monotonic_timestamp_msec() < until_msec) {
+ for (i = contexts->len; i > 0; i--) {
+ GMainContext *context = contexts->pdata[i - 1];
+
+ if (!g_main_context_acquire(context)) {
+ /* integrated, skip. */
+ continue;
+ }
+ g_main_context_release(context);
+
+ while (g_main_context_iteration(context, FALSE)) {
+ /* pass */
+ }
+ }
+ }
+
+ g_assert_cmpint(pending_ops, ==, 0);
+}
+
+/*****************************************************************************/
+
NMTST_DEFINE();
int
@@ -1369,6 +1606,7 @@ main(int argc, char **argv)
g_test_add_func("/libnm/activate-virtual", test_activate_virtual);
g_test_add_func("/libnm/device-connection-compatibility", test_device_connection_compatibility);
g_test_add_func("/libnm/connection/invalid", test_connection_invalid);
+ g_test_add_func("/libnm/test_client_wait_shutdown", test_client_wait_shutdown);
return g_test_run();
}
diff --git a/src/libnm-client-public/nm-client.h b/src/libnm-client-public/nm-client.h
index 02481a8145..a65047f8a4 100644
--- a/src/libnm-client-public/nm-client.h
+++ b/src/libnm-client-public/nm-client.h
@@ -508,6 +508,18 @@ gboolean nm_utils_file_is_certificate(const char *filename);
gboolean nm_utils_file_is_private_key(const char *filename, gboolean *out_encrypted);
gboolean nm_utils_file_is_pkcs12(const char *filename);
+/*****************************************************************************/
+
+NM_AVAILABLE_IN_1_42
+void nm_client_wait_shutdown(NMClient *client,
+ gboolean integrate_maincontext,
+ GCancellable *cancellable,
+ GAsyncReadyCallback callback,
+ gpointer user_data);
+
+NM_AVAILABLE_IN_1_42
+gboolean nm_client_wait_shutdown_finish(GAsyncResult *result, GError **error);
+
G_END_DECLS
#endif /* __NM_CLIENT_H__ */
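
Since the two symbols are marked NM_AVAILABLE_IN_1_42 and exported in the libnm_1_42_0 stanza, applications that must also build against older libnm can guard their use at compile time. The sketch below is an editor's illustration, not part of this patch; it assumes libnm's usual NM_CHECK_VERSION() macro and uses a hypothetical client_teardown() helper (projects that pin NM_VERSION_MAX_ALLOWED may additionally need to raise it to silence availability warnings).

    #include <NetworkManager.h>

    static void
    client_teardown(NMClient *client)
    {
    #if NM_CHECK_VERSION(1, 42, 0)
        /* Integrate the client's context and let shutdown wind down
         * asynchronously on the current thread-default context. */
        nm_client_wait_shutdown(client, TRUE, NULL, NULL, NULL);
        g_object_unref(client);
    #else
        /* Older libnm: unref and keep iterating the client's context until
         * nm_client_get_context_busy_watcher() is gone (see the updated
         * examples/python/gi/gmaincontext.py for the manual pattern). */
        g_object_unref(client);
    #endif
    }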