diff options
author | Uli Schlachter <psychon@znc.in> | 2021-03-07 07:42:28 +0100 |
---|---|---|
committer | Ran Benita <ran@unusedvar.com> | 2021-03-09 11:00:13 +0200 |
commit | 40c00b472144d1684d2fb97cafef39ef59f21b28 (patch) | |
tree | a7d8ad463ed4831a806d6548bb4b388e28c0caad /src/x11/util.c | |
parent | 82a5bdc43c7bd942b20b1ecf453db980d0a75530 (diff) | |
download | xorg-lib-libxkbcommon-40c00b472144d1684d2fb97cafef39ef59f21b28.tar.gz |
xkb_x11_keymap_new_from_device: Less X11 round-trips
On my system, calling xkb_x11_keymap_new_from_device() did 78 round trips to the
X11 server, which seems excessive. This commit brings this number down to about
9 to 10 round trips.
The existing functions adopt_atom() and adopt_atoms() guarantee that the atom
was adopted by the time they return. Thus, each call to these functions must do
a round-trip. However, none of the callers need this guarantee.
This commit makes "atom adopting" asynchronous: Only some time later is the atom
actually adopted. Until then, it is in some pending "limbo" state.
This actually fixes a TODO in the comments.
Fixes: https://github.com/xkbcommon/libxkbcommon/issues/216
Signed-off-by: Uli Schlachter <psychon@znc.in>
Diffstat (limited to 'src/x11/util.c')
-rw-r--r-- | src/x11/util.c | 160 |
1 file changed, 94 insertions, 66 deletions
diff --git a/src/x11/util.c b/src/x11/util.c index 660d885..766e9a0 100644 --- a/src/x11/util.c +++ b/src/x11/util.c @@ -169,14 +169,9 @@ struct x11_atom_cache { size_t len; }; -bool -adopt_atoms(struct xkb_context *ctx, xcb_connection_t *conn, - const xcb_atom_t *from, xkb_atom_t *to, const size_t count) +static struct x11_atom_cache * +get_cache(struct xkb_context *ctx, xcb_connection_t *conn) { - enum { SIZE = 128 }; - xcb_get_atom_name_cookie_t cookies[SIZE]; - const size_t num_batches = ROUNDUP(count, SIZE) / SIZE; - if (!ctx->x11_atom_cache) { ctx->x11_atom_cache = calloc(1, sizeof(struct x11_atom_cache)); } @@ -186,79 +181,112 @@ adopt_atoms(struct xkb_context *ctx, xcb_connection_t *conn, cache->conn = conn; cache->len = 0; } + return cache; +} - memset(to, 0, count * sizeof(*to)); - - /* Send and collect the atoms in batches of reasonable SIZE. */ - for (size_t batch = 0; batch < num_batches; batch++) { - const size_t start = batch * SIZE; - const size_t stop = MIN((batch + 1) * SIZE, count); - - /* Send. */ - for (size_t i = start; i < stop; i++) { - bool cache_hit = false; - if (cache) { - for (size_t c = 0; c < cache->len; c++) { - if (cache->cache[c].from == from[i]) { - to[i] = cache->cache[c].to; - cache_hit = true; - break; - } - } +void +x11_atom_interner_init(struct x11_atom_interner *interner, + struct xkb_context *ctx, xcb_connection_t *conn) +{ + interner->had_error = false; + interner->ctx = ctx; + interner->conn = conn; + interner->num_pending = 0; + interner->num_copies = 0; +} + +void +x11_atom_interner_adopt_atom(struct x11_atom_interner *interner, + const xcb_atom_t atom, xkb_atom_t *out) +{ + *out = 0; + + /* Can be NULL in case the malloc failed. */ + struct x11_atom_cache *cache = get_cache(interner->ctx, interner->conn); + +retry: + + /* Already in the cache? 
*/ + if (cache) { + for (size_t c = 0; c < cache->len; c++) { + if (cache->cache[c].from == atom) { + *out = cache->cache[c].to; + return; } - if (!cache_hit && from[i] != XCB_ATOM_NONE) - cookies[i % SIZE] = xcb_get_atom_name(conn, from[i]); } + } - /* Collect. */ - for (size_t i = start; i < stop; i++) { - xcb_get_atom_name_reply_t *reply; + /* Already pending? */ + for (size_t i = 0; i < interner->num_pending; i++) { + if (interner->pending[i].from == atom) { + if (interner->num_copies == ARRAY_SIZE(interner->copies)) { + x11_atom_interner_round_trip(interner); + goto retry; + } - if (from[i] == XCB_ATOM_NONE) - continue; + size_t idx = interner->num_copies++; + interner->copies[idx].from = atom; + interner->copies[idx].out = out; + return; + } + } - /* Was filled from cache. */ - if (to[i] != 0) - continue; + /* We have to send a GetAtomName request */ + if (interner->num_pending == ARRAY_SIZE(interner->pending)) { + x11_atom_interner_round_trip(interner); + assert(interner->num_pending < ARRAY_SIZE(interner->pending)); + } + size_t idx = interner->num_pending++; + interner->pending[idx].from = atom; + interner->pending[idx].out = out; + interner->pending[idx].cookie = xcb_get_atom_name(interner->conn, atom); +} - reply = xcb_get_atom_name_reply(conn, cookies[i % SIZE], NULL); - if (!reply) - goto err_discard; +void +x11_atom_interner_adopt_atoms(struct x11_atom_interner *interner, + const xcb_atom_t *from, xkb_atom_t *to, + size_t count) +{ + for (size_t i = 0; i < count; i++) { + x11_atom_interner_adopt_atom(interner, from[i], &to[i]); + } +} - to[i] = xkb_atom_intern(ctx, - xcb_get_atom_name_name(reply), - xcb_get_atom_name_name_length(reply)); - free(reply); +void x11_atom_interner_round_trip(struct x11_atom_interner *interner) { + struct xkb_context *ctx = interner->ctx; + xcb_connection_t *conn = interner->conn; - if (to[i] == XKB_ATOM_NONE) - goto err_discard; + /* Can be NULL in case the malloc failed. 
*/ + struct x11_atom_cache *cache = get_cache(ctx, conn); - if (cache && cache->len < ARRAY_SIZE(cache->cache)) { - size_t idx = cache->len++; - cache->cache[idx].from = from[i]; - cache->cache[idx].to = to[i]; - } + for (size_t i = 0; i < interner->num_pending; i++) { + xcb_get_atom_name_reply_t *reply; + reply = xcb_get_atom_name_reply(conn, interner->pending[i].cookie, NULL); + if (!reply) { + interner->had_error = true; continue; + } + xcb_atom_t x11_atom = interner->pending[i].from; + xkb_atom_t atom = xkb_atom_intern(ctx, + xcb_get_atom_name_name(reply), + xcb_get_atom_name_name_length(reply)); + free(reply); - /* - * If we don't discard the uncollected replies, they just - * sit in the XCB queue waiting forever. Sad. - */ -err_discard: - for (size_t j = i + 1; j < stop; j++) - if (from[j] != XCB_ATOM_NONE) - xcb_discard_reply(conn, cookies[j % SIZE].sequence); - return false; + if (cache && cache->len < ARRAY_SIZE(cache->cache)) { + size_t idx = cache->len++; + cache->cache[idx].from = x11_atom; + cache->cache[idx].to = atom; } - } - return true; -} + *interner->pending[i].out = atom; -bool -adopt_atom(struct xkb_context *ctx, xcb_connection_t *conn, xcb_atom_t atom, - xkb_atom_t *out) -{ - return adopt_atoms(ctx, conn, &atom, out, 1); + for (size_t j = 0; j < interner->num_copies; j++) { + if (interner->copies[j].from == x11_atom) + *interner->copies[j].out = atom; + } + } + + interner->num_pending = 0; + interner->num_copies = 0; } |