From d804bcadcb448879f31c32363970d9b70b742b9a Mon Sep 17 00:00:00 2001
From: Lennart Poettering
Date: Tue, 8 Jun 2021 23:17:53 +0200
Subject: journal: don't try to reuse already calculated hash between files
 with keyed hash feature

When suppressing duplicate fields between files we so far tried to
reuse the already known hash value of the data fields between files.
This was fine as long as we used the same hash function everywhere.
However, since addition of the keyed hash feature for journal files
this doesn't work anymore, since the hashes will be different for
different files.

Fixes: #19172
(cherry picked from commit 2e1a8a5dab8b5519c079c9bed54fc682aa4095b0)
---
 src/libsystemd/sd-journal/sd-journal.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/libsystemd/sd-journal/sd-journal.c b/src/libsystemd/sd-journal/sd-journal.c
index ffc7af4087..e43947a7c8 100644
--- a/src/libsystemd/sd-journal/sd-journal.c
+++ b/src/libsystemd/sd-journal/sd-journal.c
@@ -2984,7 +2984,13 @@ _public_ int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_
                 if (JOURNAL_HEADER_CONTAINS(of->header, n_fields) && le64toh(of->header->n_fields) <= 0)
                         continue;
 
-                r = journal_file_find_data_object_with_hash(of, odata, ol, le64toh(o->data.hash), NULL, NULL);
+                /* We can reuse the hash from our current file only on old-style journal files
+                 * without keyed hashes. On new-style files we have to calculate the hash anew, to
+                 * take the per-file hash seed into consideration. */
+                if (!JOURNAL_HEADER_KEYED_HASH(j->unique_file->header) && !JOURNAL_HEADER_KEYED_HASH(of->header))
+                        r = journal_file_find_data_object_with_hash(of, odata, ol, le64toh(o->data.hash), NULL, NULL);
+                else
+                        r = journal_file_find_data_object(of, odata, ol, NULL, NULL);
                 if (r < 0)
                         return r;
                 if (r > 0) {
--
cgit v1.2.1
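
For context, an illustrative sketch (not part of the patch, and not systemd
code): journal files with the keyed hash feature derive their data hash from a
per-file seed (systemd keys siphash24 with the file's file_id), so the same
field payload hashes differently in different files. The toy program below
uses a hypothetical seeded FNV-1a variant in place of the real keyed hash to
demonstrate why a hash cached from one file is useless for lookups in another:

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for a per-file keyed hash; the real journal code
 * uses siphash24 keyed with the file's 128-bit file_id. */
static uint64_t keyed_hash(uint64_t seed, const void *data, size_t len) {
        const uint8_t *p = data;
        uint64_t h = 0xcbf29ce484222325ULL ^ seed;  /* FNV offset basis mixed with the seed */

        for (size_t i = 0; i < len; i++) {
                h ^= p[i];
                h *= 0x100000001b3ULL;              /* FNV prime */
        }
        return h;
}

int main(void) {
        const char *field = "MESSAGE=hello";
        uint64_t seed_a = 0x1111111111111111ULL;    /* stands in for file A's hash seed */
        uint64_t seed_b = 0x2222222222222222ULL;    /* stands in for file B's hash seed */

        uint64_t ha = keyed_hash(seed_a, field, strlen(field));
        uint64_t hb = keyed_hash(seed_b, field, strlen(field));

        /* Same data, different per-file seeds: the hashes disagree, so a value
         * computed for file A cannot index file B's data hash table. */
        printf("file A: %016" PRIx64 "\nfile B: %016" PRIx64 "\n", ha, hb);
        return ha == hb;  /* exits 0 because the hashes differ */
}

This is exactly the situation the patch handles: when either file uses keyed
hashes, journal_file_find_data_object() is called so the target file recomputes
the hash under its own seed, instead of reusing o->data.hash from the current file.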