1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
|
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#pragma once
#include <inttypes.h>
#include <stdbool.h>
#include <sys/types.h>
#include "sd-id128.h"
#include "sd-journal.h"
#include "hashmap.h"
#include "journal-def.h"
#include "journal-file.h"
#include "list.h"
#include "set.h"
typedef struct Match Match;
typedef struct Location Location;
typedef struct Directory Directory;
/* Kind of a Match node: either one concrete data match, or a logical
 * term that combines a list of child matches (see struct Match below). */
typedef enum MatchType {
        MATCH_DISCRETE,         /* a single concrete match against entry data */
        MATCH_OR_TERM,          /* term is satisfied if any child match applies */
        MATCH_AND_TERM          /* term is satisfied only if all child matches apply */
} MatchType;
/* One node of the match expression tree built up by the match APIs.
 * Depending on 'type' it is either a concrete data match (the data/size/hash
 * fields are used) or an OR/AND term (the child list is used). */
struct Match {
        MatchType type;
        Match *parent;                  /* enclosing term — presumably NULL at the tree root; TODO confirm */
        LIST_FIELDS(Match, matches);    /* linkage of this node within its parent's child list */

        /* For concrete matches */
        char *data;                     /* raw match data to compare entry items against */
        size_t size;                    /* length of 'data' in bytes */
        uint64_t hash; /* old-style jenkins hash. New-style siphash is different per file, hence won't be cached here */

        /* For terms */
        LIST_HEAD(Match, matches);      /* head of the list of child matches combined by this term */
};
/* A position in the journal stream. The individual coordinates (sequence
 * number, realtime timestamp, monotonic timestamp, XOR hash) are each
 * optional; the *_set bitfields record which of them carry valid data. */
struct Location {
        LocationType type;

        /* Validity flags for the coordinate fields below */
        bool seqnum_set:1;
        bool realtime_set:1;
        bool monotonic_set:1;
        bool xor_hash_set:1;

        uint64_t seqnum;                /* valid iff seqnum_set */
        sd_id128_t seqnum_id;           /* ID qualifying the seqnum — presumably the file's seqnum ID; TODO confirm */
        uint64_t realtime;              /* valid iff realtime_set; presumably wallclock µs — TODO confirm units */
        uint64_t monotonic;             /* valid iff monotonic_set */
        sd_id128_t boot_id;             /* presumably the boot the monotonic timestamp is relative to — TODO confirm */
        uint64_t xor_hash;              /* valid iff xor_hash_set */
};
/* A journal directory being tracked; stored in the directories_by_path /
 * directories_by_wd hashmaps of sd_journal. */
struct Directory {
        char *path;                     /* directory path (hash key of directories_by_path) */
        int wd;                         /* watch descriptor (hash key of directories_by_wd) — presumably from inotify_fd; TODO confirm */
        bool is_root;
        unsigned last_seen_generation;  /* generation counter value when last encountered; compare sd_journal.generation */
};
/* The journal context object behind the public sd_journal* handle: the set
 * of open journal files, the current read location and match expression,
 * plus iteration state for fields and unique values. */
struct sd_journal {
        int toplevel_fd;                /* directory fd lookups are made relative to — TODO confirm */
        char *path;
        char *prefix;
        char *namespace;                /* journal namespace, or NULL — TODO confirm NULL means default */

        OrderedHashmap *files;          /* the open JournalFile objects */
        IteratedCache *files_cache;     /* iteration cache over 'files' */
        MMapCache *mmap;                /* shared mmap cache for all files */

        /* Current read position and the file/field it refers to */
        Location current_location;
        JournalFile *current_file;
        uint64_t current_field;

        /* Root of the match expression tree, up to three term levels deep */
        Match *level0, *level1, *level2;

        pid_t original_pid;             /* pid the object was created in — presumably to detect use across fork(); TODO confirm */

        int inotify_fd;                 /* fd watching journal directories for changes */
        unsigned current_invalidate_counter, last_invalidate_counter;
        usec_t last_process_usec;       /* timestamp of the last inotify processing — TODO confirm */
        unsigned generation;            /* bumped on rescans; compare Directory.last_seen_generation */

        /* Iterating through unique fields and their data values */
        char *unique_field;
        JournalFile *unique_file;
        uint64_t unique_offset;

        /* Iterating through known fields */
        JournalFile *fields_file;
        uint64_t fields_offset;
        uint64_t fields_hash_table_index;
        char *fields_buffer;
        size_t fields_buffer_allocated;

        int flags;                      /* SD_JOURNAL_* open flags — TODO confirm */

        bool on_network:1;              /* journal files live on a network filesystem — TODO confirm */
        bool no_new_files:1;
        bool no_inotify:1;
        bool unique_file_lost:1; /* File we were iterating over got
                                    removed, and there were no more
                                    files, so sd_j_enumerate_unique
                                    will return a value equal to 0. */
        bool fields_file_lost:1; /* same as unique_file_lost, for the fields iteration — TODO confirm */
        bool has_runtime_files:1;
        bool has_persistent_files:1;

        size_t data_threshold;          /* field size limit applied when returning data — TODO confirm */

        Hashmap *directories_by_path;   /* path → Directory */
        Hashmap *directories_by_wd;     /* inotify wd → Directory */

        Hashmap *errors;                /* per-error-code occurrence tracking — TODO confirm semantics */
};
/* Renders the current match expression tree of 'j' as a string.
 * Presumably returns a newly allocated string the caller must free —
 * TODO confirm ownership against the implementation. */
char *journal_make_match_string(sd_journal *j);

/* Prints header information about the journal files of 'j'. */
void journal_print_header(sd_journal *j);

/* Iterates over all data fields of the current entry, storing each field in
 * 'data' with its length in 'l'. The first non-positive return value of
 * sd_journal_enumerate_data() ends the loop and is left in 'retval' for the
 * caller to inspect (0 = done, < 0 = error). */
#define JOURNAL_FOREACH_DATA_RETVAL(j, data, l, retval) \
        for (sd_journal_restart_data(j); ((retval) = sd_journal_enumerate_data((j), &(data), &(l))) > 0; )
/* Tests whether an error code merely means a field could not be handed out
 * (too large, or compressed with an algorithm we don't support), rather than
 * being a real failure. Accepts both positive and negative errno values. */
static inline bool JOURNAL_ERRNO_IS_UNAVAILABLE_FIELD(int r) {
        int e = abs(r);

        return e == ENOBUFS ||          /* Field or decompressed field too large */
               e == E2BIG ||            /* Field too large for pointer width */
               e == EPROTONOSUPPORT;    /* Unsupported compression */
}
|