author     Seif Lotfy <seif@lotfy.com>  2013-01-19 00:00:26 +0100
committer  Seif Lotfy <seif@lotfy.com>  2013-01-19 00:00:26 +0100
commit     b03a8e5aba22da16068f9a58d46da6b0c940c5cd (patch)
tree       1175b14970c8feb4e973f498753b20696b83cec6
parent     403487224559779581fe279514b8ea1de8afc229 (diff)
download   zeitgeist-b03a8e5aba22da16068f9a58d46da6b0c940c5cd.tar.gz
Move files around
-rw-r--r--  libzeitgeist/db-reader.vala     953
-rw-r--r--  libzeitgeist/errors.vala         53
-rw-r--r--  libzeitgeist/sql-schema.vala    674
-rw-r--r--  libzeitgeist/sql.vala           478
-rw-r--r--  libzeitgeist/table-lookup.vala  155
-rw-r--r--  libzeitgeist/where-clause.vala  288
6 files changed, 2601 insertions(+), 0 deletions(-)
diff --git a/libzeitgeist/db-reader.vala b/libzeitgeist/db-reader.vala
new file mode 100644
index 00000000..9e67cd0e
--- /dev/null
+++ b/libzeitgeist/db-reader.vala
@@ -0,0 +1,953 @@
+/* db-reader.vala
+ *
+ * Copyright © 2011-2012 Collabora Ltd.
+ * By Siegfried-Angel Gevatter Pujals <siegfried@gevatter.com>
+ * By Seif Lotfy <seif@lotfy.com>
+ * Copyright © 2011 Canonical Ltd.
+ * By Michal Hruby <michal.hruby@canonical.com>
+ *
+ * Based upon a Python implementation (2009-2011) by:
+ * Markus Korn <thekorn@gmx.net>
+ * Mikkel Kamstrup Erlandsen <mikkel.kamstrup@gmail.com>
+ * Seif Lotfy <seif@lotfy.com>
+ * Siegfried-Angel Gevatter Pujals <siegfried@gevatter.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+using Zeitgeist;
+using Zeitgeist.SQLite;
+using Zeitgeist.Utils;
+
+namespace Zeitgeist
+{
+
+public class DbReader : Object
+{
+
+ public Zeitgeist.SQLite.Database database { get; construct; }
+ protected unowned Sqlite.Database db;
+
+ protected TableLookup interpretations_table;
+ protected TableLookup manifestations_table;
+ protected TableLookup mimetypes_table;
+ protected TableLookup actors_table;
+
+ public DbReader () throws EngineError
+ {
+ Object (database: new Zeitgeist.SQLite.Database.read_only ());
+ }
+
+ construct
+ {
+ database.set_deletion_callback (delete_from_cache);
+ db = database.database;
+
+ try
+ {
+ interpretations_table = new TableLookup (database, "interpretation");
+ manifestations_table = new TableLookup (database, "manifestation");
+ mimetypes_table = new TableLookup (database, "mimetype");
+ actors_table = new TableLookup (database, "actor");
+ }
+ catch (EngineError err)
+ {
+ // FIXME: propagate this properly?
+ critical ("TableLookup initialization failed: %s", err.message);
+ }
+ }
+
+ protected Event get_event_from_row (Sqlite.Statement stmt, uint32 event_id)
+ throws EngineError
+ {
+ Event event = new Event ();
+ event.id = event_id;
+ event.timestamp = stmt.column_int64 (EventViewRows.TIMESTAMP);
+ event.interpretation = interpretations_table.get_value (
+ stmt.column_int (EventViewRows.INTERPRETATION));
+ event.manifestation = manifestations_table.get_value (
+ stmt.column_int (EventViewRows.MANIFESTATION));
+ event.actor = actors_table.get_value (
+ stmt.column_int (EventViewRows.ACTOR));
+ event.origin = stmt.column_text (
+ EventViewRows.EVENT_ORIGIN_URI);
+
+ // Load payload
+ unowned uint8[] data = (uint8[]) stmt.column_blob(
+ EventViewRows.PAYLOAD);
+ data.length = stmt.column_bytes(EventViewRows.PAYLOAD);
+ if (data != null)
+ {
+ event.payload = new ByteArray();
+ event.payload.append(data);
+ }
+ return event;
+ }
+
+ protected Subject get_subject_from_row (Sqlite.Statement stmt)
+ throws EngineError
+ {
+ Subject subject = new Subject ();
+ subject.uri = stmt.column_text (EventViewRows.SUBJECT_URI);
+ subject.text = stmt.column_text (EventViewRows.SUBJECT_TEXT);
+ subject.storage = stmt.column_text (EventViewRows.SUBJECT_STORAGE);
+ subject.origin = stmt.column_text (EventViewRows.SUBJECT_ORIGIN_URI);
+ subject.current_uri = stmt.column_text (
+ EventViewRows.SUBJECT_CURRENT_URI);
+ subject.current_origin = stmt.column_text (
+ EventViewRows.SUBJECT_CURRENT_ORIGIN_URI);
+ subject.interpretation = interpretations_table.get_value (
+ stmt.column_int (EventViewRows.SUBJECT_INTERPRETATION));
+ subject.manifestation = manifestations_table.get_value (
+ stmt.column_int (EventViewRows.SUBJECT_MANIFESTATION));
+ subject.mimetype = mimetypes_table.get_value (
+ stmt.column_int (EventViewRows.SUBJECT_MIMETYPE));
+ return subject;
+ }
+
+ public GenericArray<Event?> get_events(uint32[] event_ids,
+ BusName? sender=null) throws EngineError
+ {
+        // TODO: Consider whether we still want the cache. This should be
+        // done once everything is working, since it adds unneeded
+        // complexity. We'd also want to benchmark it again first: we may
+        // have better options for enhancing SQLite performance now, and
+        // event processing is faster now that it's written in C.
+
+ if (event_ids.length == 0)
+ return new GenericArray<Event?> ();
+
+ var sql_event_ids = database.get_sql_string_from_event_ids (event_ids);
+ string sql = """
+ SELECT * FROM event_view
+ WHERE id IN (%s)
+ """.printf (sql_event_ids);
+
+ Sqlite.Statement stmt;
+ int rc = db.prepare_v2 (sql, -1, out stmt);
+ database.assert_query_success (rc, "SQL error");
+
+ var events = new HashTable<uint32, Event?> (direct_hash, direct_equal);
+
+ // Create Events and Subjects from rows
+ while ((rc = stmt.step ()) == Sqlite.ROW)
+ {
+ uint32 event_id = (uint32) stmt.column_int64 (EventViewRows.ID);
+ Event? event = events.lookup (event_id);
+ if (event == null)
+ {
+ event = get_event_from_row(stmt, event_id);
+ events.insert (event_id, event);
+ }
+ Subject subject = get_subject_from_row(stmt);
+ event.add_subject(subject);
+ }
+ database.assert_query_success (rc, "Error", Sqlite.DONE);
+
+ // Sort events according to the sequence of event_ids
+ var results = new GenericArray<Event?> ();
+ results.length = event_ids.length;
+ int i = 0;
+ foreach (var id in event_ids)
+ {
+ results.set(i++, events.lookup (id));
+ }
+
+ return results;
+ }
+
+ public uint32[] find_event_ids_for_clause (WhereClause where,
+ uint max_events, uint result_type) throws EngineError
+ {
+ string sql = "SELECT id FROM event_view ";
+ string where_sql = "";
+ if (!where.is_empty ())
+ {
+ where_sql = "WHERE " + where.get_sql_conditions ();
+ }
+
+ switch (result_type)
+ {
+ case ResultType.MOST_RECENT_EVENTS:
+ sql += where_sql + " ORDER BY ";
+ break;
+ case ResultType.LEAST_RECENT_EVENTS:
+ sql += where_sql + " ORDER BY ";
+ break;
+ case ResultType.MOST_RECENT_EVENT_ORIGIN:
+ sql = group_and_sort ("origin", where);
+ break;
+ case ResultType.LEAST_RECENT_EVENT_ORIGIN:
+ sql = group_and_sort ("origin", where);
+ break;
+ case ResultType.MOST_POPULAR_EVENT_ORIGIN:
+ sql = group_and_sort ("origin", where, false);
+ break;
+ case ResultType.LEAST_POPULAR_EVENT_ORIGIN:
+ sql = group_and_sort ("origin", where, true);
+ break;
+ case ResultType.MOST_RECENT_SUBJECTS:
+ sql = group_and_sort ("subj_id", where);
+ break;
+ case ResultType.LEAST_RECENT_SUBJECTS:
+ sql = group_and_sort ("subj_id", where);
+ break;
+ case ResultType.MOST_POPULAR_SUBJECTS:
+ sql = group_and_sort ("subj_id", where, false);
+ break;
+ case ResultType.LEAST_POPULAR_SUBJECTS:
+ sql = group_and_sort ("subj_id", where, true);
+ break;
+ case ResultType.MOST_RECENT_CURRENT_URI:
+ sql = group_and_sort ("subj_id_current", where);
+ break;
+ case ResultType.LEAST_RECENT_CURRENT_URI:
+ sql = group_and_sort ("subj_id_current", where);
+ break;
+ case ResultType.MOST_POPULAR_CURRENT_URI:
+ sql = group_and_sort ("subj_id_current", where, false);
+ break;
+ case ResultType.LEAST_POPULAR_CURRENT_URI:
+ sql = group_and_sort ("subj_id_current", where, true);
+ break;
+ case ResultType.MOST_RECENT_ACTOR:
+ sql = group_and_sort ("actor", where);
+ break;
+ case ResultType.LEAST_RECENT_ACTOR:
+ sql = group_and_sort ("actor", where);
+ break;
+ case ResultType.MOST_POPULAR_ACTOR:
+ sql = group_and_sort ("actor", where, false);
+ break;
+ case ResultType.LEAST_POPULAR_ACTOR:
+ sql = group_and_sort ("actor", where, true);
+ break;
+ case ResultType.OLDEST_ACTOR:
+ sql = group_and_sort ("actor", where, null, "min");
+ break;
+ case ResultType.MOST_RECENT_ORIGIN:
+ sql = group_and_sort ("subj_origin", where);
+ break;
+ case ResultType.LEAST_RECENT_ORIGIN:
+ sql = group_and_sort ("subj_origin", where);
+ break;
+ case ResultType.MOST_POPULAR_ORIGIN:
+ sql = group_and_sort ("subj_origin", where, false);
+ break;
+ case ResultType.LEAST_POPULAR_ORIGIN:
+ sql = group_and_sort ("subj_origin", where, true);
+ break;
+ case ResultType.MOST_RECENT_CURRENT_ORIGIN:
+ sql = group_and_sort ("subj_origin_current", where);
+ break;
+ case ResultType.LEAST_RECENT_CURRENT_ORIGIN:
+ sql = group_and_sort ("subj_origin_current", where);
+ break;
+ case ResultType.MOST_POPULAR_CURRENT_ORIGIN:
+ sql = group_and_sort ("subj_origin_current", where, false);
+ break;
+ case ResultType.LEAST_POPULAR_CURRENT_ORIGIN:
+ sql = group_and_sort ("subj_origin_current", where, true);
+ break;
+ case ResultType.MOST_RECENT_SUBJECT_INTERPRETATION:
+ sql = group_and_sort ("subj_interpretation", where);
+ break;
+ case ResultType.LEAST_RECENT_SUBJECT_INTERPRETATION:
+ sql = group_and_sort ("subj_interpretation", where);
+ break;
+ case ResultType.MOST_POPULAR_SUBJECT_INTERPRETATION:
+ sql = group_and_sort ("subj_interpretation", where, false);
+ break;
+ case ResultType.LEAST_POPULAR_SUBJECT_INTERPRETATION:
+ sql = group_and_sort ("subj_interpretation", where, true);
+ break;
+ case ResultType.MOST_RECENT_MIMETYPE:
+ sql = group_and_sort ("subj_mimetype", where);
+ break;
+ case ResultType.LEAST_RECENT_MIMETYPE:
+ sql = group_and_sort ("subj_mimetype", where);
+ break;
+ case ResultType.MOST_POPULAR_MIMETYPE:
+ sql = group_and_sort ("subj_mimetype", where, false);
+ break;
+ case ResultType.LEAST_POPULAR_MIMETYPE:
+ sql = group_and_sort ("subj_mimetype", where, true);
+ break;
+ default:
+ string error_message = "Invalid ResultType.";
+ warning (error_message);
+ throw new EngineError.INVALID_ARGUMENT (error_message);
+ }
+
+ // complete the sort rule
+ bool time_asc = ResultType.is_sort_order_asc ((ResultType) result_type);
+ sql += " timestamp %s".printf ((time_asc) ? "ASC" : "DESC");
+
+ if (where.get_is_simple ())
+ sql = sql.replace ("FROM event_view", "FROM event");
+
+ int rc;
+ Sqlite.Statement stmt;
+
+ rc = db.prepare_v2 (sql, -1, out stmt);
+ database.assert_query_success(rc, "SQL error");
+
+ var arguments = where.get_bind_arguments ();
+ for (int i = 0; i < arguments.length; ++i)
+ stmt.bind_text (i + 1, arguments[i]);
+
+#if EXPLAIN_QUERIES
+ database.explain_query (stmt);
+#endif
+
+ uint32[] event_ids = {};
+
+ while ((rc = stmt.step()) == Sqlite.ROW)
+ {
+ var event_id = (uint32) uint64.parse(
+ stmt.column_text (EventViewRows.ID));
+            // Rows belonging to the same event are contiguous in the
+            // database, so comparing against the last appended ID is
+            // enough to avoid duplicates.
+ if (event_ids.length == 0 || event_ids[event_ids.length-1] != event_id) {
+ event_ids += event_id;
+ if (event_ids.length == max_events) break;
+ }
+ }
+ if (rc != Sqlite.DONE && rc != Sqlite.ROW)
+ {
+ string error_message = "Error in find_event_ids: %d, %s".printf (
+ rc, db.errmsg ());
+ warning (error_message);
+ database.assert_not_corrupt (rc);
+ throw new EngineError.DATABASE_ERROR (error_message);
+ }
+
+ return event_ids;
+ }
+
+ public uint32[] find_event_ids (TimeRange time_range,
+ GenericArray<Event> event_templates,
+ uint storage_state, uint max_events, uint result_type,
+ BusName? sender=null) throws EngineError
+ {
+ WhereClause where = get_where_clause_for_query (time_range,
+ event_templates, storage_state);
+
+ //if (!where.may_have_results ())
+ // return new uint32[0];
+
+ return find_event_ids_for_clause (where, max_events, result_type);
+ }
+
+ public GenericArray<Event?> find_events (TimeRange time_range,
+ GenericArray<Event> event_templates,
+ uint storage_state, uint max_events, uint result_type,
+ BusName? sender=null) throws EngineError
+ {
+ return get_events (find_event_ids (time_range, event_templates,
+ storage_state, max_events, result_type));
+ }
+
+ public WhereClause get_where_clause_for_query (TimeRange time_range,
+ GenericArray<Event> event_templates, uint storage_state) throws EngineError
+ {
+ WhereClause where = new WhereClause (WhereClause.Type.AND);
+
+ if (time_range.start != 0)
+ where.add (("timestamp >= %" + int64.FORMAT).printf(
+ time_range.start));
+ if (time_range.end != 0)
+ where.add (("timestamp <= %" + int64.FORMAT).printf(
+ time_range.end));
+
+ if (storage_state == StorageState.AVAILABLE ||
+ storage_state == StorageState.NOT_AVAILABLE)
+ {
+ where.add ("(subj_storage_state=? OR subj_storage_state IS NULL)",
+ storage_state.to_string ());
+ where.set_is_simple (false);
+ }
+ else if (storage_state != StorageState.ANY)
+ {
+ throw new EngineError.INVALID_ARGUMENT(
+ "Unknown storage state '%u'".printf(storage_state));
+ }
+
+ WhereClause tpl_conditions = get_where_clause_from_event_templates (
+ event_templates);
+ where.extend (tpl_conditions);
+
+ return where;
+ }
+
+ private struct RelatedUri {
+ public uint32 id;
+ public int64 timestamp;
+ public string uri;
+ public int32 counter;
+ }
+
+ public string[] find_related_uris (TimeRange time_range,
+ GenericArray<Event> event_templates,
+ GenericArray<Event> result_event_templates,
+ uint storage_state, uint max_results, uint result_type,
+ BusName? sender=null) throws EngineError
+ {
+ /**
+ * Return a list of subject URIs commonly used together with events
+         * matching the given templates, considering data from within the
+         * indicated time range.
+         * Only URIs for subjects matching the indicated `result_event_templates`
+         * and `storage_state` are returned.
+ */
+ if (result_type == RelevantResultType.RECENT ||
+ result_type == RelevantResultType.RELATED)
+ {
+            // Pick out the IDs of the relational events so we can use
+            // them as roots; the IDs are taken from the events that
+            // match event_templates.
+ uint32[] ids = find_event_ids (time_range, event_templates,
+ storage_state, 0, ResultType.LEAST_RECENT_EVENTS);
+
+ if (event_templates.length > 0 && ids.length == 0)
+ {
+ throw new EngineError.INVALID_ARGUMENT (
+ "No results found for the event_templates");
+ }
+
+            // Pick out the result IDs for the filtered results we want
+            // to take into account; these are taken from the events that
+            // match result_event_templates. If no result_event_templates
+            // are given, all results are allowed.
+ uint32[] result_ids;
+ result_ids = find_event_ids (time_range, result_event_templates,
+ storage_state, 0, ResultType.LEAST_RECENT_EVENTS);
+
+            // From here we create several graphs with a maximum depth of 2
+            // and push all the nodes (events) into one pot together.
+
+ uint32[] pot = new uint32[ids.length + result_ids.length];
+
+ for (uint32 i=0; i < ids.length; i++)
+ pot[i] = ids[i];
+ for (uint32 i=0; i < result_ids.length; i++)
+                pot[ids.length + i] = result_ids[i];
+
+ Sqlite.Statement stmt;
+
+ var sql_event_ids = database.get_sql_string_from_event_ids (pot);
+ string sql = """
+ SELECT id, timestamp, subj_uri FROM event_view
+ WHERE id IN (%s) ORDER BY timestamp ASC
+ """.printf (sql_event_ids);
+
+ int rc = db.prepare_v2 (sql, -1, out stmt);
+
+ database.assert_query_success(rc, "SQL error");
+
+ // FIXME: fix this ugly code
+ var temp_related_uris = new GenericArray<RelatedUri?>();
+
+ while ((rc = stmt.step()) == Sqlite.ROW)
+ {
+ RelatedUri ruri = RelatedUri(){
+ id = (uint32) uint64.parse(stmt.column_text (0)),
+ timestamp = stmt.column_int64 (1),
+ uri = stmt.column_text (2),
+ counter = 0
+ };
+ temp_related_uris.add (ruri);
+ }
+
+ // RelatedUri[] related_uris = new RelatedUri[temp_related_uris.length];
+ // for (int i=0; i<related_uris.length; i++)
+ // related_uris[i] = temp_related_uris[i];
+
+ database.assert_query_success (rc, "Error in find_related_uris",
+ Sqlite.DONE);
+
+ var uri_counter = new HashTable<string, RelatedUri?>(
+ str_hash, str_equal);
+
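+            // For each event (in timestamp order), look at a window of up
+            // to the five immediately preceding events. If one of them
+            // belongs to the root set (`ids`), every event in that window
+            // that also appears in `result_ids` counts towards its URI's
+            // relatedness score, and the most recent timestamp seen for
+            // that URI is kept.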
+ for (int i = 0; i < temp_related_uris.length; i++)
+ {
+ var window = new GenericArray<unowned RelatedUri?>();
+
+ bool count_in_window = false;
+ for (int j = int.max (0, i - 5);
+ j < int.min (i, temp_related_uris.length);
+ j++)
+ {
+ window.add(temp_related_uris[j]);
+ if (temp_related_uris[j].id in ids)
+ {
+ count_in_window = true;
+ break;
+ }
+ }
+
+ if (count_in_window)
+ {
+ for (int j = 0; j < window.length; j++)
+ {
+ if (window[j].id in result_ids)
+ {
+ if (uri_counter.lookup (window[j].uri) == null)
+ {
+ RelatedUri ruri = RelatedUri ()
+ {
+ id = window[j].id,
+ timestamp = window[j].timestamp,
+ uri = window[j].uri,
+ counter = 0
+ };
+ uri_counter.insert (window[j].uri, ruri);
+ }
+ uri_counter.lookup (window[j].uri).counter++;
+ if (uri_counter.lookup (window[j].uri).timestamp
+ < window[j].timestamp)
+ {
+ uri_counter.lookup (window[j].uri).timestamp =
+ window[j].timestamp;
+ }
+ }
+ }
+ }
+ }
+
+            // We have the big hashtable with the structs; now we sort them
+            // by usage count, limit the result, and then sort again.
+ List<RelatedUri?> temp_ruris = new List<RelatedUri?>();
+ List<RelatedUri?> values = new List<RelatedUri?>();
+
+ foreach (var uri in uri_counter.get_values())
+ values.append(uri);
+
+ values.sort ((a, b) => a.counter - b.counter);
+ values.sort ((a, b) => {
+ int64 delta = a.timestamp - b.timestamp;
+ if (delta < 0) return 1;
+ else if (delta > 0) return -1;
+ else return 0;
+ });
+
+ foreach (RelatedUri ruri in values)
+ {
+ if (temp_ruris.length() < max_results)
+ temp_ruris.append(ruri);
+ else
+ break;
+ }
+
+ // Sort by recency
+ if (result_type == RelevantResultType.RECENT)
+ temp_ruris.sort ((a, b) => {
+ int64 delta = a.timestamp - b.timestamp;
+ if (delta < 0) return 1;
+ else if (delta > 0) return -1;
+ else return 0;});
+
+ string[] results = new string[temp_ruris.length()];
+
+ int i = 0;
+ foreach (var uri in temp_ruris)
+ {
+ results[i] = uri.uri;
+ stdout.printf("%i %lld %s\n", uri.counter,
+ uri.timestamp,
+ uri.uri);
+ i++;
+ }
+
+ return results;
+ }
+ else
+ {
+ throw new EngineError.DATABASE_ERROR ("Unsupported RelevantResultType");
+ }
+ }
+
+ /**
+ * Clear all resources Engine is using (close database connection, etc.).
+ *
+ * After executing this method on an instance, no other function
+ * may be called.
+ */
+ public virtual void close ()
+ {
+ database.close ();
+ }
+
+ // Used by find_event_ids
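+    //
+    // Illustrative example (the exact SQL depends on the WhereClause):
+    // for ResultType.MOST_POPULAR_SUBJECTS this builds roughly
+    //
+    //   SELECT id FROM event NATURAL JOIN (
+    //       SELECT subj_id, max(timestamp) AS timestamp,
+    //              COUNT(subj_id) AS num_events
+    //       FROM event_view WHERE <conditions> GROUP BY subj_id)
+    //   GROUP BY subj_id ORDER BY num_events DESC,
+    //
+    // and find_event_ids_for_clause () appends the trailing
+    // "timestamp ASC/DESC" term.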
+ private string group_and_sort (string field, WhereClause where,
+ bool? count_asc=null, string aggregation_type="max")
+ {
+ string aggregation_sql = "";
+ string order_sql = "";
+ string where_sql = where.get_sql_conditions ();
+ if (count_asc != null)
+ {
+ aggregation_sql = ", COUNT(%s) AS num_events".printf (field);
+ order_sql = "num_events %s,".printf ((count_asc) ? "ASC" : "DESC");
+ }
+ if (count_asc != null || !where.has_non_timestamp_condition())
+ {
+ return """
+ SELECT id FROM event
+ NATURAL JOIN (
+ SELECT %s,
+ %s(timestamp) AS timestamp
+ %s
+ FROM event_view WHERE %s
+ GROUP BY %s)
+ GROUP BY %s
+ ORDER BY %s
+ """.printf (
+ field,
+ aggregation_type,
+ aggregation_sql,
+ where_sql,
+ field,
+ field,
+ order_sql);
+ }
+ return """
+ SELECT id, %s(timestamp) AS timestamp
+ FROM event_view WHERE %s AND %s IS NOT NULL
+ GROUP BY %s
+ ORDER BY
+ """.printf (
+ aggregation_type,
+ where_sql,
+ field,
+ field);
+ }
+
+ // Used by find_event_ids
+ public WhereClause get_where_clause_from_event_templates (
+ GenericArray<Event> templates) throws EngineError
+ {
+ WhereClause where = new WhereClause (WhereClause.Type.OR);
+ for (int i = 0; i < templates.length; ++i)
+ {
+ Event event_template = templates[i];
+ where.extend (
+ get_where_clause_from_event_template (event_template));
+ }
+ return where;
+ }
+
+ // Used by get_where_clause_from_event_templates
+ private WhereClause get_where_clause_from_event_template (Event template)
+ throws EngineError
+ {
+ WhereClause where = new WhereClause (WhereClause.Type.AND);
+
+ // Event ID
+ if (template.id != 0)
+ where.add ("id=?", template.id.to_string());
+
+ // Interpretation
+ if (!is_empty_string (template.interpretation))
+ {
+ assert_no_wildcard ("interpretation", template.interpretation);
+ WhereClause subwhere = get_where_clause_for_symbol (
+ "interpretation", template.interpretation,
+ interpretations_table);
+ if (!subwhere.is_empty ())
+ where.extend (subwhere);
+ }
+
+ // Manifestation
+ if (!is_empty_string (template.manifestation))
+ {
+ assert_no_wildcard ("manifestation", template.interpretation);
+ WhereClause subwhere = get_where_clause_for_symbol (
+ "manifestation", template.manifestation,
+ manifestations_table);
+ if (!subwhere.is_empty ())
+ where.extend (subwhere);
+ }
+
+ // Actor
+ if (!is_empty_string (template.actor))
+ {
+ string val = template.actor;
+ bool like = parse_wildcard (ref val);
+ bool negated = parse_negation (ref val);
+
+ if (like)
+ where.add_wildcard_condition ("actor", val, negated);
+ else
+ where.add_match_condition ("actor",
+ actors_table.id_try_string (val), negated);
+ }
+
+ // Origin
+ if (!is_empty_string (template.origin))
+ {
+ string val = template.origin;
+ bool like = parse_wildcard (ref val);
+ bool negated = parse_negation (ref val);
+ assert_no_noexpand (val, "origin");
+
+ if (like)
+ where.add_wildcard_condition ("origin", val, negated);
+ else
+ where.add_text_condition_subquery ("origin", val, negated);
+ }
+
+ // Subject templates within the same event template are AND'd
+ // See LP bug #592599.
+ for (int i = 0; i < template.num_subjects(); ++i)
+ {
+ Subject subject_template = template.subjects[i];
+
+ // Subject interpretation
+ if (!is_empty_string (subject_template.interpretation))
+ {
+ assert_no_wildcard ("subject interpretation",
+                    subject_template.interpretation);
+ WhereClause subwhere = get_where_clause_for_symbol (
+ "subj_interpretation", subject_template.interpretation,
+ interpretations_table);
+ if (!subwhere.is_empty ())
+ where.extend (subwhere);
+ }
+
+ // Subject manifestation
+ if (!is_empty_string (subject_template.manifestation))
+ {
+ assert_no_wildcard ("subject manifestation",
+ subject_template.manifestation);
+ WhereClause subwhere = get_where_clause_for_symbol (
+ "subj_manifestation", subject_template.manifestation,
+ manifestations_table);
+ if (!subwhere.is_empty ())
+ where.extend (subwhere);
+ }
+
+ // Mime-Type
+ if (!is_empty_string (subject_template.mimetype))
+ {
+ string val = subject_template.mimetype;
+ bool like = parse_wildcard (ref val);
+ bool negated = parse_negation (ref val);
+ assert_no_noexpand (val, "mime-type");
+
+ if (like)
+ where.add_wildcard_condition (
+ "subj_mimetype", val, negated);
+ else
+ where.add_match_condition ("subj_mimetype",
+ mimetypes_table.id_try_string (val), negated);
+ }
+
+ // URI
+ if (!is_empty_string (subject_template.uri))
+ {
+ string val = subject_template.uri;
+ bool like = parse_wildcard (ref val);
+ bool negated = parse_negation (ref val);
+ assert_no_noexpand (val, "uri");
+
+ if (like)
+ where.add_wildcard_condition ("subj_id", val, negated);
+ else
+ where.add_text_condition_subquery ("subj_id", val, negated);
+ }
+
+ // Origin
+ if (!is_empty_string (subject_template.origin))
+ {
+ string val = subject_template.origin;
+ bool like = parse_wildcard (ref val);
+ bool negated = parse_negation (ref val);
+ assert_no_noexpand (val, "subject origin");
+
+ if (like)
+ where.add_wildcard_condition (
+ "subj_origin", val, negated);
+ else
+ where.add_text_condition_subquery (
+ "subj_origin", val, negated);
+ }
+
+ // Text
+ if (!is_empty_string (subject_template.text))
+ {
+ // Negation, noexpand and prefix search aren't supported
+ // for subject texts, but "!", "+" and "*" are valid as
+ // plain text characters.
+ where.add_text_condition_subquery ("subj_text_id",
+ subject_template.text, false);
+ }
+
+ // Current URI
+ if (!is_empty_string (subject_template.current_uri))
+ {
+ string val = subject_template.current_uri;
+ bool like = parse_wildcard (ref val);
+ bool negated = parse_negation (ref val);
+ assert_no_noexpand (val, "current_uri");
+
+ if (like)
+ where.add_wildcard_condition (
+ "subj_id_current", val, negated);
+ else
+ where.add_text_condition_subquery (
+ "subj_id_current", val, negated);
+ }
+
+ // Current Origin
+ if (!is_empty_string (subject_template.current_origin))
+ {
+ string val = subject_template.current_origin;
+ bool like = parse_wildcard (ref val);
+ bool negated = parse_negation (ref val);
+ assert_no_noexpand (val, "current_origin");
+
+ if (like)
+ where.add_wildcard_condition (
+ "subj_origin_current", val, negated);
+ else
+ where.add_text_condition_subquery (
+ "subj_origin_current", val, negated);
+ }
+
+ // Subject storage
+ if (!is_empty_string (subject_template.storage))
+ {
+ // FIXME: do we want to support negation?
+ string val = subject_template.storage;
+ assert_no_negation ("subject storage", val);
+ assert_no_wildcard ("subject storage", val);
+ assert_no_noexpand (val, "subject storage");
+ where.add_text_condition_subquery ("subj_storage_id", val);
+ }
+ }
+
+ return where;
+ }
+
+ // Used by get_where_clause_from_event_templates
+ /**
+ * If the value starts with the negation operator, throw an
+ * error.
+ */
+ protected void assert_no_negation (string field, string? val)
+ throws EngineError
+ {
+ if (val == null || !val.has_prefix ("!"))
+ return;
+ string error_message =
+ "Field '%s' doesn't support negation".printf (field);
+ warning (error_message);
+ throw new EngineError.INVALID_ARGUMENT (error_message);
+ }
+
+ // Used by get_where_clause_from_event_templates
+ /**
+     * If the value starts with the no-expand operator, throw an
+     * error.
+     */
+    protected void assert_no_noexpand (string? val, string field)
+ throws EngineError
+ {
+ if (val == null || !val.has_prefix ("+"))
+ return;
+ string error_message =
+ "Field '%s' doesn't support the no-expand operator".printf (field);
+ warning (error_message);
+ throw new EngineError.INVALID_ARGUMENT (error_message);
+ }
+
+ // Used by get_where_clause_from_event_templates
+ /**
+ * If the value ends with the wildcard character, throw an error.
+ */
+ protected void assert_no_wildcard (string field, string? val)
+ throws EngineError
+ {
+ if (val == null || !val.has_suffix ("*"))
+ return;
+ string error_message =
+ "Field '%s' doesn't support prefix search".printf (field);
+ warning (error_message);
+ throw new EngineError.INVALID_ARGUMENT (error_message);
+ }
+
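+    // Builds a clause matching a symbol and (unless the no-expand operator
+    // was used) all of its sub-symbols. For example, a template asking for
+    // NFO.DOCUMENT also matches subtypes such as NFO.TEXT_DOCUMENT,
+    // yielding something along the lines of
+    // "(interpretation = 3 OR interpretation = 7 ...)", where the numeric
+    // IDs are illustrative values resolved through the TableLookup cache.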
+ protected WhereClause get_where_clause_for_symbol (string table_name,
+ string symbol, TableLookup lookup_table) throws EngineError
+ {
+ string _symbol = symbol;
+ bool negated = parse_negation (ref _symbol);
+ bool noexpand = parse_noexpand (ref _symbol);
+ List<unowned string> symbols;
+ if (noexpand)
+ symbols = new List<unowned string> ();
+ else
+ symbols = Symbol.get_all_children (_symbol);
+ symbols.prepend (_symbol);
+
+ WhereClause subwhere = new WhereClause(
+ WhereClause.Type.OR, negated);
+
+ if (symbols.length () == 1)
+ {
+ subwhere.add_match_condition (table_name,
+ lookup_table.id_try_string (_symbol));
+ }
+ else
+ {
+ string sql = "(%s)";
+ string sub_sql = "";
+ bool first = true;
+ foreach (unowned string uri in symbols)
+ {
+ if (!first)
+ sub_sql = sub_sql + " OR ";
+ sub_sql = sub_sql + "%s = %i ".printf (table_name, lookup_table.id_try_string (uri));
+ first = false;
+ }
+ sql = sql.printf (sub_sql);
+ subwhere.add(sql);
+ }
+
+ return subwhere;
+ }
+
+ private void delete_from_cache (string table, int64 rowid)
+ {
+ TableLookup table_lookup;
+
+ if (table == "interpretation")
+ table_lookup = interpretations_table;
+ else if (table == "manifestation")
+ table_lookup = manifestations_table;
+ else if (table == "mimetype")
+ table_lookup = mimetypes_table;
+ else if (table == "actor")
+ table_lookup = actors_table;
+ else
+ return;
+
+ table_lookup.remove((int) rowid);
+ }
+
+}
+
+}
+
+// vim:expandtab:ts=4:sw=4
diff --git a/libzeitgeist/errors.vala b/libzeitgeist/errors.vala
new file mode 100644
index 00000000..538264b6
--- /dev/null
+++ b/libzeitgeist/errors.vala
@@ -0,0 +1,53 @@
+/* zeitgeist-daemon.vala
+ *
+ * Copyright © 2011 Collabora Ltd.
+ * By Siegfried-Angel Gevatter Pujals <siegfried@gevatter.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+namespace Zeitgeist
+{
+ [DBus (name = "org.gnome.zeitgeist.EngineError")]
+ public errordomain EngineError
+ {
+ BACKUP_FAILED,
+ DATABASE_BUSY,
+ DATABASE_CANTOPEN,
+ DATABASE_CORRUPT,
+ DATABASE_ERROR,
+ DATABASE_RETIRE_FAILED,
+ EXISTING_INSTANCE,
+ INVALID_ARGUMENT,
+ INVALID_EVENT,
+ INVALID_KEY,
+ }
+
+    // Vala doesn't include the proper headers on its own; this works around it
+ private static void vala_bug_workaround ()
+ {
+ try
+ {
+ Bus.get_sync (BusType.SESSION, null);
+ }
+ catch (Error err)
+ {
+ // kill "unused method" warning
+ vala_bug_workaround ();
+ }
+ }
+}
+
+// vim:expandtab:ts=4:sw=4
diff --git a/libzeitgeist/sql-schema.vala b/libzeitgeist/sql-schema.vala
new file mode 100644
index 00000000..17e6ff7a
--- /dev/null
+++ b/libzeitgeist/sql-schema.vala
@@ -0,0 +1,674 @@
+/* sql-schema.vala
+ *
+ * Copyright © 2011-2012 Collabora Ltd.
+ * By Siegfried-Angel Gevatter Pujals <siegfried@gevatter.com>
+ * Copyright © 2011-2012 Canonical Ltd.
+ * By Michal Hruby <michal.hruby@canonical.com>
+ * By Siegfried-A. Gevatter <siegfried.gevatter@collabora.co.uk>
+ *
+ * Based upon a Python implementation (2009-2011) by:
+ * Markus Korn <thekorn@gmx.net>
+ * Mikkel Kamstrup Erlandsen <mikkel.kamstrup@gmail.com>
+ * Seif Lotfy <seif@lotfy.com>
+ * Siegfried-Angel Gevatter Pujals <siegfried@gevatter.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+using Zeitgeist;
+
+namespace Zeitgeist.SQLite
+{
+
+ public class DatabaseSchema : Object
+ {
+
+ public const string CORE_SCHEMA = "core";
+ public const int CORE_SCHEMA_VERSION = 9;
+
+ private const string DATABASE_CREATION = "database_creation";
+
+ public static void ensure_schema (Sqlite.Database database)
+ throws EngineError
+ {
+ int schema_version = Utils.using_in_memory_database () ?
+ -1 : get_schema_version (database);
+
+ if (schema_version == -1)
+ {
+ // most likely a new DB
+ setup_database (database);
+ create_schema (database);
+ create_event_indices (database);
+
+ // set database creation date
+ var schema_sql = ("INSERT INTO schema_version VALUES ('%s', %" +
+ int64.FORMAT + ")").printf (DATABASE_CREATION,
+ Timestamp.from_now ());
+ exec_query (database, schema_sql);
+ }
+ else if (schema_version >= 3 && schema_version <= 8)
+ {
+ backup_database ();
+
+ // Fix file permissions, update PRAGMAs, etc.
+ setup_database (database);
+
+ // SQLite supports transactional DDL. This way we won't leave a
+ // messed up schema if something goes wrong.
+ exec_query (database, "BEGIN");
+
+ if (schema_version == 3)
+ {
+ // Add missing columns to storage table
+ exec_query (database,
+ "ALTER TABLE storage ADD COLUMN icon VARCHAR");
+ exec_query (database,
+ "ALTER TABLE storage ADD COLUMN display_name VARCHAR");
+
+ // Set subjects that don't have a storage to "unknown", so
+ // they'll always be marked as available.
+ // FIXME: Do we want to separate unknown/local/online?
+ exec_query (database, """
+ INSERT OR IGNORE INTO storage (value, state)
+ VALUES ('unknown', 1)
+ """);
+ exec_query (database, """
+ UPDATE event SET subj_storage =
+ (SELECT id FROM storage WHERE value='unknown')
+ WHERE subj_storage IS NULL
+ """);
+ }
+
+ // The events table is missing several columns:
+ // - new in version 4: (event) origin and subj_id_current
+ // - new in version 8: subj_origin_current
+ exec_query (database,
+ "ALTER TABLE event RENAME TO event_old");
+
+ string[] tables = { "interpretation", "manifestation",
+ "mimetype", "actor" };
+
+ // Rename old tables that need to be replaced
+ foreach (unowned string table in tables)
+ {
+ exec_query (database,
+ "ALTER TABLE %s RENAME TO %s_old".printf (table, table));
+ }
+
+ // Create any missing tables and indices
+ create_schema (database);
+ drop_event_indices (database);
+ drop_basic_indices (database);
+
+ // Migrate data to the new tables and delete the old ones
+ foreach (unowned string table in tables)
+ {
+ exec_query (database,
+ "INSERT INTO %s SELECT id, value FROM %s_old".printf (
+ table, table));
+
+ exec_query (database, "DROP TABLE %s_old".printf (table));
+ }
+
+
+ // Migrate events from the old table
+ // - We initialize subj_origin_current to subj_origin as an
+ // approximation.
+ // FIXME: consider replaying MOVE_EVENTs to fix this
+ exec_query (database, """
+ INSERT INTO event
+ SELECT
+ id, timestamp, interpretation, manifestation,
+ actor, payload, subj_id, subj_interpretation,
+ subj_manifestation, subj_origin, subj_mimetype,
+ subj_text, subj_storage, NULL as origin,
+ subj_id AS subj_id_current,
+ subj_origin AS subj_origin_current
+ FROM event_old WHERE id IS NOT NULL
+ AND timestamp IS NOT NULL
+ AND interpretation IS NOT NULL
+ AND manifestation IS NOT NULL
+ AND actor IS NOT NULL
+ and subj_id IS NOT NULL
+ """);
+
+ // This will also drop any triggers the `events' table had
+ exec_query (database, "DROP TABLE event_old");
+
+ // Ontology update
+ exec_query (database,
+ "INSERT OR IGNORE INTO manifestation (value) VALUES ('%s')"
+ .printf (NFO.WEB_DATA_OBJECT));
+ exec_query (database, """
+ UPDATE event
+ SET subj_manifestation=(
+ SELECT id FROM manifestation WHERE value='""" +
+ NFO.WEB_DATA_OBJECT + """')
+ WHERE
+ subj_manifestation=(
+ SELECT id FROM manifestation WHERE value='""" +
+ NFO.REMOTE_DATA_OBJECT + """')
+ AND subj_id IN (
+ SELECT id FROM uri
+ WHERE
+ value LIKE "http://%"
+ OR value LIKE "https://%"
+ )
+ """);
+
+ exec_query (database, "COMMIT");
+ create_event_indices (database);
+ exec_query (database, "ANALYZE");
+ message ("Upgraded database to schema version %d.",
+ CORE_SCHEMA_VERSION);
+ }
+ else if (schema_version < CORE_SCHEMA_VERSION)
+ {
+ throw new EngineError.DATABASE_ERROR (
+ "Unable to upgrade from schema version %d".printf (
+ schema_version));
+ }
+ }
+
+ private static void backup_database () throws EngineError
+ {
+ try
+ {
+ Utils.backup_database ();
+ }
+ catch (Error backup_error)
+ {
+ var msg = "Database backup failed: " + backup_error.message;
+ throw new EngineError.BACKUP_FAILED (msg);
+ }
+ }
+
+ public static int get_schema_version (Sqlite.Database database)
+ throws EngineError
+ {
+ int schema_version = (int) get_schema_metadata (database, CORE_SCHEMA);
+ debug ("schema_version is %d", schema_version);
+
+ if (schema_version < -1)
+ {
+ throw new EngineError.DATABASE_CORRUPT (
+ "Database corruption flag is set.");
+ }
+ return schema_version;
+ }
+
+ public static int64 get_creation_date (Sqlite.Database database)
+ {
+ return get_schema_metadata (database, DATABASE_CREATION);
+ }
+
+ private static int64 get_schema_metadata (Sqlite.Database database,
+ string key)
+ {
+ var sql = "SELECT version FROM schema_version " +
+ "WHERE schema='%s'".printf (key);
+
+ int64 schema_version = -1;
+
+ database.exec (sql,
+ (n_cols, values, column_names) =>
+ {
+ if (values[0] != null)
+ {
+ schema_version = int64.parse (values[0]);
+ }
+ return 0;
+ }, null);
+
+ // we don't really care about the return value of exec, the result
+ // will be -1 if something went wrong anyway
+
+ return schema_version;
+ }
+
+ public static void set_corruption_flag (Sqlite.Database database)
+ throws EngineError
+ {
+ // A schema_version value smaller than -1 indicates that
+ // database corruption has been detected.
+ int version = get_schema_version (database);
+ if (version > 0)
+ version = -version;
+ set_schema_version (database, version);
+ }
+
+ private static void set_schema_version (Sqlite.Database database,
+ int schema_version) throws EngineError
+ {
+ /* The 'ON CONFLICT REPLACE' on the PK converts INSERT to UPDATE
+             * when appropriate */
+ var schema_sql = "INSERT INTO schema_version VALUES ('%s', %d)"
+ .printf (CORE_SCHEMA, schema_version);
+ exec_query (database, schema_sql);
+ }
+
+ public static void setup_database (Sqlite.Database database)
+ throws EngineError
+ {
+ if (!Utils.using_in_memory_database ())
+ FileUtils.chmod (Utils.get_database_file_path (), 0600);
+ if (Utils.get_data_path () == Utils.get_default_data_path ())
+ FileUtils.chmod (Utils.get_data_path (), 0700);
+
+ exec_query (database, "PRAGMA journal_mode = WAL");
+ exec_query (database, "PRAGMA synchronous = NORMAL");
+ exec_query (database, "PRAGMA locking_mode = NORMAL");
+ }
+
+ public static void create_schema (Sqlite.Database database)
+ throws EngineError
+ {
+ // URI
+ exec_query (database, """
+ CREATE TABLE IF NOT EXISTS uri (
+ id INTEGER PRIMARY KEY,
+ value VARCHAR UNIQUE
+ )
+ """);
+
+ // Interpretation
+ exec_query (database, """
+ CREATE TABLE IF NOT EXISTS interpretation (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ value VARCHAR UNIQUE
+ )
+ """);
+
+ // Manifestation
+ exec_query (database, """
+ CREATE TABLE IF NOT EXISTS manifestation (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ value VARCHAR UNIQUE
+ )
+ """);
+
+ // Mime-Type
+ exec_query (database, """
+ CREATE TABLE IF NOT EXISTS mimetype (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ value VARCHAR UNIQUE
+ )
+ """);
+
+ // Actor
+ exec_query (database, """
+ CREATE TABLE IF NOT EXISTS actor (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ value VARCHAR UNIQUE
+ )
+ """);
+
+ // Text
+ exec_query (database, """
+ CREATE TABLE IF NOT EXISTS text (
+ id INTEGER PRIMARY KEY,
+ value VARCHAR UNIQUE
+ )
+ """);
+
+ // Payload
+ // (There's no value index for payloads, they can only be fetched
+ // by ID).
+ exec_query (database, """
+ CREATE TABLE IF NOT EXISTS payload
+ (id INTEGER PRIMARY KEY, value BLOB)
+ """);
+
+ // Storage
+ exec_query (database, """
+ CREATE TABLE IF NOT EXISTS storage (
+ id INTEGER PRIMARY KEY,
+ value VARCHAR UNIQUE,
+ state INTEGER,
+ icon VARCHAR,
+ display_name VARCHAR
+ )
+ """);
+
+ // Event
+ // This is the primary table for log statements. Note that:
+ // - event.id is NOT unique, each subject has a separate row;
+ // - timestamps are integers;
+ // - (event_)origin and subj_id_current are at the end of the
+ // table, for backwards-compatibility reasons.
+ exec_query (database, """
+ CREATE TABLE IF NOT EXISTS event (
+ id INTEGER NOT NULL,
+ timestamp INTEGER NOT NULL,
+ interpretation INTEGER NOT NULL,
+ manifestation INTEGER NOT NULL,
+ actor INTEGER NOT NULL,
+ payload INTEGER,
+ subj_id INTEGER NOT NULL,
+ subj_interpretation INTEGER,
+ subj_manifestation INTEGER,
+ subj_origin INTEGER,
+ subj_mimetype INTEGER,
+ subj_text INTEGER,
+ subj_storage INTEGER,
+ origin INTEGER,
+ subj_id_current INTEGER,
+ subj_origin_current INTEGER,
+ CONSTRAINT interpretation_fk
+ FOREIGN KEY(interpretation)
+ REFERENCES interpretation(id)
+ ON DELETE CASCADE,
+ CONSTRAINT manifestation_fk
+ FOREIGN KEY(manifestation)
+ REFERENCES manifestation(id)
+ ON DELETE CASCADE,
+ CONSTRAINT actor_fk
+ FOREIGN KEY(actor)
+ REFERENCES actor(id)
+ ON DELETE CASCADE,
+ CONSTRAINT origin_fk
+ FOREIGN KEY(origin)
+ REFERENCES uri(id)
+ ON DELETE CASCADE,
+ CONSTRAINT payload_fk
+ FOREIGN KEY(payload)
+ REFERENCES payload(id)
+ ON DELETE CASCADE,
+ CONSTRAINT subj_id_fk
+ FOREIGN KEY(subj_id)
+ REFERENCES uri(id)
+ ON DELETE CASCADE,
+ CONSTRAINT subj_id_current_fk
+ FOREIGN KEY(subj_id_current)
+ REFERENCES uri(id)
+ ON DELETE CASCADE,
+ CONSTRAINT subj_interpretation_fk
+ FOREIGN KEY(subj_interpretation)
+ REFERENCES interpretation(id)
+ ON DELETE CASCADE,
+ CONSTRAINT subj_manifestation_fk
+ FOREIGN KEY(subj_manifestation)
+ REFERENCES manifestation(id)
+ ON DELETE CASCADE,
+ CONSTRAINT subj_origin_fk
+ FOREIGN KEY(subj_origin)
+ REFERENCES uri(id)
+ ON DELETE CASCADE,
+ CONSTRAINT subj_origin_current_fk
+ FOREIGN KEY(subj_origin_current)
+ REFERENCES uri(id)
+ ON DELETE CASCADE,
+ CONSTRAINT subj_mimetype_fk
+ FOREIGN KEY(subj_mimetype)
+ REFERENCES mimetype(id)
+ ON DELETE CASCADE,
+ CONSTRAINT subj_text_fk
+ FOREIGN KEY(subj_text)
+ REFERENCES text(id)
+ ON DELETE CASCADE,
+ CONSTRAINT subj_storage_fk
+ FOREIGN KEY(subj_storage)
+ REFERENCES storage(id)
+ ON DELETE CASCADE,
+ PRIMARY KEY (timestamp, interpretation,
+ manifestation, actor, subj_id)
+ )
+ """);
+
+ // Extensions
+ exec_query (database, """
+ CREATE TABLE IF NOT EXISTS extensions_conf (
+ extension VARCHAR,
+ key VARCHAR,
+ value BLOB,
+ CONSTRAINT unique_extension UNIQUE (extension, key)
+ )
+ """);
+
+ // Performance note: the subqueries here are provided for lookup
+ // only. For querying, use explicit "WHERE x IN (SELECT id ...)"
+ // subqueries.
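+            // For example, filter on a subject URI with
+            //   ... WHERE subj_id IN (SELECT id FROM uri WHERE value=?)
+            // rather than comparing against the view's subj_uri column.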
+ exec_query (database, "DROP VIEW IF EXISTS event_view");
+ exec_query (database, """
+ CREATE VIEW IF NOT EXISTS event_view AS
+ SELECT event.id,
+ event.timestamp,
+ event.interpretation,
+ event.manifestation,
+ event.actor,
+ (SELECT value FROM payload
+ WHERE payload.id=event.payload)
+ AS payload,
+ (SELECT value FROM uri
+ WHERE uri.id=event.subj_id)
+ AS subj_uri,
+                    event.subj_id, -- this points to an id in the uri table
+ event.subj_interpretation,
+ event.subj_manifestation,
+ event.subj_origin,
+ (SELECT value FROM uri
+ WHERE uri.id=event.subj_origin)
+ AS subj_origin_uri,
+ event.subj_mimetype,
+ (SELECT value FROM text
+ WHERE text.id = event.subj_text)
+ AS subj_text,
+ (SELECT value FROM storage
+ WHERE storage.id=event.subj_storage)
+ AS subj_storage,
+ (SELECT state FROM storage
+ WHERE storage.id=event.subj_storage)
+ AS subj_storage_state,
+ event.origin,
+ (SELECT value FROM uri
+ WHERE uri.id=event.origin)
+ AS event_origin_uri,
+ (SELECT value FROM uri
+ WHERE uri.id=event.subj_id_current)
+ AS subj_current_uri,
+ event.subj_id_current,
+ event.subj_text AS subj_text_id,
+ event.subj_storage AS subj_storage_id,
+ (SELECT value FROM actor
+ WHERE actor.id=event.actor)
+ AS actor_uri,
+ event.subj_origin_current,
+ (SELECT value FROM uri
+ WHERE uri.id=event.subj_origin_current)
+ AS subj_origin_current_uri
+ FROM event
+ """);
+
+ // Set schema version
+ exec_query (database, """
+ CREATE TABLE IF NOT EXISTS schema_version (
+ schema VARCHAR PRIMARY KEY ON CONFLICT REPLACE,
+ version INT
+ )
+ """);
+ set_schema_version (database, CORE_SCHEMA_VERSION);
+ }
+
+ /*
+ * Creates indices for all auxiliary tables.
+ */
+ public static void create_basic_indices (Sqlite.Database database)
+ throws EngineError
+ {
+ exec_query (database, """
+ CREATE UNIQUE INDEX IF NOT EXISTS uri_value ON uri(value)
+ """);
+ exec_query (database, """
+ CREATE UNIQUE INDEX IF NOT EXISTS interpretation_value
+ ON interpretation(value)
+ """);
+ exec_query (database, """
+ CREATE UNIQUE INDEX IF NOT EXISTS manifestation_value
+ ON manifestation(value)
+ """);
+ exec_query (database, """
+ CREATE UNIQUE INDEX IF NOT EXISTS mimetype_value
+ ON mimetype(value)
+ """);
+ exec_query (database, """
+ CREATE UNIQUE INDEX IF NOT EXISTS actor_value
+ ON actor(value)
+ """);
+ exec_query (database, """
+ CREATE UNIQUE INDEX IF NOT EXISTS text_value
+ ON text(value)
+ """);
+ exec_query (database, """
+ CREATE UNIQUE INDEX IF NOT EXISTS storage_value
+ ON storage(value)
+ """);
+ exec_query (database, """
+ CREATE UNIQUE INDEX IF NOT EXISTS extensions_conf_key
+ ON extensions_conf (extension, key)
+ """);
+ }
+
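+        /*
+         * Creates indices for the event table. Each index leads with the
+         * filtered column and also covers timestamp and id, so queries
+         * that filter on that column and sort by timestamp can make use
+         * of a single index.
+         */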
+ public static void create_event_indices (Sqlite.Database database)
+ throws EngineError
+ {
+ exec_query (database, """
+ CREATE INDEX IF NOT EXISTS event_id
+ ON event(id, timestamp)
+ """);
+ exec_query (database, """
+ CREATE INDEX IF NOT EXISTS event_timestamp
+ ON event(timestamp, id)
+ """);
+ exec_query (database, """
+ CREATE INDEX IF NOT EXISTS event_interpretation
+ ON event(interpretation, timestamp, id)
+ """);
+ exec_query (database, """
+ CREATE INDEX IF NOT EXISTS event_manifestation
+ ON event(manifestation, timestamp, id)
+ """);
+ exec_query (database, """
+ CREATE INDEX IF NOT EXISTS event_actor
+ ON event(actor, timestamp, id)
+ """);
+ exec_query (database, """
+ CREATE INDEX IF NOT EXISTS event_origin
+ ON event(origin, timestamp, id)
+ """);
+ exec_query (database, """
+ CREATE INDEX IF NOT EXISTS event_subj_id
+ ON event(subj_id, timestamp, subj_interpretation, id)
+ """);
+ exec_query (database, """
+ CREATE INDEX IF NOT EXISTS event_subj_id_current
+ ON event(subj_id_current, timestamp, subj_interpretation, id)
+ """);
+ exec_query (database, """
+ CREATE INDEX IF NOT EXISTS event_subj_interpretation
+ ON event(subj_interpretation, timestamp, subj_id, id)
+ """);
+ exec_query (database, """
+ CREATE INDEX IF NOT EXISTS event_subj_manifestation
+ ON event(subj_manifestation, timestamp, subj_id, id)
+ """);
+ exec_query (database, """
+ CREATE INDEX IF NOT EXISTS event_subj_origin
+ ON event(subj_origin, timestamp, subj_interpretation, subj_id, id)
+ """);
+ exec_query (database, """
+ CREATE INDEX IF NOT EXISTS event_subj_origin_current
+ ON event(subj_origin_current, timestamp, subj_interpretation, subj_id, id)
+ """);
+ exec_query (database, """
+ CREATE INDEX IF NOT EXISTS event_subj_mimetype
+ ON event(subj_mimetype, timestamp, id)
+ """);
+ exec_query (database, """
+ CREATE INDEX IF NOT EXISTS event_subj_text
+ ON event(subj_text, timestamp, id)
+ """);
+ exec_query (database, """
+ CREATE INDEX IF NOT EXISTS event_subj_storage
+ ON event(subj_storage, timestamp, id)
+ """);
+ }
+
+ /*
+ * Drop indices for all auxiliary tables.
+ */
+ public static void drop_basic_indices (Sqlite.Database database)
+ throws EngineError
+ {
+ exec_query (database, "DROP INDEX IF EXISTS uri_value");
+ exec_query (database, "DROP INDEX IF EXISTS interpretation_value");
+ exec_query (database, "DROP INDEX IF EXISTS manifestation_value");
+ exec_query (database, "DROP INDEX IF EXISTS mimetype_value");
+ exec_query (database, "DROP INDEX IF EXISTS actor_value");
+ exec_query (database, "DROP INDEX IF EXISTS text_value");
+ exec_query (database, "DROP INDEX IF EXISTS storage_value");
+ exec_query (database, "DROP INDEX IF EXISTS extensions_conf_key");
+ }
+
+ public static void drop_event_indices (Sqlite.Database database)
+ throws EngineError
+ {
+ exec_query (database, "DROP INDEX IF EXISTS event_id");
+ exec_query (database, "DROP INDEX IF EXISTS event_timestamp");
+ exec_query (database, "DROP INDEX IF EXISTS event_interpretation");
+ exec_query (database, "DROP INDEX IF EXISTS event_manifestation");
+ exec_query (database, "DROP INDEX IF EXISTS event_actor");
+ exec_query (database, "DROP INDEX IF EXISTS event_origin");
+ exec_query (database, "DROP INDEX IF EXISTS event_subj_id");
+ exec_query (database, "DROP INDEX IF EXISTS event_subj_id_current");
+ exec_query (database, "DROP INDEX IF EXISTS event_subj_interpretation");
+ exec_query (database, "DROP INDEX IF EXISTS event_subj_manifestation");
+ exec_query (database, "DROP INDEX IF EXISTS event_subj_origin");
+ exec_query (database, "DROP INDEX IF EXISTS event_subj_current");
+ exec_query (database, "DROP INDEX IF EXISTS event_subj_mimetype");
+ exec_query (database, "DROP INDEX IF EXISTS event_subj_text");
+ exec_query (database, "DROP INDEX IF EXISTS event_subj_storage");
+ }
+
+ /**
+ * Execute the given SQL. If the query doesn't succeed, throw
+ * an error.
+ *
+ * @param database the database on which to run the query
+ * @param sql the SQL query to run
+ */
+ private static void exec_query (Sqlite.Database database,
+ string sql) throws EngineError
+ {
+ int rc = database.exec (sql);
+ if (rc != Sqlite.OK)
+ {
+ if (rc == Sqlite.CORRUPT)
+ {
+ throw new EngineError.DATABASE_CORRUPT (database.errmsg ());
+ }
+ else
+ {
+ const string fmt_str = "Can't create database: %d, %s\n\n" +
+ "Unable to execute SQL:\n%s";
+ var err_msg = fmt_str.printf (rc, database.errmsg (), sql);
+ throw new EngineError.DATABASE_ERROR (err_msg);
+ }
+ }
+ }
+
+ }
+
+}
diff --git a/libzeitgeist/sql.vala b/libzeitgeist/sql.vala
new file mode 100644
index 00000000..1bb24685
--- /dev/null
+++ b/libzeitgeist/sql.vala
@@ -0,0 +1,478 @@
+/* sql.vala
+ *
+ * Copyright © 2011-2012 Collabora Ltd.
+ * By Siegfried-Angel Gevatter Pujals <siegfried@gevatter.com>
+ * By Seif Lotfy <seif@lotfy.com>
+ * Copyright © 2011 Manish Sinha <manishsinha@ubuntu.com>
+ * Copyright © 2012 Canonical Ltd.
+ * By Siegfried-A. Gevatter <siegfried.gevatter@collabora.co.uk>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+using Zeitgeist;
+
+namespace Zeitgeist.SQLite
+{
+
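+    // Column indices into the event_view defined in sql-schema.vala.
+    // The order of these values must match the column order of the
+    // view's SELECT statement.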
+ public enum EventViewRows
+ {
+ ID,
+ TIMESTAMP,
+ INTERPRETATION,
+ MANIFESTATION,
+ ACTOR,
+ PAYLOAD,
+ SUBJECT_URI,
+ SUBJECT_ID,
+ SUBJECT_INTERPRETATION,
+ SUBJECT_MANIFESTATION,
+ SUBJECT_ORIGIN,
+ SUBJECT_ORIGIN_URI,
+ SUBJECT_MIMETYPE,
+ SUBJECT_TEXT,
+ SUBJECT_STORAGE,
+ SUBJECT_STORAGE_STATE,
+ EVENT_ORIGIN,
+ EVENT_ORIGIN_URI,
+ SUBJECT_CURRENT_URI,
+ SUBJECT_ID_CURRENT,
+ SUBJECT_TEXT_ID,
+ SUBJECT_STORAGE_ID,
+ ACTOR_URI,
+ SUBJECT_CURRENT_ORIGIN,
+ SUBJECT_CURRENT_ORIGIN_URI
+ }
+
+ public delegate void DeletionCallback (string table, int64 rowid);
+
+ public class Database : Object
+ {
+ private const int DEFAULT_OPEN_FLAGS =
+ Sqlite.OPEN_READWRITE | Sqlite.OPEN_CREATE;
+
+ public Sqlite.Statement event_insertion_stmt;
+ public Sqlite.Statement id_retrieval_stmt;
+ public Sqlite.Statement move_handling_stmt;
+ public Sqlite.Statement payload_insertion_stmt;
+
+        // The DB should be accessible from the engine for statement
+        // preparation, as well as allowing extensions to add tables to it.
+ public Sqlite.Database database;
+
+ private DeletionCallback? deletion_callback = null;
+ private bool is_read_only = false;
+
+ public Database () throws EngineError
+ {
+ open_database (true);
+
+ prepare_read_queries ();
+ prepare_modification_queries ();
+
+ // Register a data change notification callback to look for
+ // deletions, so we can keep the TableLookups up to date.
+ database.update_hook (update_callback);
+ }
+
+ public Database.read_only () throws EngineError
+ {
+ is_read_only = true;
+ open_database (false);
+
+ prepare_read_queries ();
+ // not initializing the modification queries will let us find
+ // issues more easily
+
+ // Register a data change notification callback to look for
+ // deletions, so we can keep the TableLookups up to date.
+ database.update_hook (update_callback);
+ }
+
+ private void open_database (bool retry)
+ throws EngineError
+ {
+ int flags = is_read_only ? Sqlite.OPEN_READONLY : DEFAULT_OPEN_FLAGS;
+ int rc = Sqlite.Database.open_v2 (
+ Utils.get_database_file_path (),
+ out database, flags);
+
+ if (rc == Sqlite.OK)
+ {
+ try
+ {
+ // Errors (like a malformed database) may not be exposed
+ // until we try to operate on the database.
+ if (is_read_only)
+ {
+ int ver = DatabaseSchema.get_schema_version (database);
+ if (ver < DatabaseSchema.CORE_SCHEMA_VERSION)
+ {
+ throw new EngineError.DATABASE_CANTOPEN (
+ "Unable to open database: old schema version");
+ }
+ }
+ else
+ {
+ DatabaseSchema.ensure_schema (database);
+ }
+ }
+ catch (EngineError err)
+ {
+ if (err is EngineError.DATABASE_CORRUPT && retry)
+ rc = Sqlite.CORRUPT;
+ else if (err is EngineError.DATABASE_CANTOPEN)
+ rc = Sqlite.CANTOPEN;
+ else if (err is EngineError.DATABASE_BUSY)
+ rc = Sqlite.BUSY;
+ else
+ throw err;
+ }
+ }
+
+ if (rc != Sqlite.OK)
+ {
+ if (rc == Sqlite.CORRUPT && retry)
+ {
+ // The database disk image is malformed
+ warning ("It looks like your database is corrupt. " +
+ "It will be renamed and a new one will be created.");
+ retire_database ();
+ open_database (false);
+ }
+ else if (rc == Sqlite.PERM || rc == Sqlite.CANTOPEN)
+ {
+ // Access permission denied / Unable to open database file
+ throw new EngineError.DATABASE_CANTOPEN (
+ database.errmsg ());
+ }
+ else if (rc == Sqlite.BUSY)
+ {
+ // The database file is locked
+ throw new EngineError.DATABASE_BUSY (database.errmsg ());
+ }
+ else
+ {
+ string message = "Can't open database: %d, %s".printf (rc,
+ database.errmsg ());
+ throw new EngineError.DATABASE_ERROR (message);
+ }
+ }
+ }
+
+ private static void retire_database () throws EngineError
+ {
+ try
+ {
+ File dbfile = File.new_for_path (
+ Utils.get_database_file_path ());
+ dbfile.set_display_name (
+ Utils.get_database_file_retire_name ());
+ }
+ catch (Error err)
+ {
+ string message = "Could not rename database: %s".printf (
+ err.message);
+ throw new EngineError.DATABASE_RETIRE_FAILED (message);
+ }
+ }
+
+ public uint32 get_last_id () throws EngineError
+ {
+ int last_id = -1;
+ int rc = database.exec ("SELECT MAX(id) FROM event",
+ (n_columns, values, column_names) =>
+ {
+ if (values[0] == null)
+ last_id = 0;
+ else
+ last_id = int.parse (values[0]);
+ return 0;
+ }, null);
+ assert_query_success (rc, "Can't query database");
+ assert (last_id != -1);
+ return last_id;
+ }
+
+ public void set_deletion_callback (owned DeletionCallback? callback)
+ {
+ deletion_callback = (owned) callback;
+ }
+
+ /**
+ * Join all given event_ids into a comma-separated string suitable
+ * for use in a SQL query like "WHERE id IN (...)".
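+         *
+         * For example, { 3, 7, 12 } yields the string "3, 7, 12".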
+ */
+ public string get_sql_string_from_event_ids (uint32[] event_ids)
+ requires (event_ids.length > 0)
+ {
+ var sql_condition = new StringBuilder ();
+ sql_condition.append_printf ("%u", event_ids[0]);
+ for (int i = 1; i < event_ids.length; ++i) {
+ sql_condition.append_printf (", %u", event_ids[i]);
+ }
+ return sql_condition.str;
+ }
+
+ public TimeRange? get_time_range_for_event_ids (uint32[] event_ids)
+ throws EngineError
+ {
+ if (event_ids.length == 0)
+ return null;
+
+ string sql = """
+ SELECT MIN(timestamp), MAX(timestamp)
+ FROM event
+ WHERE id IN (%s)
+ """.printf (get_sql_string_from_event_ids (event_ids));
+
+ TimeRange? time_range = null;
+ int rc = database.exec (sql,
+ (n_columns, values, column_names) =>
+ {
+ if (values[0] != null)
+ {
+ int64 start = int64.parse (values[0]);
+ int64 end = int64.parse (values[1]);
+ time_range = new TimeRange (start, end);
+ }
+ return 0;
+ }, null);
+ assert_query_success (rc, "SQL Error");
+
+ return time_range;
+ }
+
+ public void insert_or_ignore_into_table (string table_name,
+ GenericArray<string> values) throws EngineError
+ {
+ if (values.length == 0)
+ return;
+
+ int rc;
+
+ var sql = new StringBuilder ();
+ sql.append ("INSERT OR IGNORE INTO ");
+ sql.append (table_name);
+ sql.append (" (value) SELECT ?");
+ for (int i = 1; i < values.length; ++i)
+ sql.append (" UNION SELECT ?");
+
+ Sqlite.Statement stmt;
+ rc = database.prepare_v2 (sql.str, -1, out stmt);
+ assert_query_success (rc, "SQL error");
+
+ for (int i = 0; i < values.length; ++i)
+ stmt.bind_text (i+1, values[i]);
+
+ rc = stmt.step ();
+ assert_query_success (rc, "SQL error", Sqlite.DONE);
+ }
+
+ public void begin_transaction () throws EngineError
+ {
+ int rc = database.exec ("BEGIN");
+ assert_query_success (rc, "Can't start transaction");
+ }
+
+ public void end_transaction () throws EngineError
+ {
+ int rc = database.exec ("COMMIT");
+ assert_query_success (rc, "Can't commit transaction");
+ }
+
+ public void abort_transaction () throws EngineError
+ {
+ int rc = database.exec ("ROLLBACK");
+ assert_query_success (rc, "Can't rollback transaction");
+ }
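+
+        // Typical calling pattern for the three helpers above (sketch only;
+        // do_work () stands in for the caller's own statements):
+        //
+        //     begin_transaction ();
+        //     try
+        //     {
+        //         do_work ();
+        //         end_transaction ();
+        //     }
+        //     catch (EngineError err)
+        //     {
+        //         abort_transaction ();
+        //         throw err;
+        //     }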
+
+ public void close ()
+ {
+ // SQLite connection is implicitly closed upon destruction
+ database = null;
+ }
+
+#if EXPLAIN_QUERIES
+ public void explain_query (Sqlite.Statement prepared_stmt)
+ throws EngineError
+ {
+ int rc;
+ Sqlite.Statement stmt;
+
+ var explain_sql = "EXPLAIN QUERY PLAN %s".printf (prepared_stmt.sql ());
+
+ rc = prepared_stmt.db_handle ().prepare_v2 (explain_sql, -1, out stmt);
+ assert_query_success(rc, "SQL error");
+
+ print ("%s\n", explain_sql);
+
+ while ((rc = stmt.step()) == Sqlite.ROW)
+ {
+ int select_id = stmt.column_int (0);
+ int order = stmt.column_int (1);
+ int from = stmt.column_int (2);
+ unowned string detail = stmt.column_text (3);
+
+ print ("%d %d %d %s\n", select_id, order, from, detail);
+ }
+ }
+#endif
+
+ /**
+ * Ensure `rc' is SQLITE_OK. If it isn't, print an error message
+ * and throw an error.
+ *
+ * @param rc error code returned by a SQLite call
+ * @param msg message to print if `rc' indicates an error
+ * @throws EngineError
+ */
+ [Diagnostics]
+ public void assert_query_success (int rc, string msg,
+ int success_code=Sqlite.OK) throws EngineError
+ {
+ if (unlikely (rc != success_code))
+ {
+ string error_message = "%s: %d, %s".printf (
+ msg, rc, database.errmsg ());
+ warning ("%s\n", error_message);
+ assert_not_corrupt (rc);
+ throw new EngineError.DATABASE_ERROR (error_message);
+ }
+ }
+
+ /**
+ * Ensure `rc' isn't SQLITE_CORRUPT. If it is, schedule a database
+ * retire and Zeitgeist restart so a new database can be created,
+ * unless in read-only mode, in which case EngineError.DATABASE_ERROR
+ * will be thrown.
+ *
+ * This function should be called whenever assert_query_success isn't
+ * used.
+ *
+ * @param rc error code returned by a SQLite call
+ */
+ public void assert_not_corrupt (int rc)
+ throws EngineError
+ {
+ if (unlikely (rc == Sqlite.CORRUPT))
+ {
+ warning ("It looks like your database is corrupt: %s".printf (
+ database.errmsg ()));
+ if (!is_read_only)
+ {
+ // Sets a flag in the database indicating that it is
+ // corrupt. This will trigger a database retire and
+ // re-creation on the next startup.
+ DatabaseSchema.set_corruption_flag (database);
+ }
+ throw new EngineError.DATABASE_CORRUPT (database.errmsg ());
+ }
+ }
+
+ private void prepare_read_queries () throws EngineError
+ {
+ int rc;
+ string sql;
+
+ // Event ID retrieval statement
+ sql = """
+ SELECT id FROM event
+ WHERE timestamp=? AND interpretation=? AND
+ manifestation=? AND actor=?
+ """;
+ rc = database.prepare_v2 (sql, -1, out id_retrieval_stmt);
+ assert_query_success (rc, "Event ID retrieval query error");
+ }
+
+ private void prepare_modification_queries () throws EngineError
+ {
+ int rc;
+ string sql;
+
+ // Event insertion statement
+ sql = """
+ INSERT INTO event (
+ id, timestamp, interpretation, manifestation, actor,
+ origin, payload, subj_id, subj_id_current,
+ subj_interpretation, subj_manifestation, subj_origin,
+ subj_origin_current, subj_mimetype, subj_text, subj_storage
+ ) VALUES (
+ ?, ?, ?, ?, ?,
+ (SELECT id FROM uri WHERE value=?),
+ ?,
+ (SELECT id FROM uri WHERE value=?),
+ (SELECT id FROM uri WHERE value=?),
+ ?, ?,
+ (SELECT id FROM uri WHERE value=?),
+ (SELECT id FROM uri WHERE value=?),
+ ?,
+ (SELECT id FROM text WHERE value=?),
+ (SELECT id FROM storage WHERE value=?)
+ )""";
+
+ rc = database.prepare_v2 (sql, -1, out event_insertion_stmt);
+ assert_query_success (rc, "Insertion query error");
+
+            // Move handling statement
+ sql = """
+ UPDATE event
+ SET subj_id_current=(SELECT id FROM uri WHERE value=?)
+ , subj_origin_current=(SELECT id FROM uri WHERE value=?)
+ WHERE subj_id_current=(SELECT id FROM uri WHERE value=?)
+ AND interpretation!=? AND timestamp<?
+ """;
+ rc = database.prepare_v2 (sql, -1, out move_handling_stmt);
+ assert_query_success (rc, "Move handling error");
+
+            // Payload insertion statement
+ sql = """
+ INSERT INTO payload (value) VALUES (?)
+ """;
+ rc = database.prepare_v2 (sql, -1, out payload_insertion_stmt);
+ assert_query_success (rc, "Payload insertion query error");
+ }
+
+        public bool analyze () throws EngineError
+        {
+            int rc = database.exec ("ANALYZE");
+            assert_query_success (rc, "Error running ANALYZE");
+            return false;
+        }
+
+ protected void update_callback (Sqlite.Action action,
+ string dbname, string table, int64 rowid)
+ {
+ if (action != Sqlite.Action.DELETE)
+ return;
+ if (deletion_callback != null)
+ deletion_callback (table, rowid);
+ //interpretations_table
+ // manifestations_
+ //mimetypes_table - mimetype table
+ // actors_ . actor table
+ // FIXME!
+ /*
+ stdout.printf ("%s", dbname); // = main
+ stdout.printf ("%s", table);
+ stdout.printf ("%li", (long) rowid);
+ */
+ }
+
+ }
+
+}
+
+// vim:expandtab:ts=4:sw=4
diff --git a/libzeitgeist/table-lookup.vala b/libzeitgeist/table-lookup.vala
new file mode 100644
index 00000000..b582b143
--- /dev/null
+++ b/libzeitgeist/table-lookup.vala
@@ -0,0 +1,155 @@
+/* table-lookup.vala
+ *
+ * Copyright © 2011 Collabora Ltd.
+ * By Siegfried-Angel Gevatter Pujals <siegfried@gevatter.com>
+ *
+ * Based upon a Python implementation (2009-2011) by:
+ * Siegfried-Angel Gevatter Pujals <siegfried@gevatter.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+namespace Zeitgeist.SQLite
+{
+
+ public class TableLookup : Object
+ {
+ unowned Zeitgeist.SQLite.Database database;
+ unowned Sqlite.Database db;
+
+ private string table;
+ private HashTable<int, string> id_to_value;
+ private HashTable<string, int> value_to_id;
+ private Sqlite.Statement insertion_stmt;
+ private Sqlite.Statement retrieval_stmt;
+
+ public TableLookup (Database database, string table_name)
+ throws EngineError
+ {
+ this.database = database;
+ db = database.database;
+ table = table_name;
+ id_to_value = new HashTable<int, string>(direct_hash, direct_equal);
+ value_to_id = new HashTable<string, int>(str_hash, str_equal);
+
+ int rc;
+ string sql;
+
+ rc = db.exec ("SELECT id, value FROM " + table,
+ (n_columns, values, column_names) =>
+ {
+ id_to_value.insert (int.parse(values[0]), values[1]);
+ value_to_id.insert (values[1], int.parse(values[0]));
+ return 0;
+ }, null);
+ database.assert_query_success (rc,
+ "Can't init %s table".printf (table));
+
+ sql = "INSERT INTO " + table + " (value) VALUES (?)";
+ rc = db.prepare_v2 (sql, -1, out insertion_stmt);
+ database.assert_query_success (rc, "Error creating insertion_stmt");
+
+ sql = "SELECT value FROM " + table + " WHERE id=?";
+ rc = db.prepare_v2 (sql, -1, out retrieval_stmt);
+ database.assert_query_success (rc, "Error creating retrieval_stmt");
+ }
+
+ /**
+         * Searches the table for the given value and returns its ID, or -1 if not found.
+ *
+ * @see id_for_string
+ */
+ public int id_try_string (string name)
+ {
+ int id = value_to_id.lookup (name);
+ if (id == 0)
+ return -1;
+ return id;
+ }
+
+ /**
+         * Searches the table for the given value, inserting it if not found.
+ *
+ * @see id_try_string
+ *
+ */
+ public int id_for_string (string name) throws EngineError
+ {
+ int id = value_to_id.lookup (name);
+ if (id == 0)
+ {
+ int rc;
+ insertion_stmt.reset ();
+ insertion_stmt.bind_text (1, name);
+ rc = insertion_stmt.step ();
+ database.assert_query_success (rc, "Error in id_for_string",
+ Sqlite.DONE);
+
+ id = (int) db.last_insert_rowid ();
+
+ id_to_value.insert (id, name);
+ value_to_id.insert (name, id);
+ }
+ return id;
+ }
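+
+        // Usage sketch for the two lookups above (the "actor" table and the
+        // example value are only assumptions; error handling omitted):
+        //
+        //     var actors = new TableLookup (database, "actor");
+        //     int id = actors.id_try_string ("application://gedit.desktop");
+        //     if (id == -1)
+        //         id = actors.id_for_string ("application://gedit.desktop");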
+
+ public unowned string? get_value (int id) throws EngineError
+ {
+            // When we fetch an event, it was either already in the database
+            // when Zeitgeist started or it was inserted later (through
+            // Zeitgeist), so we normally have the data in memory already.
+ if (id == 0)
+ return null;
+ unowned string val = id_to_value.lookup (id);
+ if (val != null) return val;
+
+ // Unless this is a standalone reader in a separate process, in
+ // which case the values won't be kept updated, so we need to
+ // query the DB if we don't find it.
+ int rc;
+ string? text = null;
+
+ retrieval_stmt.reset ();
+ retrieval_stmt.bind_int64 (1, id);
+ if ((rc = retrieval_stmt.step()) == Sqlite.ROW)
+ {
+ text = retrieval_stmt.column_text (0);
+ id_to_value.insert (id, text);
+ value_to_id.insert (text, id);
+ rc = retrieval_stmt.step ();
+ }
+ database.assert_query_success (rc, "Error in get_value",
+ Sqlite.DONE);
+ if (text == null)
+ {
+ critical ("Error getting data from table: %d, %s\n",
+ rc, db.errmsg ());
+ }
+
+ return id_to_value.lookup (id);
+ }
+
+ public void remove (int id)
+ {
+ string name = id_to_value.lookup (id);
+ id_to_value.remove (id);
+ value_to_id.remove (name);
+ }
+
+ }
+
+}
+
+// vim:expandtab:ts=4:sw=4
diff --git a/libzeitgeist/where-clause.vala b/libzeitgeist/where-clause.vala
new file mode 100644
index 00000000..a8305995
--- /dev/null
+++ b/libzeitgeist/where-clause.vala
@@ -0,0 +1,288 @@
+/* where-clause.vala
+ *
+ * Copyright © 2011-2012 Collabora Ltd.
+ * By Siegfried-Angel Gevatter Pujals <siegfried@gevatter.com>
+ *
+ * Based upon a Python implementation (2009-2011) by:
+ * Markus Korn <thekorn@gmx.net>
+ * Mikkel Kamstrup Erlandsen <mikkel.kamstrup@gmail.com>
+ * Seif Lotfy <seif@lotfy.com>
+ * Siegfried-Angel Gevatter Pujals <siegfried@gevatter.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+using Zeitgeist;
+
+namespace Zeitgeist
+{
+
+ /**
+     * This class provides a convenient representation of a SQL `WHERE'
+     * clause, composed of a set of conditions joined together.
+ *
+     * The relation between conditions can be either of type *AND* or *OR*,
+     * but not both. To create more complex clauses, use several `WhereClause`
+     * instances and join them together using `extend` (see the usage sketch
+     * after the constructor below).
+ *
+ * Instances of this class can then be used to obtain a line of SQL code and
+ * a list of arguments, for use with SQLite 3.
+ */
+ public class WhereClause : Object
+ {
+
+ public enum Type
+ {
+ AND,
+ OR
+ }
+
+ private static string[] RELATION_SIGNS = { " AND ", " OR " };
+
+ private WhereClause.Type clause_type;
+ private bool negated;
+ private GenericArray<string> conditions;
+ private GenericArray<string> arguments;
+ private bool is_simple;
+
+ public WhereClause (WhereClause.Type type, bool negate=false)
+ {
+ clause_type = type;
+ negated = negate;
+ is_simple = true;
+ conditions = new GenericArray<string> ();
+ arguments = new GenericArray<string> ();
+ }
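+
+        // Usage sketch referenced in the class description (column names as
+        // used elsewhere in this commit; the timestamp value is arbitrary):
+        //
+        //     var where = new WhereClause (WhereClause.Type.AND);
+        //     where.add ("timestamp >= ?", "1358550000000");
+        //     where.add_text_condition_subquery ("subj_mimetype", "text/plain");
+        //     // where.get_sql_conditions () =>
+        //     //     "(timestamp >= ? AND subj_mimetype = (SELECT id FROM mimetype WHERE value = ?))"
+        //     // where.get_bind_arguments () => { "1358550000000", "text/plain" }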
+
+        public int get_conditions_length ()
+        {
+            return conditions.length;
+        }
+
+        public bool has_non_timestamp_condition ()
+        {
+            for (int i = 0; i < conditions.length; i++)
+            {
+                if (!conditions[i].has_prefix ("timestamp"))
+                    return true;
+            }
+            return false;
+        }
+
+ public void add (string condition, string? argument=null)
+ {
+ conditions.add (condition);
+ if (argument != null)
+ arguments.add (argument);
+ }
+
+ public void add_with_array (string condition,
+ GenericArray<string> args)
+ {
+ conditions.add (condition);
+ for (int i = 0; i < args.length; ++i)
+ arguments.add (args[i]);
+ }
+
+ private static string get_search_table_for_column (string column)
+ {
+ string search_table;
+ switch (column)
+ {
+ // For use in add_text_condition_subquery and
+ // add_wildcard_condition:
+ case "origin":
+ case "subj_origin":
+ case "subj_origin_current":
+ case "subj_id":
+ case "subj_id_current":
+ search_table = "uri";
+ break;
+ case "subj_mimetype":
+ search_table = "mimetype";
+ break;
+
+ // For use only in add_text_condition_subquery:
+ case "subj_text_id":
+ search_table = "text";
+ break;
+ case "subj_storage_id":
+ search_table = "storage";
+ break;
+
+ // --
+ default:
+ search_table = column;
+ break;
+ }
+ return search_table;
+ }
+
+ public void add_match_condition (string column, int val,
+ bool negation=false)
+ {
+ string sql = "%s %s= %d".printf (column, (negation) ? "!" : "", val);
+ add (sql);
+ }
+
+ public void add_text_condition_subquery (string column, string val,
+ bool negation=false)
+ {
+ string search_table = get_search_table_for_column (column);
+ string sql = "%s %s= (SELECT id FROM %s WHERE value = ?)".printf (
+ column, (negation) ? "!" : "", search_table);
+ add (sql, val);
+ is_simple = false;
+ }
+
+ public void add_text_condition (string column, string val,
+ bool negation=false)
+ {
+ string sql = "%s %s= ?".printf (column, (negation) ? "!" : "");
+ add (sql, val);
+ }
+
+ public void add_wildcard_condition (string column, string needle,
+ bool negation=false)
+ {
+ string search_table = get_search_table_for_column (column);
+
+ var values = new GenericArray<string> ();
+ values.add(needle);
+ string optimized_glob = optimize_glob (
+ "id", search_table, ref values);
+
+ string sql;
+ if (!negation)
+ sql = "%s IN (%s)".printf (column, optimized_glob);
+ else
+ sql = "(%s NOT IN (%s) OR %s is NULL)".printf (column,
+ optimized_glob, column);
+ add_with_array (sql, values);
+ is_simple = false;
+ }
+
+ public void extend (WhereClause clause)
+ {
+ if (clause.is_empty ())
+ return;
+ string sql = clause.get_sql_conditions ();
+ add_with_array (sql, clause.arguments);
+ is_simple = clause.get_is_simple ();
+ /*if not where.may_have_results():
+ if self._relation == self.AND:
+ self.clear()
+ self.register_no_result()
+ */
+ }
+
+ public bool is_empty ()
+ {
+ return conditions.length == 0;
+ }
+
+ public bool may_have_results ()
+ {
+ return conditions.length > 0; // or not self._no_result_member
+ }
+
+ public bool get_is_simple ()
+ {
+ return is_simple;
+ }
+
+ public void set_is_simple (bool simple)
+ {
+ is_simple = simple;
+ }
+
+ /**
+ * This is dangerous. Only use it if you're made full of awesome.
+ */
+ private T[] generic_array_to_unowned_array<T> (GenericArray<T> gptrarr)
+ {
+ long[] pointers = new long[gptrarr.length + 1];
+ Memory.copy(pointers, ((PtrArray *) gptrarr)->pdata,
+ (gptrarr.length) * sizeof (void *));
+ return (T[]) pointers;
+ }
+
+ public string get_sql_conditions ()
+ {
+ assert (conditions.length > 0);
+ string negation_sign = (negated) ? "NOT " : "";
+ string relation_sign = RELATION_SIGNS[clause_type];
+
+ if (conditions.length == 1)
+ return "%s%s".printf (negation_sign, conditions[0]);
+ string conditions_string = string.joinv (relation_sign,
+ generic_array_to_unowned_array<string> (conditions));
+ return "%s(%s)".printf (negation_sign, conditions_string);
+ }
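+
+        // For instance (sketch), a clause created with
+        //     new WhereClause (WhereClause.Type.OR, true)
+        // holding the conditions "actor = ?" and "origin = ?" renders as
+        //     "NOT (actor = ? OR origin = ?)"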
+
+ public unowned GenericArray<string> get_bind_arguments ()
+ {
+ return arguments;
+ }
+
+ /**
+ * Return an optimized version of the GLOB statement as described in
+ * http://www.sqlite.org/optoverview.html "4.0 The LIKE optimization".
+ */
+ private static string optimize_glob (string column, string table,
+ ref GenericArray<string> args)
+ requires (args.length == 1)
+ {
+ string sql;
+ string prefix = args[0];
+ if (prefix == "") {
+ sql = "SELECT %s FROM %s".printf (column, table);
+ }
+ else if (false) // ...
+ {
+ // FIXME: check for all(i == unichr(0x10ffff)...)
+ }
+ else
+ {
+ sql = "SELECT %s FROM %s WHERE (value >= ? AND value < ?)"
+ .printf (column, table);
+ args.add (get_right_boundary (prefix));
+ }
+ return sql;
+ }
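+
+        // Example of the prefix rewrite above (illustrative): for table
+        // "uri", column "id" and args == { "file:///home/" } the result is
+        //
+        //     SELECT id FROM uri WHERE (value >= ? AND value < ?)
+        //
+        // and args grows to { "file:///home/", "file:///home0" }, the second
+        // entry being the right boundary computed below.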
+
+ /**
+         * Return the smallest string which is greater than all strings
+         * prefixed with the given `text` (the exclusive upper bound used
+         * by optimize_glob for prefix searches).
+ */
+ protected static string get_right_boundary (string text)
+ {
+ if (text == "")
+ return new StringBuilder ().append_unichar(0x10ffff).str;
+ int len = text.char_count () - 1;
+ unichar charpoint = text.get_char (text.index_of_nth_char (len));
+ string head = text.substring (0, text.index_of_nth_char (len));
+ if (charpoint == 0x10ffff)
+ {
+ // If the last char is the biggest possible char we need to
+ // look at the second last.
+ return get_right_boundary (head);
+ }
+ return head +
+ new StringBuilder ().append_unichar(charpoint + 1).str;
+ }
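+
+        // A couple of illustrative values:
+        //     get_right_boundary ("abc") => "abd"
+        //     get_right_boundary ("")    => a single U+10FFFF character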
+
+ }
+
+}
+
+// vim:expandtab:ts=4:sw=4