From 9279bf73e50b34821eecb41cd355c6a2f3cf63ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Corentin=20No=C3=ABl?= Date: Thu, 28 Mar 2019 17:22:48 +0100 Subject: docs: Modernize the libtracker-sparql documentation Still not fixed the Tracker 2.0 references here and there --- docs/reference/libtracker-sparql/examples.sgml | 385 --------------------- docs/reference/libtracker-sparql/examples.xml | 385 +++++++++++++++++++++ .../libtracker-sparql/libtracker-sparql-docs.sgml | 57 --- .../libtracker-sparql/libtracker-sparql-docs.xml | 95 +++++ .../libtracker-sparql-sections.txt | 42 ++- .../libtracker-sparql/libtracker-sparql.types | 8 - docs/reference/libtracker-sparql/meson.build | 5 +- docs/reference/libtracker-sparql/ontologies.sgml | 352 ------------------- docs/reference/libtracker-sparql/ontologies.xml | 352 +++++++++++++++++++ docs/reference/libtracker-sparql/overview.sgml | 183 ---------- docs/reference/libtracker-sparql/overview.xml | 183 ++++++++++ src/libtracker-sparql/tracker-connection.vala | 10 +- 12 files changed, 1061 insertions(+), 996 deletions(-) delete mode 100644 docs/reference/libtracker-sparql/examples.sgml create mode 100644 docs/reference/libtracker-sparql/examples.xml delete mode 100644 docs/reference/libtracker-sparql/libtracker-sparql-docs.sgml create mode 100644 docs/reference/libtracker-sparql/libtracker-sparql-docs.xml delete mode 100644 docs/reference/libtracker-sparql/libtracker-sparql.types delete mode 100644 docs/reference/libtracker-sparql/ontologies.sgml create mode 100644 docs/reference/libtracker-sparql/ontologies.xml delete mode 100644 docs/reference/libtracker-sparql/overview.sgml create mode 100644 docs/reference/libtracker-sparql/overview.xml diff --git a/docs/reference/libtracker-sparql/examples.sgml b/docs/reference/libtracker-sparql/examples.sgml deleted file mode 100644 index 5590e3b00..000000000 --- a/docs/reference/libtracker-sparql/examples.sgml +++ /dev/null @@ -1,385 +0,0 @@ - - - - Examples - - - This chapters shows some 
real examples of usage of the Tracker SPARQL Library. - - - - - - SPARQL query builder - - - The Tracker SPARQL library provides an easy and secure way of creating - SPARQL queries with the proper syntax. This is achieved using the - TrackerSparqlBuilder - object. - - - - -#include <tracker-sparql.h> - -int main (int argc, char **argv) -{ - TrackerSparqlBuilder *builder; - const gchar *iri = "urn:example:0001"; - const gchar *query_str; - time_t now; - - /* Create builder */ - builder = tracker_sparql_builder_new_update (); - - /* Insert new data */ - tracker_sparql_builder_insert_open (builder, NULL); - - tracker_sparql_builder_subject_iri (builder, iri); - - tracker_sparql_builder_predicate (builder, "a"); - tracker_sparql_builder_object (builder, "nie:DataObject"); - tracker_sparql_builder_object (builder, "nfo:FileDataObject"); - - now = time (NULL); - tracker_sparql_builder_predicate (builder, "nfo:fileLastModified"); - tracker_sparql_builder_object_date (builder, &now); - - tracker_sparql_builder_insert_close (builder); - - /* Get query as string. Do NOT g_free() the resulting string! */ - query_str = tracker_sparql_builder_get_result (builder); - - /* Print it */ - g_print ("Generated SPARQL query: '%s'\n", query_str); - - /* Once builder no longer needed, unref it. Note that after - * this operation, you must not use the returned query result - * any more - */ - g_object_unref (builder); - - return 0; -} - - - The previous code will generate the following SPARQL query: - - DROP GRAPH <urn:example:0001> - INSERT INTO <urn:example:0001> { - <urn:example:0001> a nie:DataObject , nfo:FileDataObject ; - nfo:fileLastModified "2010-08-04T13:09:26Z" . -} - - - - - - Querying the Store - - - In order to perform read-only queries to the store, a new - TrackerSparqlConnection - object must be acquired. 
In this case, as there is no intention of updating any - value in the store, both the general connection (with - tracker_sparql_connection_get) - or a specific direct-access connection (with - tracker_sparql_connection_get_direct) - may be acquired. Note that in the latter case, every non read-only operation will result - in an error being thrown by the TrackerSparqlConnection. - - - - Once a proper connection object has been acquired, the read-only query can be launched either - synchronously (tracker_sparql_connection_query) - or asynchronously (tracker_sparql_connection_query_async). - If launched asynchronously, the results of the query can be obtained with - tracker_sparql_connection_query_finish. - - - - If the query was successful, a TrackerSparqlCursor - will be available. You can now iterate the results of the query both synchronously (with - tracker_sparql_cursor_next) or - asynchronously (with - tracker_sparql_cursor_next_async and - tracker_sparql_cursor_next_finish) - - - - Once you end up with the query, remember to call g_object_unref - for the TrackerSparqlCursor. And the same applies to the - TrackerSparqlConnection when no longer needed. - - - - The following program shows how Read-Only queries can be done to the store in a - synchronous way: - - -#include <tracker-sparql.h> - -int main (int argc, const char **argv) -{ - GError *error = NULL; - TrackerSparqlConnection *connection; - TrackerSparqlCursor *cursor; - const gchar *query = "SELECT nie:url(?u) WHERE { ?u a nfo:FileDataObject }"; - - /* As we know only read-only queries will be done, it's enough - * to use a connection with only direct-access setup. The NULL - * represents a possible GCancellable. - */ - connection = tracker_sparql_connection_get_direct (NULL, &error); - if (!connection) { - g_printerr ("Couldn't obtain a direct connection to the Tracker store: %s", - error ? 
error->message : "unknown error"); - g_clear_error (&error); - - return 1; - } - - /* Make a synchronous query to the store */ - cursor = tracker_sparql_connection_query (connection, - query, - NULL, - &error); - - if (error) { - /* Some error happened performing the query, not good */ - g_printerr ("Couldn't query the Tracker Store: '%s'", - error ? error->message : "unknown error"); - g_clear_error (&error); - - return 1; - } - - /* Check results... */ - if (!cursor) { - g_print ("No results found :-/\n"); - } else { - gint i = 0; - - /* Iterate, synchronously, the results... */ - while (tracker_sparql_cursor_next (cursor, NULL, &error)) { - g_print ("Result [%d]: %s\n", - i++, - tracker_sparql_cursor_get_string (cursor, 0, NULL)); - } - - g_print ("A total of '%d' results were found\n", i); - - g_object_unref (cursor); - } - - g_object_unref (connection); - - return 0; -} - - - - - - Updating the Store - - - In order to perform updates in the store, a new - TrackerSparqlConnection - object must be acquired with - tracker_sparql_connection_get. - Note that you MUST NOT use a specific direct-access connection obtained with - tracker_sparql_connection_get_direct, as the direct-access method only supports read-only queries. - - - - Once a proper connection object has been acquired, the update can be launched either - synchronously (tracker_sparql_connection_update) - or asynchronously (tracker_sparql_connection_update_async). - If launched asynchronously, the result of the operation can be obtained with - tracker_sparql_connection_update_finish. - - - - Once you no longer need the connection, remember to call g_object_unref - for the TrackerSparqlConnection. 
- - - - The following program shows how a synchronous update can be done to the store: - - -#include <tracker-sparql.h> - -int main (int argc, const char **argv) -{ - GError *error = NULL; - TrackerSparqlConnection *connection; - const gchar *query = - "INSERT { " - " _:tag a nao:Tag ; " - " nao:prefLabel 'mylabel' . " - "} WHERE { " - " OPTIONAL { " - " ?tag a nao:Tag ; " - " nao:prefLabel 'mylabel' " - " } . " - "FILTER (!bound(?tag)) " - "}"; - - /* Do NOT get a direct connection if you're going to do some write - * operation in the Store. The NULL represents a possible - * GCancellable. - */ - connection = tracker_sparql_connection_get (NULL, &error); - if (!connection) { - g_printerr ("Couldn't obtain a connection to the Tracker store: %s", - error ? error->message : "unknown error"); - g_clear_error (&error); - - return 1; - } - - /* Run a synchronous update query */ - tracker_sparql_connection_update (connection, - query, - G_PRIORITY_DEFAULT, - NULL, - &error); - if (error) { - /* Some error happened performing the query, not good */ - g_printerr ("Couldn't update the Tracker store: %s", - error ? error->message : "unknown error"); - - g_clear_error (&error); - g_object_unref (connection); - - return 1; - } - - g_object_unref (connection); - - return 0; -} - - - - - - Updating the Store with Blank Nodes - - - The majority of the work here is already described in the - previous example where we talk about how to write the store. - - - - The difference with this example is that sometimes you want to - insert data and have the URNs returned which were created to - avoid re-querying for them. This is done using - the tracker_sparql_connection_update_blank function (or asynchronously tracker_sparql_connection_update_blank_async). - If launched asynchronously, the result of the operation can be obtained with - tracker_sparql_connection_update_blank_finish. - - - - The _:foo in the example is how a blank node is - represented in SPARQL. 
The foo part is used to generate the - unique ID that is used for the new URN. It is also used in the - GVariant - that is returned. In the example below, we are creating a new - blank node called foo for every class that - exists. - - - - The format of the GVariant (in D-Bus terms) is an aaa{ss} (an - array of an array of dictionaries). This is rather complex but - for a good reason. The first array represents each INSERT that - may exist in the SPARQL. The second array represents each new - node for a given WHERE clause (the example below illustrates - this), you need this to differentiate between two INSERT - statments like the one below in the same SPARQL sent to the - store. Last, we have a final array to represent each new node's - name (in this case foo) and the actual URN which was - created. For most updates the first two outer arrays will only - have one item in them. - - - - The following program shows how a synchronous blank node update can be done to the store: - - -#include <tracker-sparql.h> - -int main (int argc, const char **argv) -{ - GError *error = NULL; - GVariant *v; - TrackerSparqlConnection *connection; - const gchar *query = - "INSERT { _:foo a nie:InformationElement } WHERE { ?x a rdfs:Class }"; - - /* Do NOT get a direct connection if you're going to do some write - * operation in the Store. The NULL represents a possible - * GCancellable. - */ - connection = tracker_sparql_connection_get (NULL, &error); - if (!connection) { - g_printerr ("Couldn't obtain a connection to the Tracker store: %s", - error ? error->message : "unknown error"); - g_clear_error (&error); - - return 1; - } - - /* Run a synchronous blank node update query */ - v = tracker_sparql_connection_update_blank (connection, - query, - G_PRIORITY_DEFAULT, - NULL, - &error); - - if (error) { - /* Some error happened performing the query, not good */ - g_printerr ("Couldn't update the Tracker store: %s", - error ? 
error->message : "unknown error"); - - g_clear_error (&error); - g_object_unref (connection); - - return 1; - } - - if (!v) { - g_print ("No results were returned\n"); - } else { - GVariantIter iter1, *iter2, *iter3; - const gchar *node; - const gchar *urn; - - g_print ("Results:\n"); - - g_variant_iter_init (&iter1, v); - while (g_variant_iter_loop (&iter1, "aa{ss}", &iter2)) { /* aa{ss} */ - while (g_variant_iter_loop (iter2, "a{ss}", &iter3)) { /* a{ss} */ - while (g_variant_iter_loop (iter3, "{ss}", &node, &urn)) { /* {ss} */ - g_print (" Node:'%s', URN:'%s'\n", node, urn); - } - } - } - - g_variant_unref (v); - } - - g_object_unref (connection); - - return 0; -} - - - - - - diff --git a/docs/reference/libtracker-sparql/examples.xml b/docs/reference/libtracker-sparql/examples.xml new file mode 100644 index 000000000..5590e3b00 --- /dev/null +++ b/docs/reference/libtracker-sparql/examples.xml @@ -0,0 +1,385 @@ + + + + Examples + + + This chapters shows some real examples of usage of the Tracker SPARQL Library. + + + + + + SPARQL query builder + + + The Tracker SPARQL library provides an easy and secure way of creating + SPARQL queries with the proper syntax. This is achieved using the + TrackerSparqlBuilder + object. 
+ + + + +#include <tracker-sparql.h> + +int main (int argc, char **argv) +{ + TrackerSparqlBuilder *builder; + const gchar *iri = "urn:example:0001"; + const gchar *query_str; + time_t now; + + /* Create builder */ + builder = tracker_sparql_builder_new_update (); + + /* Insert new data */ + tracker_sparql_builder_insert_open (builder, NULL); + + tracker_sparql_builder_subject_iri (builder, iri); + + tracker_sparql_builder_predicate (builder, "a"); + tracker_sparql_builder_object (builder, "nie:DataObject"); + tracker_sparql_builder_object (builder, "nfo:FileDataObject"); + + now = time (NULL); + tracker_sparql_builder_predicate (builder, "nfo:fileLastModified"); + tracker_sparql_builder_object_date (builder, &now); + + tracker_sparql_builder_insert_close (builder); + + /* Get query as string. Do NOT g_free() the resulting string! */ + query_str = tracker_sparql_builder_get_result (builder); + + /* Print it */ + g_print ("Generated SPARQL query: '%s'\n", query_str); + + /* Once builder no longer needed, unref it. Note that after + * this operation, you must not use the returned query result + * any more + */ + g_object_unref (builder); + + return 0; +} + + + The previous code will generate the following SPARQL query: + + DROP GRAPH <urn:example:0001> + INSERT INTO <urn:example:0001> { + <urn:example:0001> a nie:DataObject , nfo:FileDataObject ; + nfo:fileLastModified "2010-08-04T13:09:26Z" . +} + + + + + + Querying the Store + + + In order to perform read-only queries to the store, a new + TrackerSparqlConnection + object must be acquired. In this case, as there is no intention of updating any + value in the store, both the general connection (with + tracker_sparql_connection_get) + or a specific direct-access connection (with + tracker_sparql_connection_get_direct) + may be acquired. Note that in the latter case, every non read-only operation will result + in an error being thrown by the TrackerSparqlConnection. 
+ + + + Once a proper connection object has been acquired, the read-only query can be launched either + synchronously (tracker_sparql_connection_query) + or asynchronously (tracker_sparql_connection_query_async). + If launched asynchronously, the results of the query can be obtained with + tracker_sparql_connection_query_finish. + + + + If the query was successful, a TrackerSparqlCursor + will be available. You can now iterate the results of the query both synchronously (with + tracker_sparql_cursor_next) or + asynchronously (with + tracker_sparql_cursor_next_async and + tracker_sparql_cursor_next_finish) + + + + Once you end up with the query, remember to call g_object_unref + for the TrackerSparqlCursor. And the same applies to the + TrackerSparqlConnection when no longer needed. + + + + The following program shows how Read-Only queries can be done to the store in a + synchronous way: + + +#include <tracker-sparql.h> + +int main (int argc, const char **argv) +{ + GError *error = NULL; + TrackerSparqlConnection *connection; + TrackerSparqlCursor *cursor; + const gchar *query = "SELECT nie:url(?u) WHERE { ?u a nfo:FileDataObject }"; + + /* As we know only read-only queries will be done, it's enough + * to use a connection with only direct-access setup. The NULL + * represents a possible GCancellable. + */ + connection = tracker_sparql_connection_get_direct (NULL, &error); + if (!connection) { + g_printerr ("Couldn't obtain a direct connection to the Tracker store: %s", + error ? error->message : "unknown error"); + g_clear_error (&error); + + return 1; + } + + /* Make a synchronous query to the store */ + cursor = tracker_sparql_connection_query (connection, + query, + NULL, + &error); + + if (error) { + /* Some error happened performing the query, not good */ + g_printerr ("Couldn't query the Tracker Store: '%s'", + error ? error->message : "unknown error"); + g_clear_error (&error); + + return 1; + } + + /* Check results... 
*/ + if (!cursor) { + g_print ("No results found :-/\n"); + } else { + gint i = 0; + + /* Iterate, synchronously, the results... */ + while (tracker_sparql_cursor_next (cursor, NULL, &error)) { + g_print ("Result [%d]: %s\n", + i++, + tracker_sparql_cursor_get_string (cursor, 0, NULL)); + } + + g_print ("A total of '%d' results were found\n", i); + + g_object_unref (cursor); + } + + g_object_unref (connection); + + return 0; +} + + + + + + Updating the Store + + + In order to perform updates in the store, a new + TrackerSparqlConnection + object must be acquired with + tracker_sparql_connection_get. + Note that you MUST NOT use a specific direct-access connection obtained with + tracker_sparql_connection_get_direct, as the direct-access method only supports read-only queries. + + + + Once a proper connection object has been acquired, the update can be launched either + synchronously (tracker_sparql_connection_update) + or asynchronously (tracker_sparql_connection_update_async). + If launched asynchronously, the result of the operation can be obtained with + tracker_sparql_connection_update_finish. + + + + Once you no longer need the connection, remember to call g_object_unref + for the TrackerSparqlConnection. + + + + The following program shows how a synchronous update can be done to the store: + + +#include <tracker-sparql.h> + +int main (int argc, const char **argv) +{ + GError *error = NULL; + TrackerSparqlConnection *connection; + const gchar *query = + "INSERT { " + " _:tag a nao:Tag ; " + " nao:prefLabel 'mylabel' . " + "} WHERE { " + " OPTIONAL { " + " ?tag a nao:Tag ; " + " nao:prefLabel 'mylabel' " + " } . " + "FILTER (!bound(?tag)) " + "}"; + + /* Do NOT get a direct connection if you're going to do some write + * operation in the Store. The NULL represents a possible + * GCancellable. + */ + connection = tracker_sparql_connection_get (NULL, &error); + if (!connection) { + g_printerr ("Couldn't obtain a connection to the Tracker store: %s", + error ? 
error->message : "unknown error"); + g_clear_error (&error); + + return 1; + } + + /* Run a synchronous update query */ + tracker_sparql_connection_update (connection, + query, + G_PRIORITY_DEFAULT, + NULL, + &error); + if (error) { + /* Some error happened performing the query, not good */ + g_printerr ("Couldn't update the Tracker store: %s", + error ? error->message : "unknown error"); + + g_clear_error (&error); + g_object_unref (connection); + + return 1; + } + + g_object_unref (connection); + + return 0; +} + + + + + + Updating the Store with Blank Nodes + + + The majority of the work here is already described in the + previous example where we talk about how to write the store. + + + + The difference with this example is that sometimes you want to + insert data and have the URNs returned which were created to + avoid re-querying for them. This is done using + the tracker_sparql_connection_update_blank function (or asynchronously tracker_sparql_connection_update_blank_async). + If launched asynchronously, the result of the operation can be obtained with + tracker_sparql_connection_update_blank_finish. + + + + The _:foo in the example is how a blank node is + represented in SPARQL. The foo part is used to generate the + unique ID that is used for the new URN. It is also used in the + GVariant + that is returned. In the example below, we are creating a new + blank node called foo for every class that + exists. + + + + The format of the GVariant (in D-Bus terms) is an aaa{ss} (an + array of an array of dictionaries). This is rather complex but + for a good reason. The first array represents each INSERT that + may exist in the SPARQL. The second array represents each new + node for a given WHERE clause (the example below illustrates + this), you need this to differentiate between two INSERT + statments like the one below in the same SPARQL sent to the + store. 
Last, we have a final array to represent each new node's + name (in this case foo) and the actual URN which was + created. For most updates the first two outer arrays will only + have one item in them. + + + + The following program shows how a synchronous blank node update can be done to the store: + + +#include <tracker-sparql.h> + +int main (int argc, const char **argv) +{ + GError *error = NULL; + GVariant *v; + TrackerSparqlConnection *connection; + const gchar *query = + "INSERT { _:foo a nie:InformationElement } WHERE { ?x a rdfs:Class }"; + + /* Do NOT get a direct connection if you're going to do some write + * operation in the Store. The NULL represents a possible + * GCancellable. + */ + connection = tracker_sparql_connection_get (NULL, &error); + if (!connection) { + g_printerr ("Couldn't obtain a connection to the Tracker store: %s", + error ? error->message : "unknown error"); + g_clear_error (&error); + + return 1; + } + + /* Run a synchronous blank node update query */ + v = tracker_sparql_connection_update_blank (connection, + query, + G_PRIORITY_DEFAULT, + NULL, + &error); + + if (error) { + /* Some error happened performing the query, not good */ + g_printerr ("Couldn't update the Tracker store: %s", + error ? 
error->message : "unknown error"); + + g_clear_error (&error); + g_object_unref (connection); + + return 1; + } + + if (!v) { + g_print ("No results were returned\n"); + } else { + GVariantIter iter1, *iter2, *iter3; + const gchar *node; + const gchar *urn; + + g_print ("Results:\n"); + + g_variant_iter_init (&iter1, v); + while (g_variant_iter_loop (&iter1, "aa{ss}", &iter2)) { /* aa{ss} */ + while (g_variant_iter_loop (iter2, "a{ss}", &iter3)) { /* a{ss} */ + while (g_variant_iter_loop (iter3, "{ss}", &node, &urn)) { /* {ss} */ + g_print (" Node:'%s', URN:'%s'\n", node, urn); + } + } + } + + g_variant_unref (v); + } + + g_object_unref (connection); + + return 0; +} + + + + + + diff --git a/docs/reference/libtracker-sparql/libtracker-sparql-docs.sgml b/docs/reference/libtracker-sparql/libtracker-sparql-docs.sgml deleted file mode 100644 index 0f76996df..000000000 --- a/docs/reference/libtracker-sparql/libtracker-sparql-docs.sgml +++ /dev/null @@ -1,57 +0,0 @@ - - - -]> - - - Tracker SPARQL Library Reference Manual - - for libtracker-sparql &version;. - The latest version of this documentation can be found on-line at - - http://library.gnome.org/devel/libtracker-sparql/unstable - . - - - - - - - - - Reference - - - This section provides the detailed API of the Tracker SPARQL library. - - - - - - - - - - - - - - - - Base ontology - - - - - - - - - - - - - - diff --git a/docs/reference/libtracker-sparql/libtracker-sparql-docs.xml b/docs/reference/libtracker-sparql/libtracker-sparql-docs.xml new file mode 100644 index 000000000..5f5611e18 --- /dev/null +++ b/docs/reference/libtracker-sparql/libtracker-sparql-docs.xml @@ -0,0 +1,95 @@ + + + +]> + + + Tracker SPARQL Library Reference Manual + + for libtracker-sparql &version;. + The latest version of this documentation can be found on-line at + + https://developer.gnome.org/libtracker-sparql/stable + . + + + + + + + + + Reference + + + This section provides the detailed API of the Tracker SPARQL library. 
+ + + + + + + + + + + + + + + + Base ontology + + + + + + + + + + + + + + Index + + + + Index of deprecated symbols + + + + Index of new symbols in 0.10 + + + + Index of new symbols in 0.12 + + + + Index of new symbols in 1.10 + + + + Index of new symbols in 1.12 + + + + Index of new symbols in 2.0 + + + + Index of new symbols in 2.0.5 + + + + Index of new symbols in 2.2 + + + + + diff --git a/docs/reference/libtracker-sparql/libtracker-sparql-sections.txt b/docs/reference/libtracker-sparql/libtracker-sparql-sections.txt index 5f7899a10..8c0b6a4ef 100644 --- a/docs/reference/libtracker-sparql/libtracker-sparql-sections.txt +++ b/docs/reference/libtracker-sparql/libtracker-sparql-sections.txt @@ -7,6 +7,9 @@ tracker_sparql_escape_string tracker_sparql_escape_uri tracker_sparql_escape_uri_printf tracker_sparql_escape_uri_vprintf + +TRACKER_TYPE_URI +tracker_uri_get_type
@@ -45,6 +48,27 @@ tracker_resource_get_values tracker_resource_identifier_compare_func tracker_resource_print_sparql_update tracker_resource_print_turtle +tracker_resource_print_jsonld +TRACKER_DATASOURCE_URN_NON_REMOVABLE_MEDIA +TRACKER_OWN_GRAPH_URN +TRACKER_PREFIX_DATASOURCE_URN +TRACKER_PREFIX_DC +TRACKER_PREFIX_MFO +TRACKER_PREFIX_MLO +TRACKER_PREFIX_NAO +TRACKER_PREFIX_NCO +TRACKER_PREFIX_NFO +TRACKER_PREFIX_NID3 +TRACKER_PREFIX_NIE +TRACKER_PREFIX_NMM +TRACKER_PREFIX_NMO +TRACKER_PREFIX_NRL +TRACKER_PREFIX_OSINFO +TRACKER_PREFIX_RDF +TRACKER_PREFIX_RDFS +TRACKER_PREFIX_SLO +TRACKER_PREFIX_TRACKER +TRACKER_PREFIX_XSD TrackerResourceClass TRACKER_RESOURCE @@ -59,6 +83,7 @@ TRACKER_RESOURCE_GET_CLASS
tracker-namespace-manager TrackerNamespaceManager +TrackerNamespaceManager tracker_namespace_manager_add_prefix tracker_namespace_manager_expand_uri tracker_namespace_manager_get_default @@ -66,7 +91,9 @@ tracker_namespace_manager_has_prefix tracker_namespace_manager_lookup_prefix tracker_namespace_manager_new tracker_namespace_manager_print_turtle +tracker_namespace_manager_foreach +TrackerNamespaceManagerClass TRACKER_TYPE_NAMESPACE_MANAGER
@@ -161,6 +188,8 @@ tracker_sparql_connection_statistics_finish tracker_sparql_connection_get_namespace_manager tracker_sparql_connection_set_domain tracker_sparql_connection_get_domain +tracker_sparql_connection_get_dbus_connection +tracker_sparql_connection_set_dbus_connection TrackerSparqlConnectionClass TRACKER_SPARQL_CONNECTION @@ -171,6 +200,8 @@ TRACKER_SPARQL_IS_CONNECTION_CLASS TRACKER_SPARQL_TYPE_CONNECTION TRACKER_SPARQL_TYPE_VALUE_TYPE tracker_sparql_connection_get_type +TRACKER_SPARQL_TYPE_CONNECTION_FLAGS +tracker_sparql_connection_flags_get_type TRACKER_DBUS_INTERFACE_RESOURCES TRACKER_DBUS_INTERFACE_STATISTICS @@ -197,6 +228,10 @@ tracker_sparql_statement_bind_int tracker_sparql_statement_bind_double tracker_sparql_statement_bind_string tracker_sparql_statement_bind_boolean +tracker_sparql_statement_get_connection +tracker_sparql_statement_set_connection +tracker_sparql_statement_get_sparql +tracker_sparql_statement_set_sparql TrackerSparqlStatementClass TRACKER_SPARQL_STATEMENT @@ -269,6 +304,10 @@ TRACKER_NOTIFIER_CLASS TRACKER_NOTIFIER_GET_CLASS TRACKER_TYPE_NOTIFIER tracker_notifier_get_type +TRACKER_TYPE_NOTIFIER_EVENT_TYPE +tracker_notifier_event_type_get_type +TRACKER_TYPE_NOTIFIER_FLAGS +tracker_notifier_flags_get_type
@@ -282,8 +321,5 @@ tracker_interface_age tracker_check_version -TRACKER_MAJOR_VERSION -TRACKER_MINOR_VERSION -TRACKER_MICRO_VERSION TRACKER_CHECK_VERSION
diff --git a/docs/reference/libtracker-sparql/libtracker-sparql.types b/docs/reference/libtracker-sparql/libtracker-sparql.types deleted file mode 100644 index 0b6587b7b..000000000 --- a/docs/reference/libtracker-sparql/libtracker-sparql.types +++ /dev/null @@ -1,8 +0,0 @@ -tracker_resource_get_type -tracker_namespace_manager_get_type -tracker_sparql_builder_get_type -tracker_sparql_builder_state_get_type -tracker_sparql_connection_get_type -tracker_sparql_statement_get_type -tracker_sparql_cursor_get_type -tracker_notifier_get_type diff --git a/docs/reference/libtracker-sparql/meson.build b/docs/reference/libtracker-sparql/meson.build index 193cded84..e86e0b459 100644 --- a/docs/reference/libtracker-sparql/meson.build +++ b/docs/reference/libtracker-sparql/meson.build @@ -28,9 +28,8 @@ example_files = [ gnome.gtkdoc('libtracker-sparql', src_dir: sparqlinc, - main_sgml: 'libtracker-sparql-docs.sgml', - content_files: ['overview.sgml', 'examples.sgml', 'ontologies.sgml', 'private-store.xml', 'migrating-1to2.xml', example_files], + main_xml: 'libtracker-sparql-docs.xml', + content_files: ['overview.xml', 'examples.xml', 'ontologies.xml', 'private-store.xml', 'migrating-1to2.xml', example_files], dependencies: tracker_sparql_dep, - gobject_typesfile: 'libtracker-sparql.types', fixxref_args: fixxref_args, install: true) diff --git a/docs/reference/libtracker-sparql/ontologies.sgml b/docs/reference/libtracker-sparql/ontologies.sgml deleted file mode 100644 index 615ff74d0..000000000 --- a/docs/reference/libtracker-sparql/ontologies.sgml +++ /dev/null @@ -1,352 +0,0 @@ - - - - Defining ontologies - - - An ontology defines the entities that a Tracker endpoint can store, as - well as their properties and the relationships between different entities. 
- - - Tracker internally uses the following ontologies as its base, all ontologies - defined by the user of the endpoint are recommended to be build around this - base: - - - - <systemitem>XML Schema (XSD).</systemitem> - Defining basic types. - - - <systemitem>Resource Description Framework (RDF).</systemitem> - Defining classes, properties and inheritance - - - <systemitem>Nepomuk Resource Language (NRL).</systemitem> - Defining resource uniqueness and cardinality - - - <systemitem>Dublin Core (DC).</systemitem> - Defining common superproperties for documents - - - <systemitem>Nepomuk Annotation Ontology (NAO).</systemitem> - Implementing tagging and annotations - - - - Ontologies are RDF files with the .ontology extension, Tracker parses all - ontology files from the given directory. The individual ontology files may - ontologies may not be self-consistent (i.e. use missing definitions), but - all the ontology files as a whole must be. - - - - Tracker loads the ontology files in alphanumeric order, it is advisable - that those have a numbered prefix in order to load those at a consistent - order despite future additions. - - - - - Creating an ontology -
- Defining a namespace - - A namespace is the topmost layer of an individual ontology, it will - contain all classes and properties defined by it. In order to define - a namespace you can do: - - -
-
- Defining classes - - Classes are the base of an ontology, all stored resources must define - themselves as "being" at least one of these classes. They all derive - from the base rdfs:Resource type. To eg. define classes representing - animals and plants, you can do: - - - - By convention all classes use CamelCase names, although class names - are not restricted. The allowed charset is UTF-8. - - - Declaring subclasses is possible: - - - - With such classes defined, resources may be inserted to the endpoint, - eg. with the SPARQL: - - - - Note that multiple inheritance is possible, resources will just inherit - all properties from all classes and superclasses. - -
-
- Defining properties - - Properties relate to a class, so all resources pertaining to that class - can define values for these. - - - - The class the property belongs to is defined by rdfs:domain, while the - data type contained is defined by rdfs:range. By convention all - properties use lowerCamelCase names, although property names are not - restricted. The allowed charset is UTF-8. - - - The following basic types are supported: - - - <systemitem>xsd:boolean</systemitem> - - - <systemitem>xsd:string</systemitem> - - - <systemitem>xsd:integer</systemitem> - Ranging from -2^63 to 2^63-1. - - - <systemitem>xsd:double</systemitem> - Able to store a 8 byte IEEE floating point number. - - - <systemitem>xsd:date and xsd:dateTime</systemitem> - - Able to store dates and times since January 1, 1970 UTC, with - millisecond resolution. - - - - Of course, properties can also point to resources of the same or - other classes, so stored resources can conform a graph: - - - - There is also inheritance of properties, an example would be a property - in a subclass concretizing a more generic property from a superclass. - - - - SPARQL queries are expected to provide the same result when queried - for a property or one of its superproperties. - - -
-
- Defining cardinality of properties - - By default, properties are multivalued, there are no restrictions in - the number of values a property can store. - - - - Wherever this is not desirable, cardinality can be limited on properties - through nrl:maxCardinality. - - - - This will raise an error if the SPARQL updates in the endpoint end up - in the property inserted multiple times. - - - - Tracker does not implement support for other maximum cardinalities - than 1. - - -
-
- Defining uniqueness - - It is desirable for certain properties to keep their values unique - across all resources, this can be expressed by defining the properties - as being a nrl:InverseFunctionalProperty. - - - - With that in place, no two resources can have the same value on the - property. - - - -
-
- Defining indexes - - It may be the case that SPARQL queries performed on the endpoint are - known to match, sort, or filter on certain properties more often than others. - In this case, the ontology may use tracker:domainIndex in the class definition: - - - - Classes may define multiple domain indexes. - - - Be frugal with indexes, do not add these proactively. An index in the wrong - place might not affect query performance positively, but all indexes come at - a cost in disk size. - -
-
- Defining full-text search properties - - Tracker provides nonstandard full-text search capabilities, in order to use - these, the string properties can use tracker:fulltextIndexed: - - - - Weighting can also be applied, so certain properties rank higher than others - in full-text search queries. With tracker:fulltextIndexed in place, sparql - queries may use full-text search capabilities: - - -
-
- Predefined elements - - It may be desirable for the ontology to offer predefined elements of a - certain class, which can then be used by the endpoint. - - - - Usage does not differ in use from the elements of that same class that - could be inserted in the endpoint. - - -
-
- - - Accompanying metadata - - Ontology files are optionally accompanied by description files, those have - the same basename, but the ".description" extension. - - - - - - Updating an ontology - - - As software evolves, sometimes changes in the ontology are unavoidable. - Tracker can transparently handle certain ontology changes on existing - databases. - - - - <systemitem>Adding a class.</systemitem> - - - <systemitem>Removing a class.</systemitem> - - All resources will be removed from this class, and all related - properties will disappear. - - - - <systemitem>Adding a property.</systemitem> - - - <systemitem>Removing a property.</systemitem> - - The property will disappear from all elements pertaining to the - class in domain of the property. - - - - <systemitem>Changing rdfs:range of a property.</systemitem> - - The following conversions are allowed: - - - xsd:integer to xsd:bool, xsd:double and xsd:string - xsd:double to xsd:bool, xsd:integer and xsd:string - xsd:string to xsd:bool, xsd:integer and xsd:double - - - - <systemitem>Adding and removing tracker:domainIndex from a class.</systemitem> - - - <systemitem>Adding and removing tracker:fulltextIndexed from a property.</systemitem> - - - <systemitem>Changing the tracker:weight on a property.</systemitem> - - - <systemitem>Removing nrl:maxCardinality from a property.</systemitem> - - - - - However, there are certain ontology changes that Tracker will find - incompatible. Either because they are incoherent or resulting into - situations where it can not deterministically satisfy the change - in the stored data. Tracker will error out and refuse to do any data - changes in these situations: - - - - Properties with rdfs:range being xsd:bool, xsd:date, xsd:dateTime, - or any other custom class are not convertible. Only conversions - covered in the list above are accepted. - - - You can not add rdfs:subClassOf in classes that are not being - newly added. You can not remove rdfs:subClassOf from classes. 
- The only allowed change to rdfs:subClassOf is to correct - subclasses when deleting a class, so they point a common - superclass. - - - You can not add rdfs:subPropertyOf to properties that are not - being newly added. You can not change an existing - rdfs:subPropertyOf unless it is made to point to a common - superproperty. You can however remove rdfs:subPropertyOf from - non-new properties. - - - Properties can not move across classes, thus any change in - rdfs:domain forbidden. - - - You can not add nrl:maxCardinality restrictions on properties that - are not being newly added. - - - You can not add nor remove nrl:InverseFunctionalProperty from a - property that is not being newly added. - - - - The recommendation to bypass these situations is the same for all, - use different property and class names and use SPARQL to manually - migrate the old data to the new format if necessary. - - - High level code is in a better position to solve the - possible incoherences (e.g. picking a single value if a property - changes from multiple values to single value). After the manual - data migration has been completed, the old classes and properties - can be dropped. - - -
diff --git a/docs/reference/libtracker-sparql/ontologies.xml b/docs/reference/libtracker-sparql/ontologies.xml
new file mode 100644
index 000000000..615ff74d0
--- /dev/null
+++ b/docs/reference/libtracker-sparql/ontologies.xml
@@ -0,0 +1,352 @@
+
+
+
+ Defining ontologies
+
+
+ An ontology defines the entities that a Tracker endpoint can store, as
+ well as their properties and the relationships between different entities.
+
+
+ Tracker internally uses the following ontologies as its base, all ontologies
+ defined by the user of the endpoint are recommended to be built around this
+ base:
+
+
+
+ <systemitem>XML Schema (XSD).</systemitem>
+ Defining basic types.
+
+
+ <systemitem>Resource Description Framework (RDF).</systemitem>
+ Defining classes, properties and inheritance
+
+
+ <systemitem>Nepomuk Resource Language (NRL).</systemitem>
+ Defining resource uniqueness and cardinality
+
+
+ <systemitem>Dublin Core (DC).</systemitem>
+ Defining common superproperties for documents
+
+
+ <systemitem>Nepomuk Annotation Ontology (NAO).</systemitem>
+ Implementing tagging and annotations
+
+
+
+ Ontologies are RDF files with the .ontology extension, Tracker parses all
+ ontology files from the given directory. The individual ontology files may
+ not be self-consistent (i.e. use missing definitions), but
+ all the ontology files as a whole must be.
+
+
+
+ Tracker loads the ontology files in alphanumeric order, it is advisable
+ that those have a numbered prefix in order to load those at a consistent
+ order despite future additions.
+
+
+
+
+ Creating an ontology
+ Defining a namespace + + A namespace is the topmost layer of an individual ontology, it will + contain all classes and properties defined by it. In order to define + a namespace you can do: + + +
+
+ Defining classes + + Classes are the base of an ontology, all stored resources must define + themselves as "being" at least one of these classes. They all derive + from the base rdfs:Resource type. To eg. define classes representing + animals and plants, you can do: + + + + By convention all classes use CamelCase names, although class names + are not restricted. The allowed charset is UTF-8. + + + Declaring subclasses is possible: + + + + With such classes defined, resources may be inserted to the endpoint, + eg. with the SPARQL: + + + + Note that multiple inheritance is possible, resources will just inherit + all properties from all classes and superclasses. + +
+
+ Defining properties + + Properties relate to a class, so all resources pertaining to that class + can define values for these. + + + + The class the property belongs to is defined by rdfs:domain, while the + data type contained is defined by rdfs:range. By convention all + properties use lowerCamelCase names, although property names are not + restricted. The allowed charset is UTF-8. + + + The following basic types are supported: + + + <systemitem>xsd:boolean</systemitem> + + + <systemitem>xsd:string</systemitem> + + + <systemitem>xsd:integer</systemitem> + Ranging from -2^63 to 2^63-1. + + + <systemitem>xsd:double</systemitem> + Able to store a 8 byte IEEE floating point number. + + + <systemitem>xsd:date and xsd:dateTime</systemitem> + + Able to store dates and times since January 1, 1970 UTC, with + millisecond resolution. + + + + Of course, properties can also point to resources of the same or + other classes, so stored resources can conform a graph: + + + + There is also inheritance of properties, an example would be a property + in a subclass concretizing a more generic property from a superclass. + + + + SPARQL queries are expected to provide the same result when queried + for a property or one of its superproperties. + + +
+
+ Defining cardinality of properties + + By default, properties are multivalued, there are no restrictions in + the number of values a property can store. + + + + Wherever this is not desirable, cardinality can be limited on properties + through nrl:maxCardinality. + + + + This will raise an error if the SPARQL updates in the endpoint end up + in the property inserted multiple times. + + + + Tracker does not implement support for other maximum cardinalities + than 1. + + +
+
+ Defining uniqueness + + It is desirable for certain properties to keep their values unique + across all resources, this can be expressed by defining the properties + as being a nrl:InverseFunctionalProperty. + + + + With that in place, no two resources can have the same value on the + property. + + + +
+
+ Defining indexes + + It may be the case that SPARQL queries performed on the endpoint are + known to match, sort, or filter on certain properties more often than others. + In this case, the ontology may use tracker:domainIndex in the class definition: + + + + Classes may define multiple domain indexes. + + + Be frugal with indexes, do not add these proactively. An index in the wrong + place might not affect query performance positively, but all indexes come at + a cost in disk size. + +
+
+ Defining full-text search properties + + Tracker provides nonstandard full-text search capabilities, in order to use + these, the string properties can use tracker:fulltextIndexed: + + + + Weighting can also be applied, so certain properties rank higher than others + in full-text search queries. With tracker:fulltextIndexed in place, sparql + queries may use full-text search capabilities: + + +
+
+ Predefined elements + + It may be desirable for the ontology to offer predefined elements of a + certain class, which can then be used by the endpoint. + + + + Usage does not differ in use from the elements of that same class that + could be inserted in the endpoint. + + +
+
+ + + Accompanying metadata + + Ontology files are optionally accompanied by description files, those have + the same basename, but the ".description" extension. + + + + + + Updating an ontology + + + As software evolves, sometimes changes in the ontology are unavoidable. + Tracker can transparently handle certain ontology changes on existing + databases. + + + + <systemitem>Adding a class.</systemitem> + + + <systemitem>Removing a class.</systemitem> + + All resources will be removed from this class, and all related + properties will disappear. + + + + <systemitem>Adding a property.</systemitem> + + + <systemitem>Removing a property.</systemitem> + + The property will disappear from all elements pertaining to the + class in domain of the property. + + + + <systemitem>Changing rdfs:range of a property.</systemitem> + + The following conversions are allowed: + + + xsd:integer to xsd:bool, xsd:double and xsd:string + xsd:double to xsd:bool, xsd:integer and xsd:string + xsd:string to xsd:bool, xsd:integer and xsd:double + + + + <systemitem>Adding and removing tracker:domainIndex from a class.</systemitem> + + + <systemitem>Adding and removing tracker:fulltextIndexed from a property.</systemitem> + + + <systemitem>Changing the tracker:weight on a property.</systemitem> + + + <systemitem>Removing nrl:maxCardinality from a property.</systemitem> + + + + + However, there are certain ontology changes that Tracker will find + incompatible. Either because they are incoherent or resulting into + situations where it can not deterministically satisfy the change + in the stored data. Tracker will error out and refuse to do any data + changes in these situations: + + + + Properties with rdfs:range being xsd:bool, xsd:date, xsd:dateTime, + or any other custom class are not convertible. Only conversions + covered in the list above are accepted. + + + You can not add rdfs:subClassOf in classes that are not being + newly added. You can not remove rdfs:subClassOf from classes. 
The only allowed change to rdfs:subClassOf is to correct
+ subclasses when deleting a class, so they point to a common
+ superclass.
+
+
+ You can not add rdfs:subPropertyOf to properties that are not
+ being newly added. You can not change an existing
+ rdfs:subPropertyOf unless it is made to point to a common
+ superproperty. You can however remove rdfs:subPropertyOf from
+ non-new properties.
+
+
+ Properties can not move across classes, thus any change in
+ rdfs:domain is forbidden.
+
+
+ You can not add nrl:maxCardinality restrictions on properties that
+ are not being newly added.
+
+
+ You can not add nor remove nrl:InverseFunctionalProperty from a
+ property that is not being newly added.
+
+
+
+ The recommendation to bypass these situations is the same for all,
+ use different property and class names and use SPARQL to manually
+ migrate the old data to the new format if necessary.
+
+
+ High level code is in a better position to solve the
+ possible incoherences (e.g. picking a single value if a property
+ changes from multiple values to single value). After the manual
+ data migration has been completed, the old classes and properties
+ can be dropped.
+
+
diff --git a/docs/reference/libtracker-sparql/overview.sgml b/docs/reference/libtracker-sparql/overview.sgml deleted file mode 100644 index 14890457d..000000000 --- a/docs/reference/libtracker-sparql/overview.sgml +++ /dev/null @@ -1,183 +0,0 @@ - - - - Overview - - - The libtracker-sparql library is the foundation for Tracker - querying and inserting into the data store. The data store - allows both querying and inserting using SPARQL based on the - Nepomuk ontology. - - - - - - Connection methods - - - The Tracker SPARQL library provides several underlying methods to perform - queries and updates to the Tracker Store. - - - - Direct Access: - - All Read-Only operations done in a - TrackerSparqlConnection - will by default use this method, as it doesn't involve any D-Bus traffic, - and thus, it will perform much better. There is no real connection with - the Tracker Store in this case, as the access is direct to the underlying - SQLite database. Again, note that this method applies only to - Read-Only operations. - - - If you plan to only do Read-Only queries to the store, you can get the - TrackerSparqlConnection - object using tracker_sparql_connection_get_direct. Otherwise, if you also plan to use the same connection object - for Write operations, you must get the connection object with - tracker_sparql_connection_get. - - - - D-Bus FD passing: - - The TrackerSparqlConnection - will use the File Descriptor passing method via D-Bus to connect to the Store for all non - Read-Only queries on - TrackerSparqlConnection - objects obtained with - tracker_sparql_connection_get. - - - See the Environment Variables section - to check how you can force also Read-Only queries to be done using D-Bus. - - - - - - - - Compiling applications - - - To compile applications using libtracker-sparql, you - need to tell the compiler where to find the proper header files - and libraries. This is done with the - pkg-config utility. 
- - - - The following interactive shell session demonstrates how - pkg-config is used (the actual output on - your system may be different): - -$ pkg-config --cflags tracker-sparql-0.12 --pthread -I/usr/include/tracker-0.12 -I/usr/include/tracker-0.12/libtracker-sparql -I/usr/include/glib-2.0 -I/usr/lib/glib-2.0/include - -$ pkg-config --libs tracker-sparql-0.12 --Wl,--export-dynamic -pthread -ltracker-sparql-0.12 -lgio-2.0 -lgobject-2.0 -lgmodule-2.0 -lgthread-2.0 -lrt -lglib-2.0 - - - - The simplest way to compile a program is to use the "backticks" - feature of the shell. If you enclose a command in backticks - (not single quotes), then its output will be - substituted into the command line before execution: - - $ cc `pkg-config --cflags --libs tracker-sparql-0.12` hello.c -o hello - - - - - - - Environment Variables - - - There are a number of environment variables which affect the way - that the libtracker-sparql library will do its work. Those - environment variables are described here. - - - - TRACKER_USE_LOG_FILES - - Don't just log to stdout and stderr, but to log files too - which are kept in $HOME/.local/share/tracker/. This came - into effect in 0.15.3 and 0.16.0. After this version of - Tracker, logging to file (usually useful for debugging) - can only be done by declaring this environment variable. - - - - TRACKER_USE_CONFIG_FILES - - Don't use GSettings, instead use a config file similar to - how settings were saved in 0.10.x. That is, a file which - is much like an .ini file. These are saved to - $HOME/.config/tracker/ - - - - TRACKER_SPARQL_BACKEND - - Backends for libtracker-sparql are dynamically loaded at - run time. Currently there are only two backends which are - explained - more closely in the previous chapter. In short, - this environment variable gives the client the ability to - directly mandate which backend they want to use. The - value can be set to either "direct" or "bus". 
A "direct" - value means the direct access approach will be forced. A - "bus" value means a D-Bus / IPC approach will be forced. - - - - TRACKER_SPARQL_CACHE_SIZE - - Tracker caches database statements which occur frequently to make - subsequent repeat queries much faster. The cache size is - set to 100 by default for each type - (select and update queries). This must be at - least 2 as a minimum, any less and a - value of 3 is used instead. The - number represents the number of cached statements to keep - around. This environment variable is used mainly for - testing purposes. - - - Tracker's store also has environment variables to control - this behavior, see the manual pages - for tracker-store - regarding TRACKER_STORE_SELECT_CACHE_SIZE - and TRACKER_STORE_UPDATE_CACHE_SIZE. - - - - TRACKER_VERBOSITY - - Historically, all queries would go - through tracker-store and all - requests would be logged according to the verbosity set - in tracker-store.cfg (see manual - pages for tracker-store.cfg). Since - libtracker-sparql may - circumvent tracker-store if using the - direct access backend, this environment variable was added - to let clients choose the log level. The same values apply - to all other processes which have logging and a - configuration to control it. Values range - from 0 to 3, - 0=errors, 1=minimal, 2=detailed, 3=debug. By default it - is 0. - - - - - - - - - diff --git a/docs/reference/libtracker-sparql/overview.xml b/docs/reference/libtracker-sparql/overview.xml new file mode 100644 index 000000000..14890457d --- /dev/null +++ b/docs/reference/libtracker-sparql/overview.xml @@ -0,0 +1,183 @@ + + + + Overview + + + The libtracker-sparql library is the foundation for Tracker + querying and inserting into the data store. The data store + allows both querying and inserting using SPARQL based on the + Nepomuk ontology. 
+ + + + + + Connection methods + + + The Tracker SPARQL library provides several underlying methods to perform + queries and updates to the Tracker Store. + + + + Direct Access: + + All Read-Only operations done in a + TrackerSparqlConnection + will by default use this method, as it doesn't involve any D-Bus traffic, + and thus, it will perform much better. There is no real connection with + the Tracker Store in this case, as the access is direct to the underlying + SQLite database. Again, note that this method applies only to + Read-Only operations. + + + If you plan to only do Read-Only queries to the store, you can get the + TrackerSparqlConnection + object using tracker_sparql_connection_get_direct. Otherwise, if you also plan to use the same connection object + for Write operations, you must get the connection object with + tracker_sparql_connection_get. + + + + D-Bus FD passing: + + The TrackerSparqlConnection + will use the File Descriptor passing method via D-Bus to connect to the Store for all non + Read-Only queries on + TrackerSparqlConnection + objects obtained with + tracker_sparql_connection_get. + + + See the Environment Variables section + to check how you can force also Read-Only queries to be done using D-Bus. + + + + + + + + Compiling applications + + + To compile applications using libtracker-sparql, you + need to tell the compiler where to find the proper header files + and libraries. This is done with the + pkg-config utility. 
+ + + + The following interactive shell session demonstrates how + pkg-config is used (the actual output on + your system may be different): + +$ pkg-config --cflags tracker-sparql-0.12 +-pthread -I/usr/include/tracker-0.12 -I/usr/include/tracker-0.12/libtracker-sparql -I/usr/include/glib-2.0 -I/usr/lib/glib-2.0/include + +$ pkg-config --libs tracker-sparql-0.12 +-Wl,--export-dynamic -pthread -ltracker-sparql-0.12 -lgio-2.0 -lgobject-2.0 -lgmodule-2.0 -lgthread-2.0 -lrt -lglib-2.0 + + + + The simplest way to compile a program is to use the "backticks" + feature of the shell. If you enclose a command in backticks + (not single quotes), then its output will be + substituted into the command line before execution: + + $ cc `pkg-config --cflags --libs tracker-sparql-0.12` hello.c -o hello + + + + + + + Environment Variables + + + There are a number of environment variables which affect the way + that the libtracker-sparql library will do its work. Those + environment variables are described here. + + + + TRACKER_USE_LOG_FILES + + Don't just log to stdout and stderr, but to log files too + which are kept in $HOME/.local/share/tracker/. This came + into effect in 0.15.3 and 0.16.0. After this version of + Tracker, logging to file (usually useful for debugging) + can only be done by declaring this environment variable. + + + + TRACKER_USE_CONFIG_FILES + + Don't use GSettings, instead use a config file similar to + how settings were saved in 0.10.x. That is, a file which + is much like an .ini file. These are saved to + $HOME/.config/tracker/ + + + + TRACKER_SPARQL_BACKEND + + Backends for libtracker-sparql are dynamically loaded at + run time. Currently there are only two backends which are + explained + more closely in the previous chapter. In short, + this environment variable gives the client the ability to + directly mandate which backend they want to use. The + value can be set to either "direct" or "bus". 
A "direct" + value means the direct access approach will be forced. A + "bus" value means a D-Bus / IPC approach will be forced. + + + + TRACKER_SPARQL_CACHE_SIZE + + Tracker caches database statements which occur frequently to make + subsequent repeat queries much faster. The cache size is + set to 100 by default for each type + (select and update queries). This must be at + least 2 as a minimum, any less and a + value of 3 is used instead. The + number represents the number of cached statements to keep + around. This environment variable is used mainly for + testing purposes. + + + Tracker's store also has environment variables to control + this behavior, see the manual pages + for tracker-store + regarding TRACKER_STORE_SELECT_CACHE_SIZE + and TRACKER_STORE_UPDATE_CACHE_SIZE. + + + + TRACKER_VERBOSITY + + Historically, all queries would go + through tracker-store and all + requests would be logged according to the verbosity set + in tracker-store.cfg (see manual + pages for tracker-store.cfg). Since + libtracker-sparql may + circumvent tracker-store if using the + direct access backend, this environment variable was added + to let clients choose the log level. The same values apply + to all other processes which have logging and a + configuration to control it. Values range + from 0 to 3, + 0=errors, 1=minimal, 2=detailed, 3=debug. By default it + is 0. + + + + + + + + + diff --git a/src/libtracker-sparql/tracker-connection.vala b/src/libtracker-sparql/tracker-connection.vala index cdb6c36eb..70ef83218 100644 --- a/src/libtracker-sparql/tracker-connection.vala +++ b/src/libtracker-sparql/tracker-connection.vala @@ -136,11 +136,11 @@ public abstract class Tracker.Sparql.Connection : Object { * which it won't support (i.e. an update for a read-only backend), you will * see critical warnings. 
* - * When calling either tracker_sparql_connection_get(), - * tracker_sparql_connection_get_direct() or the asynchronous variants of - * these functions, a mutex is used to protect the loading of backends - * against potential race conditions. For synchronous calls, this function - * will always block if a previous connection get method has been called. + * When calling either tracker_sparql_connection_get(), or the asynchronous + * variants of these functions, a mutex is used to protect the loading of + * backends against potential race conditions. For synchronous calls, this + * function will always block if a previous connection get method has been + * called. * * All backends will call the D-Bus tracker-store API Wait() to make sure * the store and databases are in the right state before any user based -- cgit v1.2.1