summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNicholas Car <nicholas.car@surroundaustralia.com>2020-05-25 21:52:32 +1000
committerNicholas Car <nicholas.car@surroundaustralia.com>2020-05-25 21:52:32 +1000
commit90f1da062391aa790f947c320a9d0ef4390f678c (patch)
treec876df3a436e079a33192de2da82ffd78c5c13e4
parentd8a172e1601793cb153d51d9fae4c42d19bb6d16 (diff)
parent9c064ac15fe801a5e5cb2b4ce7b43e087e1be0a6 (diff)
downloadrdflib-90f1da062391aa790f947c320a9d0ef4390f678c.tar.gz
Merge branch 'master' into issue-910
-rw-r--r--.gitignore3
-rw-r--r--.travis.yml11
-rw-r--r--CHANGELOG.md220
-rw-r--r--README.md138
-rw-r--r--docs/_static/RDFlib-500.pngbin0 -> 26062 bytes
-rw-r--r--docs/_static/RDFlib.icobin0 -> 3262 bytes
-rw-r--r--docs/_static/RDFlib.pngbin0 -> 21014 bytes
-rw-r--r--docs/_static/RDFlib.svg47
-rw-r--r--docs/_static/logo-rdflib.pngbin0 -> 40641 bytes
-rw-r--r--docs/_static/logo.svg374
-rw-r--r--docs/_themes/armstrong/rtd-themes.conf65
-rw-r--r--docs/_themes/armstrong/static/rtd.css_t3
-rw-r--r--docs/_themes/armstrong/theme-old.conf65
-rw-r--r--docs/_themes/armstrong/theme.conf48
-rw-r--r--docs/apidocs/.gitignore2
-rw-r--r--docs/apidocs/examples.rst6
-rw-r--r--docs/apidocs/modules.rst7
-rw-r--r--docs/apidocs/rdflib.extras.rst36
-rw-r--r--docs/apidocs/rdflib.plugins.parsers.pyMicrodata.rst35
-rw-r--r--docs/apidocs/rdflib.plugins.parsers.pyRdfa.extras.rst19
-rw-r--r--docs/apidocs/rdflib.plugins.parsers.pyRdfa.host.rst27
-rw-r--r--docs/apidocs/rdflib.plugins.parsers.pyRdfa.rdfs.rst27
-rw-r--r--docs/apidocs/rdflib.plugins.parsers.pyRdfa.rst85
-rw-r--r--docs/apidocs/rdflib.plugins.parsers.pyRdfa.transform.rst51
-rw-r--r--docs/apidocs/rdflib.plugins.parsers.rst85
-rw-r--r--docs/apidocs/rdflib.plugins.rst37
-rw-r--r--docs/apidocs/rdflib.plugins.serializers.rst67
-rw-r--r--docs/apidocs/rdflib.plugins.sparql.results.rst51
-rw-r--r--docs/apidocs/rdflib.plugins.sparql.rst116
-rw-r--r--docs/apidocs/rdflib.plugins.stores.rst45
-rw-r--r--docs/apidocs/rdflib.rst180
-rw-r--r--docs/apidocs/rdflib.tools.rst55
-rw-r--r--docs/conf.py128
-rw-r--r--docs/docs.rst10
-rw-r--r--docs/faq.rst48
-rw-r--r--docs/gettingstarted.rst90
-rw-r--r--docs/index.rst96
-rw-r--r--docs/intro_to_creating_rdf.rst157
-rw-r--r--docs/intro_to_graphs.rst72
-rw-r--r--docs/intro_to_parsing.rst24
-rw-r--r--docs/intro_to_sparql.rst31
-rw-r--r--docs/plugintable.py17
-rw-r--r--docs/rdf_terms.rst5
-rw-r--r--docs/sphinx-requirements.txt5
-rw-r--r--docs/univrdfstore.rst7
-rw-r--r--docs/upgrade2to3.rst60
-rw-r--r--docs/upgrade3to4.rst92
-rw-r--r--docs/upgrade4to5.rst266
-rw-r--r--examples/conjunctive_graphs.py46
-rw-r--r--examples/custom_datatype.py15
-rw-r--r--examples/custom_eval.py22
-rw-r--r--examples/film.py116
-rw-r--r--examples/foaf.n3284
-rw-r--r--examples/foaf.rdf346
-rw-r--r--examples/foafpaths.py12
-rw-r--r--examples/graph_digest_benchmark.py87
-rw-r--r--examples/prepared_query.py14
-rw-r--r--examples/rdfa_example.py14
-rw-r--r--examples/resource.py11
-rw-r--r--examples/simple_example.py19
-rw-r--r--examples/sleepycat_example.py31
-rw-r--r--examples/slice.py10
-rw-r--r--examples/smushing.py11
-rw-r--r--examples/sparql_query_example.py12
-rw-r--r--examples/sparql_update_example.py35
-rw-r--r--examples/sparqlstore_example.py33
-rw-r--r--examples/swap_primer.py68
-rw-r--r--examples/transitive.py20
-rw-r--r--rdflib/__init__.py112
-rw-r--r--rdflib/collection.py14
-rw-r--r--rdflib/compare.py114
-rw-r--r--rdflib/compat.py110
-rw-r--r--rdflib/container.py265
-rw-r--r--rdflib/events.py11
-rw-r--r--rdflib/exceptions.py46
-rw-r--r--rdflib/extras/cmdlineutils.py18
-rw-r--r--rdflib/extras/describer.py7
-rw-r--r--rdflib/extras/external_graph_libs.py74
-rw-r--r--rdflib/extras/infixowl.py1054
-rw-r--r--rdflib/graph.py796
-rw-r--r--rdflib/namespace.py458
-rw-r--r--rdflib/parser.py99
-rw-r--r--rdflib/paths.py69
-rw-r--r--rdflib/plugin.py391
-rw-r--r--rdflib/plugins/memory.py74
-rwxr-xr-xrdflib/plugins/parsers/notation3.py913
-rw-r--r--rdflib/plugins/parsers/nquads.py19
-rw-r--r--rdflib/plugins/parsers/nt.py2
-rw-r--r--rdflib/plugins/parsers/ntriples.py59
-rw-r--r--rdflib/plugins/parsers/rdfxml.py151
-rw-r--r--rdflib/plugins/parsers/trig.py43
-rw-r--r--rdflib/plugins/parsers/trix.py68
-rw-r--r--rdflib/plugins/serializers/n3.py28
-rw-r--r--rdflib/plugins/serializers/nquads.py35
-rw-r--r--rdflib/plugins/serializers/nt.py51
-rw-r--r--rdflib/plugins/serializers/rdfxml.py99
-rw-r--r--rdflib/plugins/serializers/trig.py34
-rw-r--r--rdflib/plugins/serializers/trix.py47
-rw-r--r--rdflib/plugins/serializers/turtle.py138
-rw-r--r--rdflib/plugins/serializers/xmlwriter.py17
-rw-r--r--rdflib/plugins/sleepycat.py158
-rw-r--r--rdflib/plugins/sparql/__init__.py2
-rw-r--r--rdflib/plugins/sparql/aggregates.py19
-rw-r--r--rdflib/plugins/sparql/algebra.py258
-rw-r--r--rdflib/plugins/sparql/datatypes.py78
-rw-r--r--rdflib/plugins/sparql/evaluate.py179
-rw-r--r--rdflib/plugins/sparql/evalutils.py17
-rw-r--r--rdflib/plugins/sparql/operators.py248
-rw-r--r--rdflib/plugins/sparql/parser.py1140
-rw-r--r--rdflib/plugins/sparql/parserutils.py28
-rw-r--r--rdflib/plugins/sparql/processor.py15
-rw-r--r--rdflib/plugins/sparql/results/csvresults.py43
-rw-r--r--rdflib/plugins/sparql/results/graph.py10
-rw-r--r--rdflib/plugins/sparql/results/jsonresults.py63
-rw-r--r--rdflib/plugins/sparql/results/rdfresults.py25
-rw-r--r--rdflib/plugins/sparql/results/tsvresults.py55
-rw-r--r--rdflib/plugins/sparql/results/txtresults.py16
-rw-r--r--rdflib/plugins/sparql/results/xmlresults.py156
-rw-r--r--rdflib/plugins/sparql/sparql.py87
-rw-r--r--rdflib/plugins/sparql/update.py46
-rw-r--r--rdflib/plugins/stores/auditable.py62
-rw-r--r--rdflib/plugins/stores/concurrent.py11
-rw-r--r--rdflib/plugins/stores/regexmatching.py72
-rw-r--r--rdflib/plugins/stores/sparqlconnector.py65
-rw-r--r--rdflib/plugins/stores/sparqlstore.py278
-rw-r--r--rdflib/query.py78
-rw-r--r--rdflib/resource.py78
-rw-r--r--rdflib/serializer.py3
-rw-r--r--rdflib/store.py83
-rw-r--r--rdflib/term.py600
-rw-r--r--rdflib/tools/csv2rdf.py135
-rw-r--r--rdflib/tools/graphisomorphism.py44
-rw-r--r--rdflib/tools/rdf2dot.py109
-rw-r--r--rdflib/tools/rdfpipe.py132
-rw-r--r--rdflib/tools/rdfs2dot.py102
-rw-r--r--rdflib/util.py113
-rw-r--r--rdflib/void.py12
-rw-r--r--requirements.dev.txt3
-rw-r--r--requirements.txt1
-rwxr-xr-xrun_tests.py38
-rw-r--r--setup.py79
-rw-r--r--test/earl.py12
-rw-r--r--test/manifest.py105
-rw-r--r--test/store_performance.py14
-rw-r--r--test/test_aggregate_graphs.py50
-rw-r--r--test/test_auditable.py244
-rw-r--r--test/test_batch_add.py88
-rw-r--r--test/test_bnode_ncname.py23
-rw-r--r--test/test_canonicalization.py436
-rw-r--r--test/test_comparison.py7
-rw-r--r--test/test_conjunctive_graph.py15
-rw-r--r--test/test_conneg.py12
-rw-r--r--test/test_container.py77
-rw-r--r--test/test_conventions.py13
-rw-r--r--test/test_core_sparqlstore.py5
-rw-r--r--test/test_dataset.py98
-rw-r--r--test/test_datetime.py39
-rw-r--r--test/test_dawg.py210
-rw-r--r--test/test_diff.py4
-rw-r--r--test/test_duration.py8
-rw-r--r--test/test_empty_xml_base.py32
-rw-r--r--test/test_evaluate_bind.py28
-rw-r--r--test/test_events.py15
-rw-r--r--test/test_expressions.py156
-rw-r--r--test/test_extras_external_graph_libs.py47
-rw-r--r--test/test_finalnewline.py18
-rw-r--r--test/test_graph.py42
-rw-r--r--test/test_graph_context.py115
-rw-r--r--test/test_graph_formula.py32
-rw-r--r--test/test_graph_items.py7
-rw-r--r--test/test_hex_binary.py32
-rw-r--r--test/test_initbindings.py275
-rw-r--r--test/test_iomemory.py3
-rw-r--r--test/test_issue084.py93
-rw-r--r--test/test_issue1003.py133
-rw-r--r--test/test_issue160.py17
-rw-r--r--test/test_issue161.py11
-rw-r--r--test/test_issue184.py4
-rw-r--r--test/test_issue190.py26
-rw-r--r--test/test_issue200.py13
-rw-r--r--test/test_issue209.py1
-rw-r--r--test/test_issue223.py6
-rw-r--r--test/test_issue247.py1
-rw-r--r--test/test_issue248.py38
-rw-r--r--test/test_issue274.py78
-rw-r--r--test/test_issue363.py14
-rw-r--r--test/test_issue379.py4
-rw-r--r--test/test_issue381.py53
-rw-r--r--test/test_issue432.py2
-rw-r--r--test/test_issue446.py11
-rw-r--r--test/test_issue492.py4
-rw-r--r--test/test_issue523.py8
-rw-r--r--test/test_issue532.py2
-rw-r--r--test/test_issue545.py4
-rw-r--r--test/test_issue554.py7
-rw-r--r--test/test_issue563.py24
-rw-r--r--test/test_issue579.py4
-rw-r--r--test/test_issue604.py2
-rw-r--r--test/test_issue655.py52
-rw-r--r--test/test_issue715.py13
-rw-r--r--test/test_issue733.py19
-rw-r--r--test/test_issue920.py19
-rw-r--r--test/test_issue923.py5
-rw-r--r--test/test_issue953.py6
-rw-r--r--test/test_issue_git_200.py3
-rw-r--r--test/test_issue_git_336.py26
-rw-r--r--test/test_literal.py76
-rw-r--r--test/test_memory_store.py16
-rw-r--r--test/test_mulpath_n3.py4
-rw-r--r--test/test_n3.py133
-rw-r--r--test/test_n3_suite.py22
-rw-r--r--test/test_namespace.py87
-rw-r--r--test/test_nodepickler.py13
-rw-r--r--test/test_nquads.py24
-rw-r--r--test/test_nquads_w3c.py13
-rw-r--r--test/test_nt_misc.py30
-rw-r--r--test/test_nt_suite.py52
-rw-r--r--test/test_nt_w3c.py13
-rw-r--r--test/test_parser.py11
-rw-r--r--test/test_parser_helpers.py1
-rw-r--r--test/test_prefixTypes.py17
-rw-r--r--test/test_preflabel.py75
-rw-r--r--test/test_prettyxml.py106
-rw-r--r--test/test_rdf_lists.py20
-rw-r--r--test/test_rdfxml.py34
-rw-r--r--test/test_roundtrip.py37
-rw-r--r--test/test_rules.py11
-rw-r--r--test/test_seq.py8
-rw-r--r--test/test_serializexml.py79
-rw-r--r--test/test_slice.py38
-rw-r--r--test/test_sparql.py41
-rw-r--r--test/test_sparql_agg_distinct.py49
-rw-r--r--test/test_sparql_agg_undef.py24
-rw-r--r--test/test_sparql_construct_bindings.py39
-rw-r--r--test/test_sparql_service.py43
-rw-r--r--test/test_sparqlstore.py108
-rw-r--r--test/test_sparqlupdatestore.py180
-rw-r--r--test/test_swap_n3.py82
-rw-r--r--test/test_term.py214
-rw-r--r--test/test_trig.py118
-rw-r--r--test/test_trig_w3c.py32
-rw-r--r--test/test_trix_parse.py3
-rw-r--r--test/test_trix_serialize.py29
-rw-r--r--test/test_tsvresults.py3
-rw-r--r--test/test_turtle_serialize.py58
-rw-r--r--test/test_turtle_sort_issue613.py10
-rw-r--r--test/test_turtle_w3c.py19
-rw-r--r--test/test_util.py133
-rw-r--r--test/test_wide_python.py13
-rw-r--r--test/test_xmlliterals.py48
-rw-r--r--test/testutils.py16
-rw-r--r--test/triple_store.py4
-rw-r--r--test/type_check.py17
-rw-r--r--tox.ini4
254 files changed, 10667 insertions, 8881 deletions
diff --git a/.gitignore b/.gitignore
index 17184737..a88c4920 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,4 +16,5 @@ build/
*~
test_reports/*latest.ttl
# PyCharm
-.idea/ \ No newline at end of file
+.idea/
+prepare_changelog.sh
diff --git a/.travis.yml b/.travis.yml
index f37f0750..9811fa4a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,5 +1,4 @@
# http://travis-ci.org/#!/RDFLib/rdflib
-sudo: false
language: python
branches:
only:
@@ -10,16 +9,14 @@ git:
depth: 3
python:
- - 2.7
- - 3.4
- 3.5
- 3.6
+ - 3.7
-matrix:
+jobs:
include:
- - python: 3.7
- dist: xenial
- sudo: true
+ - python: 3.8
+ dist: focal
before_install:
- pip install -U setuptools pip # seems travis comes with a too old setuptools for html5lib
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 06cdada7..a72b4d66 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,223 @@
+2020/04/18 RELEASE 5.0.0
+========================
+5.0.0 is a major stable release and is the last release to support Python 2 & 3.4. 5.0.0 is mostly backwards-
+compatible with 4.2.2 and is intended for long-term, bug fix only support.
+
+5.0.0 comes two weeks after the 5.0.0RC1 and includes a small number of additional bug fixes. Note that
+rdflib-jsonld has released a version 0.5.0 to be compatible with rdflib 5.0.0.
+
+_**All PRs merged since 5.0.0RC1:**_
+
+### General Bugs Fixed:
+ * Fix n3 parser exponent syntax of floats with leading dot.
+ [PR #1012](https://github.com/RDFLib/rdflib/pull/1012)
+ * FIX: Change is comparison to == for tuple
+ [PR #1009](https://github.com/RDFLib/rdflib/pull/1009)
+ * fix #913 : Added _parseBoolean function to enforce correct Lexical-to-value mapping
+ [PR #995](https://github.com/RDFLib/rdflib/pull/995)
+
+### Enhanced Features:
+ * Issue 1003
+ [PR #1005](https://github.com/RDFLib/rdflib/pull/1005)
+
+### SPARQL Fixes:
+ * CONSTRUCT resolve with initBindings fixes #1001
+ [PR #1002](https://github.com/RDFLib/rdflib/pull/1002)
+
+### Documentation Fixes:
+ * DOC: Use sphinxcontrib-apidoc and various cleanups
+ [PR #1010](https://github.com/RDFLib/rdflib/pull/1010)
+ * Update copyright year in docs conf.py
+ [PR #1006](https://github.com/RDFLib/rdflib/pull/1006)
+ * slightly improved styling, small index text changes
+ [PR #1004](https://github.com/RDFLib/rdflib/pull/1004)
+
+
+2020/04/04 RELEASE 5.0.0RC1
+===========================
+
+After more than three years, RDFLib 5.0.0rc1 is finally released.
+
+This is a rollup of all of the bugfixes merged, and features introduced to RDFLib since
+RDFLib 4.2.2 was released in Jan 2017.
+
+While all effort was taken to minimize breaking changes in this release, there are some.
+
+Please see the upgrade4to5 document in the docs directory for more information on some specific differences from 4.2.2 to 5.0.0.
+
+_**All issues closed and PRs merged since 4.2.2:**_
+
+### General Bugs Fixed:
+ * Pr 451 redux
+ [PR #978](https://github.com/RDFLib/rdflib/pull/978)
+ * NTriples fails to parse URIs with only a scheme
+ [ISSUE #920](https://github.com/RDFLib/rdflib/issues/920), [PR #974](https://github.com/RDFLib/rdflib/pull/974)
+ * Cannot clone on windows - Remove colons from test result files.
+ [ISSUE #901](https://github.com/RDFLib/rdflib/issues/901), [PR #971](https://github.com/RDFLib/rdflib/pull/971)
+ * Add requirement for requests to setup.py
+ [PR #969](https://github.com/RDFLib/rdflib/pull/969)
+ * fixed URIRef including native unicode characters
+ [PR #961](https://github.com/RDFLib/rdflib/pull/961)
+ * DCTERMS.format not working
+ [ISSUE #932](https://github.com/RDFLib/rdflib/issues/932)
+ * infixowl.manchesterSyntax do not encode strings
+ [PR #906](https://github.com/RDFLib/rdflib/pull/906)
+ * Fix blank node label to not contain '_:' during parsing
+ [PR #886](https://github.com/RDFLib/rdflib/pull/886)
+ * rename new SPARQLWrapper to SPARQLConnector
+ [PR #872](https://github.com/RDFLib/rdflib/pull/872)
+ * Fix #859. Unquote and Uriquote Literal Datatype.
+ [PR #860](https://github.com/RDFLib/rdflib/pull/860)
+ * Parsing nquads
+ [ISSUE #786](https://github.com/RDFLib/rdflib/issues/786)
+ * ntriples spec allows for upper-cased lang tag, fixes #782
+ [PR #784](https://github.com/RDFLib/rdflib/pull/784), [ISSUE #782](https://github.com/RDFLib/rdflib/issues/782)
+ * Adds escaped single quote to literal parser
+ [PR #736](https://github.com/RDFLib/rdflib/pull/736)
+ * N3 parse error on single quote within single quotes
+ [ISSUE #732](https://github.com/RDFLib/rdflib/issues/732)
+ * Fixed #725
+ [PR #730](https://github.com/RDFLib/rdflib/pull/730)
+ * test for issue #725: canonicalization collapses BNodes
+ [PR #726](https://github.com/RDFLib/rdflib/pull/726)
+ * RGDA1 graph canonicalization sometimes still collapses distinct BNodes
+ [ISSUE #725](https://github.com/RDFLib/rdflib/issues/725)
+ * Accept header should use a q parameter
+ [PR #720](https://github.com/RDFLib/rdflib/pull/720)
+ * Added test for Issue #682 and fixed.
+ [PR #718](https://github.com/RDFLib/rdflib/pull/718)
+ * Incompatibility with Python3: unichr
+ [ISSUE #687](https://github.com/RDFLib/rdflib/issues/687)
+ * namespace.py include colon in ALLOWED_NAME_CHARS
+ [PR #663](https://github.com/RDFLib/rdflib/pull/663)
+ * namespace.py fix compute_qname missing namespaces
+ [PR #649](https://github.com/RDFLib/rdflib/pull/649)
+ * RDFa parsing Error! `__init__()` got an unexpected keyword argument 'encoding'
+ [ISSUE #639](https://github.com/RDFLib/rdflib/issues/639)
+ * Bugfix: `term.Literal.__add__`
+ [PR #451](https://github.com/RDFLib/rdflib/pull/451)
+ * fixup of #443
+ [PR #445](https://github.com/RDFLib/rdflib/pull/445)
+ * Microdata to rdf second edition bak
+ [PR #444](https://github.com/RDFLib/rdflib/pull/444)
+
+### Enhanced Features:
+ * Register additional serializer plugins for SPARQL mime types.
+ [PR #987](https://github.com/RDFLib/rdflib/pull/987)
+ * Pr 388 redux
+ [PR #979](https://github.com/RDFLib/rdflib/pull/979)
+ * Allows RDF terms introduced by JSON-LD 1.1
+ [PR #970](https://github.com/RDFLib/rdflib/pull/970)
+ * make SPARQLConnector work with DBpedia
+ [PR #941](https://github.com/RDFLib/rdflib/pull/941)
+ * ClosedNamespace returns right exception for way of access
+ [PR #866](https://github.com/RDFLib/rdflib/pull/866)
+ * Not adding all namespaces for n3 serializer
+ [PR #832](https://github.com/RDFLib/rdflib/pull/832)
+ * Adds basic support of xsd:duration
+ [PR #808](https://github.com/RDFLib/rdflib/pull/808)
+ * Add possibility to set authority and basepath to skolemize graph
+ [PR #807](https://github.com/RDFLib/rdflib/pull/807)
+ * Change notation3 list realization to non-recursive function.
+ [PR #805](https://github.com/RDFLib/rdflib/pull/805)
+ * Suppress warning for not using custom encoding.
+ [PR #800](https://github.com/RDFLib/rdflib/pull/800)
+ * Add support to parsing large xml inputs
+ [ISSUE #749](https://github.com/RDFLib/rdflib/issues/749)
+ [PR #750](https://github.com/RDFLib/rdflib/pull/750)
+ * improve hash efficiency by directly using str/unicode hash
+ [PR #746](https://github.com/RDFLib/rdflib/pull/746)
+ * Added the csvw prefix to the RDFa initial context.
+ [PR #594](https://github.com/RDFLib/rdflib/pull/594)
+ * syncing changes from pyMicrodata
+ [PR #587](https://github.com/RDFLib/rdflib/pull/587)
+ * Microdata parser: updated the parser to the latest version of the microdata->rdf note (published in December 2014)
+ [PR #443](https://github.com/RDFLib/rdflib/pull/443)
+ * Literal.toPython() support for xsd:hexBinary
+ [PR #388](https://github.com/RDFLib/rdflib/pull/388)
+
+### SPARQL Fixes:
+ * Total order patch patch
+ [PR #862](https://github.com/RDFLib/rdflib/pull/862)
+ * use <<= instead of deprecated <<
+ [PR #861](https://github.com/RDFLib/rdflib/pull/861)
+ * Fix #847
+ [PR #856](https://github.com/RDFLib/rdflib/pull/856)
+ * RDF Literal `"1"^^xsd:boolean` should _not_ coerce to True
+ [ISSUE #847](https://github.com/RDFLib/rdflib/issues/847)
+ * Makes NOW() return an UTC date
+ [PR #844](https://github.com/RDFLib/rdflib/pull/844)
+ * NOW() SPARQL should return an xsd:dateTime with a timezone
+ [ISSUE #843](https://github.com/RDFLib/rdflib/issues/843)
+ * fix property paths bug: issue #715
+ [PR #822](https://github.com/RDFLib/rdflib/pull/822), [ISSUE #715](https://github.com/RDFLib/rdflib/issues/715)
+ * MulPath: correct behaviour of n3()
+ [PR #820](https://github.com/RDFLib/rdflib/pull/820)
+ * Literal total ordering
+ [PR #793](https://github.com/RDFLib/rdflib/pull/793)
+ * Remove SPARQLWrapper dependency
+ [PR #744](https://github.com/RDFLib/rdflib/pull/744)
+ * made UNION faster by not preventing duplicates
+ [PR #741](https://github.com/RDFLib/rdflib/pull/741)
+ * added a hook to add custom functions to SPARQL
+ [PR #723](https://github.com/RDFLib/rdflib/pull/723)
+ * Issue714
+ [PR #717](https://github.com/RDFLib/rdflib/pull/717)
+ * Use <<= instead of deprecated << in SPARQL parser
+ [PR #417](https://github.com/RDFLib/rdflib/pull/417)
+ * Custom FILTER function for SPARQL engine
+ [ISSUE #274](https://github.com/RDFLib/rdflib/issues/274)
+
+### Code Quality and Cleanups:
+ * a slightly opinionated autopep8 run
+ [PR #870](https://github.com/RDFLib/rdflib/pull/870)
+ * remove rdfa and microdata parsers from core RDFLib
+ [PR #828](https://github.com/RDFLib/rdflib/pull/828)
+ * ClosedNamespace KeyError -> AttributeError
+ [PR #827](https://github.com/RDFLib/rdflib/pull/827)
+ * typo in rdflib/plugins/sparql/update.py
+ [ISSUE #760](https://github.com/RDFLib/rdflib/issues/760)
+ * Fix logging in interactive mode
+ [PR #731](https://github.com/RDFLib/rdflib/pull/731)
+ * make namespace module flake8-compliant, change exceptions in that mod…
+ [PR #711](https://github.com/RDFLib/rdflib/pull/711)
+ * delete ez_setup.py?
+ [ISSUE #669](https://github.com/RDFLib/rdflib/issues/669)
+ * code duplication issue between rdflib and pymicrodata
+ [ISSUE #582](https://github.com/RDFLib/rdflib/issues/582)
+ * Transition from 2to3 to use of six.py to be merged in 5.0.0-dev
+ [PR #519](https://github.com/RDFLib/rdflib/pull/519)
+ * sparqlstore drop deprecated methods and args
+ [PR #516](https://github.com/RDFLib/rdflib/pull/516)
+ * python3 code seems shockingly inefficient
+ [ISSUE #440](https://github.com/RDFLib/rdflib/issues/440)
+ * removed md5_term_hash, fixes #240
+ [PR #439](https://github.com/RDFLib/rdflib/pull/439), [ISSUE #240](https://github.com/RDFLib/rdflib/issues/240)
+
+### Testing:
+ * 3.7 for travis
+ [PR #864](https://github.com/RDFLib/rdflib/pull/864)
+ * Added trig unit tests to highlight some current parsing/serializing issues
+ [PR #431](https://github.com/RDFLib/rdflib/pull/431)
+
+### Documentation Fixes:
+ * Fix a doc string in the query module
+ [PR #976](https://github.com/RDFLib/rdflib/pull/976)
+ * setup.py: Make the license field use an SPDX identifier
+ [PR #789](https://github.com/RDFLib/rdflib/pull/789)
+ * Update README.md
+ [PR #764](https://github.com/RDFLib/rdflib/pull/764)
+ * Update namespaces_and_bindings.rst
+ [PR #757](https://github.com/RDFLib/rdflib/pull/757)
+ * DOC: README.md: rdflib-jsonld, https uris
+ [PR #712](https://github.com/RDFLib/rdflib/pull/712)
+ * make doctest support py2/py3
+ [ISSUE #707](https://github.com/RDFLib/rdflib/issues/707)
+ * `pip install rdflib` (as per README.md) gets OSError on Mint 18.1
+ [ISSUE #704](https://github.com/RDFLib/rdflib/issues/704)
+
+
+
2017/01/29 RELEASE 4.2.2
========================
diff --git a/README.md b/README.md
index eb335679..0c123419 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,5 @@
+![](docs/_static/RDFlib.png)
+
RDFLib
======
[![Build Status](https://travis-ci.org/RDFLib/rdflib.png?branch=master)](https://travis-ci.org/RDFLib/rdflib)
@@ -6,98 +8,160 @@ RDFLib
[![PyPI](https://img.shields.io/pypi/v/rdflib.svg)](https://pypi.python.org/pypi/rdflib)
[![PyPI](https://img.shields.io/pypi/pyversions/rdflib.svg)](https://pypi.python.org/pypi/rdflib)
+RDFLib is a pure Python package for working with [RDF](http://www.w3.org/RDF/). RDFLib contains most things you need to work with RDF, including:
+
+* parsers and serializers for RDF/XML, N3, NTriples, N-Quads, Turtle, TriX, Trig and JSON-LD (via a plugin).
+* a Graph interface which can be backed by any one of a number of Store implementations
+* store implementations for in-memory storage and persistent storage on top of the Berkeley DB
+* a SPARQL 1.1 implementation - supporting SPARQL 1.1 Queries and Update statements
+
+## RDFlib Family of packages
+The RDFlib community maintains many RDF-related Python code repositories with different purposes. For example:
+
+* [rdflib](https://github.com/RDFLib/rdflib) - the rdflib core
+* [sparqlwrapper](https://github.com/RDFLib/sparqlwrapper) - a simple Python wrapper around a SPARQL service to remotely execute your queries
+* [pyLODE](https://github.com/RDFLib/pyLODE) - An OWL ontology documentation tool using Python and templating, based on LODE.
+* [rdflib-jsonld](https://github.com/RDFLib/rdflib-jsonld) - an rdflib plugin that is an implementation of JSON-LD
-RDFLib is a Python library for working with RDF, a simple yet
-powerful language for representing information as graphs.
+Please see the list for all packages/repositories here:
-RDFLib may be installed with pip (use sudo as required):
+* <https://github.com/RDFLib>
+
+
+## Installation
+RDFLib may be installed with Python's package management tool *pip*:
$ pip install rdflib
Alternatively manually download the package from the Python Package
Index (PyPI) at https://pypi.python.org/pypi/rdflib
-The current version of RDFLib is 4.2.2, see the ``CHANGELOG.md``
-file for what's new.
+The current version of RDFLib is 5.0.0, see the ``CHANGELOG.md``
+file for what's new in this release.
-Getting Started
----------------
+## Getting Started
+RDFLib aims to be a pythonic RDF API. rdflib's main data object is a `Graph` which is a Python collection
+of RDF *Subject, Predicate, Object* Triples:
-RDFLib aims to be a pythonic RDF API, a Graph is a python collection
-of RDF Subject,Predicate,Object Triples:
+To create graph and load it with RDF data from DBPedia then print the results:
```python
import rdflib
-g=rdflib.Graph()
+g = rdflib.Graph()
g.load('http://dbpedia.org/resource/Semantic_Web')
-for s,p,o in g:
+for s, p, o in g:
print(s, p, o)
```
-
The components of the triples are URIs (resources) or Literals
-(values), URIs are grouped together by *namespace*, common namespaces are
+(values).
+
+URIs are grouped together by *namespace*, common namespaces are
included in RDFLib:
```python
-
-semweb=rdflib.URIRef('http://dbpedia.org/resource/Semantic_Web')
-type=g.value(semweb, rdflib.RDFS.label)
+from rdflib.namespace import DC, DCTERMS, DOAP, FOAF, SKOS, OWL, RDF, RDFS, VOID, XMLNS, XSD
```
+You can use them like this:
+
+```python
+semweb = rdflib.URIRef('http://dbpedia.org/resource/Semantic_Web')
+type = g.value(semweb, rdflib.RDFS.label)
+```
Where `rdflib.RDFS` is the RDFS Namespace, `graph.value` returns an
object of the triple-pattern given (or an arbitrary one if more
-exist). New Namespaces can also be defined:
+exist).
+
+Or like this, adding a triple to a graph `g`:
```python
+g.add((
+ rdflib.URIRef("http://example.com/person/nick"),
+ FOAF.givenName,
+ rdflib.Literal("Nick", datatype=XSD.string)
+))
+```
+The triple (in n-triples notation) `<http://example.com/person/nick> <http://xmlns.com/foaf/0.1/givenName> "Nick"^^<http://www.w3.org/2001/XMLSchema#string> .`
+is created where the property `FOAF.giveName` is the URI `<http://xmlns.com/foaf/0.1/givenName>` and `XSD.string` is the
+URI `<http://www.w3.org/2001/XMLSchema#string>`.
-dbpedia=rdflib.Namespace('http://dbpedia.org/ontology/')
+You can bind namespaces to prefixes to shorten the URIs for RDF/XML, Turtle, N3, TriG, TriX & JSON-LD serializations:
-abstracts=list(x for x in g.objects(semweb, dbpedia['abstract']) if x.language=='en')
+ ```python
+g.bind("foaf", FOAF)
+g.bind("xsd", XSD)
+```
+This will allow the n-triples triple above to be serialised like this:
+ ```python
+print(g.serialize(format="turtle").decode("utf-8"))
```
-See also [./examples](./examples)
+With these results:
+```turtle
+PREFIX foaf: <http://xmlns.com/foaf/0.1/>
+PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
+
+<http://example.com/person/nick> foaf:givenName "Nick"^^xsd:string .
+```
+
+New Namespaces can also be defined:
+
+```python
+dbpedia = rdflib.Namespace('http://dbpedia.org/ontology/')
+abstracts = list(x for x in g.objects(semweb, dbpedia['abstract']) if x.language=='en')
+```
+
+See also [./examples](./examples)
-Features
---------
+## Features
The library contains parsers and serializers for RDF/XML, N3,
-NTriples, N-Quads, Turtle, TriX, RDFa and Microdata.
+NTriples, N-Quads, Turtle, TriX, RDFa and Microdata. JSON-LD parsing/serializing can be achieved using the
+[JSON-LD plugin](https://github.com/RDFLib/rdflib-jsonld).
The library presents a Graph interface which can be backed by
any one of a number of Store implementations.
This core RDFLib package includes store implementations for
-in memory storage and persistent storage on top of the Berkeley DB.
+in-memory storage and persistent storage on top of the Berkeley DB.
A SPARQL 1.1 implementation is included - supporting SPARQL 1.1 Queries and Update statements.
RDFLib is open source and is maintained on [GitHub](https://github.com/RDFLib/rdflib/). RDFLib releases, current and previous
are listed on [PyPI](https://pypi.python.org/pypi/rdflib/)
-RDFLib has a plugin-architecture for store-implementation, as well as parsers/serializers, several other projects exist which extend RDFLib features:
+Multiple other projects are contained within the RDFlib "family", see <https://github.com/RDFLib/>.
+
+
+## Documentation
+See <https://rdflib.readthedocs.io> for our documentation built from the code.
+
+
+## Support
+For general "how do I..." queries, please use https://stackoverflow.com and tag your question with `rdflib`.
+Existing questions:
- * [rdflib-jsonld](https://github.com/RDFLib/rdflib-jsonld) - Serializer and parser for [json-ld](http://json-ld.org)
+* <https://stackoverflow.com/questions/tagged/rdflib>
-Support
--------
-More information is available on the project webpage:
+## Releases
+See <https://rdflib.dev> for the release schedule.
-https://github.com/RDFLib/rdflib/
-The documentation can be built by doing::
+## Contributing
+rdflib survives and grows via user contributions! Please consider lodging Pull Requests here:
- $ python setup.py build_sphinx
+* <https://github.com/RDFLib/rdflib/pulls>
-And is also available from ReadTheDocs:
+You can also raise issues here:
-https://rdflib.readthedocs.io
+* <https://github.com/RDFLib/rdflib/issues>
-Support is available through the rdflib-dev group:
-https://groups.google.com/group/rdflib-dev
+## Contacts
+If you want to contact the rdflib maintainers, please do so via the rdflib-dev mailing list:
-and on the IRC channel #rdflib on the freenode.net server
+* <https://groups.google.com/group/rdflib-dev>
diff --git a/docs/_static/RDFlib-500.png b/docs/_static/RDFlib-500.png
new file mode 100644
index 00000000..8312a071
--- /dev/null
+++ b/docs/_static/RDFlib-500.png
Binary files differ
diff --git a/docs/_static/RDFlib.ico b/docs/_static/RDFlib.ico
new file mode 100644
index 00000000..b667f6b6
--- /dev/null
+++ b/docs/_static/RDFlib.ico
Binary files differ
diff --git a/docs/_static/RDFlib.png b/docs/_static/RDFlib.png
new file mode 100644
index 00000000..435f07e4
--- /dev/null
+++ b/docs/_static/RDFlib.png
Binary files differ
diff --git a/docs/_static/RDFlib.svg b/docs/_static/RDFlib.svg
new file mode 100644
index 00000000..8e8d3fc1
--- /dev/null
+++ b/docs/_static/RDFlib.svg
@@ -0,0 +1,47 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Generator: Adobe Illustrator 19.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
+<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
+ viewBox="0 0 500 500" style="enable-background:new 0 0 500 500;" xml:space="preserve">
+<style type="text/css">
+ .st0{fill:#329545;}
+ .st1{fill:#FDD43B;}
+ .st2{fill:#144A99;}
+ .st3{fill:#FFFFFF;}
+</style>
+<g id="RDFlib">
+ <path id="path1948" class="st0" d="M247.4,33.3c-17.7,0.1-34.5,1.6-49.4,4.2c-43.7,7.7-51.7,23.9-51.7,53.7v39.4h103.4v13.1H146.4
+ h-38.8c-30,0-56.4,18.1-64.6,52.4c-9.5,39.4-9.9,63.9,0,105.1c7.3,30.6,24.9,52.4,54.9,52.4h35.5v-47.2c0-34.1,29.5-64.2,64.6-64.2
+ h103.3c28.7,0,51.7-23.7,51.7-52.5V91.3c0-28-23.6-49.1-51.7-53.7C283.5,34.6,265.1,33.3,247.4,33.3z M191.5,65
+ c10.7,0,19.4,8.9,19.4,19.8c0,10.9-8.7,19.6-19.4,19.6c-10.7,0-19.4-8.8-19.4-19.6C172.1,73.9,180.8,65,191.5,65z"/>
+ <path id="path1950" class="st1" d="M365.9,143.8v45.9c0,35.6-30.2,65.5-64.6,65.5H198c-28.3,0-51.7,24.2-51.7,52.5v98.4
+ c0,28,24.4,44.5,51.7,52.5c32.7,9.6,64.1,11.4,103.3,0c26-7.5,51.7-22.7,51.7-52.5v-39.4H249.7v-13.1H353h51.7
+ c30,0,41.2-21,51.7-52.4c10.8-32.4,10.3-63.5,0-105.1c-7.4-29.9-21.6-52.4-51.7-52.4H365.9z M307.8,393.1
+ c10.7,0,19.4,8.8,19.4,19.6c0,10.9-8.7,19.8-19.4,19.8c-10.7,0-19.4-8.9-19.4-19.8C288.4,401.9,297.1,393.1,307.8,393.1z"/>
+ <path id="XMLID_41_" class="st2" d="M342.1,345.7c5.2-17,2.3-36.4-11.5-51c-3.9-4.2-8.6-7.7-13.6-10.4c-1.8-1-3.6-1.8-5.5-2.5
+ c0,0-10.5-5.3-11.5-43.2c-0.3-12.7-0.5-28.2,5.9-39.7c0.9-1.6,1.9-3.2,3.3-4.4c7.3-6.2,15.4-10.5,20.7-18.8
+ c5.4-8.5,8.4-18.4,8.4-28.4c0-43.9-53.7-68.6-87.2-40.3c-5.1,4.3-9.4,9.6-12.5,15.6c-5.6,10.6-7.2,22.2-5.5,33.2
+ c0,0,2.5,12.8-29.8,32.9c-28.6,17.8-42.5,13-45.3,11.7c-8.2-5.2-17.8-8.2-28.2-8.2c-44,0-68.6,53.8-40.2,87.3
+ c4.3,5.1,9.6,9.3,15.5,12.4c19.2,10.2,41.9,7.4,57.9-5.4c0,0,11.4-9,45.2,9.2c26.7,14.3,30.7,28.4,31.2,33.7
+ c1.4,14.3,3.7,26.6,13.8,37.7C280.8,397,330.5,384,342.1,345.7z M215.1,280.6c-27.8-14.9-32-27.4-32.6-31.2
+ c0.4-4.5,0.1-9.1-0.6-13.5l0.2,0.3c0,0-2.3-12.1,29.6-31.9c28.5-17.7,41.5-14.2,43.9-13.3c1.6,1.1,3.2,2,4.8,2.9
+ c3.2,1.7,6.5,3,9.9,4c3.9,3.7,11.1,14.3,11.9,42.2c0.8,28.1-7.5,38.9-12,42.7c-4.6,2.1-9,4.9-12.9,8.3
+ C253,292.7,240.8,294.4,215.1,280.6z"/>
+ <g id="XMLID_32_">
+ <path id="XMLID_33_" class="st3" d="M253.8,117.4c-15.4,16.8-15.7,41.6-0.9,55.6c-7.3-7.1-7.2-21.7,0.2-35.8
+ c1-1.3,3.7-4.2,7.7-2.9c0.4,0.1,0.7,0.2,0.8,0.2c0.9,0.2,1.8,0.3,2.8,0.3c6.1-0.3,10.9-5.5,10.6-11.6c-0.1-2.7-1.3-5.2-3-7
+ c14.2-9.3,30.5-10.4,37.2-4.3l0.3,0C294.3,97.9,269.3,100.4,253.8,117.4z"/>
+ </g>
+ <g id="XMLID_29_">
+ <path id="XMLID_31_" class="st3" d="M98.6,272.9c-0.1-0.1-0.3-0.3-0.4-0.4c0.1,0.1,0.2,0.2,0.3,0.3L98.6,272.9z"/>
+ <path id="XMLID_30_" class="st3" d="M99.1,216.9c-15.4,16.8-15.7,41.6-0.9,55.6c-7.3-7.1-7.2-21.7,0.2-35.8c1-1.3,3.7-4.2,7.7-2.9
+ c0.4,0.1,0.7,0.2,0.8,0.2c0.9,0.2,1.8,0.3,2.8,0.3c6.1-0.3,10.9-5.5,10.6-11.6c-0.1-2.7-1.3-5.2-3-7c14.2-9.3,30.5-10.4,37.2-4.3
+ l0.3,0C139.6,197.4,114.7,199.9,99.1,216.9z"/>
+ </g>
+ <g id="XMLID_26_">
+ <path id="XMLID_28_" class="st3" d="M262.4,357.2c-0.1-0.1-0.3-0.3-0.4-0.4c0.1,0.1,0.2,0.2,0.3,0.3L262.4,357.2z"/>
+ <path id="XMLID_27_" class="st3" d="M262.9,301.2c-15.4,16.8-15.7,41.6-0.9,55.6c-7.3-7.1-7.2-21.7,0.2-35.8
+ c1-1.3,3.7-4.2,7.7-2.9c0.4,0.1,0.7,0.2,0.8,0.2c0.9,0.2,1.8,0.3,2.8,0.3c6.1-0.3,10.9-5.5,10.6-11.6c-0.1-2.7-1.3-5.2-3-7
+ c14.2-9.3,30.5-10.4,37.2-4.3l0.3,0C303.4,281.7,278.4,284.2,262.9,301.2z"/>
+ </g>
+</g>
+</svg>
diff --git a/docs/_static/logo-rdflib.png b/docs/_static/logo-rdflib.png
new file mode 100644
index 00000000..d8f96c3f
--- /dev/null
+++ b/docs/_static/logo-rdflib.png
Binary files differ
diff --git a/docs/_static/logo.svg b/docs/_static/logo.svg
deleted file mode 100644
index b17048f2..00000000
--- a/docs/_static/logo.svg
+++ /dev/null
@@ -1,374 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:xlink="http://www.w3.org/1999/xlink"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- version="1.0"
- width="600.04999"
- height="105"
- id="svg2"
- inkscape:version="0.48.2 r9819"
- sodipodi:docname="logo.svg">
- <sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="1280"
- inkscape:window-height="776"
- id="namedview3160"
- showgrid="false"
- inkscape:zoom="0.69327557"
- inkscape:cx="300.02499"
- inkscape:cy="52.5"
- inkscape:window-x="0"
- inkscape:window-y="24"
- inkscape:window-maximized="1"
- inkscape:current-layer="svg2" />
- <metadata
- id="metadata2193">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <defs
- id="defs4">
- <color-profile
- xlink:href="/usr/share/color/icc/compatibleWithAdobeRGB1998.icc"
- name="Compatible-with-Adobe-RGB--1998-"
- id="color-profile4000" />
- <linearGradient
- id="linearGradient2795">
- <stop
- id="stop2797"
- style="stop-color:#b8b8b8;stop-opacity:0.49803922"
- offset="0" />
- <stop
- id="stop2799"
- style="stop-color:#7f7f7f;stop-opacity:0"
- offset="1" />
- </linearGradient>
- <linearGradient
- id="linearGradient2787">
- <stop
- id="stop2789"
- style="stop-color:#7f7f7f;stop-opacity:0.5"
- offset="0" />
- <stop
- id="stop2791"
- style="stop-color:#7f7f7f;stop-opacity:0"
- offset="1" />
- </linearGradient>
- <linearGradient
- id="linearGradient3676">
- <stop
- id="stop3678"
- style="stop-color:#b2b2b2;stop-opacity:0.5"
- offset="0" />
- <stop
- id="stop3680"
- style="stop-color:#b3b3b3;stop-opacity:0"
- offset="1" />
- </linearGradient>
- <linearGradient
- id="linearGradient3236">
- <stop
- id="stop3244"
- style="stop-color:#f4f4f4;stop-opacity:1"
- offset="0" />
- <stop
- id="stop3240"
- style="stop-color:#ffffff;stop-opacity:1"
- offset="1" />
- </linearGradient>
- <linearGradient
- id="linearGradient4671">
- <stop
- id="stop4673"
- style="stop-color:#ffd43b;stop-opacity:1"
- offset="0" />
- <stop
- id="stop4675"
- style="stop-color:#ffe873;stop-opacity:1"
- offset="1" />
- </linearGradient>
- <linearGradient
- id="linearGradient4689">
- <stop
- id="stop4691"
- style="stop-color:#5a9fd4;stop-opacity:1"
- offset="0" />
- <stop
- id="stop4693"
- style="stop-color:#306998;stop-opacity:1"
- offset="1" />
- </linearGradient>
- <linearGradient
- x1="224.23996"
- y1="144.75717"
- x2="-65.308502"
- y2="144.75717"
- id="linearGradient2987"
- xlink:href="#linearGradient4671"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(100.2702,99.61116)" />
- <linearGradient
- x1="172.94208"
- y1="77.475983"
- x2="26.670298"
- y2="76.313133"
- id="linearGradient2990"
- xlink:href="#linearGradient4689"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(100.2702,99.61116)" />
- <linearGradient
- x1="172.94208"
- y1="77.475983"
- x2="26.670298"
- y2="76.313133"
- id="linearGradient2587"
- xlink:href="#linearGradient4689"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(100.2702,99.61116)" />
- <linearGradient
- x1="224.23996"
- y1="144.75717"
- x2="-65.308502"
- y2="144.75717"
- id="linearGradient2589"
- xlink:href="#linearGradient4671"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(100.2702,99.61116)" />
- <linearGradient
- x1="172.94208"
- y1="77.475983"
- x2="26.670298"
- y2="76.313133"
- id="linearGradient2248"
- xlink:href="#linearGradient4689"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(100.2702,99.61116)" />
- <linearGradient
- x1="224.23996"
- y1="144.75717"
- x2="-65.308502"
- y2="144.75717"
- id="linearGradient2250"
- xlink:href="#linearGradient4671"
- gradientUnits="userSpaceOnUse"
- gradientTransform="translate(100.2702,99.61116)" />
- <linearGradient
- x1="224.23996"
- y1="144.75717"
- x2="-65.308502"
- y2="144.75717"
- id="linearGradient2255"
- xlink:href="#linearGradient4671"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(0.562541,0,0,0.567972,-11.5974,-7.60954)" />
- <linearGradient
- x1="172.94208"
- y1="76.176224"
- x2="26.670298"
- y2="76.313133"
- id="linearGradient2258"
- xlink:href="#linearGradient4689"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(0.562541,0,0,0.567972,-11.5974,-7.60954)" />
- <radialGradient
- cx="61.518883"
- cy="132.28575"
- r="29.036913"
- fx="61.518883"
- fy="132.28575"
- id="radialGradient2801"
- xlink:href="#linearGradient2795"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(1,0,0,0.177966,0,108.7434)" />
- <linearGradient
- x1="150.96111"
- y1="192.35176"
- x2="112.03144"
- y2="137.27299"
- id="linearGradient1475"
- xlink:href="#linearGradient4671"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(0.562541,0,0,0.567972,-9.3997487,-5.3053169)" />
- <linearGradient
- x1="26.648937"
- y1="20.603781"
- x2="135.66525"
- y2="114.39767"
- id="linearGradient1478"
- xlink:href="#linearGradient4689"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(0.562541,0,0,0.567972,-9.3997487,-5.3053169)" />
- <radialGradient
- cx="61.518883"
- cy="132.28575"
- r="29.036913"
- fx="61.518883"
- fy="132.28575"
- id="radialGradient1480"
- xlink:href="#linearGradient2795"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(2.382716e-8,-0.296405,1.43676,4.683673e-7,-124.41586,170.0665)" />
- <linearGradient
- x1="26.648937"
- y1="20.603781"
- x2="135.66525"
- y2="114.39767"
- id="linearGradient4183"
- xlink:href="#linearGradient4689"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(0.63138863,0,0,0.63748431,-14.570883,12.971009)" />
- <linearGradient
- x1="150.96111"
- y1="192.35176"
- x2="112.03144"
- y2="137.27299"
- id="linearGradient4185"
- xlink:href="#linearGradient4671"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(0.63138863,0,0,0.63748431,-14.570883,12.971009)" />
- <radialGradient
- cx="61.518883"
- cy="132.28575"
- r="29.036913"
- fx="61.518883"
- fy="132.28575"
- id="radialGradient4288"
- xlink:href="#linearGradient2795"
- gradientUnits="userSpaceOnUse"
- gradientTransform="matrix(1.7490565e-8,-0.23994696,1.054668,3.7915457e-7,-78.109429,182.86408)" />
- </defs>
- <g
- id="g4179"
- transform="matrix(0.8909584,0,0,0.8909584,3.5823011,-16.861955)">
- <path
- inkscape:connector-curvature="0"
- style="fill:url(#linearGradient4183);fill-opacity:1"
- id="path1948"
- d="m 59.981055,19.096386 c -4.583653,0.0213 -8.960939,0.412218 -12.8125,1.09375 -11.346216,2.004504 -13.406249,6.200102 -13.40625,13.9375 l 0,10.21875 26.8125,0 0,3.40625 -26.8125,0 -10.0625,0 c -7.792459,0 -14.6157589,4.683717 -16.7499999,13.59375 -2.46182,10.212966 -2.571015,16.586023 0,27.25 1.905928,7.937852 6.4575429,13.593744 14.2499999,13.593754 l 9.21875,0 0,-12.250004 c 0,-8.849902 7.657144,-16.656248 16.75,-16.65625 l 26.78125,0 c 7.454951,0 13.406253,-6.138164 13.40625,-13.625 l 0,-25.53125 c 0,-7.266339 -6.12998,-12.724777 -13.40625,-13.9375 -4.605987,-0.766725 -9.385097,-1.115048 -13.96875,-1.09375 z m -14.5,8.21875 c 2.769547,0 5.03125,2.298646 5.03125,5.125 -2e-6,2.816336 -2.261703,5.09375 -5.03125,5.09375 -2.779476,-1e-6 -5.03125,-2.277415 -5.03125,-5.09375 -10e-7,-2.826353 2.251774,-5.125 5.03125,-5.125 z" />
- <path
- inkscape:connector-curvature="0"
- style="fill:url(#linearGradient4185);fill-opacity:1"
- id="path1950"
- d="m 90.699805,47.752636 0,11.90625 c 0,9.230755 -7.825895,16.999999 -16.75,17 l -26.78125,0 c -7.335833,0 -13.406249,6.278483 -13.40625,13.625 l 0,25.531244 c 0,7.26634 6.318588,11.54032 13.40625,13.625 8.487331,2.49561 16.626237,2.94663 26.78125,0 6.750155,-1.95439 13.406253,-5.88761 13.40625,-13.625 l 0,-10.21875 -26.78125,0 0,-3.40624 26.78125,0 13.406255,0 c 7.79246,0 10.69625,-5.435412 13.40624,-13.593754 2.79933,-8.398886 2.68022,-16.475776 0,-27.25 -1.92578,-7.757441 -5.60387,-13.59375 -13.40624,-13.59375 l -10.062505,0 z m -15.0625,64.656244 c 2.779478,1e-5 5.03125,2.27742 5.03125,5.09375 -2e-6,2.82635 -2.251775,5.125 -5.03125,5.125 -2.76955,0 -5.03125,-2.29865 -5.03125,-5.125 2e-6,-2.81633 2.261697,-5.09375 5.03125,-5.09375 z" />
- </g>
- <path
- inkscape:connector-curvature="0"
- style="fill:#0c479c"
- id="path3055"
- d="m 579.5918,67.356391 c -0.58493,-0.30994 -1.18089,-0.57389 -1.78052,-0.80841 l 0.42857,-0.0349 c 0,0 -3.81763,-1.69039 -4.15148,-13.960886 -0.33016,-12.272336 3.63922,-14.36555 3.63922,-14.36555 l -0.57021,0.02483 c 2.99911,-1.53864 5.57883,-3.96938 7.28118,-7.177257 4.43566,-8.343422 1.26181,-18.707403 -7.08345,-23.143985 -8.34894,-4.431064 -18.71016,-1.264573 -23.14306,7.087127 -1.82283,3.425843 -2.34061,7.189213 -1.76489,10.744732 l -0.19497,-0.299819 c 0,0 1.00522,4.453137 -9.43418,10.933269 -10.43755,6.483811 -15.14636,3.2557 -15.14636,3.2557 l 0.29982,0.441451 c -0.2989,-0.184857 -0.58493,-0.373393 -0.8967,-0.536179 -8.34894,-4.436582 -18.71292,-1.266412 -23.14767,7.082529 -4.4329,8.348021 -1.26181,18.707408 7.08437,23.145828 6.22262,3.3026 13.5599,2.38199 18.72948,-1.76029 l -0.1122,0.21705 c 0,0 3.80015,-3.12602 14.75181,2.75355 8.64509,4.63891 9.92897,9.18586 10.0936,10.89464 -0.22625,6.33942 3.0782,12.55744 9.05618,15.73313 8.34618,4.437499 18.71016,1.26457 23.14306,-7.08253 4.43659,-8.34618 1.26734,-18.71108 -7.0816,-23.14398 z m -19.29877,2.24956 c -1.38321,0.48927 -5.33512,1.03097 -13.65463,-3.43045 -9.0102,-4.83756 -10.34742,-8.878678 -10.54332,-10.091748 0.12784,-1.471504 0.046,-2.940248 -0.20509,-4.373124 l 0.0552,0.08277 c 0,0 -0.73391,-3.930753 9.57581,-10.332711 9.22724,-5.727826 13.42195,-4.588331 14.20276,-4.289432 0.50215,0.341205 1.0227,0.659418 1.56899,0.95004 1.03833,0.551813 2.10885,0.984067 3.19317,1.304119 1.26917,1.208472 3.60518,4.639834 3.84798,13.661068 0.24831,9.089288 -2.42247,12.594228 -3.87833,13.833968 -1.4991,0.67873 -2.90254,1.58463 -4.16252,2.6855 z" />
- <g
- id="g3057"
- transform="matrix(0.91968947,0,0,0.91968947,501.92402,5.888021)">
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff"
- id="path3059"
- d="m 62.239,8.1 c -5.415,5.923 -5.529,14.636 -0.312,19.566 -2.579,-2.483 -2.523,-7.651 0.083,-12.597 0.335,-0.443 1.306,-1.49 2.725,-1.014 0.143,0.049 0.237,0.062 0.292,0.053 0.321,0.069 0.65,0.11 0.99,0.095 2.155,-0.098 3.822,-1.921 3.725,-4.077 C 69.698,9.159 69.297,8.303 68.677,7.646 73.679,4.369 79.419,3.994 81.771,6.142 l 0.09,0.006 C 76.488,1.242 67.705,2.119 62.239,8.1 z" />
- </g>
- <g
- id="g3061"
- transform="matrix(0.91968947,0,0,0.91968947,501.92402,5.888021)">
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff"
- id="path3063"
- d="M 7.632,62.845 C 7.586,62.798 7.539,62.743 7.491,62.697 c 0.03,0.031 0.059,0.069 0.095,0.102 l 0.046,0.046 z" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff"
- id="path3065"
- d="M 7.805,43.13 C 2.389,49.054 2.276,57.765 7.492,62.696 4.914,60.212 4.969,55.044 7.575,50.098 c 0.336,-0.444 1.308,-1.49 2.727,-1.014 0.141,0.049 0.236,0.061 0.292,0.054 0.321,0.069 0.651,0.11 0.99,0.095 2.156,-0.099 3.822,-1.922 3.725,-4.076 -0.045,-0.967 -0.445,-1.824 -1.063,-2.48 4.999,-3.276 10.74,-3.654 13.092,-1.505 l 0.089,0.008 C 22.054,36.271 13.269,37.147 7.805,43.13 z" />
- </g>
- <g
- id="g3067"
- transform="matrix(0.91968947,0,0,0.91968947,501.92402,5.888021)">
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff"
- id="path3069"
- d="m 65.256,92.504 c -0.047,-0.048 -0.094,-0.102 -0.141,-0.148 0.029,0.031 0.059,0.069 0.094,0.101 l 0.047,0.047 z" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff"
- id="path3071"
- d="m 65.428,72.786 c -5.416,5.926 -5.529,14.639 -0.313,19.569 -2.58,-2.483 -2.523,-7.653 0.082,-12.597 0.336,-0.445 1.307,-1.49 2.727,-1.014 0.143,0.047 0.235,0.061 0.292,0.053 0.32,0.069 0.651,0.11 0.99,0.096 2.154,-0.1 3.82,-1.924 3.723,-4.08 -0.044,-0.966 -0.445,-1.822 -1.063,-2.479 5,-3.275 10.739,-3.652 13.093,-1.504 l 0.088,0.007 c -5.37,-4.907 -14.156,-4.03 -19.619,1.949 z" />
- </g>
- <text
- sodipodi:linespacing="100%"
- style="font-size:84.37892913999999678px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;fill-opacity:0.68493150000000003;stroke:none;font-family:Verana Sans;-inkscape-font-specification:Verana Sans"
- xml:space="preserve"
- id="text3235-0"
- y="77.651299"
- x="142.00471"><tspan
- style="font-size:84.37892913999999678px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;fill-opacity:0.68493150000000003;font-family:Verana Sans;-inkscape-font-specification:Verana Sans"
- id="tspan3237-3"
- y="77.651299"
- x="142.00471">R</tspan></text>
- <text
- sodipodi:linespacing="100%"
- style="font-size:84.37892913999999678px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;fill-opacity:0.68493150000000003;stroke:none;font-family:Verana Sans;-inkscape-font-specification:Verana Sans"
- xml:space="preserve"
- id="text3235-05"
- y="77.651299"
- x="197.48764"><tspan
- style="font-size:84.37892913999999678px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;fill-opacity:0.68493150000000003;font-family:Verana Sans;-inkscape-font-specification:Verana Sans"
- id="tspan3237-1"
- y="77.651299"
- x="197.48764">D</tspan></text>
- <text
- sodipodi:linespacing="100%"
- style="font-size:84.37892913999999678px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;fill-opacity:0.68493150000000003;stroke:none;font-family:Verana Sans;-inkscape-font-specification:Verana Sans"
- xml:space="preserve"
- id="text3235-1"
- y="77.651299"
- x="254.22394"><tspan
- style="font-size:84.37892913999999678px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;fill-opacity:0.68493150000000003;font-family:Verana Sans;-inkscape-font-specification:Verana Sans"
- id="tspan3237-0"
- y="77.651299"
- x="254.22394">F</tspan></text>
- <text
- sodipodi:linespacing="100%"
- style="font-size:66.78321837999999389px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;stroke:none;font-family:Verana Sans;-inkscape-font-specification:Verana Sans"
- xml:space="preserve"
- id="text3235-4"
- transform="scale(1.0248205,0.97578063)"
- y="80.830879"
- x="338.19601"><tspan
- style="font-size:66.78321837999999389px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;font-family:Verana Sans;-inkscape-font-specification:Verana Sans"
- id="tspan3237-9"
- y="80.830879"
- x="338.19601">ib</tspan></text>
- <text
- sodipodi:linespacing="100%"
- style="font-size:84.37892913999999678px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;fill-opacity:0.97260274999999996;stroke:none;font-family:Verana Sans;-inkscape-font-specification:Verana Sans"
- xml:space="preserve"
- id="text3235-1-4"
- y="78.059006"
- x="298.59976"><tspan
- style="font-size:84.37892913999999678px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#ffffff;fill-opacity:0.97260274999999996;font-family:Verana Sans;-inkscape-font-specification:Verana Sans"
- id="tspan3237-0-1"
- y="78.059006"
- x="298.59976">L</tspan></text>
-</svg>
diff --git a/docs/_themes/armstrong/rtd-themes.conf b/docs/_themes/armstrong/rtd-themes.conf
deleted file mode 100644
index 5930488d..00000000
--- a/docs/_themes/armstrong/rtd-themes.conf
+++ /dev/null
@@ -1,65 +0,0 @@
-[theme]
-inherit = default
-stylesheet = rtd.css
-pygment_style = default
-show_sphinx = False
-
-[options]
-show_rtd = True
-
-white = #ffffff
-almost_white = #f8f8f8
-barely_white = #f2f2f2
-dirty_white = #eeeeee
-almost_dirty_white = #e6e6e6
-dirtier_white = #dddddd
-lighter_gray = #cccccc
-gray_a = #aaaaaa
-gray_9 = #999999
-light_gray = #888888
-gray_7 = #777777
-gray = #666666
-dark_gray = #444444
-gray_2 = #222222
-black = #111111
-light_color = #e8ecef
-light_medium_color = #DDEAF0
-medium_color = #8ca1af
-medium_color_link = #86989b
-medium_color_link_hover = #a6b8bb
-dark_color = #465158
-
-h1 = #000000
-h2 = #465158
-h3 = #6c818f
-
-link_color = #444444
-link_color_decoration = #CCCCCC
-
-medium_color_hover = #697983
-green_highlight = #8ecc4c
-
-
-positive_dark = #609060
-positive_medium = #70a070
-positive_light = #e9ffe9
-
-negative_dark = #900000
-negative_medium = #b04040
-negative_light = #ffe9e9
-negative_text = #c60f0f
-
-ruler = #abc
-
-viewcode_bg = #f4debf
-viewcode_border = #ac9
-
-highlight = #ffe080
-
-code_background = #eeeeee
-
-background = #465158
-background_link = #ffffff
-background_link_half = #ffffff
-background_text = #eeeeee
-background_text_link = #86989b
diff --git a/docs/_themes/armstrong/static/rtd.css_t b/docs/_themes/armstrong/static/rtd.css_t
index f5f08642..489911a2 100644
--- a/docs/_themes/armstrong/static/rtd.css_t
+++ b/docs/_themes/armstrong/static/rtd.css_t
@@ -66,6 +66,9 @@ div.documentwrapper {
background-color: {{ theme_light_color }};
}
+p.logo {
+ padding-top: 30px;
+}
/* HEADINGS --------------------------------------------------------------- */
diff --git a/docs/_themes/armstrong/theme-old.conf b/docs/_themes/armstrong/theme-old.conf
new file mode 100644
index 00000000..c77da3a1
--- /dev/null
+++ b/docs/_themes/armstrong/theme-old.conf
@@ -0,0 +1,65 @@
+[theme]
+inherit = default
+stylesheet = rtd.css
+pygment_style = default
+show_sphinx = False
+
+[options]
+show_rtd = True
+
+white = #ffffff
+almost_white = #f8f8f8
+barely_white = #f2f2f2
+dirty_white = #eeeeee
+almost_dirty_white = #e6e6e6
+dirtier_white = #DAC6AF
+lighter_gray = #cccccc
+gray_a = #aaaaaa
+gray_9 = #999999
+light_gray = #888888
+gray_7 = #777777
+gray = #666666
+dark_gray = #444444
+gray_2 = #222222
+black = #111111
+light_color = #EDE4D8
+light_medium_color = #DDEAF0
+medium_color_link = #634320
+medium_color_link_hover = #261a0c
+dark_color = rgba(160, 109, 52, 1.0)
+
+h1 = #1f3744
+h2 = #335C72
+h3 = #638fa6
+
+link_color = #335C72
+link_color_decoration = #99AEB9
+
+medium_color_hover = rgba(255, 255, 255, 0.25)
+medium_color = rgba(255, 255, 255, 0.5)
+green_highlight = #8ecc4c
+
+
+positive_dark = rgba(51, 77, 0, 1.0)
+positive_medium = rgba(102, 153, 0, 1.0)
+positive_light = rgba(102, 153, 0, 0.1)
+
+negative_dark = rgba(51, 13, 0, 1.0)
+negative_medium = rgba(204, 51, 0, 1.0)
+negative_light = rgba(204, 51, 0, 0.1)
+negative_text = #c60f0f
+
+ruler = #abc
+
+viewcode_bg = #f4debf
+viewcode_border = #ac9
+
+highlight = #ffe080
+
+code_background = rgba(0, 0, 0, 0.075)
+
+background = rgba(135, 57, 34, 1.0)
+background_link = rgba(212, 195, 172, 1.0)
+background_link_half = rgba(212, 195, 172, 0.5)
+background_text = rgba(212, 195, 172, 1.0)
+background_text_link = rgba(171, 138, 93, 1.0)
diff --git a/docs/_themes/armstrong/theme.conf b/docs/_themes/armstrong/theme.conf
index c77da3a1..5930488d 100644
--- a/docs/_themes/armstrong/theme.conf
+++ b/docs/_themes/armstrong/theme.conf
@@ -12,7 +12,7 @@ almost_white = #f8f8f8
barely_white = #f2f2f2
dirty_white = #eeeeee
almost_dirty_white = #e6e6e6
-dirtier_white = #DAC6AF
+dirtier_white = #dddddd
lighter_gray = #cccccc
gray_a = #aaaaaa
gray_9 = #999999
@@ -22,31 +22,31 @@ gray = #666666
dark_gray = #444444
gray_2 = #222222
black = #111111
-light_color = #EDE4D8
+light_color = #e8ecef
light_medium_color = #DDEAF0
-medium_color_link = #634320
-medium_color_link_hover = #261a0c
-dark_color = rgba(160, 109, 52, 1.0)
+medium_color = #8ca1af
+medium_color_link = #86989b
+medium_color_link_hover = #a6b8bb
+dark_color = #465158
-h1 = #1f3744
-h2 = #335C72
-h3 = #638fa6
+h1 = #000000
+h2 = #465158
+h3 = #6c818f
-link_color = #335C72
-link_color_decoration = #99AEB9
+link_color = #444444
+link_color_decoration = #CCCCCC
-medium_color_hover = rgba(255, 255, 255, 0.25)
-medium_color = rgba(255, 255, 255, 0.5)
+medium_color_hover = #697983
green_highlight = #8ecc4c
-positive_dark = rgba(51, 77, 0, 1.0)
-positive_medium = rgba(102, 153, 0, 1.0)
-positive_light = rgba(102, 153, 0, 0.1)
+positive_dark = #609060
+positive_medium = #70a070
+positive_light = #e9ffe9
-negative_dark = rgba(51, 13, 0, 1.0)
-negative_medium = rgba(204, 51, 0, 1.0)
-negative_light = rgba(204, 51, 0, 0.1)
+negative_dark = #900000
+negative_medium = #b04040
+negative_light = #ffe9e9
negative_text = #c60f0f
ruler = #abc
@@ -56,10 +56,10 @@ viewcode_border = #ac9
highlight = #ffe080
-code_background = rgba(0, 0, 0, 0.075)
+code_background = #eeeeee
-background = rgba(135, 57, 34, 1.0)
-background_link = rgba(212, 195, 172, 1.0)
-background_link_half = rgba(212, 195, 172, 0.5)
-background_text = rgba(212, 195, 172, 1.0)
-background_text_link = rgba(171, 138, 93, 1.0)
+background = #465158
+background_link = #ffffff
+background_link_half = #ffffff
+background_text = #eeeeee
+background_text_link = #86989b
diff --git a/docs/apidocs/.gitignore b/docs/apidocs/.gitignore
new file mode 100644
index 00000000..89867378
--- /dev/null
+++ b/docs/apidocs/.gitignore
@@ -0,0 +1,2 @@
+modules.rst
+rdflib*.rst
diff --git a/docs/apidocs/examples.rst b/docs/apidocs/examples.rst
index 01a9d809..d099838f 100644
--- a/docs/apidocs/examples.rst
+++ b/docs/apidocs/examples.rst
@@ -60,7 +60,7 @@ These examples all live in ``./examples`` in the source-distribution of RDFLib.
:show-inheritance:
:mod:`rdfa_example` Module
-----------------------
+--------------------------
.. automodule:: examples.rdfa_example
:members:
@@ -76,7 +76,7 @@ These examples all live in ``./examples`` in the source-distribution of RDFLib.
:show-inheritance:
:mod:`sleepycat_example` Module
------------------------------------
+--------------------------------
.. automodule:: examples.sleepycat_example
:members:
@@ -92,7 +92,7 @@ These examples all live in ``./examples`` in the source-distribution of RDFLib.
:show-inheritance:
:mod:`smushing` Module
--------------------
+----------------------
.. automodule:: examples.smushing
:members:
diff --git a/docs/apidocs/modules.rst b/docs/apidocs/modules.rst
deleted file mode 100644
index 6db120d1..00000000
--- a/docs/apidocs/modules.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-rdflib API docs
-===============
-
-.. toctree::
- :maxdepth: 10
-
- rdflib
diff --git a/docs/apidocs/rdflib.extras.rst b/docs/apidocs/rdflib.extras.rst
deleted file mode 100644
index 3da6f545..00000000
--- a/docs/apidocs/rdflib.extras.rst
+++ /dev/null
@@ -1,36 +0,0 @@
-extras Package
-==============
-
-:mod:`extras` Package
----------------------
-
-.. automodule:: rdflib.extras
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`cmdlineutils` Module
---------------------------
-
-.. automodule:: rdflib.extras.cmdlineutils
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`describer` Module
------------------------
-
-.. automodule:: rdflib.extras.describer
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`infixowl` Module
-----------------------
-
-.. automodule:: rdflib.extras.infixowl
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
diff --git a/docs/apidocs/rdflib.plugins.parsers.pyMicrodata.rst b/docs/apidocs/rdflib.plugins.parsers.pyMicrodata.rst
deleted file mode 100644
index 1c0f6734..00000000
--- a/docs/apidocs/rdflib.plugins.parsers.pyMicrodata.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-pyMicrodata Package
-===================
-
-:mod:`pyMicrodata` Package
---------------------------
-
-.. automodule:: rdflib.plugins.parsers.pyMicrodata
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`microdata` Module
------------------------
-
-.. automodule:: rdflib.plugins.parsers.pyMicrodata.microdata
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`registry` Module
-----------------------
-
-.. automodule:: rdflib.plugins.parsers.pyMicrodata.registry
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`utils` Module
--------------------
-
-.. automodule:: rdflib.plugins.parsers.pyMicrodata.utils
- :members:
- :undoc-members:
- :show-inheritance:
-
diff --git a/docs/apidocs/rdflib.plugins.parsers.pyRdfa.extras.rst b/docs/apidocs/rdflib.plugins.parsers.pyRdfa.extras.rst
deleted file mode 100644
index 337a1f73..00000000
--- a/docs/apidocs/rdflib.plugins.parsers.pyRdfa.extras.rst
+++ /dev/null
@@ -1,19 +0,0 @@
-extras Package
-==============
-
-:mod:`extras` Package
----------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.extras
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`httpheader` Module
-------------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.extras.httpheader
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
diff --git a/docs/apidocs/rdflib.plugins.parsers.pyRdfa.host.rst b/docs/apidocs/rdflib.plugins.parsers.pyRdfa.host.rst
deleted file mode 100644
index e0df6f06..00000000
--- a/docs/apidocs/rdflib.plugins.parsers.pyRdfa.host.rst
+++ /dev/null
@@ -1,27 +0,0 @@
-host Package
-============
-
-:mod:`host` Package
--------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.host
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`atom` Module
-------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.host.atom
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`html5` Module
--------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.host.html5
- :members:
- :undoc-members:
- :show-inheritance:
-
diff --git a/docs/apidocs/rdflib.plugins.parsers.pyRdfa.rdfs.rst b/docs/apidocs/rdflib.plugins.parsers.pyRdfa.rdfs.rst
deleted file mode 100644
index 98915eb3..00000000
--- a/docs/apidocs/rdflib.plugins.parsers.pyRdfa.rdfs.rst
+++ /dev/null
@@ -1,27 +0,0 @@
-rdfs Package
-============
-
-:mod:`rdfs` Package
--------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.rdfs
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`cache` Module
--------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.rdfs.cache
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`process` Module
----------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.rdfs.process
- :members:
- :undoc-members:
- :show-inheritance:
-
diff --git a/docs/apidocs/rdflib.plugins.parsers.pyRdfa.rst b/docs/apidocs/rdflib.plugins.parsers.pyRdfa.rst
deleted file mode 100644
index 8563debd..00000000
--- a/docs/apidocs/rdflib.plugins.parsers.pyRdfa.rst
+++ /dev/null
@@ -1,85 +0,0 @@
-pyRdfa Package
-==============
-
-:mod:`pyRdfa` Package
----------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`embeddedRDF` Module
--------------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.embeddedRDF
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`initialcontext` Module
-----------------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.initialcontext
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`options` Module
----------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.options
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`parse` Module
--------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.parse
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`property` Module
-----------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.property
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`state` Module
--------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.state
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`termorcurie` Module
--------------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.termorcurie
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`utils` Module
--------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.utils
- :members:
- :undoc-members:
- :show-inheritance:
-
-Subpackages
------------
-
-.. toctree::
-
- rdflib.plugins.parsers.pyRdfa.extras
- rdflib.plugins.parsers.pyRdfa.host
- rdflib.plugins.parsers.pyRdfa.rdfs
- rdflib.plugins.parsers.pyRdfa.transform
-
diff --git a/docs/apidocs/rdflib.plugins.parsers.pyRdfa.transform.rst b/docs/apidocs/rdflib.plugins.parsers.pyRdfa.transform.rst
deleted file mode 100644
index 7f7a357b..00000000
--- a/docs/apidocs/rdflib.plugins.parsers.pyRdfa.transform.rst
+++ /dev/null
@@ -1,51 +0,0 @@
-transform Package
-=================
-
-:mod:`transform` Package
-------------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.transform
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`DublinCore` Module
-------------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.transform.DublinCore
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`OpenID` Module
---------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.transform.OpenID
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`lite` Module
-------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.transform.lite
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`metaname` Module
-----------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.transform.metaname
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`prototype` Module
------------------------
-
-.. automodule:: rdflib.plugins.parsers.pyRdfa.transform.prototype
- :members:
- :undoc-members:
- :show-inheritance:
-
diff --git a/docs/apidocs/rdflib.plugins.parsers.rst b/docs/apidocs/rdflib.plugins.parsers.rst
deleted file mode 100644
index 9428ad47..00000000
--- a/docs/apidocs/rdflib.plugins.parsers.rst
+++ /dev/null
@@ -1,85 +0,0 @@
-parsers Package
-===============
-
-:mod:`parsers` Package
-----------------------
-
-.. automodule:: rdflib.plugins.parsers
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`hturtle` Module
----------------------
-
-.. automodule:: rdflib.plugins.parsers.hturtle
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`notation3` Module
------------------------
-
-.. automodule:: rdflib.plugins.parsers.notation3
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`nquads` Module
---------------------
-
-.. automodule:: rdflib.plugins.parsers.nquads
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`nt` Module
-----------------
-
-.. automodule:: rdflib.plugins.parsers.nt
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`ntriples` Module
-----------------------
-
-.. automodule:: rdflib.plugins.parsers.ntriples
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`rdfxml` Module
---------------------
-
-.. automodule:: rdflib.plugins.parsers.rdfxml
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`structureddata` Module
-----------------------------
-
-.. automodule:: rdflib.plugins.parsers.structureddata
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`trix` Module
-------------------
-
-.. automodule:: rdflib.plugins.parsers.trix
- :members:
- :undoc-members:
- :show-inheritance:
-
-Subpackages
------------
-
-.. toctree::
-
- rdflib.plugins.parsers.pyMicrodata
- rdflib.plugins.parsers.pyRdfa
-
diff --git a/docs/apidocs/rdflib.plugins.rst b/docs/apidocs/rdflib.plugins.rst
deleted file mode 100644
index 6d45026f..00000000
--- a/docs/apidocs/rdflib.plugins.rst
+++ /dev/null
@@ -1,37 +0,0 @@
-plugins Package
-===============
-
-:mod:`plugins` Package
-----------------------
-
-.. automodule:: rdflib.plugins
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`memory` Module
---------------------
-
-.. automodule:: rdflib.plugins.memory
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`sleepycat` Module
------------------------
-
-.. automodule:: rdflib.plugins.sleepycat
- :members:
- :undoc-members:
- :show-inheritance:
-
-Subpackages
------------
-
-.. toctree::
-
- rdflib.plugins.parsers
- rdflib.plugins.serializers
- rdflib.plugins.sparql
- rdflib.plugins.stores
-
diff --git a/docs/apidocs/rdflib.plugins.serializers.rst b/docs/apidocs/rdflib.plugins.serializers.rst
deleted file mode 100644
index c89309dd..00000000
--- a/docs/apidocs/rdflib.plugins.serializers.rst
+++ /dev/null
@@ -1,67 +0,0 @@
-serializers Package
-===================
-
-:mod:`n3` Module
-----------------
-
-.. automodule:: rdflib.plugins.serializers.n3
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`nquads` Module
---------------------
-
-.. automodule:: rdflib.plugins.serializers.nquads
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`nt` Module
-----------------
-
-.. automodule:: rdflib.plugins.serializers.nt
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`rdfxml` Module
---------------------
-
-.. automodule:: rdflib.plugins.serializers.rdfxml
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`trig` Module
-------------------
-
-.. automodule:: rdflib.plugins.serializers.trig
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`trix` Module
-------------------
-
-.. automodule:: rdflib.plugins.serializers.trix
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`turtle` Module
---------------------
-
-.. automodule:: rdflib.plugins.serializers.turtle
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`xmlwriter` Module
------------------------
-
-.. automodule:: rdflib.plugins.serializers.xmlwriter
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
diff --git a/docs/apidocs/rdflib.plugins.sparql.results.rst b/docs/apidocs/rdflib.plugins.sparql.results.rst
deleted file mode 100644
index 41d632ff..00000000
--- a/docs/apidocs/rdflib.plugins.sparql.results.rst
+++ /dev/null
@@ -1,51 +0,0 @@
-results Package
-===============
-
-:mod:`csvresults` Module
-------------------------
-
-.. automodule:: rdflib.plugins.sparql.results.csvresults
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`jsonlayer` Module
------------------------
-
-.. automodule:: rdflib.plugins.sparql.results.jsonlayer
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`jsonresults` Module
--------------------------
-
-.. automodule:: rdflib.plugins.sparql.results.jsonresults
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`rdfresults` Module
-------------------------
-
-.. automodule:: rdflib.plugins.sparql.results.rdfresults
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`tsvresults` Module
-------------------------
-
-.. automodule:: rdflib.plugins.sparql.results.tsvresults
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`xmlresults` Module
-------------------------
-
-.. automodule:: rdflib.plugins.sparql.results.xmlresults
- :members:
- :undoc-members:
- :show-inheritance:
-
diff --git a/docs/apidocs/rdflib.plugins.sparql.rst b/docs/apidocs/rdflib.plugins.sparql.rst
deleted file mode 100644
index 0985fd41..00000000
--- a/docs/apidocs/rdflib.plugins.sparql.rst
+++ /dev/null
@@ -1,116 +0,0 @@
-sparql Package
-==============
-
-:mod:`sparql` Package
----------------------
-
-.. automodule:: rdflib.plugins.sparql
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`aggregates` Module
-------------------------
-
-.. automodule:: rdflib.plugins.sparql.aggregates
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`algebra` Module
----------------------
-
-.. automodule:: rdflib.plugins.sparql.algebra
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`compat` Module
---------------------
-
-.. automodule:: rdflib.plugins.sparql.compat
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`datatypes` Module
------------------------
-
-.. automodule:: rdflib.plugins.sparql.datatypes
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`evaluate` Module
-----------------------
-
-.. automodule:: rdflib.plugins.sparql.evaluate
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`evalutils` Module
------------------------
-
-.. automodule:: rdflib.plugins.sparql.evalutils
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`operators` Module
------------------------
-
-.. automodule:: rdflib.plugins.sparql.operators
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`parser` Module
---------------------
-
-.. automodule:: rdflib.plugins.sparql.parser
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`parserutils` Module
--------------------------
-
-.. automodule:: rdflib.plugins.sparql.parserutils
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`processor` Module
------------------------
-
-.. automodule:: rdflib.plugins.sparql.processor
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`sparql` Module
---------------------
-
-.. automodule:: rdflib.plugins.sparql.sparql
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`update` Module
---------------------
-
-.. automodule:: rdflib.plugins.sparql.update
- :members:
- :undoc-members:
- :show-inheritance:
-
-Subpackages
------------
-
-.. toctree::
-
- rdflib.plugins.sparql.results
-
diff --git a/docs/apidocs/rdflib.plugins.stores.rst b/docs/apidocs/rdflib.plugins.stores.rst
deleted file mode 100644
index 89f4506a..00000000
--- a/docs/apidocs/rdflib.plugins.stores.rst
+++ /dev/null
@@ -1,45 +0,0 @@
-stores Package
-==============
-
-:mod:`stores` Package
----------------------
-
-.. automodule:: rdflib.plugins.stores
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`auditable` Module
------------------------
-
-.. automodule:: rdflib.plugins.stores.auditable
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`concurrent` Module
-------------------------
-
-.. automodule:: rdflib.plugins.stores.concurrent
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`regexmatching` Module
----------------------------
-
-.. automodule:: rdflib.plugins.stores.regexmatching
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`sparqlstore` Module
--------------------------
-
-.. automodule:: rdflib.plugins.stores.sparqlstore
- :members:
- :undoc-members:
- :show-inheritance:
-
diff --git a/docs/apidocs/rdflib.rst b/docs/apidocs/rdflib.rst
deleted file mode 100644
index f279cb03..00000000
--- a/docs/apidocs/rdflib.rst
+++ /dev/null
@@ -1,180 +0,0 @@
-rdflib Package
-==============
-
-:mod:`rdflib` Package
----------------------
-
-.. automodule:: rdflib.__init__
- :members: NORMALIZE_LITERALS, DAWG_LITERAL_COLLATION
- :show-inheritance:
-
-:mod:`collection` Module
-------------------------
-
-.. automodule:: rdflib.collection
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`compare` Module
----------------------
-
-.. automodule:: rdflib.compare
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`compat` Module
---------------------
-
-.. automodule:: rdflib.compat
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`events` Module
---------------------
-
-.. automodule:: rdflib.events
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`exceptions` Module
-------------------------
-
-.. automodule:: rdflib.exceptions
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`graph` Module
--------------------
-
-.. automodule:: rdflib.graph
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`namespace` Module
------------------------
-
-.. automodule:: rdflib.namespace
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`parser` Module
---------------------
-
-.. automodule:: rdflib.parser
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`paths` Module
--------------------
-
-.. automodule:: rdflib.paths
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`plugin` Module
---------------------
-
-.. automodule:: rdflib.plugin
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`py3compat` Module
------------------------
-
-.. automodule:: rdflib.py3compat
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`query` Module
--------------------
-
-.. automodule:: rdflib.query
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
- .. autoclass:: ResultRow
- :members:
-
-
-:mod:`resource` Module
-----------------------
-
-.. automodule:: rdflib.resource
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`serializer` Module
-------------------------
-
-.. automodule:: rdflib.serializer
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`store` Module
--------------------
-
-.. automodule:: rdflib.store
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`term` Module
-------------------
-
-.. automodule:: rdflib.term
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-
-:mod:`util` Module
-------------------
-
-.. automodule:: rdflib.util
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`void` Module
-------------------
-
-.. automodule:: rdflib.void
- :members:
- :undoc-members:
- :show-inheritance:
-
-Subpackages
------------
-
-.. toctree::
-
- rdflib.extras
- rdflib.plugins
- rdflib.tools
-
diff --git a/docs/apidocs/rdflib.tools.rst b/docs/apidocs/rdflib.tools.rst
deleted file mode 100644
index 856991cc..00000000
--- a/docs/apidocs/rdflib.tools.rst
+++ /dev/null
@@ -1,55 +0,0 @@
-tools Package
-=============
-
-These commandline-tools are installed into :samp:`{INSTALL_PREFIX}/bin` by setuptools.
-
-:mod:`tools` Package
----------------------
-
-.. automodule:: rdflib.tools
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-:mod:`csv2rdf` Module
----------------------
-
-.. automodule:: rdflib.tools.csv2rdf
- :members:
- :undoc-members:
- :show-inheritance:
- :exclude-members: __dict__,__weakref__
-
-:mod:`graphisomorphism` Module
-------------------------------
-
-.. automodule:: rdflib.tools.graphisomorphism
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`rdf2dot` Module
----------------------
-
-.. automodule:: rdflib.tools.rdf2dot
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`rdfpipe` Module
----------------------
-
-.. automodule:: rdflib.tools.rdfpipe
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`rdfs2dot` Module
-----------------------
-
-.. automodule:: rdflib.tools.rdfs2dot
- :members:
- :undoc-members:
- :show-inheritance:
-
diff --git a/docs/conf.py b/docs/conf.py
index 814e1f54..dd9087c3 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -11,52 +11,64 @@
# All configuration values have a default; values that are commented out
# serve to show the default.
-import sys, os, re
+import sys
+import os
+import re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.append(os.path.abspath('.'))
+# sys.path.append(os.path.abspath(".."))
+sys.path.append(os.path.abspath(".."))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.doctest']
-extensions = ['sphinx.ext.autodoc', #'sphinx.ext.autosummary',
- 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
- 'sphinx.ext.todo', 'sphinx.ext.coverage',
- 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
+extensions = [
+ "sphinxcontrib.apidoc",
+ "sphinx.ext.autodoc",
+ #'sphinx.ext.autosummary',
+ "sphinx.ext.doctest",
+ "sphinx.ext.intersphinx",
+ "sphinx.ext.todo",
+ "sphinx.ext.coverage",
+ "sphinx.ext.ifconfig",
+ "sphinx.ext.viewcode",
+]
-autodoc_default_flags = [ "special-members" ]
+apidoc_module_dir = "../rdflib"
+apidoc_output_dir = "apidocs"
+autodoc_default_options = {"special-members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
-#epydoc_mapping = {
+# epydoc_mapping = {
# '/_static/api/': [r'rdflib\.'],
# }
-
# The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
# The encoding of source files.
-source_encoding = 'utf-8'
+source_encoding = "utf-8"
# The master toctree document.
-master_doc = 'index'
+master_doc = "index"
# General information about the project.
-project = u'rdflib'
-copyright = u'2009 - 2013, RDFLib Team'
+project = u"rdflib"
+copyright = u"2009 - 2020, RDFLib Team"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
+
# Find version. We have to do this because we can't import it in Python 3 until
# its been automatically converted in the setup process.
def find_version(filename):
@@ -66,30 +78,31 @@ def find_version(filename):
if version_match:
return version_match.group(1)
+
# The full version, including alpha/beta/rc tags.
-release = find_version('../rdflib/__init__.py')
+release = find_version("../rdflib/__init__.py")
# The short X.Y version.
version = re.sub("[0-9]+\\.[0-9]\\..*", "\1", release)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
-#language = None
+# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
-#today = ''
+# today = ''
# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
-#unused_docs = []
+# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
-exclude_trees = ['_build', 'draft']
+exclude_trees = ["_build", "draft"]
# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
@@ -100,125 +113,130 @@ add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
-#show_authors = False
+# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
-html_theme = 'armstrong'
+html_theme = "armstrong"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
-#html_theme_options = {}
+# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
-html_theme_path = ["_themes", ]
+html_theme_path = [
+ "_themes",
+]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
-#html_title = None
+# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-#html_logo = None
-html_logo = '_static/logo.svg'
+# html_logo = None
+html_logo = "_static/RDFlib.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
-#html_favicon = None
+html_favicon = "_static/RDFlib.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
# If false, no module index is generated.
-#html_use_modindex = True
+# html_use_modindex = True
# If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
# If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
+# html_file_suffix = ''
# Output file base name for HTML help builder.
-htmlhelp_basename = 'rdflibdoc'
+htmlhelp_basename = "rdflibdoc"
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
+# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
+# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
- ('index', 'rdflib.tex', u'rdflib Documentation',
- u'RDFLib Team', 'manual'),
+ ("index", "rdflib.tex", u"rdflib Documentation", u"RDFLib Team", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
-#latex_logo = None
+# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
-#latex_use_parts = False
+# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
+# latex_preamble = ''
# Documents to append as an appendix to all manuals.
-#latex_appendices = []
+# latex_appendices = []
# If false, no module index is generated.
-#latex_use_modindex = True
+# latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
- 'python': ('http://docs.python.org/2.7', None),
+ "python": ("https://docs.python.org/3.7", None),
}
+
+html_experimental_html5_writer = True
+
+needs_sphinx = "2.4"
diff --git a/docs/docs.rst b/docs/docs.rst
index e193af6b..c2a4a198 100644
--- a/docs/docs.rst
+++ b/docs/docs.rst
@@ -31,16 +31,6 @@ Then you can do:
The docs will be generated in :file:`build/sphinx/html/`
-Syntax highlighting
--------------------
-
-To get N3 and SPARQL syntax highlighting do:
-
-.. code-block:: bash
-
- pip install -e git+git://github.com/gjhiggins/sparql_pygments_lexer.git#egg=SPARQL_Pygments_Lexer
- pip install -e git+git://github.com/gjhiggins/n3_pygments_lexer.git#egg=Notation3_Pygments_Lexer
-
API Docs
--------
diff --git a/docs/faq.rst b/docs/faq.rst
deleted file mode 100644
index 0bf2b9e3..00000000
--- a/docs/faq.rst
+++ /dev/null
@@ -1,48 +0,0 @@
-=============================================
-Frequently Asked Questions about using RDFLib
-=============================================
-
-Questions about parsing
-=======================
-
-Questions about manipulating
-============================
-
-Questions about serializing
-===========================
-
-Which serialization method is the most efficient?
-=================================================
-
-Currently, the "nt" output format uses the most efficient
-serialization; "rdf/xml" should also be efficient. You can
-serialize to these formats using code similar to the following::
-
- myGraph.serialize(target_nt, format="nt")
- myGraph.serialize(target_rdfxml, format="xml")
-
-How can I use some of the abbreviated RDF/XML syntax?
-=====================================================
-
-Use the "pretty-xml" `format` argument to the `serialize` method::
-
- myGraph.serialize(target_pretty, format="pretty-xml")
-
-How can I control the binding of prefixes to XML namespaces when using RDF/XML?
-===============================================================================
-
-Each graph comes with a `NamespaceManager`__ instance in the `namespace_manager` field; you can use the `bind` method of this instance to bind a prefix to a namespace URI::
-
-
- myGraph.namespace_manager.bind('prefix', URIRef('scheme:my-namespace-uri:'))
-
-__ http://rdflib.net/rdflib-2.4.0/html/public/rdflib.syntax.NamespaceManager.NamespaceManager-class.html
-
-Does RDFLib support serialization to the `TriX`__ format?
-=========================================================
-
-Yes, both parsing and serialising is supported::
-
- graph.serialize(format="trix") and graph.load(source, format="trix")
-
-__ http://www.w3.org/2004/03/trix/
diff --git a/docs/gettingstarted.rst b/docs/gettingstarted.rst
index 41d54cc1..a3637210 100644
--- a/docs/gettingstarted.rst
+++ b/docs/gettingstarted.rst
@@ -17,16 +17,27 @@ The best way to install RDFLib is to use ``pip`` (sudo as required):
$ pip install rdflib
-Support is available through the rdflib-dev group:
+If you want the latest code to run, clone the master branch of the GitHub repo and use that.
+
+Support
+=======
+Usage support is available via questions tagged with ``[rdflib]`` on `StackOverflow <https://stackoverflow.com/questions/tagged/rdflib>`__
+and development support, notifications and detailed discussion through the rdflib-dev group (mailing list):
http://groups.google.com/group/rdflib-dev
-and on the IRC channel `#rdflib <irc://irc.freenode.net/swig>`_ on the freenode.net server
+If you notice a bug or want to request an enhancement, please do so via our Issue Tracker on GitHub:
-The primary interface that RDFLib exposes for working with RDF is a
-:class:`~rdflib.graph.Graph`. The package uses various Python idioms
+ `<http://github.com/RDFLib/rdflib/issues>`_
+
+How it all works
+================
+*The package uses various Python idioms
that offer an appropriate way to introduce RDF to a Python programmer
-who hasn't worked with RDF before.
+who hasn't worked with RDF before.*
+
+The primary interface that RDFLib exposes for working with RDF is a
+:class:`~rdflib.graph.Graph`.
RDFLib graphs are not sorted containers; they have ordinary ``set``
operations (e.g. :meth:`~rdflib.Graph.add` to add a triple) plus
@@ -35,12 +46,12 @@ methods that search triples and return them in arbitrary order.
RDFLib graphs also redefine certain built-in Python methods in order
to behave in a predictable way; they `emulate container types
<http://docs.python.org/release/2.5.2/ref/sequence-types.html>`_ and
-are best thought of as a set of 3-item triples:
+are best thought of as a set of 3-item tuples ("triples", in RDF-speak):
.. code-block:: text
[
- (subject, predicate, object),
+ (subject0, predicate0, object0),
(subject1, predicate1, object1),
...
(subjectN, predicateN, objectN)
@@ -52,52 +63,77 @@ A tiny usage example:
import rdflib
+ # create a Graph
g = rdflib.Graph()
- result = g.parse("http://www.w3.org/People/Berners-Lee/card")
- print("graph has %s statements." % len(g))
- # prints graph has 79 statements.
+ # parse in an RDF file hosted on the Internet
+ result = g.parse("http://www.w3.org/People/Berners-Lee/card")
+ # loop through each triple in the graph (subj, pred, obj)
for subj, pred, obj in g:
- if (subj, pred, obj) not in g:
+ # check if there is at least one triple in the Graph
+ if (subj, pred, obj) not in g:
raise Exception("It better be!")
- s = g.serialize(format='n3')
+ # print the number of "triples" in the Graph
+ print("graph has {} statements.".format(len(g)))
+ # prints graph has 86 statements.
-A more extensive example:
+ # print out the entire Graph in the RDF Turtle format
+ print(g.serialize(format="turtle").decode("utf-8"))
+
+Here a :class:`~rdflib.graph.Graph` is created and then an RDF file online, Tim Berners-Lee's social network details, is
+parsed into that graph. The ``print()`` statement uses the ``len()`` function to count the number of triples in the
+graph.
+A more extensive example:
.. code-block:: python
- from rdflib import Graph, Literal, BNode, Namespace, RDF, URIRef
- from rdflib.namespace import DC, FOAF
+ from rdflib import Graph, Literal, RDF, URIRef
+ # rdflib knows about some namespaces, like FOAF
+ from rdflib.namespace import FOAF , XSD
+ # create a Graph
g = Graph()
- # Create an identifier to use as the subject for Donna.
- donna = BNode()
+ # Create an RDF URI node to use as the subject for multiple triples
+ donna = URIRef("http://example.org/donna")
+
+ # Add triples using store's add() method.
+ g.add((donna, RDF.type, FOAF.Person))
+    g.add((donna, FOAF.nick, Literal("donna", lang="en")))
+ g.add((donna, FOAF.name, Literal("Donna Fales")))
+ g.add((donna, FOAF.mbox, URIRef("mailto:donna@example.org")))
- # Add triples using store's add method.
- g.add( (donna, RDF.type, FOAF.Person) )
- g.add( (donna, FOAF.nick, Literal("donna", lang="foo")) )
- g.add( (donna, FOAF.name, Literal("Donna Fales")) )
- g.add( (donna, FOAF.mbox, URIRef("mailto:donna@example.org")) )
+ # Add another person
+ ed = URIRef("http://example.org/edward")
+
+ # Add triples using store's add() method.
+ g.add((ed, RDF.type, FOAF.Person))
+ g.add((ed, FOAF.nick, Literal("ed", datatype=XSD.string)))
+ g.add((ed, FOAF.name, Literal("Edward Scissorhands")))
+ g.add((ed, FOAF.mbox, URIRef("mailto:e.scissorhands@example.org")))
# Iterate over triples in store and print them out.
print("--- printing raw triples ---")
for s, p, o in g:
print((s, p, o))
- # For each foaf:Person in the store print out its mbox property.
+ # For each foaf:Person in the store, print out their mbox property's value.
print("--- printing mboxes ---")
for person in g.subjects(RDF.type, FOAF.Person):
for mbox in g.objects(person, FOAF.mbox):
print(mbox)
- # Bind a few prefix, namespace pairs for more readable output
- g.bind("dc", DC)
+ # Bind the FOAF namespace to a prefix for more readable output
g.bind("foaf", FOAF)
- print( g.serialize(format='n3') )
+ # print all the data in the Notation3 format
+    print("--- printing the whole graph in N3 ---")
+ print(g.serialize(format='n3').decode("utf-8"))
+
-Many more :doc:`examples <apidocs/examples>` can be found in the :file:`examples` folder in the source distribution.
+More examples
+=============
+There are many more :doc:`examples <apidocs/examples>` in the :file:`examples` folder in the source distribution.
diff --git a/docs/index.rst b/docs/index.rst
index 66d88e16..a76ef159 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -4,20 +4,32 @@
rdflib |release|
================
-RDFLib is a pure Python package for working with `RDF <http://www.w3.org/RDF/>`_. RDFLib contains useful APIs for working with RDF, including:
+RDFLib is a pure Python package for working with `RDF <http://www.w3.org/RDF/>`_. RDFLib contains useful APIs for
+working with RDF, including:
-* parsers and serializers for RDF/XML, N3, NTriples, N-Quads, Turtle, TriX, RDFa and Microdata.
+* **Parsers & Serializers**
-* a Graph interface which can be backed by a number of store implementations.
+ * for RDF/XML, N3, NTriples, N-Quads, Turtle, TriX, RDFa and Microdata
+ * and JSON-LD, via a plugin module
-* store implementations for in memory storage and persistent storage on top of the Berkeley DB.
+* **Store implementations**
+
+ * for in-memory and persistent RDF storage - Berkeley DB
+
+* **Graph interface**
+
+ * to a single graph
+ * or a conjunctive graph (multiple Named Graphs)
+ * or a dataset of graphs
+
+* **SPARQL 1.1 implementation**
+
+ * supporting both Queries and Updates
-* a SPARQL 1.1 implementation - supporting SPARQL 1.1 Queries and Update statements.
Getting started
---------------
-
-If you have never used RDFLib, the following will help get you started
+If you have never used RDFLib, the following will help get you started:
.. toctree::
:maxdepth: 1
@@ -27,14 +39,12 @@ If you have never used RDFLib, the following will help get you started
intro_to_creating_rdf
intro_to_graphs
intro_to_sparql
-
utilities
+ Examples <apidocs/examples>
- RDFLib examples <apidocs/examples>
In depth
--------
-
If you are familiar with RDF and are looking for details on how RDFLib handles RDF, these are for you.
.. toctree::
@@ -44,37 +54,31 @@ If you are familiar with RDF and are looking for details on how RDFLib handles R
namespaces_and_bindings
persistence
merging
-
- upgrade3to4
- upgrade2to3
-
- faq
+ upgrade4to5
Reference
---------
-
The nitty-gritty details of everything.
-.. toctree::
- :maxdepth: 2
-
- plugins
+API reference:
.. toctree::
:maxdepth: 1
apidocs/modules
-* :ref:`genindex`
-* :ref:`modindex`
-
+.. toctree::
+ :maxdepth: 2
+
+ plugins
+.. * :ref:`genindex`
+.. * :ref:`modindex`
For developers
--------------
-
.. toctree::
:maxdepth: 1
@@ -83,42 +87,24 @@ For developers
univrdfstore
persisting_n3_terms
+Developers might also like to join rdflib's dev mailing list: `<https://groups.google.com/group/rdflib-dev>`__
+The Code
+--------
+The rdflib code is hosted on GitHub at `<https://github.com/RDFLib/rdflib>`__ where you lodge Issues and also Pull
+Requests to help improve this community project!
-Indices and tables
-------------------
+The RDFlib organisation on GitHub at `<https://github.com/RDFLib>`__ maintains this package and a number of other RDF
+and related packages that you might also find useful.
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
-.. glossary::
+Further help
+------------
+For asynchronous chat support, try our gitter channel at `<https://gitter.im/RDFLib/rdflib>`__
- functional properties
-
- A functional property is a property that can
- have only one (unique) value y for each instance x, i.e. there
- cannot be two distinct values y1 and y2 such that the pairs
- (x,y1) and (x,y2) are both instances of this
- property. -- http://www.w3.org/TR/owl-ref/#FunctionalProperty-def
+If you would like more help with using rdflib, please post a question using the tag ``[rdflib]`` on StackOverflow. A list of
+existing ``[rdflib]`` tagged questions can be found at:
- graph
- An RDF graph is a set of RDF triples. The set of nodes of an RDF graph
- is the set of subjects and objects of triples in the graph.
-
- named graph
- Named Graphs allow multiple RDF graphs to exist within a single
- document/repository. They can be referred to with URIs. Named graphs allow queries across multiple domains.
- -- http://www.w3.org/2004/03/trix/
+* `<https://stackoverflow.com/questions/tagged/rdflib>`__
- transitivity
- A property is transitive:
-
- if whenever an element ``a`` is related to an element
- ``b``, and ``b`` is in turn related to an element ``c``,
- then ``a`` is also related to ``c``. --
- http://en.wikipedia.org/wiki/Transitive_relation
-
- Standard examples include ``rdfs:subClassOf`` or greater-than
-
diff --git a/docs/intro_to_creating_rdf.rst b/docs/intro_to_creating_rdf.rst
index 1a2b8de5..5d58945b 100644
--- a/docs/intro_to_creating_rdf.rst
+++ b/docs/intro_to_creating_rdf.rst
@@ -7,24 +7,29 @@ Creating RDF triples
Creating Nodes
--------------
-RDF is a graph where the nodes are URI references, Blank Nodes or Literals, in RDFLib represented by the classes :class:`~rdflib.term.URIRef`, :class:`~rdflib.term.BNode`, and :class:`~rdflib.term.Literal`. ``URIRefs`` and ``BNodes`` can both be thought of as resources, such a person, a company, a web-site, etc.
-A ``BNode`` is a node where the exact URI is not known.
-``URIRefs`` are also used to represent the properties/predicates in the RDF graph.
-``Literals`` represent attribute values, such as a name, a date, a number, etc.
+RDF data is a graph where the nodes are URI references, Blank Nodes or Literals. In RDFLib, these node types are
+represented by the classes :class:`~rdflib.term.URIRef`, :class:`~rdflib.term.BNode`, and :class:`~rdflib.term.Literal`.
+``URIRefs`` and ``BNodes`` can both be thought of as resources, such as a person, a company, a website, etc.
+* A ``BNode`` is a node where the exact URI is not known.
+* A ``URIRef`` is a node where the exact URI is known. ``URIRef``\s are also used to represent the properties/predicates in the RDF graph.
+* ``Literals`` represent attribute values, such as a name, a date, a number, etc. The most common literal values are XML data types, e.g. string, int...
-Nodes can be created by the constructors of the node classes::
+
+Nodes can be created by the constructors of the node classes:
+
+.. code-block:: python
from rdflib import URIRef, BNode, Literal
bob = URIRef("http://example.org/people/Bob")
- linda = BNode() # a GUID is generated
+ linda = BNode() # a GUID is generated
- name = Literal('Bob') # passing a string
- age = Literal(24) # passing a python int
- height = Literal(76.5) # passing a python float
+ name = Literal('Bob') # passing a string
+ age = Literal(24) # passing a python int
+ height = Literal(76.5) # passing a python float
-Literals can be created from python objects, this creates ``data-typed literals``, for the details on the mapping see :ref:`rdflibliterals`.
+Literals can be created from Python objects, this creates ``data-typed literals``, for the details on the mapping see :ref:`rdflibliterals`.
For creating many ``URIRefs`` in the same ``namespace``, i.e. URIs with the same prefix, RDFLib has the :class:`rdflib.namespace.Namespace` class::
@@ -32,97 +37,137 @@ For creating many ``URIRefs`` in the same ``namespace``, i.e. URIs with the same
n = Namespace("http://example.org/people/")
- n.bob # = rdflib.term.URIRef(u'http://example.org/people/bob')
- n.eve # = rdflib.term.URIRef(u'http://example.org/people/eve')
+ n.bob # = rdflib.term.URIRef(u'http://example.org/people/bob')
+ n.eve # = rdflib.term.URIRef(u'http://example.org/people/eve')
-This is very useful for schemas where all properties and classes have the same URI prefix, RDFLib pre-defines Namespaces for the most common RDF schemas::
+This is very useful for schemas where all properties and classes have the same URI prefix. RDFLib defines Namespaces for some common RDF/OWL schemas, including most W3C ones:
+
+.. code-block:: python
+
+ from rdflib.namespace import CSVW, DC, DCAT, DCTERMS, DOAP, FOAF, ODRL2, ORG, OWL, \
+ PROF, PROV, RDF, RDFS, SDO, SH, SKOS, SOSA, SSN, TIME, \
+ VOID, XMLNS, XSD
- from rdflib.namespace import RDF, FOAF
+ RDF.type
+ # = rdflib.term.URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type")
- RDF.type
- # = rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type')
+ FOAF.knows
+ # = rdflib.term.URIRef("http://xmlns.com/foaf/0.1/knows")
+
+ PROF.isProfileOf
+ # = rdflib.term.URIRef("http://www.w3.org/ns/dx/prof/isProfileOf")
+
+ SOSA.Sensor
+ # = rdflib.term.URIRef("http://www.w3.org/ns/sosa/Sensor")
- FOAF.knows
- # = rdflib.term.URIRef(u'http://xmlns.com/foaf/0.1/knows')
Adding Triples
--------------
-We already saw in :doc:`intro_to_parsing`, how triples can be added with with the :meth:`~rdflib.graph.Graph.parse` function.
+We already saw in :doc:`intro_to_parsing`, how triples can be added from files and online locations with the :meth:`~rdflib.graph.Graph.parse` function.
-Triples can also be added with the :meth:`~rdflib.graph.Graph.add` function:
+Triples can also be added within Python code directly, using the :meth:`~rdflib.graph.Graph.add` function:
.. automethod:: rdflib.graph.Graph.add
:noindex:
-:meth:`~rdflib.graph.Graph.add` takes a 3-tuple of RDFLib nodes. Try the following with the nodes and namespaces we defined previously::
+:meth:`~rdflib.graph.Graph.add` takes a 3-tuple (a "triple") of RDFLib nodes. Try the following with the nodes and namespaces we defined previously:
+
+.. code-block:: python
- from rdflib import Graph
- g = Graph()
+ from rdflib import Graph
+ g = Graph()
+ g.bind("foaf", FOAF)
- g.add( (bob, RDF.type, FOAF.Person) )
- g.add( (bob, FOAF.name, name) )
- g.add( (bob, FOAF.knows, linda) )
- g.add( (linda, RDF.type, FOAF.Person) )
- g.add( (linda, FOAF.name, Literal('Linda') ) )
+ g.add((bob, RDF.type, FOAF.Person))
+ g.add((bob, FOAF.name, name))
+ g.add((bob, FOAF.knows, linda))
+ g.add((linda, RDF.type, FOAF.Person))
+ g.add((linda, FOAF.name, Literal("Linda")))
- print g.serialize(format='turtle')
+ print(g.serialize(format="turtle").decode("utf-8"))
outputs:
-.. code-block:: n3
+.. code-block:: Turtle
- @prefix foaf: <http://xmlns.com/foaf/0.1/> .
- @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
- @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
- @prefix xml: <http://www.w3.org/XML/1998/namespace> .
+ @prefix foaf: <http://xmlns.com/foaf/0.1/> .
- <http://example.org/people/Bob> a foaf:Person ;
- foaf:knows [ a foaf:Person ;
- foaf:name "Linda" ] ;
- foaf:name "Bob" .
+ <http://example.org/people/Bob> a foaf:Person ;
+ foaf:knows [ a foaf:Person ;
+ foaf:name "Linda" ] ;
+ foaf:name "Bob" .
For some properties, only one value per resource makes sense (i.e they are *functional properties*, or have max-cardinality of 1). The :meth:`~rdflib.graph.Graph.set` method is useful for this:
.. code-block:: python
- g.add( ( bob, FOAF.age, Literal(42) ) )
- print "Bob is ", g.value( bob, FOAF.age )
- # prints: Bob is 42
-
- g.set( ( bob, FOAF.age, Literal(43) ) ) # replaces 42 set above
- print "Bob is now ", g.value( bob, FOAF.age )
- # prints: Bob is now 43
+ g.add((bob, FOAF.age, Literal(42)))
+ print("Bob is ", g.value(bob, FOAF.age))
+ # prints: Bob is 42
+
+ g.set((bob, FOAF.age, Literal(43))) # replaces 42 set above
+ print("Bob is now ", g.value(bob, FOAF.age))
+ # prints: Bob is now 43
:meth:`rdflib.graph.Graph.value` is the matching query method, it will return a single value for a property, optionally raising an exception if there are more.
You can also add triples by combining entire graphs, see :ref:`graph-setops`.
+
Removing Triples
-^^^^^^^^^^^^^^^^
+----------------
Similarly, triples can be removed by a call to :meth:`~rdflib.graph.Graph.remove`:
.. automethod:: rdflib.graph.Graph.remove
:noindex:
-When removing, it is possible to leave parts of the triple unspecified (i.e. passing ``None``), this will remove all matching triples::
+When removing, it is possible to leave parts of the triple unspecified (i.e. passing ``None``), this will remove all matching triples:
+
+.. code-block:: python
+
+ g.remove((bob, None, None)) # remove all triples about bob
- g.remove( (bob, None, None) ) # remove all triples about bob
An example
-^^^^^^^^^^
+----------
LiveJournal produces FOAF data for their users, but they seem to use
-``foaf:member_name`` for a person's full name. To align with data from
-other sources, it would be nice to have ``foaf:name`` act as a synonym
-for ``foaf:member_name`` (a poor man's one-way
-``owl:equivalentProperty``):
+``foaf:member_name`` for a person's full name but ``foaf:member_name``
+isn't in FOAF's namespace and perhaps they should have used ``foaf:name``
+
+To retrieve some LiveJournal data, add a ``foaf:name`` for every
+``foaf:member_name`` and then remove the ``foaf:member_name`` values to
+ensure the data actually aligns with other FOAF data, we could do this:
.. code-block:: python
+ from rdflib import Graph
from rdflib.namespace import FOAF
- g.parse("http://danbri.livejournal.com/data/foaf")
- for s,_,n in g.triples((None, FOAF['member_name'], None)):
- g.add((s, FOAF['name'], n))
+
+ g = Graph()
+ # get the data
+ g.parse("http://danbri.livejournal.com/data/foaf")
+
+ # for every foaf:member_name, add foaf:name and remove foaf:member_name
+ for s, p, o in g.triples((None, FOAF['member_name'], None)):
+ g.add((s, FOAF['name'], o))
+ g.remove((s, FOAF['member_name'], o))
+
+.. note:: Since rdflib 5.0.0, using ``foaf:member_name`` is somewhat prevented in RDFlib since FOAF is declared
+ as a :class:`~rdflib.namespace.ClosedNamespace` class instance that has a closed set of members and
+ ``foaf:member_name`` isn't one of them! If LiveJournal used RDFlib 5.0.0, an error would have been raised for
+ ``foaf:member_name`` when the triple was created.
+
+
+Creating Containers & Collections
+---------------------------------
+There are two convenience classes for RDF Containers & Collections which you can use instead of declaring each
+triple of a Container or a Collection individually:
+
+ * :class:`~rdflib.container.Container` (also ``Bag``, ``Seq`` & ``Alt``) and
+ * :class:`~rdflib.collection.Collection`
+
+See their documentation for how.
diff --git a/docs/intro_to_graphs.rst b/docs/intro_to_graphs.rst
index 0c76e6f4..aa0fecf3 100644
--- a/docs/intro_to_graphs.rst
+++ b/docs/intro_to_graphs.rst
@@ -4,7 +4,7 @@
Navigating Graphs
=================
-An RDF Graph is a set of RDF triples, and we try to mirror exactly this in RDFLib, and the graph tries to emulate a container type:
+An RDF Graph is a set of RDF triples, and we try to mirror exactly this in RDFLib. The Python :class:`~rdflib.graph.Graph` tries to emulate a container type.
Graphs as Iterators
-------------------
@@ -13,25 +13,29 @@ RDFLib graphs override :meth:`~rdflib.graph.Graph.__iter__` in order to support
.. code-block:: python
- for subject,predicate,obj in someGraph:
- if not (subject,predicate,obj) in someGraph:
- raise Exception("Iterator / Container Protocols are Broken!!")
+ for subject, predicate, object in someGraph:
+ if not (subject, predicate, object) in someGraph:
+ raise Exception("Iterator / Container Protocols are Broken!!")
Contains check
--------------
-Graphs implement :meth:`~rdflib.graph.Graph.__contains__`, so you can check if a triple is in a graph with ``triple in graph`` syntax::
+Graphs implement :meth:`~rdflib.graph.Graph.__contains__`, so you can check if a triple is in a graph with ``triple in graph`` syntax:
- from rdflib import URIRef
- from rdflib.namespace import RDF
- bob = URIRef("http://example.org/people/bob")
- if ( bob, RDF.type, FOAF.Person ) in graph:
- print "This graph knows that Bob is a person!"
+.. code-block:: python
+
+ from rdflib import URIRef
+ from rdflib.namespace import RDF
+ bob = URIRef("http://example.org/people/bob")
+ if (bob, RDF.type, FOAF.Person) in graph:
+ print("This graph knows that Bob is a person!")
-Note that this triple does not have to be completely bound::
+Note that this triple does not have to be completely bound:
+
+.. code-block:: python
- if (bob, None, None) in graph:
- print "This graph contains triples about Bob!"
+ if (bob, None, None) in graph:
+ print("This graph contains triples about Bob!")
.. _graph-setops:
@@ -51,40 +55,41 @@ operation effect
``G1 ^ G2`` xor (triples in either G1 or G2, but not in both)
============ ==================================================
-.. warning:: Set-operations on graphs assume bnodes are shared between graphs. This may or may not do what you want. See :doc:`merging` for details.
+.. warning:: Set-operations on graphs assume Blank Nodes are shared between graphs. This may or may not do what you want. See :doc:`merging` for details.
Basic Triple Matching
---------------------
Instead of iterating through all triples, RDFLib graphs support basic triple pattern matching with a :meth:`~rdflib.graph.Graph.triples` function.
-This function is a generator of triples that match the pattern given by the arguments. The arguments of these are RDF terms that restrict the triples that are returned. Terms that are :data:`None` are treated as a wildcard. For example::
+This function is a generator of triples that match the pattern given by the arguments. The arguments of these are RDF terms that restrict the triples that are returned. Terms that are :data:`None` are treated as a wildcard. For example:
+.. code-block:: python
- g.load("some_foaf.rdf")
- for s,p,o in g.triples( (None, RDF.type, FOAF.Person) ):
- print "%s is a person"%s
-
- for s,p,o in g.triples( (None, RDF.type, None) ):
- print "%s is a %s"%(s,o)
-
- bobgraph = Graph()
+ g.load("some_foaf.rdf")
+ for s, p, o in g.triples((None, RDF.type, FOAF.Person)):
+ print("{} is a person".format(s))
- bobgraph += g.triples( (bob, None, None) )
+ for s, p, o in g.triples((None, RDF.type, None)):
+ print("{} is a {}".format(s, o))
-If you are not interested in whole triples, you can get only the bits you want with the methods :meth:`~rdflib.graph.Graph.objects`, :meth:`~rdflib.graph.Graph.subjects`, :meth:`~rdflib.graph.Graph.predicates`, :meth:`~rdflib.graph.Graph.predicate_objects`, etc. Each take parameters for the components of the triple to constraint::
+ bobgraph = Graph()
- for person in g.subjects(RDF.type, FOAF.Person):
- print "%s is a person"%person
+ bobgraph += g.triples((bob, None, None))
+If you are not interested in whole triples, you can get only the bits you want with the methods :meth:`~rdflib.graph.Graph.objects`, :meth:`~rdflib.graph.Graph.subjects`, :meth:`~rdflib.graph.Graph.predicates`, :meth:`~rdflib.graph.Graph.predicate_objects`, etc. Each take parameters for the components of the triple to constraint:
-Finally, for some properties, only one value per resource makes sense (i.e they are *functional properties*, or have max-cardinality of 1). The :meth:`~rdflib.graph.Graph.value` method is useful for this, as it returns just a single node, not a generator::
+.. code-block:: python
- name = g.value(bob, FOAF.name) # get any name of bob
- # get the one person that knows bob and raise an exception if more are found
- mbox = g.value(predicate = FOAF.name, object = bob, any = False)
+ for person in g.subjects(RDF.type, FOAF.Person):
+ print("{} is a person".format(person))
+Finally, for some properties, only one value per resource makes sense (i.e they are *functional properties*, or have max-cardinality of 1). The :meth:`~rdflib.graph.Graph.value` method is useful for this, as it returns just a single node, not a generator:
+.. code-block:: python
+ name = g.value(bob, FOAF.name) # get any name of bob
+ # get the one person that knows bob and raise an exception if more are found
+ mbox = g.value(predicate = FOAF.name, object=bob, any=False)
:class:`~rdflib.graph.Graph` methods for accessing triples
@@ -112,8 +117,3 @@ Here is a list of all convenience methods for querying Graphs:
:noindex:
.. automethod:: rdflib.graph.Graph.predicate_objects
:noindex:
-
-
-
-
-
diff --git a/docs/intro_to_parsing.rst b/docs/intro_to_parsing.rst
index fdf9a6bd..b07b3609 100644
--- a/docs/intro_to_parsing.rst
+++ b/docs/intro_to_parsing.rst
@@ -4,15 +4,15 @@
Loading and saving RDF
======================
-Reading an NT file
--------------------
+Reading an n-triples file
+--------------------------
RDF data has various syntaxes (``xml``, ``n3``, ``ntriples``,
-``trix``, etc) that you might want to read. The simplest format is
+``trix``, ``JSON-LD``, etc) that you might want to read. The simplest format is
``ntriples``, a line-based format. Create the file :file:`demo.nt` in
the current directory with these two lines:
-.. code-block:: n3
+.. code-block:: Turtle
<http://bigasterisk.com/foaf.rdf#drewp> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://xmlns.com/foaf/0.1/Person> .
<http://bigasterisk.com/foaf.rdf#drewp> <http://example.com/says> "Hello world" .
@@ -24,14 +24,16 @@ either a mime-type or the name (a :doc:`list of available parsers
file will be, you can use :func:`rdflib.util.guess_format` which will
guess based on the file extension.
-In an interactive python interpreter, try this::
+In an interactive python interpreter, try this:
+
+.. code-block:: python
from rdflib import Graph
g = Graph()
g.parse("demo.nt", format="nt")
- len(g) # prints 2
+ print(len(g)) # prints 2
import pprint
for stmt in g:
@@ -40,7 +42,7 @@ In an interactive python interpreter, try this::
# prints :
(rdflib.term.URIRef('http://bigasterisk.com/foaf.rdf#drewp'),
rdflib.term.URIRef('http://example.com/says'),
- rdflib.term.Literal(u'Hello world'))
+ rdflib.term.Literal('Hello world'))
(rdflib.term.URIRef('http://bigasterisk.com/foaf.rdf#drewp'),
rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
rdflib.term.URIRef('http://xmlns.com/foaf/0.1/Person'))
@@ -52,14 +54,16 @@ subjects, predicates, and objects are all rdflib types.
Reading remote graphs
---------------------
-Reading graphs from the net is just as easy::
+Reading graphs from the net is just as easy:
+
+.. code-block:: python
g.parse("http://bigasterisk.com/foaf.rdf")
- len(g)
+ print(len(g))
# prints 42
The format defaults to ``xml``, which is the common format for .rdf
files you'll find on the net.
RDFLib will also happily read RDF from any file-like object,
-i.e. anything with a ``.read`` method.
+i.e. anything with a ``.read()`` method.
diff --git a/docs/intro_to_sparql.rst b/docs/intro_to_sparql.rst
index 31d1e7fa..97c7f281 100644
--- a/docs/intro_to_sparql.rst
+++ b/docs/intro_to_sparql.rst
@@ -65,12 +65,41 @@ equivalent.
As an alternative to using ``PREFIX`` in the SPARQL query, namespace
bindings can be passed in with the ``initNs`` kwarg, see
-:doc:`namespace_and_bindings`.
+:doc:`namespaces_and_bindings`.
Variables can also be pre-bound, using ``initBindings`` kwarg can be
used to pass in a ``dict`` of initial bindings, this is particularly
useful for prepared queries, as described below.
+Query a Remote Service
+^^^^^^^^^^^^^^^^^^^^^^
+
+The SERVICE keyword of SPARQL 1.1 can send a query to a remote SPARQL endpoint.
+
+.. code-block:: python
+
+ import rdflib
+
+ g = rdflib.Graph()
+ qres = g.query('''
+ SELECT ?s
+ WHERE {
+ SERVICE <http://dbpedia.org/sparql> {
+ ?s <http://purl.org/linguistics/gold/hypernym> <http://dbpedia.org/resource/Leveller> .
+ }
+ } LIMIT 3''')
+ for row in qres:
+ print(row.s)
+
+This example sends a query to `DBPedia
+<https://dbpedia.org/>`_'s SPARQL endpoint service so that it can run the query and then send back the result:
+
+.. code-block:: text
+
+ http://dbpedia.org/resource/Elizabeth_Lilburne
+ http://dbpedia.org/resource/Thomas_Prince_(Leveller)
+ http://dbpedia.org/resource/John_Lilburne
+
Prepared Queries
^^^^^^^^^^^^^^^^
diff --git a/docs/plugintable.py b/docs/plugintable.py
index ddf0fe97..1d64c1a6 100644
--- a/docs/plugintable.py
+++ b/docs/plugintable.py
@@ -12,21 +12,24 @@ cls = sys.argv[1]
p = {}
for (name, kind), plugin in _plugins.items():
- if "/" in name: continue # skip duplicate entries for mimetypes
+ if "/" in name:
+ continue # skip duplicate entries for mimetypes
if cls == kind.__name__:
- p[name]="%s.%s"%(plugin.module_path, plugin.class_name)
+ p[name] = "%s.%s" % (plugin.module_path, plugin.class_name)
+
+l1 = max(len(x) for x in p)
+l2 = max(10 + len(x) for x in p.values())
-l1=max(len(x) for x in p)
-l2=max(10+len(x) for x in p.values())
def hr():
- print("="*l1,"="*l2)
+ print("=" * l1, "=" * l2)
+
hr()
-print("%-*s"%(l1,"Name"), "%-*s"%(l2, "Class"))
+print("%-*s" % (l1, "Name"), "%-*s" % (l2, "Class"))
hr()
for n in sorted(p):
- print("%-*s"%(l1,n), ":class:`~%s`"%p[n])
+ print("%-*s" % (l1, n), ":class:`~%s`" % p[n])
hr()
print()
diff --git a/docs/rdf_terms.rst b/docs/rdf_terms.rst
index c16339de..a520bc5c 100644
--- a/docs/rdf_terms.rst
+++ b/docs/rdf_terms.rst
@@ -20,7 +20,7 @@ matching nodes by term-patterns probably will only be terms and not nodes.
BNodes
======
- In RDF, a blank node (also called BNode) is a node in an RDF graph representing a resource for which a URI or literal is not given. The resource represented by a blank node is also called an anonymous resource. By RDF standard a blank node can only be used as subject or object in an RDF triple, although in some syntaxes like Notation 3 [1] it is acceptable to use a blank node as a predicate. If a blank node has a node ID (not all blank nodes are labelled in all RDF serializations), it is limited in scope to a serialization of a particular RDF graph, i.e. the node p1 in the subsequent example does not represent the same node as a node named p1 in any other graph --`wikipedia`__
+In RDF, a blank node (also called BNode) is a node in an RDF graph representing a resource for which a URI or literal is not given. The resource represented by a blank node is also called an anonymous resource. By RDF standard a blank node can only be used as subject or object in an RDF triple, although in some syntaxes like Notation 3 [1] it is acceptable to use a blank node as a predicate. If a blank node has a node ID (not all blank nodes are labelled in all RDF serializations), it is limited in scope to a serialization of a particular RDF graph, i.e. the node p1 in the subsequent example does not represent the same node as a node named p1 in any other graph --`wikipedia`__
.. __: http://en.wikipedia.org/wiki/Blank_node
@@ -40,7 +40,7 @@ BNodes
URIRefs
=======
- A URI reference within an RDF graph is a Unicode string that does not contain any control characters ( #x00 - #x1F, #x7F-#x9F) and would produce a valid URI character sequence representing an absolute URI with optional fragment identifier -- `W3 RDF Concepts`__
+A URI reference within an RDF graph is a Unicode string that does not contain any control characters ( #x00 - #x1F, #x7F-#x9F) and would produce a valid URI character sequence representing an absolute URI with optional fragment identifier -- `W3 RDF Concepts`__
.. __: http://www.w3.org/TR/rdf-concepts/#section-Graph-URIref
@@ -159,4 +159,3 @@ All this happens automatically when creating ``Literal`` objects by passing Pyth
You can add custom data-types with :func:`rdflib.term.bind`, see also :mod:`examples.custom_datatype`
-
diff --git a/docs/sphinx-requirements.txt b/docs/sphinx-requirements.txt
index d772747b..45583540 100644
--- a/docs/sphinx-requirements.txt
+++ b/docs/sphinx-requirements.txt
@@ -1,2 +1,3 @@
--e git+git://github.com/gjhiggins/n3_pygments_lexer.git#egg=Notation3_Pygments_Lexer
--e git+git://github.com/gjhiggins/sparql_pygments_lexer.git#egg=SPARQL_Pygments_Lexer
+sphinx==3.0.3
+sphinxcontrib-apidoc
+git+https://github.com/gniezen/n3pygments.git
diff --git a/docs/univrdfstore.rst b/docs/univrdfstore.rst
index a1c54e90..f6822e5b 100644
--- a/docs/univrdfstore.rst
+++ b/docs/univrdfstore.rst
@@ -6,9 +6,9 @@ A Universal RDF Store Interface
This document attempts to summarize some fundamental components of an RDF store. The motivation is to outline a standard set of interfaces for providing the support needed to persist an `RDF Graph`_ in a way that is universal and not tied to any specific implementation.
-For the most part, the interface adheres to the core RDF model and uses terminology that is consistent with the RDF Model specifications. However, this suggested interface also extends an RDF store with additional requirements necessary to facilitate those aspects of `Notation 3`_ that go beyond the RDF model to provide a framework for `First Order Predicate Logic`_ processing and persistence.
+For the most part, the interfaces adhere to the core RDF model and use terminology that is consistent with the RDF Model specifications. However, these suggested interfaces also extend an RDF store with additional requirements necessary to facilitate those aspects of `Notation 3`_ that go beyond the RDF model to provide a framework for `First Order Predicate Logic`_ processing and persistence.
-.. _RDF Graph: http://www.w3.org/TR/rdf-concepts/#dfn-rdf-graph
+.. _RDF Graph: https://www.w3.org/TR/rdf11-concepts/#data-model
.. _Notation 3: http://www.w3.org/2000/10/swap/Primer
.. _First Order Predicate Logic: http://en.wikipedia.org/wiki/First-order_predicate_logic
@@ -173,7 +173,7 @@ Formulae and variables are distinguishable from URI references, Literals, and BN
They must also be distinguishable in persistence to ensure they can be round-tripped.
-.. note:: There are a number of other issues regarding the :doc:`persisting of N3 terms <persisting_n3_terms>`_.
+.. note:: There are a number of other issues regarding the :doc:`persisting_n3_terms`.
Database Management
===================
@@ -200,6 +200,7 @@ Triple Interfaces
An RDF store could provide a standard set of interfaces for the manipulation, management, and/or retrieval of its contained triples (asserted or quoted):
.. module:: rdflib.store
+ :noindex:
.. automethod:: rdflib.store.Store.add
:noindex:
diff --git a/docs/upgrade2to3.rst b/docs/upgrade2to3.rst
deleted file mode 100644
index 4773c176..00000000
--- a/docs/upgrade2to3.rst
+++ /dev/null
@@ -1,60 +0,0 @@
-.. _upgrade2to3: Upgrading from RDFLib version 2.X to 3.X
-
-========================================
-Upgrading from RDFLib version 2.X to 3.X
-========================================
-
-Introduction
-============
-This page details the changes required to upgrade from RDFLib 2.X to 3.X.
-
-Some older Linux distributions still ship 2.4.X. If needed, you can also install 2.4 using easy_install/setup tools.
-
-Version 3.0 reorganised some packages, and moved non-core parts of rdflib to the `rdfextras project <http://code.google.com/p/rdfextras/>`_
-
-
-Features moved to rdfextras
-===========================
-
- * SPARQL Support is now in rdfextras / rdflib-sparql
- * The RDF Commandline tools are now in rdfextras
-
-.. warning:: If you install packages with just distutils - you will need to register the sparql plugins manually - we strongly recommend installing with setuptools or distribute!
- To register the plugins add this somewhere in your program:
-
- .. code-block:: python
-
- rdflib.plugin.register('sparql', rdflib.query.Processor,
- 'rdfextras.sparql.processor', 'Processor')
- rdflib.plugin.register('sparql', rdflib.query.Result,
- 'rdfextras.sparql.query', 'SPARQLQueryResult')
-
-
-Unstable features that were removed
-===================================
-
- The RDBMS back stores (MySQL/PostgreSQL) were removed, but are in the process of being moved to rdfextras. The Redland, SQLite and ZODB stores were all removed.
-
-Packages/Classes that were renamed
-==================================
-
-Previously all packages and classes had colliding names, i.e. both package and the class was called "Graph"::
-
- from rdflib.Graph import Graph, ConjunctiveGraph
-
-Now all packages are lower-case, i.e::
-
- from rdflib.graph import Graph, ConjunctiveGraph
-
-Most classes you need are available from the top level rdflib package::
-
- from rdflib import Graph, URIRef, BNode, Literal
-
-Namespace classes for RDF, RDFS, OWL are now directly in the rdflib package, i.e. in 2.4::
-
- from rdflib.RDF import RDFNS as RDF
-
-in 3.0::
-
- from rdflib import RDF
-
diff --git a/docs/upgrade3to4.rst b/docs/upgrade3to4.rst
deleted file mode 100644
index 574d129a..00000000
--- a/docs/upgrade3to4.rst
+++ /dev/null
@@ -1,92 +0,0 @@
-.. _upgrade3to4: Upgrading from RDFLib version 3.X to 4.X
-
-========================================
-Upgrading from RDFLib version 3.X to 4.X
-========================================
-
-RDFLib version 4.0 introduced a small number of backwards compatible changes that you should know about when porting code from version 3 or earlier.
-
-SPARQL and SPARQLStore are now included in core
------------------------------------------------
-
-For version 4.0 we've merged the SPARQL implementation from ``rdflib-sparql``, the SPARQL(Update)Store from ``rdflib-sparqlstore`` and miscellaneous utilities from ``rdfextras``. If you used any of these previously, everything you need should now be included.
-
-
-Datatyped literals
-------------------
-
-We separate lexical and value space operations for datatyped literals.
-
-This mainly affects the way datatyped literals are compared. Lexical space comparisons are done by default for ``==`` and ``!=``, meaning the exact lexical representation and the exact data-types have to match for literals to be equal. Value space comparisons are also available through the :meth:`rdflib.term.Identifier.eq` and :meth:`rdflib.term.Identifier.neq` methods, ``< > <= >=`` are also done in value space.
-
-Most things now work in a fairly sane and sensible way, if you do not have existing stores/intermediate stored sorted lists, or hash-dependent something-or-other, you should be good to go.
-
-Things to watch out for:
-
-Literals no longer compare equal across data-types with ```==```
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-i.e.
-
-.. code-block:: python
-
- >>> Literal(2, datatype=XSD.int) == Literal(2, datatype=XSD.float)
- False
-
-
-But a new method :meth:`rdflib.term.Identifier.eq` on all Nodes has been introduced, which does semantic equality checking, i.e.:
-
-.. code-block:: python
-
- >>> Literal(2, datatype=XSD.int).eq(Literal(2, datatype=XSD.float))
- True
-
-The ``eq`` method is still limited to what data-types map to the same *value space*, i.e. all numeric types map to numbers and will compare, ``xsd:string`` and ``plain literals`` both map to strings and compare fine, but:
-
-.. code-block:: python
-
- >>> Literal(2, datatype=XSD.int).eq(Literal('2'))
- False
-
-
-
-Literals will be normalised according to datatype
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-If you care about the exact lexical representation of a literal, and not just the value. Either set :data:`rdflib.NORMALIZE_LITERALS` to ``False`` before creating your literal, or pass ``normalize=False`` to the Literal constructor
-
-Ordering of literals and nodes has changed
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Comparing literals with ``<, >, <=, >=`` now work same as in SPARQL filter expressions.
-
-Greater-than/less-than ordering comparisons are also done in value space, when compatible datatypes are used.
-Incompatible datatypes are ordered by data-type, or by lang-tag.
-For other nodes the ordering is ``None < BNode < URIRef < Literal``
-
-Any comparison with non-rdflib Node are ``NotImplemented``
-In PY2.X some stable order will be made up by python.
-In PY3 this is an error.
-
-Custom mapping of datatypes to python objects
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-You can add new mappings of datatype URIs to python objects using the :func:`rdflib.term.bind` method.
-This also allows you to specify a constructor for constructing objects from the lexical string representation, and a serialization method for generating a lexical string representation from an object.
-
-
-
-Minor Changes
---------------
-
-* :class:`rdflib.namespace.Namespace` is no longer a sub-class of :class:`rdflib.term.URIRef`
- this was changed as it makes no sense for a namespace to be a node in a graph, and was causing numerous bug. Unless you do something very special, you should not notice this change.
-
-* The identifiers for Graphs are now converted to URIRefs if they are not a :class:`rdflib.term.Node`, i.e. no more graphs with string identifiers. Again, unless you do something quite unusual, you should not notice.
-
-* String concatenation with URIRefs now returns URIRefs, not strings::
-
- >>> URIRef("http://example.org/people/") + "Bill"
- rdflib.term.URIRef(u'http://example.org/people/Bill')
-
- This is be convenient, but may cause trouble if you expected a string.
diff --git a/docs/upgrade4to5.rst b/docs/upgrade4to5.rst
new file mode 100644
index 00000000..8916ab35
--- /dev/null
+++ b/docs/upgrade4to5.rst
@@ -0,0 +1,266 @@
+.. _upgrade4to5: Upgrading from RDFLib version 4.2.2 to 5.0.0
+
+============================================
+Upgrading 4.2.2 to 5.0.0
+============================================
+
+RDFLib version 5.0.0 appeared over 3 years after the previous release, 4.2.2, and contains a large number of both enhancements and bug fixes. Fundamentally though, 5.0.0 is compatible with 4.2.2.
+
+
+Major Changes
+-------------
+
+Literal Ordering
+^^^^^^^^^^^^^^^^
+Literal total ordering `PR #793 <https://github.com/RDFLib/rdflib/pull/793>`_ is implemented. That means all literals can now be compared to be greater than or less than any other literal.
+This is required for implementing some specific SPARQL features, but it is counter-intuitive to those who are expecting a TypeError when certain normally-incompatible types are compared.
+For example, comparing a ``Literal(int(1), datatype=xsd:integer)`` to ``Literal(datetime.date(2020, 1, 10), datatype=xsd:date)`` using a ``>`` or ``<`` operator in rdflib 4.2.2 and earlier, would normally throw a TypeError,
+however in rdflib 5.0.0 this operation now returns True or False according to the Literal Total Ordering rules outlined in `PR #793 <https://github.com/RDFLib/rdflib/pull/793>`_
+
+Removed RDF Parsers
+^^^^^^^^^^^^^^^^^^^
+The RDFa and Microdata format RDF parsers were removed from rdflib. There are still other python libraries available to implement these parsers.
+
+All Changes
+-----------
+
+This list has been assembled from Pull Request and commit information.
+
+General Bugs Fixed:
+^^^^^^^^^^^^^^^^^^^
+* Pr 451 redux
+ `PR #978 <https://github.com/RDFLib/rdflib/pull/978>`_
+* NTriples fails to parse URIs with only a scheme
+ `ISSUE #920 <https://github.com/RDFLib/rdflib/issues/920>`_
+ `PR #974 <https://github.com/RDFLib/rdflib/pull/974>`_
+* cannot clone it on windows - Remove colons from test result files. Fix #901.
+ `ISSUE #901 <https://github.com/RDFLib/rdflib/issues/901>`_
+ `PR #971 <https://github.com/RDFLib/rdflib/pull/971>`_
+* Add requirement for requests to setup.py
+ `PR #969 <https://github.com/RDFLib/rdflib/pull/969>`_
+* fixed URIRef including native unicode characters
+ `PR #961 <https://github.com/RDFLib/rdflib/pull/961>`_
+* DCTERMS.format not working
+ `ISSUE #932 <https://github.com/RDFLib/rdflib/issues/932>`_
+* infixowl.manchesterSyntax do not encode strings
+ `PR #906 <https://github.com/RDFLib/rdflib/pull/906>`_
+* Fix blank node label to not contain '_:' during parsing
+ `PR #886 <https://github.com/RDFLib/rdflib/pull/886>`_
+* rename new SPARQLWrapper to SPARQLConnector
+ `PR #872 <https://github.com/RDFLib/rdflib/pull/872>`_
+* Fix #859. Unquote and Uriquote Literal Datatype.
+ `PR #860 <https://github.com/RDFLib/rdflib/pull/860>`_
+* Parsing nquads
+ `ISSUE #786 <https://github.com/RDFLib/rdflib/issues/786>`_
+* ntriples spec allows for upper-cased lang tag, fixes #782
+ `PR #784 <https://github.com/RDFLib/rdflib/pull/784>`_
+* Error parsing N-Triple file using RDFlib
+ `ISSUE #782 <https://github.com/RDFLib/rdflib/issues/782>`_
+* Adds escaped single quote to literal parser
+ `PR #736 <https://github.com/RDFLib/rdflib/pull/736>`_
+* N3 parse error on single quote within single quotes
+ `ISSUE #732 <https://github.com/RDFLib/rdflib/issues/732>`_
+* Fixed #725
+ `PR #730 <https://github.com/RDFLib/rdflib/pull/730>`_
+* test for issue #725: canonicalization collapses BNodes
+ `PR #726 <https://github.com/RDFLib/rdflib/pull/726>`_
+* RGDA1 graph canonicalization sometimes still collapses distinct BNodes
+ `ISSUE #725 <https://github.com/RDFLib/rdflib/issues/725>`_
+* Accept header should use a q parameter
+ `PR #720 <https://github.com/RDFLib/rdflib/pull/720>`_
+* Added test for Issue #682 and fixed.
+ `PR #718 <https://github.com/RDFLib/rdflib/pull/718>`_
+* Incompatibility with Python3: unichr
+ `ISSUE #687 <https://github.com/RDFLib/rdflib/issues/687>`_
+* namespace.py include colon in ALLOWED_NAME_CHARS
+ `PR #663 <https://github.com/RDFLib/rdflib/pull/663>`_
+* namespace.py fix compute_qname missing namespaces
+ `PR #649 <https://github.com/RDFLib/rdflib/pull/649>`_
+* RDFa parsing Error! `__init__()` got an unexpected keyword argument 'encoding'
+ `ISSUE #639 <https://github.com/RDFLib/rdflib/issues/639>`_
+* Bugfix: `term.Literal.__add__`
+ `PR #451 <https://github.com/RDFLib/rdflib/pull/451>`_
+* fixup of #443
+ `PR #445 <https://github.com/RDFLib/rdflib/pull/445>`_
+* Microdata to rdf second edition bak
+ `PR #444 <https://github.com/RDFLib/rdflib/pull/444>`_
+
+Enhanced Features:
+^^^^^^^^^^^^^^^^^^
+* Register additional serializer plugins for SPARQL mime types.
+ `PR #987 <https://github.com/RDFLib/rdflib/pull/987>`_
+* Pr 388 redux
+ `PR #979 <https://github.com/RDFLib/rdflib/pull/979>`_
+* Allows RDF terms introduced by JSON-LD 1.1
+ `PR #970 <https://github.com/RDFLib/rdflib/pull/970>`_
+* make SPARQLConnector work with DBpedia
+ `PR #941 <https://github.com/RDFLib/rdflib/pull/941>`_
+* ClosedNamespace returns right exception for way of access
+ `PR #866 <https://github.com/RDFLib/rdflib/pull/866>`_
+* Not adding all namespaces for n3 serializer
+ `PR #832 <https://github.com/RDFLib/rdflib/pull/832>`_
+* Adds basic support of xsd:duration
+ `PR #808 <https://github.com/RDFLib/rdflib/pull/808>`_
+* Add possibility to set authority and basepath to skolemize graph
+ `PR #807 <https://github.com/RDFLib/rdflib/pull/807>`_
+* Change notation3 list realization to non-recursive function.
+ `PR #805 <https://github.com/RDFLib/rdflib/pull/805>`_
+* Suppress warning for not using custom encoding.
+ `PR #800 <https://github.com/RDFLib/rdflib/pull/800>`_
+* Add support to parsing large xml inputs
+ `ISSUE #749 <https://github.com/RDFLib/rdflib/issues/749>`_
+ `PR #750 <https://github.com/RDFLib/rdflib/pull/750>`_
+* improve hash efficiency by directly using str/unicode hash
+ `PR #746 <https://github.com/RDFLib/rdflib/pull/746>`_
+* Added the csvw prefix to the RDFa initial context.
+ `PR #594 <https://github.com/RDFLib/rdflib/pull/594>`_
+* syncing changes from pyMicrodata
+ `PR #587 <https://github.com/RDFLib/rdflib/pull/587>`_
+* Microdata parser: updated the parser to the latest version of the microdata->rdf note (published in December 2014)
+ `PR #443 <https://github.com/RDFLib/rdflib/pull/443>`_
+* Literal.toPython() support for xsd:hexBinary
+ `PR #388 <https://github.com/RDFLib/rdflib/pull/388>`_
+
+SPARQL Fixes:
+^^^^^^^^^^^^^
+* Total order patch patch
+ `PR #862 <https://github.com/RDFLib/rdflib/pull/862>`_
+* use <<= instead of deprecated <<
+ `PR #861 <https://github.com/RDFLib/rdflib/pull/861>`_
+* Fix #847
+ `PR #856 <https://github.com/RDFLib/rdflib/pull/856>`_
+* RDF Literal "1"^^xsd:boolean should _not_ coerce to True
+ `ISSUE #847 <https://github.com/RDFLib/rdflib/issues/847>`_
+* Makes NOW() return an UTC date
+ `PR #844 <https://github.com/RDFLib/rdflib/pull/844>`_
+* NOW() SPARQL should return an xsd:dateTime with a timezone
+ `ISSUE #843 <https://github.com/RDFLib/rdflib/issues/843>`_
+* fix property paths bug: issue #715
+ `PR #822 <https://github.com/RDFLib/rdflib/pull/822>`_
+ `ISSUE #715 <https://github.com/RDFLib/rdflib/issues/715>`_
+* MulPath: correct behaviour of n3()
+ `PR #820 <https://github.com/RDFLib/rdflib/pull/820>`_
+* Literal total ordering
+ `PR #793 <https://github.com/RDFLib/rdflib/pull/793>`_
+* Remove SPARQLWrapper dependency
+ `PR #744 <https://github.com/RDFLib/rdflib/pull/744>`_
+* made UNION faster by not preventing duplicates
+ `PR #741 <https://github.com/RDFLib/rdflib/pull/741>`_
+* added a hook to add custom functions to SPARQL
+ `PR #723 <https://github.com/RDFLib/rdflib/pull/723>`_
+* Issue714
+ `PR #717 <https://github.com/RDFLib/rdflib/pull/717>`_
+* Use <<= instead of deprecated << in SPARQL parser
+ `PR #417 <https://github.com/RDFLib/rdflib/pull/417>`_
+* Custom FILTER function for SPARQL engine
+ `ISSUE #274 <https://github.com/RDFLib/rdflib/issues/274>`_
+
+Code Quality and Cleanups:
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+* a slightly opinionated autopep8 run
+ `PR #870 <https://github.com/RDFLib/rdflib/pull/870>`_
+* remove rdfa and microdata parsers from core RDFLib
+ `PR #828 <https://github.com/RDFLib/rdflib/pull/828>`_
+* ClosedNamespace KeyError -> AttributeError
+ `PR #827 <https://github.com/RDFLib/rdflib/pull/827>`_
+* typo in rdflib/plugins/sparql/update.py
+ `ISSUE #760 <https://github.com/RDFLib/rdflib/issues/760>`_
+* Fix logging in interactive mode
+ `PR #731 <https://github.com/RDFLib/rdflib/pull/731>`_
+* make namespace module flake8-compliant, change exceptions in that mod…
+ `PR #711 <https://github.com/RDFLib/rdflib/pull/711>`_
+* delete ez_setup.py?
+ `ISSUE #669 <https://github.com/RDFLib/rdflib/issues/669>`_
+* code duplication issue between rdflib and pymicrodata
+ `ISSUE #582 <https://github.com/RDFLib/rdflib/issues/582>`_
+* Transition from 2to3 to use of six.py to be merged in 5.0.0-dev
+ `PR #519 <https://github.com/RDFLib/rdflib/pull/519>`_
+* sparqlstore drop deprecated methods and args
+ `PR #516 <https://github.com/RDFLib/rdflib/pull/516>`_
+* python3 code seems shockingly inefficient
+ `ISSUE #440 <https://github.com/RDFLib/rdflib/issues/440>`_
+* removed md5_term_hash, fixes #240
+ `PR #439 <https://github.com/RDFLib/rdflib/pull/439>`_
+ `ISSUE #240 <https://github.com/RDFLib/rdflib/issues/240>`_
+
+Testing:
+^^^^^^^^
+* 3.7 for travis
+ `PR #864 <https://github.com/RDFLib/rdflib/pull/864>`_
+* Added trig unit tests to highlight some current parsing/serializing issues
+ `PR #431 <https://github.com/RDFLib/rdflib/pull/431>`_
+
+Documentation Fixes:
+^^^^^^^^^^^^^^^^^^^^
+* Fix a doc string in the query module
+ `PR #976 <https://github.com/RDFLib/rdflib/pull/976>`_
+* setup.py: Make the license field use an SPDX identifier
+ `PR #789 <https://github.com/RDFLib/rdflib/pull/789>`_
+* Update README.md
+ `PR #764 <https://github.com/RDFLib/rdflib/pull/764>`_
+* Update namespaces_and_bindings.rst
+ `PR #757 <https://github.com/RDFLib/rdflib/pull/757>`_
+* DOC: README.md: rdflib-jsonld, https uris
+ `PR #712 <https://github.com/RDFLib/rdflib/pull/712>`_
+* make doctest support py2/py3
+ `ISSUE #707 <https://github.com/RDFLib/rdflib/issues/707>`_
+* `pip install rdflib` (as per README.md) gets OSError on Mint 18.1
+ `ISSUE #704 <https://github.com/RDFLib/rdflib/issues/704>`_
+ `PR #717 <https://github.com/RDFLib/rdflib/pull/717>`_
+* Use <<= instead of deprecated << in SPARQL parser
+ `PR #417 <https://github.com/RDFLib/rdflib/pull/417>`_
+* Custom FILTER function for SPARQL engine
+ `ISSUE #274 <https://github.com/RDFLib/rdflib/issues/274>`_
+
+Code Quality and Cleanups:
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+* a slightly opinionated autopep8 run
+ `PR #870 <https://github.com/RDFLib/rdflib/pull/870>`_
+* remove rdfa and microdata parsers from core RDFLib
+ `PR #828 <https://github.com/RDFLib/rdflib/pull/828>`_
+* ClosedNamespace KeyError -> AttributeError
+ `PR #827 <https://github.com/RDFLib/rdflib/pull/827>`_
+* typo in rdflib/plugins/sparql/update.py
+ `ISSUE #760 <https://github.com/RDFLib/rdflib/issues/760>`_
+* Fix logging in interactive mode
+ `PR #731 <https://github.com/RDFLib/rdflib/pull/731>`_
+* make namespace module flake8-compliant, change exceptions in that mod…
+ `PR #711 <https://github.com/RDFLib/rdflib/pull/711>`_
+* delete ez_setup.py?
+ `ISSUE #669 <https://github.com/RDFLib/rdflib/issues/669>`_
+* code duplication issue between rdflib and pymicrodata
+ `ISSUE #582 <https://github.com/RDFLib/rdflib/issues/582>`_
+* Transition from 2to3 to use of six.py to be merged in 5.0.0-dev
+ `PR #519 <https://github.com/RDFLib/rdflib/pull/519>`_
+* sparqlstore drop deprecated methods and args
+ `PR #516 <https://github.com/RDFLib/rdflib/pull/516>`_
+* python3 code seems shockingly inefficient
+ `ISSUE #440 <https://github.com/RDFLib/rdflib/issues/440>`_
+* removed md5_term_hash, fixes #240
+ `PR #439 <https://github.com/RDFLib/rdflib/pull/439>`_
+ `ISSUE #240 <https://github.com/RDFLib/rdflib/issues/240>`_
+
+Testing:
+^^^^^^^^
+* 3.7 for travis
+ `PR #864 <https://github.com/RDFLib/rdflib/pull/864>`_
+* Added trig unit tests to highlight some current parsing/serializing issues
+ `PR #431 <https://github.com/RDFLib/rdflib/pull/431>`_
+
+Documentation Fixes:
+^^^^^^^^^^^^^^^^^^^^
+* Fix a doc string in the query module
+ `PR #976 <https://github.com/RDFLib/rdflib/pull/976>`_
+* setup.py: Make the license field use an SPDX identifier
+ `PR #789 <https://github.com/RDFLib/rdflib/pull/789>`_
+* Update README.md
+ `PR #764 <https://github.com/RDFLib/rdflib/pull/764>`_
+* Update namespaces_and_bindings.rst
+ `PR #757 <https://github.com/RDFLib/rdflib/pull/757>`_
+* DOC: README.md: rdflib-jsonld, https uris
+ `PR #712 <https://github.com/RDFLib/rdflib/pull/712>`_
+* make doctest support py2/py3
+ `ISSUE #707 <https://github.com/RDFLib/rdflib/issues/707>`_
+* `pip install rdflib` (as per README.md) gets OSError on Mint 18.1
+ `ISSUE #704 <https://github.com/RDFLib/rdflib/issues/704>`_
+
diff --git a/examples/conjunctive_graphs.py b/examples/conjunctive_graphs.py
index 81cbab9e..f714d9ff 100644
--- a/examples/conjunctive_graphs.py
+++ b/examples/conjunctive_graphs.py
@@ -1,57 +1,59 @@
"""
-
An RDFLib ConjunctiveGraph is an (unnamed) aggregation of all the named graphs
within a Store. The :meth:`~rdflib.graph.ConjunctiveGraph.get_context`
-method can be used to get a particular named graph, or triples can be
-added to the default graph
-
-This example shows how to create some named graphs and work with the
-conjunction of all the graphs.
+method can be used to get a particular named graph to work with, such as to
+add triples to it; alternatively, the default graph can be used
+This example shows how to create named graphs and work with the
+conjunction (union) of all the graphs.
"""
from rdflib import Namespace, Literal, URIRef
from rdflib.graph import Graph, ConjunctiveGraph
from rdflib.plugins.memory import IOMemory
-if __name__ == '__main__':
+if __name__ == "__main__":
ns = Namespace("http://love.com#")
- mary = URIRef("http://love.com/lovers/mary#")
- john = URIRef("http://love.com/lovers/john#")
+ mary = URIRef("http://love.com/lovers/mary")
+ john = URIRef("http://love.com/lovers/john")
- cmary = URIRef("http://love.com/lovers/mary#")
- cjohn = URIRef("http://love.com/lovers/john#")
+ cmary = URIRef("http://love.com/lovers/mary")
+ cjohn = URIRef("http://love.com/lovers/john")
store = IOMemory()
g = ConjunctiveGraph(store=store)
g.bind("love", ns)
+ # add a graph for Mary's facts to the Conjunctive Graph
gmary = Graph(store=store, identifier=cmary)
+    # Mary's graph only contains the URI of the person she loves, not his cute name
+ gmary.add((mary, ns["hasName"], Literal("Mary")))
+ gmary.add((mary, ns["loves"], john))
- gmary.add((mary, ns['hasName'], Literal("Mary")))
- gmary.add((mary, ns['loves'], john))
-
+    # add a graph for John's facts to the Conjunctive Graph
gjohn = Graph(store=store, identifier=cjohn)
- gjohn.add((john, ns['hasName'], Literal("John")))
+ # John's graph contains his cute name
+ gjohn.add((john, ns["hasCuteName"], Literal("Johnny Boy")))
# enumerate contexts
for c in g.contexts():
print("-- %s " % c)
# separate graphs
- print(gjohn.serialize(format='n3'))
+ print(gjohn.serialize(format="n3").decode("utf-8"))
print("===================")
- print(gmary.serialize(format='n3'))
+ print(gmary.serialize(format="n3").decode("utf-8"))
print("===================")
# full graph
- print(g.serialize(format='n3'))
+ print(g.serialize(format="n3").decode("utf-8"))
# query the conjunction of all graphs
-
- print('Mary loves:')
- for x in g[mary: ns.loves / ns.hasName]:
- print(x)
+ xx = None
+ for x in g[mary : ns.loves / ns.hasCuteName]:
+ xx = x
+ print("Q: Who does Mary love?")
+ print("A: Mary loves {}".format(xx))
diff --git a/examples/custom_datatype.py b/examples/custom_datatype.py
index f2d4fb28..8d73e894 100644
--- a/examples/custom_datatype.py
+++ b/examples/custom_datatype.py
@@ -1,20 +1,18 @@
"""
-
-RDFLib can map between data-typed literals and python objects.
+RDFLib can map between RDF data-typed literals and Python objects.
Mapping for integers, floats, dateTimes, etc. are already added, but
you can also add your own.
This example shows how :meth:`rdflib.term.bind` lets you register new
-mappings between literal datatypes and python objects
-
+mappings between literal datatypes and Python objects
"""
from rdflib import Graph, Literal, Namespace, XSD
from rdflib.term import bind
-if __name__ == '__main__':
+if __name__ == "__main__":
# complex numbers are not registered by default
# no custom constructor/serializer needed since
@@ -30,12 +28,11 @@ if __name__ == '__main__':
g = Graph()
g.add((ns.mysubject, ns.myprop, l))
- n3 = g.serialize(format='n3')
-
- # round-trip through n3
+ n3 = g.serialize(format="n3")
+ # round-trip through n3 serialize/parse
g2 = Graph()
- g2.parse(data=n3, format='n3')
+ g2.parse(data=n3, format="n3")
l2 = list(g2)[0][2]
diff --git a/examples/custom_eval.py b/examples/custom_eval.py
index 69cd173a..74c393a4 100644
--- a/examples/custom_eval.py
+++ b/examples/custom_eval.py
@@ -1,7 +1,6 @@
"""
-
This example shows how a custom evaluation function can be added to
-handle certain SPARQL Algebra elements
+handle certain SPARQL Algebra elements.
A custom function is added that adds ``rdfs:subClassOf`` "inference" when
asking for ``rdf:type`` triples.
@@ -15,7 +14,6 @@ i.e. in your setup.py::
'myfunc = mypackage:MyFunction',
],
}
-
"""
import rdflib
@@ -23,8 +21,7 @@ import rdflib
from rdflib.plugins.sparql.evaluate import evalBGP
from rdflib.namespace import FOAF
-inferredSubClass = \
- rdflib.RDFS.subClassOf * '*' # any number of rdfs.subClassOf
+inferredSubClass = rdflib.RDFS.subClassOf * "*" # any number of rdfs.subClassOf
def customEval(ctx, part):
@@ -32,7 +29,7 @@ def customEval(ctx, part):
Rewrite triple patterns to get super-classes
"""
- if part.name == 'BGP':
+ if part.name == "BGP":
# rewrite triples
triples = []
@@ -50,20 +47,17 @@ def customEval(ctx, part):
raise NotImplementedError()
-if __name__ == '__main__':
+if __name__ == "__main__":
# add function directly, normally we would use setuptools and entry_points
- rdflib.plugins.sparql.CUSTOM_EVALS['exampleEval'] = customEval
+ rdflib.plugins.sparql.CUSTOM_EVALS["exampleEval"] = customEval
g = rdflib.Graph()
- g.load("foaf.rdf")
+ g.load("foaf.n3")
# Add the subClassStmt so that we can query for it!
- g.add((FOAF.Person,
- rdflib.RDFS.subClassOf,
- FOAF.Agent))
+ g.add((FOAF.Person, rdflib.RDFS.subClassOf, FOAF.Agent))
# Find all FOAF Agents
- for x in g.query(
- 'PREFIX foaf: <%s> SELECT * WHERE { ?s a foaf:Agent . }' % FOAF):
+ for x in g.query("PREFIX foaf: <%s> SELECT * WHERE { ?s a foaf:Agent . }" % FOAF):
print(x)
diff --git a/examples/film.py b/examples/film.py
index 6bbda04a..f25d696b 100644
--- a/examples/film.py
+++ b/examples/film.py
@@ -4,14 +4,13 @@
film.py: a simple tool to manage your movies review
Simon Rozet, http://atonie.org/
-@@ :
-- manage directors and writers
-- manage actors
-- handle non IMDB uri
-- markdown support in comment
+ - manage directors and writers
+ - manage actors
+ - handle non IMDB uri
+ - markdown support in comment
Requires download and import of Python imdb library from
-http://imdbpy.sourceforge.net/ - (warning: installation
+https://imdbpy.github.io/ - (warning: installation
will trigger automatic installation of several other packages)
--
@@ -36,117 +35,120 @@ except ImportError:
from rdflib import BNode, ConjunctiveGraph, URIRef, Literal, Namespace, RDF
from rdflib.namespace import FOAF, DC
-from six.moves import input
-storefn = os.path.expanduser('~/movies.n3')
-#storefn = '/home/simon/codes/film.dev/movies.n3'
-storeuri = 'file://' + storefn
-title = 'Movies viewed by %s'
+storefn = os.path.expanduser("~/movies.n3")
+# storefn = '/home/simon/codes/film.dev/movies.n3'
+storeuri = "file://" + storefn
+title = "Movies viewed by %s"
-r_who = re.compile('^(.*?) <([a-z0-9_-]+(\.[a-z0-9_-]+)*@[a-z0-9_-]+(\.[a-z0-9_-]+)+)>$')
+r_who = re.compile(
+ "^(.*?) <([a-z0-9_-]+(\.[a-z0-9_-]+)*@[a-z0-9_-]+(\.[a-z0-9_-]+)+)>$"
+)
-IMDB = Namespace('http://www.csd.abdn.ac.uk/~ggrimnes/dev/imdb/IMDB#')
-REV = Namespace('http://purl.org/stuff/rev#')
+IMDB = Namespace("http://www.csd.abdn.ac.uk/~ggrimnes/dev/imdb/IMDB#")
+REV = Namespace("http://purl.org/stuff/rev#")
class Store:
def __init__(self):
self.graph = ConjunctiveGraph()
if os.path.exists(storefn):
- self.graph.load(storeuri, format='n3')
- self.graph.bind('dc', DC)
- self.graph.bind('foaf', FOAF)
- self.graph.bind('imdb', IMDB)
- self.graph.bind('rev', 'http://purl.org/stuff/rev#')
+ self.graph.load(storeuri, format="n3")
+ self.graph.bind("dc", DC)
+ self.graph.bind("foaf", FOAF)
+ self.graph.bind("imdb", IMDB)
+ self.graph.bind("rev", "http://purl.org/stuff/rev#")
def save(self):
- self.graph.serialize(storeuri, format='n3')
+ self.graph.serialize(storeuri, format="n3")
def who(self, who=None):
if who is not None:
name, email = (r_who.match(who).group(1), r_who.match(who).group(2))
- self.graph.add((URIRef(storeuri), DC['title'], Literal(title % name)))
- self.graph.add((URIRef(storeuri + '#author'), RDF.type, FOAF['Person']))
- self.graph.add((URIRef(storeuri + '#author'),
- FOAF['name'], Literal(name)))
- self.graph.add((URIRef(storeuri + '#author'),
- FOAF['mbox'], Literal(email)))
+ self.graph.add((URIRef(storeuri), DC["title"], Literal(title % name)))
+ self.graph.add((URIRef(storeuri + "#author"), RDF.type, FOAF["Person"]))
+ self.graph.add((URIRef(storeuri + "#author"), FOAF["name"], Literal(name)))
+ self.graph.add((URIRef(storeuri + "#author"), FOAF["mbox"], Literal(email)))
self.save()
else:
- return self.graph.objects(URIRef(storeuri + '#author'), FOAF['name'])
+ return self.graph.objects(URIRef(storeuri + "#author"), FOAF["name"])
def new_movie(self, movie):
- movieuri = URIRef('http://www.imdb.com/title/tt%s/' % movie.movieID)
- self.graph.add((movieuri, RDF.type, IMDB['Movie']))
- self.graph.add((movieuri, DC['title'], Literal(movie['title'])))
- self.graph.add((movieuri, IMDB['year'], Literal(int(movie['year']))))
+ movieuri = URIRef("http://www.imdb.com/title/tt%s/" % movie.movieID)
+ self.graph.add((movieuri, RDF.type, IMDB["Movie"]))
+ self.graph.add((movieuri, DC["title"], Literal(movie["title"])))
+ self.graph.add((movieuri, IMDB["year"], Literal(int(movie["year"]))))
self.save()
def new_review(self, movie, date, rating, comment=None):
review = BNode() # @@ humanize the identifier (something like #rev-$date)
- movieuri = URIRef('http://www.imdb.com/title/tt%s/' % movie.movieID)
- self.graph.add((movieuri, REV['hasReview'], URIRef('%s#%s' % (storeuri, review))))
- self.graph.add((review, RDF.type, REV['Review']))
- self.graph.add((review, DC['date'], Literal(date)))
- self.graph.add((review, REV['maxRating'], Literal(5)))
- self.graph.add((review, REV['minRating'], Literal(0)))
- self.graph.add((review, REV['reviewer'], URIRef(storeuri + '#author')))
- self.graph.add((review, REV['rating'], Literal(rating)))
+ movieuri = URIRef("http://www.imdb.com/title/tt%s/" % movie.movieID)
+ self.graph.add(
+ (movieuri, REV["hasReview"], URIRef("%s#%s" % (storeuri, review)))
+ )
+ self.graph.add((review, RDF.type, REV["Review"]))
+ self.graph.add((review, DC["date"], Literal(date)))
+ self.graph.add((review, REV["maxRating"], Literal(5)))
+ self.graph.add((review, REV["minRating"], Literal(0)))
+ self.graph.add((review, REV["reviewer"], URIRef(storeuri + "#author")))
+ self.graph.add((review, REV["rating"], Literal(rating)))
if comment is not None:
- self.graph.add((review, REV['text'], Literal(comment)))
+ self.graph.add((review, REV["text"], Literal(comment)))
self.save()
def movie_is_in(self, uri):
- return (URIRef(uri), RDF.type, IMDB['Movie']) in self.graph
+ return (URIRef(uri), RDF.type, IMDB["Movie"]) in self.graph
def help():
- print(__doc__.split('--')[1])
+ print(__doc__.split("--")[1])
def main(argv=None):
if not argv:
argv = sys.argv
s = Store()
- if argv[1] in ('help', '--help', 'h', '-h'):
+ if argv[1] in ("help", "--help", "h", "-h"):
help()
- elif argv[1] == 'whoami':
+ elif argv[1] == "whoami":
if os.path.exists(storefn):
print(list(s.who())[0])
else:
s.who(argv[2])
- elif argv[1].startswith('http://www.imdb.com/title/tt'):
+ elif argv[1].startswith("http://www.imdb.com/title/tt"):
if s.movie_is_in(argv[1]):
raise
else:
i = imdb.IMDb()
- movie = i.get_movie(argv[1][len('http://www.imdb.com/title/tt'):-1])
- print('%s (%s)' % (movie['title'].encode('utf-8'), movie['year']))
- for director in movie['director']:
- print('directed by: %s' % director['name'].encode('utf-8'))
- for writer in movie['writer']:
- print('written by: %s' % writer['name'].encode('utf-8'))
+ movie = i.get_movie(argv[1][len("http://www.imdb.com/title/tt") : -1])
+ print("%s (%s)" % (movie["title"].encode("utf-8"), movie["year"]))
+ for director in movie["director"]:
+ print("directed by: %s" % director["name"].encode("utf-8"))
+ for writer in movie["writer"]:
+ print("written by: %s" % writer["name"].encode("utf-8"))
s.new_movie(movie)
rating = None
while not rating or (rating > 5 or rating <= 0):
try:
- rating = int(input('Rating (on five): '))
+ rating = int(input("Rating (on five): "))
except ValueError:
rating = None
date = None
while not date:
try:
- i = input('Review date (YYYY-MM-DD): ')
- date = datetime.datetime(*time.strptime(i, '%Y-%m-%d')[:6])
+ i = input("Review date (YYYY-MM-DD): ")
+ date = datetime.datetime(*time.strptime(i, "%Y-%m-%d")[:6])
except:
date = None
- comment = input('Comment: ')
+ comment = input("Comment: ")
s.new_review(movie, date, rating, comment)
else:
help()
-if __name__ == '__main__':
+if __name__ == "__main__":
if not imdb:
- raise Exception('This example requires the IMDB library! Install with "pip install imdbpy"')
+ raise Exception(
+ 'This example requires the IMDB library! Install with "pip install imdbpy"'
+ )
main()
diff --git a/examples/foaf.n3 b/examples/foaf.n3
new file mode 100644
index 00000000..73d6712b
--- /dev/null
+++ b/examples/foaf.n3
@@ -0,0 +1,284 @@
+@prefix cc: <http://creativecommons.org/ns#> .
+@prefix con: <http://www.w3.org/2000/10/swap/pim/contact#> .
+@prefix dc: <http://purl.org/dc/elements/1.1/> .
+@prefix foaf: <http://xmlns.com/foaf/0.1/> .
+@prefix geo: <http://www.opengis.net/ont/geosparql#> .
+@prefix geo1: <http://www.w3.org/2003/01/geo/wgs84_pos#> .
+@prefix owl: <http://www.w3.org/2002/07/owl#> .
+@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
+@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
+@prefix xml: <http://www.w3.org/XML/1998/namespace> .
+@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
+
+<http://dig.csail.mit.edu/2007/01/camp/data#course> foaf:maker <http://www.w3.org/People/Berners-Lee/card#i> .
+
+<http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf> a foaf:PersonalProfileDocument ;
+ cc:license <http://creativecommons.org/licenses/by-nc/3.0/> ;
+ dc:title "Tim Berners-Lee's FOAF file" ;
+ foaf:maker <http://www.w3.org/People/Berners-Lee/card#i> ;
+ foaf:primaryTopic <http://www.w3.org/People/Berners-Lee/card#i> .
+
+<http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#dj> a foaf:Person ;
+ rdfs:seeAlso <http://www.grorg.org/dean/foaf.rdf> ;
+ foaf:homepage <http://www.grorg.org/dean/> ;
+ foaf:mbox <mailto:dean@w3.org>,
+ <mailto:dino@grorg.org> ;
+ foaf:mbox_sha1sum "6de4ff27ef927b9ba21ccc88257e41a2d7e7d293" ;
+ foaf:name "Dean Jackson" .
+
+<http://dig.csail.mit.edu/breadcrumbs/blog/4> dc:title "timbl's blog" ;
+ rdfs:seeAlso <http://dig.csail.mit.edu/breadcrumbs/blog/feed/4> ;
+ foaf:maker <http://www.w3.org/People/Berners-Lee/card#i> .
+
+<http://dig.csail.mit.edu/data#DIG> foaf:member <http://www.w3.org/People/Berners-Lee/card#i> .
+
+<http://my.opera.com/danbri/xml/foaf#me> rdfs:seeAlso <http://danbri.livejournal.com/data/foaf> ;
+ = <http://danbri.org/foaf.rdf#danbri>,
+ <http://www4.wiwiss.fu-berlin.de/dblp/resource/person/336851> ;
+ foaf:mbox_sha1sum "70c053d15de49ff03a1bcc374e4119b40798a66e" .
+
+<http://wiki.ontoworld.org/index.php/_IRW2006> dc:title "Identity, Reference and the Web workshop 2006" ;
+ con:participant <http://www.w3.org/People/Berners-Lee/card#i> .
+
+<http://www.ecs.soton.ac.uk/~dt2/dlstuff/www2006_data#panel-panelk01> rdfs:label "The Next Wave of the Web (Plenary Panel)" ;
+ con:participant <http://www.w3.org/People/Berners-Lee/card#i> .
+
+<http://www.w3.org/data#W3C> rdfs:label "W3C" ;
+ rdfs:seeAlso <http://dig.csail.mit.edu/2008/2002/01/tr-automation/tr.rdf> ;
+ con:publicHomePage <http://www.w3.org/> ;
+ foaf:homepage <http://dig.csail.mit.edu/2008/> ;
+ foaf:logo <http://dig.csail.mit.edu/2008/Icons/w3c_home> ;
+ foaf:name "World Wide Web Consortium" .
+
+<http://www4.wiwiss.fu-berlin.de/booksMeshup/books/006251587X> dc:creator <http://www.w3.org/People/Berners-Lee/card#i> ;
+ dc:title "Weaving the Web: The Original Design and Ultimate Destiny of the World Wide Web" .
+
+<http://bblfish.net/people/henry/card#me> foaf:name "Henry Story" .
+
+<http://dbpedia.org/resource/John_Gage> a foaf:Person ;
+ foaf:img <http://upload.wikimedia.org/wikipedia/commons/d/de/John_Gage.jpg> ;
+ foaf:name "John Gage" .
+
+<http://dbpedia.org/resource/John_Klensin> a foaf:Person ;
+ foaf:name "John Klensin" .
+
+<http://dbpedia.org/resource/John_Markoff> a foaf:Person ;
+ foaf:name "John Markoff" .
+
+<http://dbpedia.org/resource/John_Seely_Brown> a foaf:Person ;
+ = <http://www4.wiwiss.fu-berlin.de/bookmashup/persons/John+Seely+Brown> ;
+ foaf:homepage <http://www.johnseelybrown.com/> ;
+ foaf:img <http://transliteracies.english.ucsb.edu/images/participants/t/brown-john-seely-2.jpg> ;
+ foaf:name "John Seely Brown" .
+
+<http://dbpedia.org/resource/Tim_Bray> a foaf:Person ;
+ foaf:name "Tim Bray" .
+
+<http://dig.csail.mit.edu/2007/wiki/people/JoeLambda#JL> foaf:givenName "Joe" ;
+ foaf:name "Joe Lambda" .
+
+<http://dig.csail.mit.edu/2007/wiki/people/RobertHoffmann#RMH> a foaf:Person ;
+ foaf:name "Robert Hoffmann" .
+
+<http://dig.csail.mit.edu/2008/2002/01/tr-automation/tr.rdf> dc:title "W3C Standards and Technical Reports" .
+
+<http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#cm> a foaf:Person ;
+ rdfs:seeAlso <http://www.koalie.net/foaf.rdf> ;
+ foaf:name "Coralie Mercier" .
+
+<http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#edd> a foaf:Person ;
+ rdfs:seeAlso <http://heddley.com/edd/foaf.rdf> ;
+ foaf:homepage <http://heddley.com/edd/> ;
+ foaf:mbox <mailto:edd@usefulinc.com>,
+ <mailto:edd@xml.com>,
+ <mailto:edd@xmlhack.com> ;
+ foaf:name "Edd Dumbill" ;
+ foaf:nick "edd" .
+
+<http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#libby> a foaf:Person ;
+ = <http://swordfish.rdfweb.org/people/libby/rdfweb/webwho.xrdf#me> ;
+ foaf:img <http://swordfish.rdfweb.org/~libby/libby.jpg> ;
+ foaf:mbox <mailto:libby.miller@bristol.ac.uk> ;
+ foaf:name "Libby Miller" .
+
+<http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#ss> foaf:name "Susie Stephens" ;
+ foaf:organization <http://dbpedia.org/resource/Eli_Lilly_and_Company> .
+
+<http://eikeon.com/foaf.rdf#eikeon> foaf:name "Daniel Krech" .
+
+<http://hometown.aol.com/chbussler/foaf/chbussler.foaf#me> foaf:name "Christoph Bussler" .
+
+<http://id.ecs.soton.ac.uk/person/1269> foaf:name "Nicholas Gibbins" .
+
+<http://id.ecs.soton.ac.uk/person/1650> foaf:name "Wendy Hall" .
+
+<http://id.ecs.soton.ac.uk/person/2686> foaf:name "Nigel Shadbolt" .
+
+<http://id.ecs.soton.ac.uk/person/60> foaf:name "Les Carr" .
+
+<http://inamidst.com/sbp/foaf#Sean> a foaf:Person ;
+ foaf:name "Sean Palmer" .
+
+<http://my.opera.com/chaals/xml/foaf#me> foaf:name "Charles McCathieNevile" .
+
+<http://my.opera.com/howcome/xml/foaf#howcome> foaf:name "Håkon Wium Lie" .
+
+<http://myopenlink.net/dataspace/person/kidehen#this> a foaf:Person ;
+ foaf:name "Kingsley Idehen" .
+
+<http://norman.walsh.name/knows/who#norman-walsh> a foaf:Person ;
+ foaf:name "Norman Walsh" .
+
+<http://people.apache.org/~oshani/foaf.rdf#me> foaf:name "Oshani Seneviratne" .
+
+<http://people.csail.mit.edu/lkagal/foaf#me> a foaf:Person ;
+ foaf:mailbox <mailto:lalana@csail.mit.edu> ;
+ foaf:name "Lalana Kagal" .
+
+<http://people.csail.mit.edu/psz/foaf.rdf#me> foaf:name "Peter Szolovits" .
+
+<http://people.w3.org/simon/foaf#i> a foaf:Person ;
+ foaf:name "Simon J. Hernandez" .
+
+<http://qdos.com/people/tom.xrdf#me> a foaf:Person ;
+ foaf:name "Tom Ilube" .
+
+<http://research.microsoft.com/~henrikn/foaf.xml#me> foaf:name "Henrik Nielsen" .
+
+<http://rit.mellon.org/Members/ihf/foaf.rdf#me> a foaf:Person ;
+ foaf:homepage <http://www.mellon.org/about_foundation/staff/program-area-staff/irafuchs> ;
+ foaf:img <http://www.sun.com/products-n-solutions/edu/images/jelc/fuchs.jpg> ;
+ foaf:name "Ira Fuchs" .
+
+<http://teole.jfouffa.org/People/Teole/card.rdf#me> foaf:name "Philippe Le Hégaret" .
+
+<http://users.ecs.soton.ac.uk/mc/mcfoaf.rdf#me> foaf:name "mc schraefel" .
+
+<http://web.mit.edu/shinnyih/foaf.rdf#> foaf:name "Shinnyih Huang" .
+
+<http://www.aaronsw.com/about.xrdf#aaronsw> a foaf:Person ;
+ rdfs:seeAlso <http://www.aaronsw.com/about.xrdf> ;
+ foaf:mbox <mailto:me@aaronsw.com> ;
+ foaf:name "Aaron Swartz" .
+
+<http://www.cambridgesemantics.com/people/about/lee> foaf:name "Lee Feigenbaum" .
+
+<http://www.cs.umd.edu/~hendler/2003/foaf.rdf#jhendler> foaf:name "Jim Hendler" .
+
+<http://www.dajobe.org/foaf.rdf#i> a foaf:Person ;
+ foaf:mailbox <mailto:dave@dajobe.org> ;
+ foaf:name "Dave Beckett" .
+
+<http://www.isi.edu/~gil/foaf.rdf#me> a foaf:Person ;
+ foaf:name "Yolanda Gill" .
+
+<http://www.ivan-herman.net/foaf.rdf#me> foaf:mbox_sha1sum "5ac8032d5f6012aa1775ea2f63e1676bafd5e80b",
+ "c21b7ed00d78a35efcd8e567f8fd9cca71058c5",
+ "eccd01ba8ce2391a439e9b052a9fbf37eae9f732" ;
+ foaf:name "Ivan Herman" .
+
+<http://www.kjetil.kjernsmo.net/foaf#me> foaf:name "Kjetil Kjernsmo" .
+
+<http://www.lassila.org/ora.rdf#me> foaf:name "Ora Lassila" .
+
+<http://www.mindswap.org/2004/owl/mindswappers#Bijan.Parsia> foaf:name "Bijan Parsia" .
+
+<http://www.mindswap.org/2004/owl/mindswappers#Jennifer.Golbeck> foaf:name "Jennifer Golbeck" .
+
+<http://www.w3.org/People/Berners-Lee/card#amy> a foaf:Person ;
+ rdfs:label "Amy van der Hiel" ;
+ rdfs:seeAlso <http://people.w3.org/amy/foaf.rdf> ;
+ con:familyName "van der Hiel" ;
+ con:givenName "Amy" ;
+ foaf:mbox <mailto:amy@w3.org> ;
+ foaf:mbox_sha1sum "1839a1cc2e719a85ea7d9007f587b2899cd94064" ;
+ foaf:name "Amy van der Hiel" .
+
+<http://www.w3.org/People/Connolly/#me> a foaf:Person ;
+ rdfs:seeAlso <http://www.w3.org/People/Connolly/home-smart.rdf> ;
+ foaf:mbox <mailto:connolly@w3.org> ;
+ foaf:name "Dan Connolly" ;
+ foaf:nick "DanCon" .
+
+<http://www.w3.org/People/EM/contact#me> a foaf:Person ;
+ rdfs:seeAlso <http://www.w3.org/People/EM/contact> ;
+ foaf:homepage <http://purl.org/net/eric/> ;
+ foaf:img <http://www.ilrt.bristol.ac.uk/people/cmdjb/events/dc7/orig/eric.png>,
+ <http://www.oclc.org/~emiller/capture.jpg> ;
+ foaf:mbox <mailto:em@w3.org> ;
+ foaf:name "Eric Miller" .
+
+<http://www.w3.org/People/Jacobs/contact.rdf#IanJacobs> foaf:name "Ian Jacobs" .
+
+<http://www.w3.org/People/djweitzner/foaf#DJW> a foaf:Person ;
+ foaf:mbox_sha1sum "032c319f439f63efba54f4fa51bfb3a3fafedfbe" ;
+ foaf:name "Daniel J Weitzner" .
+
+<http://www.w3.org/People/karl/karl-foaf.xrdf#me> rdfs:seeAlso <http://www.w3.org/People/karl/karl-foaf.xrdf> ;
+ foaf:mbox <mailto:karl@w3.org> ;
+ foaf:name "Karl Dubost" .
+
+<http://www.w3.org/People/Berners-Lee/card#i> a foaf:Person ;
+ foaf:img <http://www.w3.org/Press/Stock/Berners-Lee/2001-europaeum-eighth.jpg> ;
+ foaf:knows [ a foaf:Person ;
+ rdfs:seeAlso <http://dannyayers.com/me.rdf> ;
+ foaf:mbox_sha1sum "669fe353dbef63d12ba11f69ace8acbec1ac8b17" ;
+ foaf:name "Danny Ayers" ],
+ <http://bblfish.net/people/henry/card#me>,
+ <http://danbri.org/foaf#danbri>,
+ <http://dbpedia.org/resource/John_Gage>,
+ <http://dbpedia.org/resource/John_Klensin>,
+ <http://dbpedia.org/resource/John_Markoff>,
+ <http://dbpedia.org/resource/John_Seely_Brown>,
+ <http://dbpedia.org/resource/Tim_Bray>,
+ <http://dig.csail.mit.edu/2007/wiki/people/JoeLambda#JL>,
+ <http://dig.csail.mit.edu/2007/wiki/people/RobertHoffmann#RMH>,
+ <http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#cm>,
+ <http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#edd>,
+ <http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#libby>,
+ <http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#ss>,
+ <http://dig.csail.mit.edu/People/RRS>,
+ <http://dig.csail.mit.edu/People/yosi#YES>,
+ <http://eikeon.com/foaf.rdf#eikeon>,
+ <http://heddley.com/edd/foaf.rdf#edd>,
+ <http://hometown.aol.com/chbussler/foaf/chbussler.foaf#me>,
+ <http://id.ecs.soton.ac.uk/person/1269>,
+ <http://id.ecs.soton.ac.uk/person/1650>,
+ <http://id.ecs.soton.ac.uk/person/2686>,
+ <http://id.ecs.soton.ac.uk/person/60>,
+ <http://inamidst.com/sbp/foaf#Sean>,
+ <http://my.opera.com/chaals/xml/foaf#me>,
+ <http://my.opera.com/howcome/xml/foaf#howcome>,
+ <http://myopenlink.net/dataspace/person/kidehen#this>,
+ <http://norman.walsh.name/knows/who#norman-walsh>,
+ <http://people.apache.org/~oshani/foaf.rdf#me>,
+ <http://people.csail.mit.edu/crowell/foaf.rdf#crowell>,
+ <http://people.csail.mit.edu/lkagal/foaf#me>,
+ <http://people.csail.mit.edu/psz/foaf.rdf#me>,
+ <http://people.csail.mit.edu/ryanlee/about#ryanlee>,
+ <http://people.w3.org/simon/foaf#i>,
+ <http://presbrey.mit.edu/foaf.rdf#presbrey>,
+ <http://qdos.com/people/tom.xrdf#me>,
+ <http://research.microsoft.com/~henrikn/foaf.xml#me>,
+ <http://rit.mellon.org/Members/ihf/foaf.rdf#me>,
+ <http://teole.jfouffa.org/People/Teole/card.rdf#me>,
+ <http://users.ecs.soton.ac.uk/mc/mcfoaf.rdf#me>,
+ <http://web.mit.edu/ruthdhan/www/foaf.rdf#ruthdhan>,
+ <http://web.mit.edu/shinnyih/foaf.rdf#>,
+ <http://www.aaronsw.com/about.xrdf#aaronsw>,
+ <http://www.cambridgesemantics.com/people/about/lee>,
+ <http://www.cs.umd.edu/~hendler/2003/foaf.rdf#jhendler>,
+ <http://www.dajobe.org/foaf.rdf#i>,
+ <http://www.isi.edu/~gil/foaf.rdf#me>,
+ <http://www.ivan-herman.net/foaf.rdf#me>,
+ <http://www.kjetil.kjernsmo.net/foaf#me>,
+ <http://www.lassila.org/ora.rdf#me>,
+ <http://www.mindswap.org/2004/owl/mindswappers#Bijan.Parsia>,
+ <http://www.mindswap.org/2004/owl/mindswappers#Jennifer.Golbeck>,
+ <http://www.w3.org/People/Berners-Lee/card#amy>,
+ <http://www.w3.org/People/Connolly/#me>,
+ <http://www.w3.org/People/EM/contact#me>,
+ <http://www.w3.org/People/Jacobs/contact.rdf#IanJacobs>,
+ <http://www.w3.org/People/djweitzner/foaf#DJW>,
+ <http://www.w3.org/People/karl/karl-foaf.xrdf#me> ;
+ foaf:name "Tim Berners-Lee" .
diff --git a/examples/foaf.rdf b/examples/foaf.rdf
deleted file mode 100644
index 8c9dbfcd..00000000
--- a/examples/foaf.rdf
+++ /dev/null
@@ -1,346 +0,0 @@
-<rdf:RDF
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:foaf="http://xmlns.com/foaf/0.1/"
- xmlns:s="http://www.w3.org/2000/01/rdf-schema#"
- xmlns:owl="http://www.w3.org/2002/07/owl#"
- xmlns:con="http://www.w3.org/2000/10/swap/pim/contact#"
- xmlns:geo="http://www.w3.org/2003/01/geo/wgs84_pos#">
- <rdf:Description rdf:about="http://dig.csail.mit.edu/2008/2002/01/tr-automation/tr.rdf">
- <dc:title>W3C Standards and Technical Reports</dc:title>
- </rdf:Description>
- <rdf:Description rdf:about="http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf">
- <cc:license rdf:resource="http://creativecommons.org/licenses/by-nc/3.0/"/>
- <dc:title>Tim Berners-Lee's FOAF file</dc:title>
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/PersonalProfileDocument"/>
- <foaf:maker rdf:resource="http://www.w3.org/People/Berners-Lee/card#i"/>
- <foaf:primaryTopic rdf:resource="http://www.w3.org/People/Berners-Lee/card#i"/>
- </rdf:Description>
- <rdf:Description rdf:about="http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#cm">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <s:seeAlso rdf:resource="http://www.koalie.net/foaf.rdf"/>
- <foaf:name>Coralie Mercier</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#dj">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <s:seeAlso rdf:resource="http://www.grorg.org/dean/foaf.rdf"/>
- <foaf:homepage rdf:resource="http://www.grorg.org/dean/"/>
- <foaf:mbox rdf:resource="mailto:dean@w3.org"/>
- <foaf:mbox rdf:resource="mailto:dino@grorg.org"/>
- <foaf:mbox_sha1sum>6de4ff27ef927b9ba21ccc88257e41a2d7e7d293</foaf:mbox_sha1sum>
- <foaf:name>Dean Jackson</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#edd">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <s:seeAlso rdf:resource="http://heddley.com/edd/foaf.rdf"/>
- <foaf:homepage rdf:resource="http://heddley.com/edd/"/>
- <foaf:mbox rdf:resource="mailto:edd@usefulinc.com"/>
- <foaf:mbox rdf:resource="mailto:edd@xml.com"/>
- <foaf:mbox rdf:resource="mailto:edd@xmlhack.com"/>
- <foaf:name>Edd Dumbill</foaf:name>
- <foaf:nick>edd</foaf:nick>
- </rdf:Description>
- <rdf:Description rdf:about="http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#libby">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <owl:sameAs rdf:resource="http://swordfish.rdfweb.org/people/libby/rdfweb/webwho.xrdf#me"/>
- <foaf:img rdf:resource="http://swordfish.rdfweb.org/~libby/libby.jpg"/>
- <foaf:mbox rdf:resource="mailto:libby.miller@bristol.ac.uk"/>
- <foaf:name>Libby Miller</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#ss">
- <foaf:name>Susie Stephens</foaf:name>
- <foaf:organization rdf:resource="http://dbpedia.org/resource/Eli_Lilly_and_Company"/>
- </rdf:Description>
- <rdf:Description rdf:about="http://bblfish.net/people/henry/card#me">
- <foaf:name>Henry Story</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://dbpedia.org/resource/John_Gage">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:img rdf:resource="http://upload.wikimedia.org/wikipedia/commons/d/de/John_Gage.jpg"/>
- <foaf:name>John Gage</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://dbpedia.org/resource/John_Klensin">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:name>John Klensin</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://dbpedia.org/resource/John_Markoff">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:name>John Markoff</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://dbpedia.org/resource/John_Seely_Brown">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <owl:sameAs rdf:resource="http://www4.wiwiss.fu-berlin.de/bookmashup/persons/John+Seely+Brown"/>
- <foaf:homepage rdf:resource="http://www.johnseelybrown.com/"/>
- <foaf:img rdf:resource="http://transliteracies.english.ucsb.edu/images/participants/t/brown-john-seely-2.jpg"/>
- <foaf:name>John Seely Brown</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://dbpedia.org/resource/Tim_Bray">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:name>Tim Bray</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://dig.csail.mit.edu/2007/01/camp/data#course">
- <foaf:maker rdf:resource="http://www.w3.org/People/Berners-Lee/card#i"/>
- </rdf:Description>
- <rdf:Description rdf:about="http://dig.csail.mit.edu/2007/wiki/people/JoeLambda#JL">
- <foaf:givenName>Joe</foaf:givenName><foaf:name>Joe Lambda</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://dig.csail.mit.edu/2007/wiki/people/RobertHoffmann#RMH">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:name>Robert Hoffmann</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://dig.csail.mit.edu/breadcrumbs/blog/4">
- <dc:title>timbl's blog</dc:title>
- <s:seeAlso rdf:resource="http://dig.csail.mit.edu/breadcrumbs/blog/feed/4"/>
- <foaf:maker rdf:resource="http://www.w3.org/People/Berners-Lee/card#i"/>
- </rdf:Description>
- <rdf:Description rdf:about="http://dig.csail.mit.edu/data#DIG">
- <foaf:member rdf:resource="http://www.w3.org/People/Berners-Lee/card#i"/>
- </rdf:Description>
- <rdf:Description rdf:about="http://eikeon.com/foaf.rdf#eikeon">
- <foaf:name>Daniel Krech</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://hometown.aol.com/chbussler/foaf/chbussler.foaf#me">
- <foaf:name>Christoph Bussler</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://id.ecs.soton.ac.uk/person/1269">
- <foaf:name>Nicholas Gibbins</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://id.ecs.soton.ac.uk/person/1650">
- <foaf:name>Wendy Hall</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://id.ecs.soton.ac.uk/person/2686">
- <foaf:name>Nigel Shadbolt</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://id.ecs.soton.ac.uk/person/60">
- <foaf:name>Les Carr</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://inamidst.com/sbp/foaf#Sean">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:name>Sean Palmer</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://my.opera.com/chaals/xml/foaf#me">
- <foaf:name>Charles McCathieNevile</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://my.opera.com/danbri/xml/foaf#me">
- <s:seeAlso rdf:resource="http://danbri.livejournal.com/data/foaf"/>
- <owl:sameAs rdf:resource="http://danbri.org/foaf.rdf#danbri"/>
- <owl:sameAs rdf:resource="http://www4.wiwiss.fu-berlin.de/dblp/resource/person/336851"/>
- <foaf:mbox_sha1sum>70c053d15de49ff03a1bcc374e4119b40798a66e</foaf:mbox_sha1sum>
- </rdf:Description>
- <rdf:Description rdf:about="http://my.opera.com/howcome/xml/foaf#howcome">
- <foaf:name>Håkon Wium Lie</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://myopenlink.net/dataspace/person/kidehen#this">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:name>Kingsley Idehen</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://norman.walsh.name/knows/who#norman-walsh">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:name>Norman Walsh</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://people.apache.org/~oshani/foaf.rdf#me">
- <foaf:name>Oshani Seneviratne</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://people.csail.mit.edu/lkagal/foaf#me">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:mailbox rdf:resource="mailto:lalana@csail.mit.edu"/>
- <foaf:name>Lalana Kagal</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://people.csail.mit.edu/psz/foaf.rdf#me">
- <foaf:name>Peter Szolovits</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://people.w3.org/simon/foaf#i">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:name>Simon J. Hernandez</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://qdos.com/people/tom.xrdf#me">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:name>Tom Ilube</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://research.microsoft.com/~henrikn/foaf.xml#me">
- <foaf:name>Henrik Nielsen</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://rit.mellon.org/Members/ihf/foaf.rdf#me">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:homepage rdf:resource="http://www.mellon.org/about_foundation/staff/program-area-staff/irafuchs"/>
- <foaf:img rdf:resource="http://www.sun.com/products-n-solutions/edu/images/jelc/fuchs.jpg"/>
- <foaf:name>Ira Fuchs</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://teole.jfouffa.org/People/Teole/card.rdf#me">
- <foaf:name>Philippe Le Hégaret</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://users.ecs.soton.ac.uk/mc/mcfoaf.rdf#me">
- <foaf:name>mc schraefel</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://web.mit.edu/shinnyih/foaf.rdf#">
- <foaf:name>Shinnyih Huang</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://wiki.ontoworld.org/index.php/_IRW2006">
- <dc:title>Identity, Reference and the Web workshop 2006</dc:title>
- <con:participant rdf:resource="http://www.w3.org/People/Berners-Lee/card#i"/>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.aaronsw.com/about.xrdf#aaronsw">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <s:seeAlso rdf:resource="http://www.aaronsw.com/about.xrdf"/>
- <foaf:mbox rdf:resource="mailto:me@aaronsw.com"/>
- <foaf:name>Aaron Swartz</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.cambridgesemantics.com/people/about/lee">
- <foaf:name>Lee Feigenbaum</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.cs.umd.edu/~hendler/2003/foaf.rdf#jhendler">
- <foaf:name>Jim Hendler</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.dajobe.org/foaf.rdf#i">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:mailbox rdf:resource="mailto:dave@dajobe.org"/>
- <foaf:name>Dave Beckett</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.ecs.soton.ac.uk/~dt2/dlstuff/www2006_data#panel-panelk01">
- <s:label>The Next Wave of the Web (Plenary Panel)</s:label>
- <con:participant rdf:resource="http://www.w3.org/People/Berners-Lee/card#i"/>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.isi.edu/~gil/foaf.rdf#me">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:name>Yolanda Gill</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.ivan-herman.net/foaf.rdf#me">
- <foaf:mbox_sha1sum>5ac8032d5f6012aa1775ea2f63e1676bafd5e80b</foaf:mbox_sha1sum>
- <foaf:mbox_sha1sum>c21b7ed00d78a35efcd8e567f8fd9cca71058c5</foaf:mbox_sha1sum>
- <foaf:mbox_sha1sum>eccd01ba8ce2391a439e9b052a9fbf37eae9f732</foaf:mbox_sha1sum>
- <foaf:name>Ivan Herman</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.kjetil.kjernsmo.net/foaf#me">
- <foaf:name>Kjetil Kjernsmo</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.lassila.org/ora.rdf#me">
- <foaf:name>Ora Lassila</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.mindswap.org/2004/owl/mindswappers#Bijan.Parsia">
- <foaf:name>Bijan Parsia</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.mindswap.org/2004/owl/mindswappers#Jennifer.Golbeck">
- <foaf:name>Jennifer Golbeck</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.w3.org/People/Berners-Lee/card#amy">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <s:label>Amy van der Hiel</s:label>
- <s:seeAlso rdf:resource="http://people.w3.org/amy/foaf.rdf"/>
- <con:familyName>van der Hiel</con:familyName>
- <con:givenName>Amy</con:givenName>
- <foaf:mbox rdf:resource="mailto:amy@w3.org"/>
- <foaf:mbox_sha1sum>1839a1cc2e719a85ea7d9007f587b2899cd94064</foaf:mbox_sha1sum>
- <foaf:name>Amy van der Hiel</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.w3.org/People/Berners-Lee/card#i">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:img rdf:resource="http://www.w3.org/Press/Stock/Berners-Lee/2001-europaeum-eighth.jpg"/>
- <foaf:name>Tim Berners-Lee</foaf:name>
- <foaf:knows rdf:resource="http://bblfish.net/people/henry/card#me"/>
- <foaf:knows rdf:resource="http://danbri.org/foaf#danbri"/>
- <foaf:knows rdf:resource="http://dbpedia.org/resource/John_Gage"/>
- <foaf:knows rdf:resource="http://dbpedia.org/resource/John_Klensin"/>
- <foaf:knows rdf:resource="http://dbpedia.org/resource/John_Markoff"/>
- <foaf:knows rdf:resource="http://dbpedia.org/resource/John_Seely_Brown"/>
- <foaf:knows rdf:resource="http://dbpedia.org/resource/Tim_Bray"/>
- <foaf:knows rdf:resource="http://dig.csail.mit.edu/2007/wiki/people/JoeLambda#JL"/>
- <foaf:knows rdf:resource="http://dig.csail.mit.edu/2007/wiki/people/RobertHoffmann#RMH"/>
- <foaf:knows rdf:resource="http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#cm"/>
- <foaf:knows rdf:resource="http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#edd"/>
- <foaf:knows rdf:resource="http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#libby"/>
- <foaf:knows rdf:resource="http://dig.csail.mit.edu/2008/webdav/timbl/foaf.rdf#ss"/>
- <foaf:knows rdf:resource="http://dig.csail.mit.edu/People/RRS"/>
- <foaf:knows rdf:resource="http://dig.csail.mit.edu/People/yosi#YES"/>
- <foaf:knows rdf:resource="http://eikeon.com/foaf.rdf#eikeon"/>
- <foaf:knows rdf:resource="http://heddley.com/edd/foaf.rdf#edd"/>
- <foaf:knows rdf:resource="http://hometown.aol.com/chbussler/foaf/chbussler.foaf#me"/>
- <foaf:knows rdf:resource="http://id.ecs.soton.ac.uk/person/1269"/>
- <foaf:knows rdf:resource="http://id.ecs.soton.ac.uk/person/1650"/>
- <foaf:knows rdf:resource="http://id.ecs.soton.ac.uk/person/2686"/>
- <foaf:knows rdf:resource="http://id.ecs.soton.ac.uk/person/60"/>
- <foaf:knows rdf:resource="http://inamidst.com/sbp/foaf#Sean"/>
- <foaf:knows rdf:resource="http://my.opera.com/chaals/xml/foaf#me"/>
- <foaf:knows rdf:resource="http://my.opera.com/howcome/xml/foaf#howcome"/>
- <foaf:knows rdf:resource="http://myopenlink.net/dataspace/person/kidehen#this"/>
- <foaf:knows rdf:resource="http://norman.walsh.name/knows/who#norman-walsh"/>
- <foaf:knows rdf:resource="http://people.apache.org/~oshani/foaf.rdf#me"/>
- <foaf:knows rdf:resource="http://people.csail.mit.edu/crowell/foaf.rdf#crowell"/>
- <foaf:knows rdf:resource="http://people.csail.mit.edu/lkagal/foaf#me"/>
- <foaf:knows rdf:resource="http://people.csail.mit.edu/psz/foaf.rdf#me"/>
- <foaf:knows rdf:resource="http://people.csail.mit.edu/ryanlee/about#ryanlee"/>
- <foaf:knows rdf:resource="http://people.w3.org/simon/foaf#i"/>
- <foaf:knows rdf:resource="http://presbrey.mit.edu/foaf.rdf#presbrey"/>
- <foaf:knows rdf:resource="http://qdos.com/people/tom.xrdf#me"/>
- <foaf:knows rdf:resource="http://research.microsoft.com/~henrikn/foaf.xml#me"/>
- <foaf:knows rdf:resource="http://rit.mellon.org/Members/ihf/foaf.rdf#me"/>
- <foaf:knows rdf:resource="http://teole.jfouffa.org/People/Teole/card.rdf#me"/>
- <foaf:knows rdf:resource="http://users.ecs.soton.ac.uk/mc/mcfoaf.rdf#me"/>
- <foaf:knows rdf:resource="http://web.mit.edu/ruthdhan/www/foaf.rdf#ruthdhan"/>
- <foaf:knows rdf:resource="http://web.mit.edu/shinnyih/foaf.rdf#"/>
- <foaf:knows rdf:resource="http://www.aaronsw.com/about.xrdf#aaronsw"/>
- <foaf:knows rdf:resource="http://www.aaronsw.com/about.xrdf#aaronsw"/>
- <foaf:knows rdf:resource="http://www.cambridgesemantics.com/people/about/lee"/>
- <foaf:knows rdf:resource="http://www.cs.umd.edu/~hendler/2003/foaf.rdf#jhendler"/>
- <foaf:knows rdf:resource="http://www.dajobe.org/foaf.rdf#i"/>
- <foaf:knows rdf:resource="http://www.isi.edu/~gil/foaf.rdf#me"/>
- <foaf:knows rdf:resource="http://www.ivan-herman.net/foaf.rdf#me"/>
- <foaf:knows rdf:resource="http://www.kjetil.kjernsmo.net/foaf#me"/>
- <foaf:knows rdf:resource="http://www.lassila.org/ora.rdf#me"/>
- <foaf:knows rdf:resource="http://www.mindswap.org/2004/owl/mindswappers#Bijan.Parsia"/>
- <foaf:knows rdf:resource="http://www.mindswap.org/2004/owl/mindswappers#Jennifer.Golbeck"/>
- <foaf:knows rdf:resource="http://www.w3.org/People/Berners-Lee/card#amy"/>
- <foaf:knows rdf:resource="http://www.w3.org/People/Connolly/#me"/>
- <foaf:knows rdf:resource="http://www.w3.org/People/EM/contact#me"/>
- <foaf:knows rdf:resource="http://www.w3.org/People/Jacobs/contact.rdf#IanJacobs"/>
- <foaf:knows rdf:resource="http://www.w3.org/People/djweitzner/foaf#DJW"/>
- <foaf:knows rdf:resource="http://www.w3.org/People/karl/karl-foaf.xrdf#me"/>
- <foaf:knows rdf:parseType="Resource">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <s:seeAlso rdf:resource="http://dannyayers.com/me.rdf"/>
- <foaf:mbox_sha1sum>669fe353dbef63d12ba11f69ace8acbec1ac8b17</foaf:mbox_sha1sum>
- <foaf:name>Danny Ayers</foaf:name>
- </foaf:knows>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.w3.org/People/Connolly/#me">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <s:seeAlso rdf:resource="http://www.w3.org/People/Connolly/home-smart.rdf"/>
- <foaf:mbox rdf:resource="mailto:connolly@w3.org"/>
- <foaf:name>Dan Connolly</foaf:name>
- <foaf:nick>DanCon</foaf:nick>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.w3.org/People/EM/contact#me">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <s:seeAlso rdf:resource="http://www.w3.org/People/EM/contact"/>
- <foaf:homepage rdf:resource="http://purl.org/net/eric/"/>
- <foaf:img rdf:resource="http://www.ilrt.bristol.ac.uk/people/cmdjb/events/dc7/orig/eric.png"/>
- <foaf:img rdf:resource="http://www.oclc.org/~emiller/capture.jpg"/>
- <foaf:mbox rdf:resource="mailto:em@w3.org"/>
- <foaf:name>Eric Miller</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.w3.org/People/Jacobs/contact.rdf#IanJacobs">
- <foaf:name>Ian Jacobs</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.w3.org/People/djweitzner/foaf#DJW">
- <rdf:type rdf:resource="http://xmlns.com/foaf/0.1/Person"/>
- <foaf:mbox_sha1sum>032c319f439f63efba54f4fa51bfb3a3fafedfbe</foaf:mbox_sha1sum>
- <foaf:name>Daniel J Weitzner</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.w3.org/People/karl/karl-foaf.xrdf#me">
- <s:seeAlso rdf:resource="http://www.w3.org/People/karl/karl-foaf.xrdf"/>
- <foaf:mbox rdf:resource="mailto:karl@w3.org"/>
- <foaf:name>Karl Dubost</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www.w3.org/data#W3C">
- <s:label>W3C</s:label>
- <s:seeAlso rdf:resource="http://dig.csail.mit.edu/2008/2002/01/tr-automation/tr.rdf"/>
- <con:publicHomePage rdf:resource="http://www.w3.org/"/>
- <foaf:homepage rdf:resource="http://dig.csail.mit.edu/2008/"/>
- <foaf:logo rdf:resource="http://dig.csail.mit.edu/2008/Icons/w3c_home"/>
- <foaf:name>World Wide Web Consortium</foaf:name>
- </rdf:Description>
- <rdf:Description rdf:about="http://www4.wiwiss.fu-berlin.de/booksMeshup/books/006251587X">
- <dc:creator rdf:resource="http://www.w3.org/People/Berners-Lee/card#i"/>
- <dc:title>Weaving the Web: The Original Design and Ultimate Destiny of the World Wide Web</dc:title>
- </rdf:Description>
-</rdf:RDF>
diff --git a/examples/foafpaths.py b/examples/foafpaths.py
index 4650d228..127b7f5e 100644
--- a/examples/foafpaths.py
+++ b/examples/foafpaths.py
@@ -1,10 +1,9 @@
"""
-
SPARQL 1.1 defines path operators for combining/repeating predicates
in triple-patterns.
-We overload some python operators on URIRefs to allow creating path
-operators directly in python.
+We overload some Python operators on URIRefs to allow creating path
+operators directly in Python.
============ =========================================
Operator Path
@@ -19,22 +18,21 @@ Operator Path
============ =========================================
-these can then be used in property position for ``s,p,o`` triple queries
+These can then be used in property position for ``s,p,o`` triple queries
for any graph method.
See the docs for :mod:`rdflib.paths` for the details.
This example shows how to get the name of friends with a single query.
-
"""
from rdflib import URIRef, Graph
from rdflib.namespace import FOAF
-if __name__ == '__main__':
+if __name__ == "__main__":
g = Graph()
- g.load("foaf.rdf")
+ g.load("foaf.n3", format="n3")
tim = URIRef("http://www.w3.org/People/Berners-Lee/card#i")
diff --git a/examples/graph_digest_benchmark.py b/examples/graph_digest_benchmark.py
index 678425d1..f0d0075c 100644
--- a/examples/graph_digest_benchmark.py
+++ b/examples/graph_digest_benchmark.py
@@ -1,10 +1,9 @@
#!/usr/bin/env python
-
-'''
+"""
This benchmark will produce graph digests for all of the
downloadable ontologies available in Bioportal.
-'''
+"""
from __future__ import print_function
@@ -22,7 +21,7 @@ from collections import defaultdict
from multiprocessing import Process, Semaphore, Queue
-bioportal_query = '''
+bioportal_query = """
PREFIX metadata: <http://data.bioontology.org/metadata/>
select distinct ?ontology ?title ?download where {
@@ -32,29 +31,29 @@ select distinct ?ontology ?title ?download where {
?links metadata:Ontology ?download.
filter(regex(?download, "/download"))
}
-'''
+"""
stat_cols = [
- 'id',
- 'ontology',
- 'download_url',
- 'tree_depth',
- 'color_count',
- 'individuations',
- 'prunings',
- 'initial_color_count',
- 'adjacent_nodes',
- 'initial_coloring_runtime',
- 'triple_count',
- 'graph_digest',
- 'to_hash_runtime',
- 'canonicalize_triples_runtime',
- 'error',
+ "id",
+ "ontology",
+ "download_url",
+ "tree_depth",
+ "color_count",
+ "individuations",
+ "prunings",
+ "initial_color_count",
+ "adjacent_nodes",
+ "initial_coloring_runtime",
+ "triple_count",
+ "graph_digest",
+ "to_hash_runtime",
+ "canonicalize_triples_runtime",
+ "error",
]
def files_benchmark(ontologies, output_file, threads):
- w = open(output_file, 'w')
+ w = open(output_file, "w")
writer = csv.DictWriter(w, stat_cols)
writer.writeheader()
tasks = Queue()
@@ -68,17 +67,18 @@ def files_benchmark(ontologies, output_file, threads):
stats = q.get()
og = Graph()
try:
- og.load(stats['download_url'])
- print(stats['ontology'], stats['id'])
+ og.load(stats["download_url"])
+ print(stats["ontology"], stats["id"])
ig = to_isomorphic(og)
graph_digest = ig.graph_digest(stats)
finished_tasks.put(stats)
except Exception as e:
- print('ERROR', stats['id'], e)
- stats['error'] = str(e)
+ print("ERROR", stats["id"], e)
+ stats["error"] = str(e)
finished_tasks.put(stats)
except queue.Empty:
pass
+
for i in range(int(threads)):
print("Starting worker", i)
t = Process(target=worker, args=[tasks, finished_tasks, dl_lock])
@@ -86,11 +86,13 @@ def files_benchmark(ontologies, output_file, threads):
t.start()
for download in ontologies:
stats = defaultdict(str)
- stats.update({
- "id": download.split("/")[-1].split(".")[0],
- "ontology": download.split("/")[-1].split(".")[0],
- "download_url": download
- })
+ stats.update(
+ {
+ "id": download.split("/")[-1].split(".")[0],
+ "ontology": download.split("/")[-1].split(".")[0],
+ "download_url": download,
+ }
+ )
tasks.put(stats)
tasks.close()
written_tasks = 0
@@ -104,13 +106,13 @@ def files_benchmark(ontologies, output_file, threads):
def bioportal_benchmark(apikey, output_file, threads):
metadata = Namespace("http://data.bioontology.org/metadata/")
- url = 'http://data.bioontology.org/ontologies?apikey=%s' % apikey
+ url = "http://data.bioontology.org/ontologies?apikey=%s" % apikey
ontology_graph = Graph()
print(url)
ontology_list_json = urlopen(url).read()
- ontology_graph.parse(StringIO(unicode(ontology_list_json)), format="json-ld")
+ ontology_graph.parse(StringIO(ontology_list_json), format="json-ld")
ontologies = ontology_graph.query(bioportal_query)
- w = open(output_file, 'w')
+ w = open(output_file, "w")
writer = csv.DictWriter(w, stat_cols)
writer.writeheader()
tasks = Queue()
@@ -126,19 +128,20 @@ def bioportal_benchmark(apikey, output_file, threads):
try:
try:
dl_lock.acquire()
- og.load(stats['download_url'] + "?apikey=%s" % apikey)
+ og.load(stats["download_url"] + "?apikey=%s" % apikey)
finally:
dl_lock.release()
- print(stats['ontology'], stats['id'])
+ print(stats["ontology"], stats["id"])
ig = to_isomorphic(og)
graph_digest = ig.graph_digest(stats)
finished_tasks.put(stats)
except Exception as e:
- print('ERROR', stats['id'], e)
- stats['error'] = str(e)
+ print("ERROR", stats["id"], e)
+ stats["error"] = str(e)
finished_tasks.put(stats)
- except Empty:
+ except:
pass
+
for i in range(int(threads)):
print("Starting worker", i)
t = Process(target=worker, args=[tasks, finished_tasks, dl_lock])
@@ -146,11 +149,7 @@ def bioportal_benchmark(apikey, output_file, threads):
t.start()
for ontology, title, download in ontologies:
stats = defaultdict(str)
- stats.update({
- "id": ontology,
- "ontology": title,
- "download_url": download
- })
+ stats.update({"id": ontology, "ontology": title, "download_url": download})
tasks.put(stats)
tasks.close()
written_tasks = 0
@@ -162,7 +161,7 @@ def bioportal_benchmark(apikey, output_file, threads):
written_tasks += 1
-if __name__ == '__main__':
+if __name__ == "__main__":
if len(sys.argv) > 4:
files_benchmark(sys.argv[1:-2], sys.argv[-2], sys.argv[-1])
else:
diff --git a/examples/prepared_query.py b/examples/prepared_query.py
index eb5f2b8c..95912e59 100644
--- a/examples/prepared_query.py
+++ b/examples/prepared_query.py
@@ -1,29 +1,25 @@
-
"""
-
SPARQL Queries be prepared (i.e parsed and translated to SPARQL algebra)
by the :meth:`rdflib.plugins.sparql.prepareQuery` method.
When executing, variables can be bound with the
``initBindings`` keyword parameter
-
-
"""
import rdflib
from rdflib.plugins.sparql import prepareQuery
from rdflib.namespace import FOAF
-if __name__ == '__main__':
+if __name__ == "__main__":
q = prepareQuery(
- 'SELECT ?s WHERE { ?person foaf:knows ?s .}',
- initNs={"foaf": FOAF})
+ "SELECT ?s WHERE { ?person foaf:knows ?s .}", initNs={"foaf": FOAF}
+ )
g = rdflib.Graph()
- g.load("foaf.rdf")
+ g.load("foaf.n3", format="n3")
tim = rdflib.URIRef("http://www.w3.org/People/Berners-Lee/card#i")
- for row in g.query(q, initBindings={'person': tim}):
+ for row in g.query(q, initBindings={"person": tim}):
print(row)
diff --git a/examples/rdfa_example.py b/examples/rdfa_example.py
index e49bebbc..30462541 100644
--- a/examples/rdfa_example.py
+++ b/examples/rdfa_example.py
@@ -1,22 +1,24 @@
"""
-
A simple example showing how to process RDFa from the web
-
"""
from rdflib import Graph
-if __name__ == '__main__':
+if __name__ == "__main__":
g = Graph()
- g.parse('http://www.worldcat.org/title/library-of-babel/oclc/44089369', format='rdfa')
+ g.parse(
+ "https://www.worldcat.org/title/library-of-babel/oclc/44089369", format="rdfa"
+ )
print("Books found:")
- for row in g.query("""SELECT ?title ?author WHERE {
+ for row in g.query(
+ """SELECT ?title ?author WHERE {
[ a schema:Book ;
schema:author [ rdfs:label ?author ] ;
schema:name ?title ]
- FILTER (LANG(?title) = 'en') } """):
+ FILTER (LANG(?title) = 'en') } """
+ ):
print("%s by %s" % (row.title, row.author))
diff --git a/examples/resource.py b/examples/resource.py
index d676ae5f..e82ad0b0 100644
--- a/examples/resource.py
+++ b/examples/resource.py
@@ -1,29 +1,26 @@
"""
-
RDFLib has a :class:`~rdflib.resource.Resource` class, for a resource-centric API.
A resource acts like a URIRef with an associated graph, and allows
quickly adding or querying for triples where this resource is the
subject.
-
-
"""
from rdflib import Graph, RDF, RDFS, Literal
from rdflib.namespace import FOAF
-if __name__ == '__main__':
+if __name__ == "__main__":
g = Graph()
- bob = g.resource('urn:bob')
+ bob = g.resource("urn:bob")
bob.set(RDF.type, FOAF.Person) # .set replaces all other values
bob.set(FOAF.name, Literal("Bob"))
- bill = g.resource('urn:bill')
+ bill = g.resource("urn:bill")
- bill.add(RDF.type, FOAF.Person) # add adds to existing values
+ bill.add(RDF.type, FOAF.Person) # adds to existing values
bill.add(RDF.type, FOAF.Agent)
bill.set(RDFS.label, Literal("Bill"))
diff --git a/examples/simple_example.py b/examples/simple_example.py
index ef5ec73a..15ae0d81 100644
--- a/examples/simple_example.py
+++ b/examples/simple_example.py
@@ -1,8 +1,7 @@
-
from rdflib import Graph, Literal, BNode, RDF
from rdflib.namespace import FOAF, DC
-if __name__ == '__main__':
+if __name__ == "__main__":
store = Graph()
@@ -24,29 +23,37 @@ if __name__ == '__main__':
print(s, p, o)
# For each foaf:Person in the store print out its mbox property.
+ print()
print("--- printing mboxes ---")
for person in store.subjects(RDF.type, FOAF["Person"]):
for mbox in store.objects(person, FOAF["mbox"]):
print(mbox)
+ print("--- saving RDF to a file (donna_foaf.rdf) ---")
# Serialize the store as RDF/XML to the file donna_foaf.rdf.
store.serialize("donna_foaf.rdf", format="pretty-xml", max_depth=3)
# Let's show off the serializers
-
+ print()
print("RDF Serializations:")
# Serialize as XML
print("--- start: rdf-xml ---")
- print(store.serialize(format="pretty-xml"))
+ print(store.serialize(format="pretty-xml").decode("utf-8"))
print("--- end: rdf-xml ---\n")
# Serialize as Turtle
print("--- start: turtle ---")
- print(store.serialize(format="turtle"))
+ print(store.serialize(format="turtle").decode("utf-8"))
print("--- end: turtle ---\n")
# Serialize as NTriples
print("--- start: ntriples ---")
- print(store.serialize(format="nt"))
+ print(store.serialize(format="nt").decode("utf-8"))
print("--- end: ntriples ---\n")
+
+ # Serialize as JSON-LD
+ # only if you have the JSON-LD plugin installed!
+ print("--- start: JSON-LD ---")
+ print(store.serialize(format="json-ld").decode("utf-8"))
+ print("--- end: JSON-LD ---\n")
diff --git a/examples/sleepycat_example.py b/examples/sleepycat_example.py
index b112717e..1130da0e 100644
--- a/examples/sleepycat_example.py
+++ b/examples/sleepycat_example.py
@@ -1,8 +1,6 @@
"""
-
A simple example showing how to use a Sleepycat store to do on-disk
persistence.
-
"""
from rdflib import ConjunctiveGraph, Namespace, Literal
@@ -10,33 +8,33 @@ from rdflib.store import NO_STORE, VALID_STORE
from tempfile import mktemp
-if __name__ == '__main__':
+if __name__ == "__main__":
path = mktemp()
# Open previously created store, or create it if it doesn't exist yet
- graph = ConjunctiveGraph('Sleepycat')
+ graph = ConjunctiveGraph("Sleepycat")
rt = graph.open(path, create=False)
if rt == NO_STORE:
- # There is no underlying Sleepycat infrastructure, create it
+ # There is no underlying Sleepycat infrastructure, so create it
graph.open(path, create=True)
else:
- assert rt == VALID_STORE, 'The underlying store is corrupt'
+ assert rt == VALID_STORE, "The underlying store is corrupt"
- print('Triples in graph before add: ', len(graph))
+ print("Triples in graph before add: ", len(graph))
# Now we'll add some triples to the graph & commit the changes
- rdflib = Namespace('http://rdflib.net/test/')
- graph.bind('test', 'http://rdflib.net/test/')
+ rdflib = Namespace("http://rdflib.net/test/")
+ graph.bind("test", "http://rdflib.net/test/")
- graph.add((rdflib['pic:1'], rdflib.name, Literal('Jane & Bob')))
- graph.add((rdflib['pic:2'], rdflib.name, Literal('Squirrel in Tree')))
+ graph.add((rdflib["pic:1"], rdflib.name, Literal("Jane & Bob")))
+ graph.add((rdflib["pic:2"], rdflib.name, Literal("Squirrel in Tree")))
- print('Triples in graph after add: ', len(graph))
+ print("Triples in graph after add: ", len(graph))
# display the graph in RDF/XML
- print(graph.serialize(format='n3'))
+ print(graph.serialize(format="n3"))
# close when done, otherwise sleepycat will leak lock entries.
graph.close()
@@ -45,16 +43,17 @@ if __name__ == '__main__':
# reopen the graph
- graph = ConjunctiveGraph('Sleepycat')
+ graph = ConjunctiveGraph("Sleepycat")
graph.open(path, create=False)
- print('Triples still in graph: ', len(graph))
+ print("Triples still in graph: ", len(graph))
graph.close()
# Clean up the temp folder to remove the Sleepycat database files...
import os
+
for f in os.listdir(path):
- os.unlink(path + '/' + f)
+ os.unlink(path + "/" + f)
os.rmdir(path)
diff --git a/examples/slice.py b/examples/slice.py
index 525edb95..33aacf9b 100644
--- a/examples/slice.py
+++ b/examples/slice.py
@@ -1,5 +1,4 @@
"""
-
RDFLib Graphs (and Resources) can be "sliced" with [] syntax
This is a short-hand for iterating over triples
@@ -8,21 +7,20 @@ Combined with SPARQL paths (see ``foafpaths.py``) - quite complex queries
can be realised.
See :meth:`rdflib.graph.Graph.__getitem__` for details
-
"""
from rdflib import Graph, RDF
from rdflib.namespace import FOAF
-if __name__ == '__main__':
+if __name__ == "__main__":
graph = Graph()
- graph.load("foaf.rdf")
+ graph.load("foaf.n3", format="n3")
- for person in graph[: RDF.type: FOAF.Person]:
+ for person in graph[: RDF.type : FOAF.Person]:
- friends = list(graph[person:FOAF.knows * '+' / FOAF.name])
+ friends = list(graph[person : FOAF.knows * "+" / FOAF.name])
if friends:
print("%s's circle of friends:" % graph.value(person, FOAF.name))
for name in friends:
diff --git a/examples/smushing.py b/examples/smushing.py
index 3b174f83..c1c3cb8b 100644
--- a/examples/smushing.py
+++ b/examples/smushing.py
@@ -4,7 +4,7 @@ A FOAF smushing example.
Filter a graph by normalizing all ``foaf:Persons`` into URIs based on
their ``mbox_sha1sum``.
-Suppose I got two `FOAF <http://xmlns.com/foaf/0.1>`_ documents each
+Suppose I get two `FOAF <http://xmlns.com/foaf/0.1>`_ documents each
talking about the same person (according to ``mbox_sha1sum``) but they
each used a :class:`rdflib.term.BNode` for the subject. For this demo
I've combined those two documents into one file:
@@ -19,7 +19,6 @@ without having to access my ever-growing archive. Even if another
``65b983bb397fb71849da910996741752ace8369b`` document comes in next
year, I would still give it the same stable subject URI that merges
with my existing data.
-
"""
from rdflib import Graph, Namespace
@@ -27,20 +26,20 @@ from rdflib.namespace import FOAF
STABLE = Namespace("http://example.com/person/mbox_sha1sum/")
-if __name__ == '__main__':
+if __name__ == "__main__":
g = Graph()
g.parse("smushingdemo.n3", format="n3")
newURI = {} # old subject : stable uri
- for s, p, o in g.triples((None, FOAF['mbox_sha1sum'], None)):
+ for s, p, o in g.triples((None, FOAF["mbox_sha1sum"], None)):
newURI[s] = STABLE[o]
out = Graph()
- out.bind('foaf', FOAF)
+ out.bind("foaf", FOAF)
for s, p, o in g:
s = newURI.get(s, s)
o = newURI.get(o, o) # might be linked to another person
out.add((s, p, o))
- print(out.serialize(format="n3").decode('utf-8'))
+ print(out.serialize(format="n3").decode("utf-8"))
diff --git a/examples/sparql_query_example.py b/examples/sparql_query_example.py
index b1ffff5f..ce84b17d 100644
--- a/examples/sparql_query_example.py
+++ b/examples/sparql_query_example.py
@@ -1,6 +1,4 @@
-
"""
-
SPARQL Query using :meth:`rdflib.graph.Graph.query`
The method returns a :class:`~rdflib.query.Result`, iterating over
@@ -11,20 +9,18 @@ For variable names that are not valid python identifiers, dict access
(i.e. with ``row[var] / __getitem__``) is also possible.
:attr:`~rdflib.query.ResultRow.vars` contains the variables
-
"""
import rdflib
-if __name__ == '__main__':
+if __name__ == "__main__":
g = rdflib.Graph()
- g.load("foaf.rdf")
+ g.load("foaf.n3", format="n3")
# the QueryProcessor knows the FOAF prefix from the graph
- # which in turn knows it from reading the RDF/XML file
- for row in g.query(
- 'select ?s where { [] foaf:knows ?s .}'):
+ # which in turn knows it from reading the N3 RDF file
+ for row in g.query("SELECT ?s WHERE { [] foaf:knows ?s .}"):
print(row.s)
# or row["s"]
# or row[rdflib.Variable("s")]
diff --git a/examples/sparql_update_example.py b/examples/sparql_update_example.py
index a604eebd..fd7bce4e 100644
--- a/examples/sparql_update_example.py
+++ b/examples/sparql_update_example.py
@@ -1,26 +1,25 @@
-
"""
-
SPARQL Update statements can be applied with :meth:`rdflib.graph.Graph.update`
-
"""
import rdflib
-if __name__ == '__main__':
+if __name__ == "__main__":
g = rdflib.Graph()
- g.load("foaf.rdf")
-
- g.update('''
- PREFIX foaf: <http://xmlns.com/foaf/0.1/>
- PREFIX dbpedia: <http://dbpedia.org/resource/>
- INSERT
- { ?s a dbpedia:Human . }
- WHERE
- { ?s a foaf:Person . }
- ''')
-
- for x in g.subjects(
- rdflib.RDF.type, rdflib.URIRef('http://dbpedia.org/resource/Human')):
- print(x)
+ g.load("foaf.n3", format="n3")
+
+ print("Initially there are {} triples in the graph".format(len(g)))
+
+ g.update(
+ """
+ PREFIX foaf: <http://xmlns.com/foaf/0.1/>
+ PREFIX dbpedia: <http://dbpedia.org/resource/>
+ INSERT
+ { ?s a dbpedia:Human . }
+ WHERE
+ { ?s a foaf:Person . }
+ """
+ )
+
+ print("After the UPDATE, there are {} triples in the graph".format(len(g)))
diff --git a/examples/sparqlstore_example.py b/examples/sparqlstore_example.py
index afef011d..936f6540 100644
--- a/examples/sparqlstore_example.py
+++ b/examples/sparqlstore_example.py
@@ -1,23 +1,34 @@
"""
-
A simple example showing how to use the SPARQLStore
-
"""
+import locale
from rdflib import Graph, URIRef, Namespace
+from rdflib.plugins.stores.sparqlstore import SPARQLStore
-if __name__ == '__main__':
-
- dbo = Namespace('http://dbpedia.org/ontology/')
+if __name__ == "__main__":
- graph = Graph('SPARQLStore', identifier="http://dbpedia.org")
+ dbo = Namespace("http://dbpedia.org/ontology/")
+ # using a Graph with the Store type string set to "SPARQLStore"
+ graph = Graph("SPARQLStore", identifier="http://dbpedia.org")
graph.open("http://dbpedia.org/sparql")
- pop = graph.value(
- URIRef("http://dbpedia.org/resource/Berlin"),
- dbo.populationTotal)
+ pop = graph.value(URIRef("http://dbpedia.org/resource/Berlin"), dbo.populationTotal)
- print(graph.store.queryString)
+ print(
+ "According to DBPedia, Berlin has a population of {0:,}".format(
+ int(pop), ",d"
+ ).replace(",", ".")
+ )
- print("According to DBPedia Berlin has a population of", pop)
+ # using a SPARQLStore object directly
+ s = SPARQLStore(endpoint="http://dbpedia.org/sparql")
+ s.open(None)
+ pop = graph.value(
+ URIRef("http://dbpedia.org/resource/Brisbane"), dbo.populationTotal
+ )
+ print(
+ "According to DBPedia, Brisbane has a population of "
+ "{0:,}".format(int(pop), ",d")
+ )
diff --git a/examples/swap_primer.py b/examples/swap_primer.py
index dacb92e4..35dc107c 100644
--- a/examples/swap_primer.py
+++ b/examples/swap_primer.py
@@ -1,65 +1,70 @@
"""
-
This is a simple primer using some of the
example stuff in the Primer on N3:
http://www.w3.org/2000/10/swap/Primer
-
"""
-# Load up RDFLib
-
-from rdflib import ConjunctiveGraph, Namespace, Literal
+from rdflib import ConjunctiveGraph, Namespace, Literal, URIRef
from rdflib.namespace import OWL, DC
-if __name__ == '__main__':
+if __name__ == "__main__":
# Firstly, it doesn't have to be so complex.
# Here we create a "Graph" of our work.
# Think of it as a blank piece of graph paper!
primer = ConjunctiveGraph()
- myNS = Namespace('#')
+ myNS = Namespace("#")
primer.add((myNS.pat, myNS.knows, myNS.jo))
# or:
- primer.add((myNS['pat'], myNS['age'], Literal(24)))
+ primer.add((myNS["pat"], myNS["age"], Literal(24)))
# Now, with just that, lets see how the system
# recorded *way* too many details about what
# you just asserted as fact.
- #
from pprint import pprint
+
+ print("All the things in the Graph:")
pprint(list(primer))
# just think .whatever((s, p, o))
# here we report on what we know
+ print("==================")
+
+ print("Subjects:")
pprint(list(primer.subjects()))
+ print("Predicates:")
pprint(list(primer.predicates()))
+ print("Objects:")
pprint(list(primer.objects()))
+ print("==================")
# and other things that make sense
- # what do we know about pat?
+ print("What we know about pat:")
pprint(list(primer.predicate_objects(myNS.pat)))
- # who is what age?
+ print("Who is what age?")
pprint(list(primer.subject_objects(myNS.age)))
+ print("==================")
+ print("==================")
+
# Okay, so lets now work with a bigger
# dataset from the example, and start
# with a fresh new graph.
+ del primer
primer = ConjunctiveGraph()
# Lets start with a verbatim string straight from the primer text:
mySource = """
-
-
- @prefix : <http://www.w3.org/2000/10/swap/Primer#>.
+ @prefix : <#> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@@ -92,9 +97,6 @@ if __name__ == '__main__':
:Woman = foo:FemaleAdult .
:Title a rdf:Property; = dc:title .
-
-
-
""" # --- End of primer code
# To make this go easier to spit back out...
@@ -103,28 +105,38 @@ if __name__ == '__main__':
# By default, your main namespace is the URI of your
# current working directory, so lets make that simpler:
- myNS = Namespace('http://www.w3.org/2000/10/swap/Primer#')
- primer.bind('', myNS)
- primer.bind('owl', OWL)
- primer.bind('dc', DC)
- primer.bind('swap', 'http://www.w3.org/2000/10/swap/')
+ primer.bind("owl", OWL)
+ primer.bind("dc", DC)
+ primer.bind("swap", "http://www.w3.org/2000/10/swap/")
# Lets load it up!
- primer.parse(data=mySource, format='n3')
+ primer.parse(data=mySource, format="n3")
# Now you can query, either directly straight into a list:
- [(x, y, z) for x, y, z in primer]
+ print()
+ print("Printing bigger example's triples:")
+ for i in [(x, y, z) for x, y, z in primer]:
+ print(i)
# or spit it back out (mostly) the way we created it:
- print(primer.serialize(format='n3'))
+ print()
+ print("Printing bigger example as N3:")
+ print(primer.serialize(format="n3").decode("utf-8"))
# for more insight into things already done, lets see the namespaces
- list(primer.namespaces())
+ print()
+ print("Printing bigger example's namespaces:")
+ for n in list(primer.namespaces()):
+ print(n)
- # lets ask something about the data
+ # lets ask something about the data, using a SPARQL query
- list(primer.objects(myNS.pat, myNS.child))
+ print()
+ print("Who are pat's children?")
+ q = "SELECT ?child WHERE { :pat :child ?child }"
+ for r in primer.query(q):
+ print(r)
diff --git a/examples/transitive.py b/examples/transitive.py
index 5251ea79..432b3723 100644
--- a/examples/transitive.py
+++ b/examples/transitive.py
@@ -1,5 +1,4 @@
"""
-
An example illustrating how to use the
:meth:`~rdflib.graph.Graph.transitive_subjects` and
:meth:`~rdflib.graph.Graph.transitive_objects` graph methods
@@ -42,21 +41,20 @@ User-defined transitive closures
The method :meth:`~rdflib.graph.Graph.transitiveClosure` returns
transtive closures of user-defined functions.
-
"""
-if __name__ == '__main__':
+if __name__ == "__main__":
from rdflib import ConjunctiveGraph, URIRef
- person = URIRef('ex:person')
- dad = URIRef('ex:d')
- mom = URIRef('ex:m')
- momOfDad = URIRef('ex:gm0')
- momOfMom = URIRef('ex:gm1')
- dadOfDad = URIRef('ex:gf0')
- dadOfMom = URIRef('ex:gf1')
+ person = URIRef("ex:person")
+ dad = URIRef("ex:d")
+ mom = URIRef("ex:m")
+ momOfDad = URIRef("ex:gm0")
+ momOfMom = URIRef("ex:gm1")
+ dadOfDad = URIRef("ex:gf0")
+ dadOfMom = URIRef("ex:gf1")
- parent = URIRef('ex:parent')
+ parent = URIRef("ex:parent")
g = ConjunctiveGraph()
g.add((person, parent, dad))
diff --git a/rdflib/__init__.py b/rdflib/__init__.py
index 29cc681c..bce8204f 100644
--- a/rdflib/__init__.py
+++ b/rdflib/__init__.py
@@ -45,47 +45,64 @@ A tiny example:
__docformat__ = "restructuredtext en"
# The format of the __version__ line is matched by a regex in setup.py
-__version__ = "5.0.0-dev"
-__date__ = "2017/01/29"
+__version__ = "5.0.0"
+__date__ = "2020-04-18"
__all__ = [
- 'URIRef',
- 'BNode',
- 'Literal',
- 'Variable',
-
- 'Namespace',
-
- 'Dataset',
- 'Graph',
- 'ConjunctiveGraph',
-
- 'RDF',
- 'RDFS',
- 'OWL',
- 'XSD',
-
- 'util',
+ "URIRef",
+ "BNode",
+ "Literal",
+ "Variable",
+ "Namespace",
+ "Dataset",
+ "Graph",
+ "ConjunctiveGraph",
+ "CSVW",
+ "DC",
+ "DCAT",
+ "DCTERMS",
+ "DOAP",
+ "FOAF",
+ "ODRL2",
+ "ORG",
+ "OWL",
+ "PROF",
+ "PROV",
+ "QB",
+ "RDF",
+ "RDFS",
+ "SDO",
+ "SH",
+ "SKOS",
+ "SOSA",
+ "SSN",
+ "TIME",
+ "VOID",
+ "XMLNS",
+ "XSD",
+ "util",
]
import sys
-assert sys.version_info >= (2, 7, 0), "rdflib requires Python 2.7 or higher"
import logging
+
logger = logging.getLogger(__name__)
_interactive_mode = False
try:
import __main__
- if not hasattr(__main__, '__file__') and sys.stdout!=None and sys.stderr.isatty():
+
+ if not hasattr(__main__, "__file__") and sys.stdout is not None and sys.stderr.isatty():
# show log messages in interactive mode
_interactive_mode = True
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
del __main__
except ImportError:
- #Main already imported from elsewhere
+ # Main already imported from elsewhere
import warnings
- warnings.warn('__main__ already imported', ImportWarning)
+
+ warnings.warn("__main__ already imported", ImportWarning)
del warnings
if _interactive_mode:
@@ -96,23 +113,6 @@ del _interactive_mode
del sys
-import six
-try:
- six.unichr(0x10FFFF)
-except ValueError:
- import warnings
- warnings.warn(
- 'You are using a narrow Python build!\n'
- 'This means that your Python does not properly support chars > 16bit.\n'
- 'On your system chars like c=u"\\U0010FFFF" will have a len(c)==2.\n'
- 'As this can cause hard to debug problems with string processing\n'
- '(slicing, regexp, ...) later on, we strongly advise to use a wide\n'
- 'Python build in production systems.',
- ImportWarning)
- del warnings
-del six
-
-
NORMALIZE_LITERALS = True
"""
If True - Literals lexical forms are normalized when created.
@@ -156,19 +156,45 @@ In particular, this determines how the rich comparison operators for
Literal work, eq, __neq__, __lt__, etc.
"""
-from rdflib.term import (
- URIRef, BNode, Literal, Variable)
+from rdflib.term import URIRef, BNode, Literal, Variable
from rdflib.namespace import Namespace
from rdflib.graph import Dataset, Graph, ConjunctiveGraph
-from rdflib.namespace import RDF, RDFS, OWL, XSD
+from rdflib.namespace import (
+ CSVW,
+ DC,
+ DCAT,
+ DCTERMS,
+ DOAP,
+ FOAF,
+ ODRL2,
+ ORG,
+ OWL,
+ PROF,
+ PROV,
+ QB,
+ RDF,
+ RDFS,
+ SDO,
+ SH,
+ SKOS,
+ SOSA,
+ SSN,
+ TIME,
+ VOID,
+ XMLNS,
+ XSD,
+)
from rdflib import plugin
from rdflib import query
+
# tedious sop to flake8
assert plugin
assert query
from rdflib import util
+
+from .container import *
diff --git a/rdflib/collection.py b/rdflib/collection.py
index 52eda4a9..8b667a23 100644
--- a/rdflib/collection.py
+++ b/rdflib/collection.py
@@ -7,7 +7,7 @@ from rdflib.term import BNode
from rdflib.term import Literal
-__all__ = ['Collection']
+__all__ = ["Collection"]
class Collection(object):
@@ -67,7 +67,7 @@ class Collection(object):
"2"^^<http://www.w3.org/2001/XMLSchema#integer>
"3"^^<http://www.w3.org/2001/XMLSchema#integer> )
"""
- return "( %s )" % (' '.join([i.n3() for i in self]))
+ return "( %s )" % (" ".join([i.n3() for i in self]))
def _get_container(self, index):
"""Gets the first, rest holding node at index."""
@@ -103,8 +103,7 @@ class Collection(object):
elif not newLink:
raise Exception("Malformed RDF Collection: %s" % self.uri)
else:
- assert len(newLink) == 1, \
- "Malformed RDF Collection: %s" % self.uri
+ assert len(newLink) == 1, "Malformed RDF Collection: %s" % self.uri
listName = newLink[0]
def __getitem__(self, key):
@@ -246,6 +245,7 @@ class Collection(object):
def test():
import doctest
+
doctest.testmod()
@@ -253,14 +253,14 @@ if __name__ == "__main__":
test()
from rdflib import Graph
+
g = Graph()
c = Collection(g, BNode())
assert len(c) == 0
- c = Collection(
- g, BNode(), [Literal("1"), Literal("2"), Literal("3"), Literal("4")])
+ c = Collection(g, BNode(), [Literal("1"), Literal("2"), Literal("3"), Literal("4")])
assert len(c) == 4
@@ -272,7 +272,7 @@ if __name__ == "__main__":
try:
del c[500]
- except IndexError as i:
+ except IndexError:
pass
c.append(Literal("5"))
diff --git a/rdflib/compare.py b/rdflib/compare.py
index d28b3336..839cfbb0 100644
--- a/rdflib/compare.py
+++ b/rdflib/compare.py
@@ -83,8 +83,14 @@ from __future__ import print_function
# - Add warning and/or safety mechanism before working on large graphs?
# - use this in existing Graph.isomorphic?
-__all__ = ['IsomorphicGraph', 'to_isomorphic', 'isomorphic',
- 'to_canonical_graph', 'graph_diff', 'similar']
+__all__ = [
+ "IsomorphicGraph",
+ "to_isomorphic",
+ "isomorphic",
+ "to_canonical_graph",
+ "graph_diff",
+ "similar",
+]
from rdflib.graph import Graph, ConjunctiveGraph, ReadOnlyGraphAggregate
from rdflib.term import BNode, Node
@@ -93,8 +99,6 @@ from hashlib import sha256
from datetime import datetime
from collections import defaultdict
-from six import text_type
-
def _total_seconds(td):
result = td.days * 24 * 60 * 60
@@ -114,10 +118,11 @@ class _runtime(object):
def wrapped_f(*args, **kwargs):
start = datetime.now()
result = f(*args, **kwargs)
- if 'stats' in kwargs and kwargs['stats'] is not None:
- stats = kwargs['stats']
+ if "stats" in kwargs and kwargs["stats"] is not None:
+ stats = kwargs["stats"]
stats[self.label] = _total_seconds(datetime.now() - start)
return result
+
return wrapped_f
@@ -130,12 +135,13 @@ class _call_count(object):
self.label = f.__name__ + "_runtime"
def wrapped_f(*args, **kwargs):
- if 'stats' in kwargs and kwargs['stats'] is not None:
- stats = kwargs['stats']
+ if "stats" in kwargs and kwargs["stats"] is not None:
+ stats = kwargs["stats"]
if self.label not in stats:
stats[self.label] = 0
stats[self.label] += 1
return f(*args, **kwargs)
+
return wrapped_f
@@ -211,12 +217,13 @@ class Color:
if isinstance(x, Node):
return x.n3()
else:
- return text_type(x)
+ return str(x)
+
if isinstance(color, Node):
return stringify(color)
value = 0
for triple in color:
- value += self.hashfunc(' '.join([stringify(x) for x in triple]))
+ value += self.hashfunc(" ".join([stringify(x) for x in triple]))
val = u"%x" % value
self._hash_cache[color] = val
return val
@@ -227,18 +234,16 @@ class Color:
new_color = list(self.color)
for node in W.nodes:
new_color += [
- (1, p, W.hash_color())
- for s, p, o in graph.triples((n, None, node))]
+ (1, p, W.hash_color()) for s, p, o in graph.triples((n, None, node))
+ ]
new_color += [
- (W.hash_color(), p, 3)
- for s, p, o in graph.triples((node, None, n))]
+ (W.hash_color(), p, 3) for s, p, o in graph.triples((node, None, n))
+ ]
new_color = tuple(new_color)
new_hash_color = self.hash_color(new_color)
if new_hash_color not in colors:
- c = Color(
- [], self.hashfunc, new_color,
- hash_cache=self._hash_cache)
+ c = Color([], self.hashfunc, new_color, hash_cache=self._hash_cache)
colors[new_hash_color] = c
colors[new_hash_color].nodes.append(n)
return colors.values()
@@ -248,19 +253,19 @@ class Color:
def copy(self):
return Color(
- self.nodes[:], self.hashfunc, self.color,
- hash_cache=self._hash_cache)
+ self.nodes[:], self.hashfunc, self.color, hash_cache=self._hash_cache
+ )
class _TripleCanonicalizer(object):
-
def __init__(self, graph, hashfunc=sha256):
self.graph = graph
def _hashfunc(s):
h = hashfunc()
- h.update(text_type(s).encode("utf8"))
+ h.update(str(s).encode("utf8"))
return int(h.hexdigest(), 16)
+
self._hash_cache = {}
self.hashfunc = _hashfunc
@@ -292,12 +297,10 @@ class _TripleCanonicalizer(object):
self._neighbors[p].add(s)
self._neighbors[p].add(p)
if len(bnodes) > 0:
- return [
- Color(list(bnodes), self.hashfunc, hash_cache=self._hash_cache)
- ] + [
- Color([x], self.hashfunc, x, hash_cache=self._hash_cache)
- for x in others
- ]
+ return [Color(list(bnodes), self.hashfunc, hash_cache=self._hash_cache)] + [
+ Color([x], self.hashfunc, x, hash_cache=self._hash_cache)
+ for x in others
+ ]
else:
return []
@@ -306,12 +309,12 @@ class _TripleCanonicalizer(object):
new_color.append((len(color.nodes),))
color.nodes.remove(individual)
- c = Color([individual], self.hashfunc, tuple(new_color),
- hash_cache=self._hash_cache)
+ c = Color(
+ [individual], self.hashfunc, tuple(new_color), hash_cache=self._hash_cache
+ )
return c
def _get_candidates(self, coloring):
- candidates = [c for c in coloring if not c.discrete()]
for c in [c for c in coloring if not c.discrete()]:
for node in c.nodes:
yield node, c
@@ -323,14 +326,16 @@ class _TripleCanonicalizer(object):
W = sequence.pop()
for c in coloring[:]:
if len(c.nodes) > 1 or isinstance(c.nodes[0], BNode):
- colors = sorted(c.distinguish(W, self.graph),
- key=lambda x: x.key(),
- reverse=True)
+ colors = sorted(
+ c.distinguish(W, self.graph),
+ key=lambda x: x.key(),
+ reverse=True,
+ )
coloring.remove(c)
coloring.extend(colors)
try:
si = sequence.index(c)
- sequence = sequence[:si] + colors + sequence[si+1:]
+ sequence = sequence[:si] + colors + sequence[si + 1:]
except ValueError:
sequence = colors[1:] + sequence
combined_colors = []
@@ -349,9 +354,9 @@ class _TripleCanonicalizer(object):
def to_hash(self, stats=None):
result = 0
for triple in self.canonical_triples(stats=stats):
- result += self.hashfunc(' '.join([x.n3() for x in triple]))
+ result += self.hashfunc(" ".join([x.n3() for x in triple]))
if stats is not None:
- stats['graph_digest'] = "%x" % result
+ stats["graph_digest"] = "%x" % result
return result
def _experimental_path(self, coloring):
@@ -377,8 +382,8 @@ class _TripleCanonicalizer(object):
@_call_count("individuations")
def _traces(self, coloring, stats=None, depth=[0]):
- if stats is not None and 'prunings' not in stats:
- stats['prunings'] = 0
+ if stats is not None and "prunings" not in stats:
+ stats["prunings"] = 0
depth[0] += 1
candidates = self._get_candidates(coloring)
best = []
@@ -410,24 +415,23 @@ class _TripleCanonicalizer(object):
experimental_score = set([c.key() for c in experimental])
if last_coloring:
generator = self._create_generator(
- [last_coloring, experimental],
- generator)
+ [last_coloring, experimental], generator
+ )
last_coloring = experimental
if best_score is None or best_score < color_score:
best = [refined_coloring]
best_score = color_score
- best_experimental = experimental
best_experimental_score = experimental_score
elif best_score > color_score:
# prune this branch.
if stats is not None:
- stats['prunings'] += 1
+ stats["prunings"] += 1
elif experimental_score != best_experimental_score:
best.append(refined_coloring)
else:
# prune this branch.
if stats is not None:
- stats['prunings'] += 1
+ stats["prunings"] += 1
discrete = [x for x in best if self._discrete(x)]
if len(discrete) == 0:
best_score = None
@@ -445,32 +449,34 @@ class _TripleCanonicalizer(object):
def canonical_triples(self, stats=None):
if stats is not None:
- start_canonicalization = datetime.now()
- if stats is not None:
start_coloring = datetime.now()
coloring = self._initial_color()
if stats is not None:
- stats['triple_count'] = len(self.graph)
- stats['adjacent_nodes'] = max(0, len(coloring) - 1)
+ stats["triple_count"] = len(self.graph)
+ stats["adjacent_nodes"] = max(0, len(coloring) - 1)
coloring = self._refine(coloring, coloring[:])
if stats is not None:
- stats['initial_coloring_runtime'] = _total_seconds(datetime.now() - start_coloring)
- stats['initial_color_count'] = len(coloring)
+ stats["initial_coloring_runtime"] = _total_seconds(
+ datetime.now() - start_coloring
+ )
+ stats["initial_color_count"] = len(coloring)
if not self._discrete(coloring):
depth = [0]
coloring = self._traces(coloring, stats=stats, depth=depth)
if stats is not None:
- stats['tree_depth'] = depth[0]
+ stats["tree_depth"] = depth[0]
elif stats is not None:
- stats['individuations'] = 0
- stats['tree_depth'] = 0
+ stats["individuations"] = 0
+ stats["tree_depth"] = 0
if stats is not None:
- stats['color_count'] = len(coloring)
+ stats["color_count"] = len(coloring)
bnode_labels = dict([(c.nodes[0], c.hash_color()) for c in coloring])
if stats is not None:
- stats["canonicalize_triples_runtime"] = _total_seconds(datetime.now() - start_coloring)
+ stats["canonicalize_triples_runtime"] = _total_seconds(
+ datetime.now() - start_coloring
+ )
for triple in self.graph:
result = tuple(self._canonicalize_bnodes(triple, bnode_labels))
yield result
diff --git a/rdflib/compat.py b/rdflib/compat.py
index bf6fa9bc..c058e8df 100644
--- a/rdflib/compat.py
+++ b/rdflib/compat.py
@@ -10,8 +10,6 @@ import re
import codecs
import warnings
-import six
-
# clean ElementTree import
try:
@@ -33,8 +31,7 @@ except ImportError:
# normal ElementTree install
import elementtree.ElementTree as etree
except ImportError:
- raise Exception(
- "Failed to import ElementTree from any known place")
+ raise Exception("Failed to import ElementTree from any known place")
try:
etree_register_namespace = etree.register_namespace
@@ -46,70 +43,58 @@ except AttributeError:
etreenative._namespace_map[uri] = prefix
-def cast_bytes(s, enc='utf-8'):
- if isinstance(s, six.text_type):
+def cast_bytes(s, enc="utf-8"):
+ if isinstance(s, str):
return s.encode(enc)
return s
-if six.PY3:
- # Python 3:
- # ---------
-
- def ascii(stream):
- return codecs.getreader('ascii')(stream)
-
- def bopen(*args, **kwargs):
- return open(*args, mode='rb', **kwargs)
+def ascii(stream):
+ return codecs.getreader("ascii")(stream)
- long_type = int
- def sign(n):
- if n < 0:
- return -1
- if n > 0:
- return 1
- return 0
+def bopen(*args, **kwargs):
+ return open(*args, mode="rb", **kwargs)
-else:
- # Python 2
- # --------
- def ascii(stream):
- return stream
+long_type = int
- bopen = open
- long_type = long
+def sign(n):
+ if n < 0:
+ return -1
+ if n > 0:
+ return 1
+ return 0
- def sign(n):
- return cmp(n, 0)
-r_unicodeEscape = re.compile(r'(\\u[0-9A-Fa-f]{4}|\\U[0-9A-Fa-f]{8})')
+r_unicodeEscape = re.compile(r"(\\u[0-9A-Fa-f]{4}|\\U[0-9A-Fa-f]{8})")
def _unicodeExpand(s):
- return r_unicodeEscape.sub(lambda m: six.unichr(int(m.group(0)[2:], 16)), s)
+ return r_unicodeEscape.sub(lambda m: chr(int(m.group(0)[2:], 16)), s)
narrow_build = False
try:
- six.unichr(0x10FFFF)
+ chr(0x10FFFF)
except ValueError:
narrow_build = True
if narrow_build:
+
def _unicodeExpand(s):
try:
- return r_unicodeEscape.sub(
- lambda m: six.unichr(int(m.group(0)[2:], 16)), s)
+ return r_unicodeEscape.sub(lambda m: chr(int(m.group(0)[2:], 16)), s)
except ValueError:
warnings.warn(
- 'Encountered a unicode char > 0xFFFF in a narrow python build. '
- 'Trying to degrade gracefully, but this can cause problems '
- 'later when working with the string:\n%s' % s)
+ "Encountered a unicode char > 0xFFFF in a narrow python build. "
+ "Trying to degrade gracefully, but this can cause problems "
+ "later when working with the string:\n%s" % s
+ )
return r_unicodeEscape.sub(
- lambda m: codecs.decode(m.group(0), 'unicode_escape'), s)
+ lambda m: codecs.decode(m.group(0), "unicode_escape"), s
+ )
def decodeStringEscape(s):
@@ -117,17 +102,14 @@ def decodeStringEscape(s):
s is byte-string - replace \ escapes in string
"""
- if not six.PY3:
- s = s.decode('string-escape')
- else:
- s = s.replace('\\t', '\t')
- s = s.replace('\\n', '\n')
- s = s.replace('\\r', '\r')
- s = s.replace('\\b', '\b')
- s = s.replace('\\f', '\f')
- s = s.replace('\\"', '"')
- s = s.replace("\\'", "'")
- s = s.replace('\\\\', '\\')
+ s = s.replace("\\t", "\t")
+ s = s.replace("\\n", "\n")
+ s = s.replace("\\r", "\r")
+ s = s.replace("\\b", "\b")
+ s = s.replace("\\f", "\f")
+ s = s.replace('\\"', '"')
+ s = s.replace("\\'", "'")
+ s = s.replace("\\\\", "\\")
return s
# return _unicodeExpand(s) # hmm - string escape doesn't do unicode escaping
@@ -136,22 +118,18 @@ def decodeStringEscape(s):
def decodeUnicodeEscape(s):
"""
s is a unicode string
- replace \n and \\u00AC unicode escapes
+ replace ``\\n`` and ``\\u00AC`` unicode escapes
"""
- if not six.PY3:
- s = s.encode('utf-8').decode('string-escape')
- s = _unicodeExpand(s)
- else:
- s = s.replace('\\t', '\t')
- s = s.replace('\\n', '\n')
- s = s.replace('\\r', '\r')
- s = s.replace('\\b', '\b')
- s = s.replace('\\f', '\f')
- s = s.replace('\\"', '"')
- s = s.replace("\\'", "'")
- s = s.replace('\\\\', '\\')
-
- s = _unicodeExpand(s) # hmm - string escape doesn't do unicode escaping
+ s = s.replace("\\t", "\t")
+ s = s.replace("\\n", "\n")
+ s = s.replace("\\r", "\r")
+ s = s.replace("\\b", "\b")
+ s = s.replace("\\f", "\f")
+ s = s.replace('\\"', '"')
+ s = s.replace("\\'", "'")
+ s = s.replace("\\\\", "\\")
+
+ s = _unicodeExpand(s) # hmm - string escape doesn't do unicode escaping
return s
diff --git a/rdflib/container.py b/rdflib/container.py
new file mode 100644
index 00000000..5960d0a7
--- /dev/null
+++ b/rdflib/container.py
@@ -0,0 +1,265 @@
+from rdflib.namespace import RDF
+from rdflib.term import BNode
+from rdflib import URIRef
+from random import randint
+
+__all__ = ["Container", "Bag", "Seq", "Alt", "NoElementException"]
+
+
+class Container(object):
+ """A class for constructing RDF containers, as per https://www.w3.org/TR/rdf11-mt/#rdf-containers
+
+ Basic usage, creating a ``Bag`` and adding to it::
+
+ >>> from rdflib import Graph, BNode, Literal, Bag
+ >>> g = Graph()
+ >>> b = Bag(g, BNode(), [Literal("One"), Literal("Two"), Literal("Three")])
+ >>> print(g.serialize(format="turtle").decode())
+ @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
+ <BLANKLINE>
+ [] a rdf:Bag ;
+ rdf:_1 "One" ;
+ rdf:_2 "Two" ;
+ rdf:_3 "Three" .
+ <BLANKLINE>
+ <BLANKLINE>
+
+ >>> # print out an item using an index reference
+ >>> print(b[2])
+ Two
+
+ >>> # add a new item
+ >>> b.append(Literal("Hello"))
+ >>> print(g.serialize(format="turtle").decode())
+ @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
+ <BLANKLINE>
+ [] a rdf:Bag ;
+ rdf:_1 "One" ;
+ rdf:_2 "Two" ;
+ rdf:_3 "Three" ;
+ rdf:_4 "Hello" .
+ <BLANKLINE>
+ <BLANKLINE>
+
+ """
+
+ def __init__(self, graph, uri, seq=[], rtype="Bag"):
+ """Creates a Container
+
+ :param graph: a Graph instance
+ :param uri: URI or Blank Node of the Container
+ :param seq: the elements of the Container
+ :param rtype: the type of Container, one of "Bag", "Seq" or "Alt"
+ """
+
+ self.graph = graph
+ self.uri = uri or BNode()
+ self._len = 0
+ self._rtype = rtype # rdf:Bag or rdf:Seq or rdf:Alt
+
+ self.append_multiple(seq)
+
+ # adding triple corresponding to container type
+ self.graph.add((self.uri, RDF.type, RDF[self._rtype]))
+
+ def n3(self):
+
+ items = []
+ for i in range(len(self)):
+
+ v = self[i + 1]
+ items.append(v)
+
+ return "( %s )" % " ".join([a.n3() for a in items])
+
+ def _get_container(self):
+ """Returns the URI of the container"""
+
+ return self.uri
+
+ def __len__(self):
+ """Number of items in container"""
+
+ return self._len
+
+ def type_of_conatiner(self):
+ return self._rtype
+
+ def index(self, item):
+ """Returns the 1-based numerical index of the item in the container"""
+
+ pred = self.graph.predicates(self.uri, item)
+ if not pred:
+ raise ValueError("%s is not in %s" % (item, "container"))
+ LI_INDEX = URIRef(str(RDF) + "_")
+
+ i = None
+ for p in pred:
+ i = int(p.replace(LI_INDEX, ""))
+ return i
+
+ def __getitem__(self, key):
+ """Returns item of the container at index key"""
+
+ c = self._get_container()
+
+ assert isinstance(key, int)
+ elem_uri = str(RDF) + "_" + str(key)
+ if key <= 0 or key > len(self):
+ raise KeyError(key)
+ v = self.graph.value(c, URIRef(elem_uri))
+ if v:
+ return v
+ else:
+ raise KeyError(key)
+
+ def __setitem__(self, key, value):
+ """Sets the item at index key or predicate rdf:_key of the container to value"""
+
+ assert isinstance(key, int)
+
+ c = self._get_container()
+ elem_uri = str(RDF) + "_" + str(key)
+ if key <= 0 or key > len(self):
+ raise KeyError(key)
+
+ self.graph.set((c, URIRef(elem_uri), value))
+
+ def __delitem__(self, key):
+ """Removing the item with index key or predicate rdf:_key"""
+
+ assert isinstance(key, int)
+ if key <= 0 or key > len(self):
+ raise KeyError(key)
+
+ graph = self.graph
+ container = self.uri
+ elem_uri = str(RDF) + "_" + str(key)
+ graph.remove((container, URIRef(elem_uri), None))
+ for j in range(key + 1, len(self) + 1):
+ elem_uri = str(RDF) + "_" + str(j)
+ v = graph.value(container, URIRef(elem_uri))
+ graph.remove((container, URIRef(elem_uri), v))
+ elem_uri = str(RDF) + "_" + str(j - 1)
+ graph.add((container, URIRef(elem_uri), v))
+
+ self._len -= 1
+
+ def items(self):
+ """Returns a list of all items in the container"""
+
+ l_ = []
+ container = self.uri
+ i = 1
+ while True:
+ elem_uri = str(RDF) + "_" + str(i)
+
+ if (container, URIRef(elem_uri), None) in self.graph:
+ i += 1
+ l_.append(self.graph.value(container, URIRef(elem_uri)))
+ else:
+ break
+ return l_
+
+ def end(self): #
+
+ # find end index (1-based) of container
+
+ container = self.uri
+ i = 1
+ while True:
+ elem_uri = str(RDF) + "_" + str(i)
+
+ if (container, URIRef(elem_uri), None) in self.graph:
+ i += 1
+ else:
+ return i - 1
+
+ def append(self, item):
+ """Adding item to the end of the container"""
+
+ end = self.end()
+ elem_uri = str(RDF) + "_" + str(end + 1)
+ container = self.uri
+ self.graph.add((container, URIRef(elem_uri), item))
+ self._len += 1
+
+ def append_multiple(self, other):
+ """Adding multiple elements to the container to the end which are in python list other"""
+
+ end = self.end() # it should return the last index
+
+ container = self.uri
+ for item in other:
+
+ end += 1
+ self._len += 1
+ elem_uri = str(RDF) + "_" + str(end)
+ self.graph.add((container, URIRef(elem_uri), item))
+
+ def clear(self):
+ """Removing all elements from the container"""
+
+ container = self.uri
+ graph = self.graph
+ i = 1
+ while True:
+ elem_uri = str(RDF) + "_" + str(i)
+ if (container, URIRef(elem_uri), None) in self.graph:
+ graph.remove((container, URIRef(elem_uri), None))
+ i += 1
+ else:
+ break
+ self._len = 0
+
+
+class Bag(Container):
+ """Unordered container (no preference order of elements)"""
+
+ def __init__(self, graph, uri, seq=[]):
+ Container.__init__(self, graph, uri, seq, "Bag")
+
+
+class Alt(Container):
+ def __init__(self, graph, uri, seq=[]):
+ Container.__init__(self, graph, uri, seq, "Alt")
+
+ def anyone(self):
+ if len(self) == 0:
+ raise NoElementException()
+ else:
+ p = randint(1, len(self))
+ item = self.__getitem__(p)
+ return item
+
+
+class Seq(Container):
+ def __init__(self, graph, uri, seq=[]):
+ Container.__init__(self, graph, uri, seq, "Seq")
+
+ def add_at_position(self, pos, item):
+ assert isinstance(pos, int)
+ if pos <= 0 or pos > len(self) + 1:
+ raise ValueError("Invalid Position for inserting element in rdf:Seq")
+
+ if pos == len(self) + 1:
+ self.append(item)
+ else:
+ for j in range(len(self), pos - 1, -1):
+ container = self._get_container()
+ elem_uri = str(RDF) + "_" + str(j)
+ v = self.graph.value(container, URIRef(elem_uri))
+ self.graph.remove((container, URIRef(elem_uri), v))
+ elem_uri = str(RDF) + "_" + str(j + 1)
+ self.graph.add((container, URIRef(elem_uri), v))
+ elem_uri_pos = str(RDF) + "_" + str(pos)
+ self.graph.add((container, URIRef(elem_uri_pos), item))
+ self._len += 1
+
+
+class NoElementException(Exception):
+ def __init__(self, message="rdf:Alt Container is empty"):
+ self.message = message
+
+ def __str__(self):
+ return self.message
diff --git a/rdflib/events.py b/rdflib/events.py
index 2c563c10..816925ed 100644
--- a/rdflib/events.py
+++ b/rdflib/events.py
@@ -26,7 +26,7 @@ fired:
<rdflib.events.Event ['data', 'foo', 'used_by']>
"""
-__all__ = ['Event', 'Dispatcher']
+__all__ = ["Event", "Dispatcher"]
class Event(object):
@@ -47,7 +47,7 @@ class Event(object):
def __repr__(self):
attrs = sorted(self.__dict__.keys())
- return '<rdflib.events.Event %s>' % ([a for a in attrs],)
+ return "<rdflib.events.Event %s>" % ([a for a in attrs],)
class Dispatcher(object):
@@ -84,14 +84,15 @@ class Dispatcher(object):
lst = self._dispatch_map.get(type(event), None)
if lst is None:
raise ValueError("unknown event type: %s" % type(event))
- for l in lst:
- l(event)
+ for l_ in lst:
+ l_(event)
def test():
import doctest
+
doctest.testmod()
-if __name__ == '__main__':
+if __name__ == "__main__":
test()
diff --git a/rdflib/exceptions.py b/rdflib/exceptions.py
index 85195a53..4e31c0b8 100644
--- a/rdflib/exceptions.py
+++ b/rdflib/exceptions.py
@@ -2,9 +2,15 @@
TODO:
"""
-__all__ = ['Error', 'TypeCheckError', 'SubjectTypeError',
- 'PredicateTypeError', 'ObjectTypeError', 'ContextTypeError',
- 'ParserError']
+__all__ = [
+ "Error",
+ "TypeCheckError",
+ "SubjectTypeError",
+ "PredicateTypeError",
+ "ObjectTypeError",
+ "ContextTypeError",
+ "ParserError",
+]
class Error(Exception):
@@ -29,8 +35,10 @@ class SubjectTypeError(TypeCheckError):
def __init__(self, node):
TypeCheckError.__init__(self, node)
- self.msg = "Subject must be instance of URIRef or BNode: %s(%s)" \
- % (self.node, self.type)
+ self.msg = "Subject must be instance of URIRef or BNode: %s(%s)" % (
+ self.node,
+ self.type,
+ )
class PredicateTypeError(TypeCheckError):
@@ -38,8 +46,10 @@ class PredicateTypeError(TypeCheckError):
def __init__(self, node):
TypeCheckError.__init__(self, node)
- self.msg = "Predicate must be a URIRef instance: %s(%s)" \
- % (self.node, self.type)
+ self.msg = "Predicate must be a URIRef instance: %s(%s)" % (
+ self.node,
+ self.type,
+ )
class ObjectTypeError(TypeCheckError):
@@ -48,9 +58,11 @@ class ObjectTypeError(TypeCheckError):
def __init__(self, node):
TypeCheckError.__init__(self, node)
- self.msg = "\
-Object must be instance of URIRef, Literal, or BNode: %s(%s)" % \
- (self.node, self.type)
+ self.msg = (
+ "\
+Object must be instance of URIRef, Literal, or BNode: %s(%s)"
+ % (self.node, self.type)
+ )
class ContextTypeError(TypeCheckError):
@@ -58,8 +70,10 @@ class ContextTypeError(TypeCheckError):
def __init__(self, node):
TypeCheckError.__init__(self, node)
- self.msg = "Context must be instance of URIRef or BNode: %s(%s)" \
- % (self.node, self.type)
+ self.msg = "Context must be instance of URIRef or BNode: %s(%s)" % (
+ self.node,
+ self.type,
+ )
class ParserError(Error):
@@ -77,5 +91,9 @@ class UniquenessError(Error):
"""A uniqueness assumption was made in the context, and that is not true"""
def __init__(self, values):
- Error.__init__(self, "\
-Uniqueness assumption is not fulfilled. Multiple values are: %s" % values)
+ Error.__init__(
+ self,
+ "\
+Uniqueness assumption is not fulfilled. Multiple values are: %s"
+ % values,
+ )
diff --git a/rdflib/extras/cmdlineutils.py b/rdflib/extras/cmdlineutils.py
index a771d4d7..9abb10ba 100644
--- a/rdflib/extras/cmdlineutils.py
+++ b/rdflib/extras/cmdlineutils.py
@@ -8,14 +8,16 @@ from rdflib.util import guess_format
def _help():
- sys.stderr.write("""
+ sys.stderr.write(
+ """
program.py [-f <format>] [-o <output>] [files...]
Read RDF files given on STDOUT - does something to the resulting graph
If no files are given, read from stdin
-o specifies file for output, if not given stdout is used
-f specifies parser to use, if not given it is guessed from extension
-""")
+"""
+ )
def main(target, _help=_help, options="", stdin=True):
@@ -57,11 +59,15 @@ def main(target, _help=_help, options="", stdin=True):
start1 = time.time()
sys.stderr.write("Loading %s as %s... " % (x, f))
g.load(x, format=f)
- sys.stderr.write("done.\t(%d triples\t%.2f seconds)\n" %
- (len(g) - size, time.time() - start1))
+ sys.stderr.write(
+ "done.\t(%d triples\t%.2f seconds)\n"
+ % (len(g) - size, time.time() - start1)
+ )
size = len(g)
- sys.stderr.write("Loaded a total of %d triples in %.2f seconds.\n" %
- (len(g), time.time() - start))
+ sys.stderr.write(
+ "Loaded a total of %d triples in %.2f seconds.\n"
+ % (len(g), time.time() - start)
+ )
target(g, out, args)
diff --git a/rdflib/extras/describer.py b/rdflib/extras/describer.py
index c7444776..cec3b602 100644
--- a/rdflib/extras/describer.py
+++ b/rdflib/extras/describer.py
@@ -119,7 +119,6 @@ from rdflib.term import URIRef
class Describer(object):
-
def __init__(self, graph=None, about=None, base=None):
if graph is None:
graph = Graph()
@@ -143,7 +142,7 @@ class Describer(object):
rdflib.term.URIRef(u'http://example.org/')
"""
- kws.setdefault('base', self.base)
+ kws.setdefault("base", self.base)
subject = cast_identifier(subject, **kws)
if self._subjects:
self._subjects[-1] = subject
@@ -195,7 +194,7 @@ class Describer(object):
"""
- kws.setdefault('base', self.base)
+ kws.setdefault("base", self.base)
p = cast_identifier(p)
o = cast_identifier(o, **kws)
self.graph.add((self._current(), p, o))
@@ -221,7 +220,7 @@ class Describer(object):
rdflib.term.Literal(u'Net')
"""
- kws.setdefault('base', self.base)
+ kws.setdefault("base", self.base)
p = cast_identifier(p)
s = cast_identifier(s, **kws)
self.graph.add((s, p, self._current()))
diff --git a/rdflib/extras/external_graph_libs.py b/rdflib/extras/external_graph_libs.py
index 8617b370..fa311490 100644
--- a/rdflib/extras/external_graph_libs.py
+++ b/rdflib/extras/external_graph_libs.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import
from __future__ import division
@@ -17,18 +17,22 @@ see ../../test/test_extras_external_graph_libs.py for conditional tests
"""
import logging
+
logger = logging.getLogger(__name__)
-def _identity(x): return x
+def _identity(x):
+ return x
def _rdflib_to_networkx_graph(
- graph,
- nxgraph,
- calc_weights,
- edge_attrs,
- transform_s=_identity, transform_o=_identity):
+ graph,
+ nxgraph,
+ calc_weights,
+ edge_attrs,
+ transform_s=_identity,
+ transform_o=_identity,
+):
"""Helper method for multidigraph, digraph and graph.
Modifies nxgraph in-place!
@@ -50,6 +54,7 @@ def _rdflib_to_networkx_graph(
assert callable(transform_s)
assert callable(transform_o)
import networkx as nx
+
for s, p, o in graph:
ts, to = transform_s(s), transform_o(o) # apply possible transformations
data = nxgraph.get_edge_data(ts, to)
@@ -57,21 +62,20 @@ def _rdflib_to_networkx_graph(
# no edge yet, set defaults
data = edge_attrs(s, p, o)
if calc_weights:
- data['weight'] = 1
+ data["weight"] = 1
nxgraph.add_edge(ts, to, **data)
else:
# already have an edge, just update attributes
if calc_weights:
- data['weight'] += 1
- if 'triples' in data:
+ data["weight"] += 1
+ if "triples" in data:
d = edge_attrs(s, p, o)
- data['triples'].extend(d['triples'])
+ data["triples"].extend(d["triples"])
def rdflib_to_networkx_multidigraph(
- graph,
- edge_attrs=lambda s, p, o: {'key': p},
- **kwds):
+ graph, edge_attrs=lambda s, p, o: {"key": p}, **kwds
+):
"""Converts the given graph into a networkx.MultiDiGraph.
The subjects and objects are the later nodes of the MultiDiGraph.
@@ -116,16 +120,18 @@ def rdflib_to_networkx_multidigraph(
True
"""
import networkx as nx
+
mdg = nx.MultiDiGraph()
_rdflib_to_networkx_graph(graph, mdg, False, edge_attrs, **kwds)
return mdg
def rdflib_to_networkx_digraph(
- graph,
- calc_weights=True,
- edge_attrs=lambda s, p, o: {'triples': [(s, p, o)]},
- **kwds):
+ graph,
+ calc_weights=True,
+ edge_attrs=lambda s, p, o: {"triples": [(s, p, o)]},
+ **kwds
+):
"""Converts the given graph into a networkx.DiGraph.
As an rdflib.Graph() can contain multiple edges between nodes, by default
@@ -176,16 +182,18 @@ def rdflib_to_networkx_digraph(
False
"""
import networkx as nx
+
dg = nx.DiGraph()
_rdflib_to_networkx_graph(graph, dg, calc_weights, edge_attrs, **kwds)
return dg
def rdflib_to_networkx_graph(
- graph,
- calc_weights=True,
- edge_attrs=lambda s, p, o: {'triples': [(s, p, o)]},
- **kwds):
+ graph,
+ calc_weights=True,
+ edge_attrs=lambda s, p, o: {"triples": [(s, p, o)]},
+ **kwds
+):
"""Converts the given graph into a networkx.Graph.
As an rdflib.Graph() can contain multiple directed edges between nodes, by
@@ -236,6 +244,7 @@ def rdflib_to_networkx_graph(
False
"""
import networkx as nx
+
g = nx.Graph()
_rdflib_to_networkx_graph(graph, g, calc_weights, edge_attrs, **kwds)
return g
@@ -243,11 +252,11 @@ def rdflib_to_networkx_graph(
def rdflib_to_graphtool(
graph,
- v_prop_names=[str('term')],
- e_prop_names=[str('term')],
- transform_s=lambda s, p, o: {str('term'): s},
- transform_p=lambda s, p, o: {str('term'): p},
- transform_o=lambda s, p, o: {str('term'): o},
+ v_prop_names=[str("term")],
+ e_prop_names=[str("term")],
+ transform_s=lambda s, p, o: {str("term"): s},
+ transform_p=lambda s, p, o: {str("term"): p},
+ transform_o=lambda s, p, o: {str("term"): o},
):
"""Converts the given graph into a graph_tool.Graph().
@@ -306,12 +315,13 @@ def rdflib_to_graphtool(
True
"""
import graph_tool as gt
+
g = gt.Graph()
- vprops = [(vpn, g.new_vertex_property('object')) for vpn in v_prop_names]
+ vprops = [(vpn, g.new_vertex_property("object")) for vpn in v_prop_names]
for vpn, vprop in vprops:
g.vertex_properties[vpn] = vprop
- eprops = [(epn, g.new_edge_property('object')) for epn in e_prop_names]
+ eprops = [(epn, g.new_edge_property("object")) for epn in e_prop_names]
for epn, eprop in eprops:
g.edge_properties[epn] = eprop
node_to_vertex = {}
@@ -341,10 +351,12 @@ def rdflib_to_graphtool(
return g
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
import logging.config
+
logging.basicConfig(level=logging.DEBUG)
import nose
- nose.run(argv=[sys.argv[0], sys.argv[0], '-v', '--without-doctest'])
+
+ nose.run(argv=[sys.argv[0], sys.argv[0], "-v", "--without-doctest"])
diff --git a/rdflib/extras/infixowl.py b/rdflib/extras/infixowl.py
index fb033198..c043675a 100644
--- a/rdflib/extras/infixowl.py
+++ b/rdflib/extras/infixowl.py
@@ -4,8 +4,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from six import PY3
-
__doc__ = """
RDFLib Python binding for OWL Abstract Syntax
@@ -118,15 +116,7 @@ Python
import itertools
-from rdflib import (
- BNode,
- Literal,
- Namespace,
- RDF,
- RDFS,
- URIRef,
- Variable
-)
+from rdflib import BNode, Literal, Namespace, RDF, RDFS, URIRef, Variable
from rdflib.graph import Graph
from rdflib.collection import Collection
from rdflib.namespace import XSD as _XSD_NS
@@ -135,6 +125,7 @@ from rdflib.term import Identifier
from rdflib.util import first
import logging
+
logger = logging.getLogger(__name__)
@@ -148,43 +139,43 @@ operators can be defined.
"""
__all__ = [
- 'OWL_NS',
- 'nsBinds',
- 'ACE_NS',
- 'CLASS_RELATIONS',
- 'some',
- 'only',
- 'max',
- 'min',
- 'exactly',
- 'value',
- 'PropertyAbstractSyntax',
- 'AllClasses',
- 'AllDifferent',
- 'AllProperties',
- 'AnnotatableTerms',
- 'BooleanClass',
- 'Callable',
- 'CastClass',
- 'Class',
- 'ClassNamespaceFactory',
- 'classOrIdentifier',
- 'classOrTerm',
- 'CommonNSBindings',
- 'ComponentTerms',
- 'DeepClassClear',
- 'EnumeratedClass',
- 'generateQName',
- 'GetIdentifiedClasses',
- 'Individual',
- 'MalformedClass',
- 'manchesterSyntax',
- 'Ontology',
- 'OWLRDFListProxy',
- 'Property',
- 'propertyOrIdentifier',
- 'Restriction',
- 'termDeletionDecorator',
+ "OWL_NS",
+ "nsBinds",
+ "ACE_NS",
+ "CLASS_RELATIONS",
+ "some",
+ "only",
+ "max",
+ "min",
+ "exactly",
+ "value",
+ "PropertyAbstractSyntax",
+ "AllClasses",
+ "AllDifferent",
+ "AllProperties",
+ "AnnotatableTerms",
+ "BooleanClass",
+ "Callable",
+ "CastClass",
+ "Class",
+ "ClassNamespaceFactory",
+ "classOrIdentifier",
+ "classOrTerm",
+ "CommonNSBindings",
+ "ComponentTerms",
+ "DeepClassClear",
+ "EnumeratedClass",
+ "generateQName",
+ "GetIdentifiedClasses",
+ "Individual",
+ "MalformedClass",
+ "manchesterSyntax",
+ "Ontology",
+ "OWLRDFListProxy",
+ "Property",
+ "propertyOrIdentifier",
+ "Restriction",
+ "termDeletionDecorator",
]
# definition of an Infix operator class
@@ -218,18 +209,18 @@ class Infix:
OWL_NS = Namespace("http://www.w3.org/2002/07/owl#")
nsBinds = {
- 'skos': 'http://www.w3.org/2004/02/skos/core#',
- 'rdf': RDF,
- 'rdfs': RDFS,
- 'owl': OWL_NS,
- 'list': URIRef('http://www.w3.org/2000/10/swap/list#'),
- 'dc': "http://purl.org/dc/elements/1.1/",
+ "skos": "http://www.w3.org/2004/02/skos/core#",
+ "rdf": RDF,
+ "rdfs": RDFS,
+ "owl": OWL_NS,
+ "list": URIRef("http://www.w3.org/2000/10/swap/list#"),
+ "dc": "http://purl.org/dc/elements/1.1/",
}
def generateQName(graph, uri):
prefix, uri, localName = graph.compute_qname(classOrIdentifier(uri))
- return u':'.join([prefix, localName])
+ return u":".join([prefix, localName])
def classOrTerm(thing):
@@ -244,8 +235,9 @@ def classOrIdentifier(thing):
if isinstance(thing, (Property, Class)):
return thing.identifier
else:
- assert isinstance(thing, (URIRef, BNode)), \
+ assert isinstance(thing, (URIRef, BNode)), (
"Expecting a Class, Property, URIRef, or BNode.. not a %s" % thing
+ )
return thing
@@ -268,8 +260,9 @@ def manchesterSyntax(thing, store, boolean=None, transientList=False):
children = [manchesterSyntax(child, store) for child in thing]
else:
liveChildren = iter(Collection(store, thing))
- children = [manchesterSyntax(
- child, store) for child in Collection(store, thing)]
+ children = [
+ manchesterSyntax(child, store) for child in Collection(store, thing)
+ ]
if boolean == OWL_NS.intersectionOf:
childList = []
named = []
@@ -279,82 +272,77 @@ def manchesterSyntax(thing, store, boolean=None, transientList=False):
else:
childList.append(child)
if named:
+
def castToQName(x):
prefix, uri, localName = store.compute_qname(x)
- return ':'.join([prefix, localName])
+ return ":".join([prefix, localName])
if len(named) > 1:
- prefix = u'( ' + u' AND '.join(map(
- castToQName, named)) + u' )'
+ prefix = u"( " + u" AND ".join(map(castToQName, named)) + u" )"
else:
prefix = manchesterSyntax(named[0], store)
if childList:
- return str(prefix) + u' THAT ' + u' AND '.join(
- [str(manchesterSyntax(x, store)) for x in childList])
+ return (
+ str(prefix)
+ + u" THAT "
+ + u" AND ".join(
+ [str(manchesterSyntax(x, store)) for x in childList]
+ )
+ )
else:
return prefix
else:
- return u'( ' + u' AND '.join(
- [str(c) for c in children]) + u' )'
+ return u"( " + u" AND ".join([str(c) for c in children]) + u" )"
elif boolean == OWL_NS.unionOf:
- return u'( ' + u' OR '.join([str(c) for c in children]) + ' )'
+ return u"( " + u" OR ".join([str(c) for c in children]) + " )"
elif boolean == OWL_NS.oneOf:
- return u'{ ' + u' '.join([str(c) for c in children]) + ' }'
+ return u"{ " + u" ".join([str(c) for c in children]) + " }"
else:
assert boolean == OWL_NS.complementOf
- elif OWL_NS.Restriction in store.objects(
- subject=thing, predicate=RDF.type):
- prop = list(
- store.objects(subject=thing, predicate=OWL_NS.onProperty))[0]
+ elif OWL_NS.Restriction in store.objects(subject=thing, predicate=RDF.type):
+ prop = list(store.objects(subject=thing, predicate=OWL_NS.onProperty))[0]
prefix, uri, localName = store.compute_qname(prop)
- propString = u':'.join([prefix, localName])
+ propString = u":".join([prefix, localName])
label = first(store.objects(subject=prop, predicate=RDFS.label))
if label:
propString = "'%s'" % label
- for onlyClass in store.objects(
- subject=thing, predicate=OWL_NS.allValuesFrom):
- return u'( %s ONLY %s )' % (
- propString, manchesterSyntax(onlyClass, store))
+ for onlyClass in store.objects(subject=thing, predicate=OWL_NS.allValuesFrom):
+ return u"( %s ONLY %s )" % (propString, manchesterSyntax(onlyClass, store))
for val in store.objects(subject=thing, predicate=OWL_NS.hasValue):
- return u'( %s VALUE %s )' % (
- propString,
- manchesterSyntax(val, store))
- for someClass in store.objects(
- subject=thing, predicate=OWL_NS.someValuesFrom):
- return u'( %s SOME %s )' % (
- propString, manchesterSyntax(someClass, store))
- cardLookup = {OWL_NS.maxCardinality: 'MAX',
- OWL_NS.minCardinality: 'MIN',
- OWL_NS.cardinality: 'EQUALS'}
- for s, p, o in store.triples_choices(
- (thing, list(cardLookup.keys()), None)):
- return u'( %s %s %s )' % (
- propString, cardLookup[p], o)
+ return u"( %s VALUE %s )" % (propString, manchesterSyntax(val, store))
+ for someClass in store.objects(subject=thing, predicate=OWL_NS.someValuesFrom):
+ return u"( %s SOME %s )" % (propString, manchesterSyntax(someClass, store))
+ cardLookup = {
+ OWL_NS.maxCardinality: "MAX",
+ OWL_NS.minCardinality: "MIN",
+ OWL_NS.cardinality: "EQUALS",
+ }
+ for s, p, o in store.triples_choices((thing, list(cardLookup.keys()), None)):
+ return u"( %s %s %s )" % (propString, cardLookup[p], o)
compl = list(store.objects(subject=thing, predicate=OWL_NS.complementOf))
if compl:
- return '( NOT %s )' % (manchesterSyntax(compl[0], store))
+ return "( NOT %s )" % (manchesterSyntax(compl[0], store))
else:
- prolog = '\n'.join(
- ["PREFIX %s: <%s>" % (k, nsBinds[k]) for k in nsBinds])
- qstr = \
- prolog + \
- "\nSELECT ?p ?bool WHERE {?class a owl:Class; ?p ?bool ." + \
- "?bool rdf:first ?foo }"
+ prolog = "\n".join(["PREFIX %s: <%s>" % (k, nsBinds[k]) for k in nsBinds])
+ qstr = (
+ prolog
+ + "\nSELECT ?p ?bool WHERE {?class a owl:Class; ?p ?bool ."
+ + "?bool rdf:first ?foo }"
+ )
initb = {Variable("?class"): thing}
- for boolProp, col in \
- store.query(qstr, processor="sparql", initBindings=initb):
+ for boolProp, col in store.query(qstr, processor="sparql", initBindings=initb):
if not isinstance(thing, URIRef):
return manchesterSyntax(col, store, boolean=boolProp)
try:
prefix, uri, localName = store.compute_qname(thing)
- qname = u':'.join([prefix, localName])
+ qname = u":".join([prefix, localName])
except Exception:
if isinstance(thing, BNode):
return thing.n3()
return u"<" + thing + ">"
logger.debug(list(store.objects(subject=thing, predicate=RDF.type)))
raise
- return '[]' # +thing._id.encode('utf-8')+'</em>'
+ return "[]" # +thing._id.encode('utf-8')+'</em>'
label = first(Class(thing, graph=store).label)
if label:
return label
@@ -372,6 +360,7 @@ def termDeletionDecorator(prop):
def someFunc(func):
func.property = prop
return func
+
return someFunc
@@ -382,6 +371,7 @@ class TermDeletionHelper:
def __call__(self, f):
def _remover(inst):
inst.graph.remove((inst.identifier, self.prop, None))
+
return _remover
@@ -389,6 +379,7 @@ class Individual(object):
"""
A typed individual
"""
+
factoryGraph = Graph()
def serialize(self, graph):
@@ -404,9 +395,8 @@ class Individual(object):
self.qname = None
if not isinstance(self.identifier, BNode):
try:
- prefix, uri, localName = self.graph.compute_qname(
- self.identifier)
- self.qname = u':'.join([prefix, localName])
+ prefix, uri, localName = self.graph.compute_qname(self.identifier)
+ self.qname = u":".join([prefix, localName])
except:
pass
@@ -426,21 +416,18 @@ class Individual(object):
self.delete()
def _get_type(self):
- for _t in self.graph.objects(
- subject=self.identifier, predicate=RDF.type):
+ for _t in self.graph.objects(subject=self.identifier, predicate=RDF.type):
yield _t
def _set_type(self, kind):
if not kind:
return
if isinstance(kind, (Individual, Identifier)):
- self.graph.add(
- (self.identifier, RDF.type, classOrIdentifier(kind)))
+ self.graph.add((self.identifier, RDF.type, classOrIdentifier(kind)))
else:
for c in kind:
assert isinstance(c, (Individual, Identifier))
- self.graph.add(
- (self.identifier, RDF.type, classOrIdentifier(c)))
+ self.graph.add((self.identifier, RDF.type, classOrIdentifier(c)))
@TermDeletionHelper(RDF.type)
def _delete_type(self):
@@ -464,43 +451,43 @@ class Individual(object):
def _set_identifier(self, i):
assert i
if i != self.__identifier:
- oldStmtsOut = [(p, o) for s, p, o in self.graph.triples(
- (self.__identifier, None, None))]
- oldStmtsIn = [(s, p) for s, p, o in self.graph.triples(
- (None, None, self.__identifier))]
+ oldStmtsOut = [
+ (p, o)
+ for s, p, o in self.graph.triples((self.__identifier, None, None))
+ ]
+ oldStmtsIn = [
+ (s, p)
+ for s, p, o in self.graph.triples((None, None, self.__identifier))
+ ]
for p1, o1 in oldStmtsOut:
self.graph.remove((self.__identifier, p1, o1))
for s1, p1 in oldStmtsIn:
self.graph.remove((s1, p1, self.__identifier))
self.__identifier = i
- self.graph.addN(
- [(i, p1, o1, self.graph) for p1, o1 in oldStmtsOut])
+ self.graph.addN([(i, p1, o1, self.graph) for p1, o1 in oldStmtsOut])
self.graph.addN([(s1, p1, i, self.graph) for s1, p1 in oldStmtsIn])
if not isinstance(i, BNode):
try:
prefix, uri, localName = self.graph.compute_qname(i)
- self.qname = u':'.join([prefix, localName])
+ self.qname = u":".join([prefix, localName])
except:
pass
identifier = property(_get_identifier, _set_identifier)
def _get_sameAs(self):
- for _t in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.sameAs):
+ for _t in self.graph.objects(subject=self.identifier, predicate=OWL_NS.sameAs):
yield _t
def _set_sameAs(self, term):
# if not kind:
# return
if isinstance(term, (Individual, Identifier)):
- self.graph.add(
- (self.identifier, OWL_NS.sameAs, classOrIdentifier(term)))
+ self.graph.add((self.identifier, OWL_NS.sameAs, classOrIdentifier(term)))
else:
for c in term:
assert isinstance(c, (Individual, Identifier))
- self.graph.add(
- (self.identifier, OWL_NS.sameAs, classOrIdentifier(c)))
+ self.graph.add((self.identifier, OWL_NS.sameAs, classOrIdentifier(c)))
@TermDeletionHelper(OWL_NS.sameAs)
def _delete_sameAs(self):
@@ -509,7 +496,7 @@ class Individual(object):
sameAs = property(_get_sameAs, _set_sameAs, _delete_sameAs)
-ACE_NS = Namespace('http://attempto.ifi.uzh.ch/ace_lexicon#')
+ACE_NS = Namespace("http://attempto.ifi.uzh.ch/ace_lexicon#")
class AnnotatableTerms(Individual):
@@ -517,16 +504,13 @@ class AnnotatableTerms(Individual):
Terms in an OWL ontology with rdfs:label and rdfs:comment
"""
- def __init__(self,
- identifier,
- graph=None,
- nameAnnotation=None,
- nameIsLabel=False):
+ def __init__(self, identifier, graph=None, nameAnnotation=None, nameIsLabel=False):
super(AnnotatableTerms, self).__init__(identifier, graph)
if nameAnnotation:
self.setupACEAnnotations()
- self.PN_sgProp.extent = [(self.identifier,
- self.handleAnnotation(nameAnnotation))]
+ self.PN_sgProp.extent = [
+ (self.identifier, self.handleAnnotation(nameAnnotation))
+ ]
if nameIsLabel:
self.label = [nameAnnotation]
@@ -534,41 +518,42 @@ class AnnotatableTerms(Individual):
return val if isinstance(val, Literal) else Literal(val)
def setupACEAnnotations(self):
- self.graph.bind('ace', ACE_NS, override=False)
+ self.graph.bind("ace", ACE_NS, override=False)
# PN_sg singular form of a proper name ()
- self.PN_sgProp = Property(ACE_NS.PN_sg,
- baseType=OWL_NS.AnnotationProperty,
- graph=self.graph)
+ self.PN_sgProp = Property(
+ ACE_NS.PN_sg, baseType=OWL_NS.AnnotationProperty, graph=self.graph
+ )
# CN_sg singular form of a common noun
- self.CN_sgProp = Property(ACE_NS.CN_sg,
- baseType=OWL_NS.AnnotationProperty,
- graph=self.graph)
+ self.CN_sgProp = Property(
+ ACE_NS.CN_sg, baseType=OWL_NS.AnnotationProperty, graph=self.graph
+ )
# CN_pl plural form of a common noun
- self.CN_plProp = Property(ACE_NS.CN_pl,
- baseType=OWL_NS.AnnotationProperty,
- graph=self.graph)
+ self.CN_plProp = Property(
+ ACE_NS.CN_pl, baseType=OWL_NS.AnnotationProperty, graph=self.graph
+ )
# singular form of a transitive verb
- self.TV_sgProp = Property(ACE_NS.TV_sg,
- baseType=OWL_NS.AnnotationProperty,
- graph=self.graph)
+ self.TV_sgProp = Property(
+ ACE_NS.TV_sg, baseType=OWL_NS.AnnotationProperty, graph=self.graph
+ )
# plural form of a transitive verb
- self.TV_plProp = Property(ACE_NS.TV_pl,
- baseType=OWL_NS.AnnotationProperty,
- graph=self.graph)
+ self.TV_plProp = Property(
+ ACE_NS.TV_pl, baseType=OWL_NS.AnnotationProperty, graph=self.graph
+ )
# past participle form a transitive verb
- self.TV_vbgProp = Property(ACE_NS.TV_vbg,
- baseType=OWL_NS.AnnotationProperty,
- graph=self.graph)
+ self.TV_vbgProp = Property(
+ ACE_NS.TV_vbg, baseType=OWL_NS.AnnotationProperty, graph=self.graph
+ )
def _get_comment(self):
for comment in self.graph.objects(
- subject=self.identifier, predicate=RDFS.comment):
+ subject=self.identifier, predicate=RDFS.comment
+ ):
yield comment
def _set_comment(self, comment):
@@ -587,8 +572,7 @@ class AnnotatableTerms(Individual):
comment = property(_get_comment, _set_comment, _del_comment)
def _get_seeAlso(self):
- for sA in self.graph.objects(
- subject=self.identifier, predicate=RDFS.seeAlso):
+ for sA in self.graph.objects(subject=self.identifier, predicate=RDFS.seeAlso):
yield sA
def _set_seeAlso(self, seeAlsos):
@@ -600,11 +584,11 @@ class AnnotatableTerms(Individual):
@TermDeletionHelper(RDFS.seeAlso)
def _del_seeAlso(self):
pass
+
seeAlso = property(_get_seeAlso, _set_seeAlso, _del_seeAlso)
def _get_label(self):
- for label in self.graph.objects(
- subject=self.identifier, predicate=RDFS.label):
+ for label in self.graph.objects(subject=self.identifier, predicate=RDFS.label):
yield label
def _set_label(self, label):
@@ -613,8 +597,8 @@ class AnnotatableTerms(Individual):
if isinstance(label, Identifier):
self.graph.add((self.identifier, RDFS.label, label))
else:
- for l in label:
- self.graph.add((self.identifier, RDFS.label, l))
+ for l_ in label:
+ self.graph.add((self.identifier, RDFS.label, l_))
@TermDeletionHelper(RDFS.label)
def _delete_label(self):
@@ -636,8 +620,7 @@ class AnnotatableTerms(Individual):
class Ontology(AnnotatableTerms):
""" The owl ontology metadata"""
- def __init__(self,
- identifier=None, imports=None, comment=None, graph=None):
+ def __init__(self, identifier=None, imports=None, comment=None, graph=None):
super(Ontology, self).__init__(identifier, graph)
self.imports = imports and imports or []
self.comment = comment and comment or []
@@ -649,16 +632,17 @@ class Ontology(AnnotatableTerms):
def _get_imports(self):
for owl in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS['imports']):
+ subject=self.identifier, predicate=OWL_NS["imports"]
+ ):
yield owl
def _set_imports(self, other):
if not other:
return
for o in other:
- self.graph.add((self.identifier, OWL_NS['imports'], o))
+ self.graph.add((self.identifier, OWL_NS["imports"], o))
- @TermDeletionHelper(OWL_NS['imports'])
+ @TermDeletionHelper(OWL_NS["imports"])
def _del_imports(self):
pass
@@ -676,25 +660,32 @@ def AllClasses(graph):
def AllProperties(graph):
prevProps = set()
for s, p, o in graph.triples_choices(
- (None, RDF.type, [OWL_NS.SymmetricProperty,
- OWL_NS.FunctionalProperty,
- OWL_NS.InverseFunctionalProperty,
- OWL_NS.TransitiveProperty,
- OWL_NS.DatatypeProperty,
- OWL_NS.ObjectProperty,
- OWL_NS.AnnotationProperty])):
- if o in [OWL_NS.SymmetricProperty,
- OWL_NS.InverseFunctionalProperty,
- OWL_NS.TransitiveProperty,
- OWL_NS.ObjectProperty]:
+ (
+ None,
+ RDF.type,
+ [
+ OWL_NS.SymmetricProperty,
+ OWL_NS.FunctionalProperty,
+ OWL_NS.InverseFunctionalProperty,
+ OWL_NS.TransitiveProperty,
+ OWL_NS.DatatypeProperty,
+ OWL_NS.ObjectProperty,
+ OWL_NS.AnnotationProperty,
+ ],
+ )
+ ):
+ if o in [
+ OWL_NS.SymmetricProperty,
+ OWL_NS.InverseFunctionalProperty,
+ OWL_NS.TransitiveProperty,
+ OWL_NS.ObjectProperty,
+ ]:
bType = OWL_NS.ObjectProperty
else:
bType = OWL_NS.DatatypeProperty
if s not in prevProps:
prevProps.add(s)
- yield Property(s,
- graph=graph,
- baseType=bType)
+ yield Property(s, graph=graph, baseType=bType)
class ClassNamespaceFactory(Namespace):
@@ -711,20 +702,22 @@ class ClassNamespaceFactory(Namespace):
return self.term(name)
-CLASS_RELATIONS = set(
- OWL_NS.resourceProperties
-).difference([OWL_NS.onProperty,
- OWL_NS.allValuesFrom,
- OWL_NS.hasValue,
- OWL_NS.someValuesFrom,
- OWL_NS.inverseOf,
- OWL_NS.imports,
- OWL_NS.versionInfo,
- OWL_NS.backwardCompatibleWith,
- OWL_NS.incompatibleWith,
- OWL_NS.unionOf,
- OWL_NS.intersectionOf,
- OWL_NS.oneOf])
+CLASS_RELATIONS = set(OWL_NS.resourceProperties).difference(
+ [
+ OWL_NS.onProperty,
+ OWL_NS.allValuesFrom,
+ OWL_NS.hasValue,
+ OWL_NS.someValuesFrom,
+ OWL_NS.inverseOf,
+ OWL_NS.imports,
+ OWL_NS.versionInfo,
+ OWL_NS.backwardCompatibleWith,
+ OWL_NS.incompatibleWith,
+ OWL_NS.unionOf,
+ OWL_NS.intersectionOf,
+ OWL_NS.oneOf,
+ ]
+)
def ComponentTerms(cls):
@@ -736,10 +729,8 @@ def ComponentTerms(cls):
try:
cls = CastClass(cls, Individual.factoryGraph)
for s, p, innerClsId in cls.factoryGraph.triples_choices(
- (cls.identifier,
- [OWL_NS.allValuesFrom,
- OWL_NS.someValuesFrom],
- None)):
+ (cls.identifier, [OWL_NS.allValuesFrom, OWL_NS.someValuesFrom], None)
+ ):
innerCls = Class(innerClsId, skipOWLClassMembership=True)
if isinstance(innerClsId, BNode):
for _c in ComponentTerms(innerCls):
@@ -766,13 +757,10 @@ def ComponentTerms(cls):
else:
yield innerCls
for s, p, o in cls.factoryGraph.triples_choices(
- (classOrIdentifier(cls),
- CLASS_RELATIONS,
- None)
+ (classOrIdentifier(cls), CLASS_RELATIONS, None)
):
if isinstance(o, BNode):
- for _c in ComponentTerms(
- CastClass(o, Individual.factoryGraph)):
+ for _c in ComponentTerms(CastClass(o, Individual.factoryGraph)):
yield _c
else:
yield innerCls
@@ -823,29 +811,29 @@ def DeepClassClear(classToPrune):
>>> list(g.triples((otherClass.identifier, None, None))) #doctest: +SKIP
[]
"""
+
def deepClearIfBNode(_class):
if isinstance(classOrIdentifier(_class), BNode):
DeepClassClear(_class)
+
classToPrune = CastClass(classToPrune, Individual.factoryGraph)
for c in classToPrune.subClassOf:
deepClearIfBNode(c)
classToPrune.graph.remove((classToPrune.identifier, RDFS.subClassOf, None))
for c in classToPrune.equivalentClass:
deepClearIfBNode(c)
- classToPrune.graph.remove(
- (classToPrune.identifier, OWL_NS.equivalentClass, None))
+ classToPrune.graph.remove((classToPrune.identifier, OWL_NS.equivalentClass, None))
inverseClass = classToPrune.complementOf
if inverseClass:
- classToPrune.graph.remove(
- (classToPrune.identifier, OWL_NS.complementOf, None))
+ classToPrune.graph.remove((classToPrune.identifier, OWL_NS.complementOf, None))
deepClearIfBNode(inverseClass)
if isinstance(classToPrune, BooleanClass):
for c in classToPrune:
deepClearIfBNode(c)
classToPrune.clear()
- classToPrune.graph.remove((classToPrune.identifier,
- classToPrune._operator,
- None))
+ classToPrune.graph.remove(
+ (classToPrune.identifier, classToPrune._operator, None)
+ )
class MalformedClass(Exception):
@@ -858,40 +846,36 @@ class MalformedClass(Exception):
def CastClass(c, graph=None):
graph = graph is None and c.factoryGraph or graph
- for kind in graph.objects(subject=classOrIdentifier(c),
- predicate=RDF.type):
+ for kind in graph.objects(subject=classOrIdentifier(c), predicate=RDF.type):
if kind == OWL_NS.Restriction:
- kwArgs = {'identifier': classOrIdentifier(c),
- 'graph': graph}
- for s, p, o in graph.triples((classOrIdentifier(c),
- None,
- None)):
+ kwArgs = {"identifier": classOrIdentifier(c), "graph": graph}
+ for s, p, o in graph.triples((classOrIdentifier(c), None, None)):
if p != RDF.type:
if p == OWL_NS.onProperty:
- kwArgs['onProperty'] = o
+ kwArgs["onProperty"] = o
else:
if p not in Restriction.restrictionKinds:
continue
kwArgs[str(p.split(OWL_NS)[-1])] = o
- if not set([str(i.split(OWL_NS)[-1])
- for i in Restriction.restrictionKinds]
- ).intersection(kwArgs):
+ if not set(
+ [str(i.split(OWL_NS)[-1]) for i in Restriction.restrictionKinds]
+ ).intersection(kwArgs):
raise MalformedClass("Malformed owl:Restriction")
return Restriction(**kwArgs)
else:
- for s, p, o in graph.triples_choices((classOrIdentifier(c),
- [OWL_NS.intersectionOf,
- OWL_NS.unionOf,
- OWL_NS.oneOf],
- None)):
+ for s, p, o in graph.triples_choices(
+ (
+ classOrIdentifier(c),
+ [OWL_NS.intersectionOf, OWL_NS.unionOf, OWL_NS.oneOf],
+ None,
+ )
+ ):
if p == OWL_NS.oneOf:
return EnumeratedClass(classOrIdentifier(c), graph=graph)
else:
- return BooleanClass(
- classOrIdentifier(c), operator=p, graph=graph)
+ return BooleanClass(classOrIdentifier(c), operator=p, graph=graph)
# assert (classOrIdentifier(c),RDF.type,OWL_NS.Class) in graph
- return Class(
- classOrIdentifier(c), graph=graph, skipOWLClassMembership=True)
+ return Class(classOrIdentifier(c), graph=graph, skipOWLClassMembership=True)
class Class(AnnotatableTerms):
@@ -947,28 +931,37 @@ class Class(AnnotatableTerms):
CN_plProp = nounAnnotations
if CN_sgProp:
- self.CN_sgProp.extent = [(self.identifier,
- self.handleAnnotation(CN_sgProp))]
+ self.CN_sgProp.extent = [
+ (self.identifier, self.handleAnnotation(CN_sgProp))
+ ]
if CN_plProp:
- self.CN_plProp.extent = [(self.identifier,
- self.handleAnnotation(CN_plProp))]
-
- def __init__(self, identifier=None, subClassOf=None, equivalentClass=None,
- disjointWith=None, complementOf=None, graph=None,
- skipOWLClassMembership=False, comment=None,
- nounAnnotations=None,
- nameAnnotation=None,
- nameIsLabel=False):
- super(Class, self).__init__(identifier, graph,
- nameAnnotation, nameIsLabel)
+ self.CN_plProp.extent = [
+ (self.identifier, self.handleAnnotation(CN_plProp))
+ ]
+
+ def __init__(
+ self,
+ identifier=None,
+ subClassOf=None,
+ equivalentClass=None,
+ disjointWith=None,
+ complementOf=None,
+ graph=None,
+ skipOWLClassMembership=False,
+ comment=None,
+ nounAnnotations=None,
+ nameAnnotation=None,
+ nameIsLabel=False,
+ ):
+ super(Class, self).__init__(identifier, graph, nameAnnotation, nameIsLabel)
if nounAnnotations:
self.setupNounAnnotations(nounAnnotations)
- if not skipOWLClassMembership \
- and (self.identifier, RDF.type, OWL_NS.Class) \
- not in self.graph and \
- (self.identifier, RDF.type, OWL_NS.Restriction) \
- not in self.graph:
+ if (
+ not skipOWLClassMembership
+ and (self.identifier, RDF.type, OWL_NS.Class) not in self.graph
+ and (self.identifier, RDF.type, OWL_NS.Restriction) not in self.graph
+ ):
self.graph.add((self.identifier, RDF.type, OWL_NS.Class))
self.subClassOf = subClassOf and subClassOf or []
@@ -979,9 +972,9 @@ class Class(AnnotatableTerms):
self.comment = comment and comment or []
def _get_extent(self, graph=None):
- for member in (
- graph is None and self.graph or graph).subjects(
- predicate=RDF.type, object=self.identifier):
+ for member in (graph is None and self.graph or graph).subjects(
+ predicate=RDF.type, object=self.identifier
+ ):
yield member
def _set_extent(self, other):
@@ -1003,7 +996,7 @@ class Class(AnnotatableTerms):
annotation = property(_get_annotation, lambda x: x)
def _get_extentQuery(self):
- return (Variable('CLASS'), RDF.type, self.identifier)
+ return (Variable("CLASS"), RDF.type, self.identifier)
def _set_extentQuery(self, other):
pass
@@ -1030,8 +1023,7 @@ class Class(AnnotatableTerms):
def __isub__(self, other):
assert isinstance(other, Class)
- self.graph.remove(
- (classOrIdentifier(other), RDFS.subClassOf, self.identifier))
+ self.graph.remove((classOrIdentifier(other), RDFS.subClassOf, self.identifier))
return self
def __invert__(self):
@@ -1046,7 +1038,8 @@ class Class(AnnotatableTerms):
this class and 'other' and return it
"""
return BooleanClass(
- operator=OWL_NS.unionOf, members=[self, other], graph=self.graph)
+ operator=OWL_NS.unionOf, members=[self, other], graph=self.graph
+ )
def __and__(self, other):
"""
@@ -1074,22 +1067,20 @@ class Class(AnnotatableTerms):
True
"""
return BooleanClass(
- operator=OWL_NS.intersectionOf,
- members=[self, other], graph=self.graph)
+ operator=OWL_NS.intersectionOf, members=[self, other], graph=self.graph
+ )
def _get_subClassOf(self):
for anc in self.graph.objects(
- subject=self.identifier, predicate=RDFS.subClassOf):
- yield Class(anc,
- graph=self.graph,
- skipOWLClassMembership=True)
+ subject=self.identifier, predicate=RDFS.subClassOf
+ ):
+ yield Class(anc, graph=self.graph, skipOWLClassMembership=True)
def _set_subClassOf(self, other):
if not other:
return
for sc in other:
- self.graph.add(
- (self.identifier, RDFS.subClassOf, classOrIdentifier(sc)))
+ self.graph.add((self.identifier, RDFS.subClassOf, classOrIdentifier(sc)))
@TermDeletionHelper(RDFS.subClassOf)
def _del_subClassOf(self):
@@ -1099,45 +1090,48 @@ class Class(AnnotatableTerms):
def _get_equivalentClass(self):
for ec in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.equivalentClass):
+ subject=self.identifier, predicate=OWL_NS.equivalentClass
+ ):
yield Class(ec, graph=self.graph)
def _set_equivalentClass(self, other):
if not other:
return
for sc in other:
- self.graph.add((self.identifier,
- OWL_NS.equivalentClass, classOrIdentifier(sc)))
+ self.graph.add(
+ (self.identifier, OWL_NS.equivalentClass, classOrIdentifier(sc))
+ )
@TermDeletionHelper(OWL_NS.equivalentClass)
def _del_equivalentClass(self):
pass
equivalentClass = property(
- _get_equivalentClass, _set_equivalentClass, _del_equivalentClass)
+ _get_equivalentClass, _set_equivalentClass, _del_equivalentClass
+ )
def _get_disjointWith(self):
for dc in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.disjointWith):
+ subject=self.identifier, predicate=OWL_NS.disjointWith
+ ):
yield Class(dc, graph=self.graph)
def _set_disjointWith(self, other):
if not other:
return
for c in other:
- self.graph.add(
- (self.identifier, OWL_NS.disjointWith, classOrIdentifier(c)))
+ self.graph.add((self.identifier, OWL_NS.disjointWith, classOrIdentifier(c)))
@TermDeletionHelper(OWL_NS.disjointWith)
def _del_disjointWith(self):
pass
- disjointWith = property(
- _get_disjointWith, _set_disjointWith, _del_disjointWith)
+ disjointWith = property(_get_disjointWith, _set_disjointWith, _del_disjointWith)
def _get_complementOf(self):
- comp = list(self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.complementOf))
+ comp = list(
+ self.graph.objects(subject=self.identifier, predicate=OWL_NS.complementOf)
+ )
if not comp:
return None
elif len(comp) == 1:
@@ -1148,15 +1142,13 @@ class Class(AnnotatableTerms):
def _set_complementOf(self, other):
if not other:
return
- self.graph.add(
- (self.identifier, OWL_NS.complementOf, classOrIdentifier(other)))
+ self.graph.add((self.identifier, OWL_NS.complementOf, classOrIdentifier(other)))
@TermDeletionHelper(OWL_NS.complementOf)
def _del_complementOf(self):
pass
- complementOf = property(
- _get_complementOf, _set_complementOf, _del_complementOf)
+ complementOf = property(_get_complementOf, _set_complementOf, _del_complementOf)
def _get_parents(self):
"""
@@ -1187,24 +1179,22 @@ class Class(AnnotatableTerms):
[Class: ex:Parent , Class: ex:Male ]
"""
- for parent in itertools.chain(self.subClassOf,
- self.equivalentClass):
+ for parent in itertools.chain(self.subClassOf, self.equivalentClass):
yield parent
link = first(self.factoryGraph.subjects(RDF.first, self.identifier))
if link:
- listSiblings = list(self.factoryGraph.transitive_subjects(RDF.rest,
- link))
+ listSiblings = list(self.factoryGraph.transitive_subjects(RDF.rest, link))
if listSiblings:
collectionHead = listSiblings[-1]
else:
collectionHead = link
- for disjCls in self.factoryGraph.subjects(
- OWL_NS.unionOf, collectionHead):
+ for disjCls in self.factoryGraph.subjects(OWL_NS.unionOf, collectionHead):
if isinstance(disjCls, URIRef):
yield Class(disjCls, skipOWLClassMembership=True)
for rdfList in self.factoryGraph.objects(
- self.identifier, OWL_NS.intersectionOf):
+ self.identifier, OWL_NS.intersectionOf
+ ):
for member in OWLRDFListProxy([rdfList], graph=self.factoryGraph):
if isinstance(member, URIRef):
yield Class(member, skipOWLClassMembership=True)
@@ -1217,10 +1207,8 @@ class Class(AnnotatableTerms):
# sc = list(self.subClassOf)
ec = list(self.equivalentClass)
for boolClass, p, rdfList in self.graph.triples_choices(
- (self.identifier,
- [OWL_NS.intersectionOf,
- OWL_NS.unionOf],
- None)):
+ (self.identifier, [OWL_NS.intersectionOf, OWL_NS.unionOf], None)
+ ):
ec.append(manchesterSyntax(rdfList, self.graph, boolean=p))
for e in ec:
return False
@@ -1229,8 +1217,7 @@ class Class(AnnotatableTerms):
return True
def subSumpteeIds(self):
- for s in self.graph.subjects(
- predicate=RDFS.subClassOf, object=self.identifier):
+ for s in self.graph.subjects(predicate=RDFS.subClassOf, object=self.identifier):
yield s
# def __iter__(self):
@@ -1246,62 +1233,80 @@ class Class(AnnotatableTerms):
sc = list(self.subClassOf)
ec = list(self.equivalentClass)
for boolClass, p, rdfList in self.graph.triples_choices(
- (self.identifier,
- [OWL_NS.intersectionOf,
- OWL_NS.unionOf],
- None)):
+ (self.identifier, [OWL_NS.intersectionOf, OWL_NS.unionOf], None)
+ ):
ec.append(manchesterSyntax(rdfList, self.graph, boolean=p))
dc = list(self.disjointWith)
c = self.complementOf
if c:
dc.append(c)
- klassKind = ''
+ klassKind = ""
label = list(self.graph.objects(self.identifier, RDFS.label))
- label = label and '(' + label[0] + ')' or ''
+ label = label and "(" + label[0] + ")" or ""
if sc:
if full:
- scJoin = '\n '
+ scJoin = "\n "
else:
- scJoin = ', '
+ scJoin = ", "
necStatements = [
- isinstance(s, Class) and isinstance(self.identifier, BNode)
- and repr(CastClass(s, self.graph)) or
+ isinstance(s, Class)
+ and isinstance(self.identifier, BNode)
+ and repr(CastClass(s, self.graph))
+ or
# repr(BooleanClass(classOrIdentifier(s),
# operator=None,
# graph=self.graph)) or
- manchesterSyntax(classOrIdentifier(s), self.graph) for s in sc]
+ manchesterSyntax(classOrIdentifier(s), self.graph)
+ for s in sc
+ ]
if necStatements:
klassKind = "Primitive Type %s" % label
- exprs.append("SubClassOf: %s" % scJoin.join(
- [str(n) for n in necStatements]))
+ exprs.append(
+ "SubClassOf: %s" % scJoin.join([str(n) for n in necStatements])
+ )
if full:
exprs[-1] = "\n " + exprs[-1]
if ec:
nec_SuffStatements = [
- isinstance(s, str) and s
- or manchesterSyntax(classOrIdentifier(s), self.graph) for s in ec]
+ isinstance(s, str)
+ and s
+ or manchesterSyntax(classOrIdentifier(s), self.graph)
+ for s in ec
+ ]
if nec_SuffStatements:
klassKind = "A Defined Class %s" % label
- exprs.append("EquivalentTo: %s" % ', '.join(nec_SuffStatements))
+ exprs.append("EquivalentTo: %s" % ", ".join(nec_SuffStatements))
if full:
exprs[-1] = "\n " + exprs[-1]
if dc:
- exprs.append("DisjointWith %s\n" % '\n '.join(
- [manchesterSyntax(classOrIdentifier(s), self.graph)
- for s in dc]))
+ exprs.append(
+ "DisjointWith %s\n"
+ % "\n ".join(
+ [manchesterSyntax(classOrIdentifier(s), self.graph) for s in dc]
+ )
+ )
if full:
exprs[-1] = "\n " + exprs[-1]
descr = list(self.graph.objects(self.identifier, RDFS.comment))
if full and normalization:
- klassDescr = klassKind and '\n ## %s ##' % klassKind +\
- (descr and "\n %s" % descr[0] or '') + \
- ' . '.join(exprs) or ' . '.join(exprs)
+ klassDescr = (
+ klassKind
+ and "\n ## %s ##" % klassKind
+ + (descr and "\n %s" % descr[0] or "")
+ + " . ".join(exprs)
+ or " . ".join(exprs)
+ )
else:
- klassDescr = full and (descr and "\n %s" %
- descr[0] or '') or '' + ' . '.join(exprs)
- return (isinstance(self.identifier, BNode) and
- "Some Class " or
- "Class: %s " % self.qname) + klassDescr
+ klassDescr = (
+ full
+ and (descr and "\n %s" % descr[0] or "")
+ or "" + " . ".join(exprs)
+ )
+ return (
+ isinstance(self.identifier, BNode)
+ and "Some Class "
+ or "Class: %s " % self.qname
+ ) + klassDescr
class OWLRDFListProxy(object):
@@ -1315,10 +1320,10 @@ class OWLRDFListProxy(object):
if member not in self._rdfList:
self._rdfList.append(classOrIdentifier(member))
else:
- self._rdfList = Collection(self.graph, BNode(),
- [classOrIdentifier(m) for m in members])
- self.graph.add(
- (self.identifier, self._operator, self._rdfList.uri))
+ self._rdfList = Collection(
+ self.graph, BNode(), [classOrIdentifier(m) for m in members]
+ )
+ self.graph.add((self.identifier, self._operator, self._rdfList.uri))
def __eq__(self, other):
"""
@@ -1412,6 +1417,7 @@ class EnumeratedClass(OWLRDFListProxy, Class):
<BLANKLINE>
<BLANKLINE>
"""
+
_operator = OWL_NS.oneOf
def isPrimitive(self):
@@ -1420,16 +1426,16 @@ class EnumeratedClass(OWLRDFListProxy, Class):
def __init__(self, identifier=None, members=None, graph=None):
Class.__init__(self, identifier, graph=graph)
members = members and members or []
- rdfList = list(self.graph.objects(
- predicate=OWL_NS.oneOf, subject=self.identifier))
+ rdfList = list(
+ self.graph.objects(predicate=OWL_NS.oneOf, subject=self.identifier)
+ )
OWLRDFListProxy.__init__(self, rdfList, members)
def __repr__(self):
"""
Returns the Manchester Syntax equivalent for this class
"""
- return manchesterSyntax(
- self._rdfList.uri, self.graph, boolean=self._operator)
+ return manchesterSyntax(self._rdfList.uri, self.graph, boolean=self._operator)
def serialize(self, graph):
clonedList = Collection(graph, BNode())
@@ -1475,10 +1481,11 @@ class BooleanClassExtentHelper:
def _getExtent():
for c in Individual.factoryGraph.subjects(self.operator):
yield BooleanClass(c, operator=self.operator)
+
return _getExtent
-class Callable():
+class Callable:
def __init__(self, anycallable):
self.__call__ = anycallable
@@ -1490,38 +1497,40 @@ class BooleanClass(OWLRDFListProxy, Class):
owl:complementOf is an attribute of Class, however
"""
+
@BooleanClassExtentHelper(OWL_NS.intersectionOf)
@Callable
def getIntersections():
pass
+
getIntersections = Callable(getIntersections)
@BooleanClassExtentHelper(OWL_NS.unionOf)
@Callable
def getUnions():
pass
+
getUnions = Callable(getUnions)
- def __init__(self, identifier=None, operator=OWL_NS.intersectionOf,
- members=None, graph=None):
+ def __init__(
+ self, identifier=None, operator=OWL_NS.intersectionOf, members=None, graph=None
+ ):
if operator is None:
props = []
- for s, p, o in graph.triples_choices((identifier,
- [OWL_NS.intersectionOf,
- OWL_NS.unionOf],
- None)):
+ for s, p, o in graph.triples_choices(
+ (identifier, [OWL_NS.intersectionOf, OWL_NS.unionOf], None)
+ ):
props.append(p)
operator = p
assert len(props) == 1, repr(props)
Class.__init__(self, identifier, graph=graph)
- assert operator in [OWL_NS.intersectionOf,
- OWL_NS.unionOf], str(operator)
+ assert operator in [OWL_NS.intersectionOf, OWL_NS.unionOf], str(operator)
self._operator = operator
- rdfList = list(
- self.graph.objects(predicate=operator, subject=self.identifier))
- assert not members or not rdfList, \
- "This is a previous boolean class description!" + \
- repr(Collection(self.graph, rdfList[0]).n3())
+ rdfList = list(self.graph.objects(predicate=operator, subject=self.identifier))
+ assert not members or not rdfList, (
+ "This is a previous boolean class description!"
+ + repr(Collection(self.graph, rdfList[0]).n3())
+ )
OWLRDFListProxy.__init__(self, rdfList, members)
def copy(self):
@@ -1529,7 +1538,8 @@ class BooleanClass(OWLRDFListProxy, Class):
Create a copy of this class
"""
copyOfClass = BooleanClass(
- operator=self._operator, members=list(self), graph=self.graph)
+ operator=self._operator, members=list(self), graph=self.graph
+ )
return copyOfClass
def serialize(self, graph):
@@ -1569,12 +1579,11 @@ class BooleanClass(OWLRDFListProxy, Class):
>>> testClass #doctest: +SKIP
( ex:Fire OR ex:Water )
>>> try: testClass.changeOperator(OWL_NS.unionOf)
- ... except Exception%s: print(e)
+ ... except Exception as e: print(e)
The new operator is already being used!
- """ % 'as e' if PY3 else ', e'
- assert newOperator != self._operator, \
- "The new operator is already being used!"
+ """
+ assert newOperator != self._operator, "The new operator is already being used!"
self.graph.remove((self.identifier, self._operator, self._rdfList.uri))
self.graph.add((self.identifier, newOperator, self._rdfList.uri))
self._operator = newOperator
@@ -1583,8 +1592,7 @@ class BooleanClass(OWLRDFListProxy, Class):
"""
Returns the Manchester Syntax equivalent for this class
"""
- return manchesterSyntax(
- self._rdfList.uri, self.graph, boolean=self._operator)
+ return manchesterSyntax(self._rdfList.uri, self.graph, boolean=self._operator)
def __or__(self, other):
"""
@@ -1613,30 +1621,37 @@ class Restriction(Class):
{ individualRestrictionComponent } ')'
"""
- restrictionKinds = [OWL_NS.allValuesFrom,
- OWL_NS.someValuesFrom,
- OWL_NS.hasValue,
- OWL_NS.maxCardinality,
- OWL_NS.minCardinality]
-
- def __init__(self,
- onProperty,
- graph=Graph(),
- allValuesFrom=None,
- someValuesFrom=None,
- value=None,
- cardinality=None,
- maxCardinality=None,
- minCardinality=None,
- identifier=None):
- super(Restriction, self).__init__(identifier,
- graph=graph,
- skipOWLClassMembership=True)
- if (self.identifier,
+ restrictionKinds = [
+ OWL_NS.allValuesFrom,
+ OWL_NS.someValuesFrom,
+ OWL_NS.hasValue,
+ OWL_NS.maxCardinality,
+ OWL_NS.minCardinality,
+ ]
+
+ def __init__(
+ self,
+ onProperty,
+ graph=Graph(),
+ allValuesFrom=None,
+ someValuesFrom=None,
+ value=None,
+ cardinality=None,
+ maxCardinality=None,
+ minCardinality=None,
+ identifier=None,
+ ):
+ super(Restriction, self).__init__(
+ identifier, graph=graph, skipOWLClassMembership=True
+ )
+ if (
+ self.identifier,
OWL_NS.onProperty,
- propertyOrIdentifier(onProperty)) not in graph:
- graph.add((self.identifier, OWL_NS.onProperty,
- propertyOrIdentifier(onProperty)))
+ propertyOrIdentifier(onProperty),
+ ) not in graph:
+ graph.add(
+ (self.identifier, OWL_NS.onProperty, propertyOrIdentifier(onProperty))
+ )
self.onProperty = onProperty
restrTypes = [
(allValuesFrom, OWL_NS.allValuesFrom),
@@ -1644,7 +1659,8 @@ class Restriction(Class):
(value, OWL_NS.hasValue),
(cardinality, OWL_NS.cardinality),
(maxCardinality, OWL_NS.maxCardinality),
- (minCardinality, OWL_NS.minCardinality)]
+ (minCardinality, OWL_NS.minCardinality),
+ ]
validRestrProps = [(i, oTerm) for (i, oTerm) in restrTypes if i]
assert len(validRestrProps)
restrictionRange, restrictionType = validRestrProps.pop()
@@ -1654,13 +1670,11 @@ class Restriction(Class):
elif isinstance(restrictionRange, Class):
self.restrictionRange = classOrIdentifier(restrictionRange)
else:
- self.restrictionRange = first(self.graph.objects(self.identifier,
- restrictionType))
- if (self.identifier,
- restrictionType,
- self.restrictionRange) not in self.graph:
- self.graph.add(
- (self.identifier, restrictionType, self.restrictionRange))
+ self.restrictionRange = first(
+ self.graph.objects(self.identifier, restrictionType)
+ )
+ if (self.identifier, restrictionType, self.restrictionRange) not in self.graph:
+ self.graph.add((self.identifier, restrictionType, self.restrictionRange))
assert self.restrictionRange is not None, Class(self.identifier)
if (self.identifier, RDF.type, OWL_NS.Restriction) not in self.graph:
self.graph.add((self.identifier, RDF.type, OWL_NS.Restriction))
@@ -1690,8 +1704,7 @@ class Restriction(Class):
[rdflib.term.URIRef(
u'http://www.w3.org/2002/07/owl#DatatypeProperty')]
"""
- Property(
- self.onProperty, graph=self.graph, baseType=None).serialize(graph)
+ Property(self.onProperty, graph=self.graph, baseType=None).serialize(graph)
for s, p, o in self.graph.triples((self.identifier, None, None)):
graph.add((s, p, o))
if p in [OWL_NS.allValuesFrom, OWL_NS.someValuesFrom]:
@@ -1710,18 +1723,20 @@ class Restriction(Class):
"""
assert isinstance(other, Class), repr(other) + repr(type(other))
if isinstance(other, Restriction):
- return other.onProperty == self.onProperty and \
- other.restrictionRange == self.restrictionRange
+ return (
+ other.onProperty == self.onProperty
+ and other.restrictionRange == self.restrictionRange
+ )
else:
return False
def _get_onProperty(self):
- return list(self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.onProperty))[0]
+ return list(
+ self.graph.objects(subject=self.identifier, predicate=OWL_NS.onProperty)
+ )[0]
def _set_onProperty(self, prop):
- triple = (
- self.identifier, OWL_NS.onProperty, propertyOrIdentifier(prop))
+ triple = (self.identifier, OWL_NS.onProperty, propertyOrIdentifier(prop))
if not prop:
return
elif triple in self.graph:
@@ -1737,13 +1752,13 @@ class Restriction(Class):
def _get_allValuesFrom(self):
for i in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.allValuesFrom):
+ subject=self.identifier, predicate=OWL_NS.allValuesFrom
+ ):
return Class(i, graph=self.graph)
return None
def _set_allValuesFrom(self, other):
- triple = (
- self.identifier, OWL_NS.allValuesFrom, classOrIdentifier(other))
+ triple = (self.identifier, OWL_NS.allValuesFrom, classOrIdentifier(other))
if not other:
return
elif triple in self.graph:
@@ -1755,18 +1770,17 @@ class Restriction(Class):
def _del_allValuesFrom(self):
pass
- allValuesFrom = property(
- _get_allValuesFrom, _set_allValuesFrom, _del_allValuesFrom)
+ allValuesFrom = property(_get_allValuesFrom, _set_allValuesFrom, _del_allValuesFrom)
def _get_someValuesFrom(self):
for i in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.someValuesFrom):
+ subject=self.identifier, predicate=OWL_NS.someValuesFrom
+ ):
return Class(i, graph=self.graph)
return None
def _set_someValuesFrom(self, other):
- triple = (
- self.identifier, OWL_NS.someValuesFrom, classOrIdentifier(other))
+ triple = (self.identifier, OWL_NS.someValuesFrom, classOrIdentifier(other))
if not other:
return
elif triple in self.graph:
@@ -1779,11 +1793,11 @@ class Restriction(Class):
pass
someValuesFrom = property(
- _get_someValuesFrom, _set_someValuesFrom, _del_someValuesFrom)
+ _get_someValuesFrom, _set_someValuesFrom, _del_someValuesFrom
+ )
def _get_hasValue(self):
- for i in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.hasValue):
+ for i in self.graph.objects(subject=self.identifier, predicate=OWL_NS.hasValue):
return Class(i, graph=self.graph)
return None
@@ -1804,13 +1818,13 @@ class Restriction(Class):
def _get_cardinality(self):
for i in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.cardinality):
+ subject=self.identifier, predicate=OWL_NS.cardinality
+ ):
return Class(i, graph=self.graph)
return None
def _set_cardinality(self, other):
- triple = (
- self.identifier, OWL_NS.cardinality, classOrIdentifier(other))
+ triple = (self.identifier, OWL_NS.cardinality, classOrIdentifier(other))
if not other:
return
elif triple in self.graph:
@@ -1822,18 +1836,17 @@ class Restriction(Class):
def _del_cardinality(self):
pass
- cardinality = property(
- _get_cardinality, _set_cardinality, _del_cardinality)
+ cardinality = property(_get_cardinality, _set_cardinality, _del_cardinality)
def _get_maxCardinality(self):
for i in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.maxCardinality):
+ subject=self.identifier, predicate=OWL_NS.maxCardinality
+ ):
return Class(i, graph=self.graph)
return None
def _set_maxCardinality(self, other):
- triple = (
- self.identifier, OWL_NS.maxCardinality, classOrIdentifier(other))
+ triple = (self.identifier, OWL_NS.maxCardinality, classOrIdentifier(other))
if not other:
return
elif triple in self.graph:
@@ -1846,17 +1859,18 @@ class Restriction(Class):
pass
maxCardinality = property(
- _get_maxCardinality, _set_maxCardinality, _del_maxCardinality)
+ _get_maxCardinality, _set_maxCardinality, _del_maxCardinality
+ )
def _get_minCardinality(self):
for i in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.minCardinality):
+ subject=self.identifier, predicate=OWL_NS.minCardinality
+ ):
return Class(i, graph=self.graph)
return None
def _set_minCardinality(self, other):
- triple = (
- self.identifier, OWL_NS.minCardinality, classOrIdentifier(other))
+ triple = (self.identifier, OWL_NS.minCardinality, classOrIdentifier(other))
if not other:
return
elif triple in self.graph:
@@ -1869,12 +1883,13 @@ class Restriction(Class):
pass
minCardinality = property(
- _get_minCardinality, _set_minCardinality, _del_minCardinality)
+ _get_minCardinality, _set_minCardinality, _del_minCardinality
+ )
def restrictionKind(self):
- for p in self.graph.triple_choices((self.identifier,
- self.restrictionKinds,
- None)):
+ for p in self.graph.triple_choices(
+ (self.identifier, self.restrictionKinds, None)
+ ):
return p.split(OWL_NS)[-1]
raise
@@ -1884,24 +1899,28 @@ class Restriction(Class):
"""
return manchesterSyntax(self.identifier, self.graph)
-### Infix Operators ###
+# Infix Operators #
-some = Infix(lambda prop, _class: Restriction(prop, graph=_class.graph,
- someValuesFrom=_class))
-only = Infix(lambda prop, _class: Restriction(prop, graph=_class.graph,
- allValuesFrom=_class))
-max = Infix(lambda prop, _class: Restriction(prop, graph=prop.graph,
- maxCardinality=_class))
-min = Infix(lambda prop, _class: Restriction(prop, graph=prop.graph,
- minCardinality=_class))
-exactly = Infix(lambda prop, _class: Restriction(prop, graph=prop.graph,
- cardinality=_class))
-value = Infix(
- lambda prop, _class: Restriction(prop, graph=prop.graph, value=_class))
-PropertyAbstractSyntax =\
- """
+some = Infix(
+ lambda prop, _class: Restriction(prop, graph=_class.graph, someValuesFrom=_class)
+)
+only = Infix(
+ lambda prop, _class: Restriction(prop, graph=_class.graph, allValuesFrom=_class)
+)
+max = Infix(
+ lambda prop, _class: Restriction(prop, graph=prop.graph, maxCardinality=_class)
+)
+min = Infix(
+ lambda prop, _class: Restriction(prop, graph=prop.graph, minCardinality=_class)
+)
+exactly = Infix(
+ lambda prop, _class: Restriction(prop, graph=prop.graph, cardinality=_class)
+)
+value = Infix(lambda prop, _class: Restriction(prop, graph=prop.graph, value=_class))
+
+PropertyAbstractSyntax = """
%s( %s { %s }
%s
{ 'super(' datavaluedPropertyID ')'} ['Functional']
@@ -1932,33 +1951,40 @@ class Property(AnnotatableTerms):
TV_plProp = verbAnnotations
TV_vbg = verbAnnotations
if TV_sgProp:
- self.TV_sgProp.extent = [(self.identifier,
- self.handleAnnotation(TV_sgProp))]
+ self.TV_sgProp.extent = [
+ (self.identifier, self.handleAnnotation(TV_sgProp))
+ ]
if TV_plProp:
- self.TV_plProp.extent = [(self.identifier,
- self.handleAnnotation(TV_plProp))]
+ self.TV_plProp.extent = [
+ (self.identifier, self.handleAnnotation(TV_plProp))
+ ]
if TV_vbg:
- self.TV_vbgProp.extent = [(self.identifier,
- self.handleAnnotation(TV_vbg))]
+ self.TV_vbgProp.extent = [(self.identifier, self.handleAnnotation(TV_vbg))]
def __init__(
- self, identifier=None, graph=None, baseType=OWL_NS.ObjectProperty,
- subPropertyOf=None, domain=None, range=None, inverseOf=None,
- otherType=None, equivalentProperty=None,
+ self,
+ identifier=None,
+ graph=None,
+ baseType=OWL_NS.ObjectProperty,
+ subPropertyOf=None,
+ domain=None,
+ range=None,
+ inverseOf=None,
+ otherType=None,
+ equivalentProperty=None,
comment=None,
verbAnnotations=None,
nameAnnotation=None,
- nameIsLabel=False):
- super(Property, self).__init__(identifier, graph,
- nameAnnotation, nameIsLabel)
+ nameIsLabel=False,
+ ):
+ super(Property, self).__init__(identifier, graph, nameAnnotation, nameIsLabel)
if verbAnnotations:
self.setupVerbAnnotations(verbAnnotations)
assert not isinstance(self.identifier, BNode)
if baseType is None:
# None give, determine via introspection
- self._baseType = first(
- Individual(self.identifier, graph=self.graph).type)
+ self._baseType = first(Individual(self.identifier, graph=self.graph).type)
else:
if (self.identifier, RDF.type, baseType) not in self.graph:
self.graph.add((self.identifier, RDF.type, baseType))
@@ -1972,16 +1998,15 @@ class Property(AnnotatableTerms):
def serialize(self, graph):
for fact in self.graph.triples((self.identifier, None, None)):
graph.add(fact)
- for p in itertools.chain(self.subPropertyOf,
- self.inverseOf):
+ for p in itertools.chain(self.subPropertyOf, self.inverseOf):
p.serialize(graph)
- for c in itertools.chain(self.domain,
- self.range):
+ for c in itertools.chain(self.domain, self.range):
CastClass(c, self.graph).serialize(graph)
def _get_extent(self, graph=None):
for triple in (graph is None and self.graph or graph).triples(
- (None, self.identifier, None)):
+ (None, self.identifier, None)
+ ):
yield triple
def _set_extent(self, other):
@@ -1995,36 +2020,44 @@ class Property(AnnotatableTerms):
def __repr__(self):
rt = []
if OWL_NS.ObjectProperty in self.type:
- rt.append('ObjectProperty( %s annotation(%s)'
- % (self.qname, first(self.comment) and
- first(self.comment) or ''))
+ rt.append(
+ "ObjectProperty( %s annotation(%s)"
+ % (self.qname, first(self.comment) and first(self.comment) or "")
+ )
if first(self.inverseOf):
twoLinkInverse = first(first(self.inverseOf).inverseOf)
- if twoLinkInverse \
- and twoLinkInverse.identifier == self.identifier:
+ if twoLinkInverse and twoLinkInverse.identifier == self.identifier:
inverseRepr = first(self.inverseOf).qname
else:
inverseRepr = repr(first(self.inverseOf))
- rt.append(" inverseOf( %s )%s" % (
- inverseRepr,
- OWL_NS.SymmetricProperty in self.type and
- ' Symmetric' or
- ''))
+ rt.append(
+ " inverseOf( %s )%s"
+ % (
+ inverseRepr,
+ OWL_NS.SymmetricProperty in self.type and " Symmetric" or "",
+ )
+ )
for s, p, roleType in self.graph.triples_choices(
- (self.identifier,
- RDF.type,
- [OWL_NS.FunctionalProperty,
- OWL_NS.InverseFunctionalProperty,
- OWL_NS.TransitiveProperty])):
+ (
+ self.identifier,
+ RDF.type,
+ [
+ OWL_NS.FunctionalProperty,
+ OWL_NS.InverseFunctionalProperty,
+ OWL_NS.TransitiveProperty,
+ ],
+ )
+ ):
rt.append(str(roleType.split(OWL_NS)[-1]))
else:
- rt.append('DatatypeProperty( %s %s'
- % (self.qname, first(self.comment) and
- first(self.comment) or
- ''))
- for s, p, roleType in self.graph.triples((
- self.identifier, RDF.type, OWL_NS.FunctionalProperty)):
- rt.append(' Functional')
+ rt.append(
+ "DatatypeProperty( %s %s"
+ % (self.qname, first(self.comment) and first(self.comment) or "")
+ )
+ for s, p, roleType in self.graph.triples(
+ (self.identifier, RDF.type, OWL_NS.FunctionalProperty)
+ ):
+ rt.append(" Functional")
def canonicalName(term, g):
normalizedName = classOrIdentifier(term)
@@ -2032,55 +2065,71 @@ class Property(AnnotatableTerms):
return term
elif normalizedName.startswith(_XSD_NS):
return str(term)
- elif first(g.triples_choices((
- normalizedName,
- [OWL_NS.unionOf,
- OWL_NS.intersectionOf], None))):
+ elif first(
+ g.triples_choices(
+ (normalizedName, [OWL_NS.unionOf, OWL_NS.intersectionOf], None)
+ )
+ ):
return repr(term)
else:
return str(term.qname)
- rt.append(' '.join([" super( %s )" % canonicalName(
- superP, self.graph)
- for superP in self.subPropertyOf]))
- rt.append(' '.join([" domain( %s )" % canonicalName(
- domain, self.graph)
- for domain in self.domain]))
- rt.append(' '.join([" range( %s )" % canonicalName(
- range, self.graph)
- for range in self.range]))
- rt = '\n'.join([expr for expr in rt if expr])
- rt += '\n)'
- return str(rt).encode('utf-8')
+
+ rt.append(
+ " ".join(
+ [
+ " super( %s )" % canonicalName(superP, self.graph)
+ for superP in self.subPropertyOf
+ ]
+ )
+ )
+ rt.append(
+ " ".join(
+ [
+ " domain( %s )" % canonicalName(domain, self.graph)
+ for domain in self.domain
+ ]
+ )
+ )
+ rt.append(
+ " ".join(
+ [
+ " range( %s )" % canonicalName(range, self.graph)
+ for range in self.range
+ ]
+ )
+ )
+ rt = "\n".join([expr for expr in rt if expr])
+ rt += "\n)"
+ return str(rt).encode("utf-8")
def _get_subPropertyOf(self):
for anc in self.graph.objects(
- subject=self.identifier, predicate=RDFS.subPropertyOf):
+ subject=self.identifier, predicate=RDFS.subPropertyOf
+ ):
yield Property(anc, graph=self.graph, baseType=None)
def _set_subPropertyOf(self, other):
if not other:
return
for sP in other:
- self.graph.add(
- (self.identifier, RDFS.subPropertyOf, classOrIdentifier(sP)))
+ self.graph.add((self.identifier, RDFS.subPropertyOf, classOrIdentifier(sP)))
@TermDeletionHelper(RDFS.subPropertyOf)
def _del_subPropertyOf(self):
pass
- subPropertyOf = property(
- _get_subPropertyOf, _set_subPropertyOf, _del_subPropertyOf)
+ subPropertyOf = property(_get_subPropertyOf, _set_subPropertyOf, _del_subPropertyOf)
def _get_inverseOf(self):
for anc in self.graph.objects(
- subject=self.identifier, predicate=OWL_NS.inverseOf):
+ subject=self.identifier, predicate=OWL_NS.inverseOf
+ ):
yield Property(anc, graph=self.graph, baseType=None)
def _set_inverseOf(self, other):
if not other:
return
- self.graph.add(
- (self.identifier, OWL_NS.inverseOf, classOrIdentifier(other)))
+ self.graph.add((self.identifier, OWL_NS.inverseOf, classOrIdentifier(other)))
@TermDeletionHelper(OWL_NS.inverseOf)
def _del_inverseOf(self):
@@ -2089,20 +2138,17 @@ class Property(AnnotatableTerms):
inverseOf = property(_get_inverseOf, _set_inverseOf, _del_inverseOf)
def _get_domain(self):
- for dom in self.graph.objects(
- subject=self.identifier, predicate=RDFS.domain):
+ for dom in self.graph.objects(subject=self.identifier, predicate=RDFS.domain):
yield Class(dom, graph=self.graph)
def _set_domain(self, other):
if not other:
return
if isinstance(other, (Individual, Identifier)):
- self.graph.add(
- (self.identifier, RDFS.domain, classOrIdentifier(other)))
+ self.graph.add((self.identifier, RDFS.domain, classOrIdentifier(other)))
else:
for dom in other:
- self.graph.add(
- (self.identifier, RDFS.domain, classOrIdentifier(dom)))
+ self.graph.add((self.identifier, RDFS.domain, classOrIdentifier(dom)))
@TermDeletionHelper(RDFS.domain)
def _del_domain(self):
@@ -2111,20 +2157,17 @@ class Property(AnnotatableTerms):
domain = property(_get_domain, _set_domain, _del_domain)
def _get_range(self):
- for ran in self.graph.objects(
- subject=self.identifier, predicate=RDFS.range):
+ for ran in self.graph.objects(subject=self.identifier, predicate=RDFS.range):
yield Class(ran, graph=self.graph)
def _set_range(self, ranges):
if not ranges:
return
if isinstance(ranges, (Individual, Identifier)):
- self.graph.add(
- (self.identifier, RDFS.range, classOrIdentifier(ranges)))
+ self.graph.add((self.identifier, RDFS.range, classOrIdentifier(ranges)))
else:
for range in ranges:
- self.graph.add(
- (self.identifier, RDFS.range, classOrIdentifier(range)))
+ self.graph.add((self.identifier, RDFS.range, classOrIdentifier(range)))
@TermDeletionHelper(RDFS.range)
def _del_range(self):
@@ -2144,9 +2187,9 @@ def CommonNSBindings(graph, additionalNS={}):
Takes a graph and binds the common namespaces (rdf,rdfs, & owl)
"""
namespace_manager = NamespaceManager(graph)
- namespace_manager.bind('rdfs', RDFS)
- namespace_manager.bind('rdf', RDF)
- namespace_manager.bind('owl', OWL_NS)
+ namespace_manager.bind("rdfs", RDFS)
+ namespace_manager.bind("rdf", RDF)
+ namespace_manager.bind("owl", OWL_NS)
for prefix, uri in list(additionalNS.items()):
namespace_manager.bind(prefix, uri, override=False)
graph.namespace_manager = namespace_manager
@@ -2154,8 +2197,9 @@ def CommonNSBindings(graph, additionalNS={}):
def test():
import doctest
+
doctest.testmod()
-if __name__ == '__main__':
+if __name__ == "__main__":
test()
diff --git a/rdflib/graph.py b/rdflib/graph.py
index 4a27e6de..12d18dce 100644
--- a/rdflib/graph.py
+++ b/rdflib/graph.py
@@ -3,9 +3,35 @@ from __future__ import division
from __future__ import print_function
from rdflib.term import Literal # required for doctests
-assert Literal # avoid warning
from rdflib.namespace import Namespace # required for doctests
-assert Namespace # avoid warning
+
+import logging
+
+import random
+from rdflib.namespace import RDF, RDFS, SKOS
+from rdflib import plugin, exceptions, query
+from rdflib.term import Node, URIRef, Genid
+from rdflib.term import BNode
+import rdflib.term
+from rdflib.paths import Path
+from rdflib.store import Store
+from rdflib.serializer import Serializer
+from rdflib.parser import Parser
+from rdflib.parser import create_input_source
+from rdflib.namespace import NamespaceManager
+from rdflib.resource import Resource
+from rdflib.collection import Collection
+
+import os
+import shutil
+import tempfile
+
+from io import BytesIO
+from urllib.parse import urlparse
+
+assert Literal # avoid warning
+assert Namespace # avoid warning
+logger = logging.getLogger(__name__)
__doc__ = """\
@@ -88,13 +114,13 @@ Instantiating Graphs with a IOMemory store and an identifier -
>>> g = Graph('IOMemory', URIRef("http://rdflib.net"))
>>> g.identifier
- rdflib.term.URIRef(u'http://rdflib.net')
- >>> str(g) # doctest: +NORMALIZE_WHITESPACE
+ rdflib.term.URIRef('http://rdflib.net')
+ >>> str(g) # doctest: +NORMALIZE_WHITESPACE
"<http://rdflib.net> a rdfg:Graph;rdflib:storage
[a rdflib:Store;rdfs:label 'IOMemory']."
Creating a ConjunctiveGraph - The top level container for all named Graphs
-in a 'database':
+in a "database":
>>> g = ConjunctiveGraph()
>>> str(g.default_context)
@@ -109,7 +135,7 @@ via triple pattern:
0
>>> g.add((statementId, RDF.type, RDF.Statement))
>>> g.add((statementId, RDF.subject,
- ... URIRef(u'http://rdflib.net/store/ConjunctiveGraph')))
+ ... URIRef("http://rdflib.net/store/ConjunctiveGraph")))
>>> g.add((statementId, RDF.predicate, RDFS.label))
>>> g.add((statementId, RDF.object, Literal("Conjunctive Graph")))
>>> print(len(g))
@@ -145,11 +171,11 @@ by RDFLib they are UUIDs and unique.
>>> g1 = Graph()
>>> g2 = Graph()
- >>> u = URIRef(u'http://example.com/foo')
- >>> g1.add([u, RDFS.label, Literal('foo')])
- >>> g1.add([u, RDFS.label, Literal('bar')])
- >>> g2.add([u, RDFS.label, Literal('foo')])
- >>> g2.add([u, RDFS.label, Literal('bing')])
+ >>> u = URIRef("http://example.com/foo")
+ >>> g1.add([u, RDFS.label, Literal("foo")])
+ >>> g1.add([u, RDFS.label, Literal("bar")])
+ >>> g2.add([u, RDFS.label, Literal("foo")])
+ >>> g2.add([u, RDFS.label, Literal("bing")])
>>> len(g1 + g2) # adds bing as label
3
>>> len(g1 - g2) # removes foo
@@ -162,7 +188,7 @@ by RDFLib they are UUIDs and unique.
Graph Aggregation - ConjunctiveGraphs and ReadOnlyGraphAggregate within
the same store:
- >>> store = plugin.get('IOMemory', Store)()
+ >>> store = plugin.get("IOMemory", Store)()
>>> g1 = Graph(store)
>>> g2 = Graph(store)
>>> g3 = Graph(store)
@@ -171,21 +197,21 @@ the same store:
>>> stmt3 = BNode()
>>> g1.add((stmt1, RDF.type, RDF.Statement))
>>> g1.add((stmt1, RDF.subject,
- ... URIRef(u'http://rdflib.net/store/ConjunctiveGraph')))
+ ... URIRef('http://rdflib.net/store/ConjunctiveGraph')))
>>> g1.add((stmt1, RDF.predicate, RDFS.label))
- >>> g1.add((stmt1, RDF.object, Literal("Conjunctive Graph")))
+ >>> g1.add((stmt1, RDF.object, Literal('Conjunctive Graph')))
>>> g2.add((stmt2, RDF.type, RDF.Statement))
>>> g2.add((stmt2, RDF.subject,
- ... URIRef(u'http://rdflib.net/store/ConjunctiveGraph')))
+ ... URIRef('http://rdflib.net/store/ConjunctiveGraph')))
>>> g2.add((stmt2, RDF.predicate, RDF.type))
>>> g2.add((stmt2, RDF.object, RDFS.Class))
>>> g3.add((stmt3, RDF.type, RDF.Statement))
>>> g3.add((stmt3, RDF.subject,
- ... URIRef(u'http://rdflib.net/store/ConjunctiveGraph')))
+ ... URIRef('http://rdflib.net/store/ConjunctiveGraph')))
>>> g3.add((stmt3, RDF.predicate, RDFS.comment))
>>> g3.add((stmt3, RDF.object, Literal(
- ... "The top-level aggregate graph - The sum " +
- ... "of all named graphs within a Store")))
+ ... 'The top-level aggregate graph - The sum ' +
+ ... 'of all named graphs within a Store')))
>>> len(list(ConjunctiveGraph(store).subjects(RDF.type, RDF.Statement)))
3
>>> len(list(ReadOnlyGraphAggregate([g1,g2]).subjects(
@@ -219,68 +245,39 @@ Parsing N3 from a string
... rdf:predicate rdfs:label;
... rdf:object "Conjunctive Graph" ] .
... '''
- >>> g2 = g2.parse(data=src, format='n3')
+ >>> g2 = g2.parse(data=src, format="n3")
>>> print(len(g2))
4
Using Namespace class:
- >>> RDFLib = Namespace('http://rdflib.net/')
+ >>> RDFLib = Namespace("http://rdflib.net/")
>>> RDFLib.ConjunctiveGraph
- rdflib.term.URIRef(u'http://rdflib.net/ConjunctiveGraph')
- >>> RDFLib['Graph']
- rdflib.term.URIRef(u'http://rdflib.net/Graph')
+ rdflib.term.URIRef('http://rdflib.net/ConjunctiveGraph')
+ >>> RDFLib["Graph"]
+ rdflib.term.URIRef('http://rdflib.net/Graph')
"""
-import logging
-logger = logging.getLogger(__name__)
-
-# import md5
-import random
-import warnings
-
-from hashlib import md5
-
-
-from rdflib.namespace import RDF, RDFS, SKOS
-
-from rdflib import plugin, exceptions, query
-
-from rdflib.term import Node, URIRef, Genid
-from rdflib.term import BNode
-
-import rdflib.term
-
-from rdflib.paths import Path
-
-from rdflib.store import Store
-from rdflib.serializer import Serializer
-from rdflib.parser import Parser
-from rdflib.parser import create_input_source
-from rdflib.namespace import NamespaceManager
-from rdflib.resource import Resource
-from rdflib.collection import Collection
-
-import os
-import shutil
-import tempfile
-
-from six import BytesIO
-from six import b
-from six.moves.urllib.parse import urlparse
__all__ = [
- 'Graph', 'ConjunctiveGraph', 'QuotedGraph', 'Seq',
- 'ModificationException', 'Dataset',
- 'UnSupportedAggregateOperation', 'ReadOnlyGraphAggregate']
+ "Graph",
+ "ConjunctiveGraph",
+ "QuotedGraph",
+ "Seq",
+ "ModificationException",
+ "Dataset",
+ "UnSupportedAggregateOperation",
+ "ReadOnlyGraphAggregate",
+ "BatchAddGraph",
+]
class Graph(Node):
"""An RDF Graph
- The constructor accepts one argument, the 'store'
- that will be used to store the graph data (see the 'store'
+ The constructor accepts one argument, the "store"
+ that will be used to store the graph data (see the "store"
package for stores currently shipped with rdflib).
Stores can be context-aware or unaware. Unaware stores take up
@@ -291,13 +288,15 @@ class Graph(Node):
The Graph constructor can take an identifier which identifies the Graph
by name. If none is given, the graph is assigned a BNode for its
identifier.
- For more on named graphs, see: http://www.w3.org/2004/03/trix/
+ For more on named graphs, see: http://www.w3.org/2004/03/trix/
"""
- def __init__(self, store='default', identifier=None,
- namespace_manager=None):
+ def __init__(
+ self, store="default", identifier=None, namespace_manager=None, base=None
+ ):
super(Graph, self).__init__()
+ self.base = base
self.__identifier = identifier or BNode()
if not isinstance(self.__identifier, Node):
@@ -315,10 +314,12 @@ class Graph(Node):
def __get_store(self):
return self.__store
+
store = property(__get_store) # read-only attr
def __get_identifier(self):
return self.__identifier
+
identifier = property(__get_identifier) # read-only attr
def _get_namespace_manager(self):
@@ -329,23 +330,24 @@ class Graph(Node):
def _set_namespace_manager(self, nm):
self.__namespace_manager = nm
- namespace_manager = property(_get_namespace_manager,
- _set_namespace_manager,
- doc="this graph's namespace-manager")
+ namespace_manager = property(
+ _get_namespace_manager,
+ _set_namespace_manager,
+ doc="this graph's namespace-manager",
+ )
def __repr__(self):
return "<Graph identifier=%s (%s)>" % (self.identifier, type(self))
def __str__(self):
if isinstance(self.identifier, URIRef):
- return ("%s a rdfg:Graph;rdflib:storage " +
- "[a rdflib:Store;rdfs:label '%s'].") % (
- self.identifier.n3(),
- self.store.__class__.__name__)
+ return (
+ "%s a rdfg:Graph;rdflib:storage " + "[a rdflib:Store;rdfs:label '%s']."
+ ) % (self.identifier.n3(), self.store.__class__.__name__)
else:
- return ("[a rdfg:Graph;rdflib:storage " +
- "[a rdflib:Store;rdfs:label '%s']].") % (
- self.store.__class__.__name__)
+ return (
+ "[a rdfg:Graph;rdflib:storage " + "[a rdflib:Store;rdfs:label '%s']]."
+ ) % self.store.__class__.__name__
def toPython(self):
return self
@@ -377,28 +379,26 @@ class Graph(Node):
Might be necessary for stores that require closing a connection to a
database or releasing some resource.
"""
- self.__store.close(
- commit_pending_transaction=commit_pending_transaction)
+ self.__store.close(commit_pending_transaction=commit_pending_transaction)
def add(self, triple):
"""Add a triple with self as context"""
s, p, o = triple
- assert isinstance(s, Node), \
- "Subject %s must be an rdflib term" % (s,)
- assert isinstance(p, Node), \
- "Predicate %s must be an rdflib term" % (p,)
- assert isinstance(o, Node), \
- "Object %s must be an rdflib term" % (o,)
+ assert isinstance(s, Node), "Subject %s must be an rdflib term" % (s,)
+ assert isinstance(p, Node), "Predicate %s must be an rdflib term" % (p,)
+ assert isinstance(o, Node), "Object %s must be an rdflib term" % (o,)
self.__store.add((s, p, o), self, quoted=False)
def addN(self, quads):
"""Add a sequence of triple with context"""
- self.__store.addN((s, p, o, c) for s, p, o, c in quads
- if isinstance(c, Graph) and
- c.identifier is self.identifier and
- _assertnode(s, p, o)
- )
+ self.__store.addN(
+ (s, p, o, c)
+ for s, p, o, c in quads
+ if isinstance(c, Graph)
+ and c.identifier is self.identifier
+ and _assertnode(s, p, o)
+ )
def remove(self, triple):
"""Remove a triple from the graph
@@ -417,10 +417,10 @@ class Graph(Node):
s, p, o = triple
if isinstance(p, Path):
for _s, _o in p.eval(self, s, o):
- yield (_s, p, _o)
+ yield _s, p, _o
else:
for (s, p, o), cg in self.__store.triples((s, p, o), context=self):
- yield (s, p, o)
+ yield s, p, o
def __getitem__(self, item):
"""
@@ -431,16 +431,16 @@ class Graph(Node):
>>> import rdflib
>>> g = rdflib.Graph()
- >>> g.add((rdflib.URIRef('urn:bob'), rdflib.RDFS.label, rdflib.Literal('Bob')))
+ >>> g.add((rdflib.URIRef("urn:bob"), rdflib.RDFS.label, rdflib.Literal("Bob")))
- >>> list(g[rdflib.URIRef('urn:bob')]) # all triples about bob
- [(rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'), rdflib.term.Literal(u'Bob'))]
+ >>> list(g[rdflib.URIRef("urn:bob")]) # all triples about bob
+ [(rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label'), rdflib.term.Literal('Bob'))]
>>> list(g[:rdflib.RDFS.label]) # all label triples
- [(rdflib.term.URIRef(u'urn:bob'), rdflib.term.Literal(u'Bob'))]
+ [(rdflib.term.URIRef('urn:bob'), rdflib.term.Literal('Bob'))]
- >>> list(g[::rdflib.Literal('Bob')]) # all triples with bob as object
- [(rdflib.term.URIRef(u'urn:bob'), rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'))]
+ >>> list(g[::rdflib.Literal("Bob")]) # all triples with bob as object
+ [(rdflib.term.URIRef('urn:bob'), rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label'))]
Combined with SPARQL paths, more complex queries can be
written concisely:
@@ -455,7 +455,7 @@ class Graph(Node):
All friends and friends of friends of Bob
- g[bob : FOAF.knows * '+']
+ g[bob : FOAF.knows * "+"]
etc.
@@ -489,7 +489,9 @@ class Graph(Node):
return self.predicate_objects(item)
else:
- raise TypeError("You can only index a graph by a single rdflib term or path, or a slice of rdflib terms.")
+ raise TypeError(
+ "You can only index a graph by a single rdflib term or path, or a slice of rdflib terms."
+ )
def __len__(self):
"""Returns the number of triples in the graph
@@ -516,7 +518,9 @@ class Graph(Node):
if other is None:
return -1
elif isinstance(other, Graph):
- return cmp(self.identifier, other.identifier)
+ return (self.identifier > other.identifier) - (
+ self.identifier < other.identifier
+ )
else:
# Note if None is considered equivalent to owl:Nothing
# Then perhaps a graph with length 0 should be considered
@@ -524,21 +528,20 @@ class Graph(Node):
return 1
def __eq__(self, other):
- return isinstance(other, Graph) \
- and self.identifier == other.identifier
+ return isinstance(other, Graph) and self.identifier == other.identifier
def __lt__(self, other):
- return (other is None) \
- or (isinstance(other, Graph) and
- self.identifier < other.identifier)
+ return (other is None) or (
+ isinstance(other, Graph) and self.identifier < other.identifier
+ )
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
- return (isinstance(other, Graph) and
- self.identifier > other.identifier) \
- or (other is not None)
+ return (isinstance(other, Graph) and self.identifier > other.identifier) or (
+ other is not None
+ )
def __ge__(self, other):
return self > other or self == other
@@ -560,8 +563,7 @@ class Graph(Node):
"""Set-theoretic union
BNode IDs are not changed."""
retval = Graph()
- for (prefix, uri) in set(
- list(self.namespaces()) + list(other.namespaces())):
+ for (prefix, uri) in set(list(self.namespaces()) + list(other.namespaces())):
retval.bind(prefix, uri)
for x in self:
retval.add(x)
@@ -583,7 +585,7 @@ class Graph(Node):
BNode IDs are not changed."""
retval = Graph()
for x in self:
- if not x in other:
+ if x not in other:
retval.add(x)
return retval
@@ -604,10 +606,12 @@ class Graph(Node):
(subject, predicate, object).
"""
(subject, predicate, object_) = triple
- assert subject is not None, \
- "s can't be None in .set([s,p,o]), as it would remove (*, p, *)"
- assert predicate is not None, \
- "p can't be None in .set([s,p,o]), as it would remove (s, *, *)"
+ assert (
+ subject is not None
+ ), "s can't be None in .set([s,p,o]), as it would remove (*, p, *)"
+ assert (
+ predicate is not None
+ ), "p can't be None in .set([s,p,o]), as it would remove (s, *, *)"
self.remove((subject, predicate, None))
self.add((subject, predicate, object_))
@@ -644,11 +648,13 @@ class Graph(Node):
def triples_choices(self, triple, context=None):
subject, predicate, object_ = triple
for (s, p, o), cg in self.store.triples_choices(
- (subject, predicate, object_), context=self):
- yield (s, p, o)
+ (subject, predicate, object_), context=self
+ ):
+ yield s, p, o
- def value(self, subject=None, predicate=RDF.value, object=None,
- default=None, any=True):
+ def value(
+ self, subject=None, predicate=RDF.value, object=None, default=None, any=True
+ ):
"""Get a value for a pair of two criteria
Exactly one of subject, predicate, object must be None. Useful if one
@@ -665,9 +671,11 @@ class Graph(Node):
"""
retval = default
- if (subject is None and predicate is None) or \
- (subject is None and object is None) or \
- (predicate is None and object is None):
+ if (
+ (subject is None and predicate is None)
+ or (subject is None and object is None)
+ or (predicate is None and object is None)
+ ):
return None
if object is None:
@@ -685,20 +693,25 @@ class Graph(Node):
if any is False:
try:
next(values)
- msg = ("While trying to find a value for (%s, %s, %s) the"
- " following multiple values where found:\n" %
- (subject, predicate, object))
- triples = self.store.triples(
- (subject, predicate, object), None)
+ msg = (
+ "While trying to find a value for (%s, %s, %s) the"
+ " following multiple values where found:\n"
+ % (subject, predicate, object)
+ )
+ triples = self.store.triples((subject, predicate, object), None)
for (s, p, o), contexts in triples:
msg += "(%s, %s, %s)\n (contexts: %s)\n" % (
- s, p, o, list(contexts))
+ s,
+ p,
+ o,
+ list(contexts),
+ )
raise exceptions.UniquenessError(msg)
except StopIteration:
pass
return retval
- def label(self, subject, default=''):
+ def label(self, subject, default=""):
"""Query for the RDFS.label of the subject
Return default if no label exists or any label if multiple exist.
@@ -707,14 +720,19 @@ class Graph(Node):
return default
return self.value(subject, RDFS.label, default=default, any=True)
- def preferredLabel(self, subject, lang=None, default=None,
- labelProperties=(SKOS.prefLabel, RDFS.label)):
+ def preferredLabel(
+ self,
+ subject,
+ lang=None,
+ default=None,
+ labelProperties=(SKOS.prefLabel, RDFS.label),
+ ):
"""
Find the preferred label for subject.
By default prefers skos:prefLabels over rdfs:labels. In case at least
one prefLabel is found returns those, else returns labels. In case a
- language string (e.g., 'en', 'de' or even '' for no lang-tagged
+ language string (e.g., "en", "de" or even "" for no lang-tagged
literals) is given, only such labels will be considered.
Return a list of (labelProp, label) pairs, where labelProp is either
@@ -724,30 +742,30 @@ class Graph(Node):
>>> from rdflib.namespace import SKOS
>>> from pprint import pprint
>>> g = ConjunctiveGraph()
- >>> u = URIRef(u'http://example.com/foo')
- >>> g.add([u, RDFS.label, Literal('foo')])
- >>> g.add([u, RDFS.label, Literal('bar')])
+ >>> u = URIRef("http://example.com/foo")
+ >>> g.add([u, RDFS.label, Literal("foo")])
+ >>> g.add([u, RDFS.label, Literal("bar")])
>>> pprint(sorted(g.preferredLabel(u)))
- [(rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
- rdflib.term.Literal(u'bar')),
- (rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#label'),
- rdflib.term.Literal(u'foo'))]
- >>> g.add([u, SKOS.prefLabel, Literal('bla')])
+ [(rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label'),
+ rdflib.term.Literal('bar')),
+ (rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label'),
+ rdflib.term.Literal('foo'))]
+ >>> g.add([u, SKOS.prefLabel, Literal("bla")])
>>> pprint(g.preferredLabel(u))
- [(rdflib.term.URIRef(u'http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'bla'))]
- >>> g.add([u, SKOS.prefLabel, Literal('blubb', lang='en')])
+ [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
+ rdflib.term.Literal('bla'))]
+ >>> g.add([u, SKOS.prefLabel, Literal("blubb", lang="en")])
>>> sorted(g.preferredLabel(u)) #doctest: +NORMALIZE_WHITESPACE
- [(rdflib.term.URIRef(u'http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'bla')),
- (rdflib.term.URIRef(u'http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'blubb', lang='en'))]
- >>> g.preferredLabel(u, lang='') #doctest: +NORMALIZE_WHITESPACE
- [(rdflib.term.URIRef(u'http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'bla'))]
- >>> pprint(g.preferredLabel(u, lang='en'))
- [(rdflib.term.URIRef(u'http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'blubb', lang='en'))]
+ [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
+ rdflib.term.Literal('bla')),
+ (rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
+ rdflib.term.Literal('blubb', lang='en'))]
+ >>> g.preferredLabel(u, lang="") #doctest: +NORMALIZE_WHITESPACE
+ [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
+ rdflib.term.Literal('bla'))]
+ >>> pprint(g.preferredLabel(u, lang="en"))
+ [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
+ rdflib.term.Literal('blubb', lang='en'))]
"""
if default is None:
@@ -755,22 +773,26 @@ class Graph(Node):
# setup the language filtering
if lang is not None:
- if lang == '': # we only want not language-tagged literals
- def langfilter(l): return l.language is None
+ if lang == "": # we only want not language-tagged literals
+ def langfilter(l_):
+ return l_.language is None
else:
- def langfilter(l): return l.language == lang
+ def langfilter(l_):
+ return l_.language == lang
+
else: # we don't care about language tags
- def langfilter(l): return True
+ def langfilter(l_):
+ return True
for labelProp in labelProperties:
labels = list(filter(langfilter, self.objects(subject, labelProp)))
if len(labels) == 0:
continue
else:
- return [(labelProp, l) for l in labels]
+ return [(labelProp, l_) for l_ in labels]
return default
- def comment(self, subject, default=''):
+ def comment(self, subject, default=""):
"""Query for the RDFS.comment of the subject
Return default if no comment exists
@@ -801,9 +823,9 @@ class Graph(Node):
>>> from rdflib.collection import Collection
>>> g=Graph()
- >>> a=BNode('foo')
- >>> b=BNode('bar')
- >>> c=BNode('baz')
+ >>> a=BNode("foo")
+ >>> b=BNode("bar")
+ >>> c=BNode("baz")
>>> g.add((a,RDF.first,RDF.type))
>>> g.add((a,RDF.rest,b))
>>> g.add((b,RDF.first,RDFS.label))
@@ -811,12 +833,12 @@ class Graph(Node):
>>> g.add((c,RDF.first,RDFS.comment))
>>> g.add((c,RDF.rest,RDF.nil))
>>> def topList(node,g):
- ... for s in g.subjects(RDF.rest,node):
+ ... for s in g.subjects(RDF.rest, node):
... yield s
>>> def reverseList(node,g):
- ... for f in g.objects(node,RDF.first):
+ ... for f in g.objects(node, RDF.first):
... print(f)
- ... for s in g.subjects(RDF.rest,node):
+ ... for s in g.subjects(RDF.rest, node):
... yield s
>>> [rt for rt in g.transitiveClosure(
@@ -901,11 +923,12 @@ class Graph(Node):
if replace, replace any existing prefix with the new namespace
- for example: graph.bind('foaf', 'http://xmlns.com/foaf/0.1/')
+ for example: graph.bind("foaf", "http://xmlns.com/foaf/0.1/")
"""
return self.namespace_manager.bind(
- prefix, namespace, override=override, replace=replace)
+ prefix, namespace, override=override, replace=replace
+ )
def namespaces(self):
"""Generator over all the prefix, namespace tuples"""
@@ -916,16 +939,22 @@ class Graph(Node):
"""Turn uri into an absolute URI if it's not one already"""
return self.namespace_manager.absolutize(uri, defrag)
- def serialize(self, destination=None, format="xml",
- base=None, encoding=None, **args):
+ def serialize(
+ self, destination=None, format="xml", base=None, encoding=None, **args
+ ):
"""Serialize the Graph to destination
If destination is None serialize method returns the serialization as a
string. Format defaults to xml (AKA rdf/xml).
Format support can be extended with plugins,
- but 'xml', 'n3', 'turtle', 'nt', 'pretty-xml', 'trix', 'trig' and 'nquads' are built in.
+ but "xml", "n3", "turtle", "nt", "pretty-xml", "trix", "trig" and "nquads" are built in.
"""
+
+ # if base is not given as attribute use the base set for the graph
+ if base is None:
+ base = self.base
+
serializer = plugin.get(format, Serializer)(self)
if destination is None:
stream = BytesIO()
@@ -938,8 +967,9 @@ class Graph(Node):
location = destination
scheme, netloc, path, params, _query, fragment = urlparse(location)
if netloc != "":
- print("WARNING: not saving as location" +
- "is not a local file reference")
+ print(
+ "WARNING: not saving as location" + "is not a local file reference"
+ )
return
fd, name = tempfile.mkstemp()
stream = os.fdopen(fd, "wb")
@@ -951,8 +981,16 @@ class Graph(Node):
shutil.copy(name, path)
os.remove(name)
- def parse(self, source=None, publicID=None, format=None,
- location=None, file=None, data=None, **args):
+ def parse(
+ self,
+ source=None,
+ publicID=None,
+ format=None,
+ location=None,
+ file=None,
+ data=None,
+ **args
+ ):
"""
Parse source adding the resulting triples to the Graph.
@@ -970,7 +1008,7 @@ class Graph(Node):
- `data`: A string containing the data to be parsed.
- `format`: Used if format can not be determined from source.
Defaults to rdf/xml. Format support can be extended with plugins,
- but 'xml', 'n3', 'nt', 'trix', 'rdfa' are built in.
+ but "xml", "n3", "nt" & "trix" are built in.
- `publicID`: the logical URI to use as the document base. If None
specified the document location is used (at least in the case where
there is a document location).
@@ -983,8 +1021,8 @@ class Graph(Node):
>>> my_data = '''
... <rdf:RDF
- ... xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#'
- ... xmlns:rdfs='http://www.w3.org/2000/01/rdf-schema#'
+ ... xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ ... xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
... >
... <rdf:Description>
... <rdfs:label>Example</rdfs:label>
@@ -994,8 +1032,8 @@ class Graph(Node):
... '''
>>> import tempfile
>>> fd, file_name = tempfile.mkstemp()
- >>> f = os.fdopen(fd, 'w')
- >>> dummy = f.write(my_data) # Returns num bytes written on py3
+ >>> f = os.fdopen(fd, "w")
+ >>> dummy = f.write(my_data) # Returns num bytes written
>>> f.close()
>>> g = Graph()
@@ -1018,9 +1056,14 @@ class Graph(Node):
"""
- source = create_input_source(source=source, publicID=publicID,
- location=location, file=file,
- data=data, format=format)
+ source = create_input_source(
+ source=source,
+ publicID=publicID,
+ location=location,
+ file=file,
+ data=data,
+ format=format,
+ )
if format is None:
format = source.content_type
if format is None:
@@ -1038,9 +1081,16 @@ class Graph(Node):
def load(self, source, publicID=None, format="xml"):
self.parse(source, publicID, format)
- def query(self, query_object, processor='sparql',
- result='sparql', initNs=None, initBindings=None,
- use_store_provided=True, **kwargs):
+ def query(
+ self,
+ query_object,
+ processor="sparql",
+ result="sparql",
+ initNs=None,
+ initBindings=None,
+ use_store_provided=True,
+ **kwargs
+ ):
"""
Query this graph.
@@ -1061,11 +1111,12 @@ class Graph(Node):
if hasattr(self.store, "query") and use_store_provided:
try:
return self.store.query(
- query_object, initNs, initBindings,
- self.default_union and
- '__UNION__' or
- self.identifier,
- **kwargs)
+ query_object,
+ initNs,
+ initBindings,
+ self.default_union and "__UNION__" or self.identifier,
+ **kwargs
+ )
except NotImplementedError:
pass # store has no own implementation
@@ -1074,12 +1125,17 @@ class Graph(Node):
if not isinstance(processor, query.Processor):
processor = plugin.get(processor, query.Processor)(self)
- return result(processor.query(
- query_object, initBindings, initNs, **kwargs))
-
- def update(self, update_object, processor='sparql',
- initNs=None, initBindings=None,
- use_store_provided=True, **kwargs):
+ return result(processor.query(query_object, initBindings, initNs, **kwargs))
+
+ def update(
+ self,
+ update_object,
+ processor="sparql",
+ initNs=None,
+ initBindings=None,
+ use_store_provided=True,
+ **kwargs
+ ):
"""Update this graph with the given update query."""
initBindings = initBindings or {}
initNs = initNs or dict(self.namespaces())
@@ -1087,11 +1143,12 @@ class Graph(Node):
if hasattr(self.store, "update") and use_store_provided:
try:
return self.store.update(
- update_object, initNs, initBindings,
- self.default_union and
- '__UNION__' or
- self.identifier,
- **kwargs)
+ update_object,
+ initNs,
+ initBindings,
+ self.default_union and "__UNION__" or self.identifier,
+ **kwargs
+ )
except NotImplementedError:
pass # store has no own implementation
@@ -1222,7 +1279,7 @@ class Graph(Node):
s = s.skolemize(authority=authority, basepath=basepath)
if o == bnode:
o = o.skolemize(authority=authority, basepath=basepath)
- return (s, p, o)
+ return s, p, o
def do_skolemize2(t):
(s, p, o) = t
@@ -1230,15 +1287,14 @@ class Graph(Node):
s = s.skolemize(authority=authority, basepath=basepath)
if isinstance(o, BNode):
o = o.skolemize(authority=authority, basepath=basepath)
- return (s, p, o)
+ return s, p, o
retval = Graph() if new_graph is None else new_graph
if bnode is None:
self._process_skolem_tuples(retval, do_skolemize2)
elif isinstance(bnode, BNode):
- self._process_skolem_tuples(
- retval, lambda t: do_skolemize(bnode, t))
+ self._process_skolem_tuples(retval, lambda t: do_skolemize(bnode, t))
return retval
@@ -1249,7 +1305,7 @@ class Graph(Node):
s = s.de_skolemize()
if o == uriref:
o = o.de_skolemize()
- return (s, p, o)
+ return s, p, o
def do_de_skolemize2(t):
(s, p, o) = t
@@ -1257,23 +1313,20 @@ class Graph(Node):
s = s.de_skolemize()
if isinstance(o, Genid):
o = o.de_skolemize()
- return (s, p, o)
+ return s, p, o
retval = Graph() if new_graph is None else new_graph
if uriref is None:
self._process_skolem_tuples(retval, do_de_skolemize2)
elif isinstance(uriref, Genid):
- self._process_skolem_tuples(
- retval, lambda t: do_de_skolemize(uriref, t))
+ self._process_skolem_tuples(retval, lambda t: do_de_skolemize(uriref, t))
return retval
class ConjunctiveGraph(Graph):
-
- """
- A ConjunctiveGraph is an (unnamed) aggregation of all the named
+ """A ConjunctiveGraph is an (unnamed) aggregation of all the named
graphs in a store.
It has a ``default`` graph, whose name is associated with the
@@ -1284,21 +1337,24 @@ class ConjunctiveGraph(Graph):
All methods that add triples work against this default graph.
All queries are carried out against the union of all graphs.
-
"""
- def __init__(self, store='default', identifier=None):
+ def __init__(self, store="default", identifier=None, default_graph_base=None):
super(ConjunctiveGraph, self).__init__(store, identifier=identifier)
- assert self.store.context_aware, ("ConjunctiveGraph must be backed by"
- " a context aware store.")
+ assert self.store.context_aware, (
+ "ConjunctiveGraph must be backed by" " a context aware store."
+ )
self.context_aware = True
self.default_union = True # Conjunctive!
- self.default_context = Graph(store=self.store,
- identifier=identifier or BNode())
+ self.default_context = Graph(
+ store=self.store, identifier=identifier or BNode(), base=default_graph_base
+ )
def __str__(self):
- pattern = ("[a rdflib:ConjunctiveGraph;rdflib:storage "
- "[a rdflib:Store;rdfs:label '%s']]")
+ pattern = (
+ "[a rdflib:ConjunctiveGraph;rdflib:storage "
+ "[a rdflib:Store;rdfs:label '%s']]"
+ )
return pattern % self.store.__class__.__name__
def _spoc(self, triple_or_quad, default=False):
@@ -1348,8 +1404,7 @@ class ConjunctiveGraph(Graph):
"""Add a sequence of triples with context"""
self.store.addN(
- (s, p, o, self._graph(c)) for s, p, o, c in quads if
- _assertnode(s, p, o)
+ (s, p, o, self._graph(c)) for s, p, o, c in quads if _assertnode(s, p, o)
)
def remove(self, triple_or_quad):
@@ -1389,7 +1444,7 @@ class ConjunctiveGraph(Graph):
context = self
for s, o in p.eval(context, s, o):
- yield (s, p, o)
+ yield s, p, o
else:
for (s, p, o), cg in self.store.triples((s, p, o), context=context):
yield s, p, o
@@ -1412,9 +1467,8 @@ class ConjunctiveGraph(Graph):
else:
context = self._graph(context)
- for (s1, p1, o1), cg in self.store.triples_choices((s, p, o),
- context=context):
- yield (s1, p1, o1)
+ for (s1, p1, o1), cg in self.store.triples_choices((s, p, o), context=context):
+ yield s1, p1, o1
def __len__(self):
"""Number of triples in the entire conjunctive graph"""
@@ -1434,13 +1488,14 @@ class ConjunctiveGraph(Graph):
else:
yield self.get_context(context)
- def get_context(self, identifier, quoted=False):
+ def get_context(self, identifier, quoted=False, base=None):
"""Return a context graph for the given identifier
identifier must be a URIRef or BNode.
"""
- return Graph(store=self.store, identifier=identifier,
- namespace_manager=self)
+ return Graph(
+ store=self.store, identifier=identifier, namespace_manager=self, base=base
+ )
def remove_context(self, context):
"""Removes the given context from the graph"""
@@ -1453,8 +1508,16 @@ class ConjunctiveGraph(Graph):
context_id = "#context"
return URIRef(context_id, base=uri)
- def parse(self, source=None, publicID=None, format="xml",
- location=None, file=None, data=None, **args):
+ def parse(
+ self,
+ source=None,
+ publicID=None,
+ format="xml",
+ location=None,
+ file=None,
+ data=None,
+ **args
+ ):
"""
Parse source adding the resulting triples to its own context
(sub graph of this graph).
@@ -1468,8 +1531,13 @@ class ConjunctiveGraph(Graph):
"""
source = create_input_source(
- source=source, publicID=publicID, location=location,
- file=file, data=data, format=format)
+ source=source,
+ publicID=publicID,
+ location=location,
+ file=file,
+ data=data,
+ format=format,
+ )
g_id = publicID and publicID or source.getPublicId()
if not isinstance(g_id, Node):
@@ -1481,10 +1549,10 @@ class ConjunctiveGraph(Graph):
return context
def __reduce__(self):
- return (ConjunctiveGraph, (self.store, self.identifier))
+ return ConjunctiveGraph, (self.store, self.identifier)
-DATASET_DEFAULT_GRAPH_ID = URIRef('urn:x-rdflib:default')
+DATASET_DEFAULT_GRAPH_ID = URIRef("urn:x-rdflib:default")
class Dataset(ConjunctiveGraph):
@@ -1501,68 +1569,68 @@ class Dataset(ConjunctiveGraph):
>>> # Create a new Dataset
>>> ds = Dataset()
>>> # simple triples goes to default graph
- >>> ds.add((URIRef('http://example.org/a'),
- ... URIRef('http://www.example.org/b'),
- ... Literal('foo')))
+ >>> ds.add((URIRef("http://example.org/a"),
+ ... URIRef("http://www.example.org/b"),
+ ... Literal("foo")))
>>>
>>> # Create a graph in the dataset, if the graph name has already been
>>> # used, the corresponding graph will be returned
>>> # (ie, the Dataset keeps track of the constituent graphs)
- >>> g = ds.graph(URIRef('http://www.example.com/gr'))
+ >>> g = ds.graph(URIRef("http://www.example.com/gr"))
>>>
>>> # add triples to the new graph as usual
>>> g.add(
- ... (URIRef('http://example.org/x'),
- ... URIRef('http://example.org/y'),
- ... Literal('bar')) )
+ ... (URIRef("http://example.org/x"),
+ ... URIRef("http://example.org/y"),
+ ... Literal("bar")) )
>>> # alternatively: add a quad to the dataset -> goes to the graph
>>> ds.add(
- ... (URIRef('http://example.org/x'),
- ... URIRef('http://example.org/z'),
- ... Literal('foo-bar'),g) )
+ ... (URIRef("http://example.org/x"),
+ ... URIRef("http://example.org/z"),
+ ... Literal("foo-bar"),g) )
>>>
>>> # querying triples return them all regardless of the graph
>>> for t in ds.triples((None,None,None)): # doctest: +SKIP
... print(t) # doctest: +NORMALIZE_WHITESPACE
- (rdflib.term.URIRef(u'http://example.org/a'),
- rdflib.term.URIRef(u'http://www.example.org/b'),
- rdflib.term.Literal(u'foo'))
- (rdflib.term.URIRef(u'http://example.org/x'),
- rdflib.term.URIRef(u'http://example.org/z'),
- rdflib.term.Literal(u'foo-bar'))
- (rdflib.term.URIRef(u'http://example.org/x'),
- rdflib.term.URIRef(u'http://example.org/y'),
- rdflib.term.Literal(u'bar'))
+ (rdflib.term.URIRef("http://example.org/a"),
+ rdflib.term.URIRef("http://www.example.org/b"),
+ rdflib.term.Literal("foo"))
+ (rdflib.term.URIRef("http://example.org/x"),
+ rdflib.term.URIRef("http://example.org/z"),
+ rdflib.term.Literal("foo-bar"))
+ (rdflib.term.URIRef("http://example.org/x"),
+ rdflib.term.URIRef("http://example.org/y"),
+ rdflib.term.Literal("bar"))
>>>
>>> # querying quads return quads; the fourth argument can be unrestricted
>>> # or restricted to a graph
>>> for q in ds.quads((None, None, None, None)): # doctest: +SKIP
... print(q) # doctest: +NORMALIZE_WHITESPACE
- (rdflib.term.URIRef(u'http://example.org/a'),
- rdflib.term.URIRef(u'http://www.example.org/b'),
- rdflib.term.Literal(u'foo'),
+ (rdflib.term.URIRef("http://example.org/a"),
+ rdflib.term.URIRef("http://www.example.org/b"),
+ rdflib.term.Literal("foo"),
None)
- (rdflib.term.URIRef(u'http://example.org/x'),
- rdflib.term.URIRef(u'http://example.org/y'),
- rdflib.term.Literal(u'bar'),
- rdflib.term.URIRef(u'http://www.example.com/gr'))
- (rdflib.term.URIRef(u'http://example.org/x'),
- rdflib.term.URIRef(u'http://example.org/z'),
- rdflib.term.Literal(u'foo-bar'),
- rdflib.term.URIRef(u'http://www.example.com/gr'))
+ (rdflib.term.URIRef("http://example.org/x"),
+ rdflib.term.URIRef("http://example.org/y"),
+ rdflib.term.Literal("bar"),
+ rdflib.term.URIRef("http://www.example.com/gr"))
+ (rdflib.term.URIRef("http://example.org/x"),
+ rdflib.term.URIRef("http://example.org/z"),
+ rdflib.term.Literal("foo-bar"),
+ rdflib.term.URIRef("http://www.example.com/gr"))
>>>
>>> for q in ds.quads((None,None,None,g)): # doctest: +SKIP
... print(q) # doctest: +NORMALIZE_WHITESPACE
- (rdflib.term.URIRef(u'http://example.org/x'),
- rdflib.term.URIRef(u'http://example.org/y'),
- rdflib.term.Literal(u'bar'),
- rdflib.term.URIRef(u'http://www.example.com/gr'))
- (rdflib.term.URIRef(u'http://example.org/x'),
- rdflib.term.URIRef(u'http://example.org/z'),
- rdflib.term.Literal(u'foo-bar'),
- rdflib.term.URIRef(u'http://www.example.com/gr'))
+ (rdflib.term.URIRef("http://example.org/x"),
+ rdflib.term.URIRef("http://example.org/y"),
+ rdflib.term.Literal("bar"),
+ rdflib.term.URIRef("http://www.example.com/gr"))
+ (rdflib.term.URIRef("http://example.org/x"),
+ rdflib.term.URIRef("http://example.org/z"),
+ rdflib.term.Literal("foo-bar"),
+ rdflib.term.URIRef("http://www.example.com/gr"))
>>> # Note that in the call above -
- >>> # ds.quads((None,None,None,'http://www.example.com/gr'))
+ >>> # ds.quads((None,None,None,"http://www.example.com/gr"))
>>> # would have been accepted, too
>>>
>>> # graph names in the dataset can be queried:
@@ -1590,36 +1658,53 @@ class Dataset(ConjunctiveGraph):
.. versionadded:: 4.0
"""
- def __init__(self, store='default', default_union=False):
+ def __init__(self, store="default", default_union=False, default_graph_base=None):
super(Dataset, self).__init__(store=store, identifier=None)
if not self.store.graph_aware:
raise Exception("DataSet must be backed by a graph-aware store!")
- self.default_context = Graph(store=self.store, identifier=DATASET_DEFAULT_GRAPH_ID)
+ self.default_context = Graph(
+ store=self.store,
+ identifier=DATASET_DEFAULT_GRAPH_ID,
+ base=default_graph_base,
+ )
self.default_union = default_union
def __str__(self):
- pattern = ("[a rdflib:Dataset;rdflib:storage "
- "[a rdflib:Store;rdfs:label '%s']]")
+ pattern = (
+ "[a rdflib:Dataset;rdflib:storage " "[a rdflib:Store;rdfs:label '%s']]"
+ )
return pattern % self.store.__class__.__name__
- def graph(self, identifier=None):
+ def graph(self, identifier=None, base=None):
if identifier is None:
from rdflib.term import rdflib_skolem_genid
+
self.bind(
- "genid", "http://rdflib.net" + rdflib_skolem_genid,
- override=False)
+ "genid", "http://rdflib.net" + rdflib_skolem_genid, override=False
+ )
identifier = BNode().skolemize()
g = self._graph(identifier)
+ g.base = base
self.store.add_graph(g)
return g
- def parse(self, source=None, publicID=None, format="xml",
- location=None, file=None, data=None, **args):
- c = ConjunctiveGraph.parse(self, source, publicID, format, location, file, data, **args)
+ def parse(
+ self,
+ source=None,
+ publicID=None,
+ format="xml",
+ location=None,
+ file=None,
+ data=None,
+ **args
+ ):
+ c = ConjunctiveGraph.parse(
+ self, source, publicID, format, location, file, data, **args
+ )
self.graph(c)
return c
@@ -1650,9 +1735,9 @@ class Dataset(ConjunctiveGraph):
def quads(self, quad):
for s, p, o, c in super(Dataset, self).quads(quad):
if c.identifier == self.default_context:
- yield (s, p, o, None)
+ yield s, p, o, None
else:
- yield (s, p, o, c.identifier)
+ yield s, p, o, c.identifier
class QuotedGraph(Graph):
@@ -1669,12 +1754,9 @@ class QuotedGraph(Graph):
def add(self, triple):
"""Add a triple with self as context"""
s, p, o = triple
- assert isinstance(s, Node), \
- "Subject %s must be an rdflib term" % (s,)
- assert isinstance(p, Node), \
- "Predicate %s must be an rdflib term" % (p,)
- assert isinstance(o, Node), \
- "Object %s must be an rdflib term" % (o,)
+ assert isinstance(s, Node), "Subject %s must be an rdflib term" % (s,)
+ assert isinstance(p, Node), "Predicate %s must be an rdflib term" % (p,)
+ assert isinstance(o, Node), "Object %s must be an rdflib term" % (o,)
self.store.add((s, p, o), self, quoted=True)
@@ -1682,10 +1764,11 @@ class QuotedGraph(Graph):
"""Add a sequence of triple with context"""
self.store.addN(
- (s, p, o, c) for s, p, o, c in quads
- if isinstance(c, QuotedGraph) and
- c.identifier is self.identifier and
- _assertnode(s, p, o)
+ (s, p, o, c)
+ for s, p, o, c in quads
+ if isinstance(c, QuotedGraph)
+ and c.identifier is self.identifier
+ and _assertnode(s, p, o)
)
def n3(self):
@@ -1695,12 +1778,14 @@ class QuotedGraph(Graph):
def __str__(self):
identifier = self.identifier.n3()
label = self.store.__class__.__name__
- pattern = ("{this rdflib.identifier %s;rdflib:storage "
- "[a rdflib:Store;rdfs:label '%s']}")
+ pattern = (
+ "{this rdflib.identifier %s;rdflib:storage "
+ "[a rdflib:Store;rdfs:label '%s']}"
+ )
return pattern % (identifier, label)
def __reduce__(self):
- return (QuotedGraph, (self.store, self.identifier))
+ return QuotedGraph, (self.store, self.identifier)
# Make sure QuotedGraph is ordered correctly
@@ -1735,7 +1820,7 @@ class Seq(object):
LI_INDEX = URIRef(str(RDF) + "_")
for (p, o) in graph.predicate_objects(subject):
if p.startswith(LI_INDEX): # != RDF.Seq: #
- i = int(p.replace(LI_INDEX, ''))
+ i = int(p.replace(LI_INDEX, ""))
_list.append((i, o))
# here is the trick: the predicates are _1, _2, _3, etc. Ie,
@@ -1761,23 +1846,22 @@ class Seq(object):
class ModificationException(Exception):
-
def __init__(self):
pass
def __str__(self):
- return ("Modifications and transactional operations not allowed on "
- "ReadOnlyGraphAggregate instances")
+ return (
+ "Modifications and transactional operations not allowed on "
+ "ReadOnlyGraphAggregate instances"
+ )
class UnSupportedAggregateOperation(Exception):
-
def __init__(self):
pass
def __str__(self):
- return ("This operation is not supported by ReadOnlyGraphAggregate "
- "instances")
+ return "This operation is not supported by ReadOnlyGraphAggregate " "instances"
class ReadOnlyGraphAggregate(ConjunctiveGraph):
@@ -1787,16 +1871,17 @@ class ReadOnlyGraphAggregate(ConjunctiveGraph):
ConjunctiveGraph over an explicit subset of the entire store.
"""
- def __init__(self, graphs, store='default'):
+ def __init__(self, graphs, store="default"):
if store is not None:
super(ReadOnlyGraphAggregate, self).__init__(store)
Graph.__init__(self, store)
self.__namespace_manager = None
- assert isinstance(graphs, list) \
- and graphs \
- and [g for g in graphs if isinstance(g, Graph)], \
- "graphs argument must be a list of Graphs!!"
+ assert (
+ isinstance(graphs, list)
+ and graphs
+ and [g for g in graphs if isinstance(g, Graph)]
+ ), "graphs argument must be a list of Graphs!!"
self.graphs = graphs
def __repr__(self):
@@ -1838,7 +1923,7 @@ class ReadOnlyGraphAggregate(ConjunctiveGraph):
yield s, p, o
else:
for s1, p1, o1 in graph.triples((s, p, o)):
- yield (s1, p1, o1)
+ yield s1, p1, o1
def __contains__(self, triple_or_quad):
context = None
@@ -1855,7 +1940,7 @@ class ReadOnlyGraphAggregate(ConjunctiveGraph):
s, p, o = triple
for graph in self.graphs:
for s1, p1, o1 in graph.triples((s, p, o)):
- yield (s1, p1, o1, graph)
+ yield s1, p1, o1, graph
def __len__(self):
return sum(len(g) for g in self.graphs)
@@ -1869,7 +1954,7 @@ class ReadOnlyGraphAggregate(ConjunctiveGraph):
elif isinstance(other, Graph):
return -1
elif isinstance(other, ReadOnlyGraphAggregate):
- return cmp(self.graphs, other.graphs)
+ return (self.graphs > other.graphs) - (self.graphs < other.graphs)
else:
return -1
@@ -1886,15 +1971,15 @@ class ReadOnlyGraphAggregate(ConjunctiveGraph):
for graph in self.graphs:
choices = graph.triples_choices((subject, predicate, object_))
for (s, p, o) in choices:
- yield (s, p, o)
+ yield s, p, o
def qname(self, uri):
- if hasattr(self, 'namespace_manager') and self.namespace_manager:
+ if hasattr(self, "namespace_manager") and self.namespace_manager:
return self.namespace_manager.qname(uri)
raise UnSupportedAggregateOperation()
def compute_qname(self, uri, generate=True):
- if hasattr(self, 'namespace_manager') and self.namespace_manager:
+ if hasattr(self, "namespace_manager") and self.namespace_manager:
return self.namespace_manager.compute_qname(uri, generate)
raise UnSupportedAggregateOperation()
@@ -1902,7 +1987,7 @@ class ReadOnlyGraphAggregate(ConjunctiveGraph):
raise UnSupportedAggregateOperation()
def namespaces(self):
- if hasattr(self, 'namespace_manager'):
+ if hasattr(self, "namespace_manager"):
for prefix, namespace in self.namespace_manager.namespaces():
yield prefix, namespace
else:
@@ -1925,15 +2010,82 @@ class ReadOnlyGraphAggregate(ConjunctiveGraph):
def _assertnode(*terms):
for t in terms:
- assert isinstance(t, Node), \
- 'Term %s must be an rdflib term' % (t,)
+ assert isinstance(t, Node), "Term %s must be an rdflib term" % (t,)
return True
+class BatchAddGraph(object):
+ """
+ Wrapper around graph that turns calls to :meth:`add` (and optionally, :meth:`addN`)
+ into calls to :meth:`~rdflib.graph.Graph.addN`.
+
+ :Parameters:
+
+ - `graph`: The graph to wrap
+ - `batch_size`: The maximum number of triples to buffer before passing to
+ `graph`'s `addN`
+ - `batch_addn`: If True, then even calls to `addN` will be batched according to
+ `batch_size`
+
+ :ivar graph: The wrapped graph
+    :ivar count: The number of triples buffered since initialization or the last call
+ to :meth:`reset`
+ :ivar batch: The current buffer of triples
+
+ """
+
+ def __init__(self, graph, batch_size=1000, batch_addn=False):
+ if not batch_size or batch_size < 2:
+ raise ValueError("batch_size must be a positive number")
+ self.graph = graph
+ self.__graph_tuple = (graph,)
+ self.__batch_size = batch_size
+ self.__batch_addn = batch_addn
+ self.reset()
+
+ def reset(self):
+ """
+ Manually clear the buffered triples and reset the count to zero
+ """
+ self.batch = []
+ self.count = 0
+
+ def add(self, triple_or_quad):
+ """
+ Add a triple to the buffer
+
+ :param triple: The triple to add
+ """
+ if len(self.batch) >= self.__batch_size:
+ self.graph.addN(self.batch)
+ self.batch = []
+ self.count += 1
+ if len(triple_or_quad) == 3:
+ self.batch.append(triple_or_quad + self.__graph_tuple)
+ else:
+ self.batch.append(triple_or_quad)
+
+ def addN(self, quads):
+ if self.__batch_addn:
+ for q in quads:
+ self.add(q)
+ else:
+ self.graph.addN(quads)
+
+ def __enter__(self):
+ self.reset()
+ return self
+
+ def __exit__(self, *exc):
+ if exc[0] is None:
+ self.graph.addN(self.batch)
+
+
def test():
import doctest
+
doctest.testmod()
-if __name__ == '__main__':
+if __name__ == "__main__":
test()
diff --git a/rdflib/namespace.py b/rdflib/namespace.py
index 78f71c22..091fbc60 100644
--- a/rdflib/namespace.py
+++ b/rdflib/namespace.py
@@ -7,12 +7,10 @@ import logging
import os
from unicodedata import category
-from six import string_types
-from six import text_type
-from six.moves.urllib.request import pathname2url
-from six.moves.urllib.parse import urldefrag
-from six.moves.urllib.parse import urljoin
+from urllib.request import pathname2url
+from urllib.parse import urldefrag
+from urllib.parse import urljoin
from rdflib.term import URIRef, Variable, _XSD_PFX, _is_valid_uri
@@ -73,15 +71,40 @@ The following namespaces are available by directly importing from rdflib:
"""
__all__ = [
- 'is_ncname', 'split_uri', 'Namespace',
- 'ClosedNamespace', 'NamespaceManager',
- 'XMLNS', 'RDF', 'RDFS', 'XSD', 'OWL',
- 'SKOS', 'DOAP', 'FOAF', 'DC', 'DCTERMS', 'VOID']
+ "is_ncname",
+ "split_uri",
+ "Namespace",
+ "ClosedNamespace",
+ "NamespaceManager",
+ "CSVW",
+ "DC",
+ "DCAT",
+ "DCTERMS",
+ "DOAP",
+ "FOAF",
+ "ODRL2",
+ "ORG",
+ "OWL",
+ "PROF",
+ "PROV",
+ "QB",
+ "RDF",
+ "RDFS",
+ "SDO",
+ "SH",
+ "SKOS",
+ "SOSA",
+ "SSN",
+ "TIME",
+ "VOID",
+ "XMLNS",
+ "XSD",
+]
logger = logging.getLogger(__name__)
-class Namespace(text_type):
+class Namespace(str):
__doc__ = """
Utility class for quickly generating URIRefs with a common prefix
@@ -97,18 +120,18 @@ class Namespace(text_type):
def __new__(cls, value):
try:
- rt = text_type.__new__(cls, value)
+ rt = str.__new__(cls, value)
except UnicodeDecodeError:
- rt = text_type.__new__(cls, value, 'utf-8')
+ rt = str.__new__(cls, value, "utf-8")
return rt
@property
def title(self):
- return URIRef(self + 'title')
+ return URIRef(self + "title")
def term(self, name):
# need to handle slices explicitly because of __getitem__ override
- return URIRef(self + (name if isinstance(name, string_types) else ''))
+ return URIRef(self + (name if isinstance(name, str) else ""))
def __getitem__(self, key, default=None):
return self.term(key)
@@ -120,10 +143,10 @@ class Namespace(text_type):
return self.term(name)
def __repr__(self):
- return "Namespace(%r)" % text_type(self)
+ return "Namespace(%r)" % str(self)
-class URIPattern(text_type):
+class URIPattern(str):
__doc__ = """
Utility class for creating URIs according to some pattern
@@ -138,19 +161,19 @@ class URIPattern(text_type):
def __new__(cls, value):
try:
- rt = text_type.__new__(cls, value)
+ rt = str.__new__(cls, value)
except UnicodeDecodeError:
- rt = text_type.__new__(cls, value, 'utf-8')
+ rt = str.__new__(cls, value, "utf-8")
return rt
def __mod__(self, *args, **kwargs):
- return URIRef(text_type(self).__mod__(*args, **kwargs))
+ return URIRef(str(self).__mod__(*args, **kwargs))
def format(self, *args, **kwargs):
- return URIRef(text_type.format(self, *args, **kwargs))
+ return URIRef(str.format(self, *args, **kwargs))
def __repr__(self):
- return "URIPattern(%r)" % text_type(self)
+ return "URIPattern(%r)" % str(self)
class ClosedNamespace(object):
@@ -169,9 +192,7 @@ class ClosedNamespace(object):
def term(self, name):
uri = self.__uris.get(name)
if uri is None:
- raise KeyError(
- "term '{}' not in namespace '{}'".format(name, self.uri)
- )
+ raise KeyError("term '{}' not in namespace '{}'".format(name, self.uri))
else:
return uri
@@ -188,10 +209,10 @@ class ClosedNamespace(object):
raise AttributeError(e)
def __str__(self):
- return text_type(self.uri)
+ return str(self.uri)
def __repr__(self):
- return "rdf.namespace.ClosedNamespace(%r)" % text_type(self.uri)
+ return "rdf.namespace.ClosedNamespace(%r)" % str(self.uri)
class _RDFNamespace(ClosedNamespace):
@@ -204,31 +225,49 @@ class _RDFNamespace(ClosedNamespace):
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#"),
terms=[
# Syntax Names
- "RDF", "Description", "ID", "about", "parseType",
- "resource", "li", "nodeID", "datatype",
-
+ "RDF",
+ "Description",
+ "ID",
+ "about",
+ "parseType",
+ "resource",
+ "li",
+ "nodeID",
+ "datatype",
# RDF Classes
- "Seq", "Bag", "Alt", "Statement", "Property",
- "List", "PlainLiteral",
-
+ "Seq",
+ "Bag",
+ "Alt",
+ "Statement",
+ "Property",
+ "List",
+ "PlainLiteral",
# RDF Properties
- "subject", "predicate", "object", "type",
- "value", "first", "rest",
+ "subject",
+ "predicate",
+ "object",
+ "type",
+ "value",
+ "first",
+ "rest",
# and _n where n is a non-negative integer
-
# RDF Resources
"nil",
-
# Added in RDF 1.1
- "XMLLiteral", "HTML", "langString",
-
+ "XMLLiteral",
+ "HTML",
+ "langString",
# Added in JSON-LD 1.1
- "JSON", "CompoundLiteral", "language", "direction"]
+ "JSON",
+ "CompoundLiteral",
+ "language",
+ "direction",
+ ],
)
def term(self, name):
# Container membership properties
- if name.startswith('_'):
+ if name.startswith("_"):
try:
i = int(name[1:])
except ValueError:
@@ -240,85 +279,222 @@ class _RDFNamespace(ClosedNamespace):
return super(_RDFNamespace, self).term(name)
-RDF = _RDFNamespace()
-
-
-RDFS = ClosedNamespace(
- uri=URIRef("http://www.w3.org/2000/01/rdf-schema#"),
- terms=[
- "Resource", "Class", "subClassOf", "subPropertyOf", "comment", "label",
- "domain", "range", "seeAlso", "isDefinedBy", "Literal", "Container",
- "ContainerMembershipProperty", "member", "Datatype"]
-)
-
-OWL = Namespace('http://www.w3.org/2002/07/owl#')
-
-XSD = Namespace(_XSD_PFX)
-
-CSVW = Namespace('http://www.w3.org/ns/csvw#')
-DC = Namespace('http://purl.org/dc/elements/1.1/')
-DCAT = Namespace('http://www.w3.org/ns/dcat#')
-DCTERMS = Namespace('http://purl.org/dc/terms/')
-DOAP = Namespace('http://usefulinc.com/ns/doap#')
+CSVW = Namespace("http://www.w3.org/ns/csvw#")
+DC = Namespace("http://purl.org/dc/elements/1.1/")
+DCAT = Namespace("http://www.w3.org/ns/dcat#")
+DCTERMS = Namespace("http://purl.org/dc/terms/")
+DOAP = Namespace("http://usefulinc.com/ns/doap#")
FOAF = ClosedNamespace(
- uri=URIRef('http://xmlns.com/foaf/0.1/'),
+ uri=URIRef("http://xmlns.com/foaf/0.1/"),
terms=[
# all taken from http://xmlns.com/foaf/spec/
- 'Agent', 'Person', 'name', 'title', 'img',
- 'depiction', 'depicts', 'familyName',
- 'givenName', 'knows', 'based_near', 'age', 'made',
- 'maker', 'primaryTopic', 'primaryTopicOf', 'Project', 'Organization',
- 'Group', 'member', 'Document', 'Image', 'nick',
- 'mbox', 'homepage', 'weblog', 'openid', 'jabberID',
- 'mbox_sha1sum', 'interest', 'topic_interest', 'topic', 'page',
- 'workplaceHomepage', 'workInfoHomepage', 'schoolHomepage', 'publications', 'currentProject',
- 'pastProject', 'account', 'OnlineAccount', 'accountName', 'accountServiceHomepage',
- 'PersonalProfileDocument', 'tipjar', 'sha1', 'thumbnail', 'logo'
- ]
+ "Agent",
+ "Person",
+ "name",
+ "title",
+ "img",
+ "depiction",
+ "depicts",
+ "familyName",
+ "givenName",
+ "knows",
+ "based_near",
+ "age",
+ "made",
+ "maker",
+ "primaryTopic",
+ "primaryTopicOf",
+ "Project",
+ "Organization",
+ "Group",
+ "member",
+ "Document",
+ "Image",
+ "nick",
+ "mbox",
+ "homepage",
+ "weblog",
+ "openid",
+ "jabberID",
+ "mbox_sha1sum",
+ "interest",
+ "topic_interest",
+ "topic",
+ "page",
+ "workplaceHomepage",
+ "workInfoHomepage",
+ "schoolHomepage",
+ "publications",
+ "currentProject",
+ "pastProject",
+ "account",
+ "OnlineAccount",
+ "accountName",
+ "accountServiceHomepage",
+ "PersonalProfileDocument",
+ "tipjar",
+ "sha1",
+ "thumbnail",
+ "logo",
+ ],
)
-ODRL2 = Namespace('http://www.w3.org/ns/odrl/2/')
-ORG = Namespace('http://www.w3.org/ns/org#')
+ODRL2 = Namespace("http://www.w3.org/ns/odrl/2/")
+ORG = Namespace("http://www.w3.org/ns/org#")
+OWL = Namespace("http://www.w3.org/2002/07/owl#")
+PROF = Namespace("http://www.w3.org/ns/dx/prof/")
PROV = ClosedNamespace(
- uri=URIRef('http://www.w3.org/ns/prov#'),
+ uri=URIRef("http://www.w3.org/ns/prov#"),
terms=[
- 'Entity', 'Activity', 'Agent', 'wasGeneratedBy', 'wasDerivedFrom',
- 'wasAttributedTo', 'startedAtTime', 'used', 'wasInformedBy', 'endedAtTime',
- 'wasAssociatedWith', 'actedOnBehalfOf', 'Collection', 'EmptyCollection', 'Bundle',
- 'Person', 'SoftwareAgent', 'Organization', 'Location', 'alternateOf',
- 'specializationOf', 'generatedAtTime', 'hadPrimarySource', 'value', 'wasQuotedFrom',
- 'wasRevisionOf', 'invalidatedAtTime', 'wasInvalidatedBy', 'hadMember', 'wasStartedBy',
- 'wasEndedBy', 'invalidated', 'influenced', 'atLocation', 'generated',
- 'Influence', 'EntityInfluence', 'Usage', 'Start', 'End',
- 'Derivation', 'PrimarySource', 'Quotation', 'Revision', 'ActivityInfluence',
- 'Generation', 'Communication', 'Invalidation', 'AgentInfluence',
- 'Attribution', 'Association', 'Plan', 'Delegation', 'InstantaneousEvent',
- 'Role', 'wasInfluencedBy', 'qualifiedInfluence', 'qualifiedGeneration', 'qualifiedDerivation',
- 'qualifiedPrimarySource', 'qualifiedQuotation', 'qualifiedRevision', 'qualifiedAttribution',
- 'qualifiedInvalidation', 'qualifiedStart', 'qualifiedUsage', 'qualifiedCommunication', 'qualifiedAssociation',
- 'qualifiedEnd', 'qualifiedDelegation', 'influencer', 'entity', 'hadUsage', 'hadGeneration',
- 'activity', 'agent', 'hadPlan', 'hadActivity', 'atTime', 'hadRole'
- ]
+ "Entity",
+ "Activity",
+ "Agent",
+ "wasGeneratedBy",
+ "wasDerivedFrom",
+ "wasAttributedTo",
+ "startedAtTime",
+ "used",
+ "wasInformedBy",
+ "endedAtTime",
+ "wasAssociatedWith",
+ "actedOnBehalfOf",
+ "Collection",
+ "EmptyCollection",
+ "Bundle",
+ "Person",
+ "SoftwareAgent",
+ "Organization",
+ "Location",
+ "alternateOf",
+ "specializationOf",
+ "generatedAtTime",
+ "hadPrimarySource",
+ "value",
+ "wasQuotedFrom",
+ "wasRevisionOf",
+ "invalidatedAtTime",
+ "wasInvalidatedBy",
+ "hadMember",
+ "wasStartedBy",
+ "wasEndedBy",
+ "invalidated",
+ "influenced",
+ "atLocation",
+ "generated",
+ "Influence",
+ "EntityInfluence",
+ "Usage",
+ "Start",
+ "End",
+ "Derivation",
+ "PrimarySource",
+ "Quotation",
+ "Revision",
+ "ActivityInfluence",
+ "Generation",
+ "Communication",
+ "Invalidation",
+ "AgentInfluence",
+ "Attribution",
+ "Association",
+ "Plan",
+ "Delegation",
+ "InstantaneousEvent",
+ "Role",
+ "wasInfluencedBy",
+ "qualifiedInfluence",
+ "qualifiedGeneration",
+ "qualifiedDerivation",
+ "qualifiedPrimarySource",
+ "qualifiedQuotation",
+ "qualifiedRevision",
+ "qualifiedAttribution",
+ "qualifiedInvalidation",
+ "qualifiedStart",
+ "qualifiedUsage",
+ "qualifiedCommunication",
+ "qualifiedAssociation",
+ "qualifiedEnd",
+ "qualifiedDelegation",
+ "influencer",
+ "entity",
+ "hadUsage",
+ "hadGeneration",
+ "activity",
+ "agent",
+ "hadPlan",
+ "hadActivity",
+ "atTime",
+ "hadRole",
+ ],
)
-PROF = Namespace('http://www.w3.org/ns/dx/prof/')
-SDO = Namespace('https://schema.org/')
-SH = Namespace('http://www.w3.org/ns/shacl#')
+QB = Namespace("http://purl.org/linked-data/cube#")
+RDF = _RDFNamespace()
+RDFS = ClosedNamespace(
+ uri=URIRef("http://www.w3.org/2000/01/rdf-schema#"),
+ terms=[
+ "Resource",
+ "Class",
+ "subClassOf",
+ "subPropertyOf",
+ "comment",
+ "label",
+ "domain",
+ "range",
+ "seeAlso",
+ "isDefinedBy",
+ "Literal",
+ "Container",
+ "ContainerMembershipProperty",
+ "member",
+ "Datatype",
+ ],
+)
+SDO = Namespace("https://schema.org/")
+SH = Namespace("http://www.w3.org/ns/shacl#")
SKOS = ClosedNamespace(
- uri=URIRef('http://www.w3.org/2004/02/skos/core#'),
+ uri=URIRef("http://www.w3.org/2004/02/skos/core#"),
terms=[
# all taken from https://www.w3.org/TR/skos-reference/#L1302
- 'Concept', 'ConceptScheme', 'inScheme', 'hasTopConcept', 'topConceptOf',
- 'altLabel', 'hiddenLabel', 'prefLabel', 'notation', 'changeNote',
- 'definition', 'editorialNote', 'example', 'historyNote', 'note',
- 'scopeNote', 'broader', 'broaderTransitive', 'narrower', 'narrowerTransitive',
- 'related', 'semanticRelation', 'Collection', 'OrderedCollection', 'member',
- 'memberList', 'broadMatch', 'closeMatch', 'exactMatch', 'mappingRelation',
- 'narrowMatch', 'relatedMatch'
- ]
+ "Concept",
+ "ConceptScheme",
+ "inScheme",
+ "hasTopConcept",
+ "topConceptOf",
+ "altLabel",
+ "hiddenLabel",
+ "prefLabel",
+ "notation",
+ "changeNote",
+ "definition",
+ "editorialNote",
+ "example",
+ "historyNote",
+ "note",
+ "scopeNote",
+ "broader",
+ "broaderTransitive",
+ "narrower",
+ "narrowerTransitive",
+ "related",
+ "semanticRelation",
+ "Collection",
+ "OrderedCollection",
+ "member",
+ "memberList",
+ "broadMatch",
+ "closeMatch",
+ "exactMatch",
+ "mappingRelation",
+ "narrowMatch",
+ "relatedMatch",
+ ],
)
-SOSA = Namespace('http://www.w3.org/ns/ssn/')
-SSN = Namespace('http://www.w3.org/ns/sosa/')
-TIME = Namespace('http://www.w3.org/2006/time#')
-VOID = Namespace('http://rdfs.org/ns/void#')
+SOSA = Namespace("http://www.w3.org/ns/ssn/")
+SSN = Namespace("http://www.w3.org/ns/sosa/")
+TIME = Namespace("http://www.w3.org/2006/time#")
+VOID = Namespace("http://rdfs.org/ns/void#")
+XMLNS = Namespace("http://www.w3.org/XML/1998/namespace")
+XSD = Namespace(_XSD_PFX)
class NamespaceManager(object):
@@ -376,6 +552,7 @@ class NamespaceManager(object):
def __get_store(self):
return self.graph.store
+
store = property(__get_store)
def qname(self, uri):
@@ -387,10 +564,10 @@ class NamespaceManager(object):
def qname_strict(self, uri):
prefix, namespace, name = self.compute_qname_strict(uri)
- if prefix == '':
+ if prefix == "":
return name
else:
- return ':'.join((prefix, name))
+ return ":".join((prefix, name))
def normalizeUri(self, rdfTerm):
"""
@@ -402,7 +579,7 @@ class NamespaceManager(object):
namespace, name = split_uri(rdfTerm)
if namespace not in self.__strie:
insert_strie(self.__strie, self.__trie, str(namespace))
- namespace = URIRef(text_type(namespace))
+ namespace = URIRef(str(namespace))
except:
if isinstance(rdfTerm, Variable):
return "?%s" % rdfTerm
@@ -415,13 +592,15 @@ class NamespaceManager(object):
return "<%s>" % rdfTerm
else:
qNameParts = self.compute_qname(rdfTerm)
- return ':'.join([qNameParts[0], qNameParts[-1]])
+ return ":".join([qNameParts[0], qNameParts[-1]])
def compute_qname(self, uri, generate=True):
if not _is_valid_uri(uri):
raise ValueError(
- '"{}" does not look like a valid URI, cannot serialize this. Did you want to urlencode it?'.format(uri)
+ '"{}" does not look like a valid URI, cannot serialize this. Did you want to urlencode it?'.format(
+ uri
+ )
)
if uri not in self.__cache:
@@ -464,39 +643,45 @@ class NamespaceManager(object):
# if output needs to be strict (e.g. for xml) then
# only the strict output should bear the overhead
prefix, namespace, name = self.compute_qname(uri)
- if is_ncname(text_type(name)):
+ if is_ncname(str(name)):
return prefix, namespace, name
else:
if uri not in self.__cache_strict:
try:
namespace, name = split_uri(uri, NAME_START_CATEGORIES)
except ValueError as e:
- message = ('This graph cannot be serialized to a strict format '
- 'because there is no valid way to shorten {}'.format(uri))
+ message = (
+ "This graph cannot be serialized to a strict format "
+ "because there is no valid way to shorten {}".format(uri)
+ )
raise ValueError(message)
# omitted for strict since NCNames cannot be empty
- #namespace = URIRef(uri)
- #prefix = self.store.prefix(namespace)
- #if not prefix:
- #raise e
+ # namespace = URIRef(uri)
+ # prefix = self.store.prefix(namespace)
+ # if not prefix:
+ # raise e
if namespace not in self.__strie:
insert_strie(self.__strie, self.__trie, namespace)
# omitted for strict
- #if self.__strie[namespace]:
- #pl_namespace = get_longest_namespace(self.__strie[namespace], uri)
- #if pl_namespace is not None:
- #namespace = pl_namespace
- #name = uri[len(namespace):]
+ # if self.__strie[namespace]:
+ # pl_namespace = get_longest_namespace(self.__strie[namespace], uri)
+ # if pl_namespace is not None:
+ # namespace = pl_namespace
+ # name = uri[len(namespace):]
namespace = URIRef(namespace)
- prefix = self.store.prefix(namespace) # warning multiple prefixes problem
+ prefix = self.store.prefix(
+ namespace
+ ) # warning multiple prefixes problem
if prefix is None:
if not generate:
raise KeyError(
- "No known prefix for {} and generate=False".format(namespace)
+ "No known prefix for {} and generate=False".format(
+ namespace
+ )
)
num = 1
while 1:
@@ -519,10 +704,10 @@ class NamespaceManager(object):
"""
- namespace = URIRef(text_type(namespace))
+ namespace = URIRef(str(namespace))
# When documenting explain that override only applies in what cases
if prefix is None:
- prefix = ''
+ prefix = ""
bound_namespace = self.store.namespace(prefix)
# Check if the bound_namespace contains a URI
# and if so convert it into a URIRef for comparison
@@ -581,6 +766,7 @@ class NamespaceManager(object):
result = "%s#" % result
return URIRef(result)
+
# From: http://www.w3.org/TR/REC-xml#NT-CombiningChar
#
# * Name start characters must have one of the categories Ll, Lu, Lo,
@@ -616,7 +802,7 @@ class NamespaceManager(object):
NAME_START_CATEGORIES = ["Ll", "Lu", "Lo", "Lt", "Nl"]
-SPLIT_START_CATEGORIES = NAME_START_CATEGORIES + ['Nd']
+SPLIT_START_CATEGORIES = NAME_START_CATEGORIES + ["Nd"]
NAME_CATEGORIES = NAME_START_CATEGORIES + ["Mc", "Me", "Mn", "Lm", "Nd"]
ALLOWED_NAME_CHARS = [u"\u00B7", u"\u0387", u"-", u".", u"_", u":"]
@@ -635,7 +821,7 @@ def is_ncname(name):
for i in range(1, len(name)):
c = name[i]
if not category(c) in NAME_CATEGORIES:
- if c != ':' and c in ALLOWED_NAME_CHARS:
+ if c != ":" and c in ALLOWED_NAME_CHARS:
continue
return 0
# if in compatibility area
@@ -647,9 +833,6 @@ def is_ncname(name):
return 0
-XMLNS = "http://www.w3.org/XML/1998/namespace"
-
-
def split_uri(uri, split_start=SPLIT_START_CATEGORIES):
if uri.startswith(XMLNS):
return (XMLNS, uri.split(XMLNS)[1])
@@ -670,6 +853,7 @@ def split_uri(uri, split_start=SPLIT_START_CATEGORIES):
break
raise ValueError("Can't split '{}'".format(uri))
+
def insert_trie(trie, value): # aka get_subtrie_or_insert
""" Insert a value into the trie if it is not already contained in the trie.
Return the subtree for the value regardless of whether it is a new value
@@ -684,16 +868,20 @@ def insert_trie(trie, value): # aka get_subtrie_or_insert
if not multi_check:
trie[value] = {}
multi_check = True # there can be multiple longer existing prefixes
- dict_ = trie.pop(key) # does not break strie since key<->dict_ remains unchanged
+ dict_ = trie.pop(
+ key
+ ) # does not break strie since key<->dict_ remains unchanged
trie[value][key] = dict_
if value not in trie:
trie[value] = {}
return trie[value]
+
def insert_strie(strie, trie, value):
if value not in strie:
strie[value] = insert_trie(trie, value)
+
def get_longest_namespace(trie, value):
for key in trie:
if value.startswith(key):
diff --git a/rdflib/parser.py b/rdflib/parser.py
index 8512cb33..9e501c03 100644
--- a/rdflib/parser.py
+++ b/rdflib/parser.py
@@ -16,15 +16,14 @@ from __future__ import print_function
import os
import sys
-from six import BytesIO
-from six import string_types
-from six import text_type
+from io import BytesIO
-from six.moves.urllib.request import pathname2url
-from six.moves.urllib.request import Request
-from six.moves.urllib.request import url2pathname
-from six.moves.urllib.parse import urljoin
-from six.moves.urllib.request import urlopen
+
+from urllib.request import pathname2url
+from urllib.request import Request
+from urllib.request import url2pathname
+from urllib.parse import urljoin
+from urllib.request import urlopen
from xml.sax import xmlreader
@@ -33,12 +32,15 @@ from rdflib.term import URIRef
from rdflib.namespace import Namespace
__all__ = [
- 'Parser', 'InputSource', 'StringInputSource',
- 'URLInputSource', 'FileInputSource']
+ "Parser",
+ "InputSource",
+ "StringInputSource",
+ "URLInputSource",
+ "FileInputSource",
+]
class Parser(object):
-
def __init__(self):
pass
@@ -58,7 +60,7 @@ class InputSource(xmlreader.InputSource, object):
def close(self):
f = self.getByteStream()
- if f and hasattr(f, 'close'):
+ if f and hasattr(f, "close"):
f.close()
@@ -77,8 +79,7 @@ class StringInputSource(InputSource):
headers = {
- 'User-agent':
- 'rdflib-%s (http://rdflib.net/; eikeon@eikeon.com)' % __version__
+ "User-agent": "rdflib-%s (http://rdflib.net/; eikeon@eikeon.com)" % __version__
}
@@ -93,28 +94,30 @@ class URLInputSource(InputSource):
# copy headers to change
myheaders = dict(headers)
- if format == 'application/rdf+xml':
- myheaders['Accept'] = 'application/rdf+xml, */*;q=0.1'
- elif format == 'n3':
- myheaders['Accept'] = 'text/n3, */*;q=0.1'
- elif format == 'turtle':
- myheaders['Accept'] = 'text/turtle,application/x-turtle, */*;q=0.1'
- elif format == 'nt':
- myheaders['Accept'] = 'text/plain, */*;q=0.1'
- elif format == 'json-ld':
- myheaders['Accept'] = (
- 'application/ld+json, application/json;q=0.9, */*;q=0.1')
+ if format == "application/rdf+xml":
+ myheaders["Accept"] = "application/rdf+xml, */*;q=0.1"
+ elif format == "n3":
+ myheaders["Accept"] = "text/n3, */*;q=0.1"
+ elif format == "turtle":
+ myheaders["Accept"] = "text/turtle,application/x-turtle, */*;q=0.1"
+ elif format == "nt":
+ myheaders["Accept"] = "text/plain, */*;q=0.1"
+ elif format == "json-ld":
+ myheaders[
+ "Accept"
+ ] = "application/ld+json, application/json;q=0.9, */*;q=0.1"
else:
- myheaders['Accept'] = (
- 'application/rdf+xml,text/rdf+n3;q=0.9,' +
- 'application/xhtml+xml;q=0.5, */*;q=0.1')
+ myheaders["Accept"] = (
+ "application/rdf+xml,text/rdf+n3;q=0.9,"
+ + "application/xhtml+xml;q=0.5, */*;q=0.1"
+ )
req = Request(system_id, None, myheaders)
file = urlopen(req)
# Fix for issue 130 https://github.com/RDFLib/rdflib/issues/130
- self.url = file.geturl() # in case redirections took place
+ self.url = file.geturl() # in case redirections took place
self.setPublicId(self.url)
- self.content_type = file.info().get('content-type')
+ self.content_type = file.info().get("content-type")
if self.content_type is not None:
self.content_type = self.content_type.split(";", 1)[0]
self.setByteStream(file)
@@ -126,7 +129,6 @@ class URLInputSource(InputSource):
class FileInputSource(InputSource):
-
def __init__(self, file):
base = urljoin("file:", pathname2url(os.getcwd()))
system_id = URIRef(urljoin("file:", pathname2url(file.name)), base=base)
@@ -139,23 +141,27 @@ class FileInputSource(InputSource):
return repr(self.file)
-def create_input_source(source=None, publicID=None,
- location=None, file=None, data=None, format=None):
+def create_input_source(
+ source=None, publicID=None, location=None, file=None, data=None, format=None
+):
"""
Return an appropriate InputSource instance for the given
parameters.
"""
# test that exactly one of source, location, file, and data is not None.
- if sum((
- source is not None,
- location is not None,
- file is not None,
- data is not None,
- )) != 1:
- raise ValueError(
- 'exactly one of source, location, file or data must be given'
+ if (
+ sum(
+ (
+ source is not None,
+ location is not None,
+ file is not None,
+ data is not None,
+ )
)
+ != 1
+ ):
+ raise ValueError("exactly one of source, location, file or data must be given")
input_source = None
@@ -163,7 +169,7 @@ def create_input_source(source=None, publicID=None,
if isinstance(source, InputSource):
input_source = source
else:
- if isinstance(source, string_types):
+ if isinstance(source, str):
location = source
elif hasattr(source, "read") and not isinstance(source, Namespace):
f = source
@@ -174,8 +180,9 @@ def create_input_source(source=None, publicID=None,
elif hasattr(f, "name"):
input_source.setSystemId(f.name)
else:
- raise Exception("Unexpected type '%s' for source '%s'" %
- (type(source), source))
+ raise Exception(
+ "Unexpected type '%s' for source '%s'" % (type(source), source)
+ )
absolute_location = None # Further to fix for issue 130
@@ -199,8 +206,8 @@ def create_input_source(source=None, publicID=None,
input_source = FileInputSource(file)
if data is not None:
- if isinstance(data, text_type):
- data = data.encode('utf-8')
+ if isinstance(data, str):
+ data = data.encode("utf-8")
input_source = StringInputSource(data)
auto_close = True
diff --git a/rdflib/paths.py b/rdflib/paths.py
index 163f046f..d2fccd22 100644
--- a/rdflib/paths.py
+++ b/rdflib/paths.py
@@ -1,6 +1,3 @@
-from six import PY3
-
-
__doc__ = """
This module implements the SPARQL 1.1 Property path operators, as
@@ -32,18 +29,19 @@ In SPARQL the syntax is as follows:
| | of the path by zero or one matches of elt. |
+--------------------+-------------------------------------------------+
|!iri or | Negated property set. An IRI which is not one of|
-|!(iri\ :sub:`1`\ | | iri\ :sub:`1`...iri\ :sub:`n`. |
-|... |iri\ :sub:`n`) | !iri is short for !(iri). |
+|!(iri\ :sub:`1`\ \| | iri\ :sub:`1`...iri\ :sub:`n`. |
+|... \|iri\ :sub:`n`)| !iri is short for !(iri). |
+--------------------+-------------------------------------------------+
|!^iri or | Negated property set where the excluded matches |
-|!(^iri\ :sub:`1`\ | | are based on reversed path. That is, not one of |
-|... |^iri\ :sub:`n`)| iri\ :sub:`1`...iri\ :sub:`n` as reverse paths. |
+|!(^iri\ :sub:`1`\ \|| are based on reversed path. That is, not one of |
+|...\|^iri\ :sub:`n`)| iri\ :sub:`1`...iri\ :sub:`n` as reverse paths. |
| | !^iri is short for !(^iri). |
+--------------------+-------------------------------------------------+
-|!(iri\ :sub:`1`\ | | A combination of forward and reverse |
-|...|iri\ :sub:`j`\ || properties in a negated property set. |
-|^iri\ :sub:`j+1`\ | | |
-|... |^iri\ :sub:`n`)| |
+|!(iri\ :sub:`1`\ \| | A combination of forward and reverse |
+|...\|iri\ :sub:`j`\ | properties in a negated property set. |
+|\|^iri\ :sub:`j+1`\ | |
+|\|... \|^iri\ | |
+|:sub:`n`)| | |
+--------------------+-------------------------------------------------+
|(elt) | A group path elt, brackets control precedence. |
+--------------------+-------------------------------------------------+
@@ -190,9 +188,9 @@ from rdflib.term import URIRef, Node
# property paths
-ZeroOrMore = '*'
-OneOrMore = '+'
-ZeroOrOne = '?'
+ZeroOrMore = "*"
+OneOrMore = "+"
+ZeroOrOne = "?"
class Path(object):
@@ -207,14 +205,16 @@ class Path(object):
def __lt__(self, other):
if not isinstance(other, (Path, Node)):
- raise TypeError('unorderable types: %s() < %s()' % (
- repr(self), repr(other)))
+ raise TypeError(
+ "unorderable types: %s() < %s()" % (repr(self), repr(other))
+ )
return repr(self) < repr(other)
def __le__(self, other):
if not isinstance(other, (Path, Node)):
- raise TypeError('unorderable types: %s() < %s()' % (
- repr(self), repr(other)))
+ raise TypeError(
+ "unorderable types: %s() < %s()" % (repr(self), repr(other))
+ )
return repr(self) <= repr(other)
def __ne__(self, other):
@@ -228,7 +228,6 @@ class Path(object):
class InvPath(Path):
-
def __init__(self, arg):
self.arg = arg
@@ -240,7 +239,7 @@ class InvPath(Path):
return "Path(~%s)" % (self.arg,)
def n3(self):
- return '^%s' % self.arg.n3()
+ return "^%s" % self.arg.n3()
class SequencePath(Path):
@@ -284,7 +283,7 @@ class SequencePath(Path):
return "Path(%s)" % " / ".join(str(x) for x in self.args)
def n3(self):
- return '/'.join(a.n3() for a in self.args)
+ return "/".join(a.n3() for a in self.args)
class AlternativePath(Path):
@@ -305,7 +304,7 @@ class AlternativePath(Path):
return "Path(%s)" % " | ".join(str(x) for x in self.args)
def n3(self):
- return '|'.join(a.n3() for a in self.args)
+ return "|".join(a.n3() for a in self.args)
class MulPath(Path):
@@ -323,7 +322,7 @@ class MulPath(Path):
self.zero = False
self.more = True
else:
- raise Exception('Unknown modifier %s' % mod)
+ raise Exception("Unknown modifier %s" % mod)
def eval(self, graph, subj=None, obj=None, first=True):
if self.zero and first:
@@ -386,7 +385,7 @@ class MulPath(Path):
f = list(_fwd(s, None, set()))
for s1, o1 in f:
assert s1 == s
- yield(s1, o1)
+ yield (s1, o1)
done = set() # the spec does by defn. not allow duplicates
if subj:
@@ -409,7 +408,7 @@ class MulPath(Path):
return "Path(%s%s)" % (self.path, self.mod)
def n3(self):
- return '%s%s' % (self.path.n3(), self.mod)
+ return "%s%s" % (self.path.n3(), self.mod)
class NegatedPath(Path):
@@ -420,8 +419,9 @@ class NegatedPath(Path):
self.args = arg.args
else:
raise Exception(
- 'Can only negate URIRefs, InvPaths or ' +
- 'AlternativePaths, not: %s' % (arg,))
+ "Can only negate URIRefs, InvPaths or "
+ + "AlternativePaths, not: %s" % (arg,)
+ )
def eval(self, graph, subj=None, obj=None):
for s, p, o in graph.triples((subj, None, obj)):
@@ -433,7 +433,7 @@ class NegatedPath(Path):
if (o, a.arg, s) in graph:
break
else:
- raise Exception('Invalid path in NegatedPath: %s' % a)
+ raise Exception("Invalid path in NegatedPath: %s" % a)
else:
yield s, o
@@ -441,7 +441,7 @@ class NegatedPath(Path):
return "Path(! %s)" % ",".join(str(x) for x in self.args)
def n3(self):
- return '!(%s)' % ('|'.join(self.args))
+ return "!(%s)" % ("|".join(self.args))
class PathList(list):
@@ -453,7 +453,7 @@ def path_alternative(self, other):
alternative path
"""
if not isinstance(other, (URIRef, Path)):
- raise Exception('Only URIRefs or Paths can be in paths!')
+ raise Exception("Only URIRefs or Paths can be in paths!")
return AlternativePath(self, other)
@@ -462,7 +462,7 @@ def path_sequence(self, other):
sequence path
"""
if not isinstance(other, (URIRef, Path)):
- raise Exception('Only URIRefs or Paths can be in paths!')
+ raise Exception("Only URIRefs or Paths can be in paths!")
return SequencePath(self, other)
@@ -491,9 +491,10 @@ def neg_path(p):
return NegatedPath(p)
-if __name__ == '__main__':
+if __name__ == "__main__":
import doctest
+
doctest.testmod()
else:
# monkey patch
@@ -505,13 +506,9 @@ else:
URIRef.__invert__ = inv_path
URIRef.__neg__ = neg_path
URIRef.__truediv__ = path_sequence
- if not PY3:
- URIRef.__div__ = path_sequence
Path.__invert__ = inv_path
Path.__neg__ = neg_path
Path.__mul__ = mul_path
Path.__or__ = path_alternative
Path.__truediv__ = path_sequence
- if not PY3:
- Path.__div__ = path_sequence
diff --git a/rdflib/plugin.py b/rdflib/plugin.py
index 051b73a3..cc5b6d35 100644
--- a/rdflib/plugin.py
+++ b/rdflib/plugin.py
@@ -31,22 +31,27 @@ from __future__ import print_function
from rdflib.store import Store
from rdflib.parser import Parser
from rdflib.serializer import Serializer
-from rdflib.query import ResultParser, ResultSerializer, \
- Processor, Result, UpdateProcessor
+from rdflib.query import (
+ ResultParser,
+ ResultSerializer,
+ Processor,
+ Result,
+ UpdateProcessor,
+)
from rdflib.exceptions import Error
-__all__ = [
- 'register', 'get', 'plugins', 'PluginException', 'Plugin', 'PKGPlugin']
+__all__ = ["register", "get", "plugins", "PluginException", "Plugin", "PKGPlugin"]
-entry_points = {'rdf.plugins.store': Store,
- 'rdf.plugins.serializer': Serializer,
- 'rdf.plugins.parser': Parser,
- 'rdf.plugins.resultparser': ResultParser,
- 'rdf.plugins.resultserializer': ResultSerializer,
- 'rdf.plugins.queryprocessor': Processor,
- 'rdf.plugins.queryresult': Result,
- 'rdf.plugins.updateprocessor': UpdateProcessor
- }
+entry_points = {
+ "rdf.plugins.store": Store,
+ "rdf.plugins.serializer": Serializer,
+ "rdf.plugins.parser": Parser,
+ "rdf.plugins.resultparser": ResultParser,
+ "rdf.plugins.resultserializer": ResultSerializer,
+ "rdf.plugins.queryprocessor": Processor,
+ "rdf.plugins.queryresult": Result,
+ "rdf.plugins.updateprocessor": UpdateProcessor,
+}
_plugins = {}
@@ -56,7 +61,6 @@ class PluginException(Error):
class Plugin(object):
-
def __init__(self, name, kind, module_path, class_name):
self.name = name
self.kind = kind
@@ -72,7 +76,6 @@ class Plugin(object):
class PKGPlugin(Plugin):
-
def __init__(self, name, kind, ep):
self.name = name
self.kind = kind
@@ -102,8 +105,7 @@ def get(name, kind):
try:
p = _plugins[(name, kind)]
except KeyError:
- raise PluginException(
- "No plugin registered for (%s, %s)" % (name, kind))
+ raise PluginException("No plugin registered for (%s, %s)" % (name, kind))
return p.getClass()
@@ -125,193 +127,182 @@ def plugins(name=None, kind=None):
Pass in name and kind to filter... else leave None to match all.
"""
for p in _plugins.values():
- if (name is None or name == p.name) and (
- kind is None or kind == p.kind):
+ if (name is None or name == p.name) and (kind is None or kind == p.kind):
yield p
+register("default", Store, "rdflib.plugins.memory", "IOMemory")
+register("IOMemory", Store, "rdflib.plugins.memory", "IOMemory")
+register("Auditable", Store, "rdflib.plugins.stores.auditable", "AuditableStore")
+register("Concurrent", Store, "rdflib.plugins.stores.concurrent", "ConcurrentStore")
+register("Sleepycat", Store, "rdflib.plugins.sleepycat", "Sleepycat")
+register("SPARQLStore", Store, "rdflib.plugins.stores.sparqlstore", "SPARQLStore")
+register(
+ "SPARQLUpdateStore", Store, "rdflib.plugins.stores.sparqlstore", "SPARQLUpdateStore"
+)
+
+register(
+ "application/rdf+xml",
+ Serializer,
+ "rdflib.plugins.serializers.rdfxml",
+ "XMLSerializer",
+)
+register("xml", Serializer, "rdflib.plugins.serializers.rdfxml", "XMLSerializer")
+register("text/n3", Serializer, "rdflib.plugins.serializers.n3", "N3Serializer")
+register("n3", Serializer, "rdflib.plugins.serializers.n3", "N3Serializer")
+register(
+ "text/turtle", Serializer, "rdflib.plugins.serializers.turtle", "TurtleSerializer"
+)
+register("turtle", Serializer, "rdflib.plugins.serializers.turtle", "TurtleSerializer")
+register("ttl", Serializer, "rdflib.plugins.serializers.turtle", "TurtleSerializer")
+register("trig", Serializer, "rdflib.plugins.serializers.trig", "TrigSerializer")
+register(
+ "application/n-triples", Serializer, "rdflib.plugins.serializers.nt", "NTSerializer"
+)
+register("ntriples", Serializer, "rdflib.plugins.serializers.nt", "NTSerializer")
+register("nt", Serializer, "rdflib.plugins.serializers.nt", "NTSerializer")
+register("nt11", Serializer, "rdflib.plugins.serializers.nt", "NT11Serializer")
+
+register(
+ "pretty-xml", Serializer, "rdflib.plugins.serializers.rdfxml", "PrettyXMLSerializer"
+)
+register("trix", Serializer, "rdflib.plugins.serializers.trix", "TriXSerializer")
+register(
+ "application/trix", Serializer, "rdflib.plugins.serializers.trix", "TriXSerializer"
+)
+register(
+ "application/n-quads",
+ Serializer,
+ "rdflib.plugins.serializers.nquads",
+ "NQuadsSerializer",
+)
+register("nquads", Serializer, "rdflib.plugins.serializers.nquads", "NQuadsSerializer")
+
+register("application/rdf+xml", Parser, "rdflib.plugins.parsers.rdfxml", "RDFXMLParser")
+register("xml", Parser, "rdflib.plugins.parsers.rdfxml", "RDFXMLParser")
+register("text/n3", Parser, "rdflib.plugins.parsers.notation3", "N3Parser")
+register("n3", Parser, "rdflib.plugins.parsers.notation3", "N3Parser")
+register("text/turtle", Parser, "rdflib.plugins.parsers.notation3", "TurtleParser")
+register("turtle", Parser, "rdflib.plugins.parsers.notation3", "TurtleParser")
+register("ttl", Parser, "rdflib.plugins.parsers.notation3", "TurtleParser")
+register("application/n-triples", Parser, "rdflib.plugins.parsers.nt", "NTParser")
+register("ntriples", Parser, "rdflib.plugins.parsers.nt", "NTParser")
+register("nt", Parser, "rdflib.plugins.parsers.nt", "NTParser")
+register("nt11", Parser, "rdflib.plugins.parsers.nt", "NTParser")
+register("application/n-quads", Parser, "rdflib.plugins.parsers.nquads", "NQuadsParser")
+register("nquads", Parser, "rdflib.plugins.parsers.nquads", "NQuadsParser")
+register("application/trix", Parser, "rdflib.plugins.parsers.trix", "TriXParser")
+register("trix", Parser, "rdflib.plugins.parsers.trix", "TriXParser")
+register("trig", Parser, "rdflib.plugins.parsers.trig", "TrigParser")
+
+
+register("sparql", Result, "rdflib.plugins.sparql.processor", "SPARQLResult")
+register("sparql", Processor, "rdflib.plugins.sparql.processor", "SPARQLProcessor")
+
+register(
+ "sparql",
+ UpdateProcessor,
+ "rdflib.plugins.sparql.processor",
+ "SPARQLUpdateProcessor",
+)
+
+
+register(
+ "xml",
+ ResultSerializer,
+ "rdflib.plugins.sparql.results.xmlresults",
+ "XMLResultSerializer",
+)
+register(
+ "application/sparql-results+xml",
+ ResultSerializer,
+ "rdflib.plugins.sparql.results.xmlresults",
+ "XMLResultSerializer",
+)
+register(
+ "txt",
+ ResultSerializer,
+ "rdflib.plugins.sparql.results.txtresults",
+ "TXTResultSerializer",
+)
+register(
+ "json",
+ ResultSerializer,
+ "rdflib.plugins.sparql.results.jsonresults",
+ "JSONResultSerializer",
+)
+register(
+ "application/sparql-results+json",
+ ResultSerializer,
+ "rdflib.plugins.sparql.results.jsonresults",
+ "JSONResultSerializer",
+)
+register(
+ "csv",
+ ResultSerializer,
+ "rdflib.plugins.sparql.results.csvresults",
+ "CSVResultSerializer",
+)
+register(
+ "text/csv",
+ ResultSerializer,
+ "rdflib.plugins.sparql.results.csvresults",
+ "CSVResultSerializer",
+)
+
+register(
+ "xml", ResultParser, "rdflib.plugins.sparql.results.xmlresults", "XMLResultParser"
+)
+register(
+ "application/sparql-results+xml",
+ ResultParser,
+ "rdflib.plugins.sparql.results.xmlresults",
+ "XMLResultParser",
+)
+register(
+ "application/sparql-results+xml; charset=UTF-8",
+ ResultParser,
+ "rdflib.plugins.sparql.results.xmlresults",
+ "XMLResultParser",
+)
+
+register(
+ "application/rdf+xml",
+ ResultParser,
+ "rdflib.plugins.sparql.results.graph",
+ "GraphResultParser",
+)
+
+
+register(
+ "json",
+ ResultParser,
+ "rdflib.plugins.sparql.results.jsonresults",
+ "JSONResultParser",
+)
+register(
+ "application/sparql-results+json",
+ ResultParser,
+ "rdflib.plugins.sparql.results.jsonresults",
+ "JSONResultParser",
+)
+
register(
- 'default', Store,
- 'rdflib.plugins.memory', 'IOMemory')
-register(
- 'IOMemory', Store,
- 'rdflib.plugins.memory', 'IOMemory')
-register(
- 'Auditable', Store,
- 'rdflib.plugins.stores.auditable', 'AuditableStore')
-register(
- 'Concurrent', Store,
- 'rdflib.plugins.stores.concurrent', 'ConcurrentStore')
+ "csv", ResultParser, "rdflib.plugins.sparql.results.csvresults", "CSVResultParser"
+)
register(
- 'Sleepycat', Store,
- 'rdflib.plugins.sleepycat', 'Sleepycat')
-register(
- 'SPARQLStore', Store,
- 'rdflib.plugins.stores.sparqlstore', 'SPARQLStore')
-register(
- 'SPARQLUpdateStore', Store,
- 'rdflib.plugins.stores.sparqlstore', 'SPARQLUpdateStore')
+ "text/csv",
+ ResultParser,
+ "rdflib.plugins.sparql.results.csvresults",
+ "CSVResultParser",
+)
register(
- 'application/rdf+xml', Serializer,
- 'rdflib.plugins.serializers.rdfxml', 'XMLSerializer')
-register(
- 'xml', Serializer,
- 'rdflib.plugins.serializers.rdfxml', 'XMLSerializer')
-register(
- 'text/n3', Serializer,
- 'rdflib.plugins.serializers.n3', 'N3Serializer')
-register(
- 'n3', Serializer,
- 'rdflib.plugins.serializers.n3', 'N3Serializer')
-register(
- 'text/turtle', Serializer,
- 'rdflib.plugins.serializers.turtle', 'TurtleSerializer')
-register(
- 'turtle', Serializer,
- 'rdflib.plugins.serializers.turtle', 'TurtleSerializer')
-register(
- 'ttl', Serializer,
- 'rdflib.plugins.serializers.turtle', 'TurtleSerializer')
-register(
- 'trig', Serializer,
- 'rdflib.plugins.serializers.trig', 'TrigSerializer')
-register(
- 'application/n-triples', Serializer,
- 'rdflib.plugins.serializers.nt', 'NTSerializer')
-register(
- 'ntriples', Serializer,
- 'rdflib.plugins.serializers.nt', 'NTSerializer')
-register(
- 'nt', Serializer,
- 'rdflib.plugins.serializers.nt', 'NTSerializer')
-register(
- 'nt11', Serializer,
- 'rdflib.plugins.serializers.nt', 'NT11Serializer')
-
-register(
- 'pretty-xml', Serializer,
- 'rdflib.plugins.serializers.rdfxml', 'PrettyXMLSerializer')
-register(
- 'trix', Serializer,
- 'rdflib.plugins.serializers.trix', 'TriXSerializer')
-register(
- 'application/trix', Serializer,
- 'rdflib.plugins.serializers.trix', 'TriXSerializer')
-register(
- 'application/n-quads', Serializer,
- 'rdflib.plugins.serializers.nquads', 'NQuadsSerializer')
-register(
- 'nquads', Serializer,
- 'rdflib.plugins.serializers.nquads', 'NQuadsSerializer')
-
-register(
- 'application/rdf+xml', Parser,
- 'rdflib.plugins.parsers.rdfxml', 'RDFXMLParser')
-register(
- 'xml', Parser,
- 'rdflib.plugins.parsers.rdfxml', 'RDFXMLParser')
-register(
- 'text/n3', Parser,
- 'rdflib.plugins.parsers.notation3', 'N3Parser')
-register(
- 'n3', Parser,
- 'rdflib.plugins.parsers.notation3', 'N3Parser')
-register(
- 'text/turtle', Parser,
- 'rdflib.plugins.parsers.notation3', 'TurtleParser')
-register(
- 'turtle', Parser,
- 'rdflib.plugins.parsers.notation3', 'TurtleParser')
-register(
- 'ttl', Parser,
- 'rdflib.plugins.parsers.notation3', 'TurtleParser')
-register(
- 'application/n-triples', Parser,
- 'rdflib.plugins.parsers.nt', 'NTParser')
-register(
- 'ntriples', Parser,
- 'rdflib.plugins.parsers.nt', 'NTParser')
-register(
- 'nt', Parser,
- 'rdflib.plugins.parsers.nt', 'NTParser')
-register(
- 'nt11', Parser,
- 'rdflib.plugins.parsers.nt', 'NTParser')
-register(
- 'application/n-quads', Parser,
- 'rdflib.plugins.parsers.nquads', 'NQuadsParser')
-register(
- 'nquads', Parser,
- 'rdflib.plugins.parsers.nquads', 'NQuadsParser')
-register(
- 'application/trix', Parser,
- 'rdflib.plugins.parsers.trix', 'TriXParser')
-register(
- 'trix', Parser,
- 'rdflib.plugins.parsers.trix', 'TriXParser')
-register(
- 'trig', Parser,
- 'rdflib.plugins.parsers.trig', 'TrigParser')
-
-
-register(
- 'sparql', Result,
- 'rdflib.plugins.sparql.processor', 'SPARQLResult')
-register(
- 'sparql', Processor,
- 'rdflib.plugins.sparql.processor', 'SPARQLProcessor')
-
-register(
- 'sparql', UpdateProcessor,
- 'rdflib.plugins.sparql.processor', 'SPARQLUpdateProcessor')
-
-
-register(
- 'xml', ResultSerializer,
- 'rdflib.plugins.sparql.results.xmlresults', 'XMLResultSerializer')
-register(
- 'txt', ResultSerializer,
- 'rdflib.plugins.sparql.results.txtresults', 'TXTResultSerializer')
-register(
- 'json', ResultSerializer,
- 'rdflib.plugins.sparql.results.jsonresults', 'JSONResultSerializer')
-register(
- 'csv', ResultSerializer,
- 'rdflib.plugins.sparql.results.csvresults', 'CSVResultSerializer')
-
-register(
- 'xml', ResultParser,
- 'rdflib.plugins.sparql.results.xmlresults', 'XMLResultParser')
-register(
- 'application/sparql-results+xml', ResultParser,
- 'rdflib.plugins.sparql.results.xmlresults', 'XMLResultParser')
-register(
- 'application/sparql-results+xml; charset=UTF-8', ResultParser,
- 'rdflib.plugins.sparql.results.xmlresults', 'XMLResultParser')
-
-register(
- 'application/rdf+xml', ResultParser,
- 'rdflib.plugins.sparql.results.graph', 'GraphResultParser')
-
-
-register(
- 'json', ResultParser,
- 'rdflib.plugins.sparql.results.jsonresults', 'JSONResultParser')
-register(
- 'application/sparql-results+json', ResultParser,
- 'rdflib.plugins.sparql.results.jsonresults', 'JSONResultParser')
-
-register(
- 'csv', ResultParser,
- 'rdflib.plugins.sparql.results.csvresults', 'CSVResultParser')
-register(
- 'text/csv', ResultParser,
- 'rdflib.plugins.sparql.results.csvresults', 'CSVResultParser')
-
-register(
- 'tsv', ResultParser,
- 'rdflib.plugins.sparql.results.tsvresults', 'TSVResultParser')
-register(
- 'text/tab-separated-values', ResultParser,
- 'rdflib.plugins.sparql.results.tsvresults', 'TSVResultParser')
+ "tsv", ResultParser, "rdflib.plugins.sparql.results.tsvresults", "TSVResultParser"
+)
+register(
+ "text/tab-separated-values",
+ ResultParser,
+ "rdflib.plugins.sparql.results.tsvresults",
+ "TSVResultParser",
+)
diff --git a/rdflib/plugins/memory.py b/rdflib/plugins/memory.py
index 7345311a..6cedc2b3 100644
--- a/rdflib/plugins/memory.py
+++ b/rdflib/plugins/memory.py
@@ -4,11 +4,9 @@ from __future__ import print_function
import random
-from rdflib.term import BNode
-from rdflib.store import Store, NO_STORE, VALID_STORE
-from six import iteritems
+from rdflib.store import Store
-__all__ = ['Memory', 'IOMemory']
+__all__ = ["Memory", "IOMemory"]
ANY = Any = None
@@ -98,14 +96,12 @@ class Memory(Store):
if predicate in subjectDictionary:
if object != ANY: # subject+predicate+object is given
if object in subjectDictionary[predicate]:
- yield (subject, predicate, object), \
- self.__contexts()
+ yield (subject, predicate, object), self.__contexts()
else: # given object not found
pass
else: # subject+predicate is given, object unbound
for o in subjectDictionary[predicate].keys():
- yield (subject, predicate, o), \
- self.__contexts()
+ yield (subject, predicate, o), self.__contexts()
else: # given predicate not found
pass
else: # subject given, predicate unbound
@@ -167,7 +163,7 @@ class Memory(Store):
return self.__prefix.get(namespace, None)
def namespaces(self):
- for prefix, namespace in iteritems(self.__namespace):
+ for prefix, namespace in self.__namespace.items():
yield prefix, namespace
def __contexts(self):
@@ -198,6 +194,7 @@ class IOMemory(Store):
slow.
"""
+
context_aware = True
formula_aware = True
graph_aware = True
@@ -224,11 +221,12 @@ class IOMemory(Store):
self.__obj2int = {None: None} # maps objects to integer keys
# Indexes for each triple part, and a list of contexts for each triple
- self.__subjectIndex = {} # key: sid val: set(enctriples)
+ self.__subjectIndex = {} # key: sid val: set(enctriples)
self.__predicateIndex = {} # key: pid val: set(enctriples)
- self.__objectIndex = {} # key: oid val: set(enctriples)
- self.__tripleContexts = {
- } # key: enctriple val: {cid1: quoted, cid2: quoted ...}
+ self.__objectIndex = {} # key: oid val: set(enctriples)
+ self.__tripleContexts = (
+ {}
+ ) # key: enctriple val: {cid1: quoted, cid2: quoted ...}
self.__contextTriples = {None: set()} # key: cid val: set(enctriples)
# all contexts used in store (unencoded)
@@ -247,7 +245,7 @@ class IOMemory(Store):
return self.__prefix.get(namespace, None)
def namespaces(self):
- for prefix, namespace in iteritems(self.__namespace):
+ for prefix, namespace in self.__namespace.items():
yield prefix, namespace
def add(self, triple, context, quoted=False):
@@ -296,16 +294,20 @@ class IOMemory(Store):
del self.__tripleContexts[enctriple]
- if not req_cid is None and \
- req_cid in self.__contextTriples and \
- len(self.__contextTriples[req_cid]) == 0:
+ if (
+ req_cid is not None
+ and req_cid in self.__contextTriples
+ and len(self.__contextTriples[req_cid]) == 0
+ ):
# all triples are removed out of this context
# and it's not the default context so delete it
del self.__contextTriples[req_cid]
- if triplepat == (None, None, None) and \
- context in self.__all_contexts and \
- not self.graph_aware:
+ if (
+ triplepat == (None, None, None)
+ and context in self.__all_contexts
+ and not self.graph_aware
+ ):
# remove the whole context
self.__all_contexts.remove(context)
@@ -324,9 +326,11 @@ class IOMemory(Store):
# optimize "triple in graph" case (all parts given)
if sid is not None and pid is not None and oid is not None:
- if sid in self.__subjectIndex and \
- enctriple in self.__subjectIndex[sid] and \
- self.__tripleHasContext(enctriple, cid):
+ if (
+ sid in self.__subjectIndex
+ and enctriple in self.__subjectIndex[sid]
+ and self.__tripleHasContext(enctriple, cid)
+ ):
return ((triplein, self.__contexts(enctriple)) for i in [0])
else:
return self.__emptygen()
@@ -355,12 +359,14 @@ class IOMemory(Store):
else:
enctriples = sets[0].copy()
- return ((self.__decodeTriple(enctriple), self.__contexts(enctriple))
- for enctriple in enctriples
- if self.__tripleHasContext(enctriple, cid))
+ return (
+ (self.__decodeTriple(enctriple), self.__contexts(enctriple))
+ for enctriple in enctriples
+ if self.__tripleHasContext(enctriple, cid)
+ )
def contexts(self, triple=None):
- if triple is None or triple is (None, None, None):
+ if triple is None or triple == (None, None, None):
return (context for context in self.__all_contexts)
enctriple = self.__encodeTriple(triple)
@@ -404,8 +410,7 @@ class IOMemory(Store):
if enctriple not in self.__tripleContexts:
# triple exists with default ctx info
# start with a copy of the default ctx info
- self.__tripleContexts[
- enctriple] = self.__defaultContexts.copy()
+ self.__tripleContexts[enctriple] = self.__defaultContexts.copy()
self.__tripleContexts[enctriple][cid] = quoted
if not quoted:
@@ -448,12 +453,11 @@ class IOMemory(Store):
def __tripleHasContext(self, enctriple, cid):
"""return True iff the triple exists in the given context"""
ctxs = self.__tripleContexts.get(enctriple, self.__defaultContexts)
- return (cid in ctxs)
+ return cid in ctxs
def __removeTripleContext(self, enctriple, cid):
"""remove the context from the triple"""
- ctxs = self.__tripleContexts.get(
- enctriple, self.__defaultContexts).copy()
+ ctxs = self.__tripleContexts.get(enctriple, self.__defaultContexts).copy()
del ctxs[cid]
if ctxs == self.__defaultContexts:
del self.__tripleContexts[enctriple]
@@ -493,7 +497,11 @@ class IOMemory(Store):
def __contexts(self, enctriple):
"""return a generator for all the non-quoted contexts
(unencoded) the encoded triple appears in"""
- return (self.__int2obj.get(cid) for cid in self.__getTripleContexts(enctriple, skipQuoted=True) if cid is not None)
+ return (
+ self.__int2obj.get(cid)
+ for cid in self.__getTripleContexts(enctriple, skipQuoted=True)
+ if cid is not None
+ )
def __emptygen(self):
"""return an empty generator"""
diff --git a/rdflib/plugins/parsers/notation3.py b/rdflib/plugins/parsers/notation3.py
index 4b6ff5d1..c427f153 100755
--- a/rdflib/plugins/parsers/notation3.py
+++ b/rdflib/plugins/parsers/notation3.py
@@ -32,7 +32,6 @@ from __future__ import division
from __future__ import print_function
# Python standard libraries
-import types
import sys
import os
import re
@@ -45,19 +44,20 @@ from uuid import uuid4
from rdflib.term import URIRef, BNode, Literal, Variable, _XSD_PFX, _unique_id
from rdflib.graph import QuotedGraph, ConjunctiveGraph, Graph
-
-from six import b
-from six import binary_type
-
from rdflib.compat import long_type
-from six import string_types
-from six import text_type
-from six import unichr
from rdflib.compat import narrow_build
-__all__ = ['BadSyntax', 'N3Parser', 'TurtleParser',
- "splitFragP", "join", "base",
- "runNamespace", "uniqueURI", "hexify"]
+__all__ = [
+ "BadSyntax",
+ "N3Parser",
+ "TurtleParser",
+ "splitFragP",
+ "join",
+ "base",
+ "runNamespace",
+ "uniqueURI",
+ "hexify",
+]
from rdflib.parser import Parser
@@ -81,8 +81,7 @@ def splitFragP(uriref, punct=0):
if i >= 0:
return uriref[:i], uriref[i:]
else:
- return uriref, ''
-
+ return uriref, ""
def join(here, there):
@@ -120,65 +119,64 @@ def join(here, there):
u'http://example.org/#Andr\\xe9'
"""
-# assert(here.find("#") < 0), \
-# "Base may not contain hash: '%s'" % here # why must caller splitFrag?
+ # assert(here.find("#") < 0), \
+ # "Base may not contain hash: '%s'" % here # why must caller splitFrag?
- slashl = there.find('/')
- colonl = there.find(':')
+ slashl = there.find("/")
+ colonl = there.find(":")
- # join(base, 'foo:/') -- absolute
+ # join(base, 'foo:/') -- absolute
if colonl >= 0 and (slashl < 0 or colonl < slashl):
return there
- bcolonl = here.find(':')
- assert(bcolonl >= 0), \
- "Base uri '%s' is not absolute" % here # else it's not absolute
+ bcolonl = here.find(":")
+ assert bcolonl >= 0, (
+ "Base uri '%s' is not absolute" % here
+ ) # else it's not absolute
path, frag = splitFragP(there)
if not path:
return here + frag
- # join('mid:foo@example', '../foo') bzzt
- if here[bcolonl + 1:bcolonl + 2] != '/':
- raise ValueError(
- ("Base <%s> has no slash after "
- "colon - with relative '%s'.") % (here, there))
+ # join('mid:foo@example', '../foo') bzzt
+ if here[bcolonl + 1: bcolonl + 2] != "/":
+ raise ValueError("Base <%s> has no slash after " "colon - with relative '%s'." % (here, there))
- if here[bcolonl + 1:bcolonl + 3] == '//':
- bpath = here.find('/', bcolonl + 3)
+ if here[bcolonl + 1: bcolonl + 3] == "//":
+ bpath = here.find("/", bcolonl + 3)
else:
bpath = bcolonl + 1
- # join('http://xyz', 'foo')
+ # join('http://xyz', 'foo')
if bpath < 0:
bpath = len(here)
- here = here + '/'
+ here = here + "/"
- # join('http://xyz/', '//abc') => 'http://abc'
- if there[:2] == '//':
- return here[:bcolonl + 1] + there
+ # join('http://xyz/', '//abc') => 'http://abc'
+ if there[:2] == "//":
+ return here[: bcolonl + 1] + there
- # join('http://xyz/', '/abc') => 'http://xyz/abc'
- if there[:1] == '/':
+ # join('http://xyz/', '/abc') => 'http://xyz/abc'
+ if there[:1] == "/":
return here[:bpath] + there
- slashr = here.rfind('/')
+ slashr = here.rfind("/")
while 1:
- if path[:2] == './':
+ if path[:2] == "./":
path = path[2:]
- if path == '.':
- path = ''
- elif path[:3] == '../' or path == '..':
+ if path == ".":
+ path = ""
+ elif path[:3] == "../" or path == "..":
path = path[3:]
- i = here.rfind('/', bpath, slashr)
+ i = here.rfind("/", bpath, slashr)
if i >= 0:
- here = here[:i + 1]
+ here = here[: i + 1]
slashr = i
else:
break
- return here[:slashr + 1] + path + frag
+ return here[: slashr + 1] + path + frag
def base():
@@ -190,7 +188,7 @@ def base():
we should put it in the hostname just to prevent ambiguity
"""
- # return "file://" + hostname + os.getcwd() + "/"
+ # return "file://" + hostname + os.getcwd() + "/"
return "file://" + _fixslash(os.getcwd()) + "/"
@@ -198,7 +196,7 @@ def _fixslash(s):
""" Fix windowslike filename to unixlike - (#ifdef WINDOWS)"""
s = s.replace("\\", "/")
if s[0] != "/" and s[1] == ":":
- s = s[2:] # @@@ Hack when drive letter present
+ s = s[2:] # @@@ Hack when drive letter present
return s
@@ -219,7 +217,7 @@ ANONYMOUS = 3
XMLLITERAL = 25
Logic_NS = "http://www.w3.org/2000/10/swap/log#"
-NODE_MERGE_URI = Logic_NS + "is" # Pseudo-property indicating node merging
+NODE_MERGE_URI = Logic_NS + "is" # Pseudo-property indicating node merging
forSomeSym = Logic_NS + "forSome"
forAllSym = Logic_NS + "forAll"
@@ -230,7 +228,7 @@ DAML_sameAs_URI = OWL_NS + "sameAs"
parsesTo_URI = Logic_NS + "parsesTo"
RDF_spec = "http://www.w3.org/TR/REC-rdf-syntax/"
-List_NS = RDF_NS_URI # From 20030808
+List_NS = RDF_NS_URI # From 20030808
_Old_Logic_NS = "http://www.w3.org/2000/10/swap/log.n3#"
N3_first = (SYMBOL, List_NS + "first")
@@ -245,21 +243,21 @@ runNamespaceValue = None
def runNamespace():
- "Return a URI suitable as a namespace for run-local objects"
- # @@@ include hostname (privacy?) (hash it?)
+ """Returns a URI suitable as a namespace for run-local objects"""
+ # @@@ include hostname (privacy?) (hash it?)
global runNamespaceValue
if runNamespaceValue is None:
- runNamespaceValue = join(base(), _unique_id()) + '#'
+ runNamespaceValue = join(base(), _unique_id()) + "#"
return runNamespaceValue
+
nextu = 0
def uniqueURI():
- "A unique URI"
+ """A unique URI"""
global nextu
nextu += 1
- # return runNamespace() + "u_" + `nextu`
return runNamespace() + "u_" + str(nextu)
@@ -270,20 +268,21 @@ chatty_flag = 50
def BecauseOfData(*args, **kargs):
- # print args, kargs
+ # print args, kargs
pass
def becauseSubexpression(*args, **kargs):
- # print args, kargs
+ # print args, kargs
pass
+
N3_forSome_URI = forSomeSym
N3_forAll_URI = forAllSym
# Magic resources we know about
-ADDED_HASH = "#" # Stop where we use this in case we want to remove it!
+ADDED_HASH = "#" # Stop where we use this in case we want to remove it!
# This is the hash on namespace URIs
RDF_type = (SYMBOL, RDF_type_URI)
@@ -297,104 +296,97 @@ DOUBLE_DATATYPE = _XSD_PFX + "double"
FLOAT_DATATYPE = _XSD_PFX + "float"
INTEGER_DATATYPE = _XSD_PFX + "integer"
-option_noregen = 0 # If set, do not regenerate genids on output
+option_noregen = 0 # If set, do not regenerate genids on output
# @@ I18n - the notname chars need extending for well known unicode non-text
# characters. The XML spec switched to assuming unknown things were name
# characaters.
# _namechars = string.lowercase + string.uppercase + string.digits + '_-'
-_notQNameChars = \
- "\t\r\n !\"#$&'()*,+/;<=>?@[\\]^`{|}~" # else valid qname :-/
+_notQNameChars = "\t\r\n !\"#$&'()*,+/;<=>?@[\\]^`{|}~" # else valid qname :-/
_notKeywordsChars = _notQNameChars + "."
-_notNameChars = _notQNameChars + ":" # Assume anything else valid name :-/
-_rdfns = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
+_notNameChars = _notQNameChars + ":" # Assume anything else valid name :-/
+_rdfns = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+
+hexChars = "ABCDEFabcdef0123456789"
+escapeChars = "(_~.-!$&'()*+,;=/?#@%)" # valid for \ escapes in localnames
-hexChars = 'ABCDEFabcdef0123456789'
-escapeChars = "(_~.-!$&'()*+,;=/?#@%)" # valid for \ escapes in localnames
def unicodeExpand(m):
try:
- return unichr(int(m.group(1), 16))
+ return chr(int(m.group(1), 16))
except:
raise Exception("Invalid unicode code point: " + m.group(1))
-if narrow_build:
- def unicodeExpand(m):
- try:
- return unichr(int(m.group(1), 16))
- except ValueError:
- warnings.warn(
- 'Encountered a unicode char > 0xFFFF in a narrow python build. '
- 'Trying to degrade gracefully, but this can cause problems '
- 'later when working with the string:\n%s' % m.group(0))
- return codecs.decode(m.group(0), 'unicode_escape')
-
-unicodeEscape4 = re.compile(
- r'\\u([0-9a-fA-F]{4})')
-unicodeEscape8 = re.compile(
- r'\\U([0-9a-fA-F]{8})')
+unicodeEscape4 = re.compile(r"\\u([0-9a-fA-F]{4})")
+unicodeEscape8 = re.compile(r"\\U([0-9a-fA-F]{8})")
-N3CommentCharacter = "#" # For unix script # ! compatabilty
+N3CommentCharacter = "#" # For unix script # ! compatabilty
########################################## Parse string to sink
#
# Regular expressions:
-eol = re.compile(
- r'[ \t]*(#[^\n]*)?\r?\n') # end of line, poss. w/comment
-eof = re.compile(
- r'[ \t]*(#[^\n]*)?$') # end of file, poss. w/comment
-ws = re.compile(r'[ \t]*') # Whitespace not including NL
-signed_integer = re.compile(r'[-+]?[0-9]+') # integer
-integer_syntax = re.compile(r'[-+]?[0-9]+')
-decimal_syntax = re.compile(r'[-+]?[0-9]*\.[0-9]+')
-exponent_syntax = re.compile(r'[-+]?(?:[0-9]+\.[0-9]*(?:e|E)[-+]?[0-9]+|'+
- r'\.[0-9](?:e|E)[-+]?[0-9]+|'+
- r'[0-9]+(?:e|E)[-+]?[0-9]+)')
-digitstring = re.compile(r'[0-9]+') # Unsigned integer
+eol = re.compile(r"[ \t]*(#[^\n]*)?\r?\n") # end of line, poss. w/comment
+eof = re.compile(r"[ \t]*(#[^\n]*)?$") # end of file, poss. w/comment
+ws = re.compile(r"[ \t]*") # Whitespace not including NL
+signed_integer = re.compile(r"[-+]?[0-9]+") # integer
+integer_syntax = re.compile(r"[-+]?[0-9]+")
+decimal_syntax = re.compile(r"[-+]?[0-9]*\.[0-9]+")
+exponent_syntax = re.compile(
+ r"[-+]?(?:[0-9]+\.[0-9]*|\.[0-9]+|[0-9]+)(?:e|E)[-+]?[0-9]+"
+)
+digitstring = re.compile(r"[0-9]+") # Unsigned integer
interesting = re.compile(r"""[\\\r\n\"\']""")
-langcode = re.compile(r'[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*')
+langcode = re.compile(r"[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*")
class SinkParser:
- def __init__(self, store, openFormula=None, thisDoc="", baseURI=None,
- genPrefix="", why=None, turtle=False):
+ def __init__(
+ self,
+ store,
+ openFormula=None,
+ thisDoc="",
+ baseURI=None,
+ genPrefix="",
+ why=None,
+ turtle=False,
+ ):
""" note: namespace names should *not* end in # ;
the # will get added during qname processing """
self._bindings = {}
if thisDoc != "":
- assert ':' in thisDoc, "Document URI not absolute: <%s>" % thisDoc
- self._bindings[""] = thisDoc + "#" # default
+ assert ":" in thisDoc, "Document URI not absolute: <%s>" % thisDoc
+ self._bindings[""] = thisDoc + "#" # default
self._store = store
if genPrefix:
store.setGenPrefix(genPrefix) # pass it on
self._thisDoc = thisDoc
- self.lines = 0 # for error handling
- self.startOfLine = 0 # For calculating character number
+ self.lines = 0 # for error handling
+ self.startOfLine = 0 # For calculating character number
self._genPrefix = genPrefix
- self.keywords = ['a', 'this', 'bind', 'has', 'is', 'of',
- 'true', 'false']
- self.keywordsSet = 0 # Then only can others be considerd qnames
+ self.keywords = ["a", "this", "bind", "has", "is", "of", "true", "false"]
+ self.keywordsSet = 0 # Then only can others be considerd qnames
self._anonymousNodes = {}
- # Dict of anon nodes already declared ln: Term
+ # Dict of anon nodes already declared ln: Term
self._variables = {}
self._parentVariables = {}
- self._reason = why # Why the parser was asked to parse this
+ self._reason = why # Why the parser was asked to parse this
- self.turtle = turtle # raise exception when encountering N3 extensions
+ self.turtle = turtle # raise exception when encountering N3 extensions
# Turtle allows single or double quotes around strings, whereas N3
# only allows double quotes.
self.string_delimiters = ('"', "'") if turtle else ('"',)
- self._reason2 = None # Why these triples
- # was: diag.tracking
+ self._reason2 = None # Why these triples
+ # was: diag.tracking
if tracking:
self._reason2 = BecauseOfData(
- store.newSymbol(thisDoc), because=self._reason)
+ store.newSymbol(thisDoc), because=self._reason
+ )
if baseURI:
self._baseURI = baseURI
@@ -404,7 +396,7 @@ class SinkParser:
else:
self._baseURI = None
- assert not self._baseURI or ':' in self._baseURI
+ assert not self._baseURI or ":" in self._baseURI
if not self._genPrefix:
if self._thisDoc:
@@ -434,21 +426,20 @@ class SinkParser:
_L1C1. It used to be used only for tracking, but for tests in general
it makes the canonical ordering of bnodes repeatable."""
- return "%s_L%iC%i" % (self._genPrefix, self.lines,
- i - self.startOfLine + 1)
+ return "%s_L%iC%i" % (self._genPrefix, self.lines, i - self.startOfLine + 1)
def formula(self):
return self._formula
def loadStream(self, stream):
- return self.loadBuf(stream.read()) # Not ideal
+ return self.loadBuf(stream.read()) # Not ideal
def loadBuf(self, buf):
"""Parses a buffer and returns its top level formula"""
self.startDoc()
self.feed(buf)
- return self.endDoc() # self._formula
+ return self.endDoc() # self._formula
def feed(self, octets):
"""Feed an octet stream tothe parser
@@ -459,10 +450,10 @@ class SinkParser:
So if there is more data to feed to the
parser, it should be straightforward to recover."""
- if not isinstance(octets, text_type):
- s = octets.decode('utf-8')
- # NB already decoded, so \ufeff
- if len(s) > 0 and s[0] == codecs.BOM_UTF8.decode('utf-8'):
+ if not isinstance(octets, str):
+ s = octets.decode("utf-8")
+ # NB already decoded, so \ufeff
+ if len(s) > 0 and s[0] == codecs.BOM_UTF8.decode("utf-8"):
s = s[1:]
else:
s = octets
@@ -475,15 +466,14 @@ class SinkParser:
i = self.directiveOrStatement(s, j)
if i < 0:
- #print("# next char: %s" % s[j])
- self.BadSyntax(s, j,
- "expected directive or statement")
+ # print("# next char: %s" % s[j])
+ self.BadSyntax(s, j, "expected directive or statement")
def directiveOrStatement(self, argstr, h):
i = self.skipSpace(argstr, h)
if i < 0:
- return i # EOF
+ return i # EOF
if self.turtle:
j = self.sparqlDirective(argstr, i)
@@ -500,8 +490,8 @@ class SinkParser:
return j
- # @@I18N
- # _namechars = string.lowercase + string.uppercase + string.digits + '_-'
+ # @@I18N
+ # _namechars = string.lowercase + string.uppercase + string.digits + '_-'
def tok(self, tok, argstr, i, colon=False):
"""Check for keyword. Space must have been stripped on entry and
@@ -512,15 +502,17 @@ class SinkParser:
"""
assert tok[0] not in _notNameChars # not for punctuation
- if argstr[i:i + 1] == "@":
+ if argstr[i: i + 1] == "@":
i = i + 1
else:
if tok not in self.keywords:
return -1 # No, this has neither keywords declaration nor "@"
- if (argstr[i:i + len(tok)] == tok
- and ( argstr[i + len(tok)] in _notKeywordsChars)
- or (colon and argstr[i+len(tok)] == ':')):
+ if (
+ argstr[i: i + len(tok)] == tok
+ and (argstr[i + len(tok)] in _notKeywordsChars)
+ or (colon and argstr[i + len(tok)] == ":")
+ ):
i = i + len(tok)
return i
else:
@@ -534,109 +526,114 @@ class SinkParser:
assert tok[0] not in _notNameChars # not for punctuation
- if (argstr[i:i + len(tok)].lower() == tok.lower()
- and (argstr[i + len(tok)] in _notQNameChars)):
+ if argstr[i: i + len(tok)].lower() == tok.lower() and (
+ argstr[i + len(tok)] in _notQNameChars
+ ):
i = i + len(tok)
return i
else:
return -1
-
def directive(self, argstr, i):
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
res = []
- j = self.tok('bind', argstr, i) # implied "#". Obsolete.
+ j = self.tok("bind", argstr, i) # implied "#". Obsolete.
if j > 0:
- self.BadSyntax(argstr, i,
- "keyword bind is obsolete: use @prefix")
+ self.BadSyntax(argstr, i, "keyword bind is obsolete: use @prefix")
- j = self.tok('keywords', argstr, i)
+ j = self.tok("keywords", argstr, i)
if j > 0:
if self.turtle:
self.BadSyntax(argstr, i, "Found 'keywords' when in Turtle mode.")
i = self.commaSeparatedList(argstr, j, res, self.bareWord)
if i < 0:
- self.BadSyntax(argstr, i,
- "'@keywords' needs comma separated list of words")
+ self.BadSyntax(
+ argstr, i, "'@keywords' needs comma separated list of words"
+ )
self.setKeywords(res[:])
return i
- j = self.tok('forAll', argstr, i)
+ j = self.tok("forAll", argstr, i)
if j > 0:
if self.turtle:
self.BadSyntax(argstr, i, "Found 'forAll' when in Turtle mode.")
i = self.commaSeparatedList(argstr, j, res, self.uri_ref2)
if i < 0:
- self.BadSyntax(argstr, i,
- "Bad variable list after @forAll")
+ self.BadSyntax(argstr, i, "Bad variable list after @forAll")
for x in res:
- # self._context.declareUniversal(x)
+ # self._context.declareUniversal(x)
if x not in self._variables or x in self._parentVariables:
self._variables[x] = self._context.newUniversal(x)
return i
- j = self.tok('forSome', argstr, i)
+ j = self.tok("forSome", argstr, i)
if j > 0:
if self.turtle:
self.BadSyntax(argstr, i, "Found 'forSome' when in Turtle mode.")
- i = self. commaSeparatedList(argstr, j, res, self.uri_ref2)
+ i = self.commaSeparatedList(argstr, j, res, self.uri_ref2)
if i < 0:
- self.BadSyntax(argstr, i,
- "Bad variable list after @forSome")
+ self.BadSyntax(argstr, i, "Bad variable list after @forSome")
for x in res:
self._context.declareExistential(x)
return i
- j = self.tok('prefix', argstr, i, colon=True) # no implied "#"
+ j = self.tok("prefix", argstr, i, colon=True) # no implied "#"
if j >= 0:
t = []
i = self.qname(argstr, j, t)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected qname after @prefix")
+ self.BadSyntax(argstr, j, "expected qname after @prefix")
j = self.uri_ref2(argstr, i, t)
if j < 0:
- self.BadSyntax(argstr, i,
- "expected <uriref> after @prefix _qname_")
+ self.BadSyntax(argstr, i, "expected <uriref> after @prefix _qname_")
ns = self.uriOf(t[1])
if self._baseURI:
ns = join(self._baseURI, ns)
elif ":" not in ns:
- self.BadSyntax(argstr, j,
- "With no base URI, cannot use " +
- "relative URI in @prefix <" + ns + ">")
- assert ':' in ns # must be absolute
+ self.BadSyntax(
+ argstr,
+ j,
+ "With no base URI, cannot use "
+ + "relative URI in @prefix <"
+ + ns
+ + ">",
+ )
+ assert ":" in ns # must be absolute
self._bindings[t[0][0]] = ns
self.bind(t[0][0], hexify(ns))
return j
- j = self.tok('base', argstr, i) # Added 2007/7/7
+ j = self.tok("base", argstr, i) # Added 2007/7/7
if j >= 0:
t = []
i = self.uri_ref2(argstr, j, t)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected <uri> after @base ")
+ self.BadSyntax(argstr, j, "expected <uri> after @base ")
ns = self.uriOf(t[0])
if self._baseURI:
ns = join(self._baseURI, ns)
else:
- self.BadSyntax(argstr, j,
- "With no previous base URI, cannot use " +
- "relative URI in @base <" + ns + ">")
- assert ':' in ns # must be absolute
+ self.BadSyntax(
+ argstr,
+ j,
+ "With no previous base URI, cannot use "
+ + "relative URI in @base <"
+ + ns
+ + ">",
+ )
+ assert ":" in ns # must be absolute
self._baseURI = ns
return i
- return -1 # Not a directive, could be something else.
+ return -1 # Not a directive, could be something else.
def sparqlDirective(self, argstr, i):
@@ -649,62 +646,67 @@ class SinkParser:
if j < 0:
return j # eof
- j = self.sparqlTok('PREFIX', argstr, i)
+ j = self.sparqlTok("PREFIX", argstr, i)
if j >= 0:
t = []
i = self.qname(argstr, j, t)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected qname after @prefix")
+ self.BadSyntax(argstr, j, "expected qname after @prefix")
j = self.uri_ref2(argstr, i, t)
if j < 0:
- self.BadSyntax(argstr, i,
- "expected <uriref> after @prefix _qname_")
+ self.BadSyntax(argstr, i, "expected <uriref> after @prefix _qname_")
ns = self.uriOf(t[1])
if self._baseURI:
ns = join(self._baseURI, ns)
elif ":" not in ns:
- self.BadSyntax(argstr, j,
- "With no base URI, cannot use " +
- "relative URI in @prefix <" + ns + ">")
- assert ':' in ns # must be absolute
+ self.BadSyntax(
+ argstr,
+ j,
+ "With no base URI, cannot use "
+ + "relative URI in @prefix <"
+ + ns
+ + ">",
+ )
+ assert ":" in ns # must be absolute
self._bindings[t[0][0]] = ns
self.bind(t[0][0], hexify(ns))
return j
- j = self.sparqlTok('BASE', argstr, i)
+ j = self.sparqlTok("BASE", argstr, i)
if j >= 0:
t = []
i = self.uri_ref2(argstr, j, t)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected <uri> after @base ")
+ self.BadSyntax(argstr, j, "expected <uri> after @base ")
ns = self.uriOf(t[0])
if self._baseURI:
ns = join(self._baseURI, ns)
else:
- self.BadSyntax(argstr, j,
- "With no previous base URI, cannot use " +
- "relative URI in @base <" + ns + ">")
- assert ':' in ns # must be absolute
+ self.BadSyntax(
+ argstr,
+ j,
+ "With no previous base URI, cannot use "
+ + "relative URI in @base <"
+ + ns
+ + ">",
+ )
+ assert ":" in ns # must be absolute
self._baseURI = ns
return i
- return -1 # Not a directive, could be something else.
-
+ return -1 # Not a directive, could be something else.
def bind(self, qn, uri):
- assert isinstance(
- uri, binary_type), "Any unicode must be %x-encoded already"
+ assert isinstance(uri, bytes), "Any unicode must be %x-encoded already"
if qn == "":
self._store.setDefaultNamespace(uri)
else:
self._store.bind(qn, uri)
def setKeywords(self, k):
- "Takes a list of strings"
+ """Takes a list of strings"""
if k is None:
self.keywordsSet = 0
else:
@@ -712,31 +714,29 @@ class SinkParser:
self.keywordsSet = 1
def startDoc(self):
- # was: self._store.startDoc()
+ # was: self._store.startDoc()
self._store.startDoc(self._formula)
def endDoc(self):
"""Signal end of document and stop parsing. returns formula"""
- self._store.endDoc(self._formula) # don't canonicalize yet
+ self._store.endDoc(self._formula) # don't canonicalize yet
return self._formula
def makeStatement(self, quadruple):
- # $$$$$$$$$$$$$$$$$$$$$
- # print "# Parser output: ", `quadruple`
+ # $$$$$$$$$$$$$$$$$$$$$
+ # print "# Parser output: ", `quadruple`
self._store.makeStatement(quadruple, why=self._reason2)
def statement(self, argstr, i):
r = []
- i = self.object(
- argstr, i, r) # Allow literal for subject - extends RDF
+ i = self.object(argstr, i, r) # Allow literal for subject - extends RDF
if i < 0:
return i
j = self.property_list(argstr, i, r[0])
if j < 0:
- self.BadSyntax(
- argstr, i, "expected propertylist")
+ self.BadSyntax(argstr, i, "expected propertylist")
return j
def subject(self, argstr, i, res):
@@ -758,77 +758,73 @@ class SinkParser:
r = []
- j = self.tok('has', argstr, i)
+ j = self.tok("has", argstr, i)
if j >= 0:
if self.turtle:
self.BadSyntax(argstr, i, "Found 'has' keyword in Turtle mode")
i = self.prop(argstr, j, r)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected property after 'has'")
- res.append(('->', r[0]))
+ self.BadSyntax(argstr, j, "expected property after 'has'")
+ res.append(("->", r[0]))
return i
- j = self.tok('is', argstr, i)
+ j = self.tok("is", argstr, i)
if j >= 0:
if self.turtle:
self.BadSyntax(argstr, i, "Found 'is' keyword in Turtle mode")
i = self.prop(argstr, j, r)
if i < 0:
- self.BadSyntax(argstr, j,
- "expected <property> after 'is'")
+ self.BadSyntax(argstr, j, "expected <property> after 'is'")
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, i,
- "End of file found, expected property after 'is'")
+ self.BadSyntax(
+ argstr, i, "End of file found, expected property after 'is'"
+ )
i = j
- j = self.tok('of', argstr, i)
+ j = self.tok("of", argstr, i)
if j < 0:
- self.BadSyntax(argstr, i,
- "expected 'of' after 'is' <prop>")
- res.append(('<-', r[0]))
+ self.BadSyntax(argstr, i, "expected 'of' after 'is' <prop>")
+ res.append(("<-", r[0]))
return j
- j = self.tok('a', argstr, i)
+ j = self.tok("a", argstr, i)
if j >= 0:
- res.append(('->', RDF_type))
+ res.append(("->", RDF_type))
return j
- if argstr[i:i + 2] == "<=":
+ if argstr[i: i + 2] == "<=":
if self.turtle:
- self.BadSyntax(argstr, i,
- "Found '<=' in Turtle mode. ")
+ self.BadSyntax(argstr, i, "Found '<=' in Turtle mode. ")
- res.append(('<-', self._store.newSymbol(Logic_NS + "implies")))
+ res.append(("<-", self._store.newSymbol(Logic_NS + "implies")))
return i + 2
- if argstr[i:i + 1] == "=":
+ if argstr[i: i + 1] == "=":
if self.turtle:
self.BadSyntax(argstr, i, "Found '=' in Turtle mode")
- if argstr[i + 1:i + 2] == ">":
- res.append(('->', self._store.newSymbol(Logic_NS + "implies")))
+ if argstr[i + 1: i + 2] == ">":
+ res.append(("->", self._store.newSymbol(Logic_NS + "implies")))
return i + 2
- res.append(('->', DAML_sameAs))
+ res.append(("->", DAML_sameAs))
return i + 1
- if argstr[i:i + 2] == ":=":
+ if argstr[i: i + 2] == ":=":
if self.turtle:
self.BadSyntax(argstr, i, "Found ':=' in Turtle mode")
- # patch file relates two formulae, uses this @@ really?
- res.append(('->', Logic_NS + "becomes"))
+ # patch file relates two formulae, uses this @@ really?
+ res.append(("->", Logic_NS + "becomes"))
return i + 2
j = self.prop(argstr, i, r)
if j >= 0:
- res.append(('->', r[0]))
+ res.append(("->", r[0]))
return j
- if argstr[i:i + 2] == ">-" or argstr[i:i + 2] == "<-":
- self.BadSyntax(argstr, j,
- ">- ... -> syntax is obsolete.")
+ if argstr[i: i + 2] == ">-" or argstr[i: i + 2] == "<-":
+ self.BadSyntax(argstr, j, ">- ... -> syntax is obsolete.")
return -1
@@ -846,16 +842,15 @@ class SinkParser:
"""
j = self.nodeOrLiteral(argstr, i, res)
if j < 0:
- return j # nope
+ return j # nope
- while argstr[j:j + 1] in "!^": # no spaces, must follow exactly (?)
- ch = argstr[j:j + 1]
+ while argstr[j: j + 1] in "!^": # no spaces, must follow exactly (?)
+ ch = argstr[j: j + 1]
subj = res.pop()
obj = self.blankNode(uri=self.here(j))
j = self.node(argstr, j + 1, res)
if j < 0:
- self.BadSyntax(argstr, j,
- "EOF found in middle of path syntax")
+ self.BadSyntax(argstr, j, "EOF found in middle of path syntax")
pred = res.pop()
if ch == "^": # Reverse traverse
self.makeStatement((self._context, pred, obj, subj))
@@ -884,18 +879,19 @@ class SinkParser:
if j < 0:
return j # eof
i = j
- ch = argstr[i:i + 1] # Quick 1-character checks first:
+ ch = argstr[i: i + 1] # Quick 1-character checks first:
if ch == "[":
bnodeID = self.here(i)
j = self.skipSpace(argstr, i + 1)
if j < 0:
- self.BadSyntax(argstr, i,
- "EOF after '['")
+ self.BadSyntax(argstr, i, "EOF after '['")
# Hack for "is" binding name to anon node
- if argstr[j:j + 1] == "=":
+ if argstr[j: j + 1] == "=":
if self.turtle:
- self.BadSyntax(argstr, j, "Found '[=' or '[ =' when in turtle mode.")
+ self.BadSyntax(
+ argstr, j, "Found '[=' or '[ =' when in turtle mode."
+ )
i = j + 1
objs = []
j = self.objectList(argstr, i, objs)
@@ -903,33 +899,31 @@ class SinkParser:
subj = objs[0]
if len(objs) > 1:
for obj in objs:
- self.makeStatement((self._context,
- DAML_sameAs, subj, obj))
+ self.makeStatement((self._context, DAML_sameAs, subj, obj))
j = self.skipSpace(argstr, j)
if j < 0:
- self.BadSyntax(argstr, i,
- "EOF when objectList expected after [ = ")
- if argstr[j:j + 1] == ";":
+ self.BadSyntax(
+ argstr, i, "EOF when objectList expected after [ = "
+ )
+ if argstr[j: j + 1] == ";":
j = j + 1
else:
- self.BadSyntax(argstr, i,
- "objectList expected after [= ")
+ self.BadSyntax(argstr, i, "objectList expected after [= ")
if subj is None:
subj = self.blankNode(uri=bnodeID)
i = self.property_list(argstr, j, subj)
if i < 0:
- self.BadSyntax(argstr, j,
- "property_list expected")
+ self.BadSyntax(argstr, j, "property_list expected")
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, i,
- "EOF when ']' expected after [ <propertyList>")
- if argstr[j:j + 1] != "]":
- self.BadSyntax(argstr, j,
- "']' expected")
+ self.BadSyntax(
+ argstr, i, "EOF when ']' expected after [ <propertyList>"
+ )
+ if argstr[j: j + 1] != "]":
+ self.BadSyntax(argstr, j, "']' expected")
res.append(subj)
return j + 1
@@ -937,8 +931,8 @@ class SinkParser:
# if self.turtle:
# self.BadSyntax(argstr, i,
# "found '{' while in Turtle mode, Formulas not supported!")
- ch2 = argstr[i + 1:i + 2]
- if ch2 == '$':
+ ch2 = argstr[i + 1: i + 2]
+ if ch2 == "$":
# a set
i += 1
j = i + 1
@@ -947,27 +941,23 @@ class SinkParser:
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
- self.BadSyntax(argstr, i,
- "needed '$}', found end.")
- if argstr[i:i + 2] == '$}':
+ self.BadSyntax(argstr, i, "needed '$}', found end.")
+ if argstr[i: i + 2] == "$}":
j = i + 2
break
if not first_run:
- if argstr[i:i + 1] == ',':
+ if argstr[i: i + 1] == ",":
i += 1
else:
- self.BadSyntax(
- argstr, i, "expected: ','")
+ self.BadSyntax(argstr, i, "expected: ','")
else:
first_run = False
item = []
- j = self.item(
- argstr, i, item) # @@@@@ should be path, was object
+ j = self.item(argstr, i, item) # @@@@@ should be path, was object
if j < 0:
- self.BadSyntax(argstr, i,
- "expected item in set or '$}'")
+ self.BadSyntax(argstr, i, "expected item in set or '$}'")
List.append(self._store.intern(item[0]))
res.append(self._store.newSet(List, self._context))
return j
@@ -990,17 +980,15 @@ class SinkParser:
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
- self.BadSyntax(
- argstr, i, "needed '}', found end.")
+ self.BadSyntax(argstr, i, "needed '}', found end.")
- if argstr[i:i + 1] == "}":
+ if argstr[i: i + 1] == "}":
j = i + 1
break
j = self.directiveOrStatement(argstr, i)
if j < 0:
- self.BadSyntax(
- argstr, i, "expected statement or '}'")
+ self.BadSyntax(argstr, i, "expected statement or '}'")
self._anonymousNodes = parentAnonymousNodes
self._variables = self._parentVariables
@@ -1008,13 +996,13 @@ class SinkParser:
self._context = self._parentContext
self._reason2 = reason2
self._parentContext = oldParentContext
- res.append(subj.close()) # No use until closed
+ res.append(subj.close()) # No use until closed
return j
if ch == "(":
thing_type = self._store.newList
- ch2 = argstr[i + 1:i + 2]
- if ch2 == '$':
+ ch2 = argstr[i + 1: i + 2]
+ if ch2 == "$":
thing_type = self._store.newSet
i += 1
j = i + 1
@@ -1023,34 +1011,34 @@ class SinkParser:
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
- self.BadSyntax(
- argstr, i, "needed ')', found end.")
- if argstr[i:i + 1] == ')':
+ self.BadSyntax(argstr, i, "needed ')', found end.")
+ if argstr[i: i + 1] == ")":
j = i + 1
break
item = []
- j = self.item(
- argstr, i, item) # @@@@@ should be path, was object
+ j = self.item(argstr, i, item) # @@@@@ should be path, was object
if j < 0:
- self.BadSyntax(argstr, i,
- "expected item in list or ')'")
+ self.BadSyntax(argstr, i, "expected item in list or ')'")
List.append(self._store.intern(item[0]))
res.append(thing_type(List, self._context))
return j
- j = self.tok('this', argstr, i) # This context
+ j = self.tok("this", argstr, i) # This context
if j >= 0:
- self.BadSyntax(argstr, i,
- "Keyword 'this' was ancient N3. Now use " +
- "@forSome and @forAll keywords.")
-
- # booleans
- j = self.tok('true', argstr, i)
+ self.BadSyntax(
+ argstr,
+ i,
+ "Keyword 'this' was ancient N3. Now use "
+ + "@forSome and @forAll keywords.",
+ )
+
+ # booleans
+ j = self.tok("true", argstr, i)
if j >= 0:
res.append(True)
return j
- j = self.tok('false', argstr, i)
+ j = self.tok("false", argstr, i)
if j >= 0:
res.append(False)
return j
@@ -1067,23 +1055,24 @@ class SinkParser:
Leaves the terminating punctuation in the buffer
"""
while 1:
- while 1: # skip repeat ;
+ while 1: # skip repeat ;
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, i,
- "EOF found when expected verb in property list")
- if argstr[j]!=';': break
- i = j+1
+ self.BadSyntax(
+ argstr, i, "EOF found when expected verb in property list"
+ )
+ if argstr[j] != ";":
+ break
+ i = j + 1
- if argstr[j:j + 2] == ":-":
+ if argstr[j: j + 2] == ":-":
if self.turtle:
self.BadSyntax(argstr, j, "Found in ':-' in Turtle mode")
i = j + 2
res = []
j = self.node(argstr, i, res, subj)
if j < 0:
- self.BadSyntax(argstr, i,
- "bad {} or () or [] node after :- ")
+ self.BadSyntax(argstr, i, "bad {} or () or [] node after :- ")
i = j
continue
i = j
@@ -1095,20 +1084,18 @@ class SinkParser:
objs = []
i = self.objectList(argstr, j, objs)
if i < 0:
- self.BadSyntax(argstr, j,
- "objectList expected")
+ self.BadSyntax(argstr, j, "objectList expected")
for obj in objs:
dira, sym = v[0]
- if dira == '->':
+ if dira == "->":
self.makeStatement((self._context, sym, subj, obj))
else:
self.makeStatement((self._context, sym, obj, subj))
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, j,
- "EOF found in list of objects")
- if argstr[i:i + 1] != ";":
+ self.BadSyntax(argstr, j, "EOF found in list of objects")
+ if argstr[i: i + 1] != ";":
return i
i = i + 1 # skip semicolon and continue
@@ -1118,10 +1105,9 @@ class SinkParser:
"""
i = self.skipSpace(argstr, j)
if i < 0:
- self.BadSyntax(argstr, i,
- "EOF found expecting comma sep list")
+ self.BadSyntax(argstr, i, "EOF found expecting comma sep list")
if argstr[i] == ".":
- return j # empty list is OK
+ return j # empty list is OK
i = what(argstr, i, res)
if i < 0:
return -1
@@ -1130,15 +1116,14 @@ class SinkParser:
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
- ch = argstr[j:j + 1]
+ ch = argstr[j: j + 1]
if ch != ",":
if ch != ".":
return -1
- return j # Found but not swallowed "."
+ return j # Found but not swallowed "."
i = what(argstr, j + 1, res)
if i < 0:
- self.BadSyntax(argstr, i,
- "bad list content")
+ self.BadSyntax(argstr, i, "bad list content")
def objectList(self, argstr, i, res):
i = self.object(argstr, i, res)
@@ -1147,10 +1132,9 @@ class SinkParser:
while 1:
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, j,
- "EOF found after object")
- if argstr[j:j + 1] != ",":
- return j # Found something else!
+ self.BadSyntax(argstr, j, "EOF found after object")
+ if argstr[j: j + 1] != ",":
+ return j # Found something else!
i = self.object(argstr, j + 1, res)
if i < 0:
return i
@@ -1159,14 +1143,13 @@ class SinkParser:
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
- if argstr[j:j + 1] == ".":
- return j + 1 # skip
- if argstr[j:j + 1] == "}":
- return j # don't skip it
- if argstr[j:j + 1] == "]":
+ if argstr[j: j + 1] == ".":
+ return j + 1 # skip
+ if argstr[j: j + 1] == "}":
+ return j # don't skip it
+ if argstr[j: j + 1] == "]":
return j
- self.BadSyntax(argstr, j,
- "expected '.' or '}' or ']' at end of statement")
+ self.BadSyntax(argstr, j, "expected '.' or '}' or ']' at end of statement")
def uri_ref2(self, argstr, i, res):
"""Generate uri from n3 representation.
@@ -1192,8 +1175,7 @@ class SinkParser:
if not self.turtle and pfx == "":
ns = join(self._baseURI or "", "#")
else:
- self.BadSyntax(argstr, i,
- "Prefix \"%s:\" not bound" % (pfx))
+ self.BadSyntax(argstr, i, 'Prefix "%s:" not bound' % (pfx))
symb = self._store.newSymbol(ns + ln)
if symb in self._variables:
res.append(self._variables[symb])
@@ -1227,11 +1209,11 @@ class SinkParser:
if self._baseURI:
uref = join(self._baseURI, uref) # was: uripath.join
else:
- assert ":" in uref, \
- "With no base URI, cannot deal with relative URIs"
- if argstr[i - 1:i] == "#" and not uref[-1:] == "#":
- uref = uref + \
- "#" # She meant it! Weirdness in urlparse?
+ assert (
+ ":" in uref
+ ), "With no base URI, cannot deal with relative URIs"
+ if argstr[i - 1: i] == "#" and not uref[-1:] == "#":
+ uref = uref + "#" # She meant it! Weirdness in urlparse?
symb = self._store.newSymbol(uref)
if symb in self._variables:
res.append(self._variables[symb])
@@ -1239,17 +1221,15 @@ class SinkParser:
res.append(symb)
return i + 1
i = i + 1
- self.BadSyntax(argstr, j,
- "unterminated URI reference")
+ self.BadSyntax(argstr, j, "unterminated URI reference")
elif self.keywordsSet:
v = []
j = self.bareWord(argstr, i, v)
if j < 0:
- return -1 # Forget varibles as a class, only in context.
+            return -1  # Forget variables as a class, only in context.
if v[0] in self.keywords:
- self.BadSyntax(argstr, i,
- 'Keyword "%s" not allowed here.' % v[0])
+ self.BadSyntax(argstr, i, 'Keyword "%s" not allowed here.' % v[0])
res.append(self._store.newSymbol(self._bindings[""] + v[0]))
return j
else:
@@ -1263,7 +1243,7 @@ class SinkParser:
if m is None:
break
self.lines = self.lines + 1
- i = m.end() # Point to first character unmatched
+ i = m.end() # Point to first character unmatched
self.startOfLine = i
m = ws.match(argstr, i)
if m is not None:
@@ -1281,30 +1261,31 @@ class SinkParser:
if j < 0:
return -1
- if argstr[j:j + 1] != "?":
+ if argstr[j: j + 1] != "?":
return -1
j = j + 1
i = j
if argstr[j] in "0123456789-":
- self.BadSyntax(argstr, j,
- "Varible name can't start with '%s'" % argstr[j])
+ self.BadSyntax(argstr, j, "Varible name can't start with '%s'" % argstr[j])
while i < len(argstr) and argstr[i] not in _notKeywordsChars:
i = i + 1
if self._parentContext is None:
varURI = self._store.newSymbol(self._baseURI + "#" + argstr[j:i])
if varURI not in self._variables:
self._variables[varURI] = self._context.newUniversal(
- varURI, why=self._reason2)
+ varURI, why=self._reason2
+ )
res.append(self._variables[varURI])
return i
- # @@ was:
- # self.BadSyntax(argstr, j,
- # "Can't use ?xxx syntax for variable in outermost level: %s"
- # % argstr[j-1:i])
+ # @@ was:
+ # self.BadSyntax(argstr, j,
+ # "Can't use ?xxx syntax for variable in outermost level: %s"
+ # % argstr[j-1:i])
varURI = self._store.newSymbol(self._baseURI + "#" + argstr[j:i])
if varURI not in self._parentVariables:
self._parentVariables[varURI] = self._parentContext.newUniversal(
- varURI, why=self._reason2)
+ varURI, why=self._reason2
+ )
res.append(self._parentVariables[varURI])
return i
@@ -1350,16 +1331,17 @@ class SinkParser:
if argstr[i - 1] == ".": # qname cannot end with "."
ln = ln[:-1]
- if not ln: return -1
+ if not ln:
+ return -1
i -= 1
else: # First character is non-alpha
- ln = '' # Was: None - TBL (why? useful?)
+ ln = "" # Was: None - TBL (why? useful?)
- if i < len(argstr) and argstr[i] == ':':
+ if i < len(argstr) and argstr[i] == ":":
pfx = ln
# bnodes names have different rules
- if pfx == '_':
+ if pfx == "_":
allowedChars = _notNameChars
else:
allowedChars = _notQNameChars
@@ -1367,10 +1349,10 @@ class SinkParser:
i = i + 1
lastslash = False
# start = i # TODO first char .
- ln = ''
+ ln = ""
while i < len(argstr):
c = argstr[i]
- if not lastslash and c == '\\':
+ if not lastslash and c == "\\":
lastslash = True
i += 1
@@ -1378,12 +1360,25 @@ class SinkParser:
if lastslash:
if c not in escapeChars:
- raise BadSyntax(self._thisDoc, self.line, argstr, i,
- "illegal escape "+c)
- elif c=='%':
- if argstr[i+1] not in hexChars or argstr[i+2] not in hexChars:
- raise BadSyntax(self._thisDoc, self.line, argstr, i,
- "illegal hex escape "+c)
+ raise BadSyntax(
+ self._thisDoc,
+ self.line,
+ argstr,
+ i,
+ "illegal escape " + c,
+ )
+ elif c == "%":
+ if (
+ argstr[i + 1] not in hexChars
+ or argstr[i + 2] not in hexChars
+ ):
+ raise BadSyntax(
+ self._thisDoc,
+ self.line,
+ argstr,
+ i,
+ "illegal hex escape " + c,
+ )
ln = ln + c
i = i + 1
@@ -1393,22 +1388,22 @@ class SinkParser:
if lastslash:
raise BadSyntax(
- self._thisDoc, self.line, argstr, i,
- "qname cannot end with \\")
+ self._thisDoc, self.line, argstr, i, "qname cannot end with \\"
+ )
-
- if argstr[i-1]=='.':
+ if argstr[i - 1] == ".":
# localname cannot end in .
ln = ln[:-1]
- if not ln: return -1
+ if not ln:
+ return -1
i -= 1
res.append((pfx, ln))
return i
- else: # delimiter was not ":"
+ else: # delimiter was not ":"
if ln and self.keywordsSet and ln not in self.keywords:
- res.append(('', ln))
+ res.append(("", ln))
return i
return -1
@@ -1424,7 +1419,7 @@ class SinkParser:
i = j
if argstr[i] in self.string_delimiters:
- if argstr[i:i + 3] == argstr[i] * 3:
+ if argstr[i: i + 3] == argstr[i] * 3:
delim = argstr[i] * 3
else:
delim = argstr[i]
@@ -1472,7 +1467,7 @@ class SinkParser:
# return -1 ## or fall through?
if argstr[i] in self.string_delimiters:
- if argstr[i:i + 3] == argstr[i] * 3:
+ if argstr[i: i + 3] == argstr[i] * 3:
delim = argstr[i] * 3
else:
delim = argstr[i]
@@ -1481,17 +1476,20 @@ class SinkParser:
dt = None
j, s = self.strconst(argstr, i, delim)
lang = None
- if argstr[j:j + 1] == "@": # Language?
+ if argstr[j: j + 1] == "@": # Language?
m = langcode.match(argstr, j + 1)
if m is None:
raise BadSyntax(
- self._thisDoc, startline, argstr, i,
- "Bad language code syntax on string " +
- "literal, after @")
+ self._thisDoc,
+ startline,
+ argstr,
+ i,
+ "Bad language code syntax on string " + "literal, after @",
+ )
i = m.end()
- lang = argstr[j + 1:i]
+ lang = argstr[j + 1: i]
j = i
- if argstr[j:j + 2] == "^^":
+ if argstr[j: j + 2] == "^^":
res2 = []
j = self.uri_ref2(argstr, j + 2, res2) # Read datatype URI
dt = res2[0]
@@ -1503,7 +1501,7 @@ class SinkParser:
def uriOf(self, sym):
if isinstance(sym, tuple):
return sym[1] # old system for --pipe
- # return sym.uriref() # cwm api
+ # return sym.uriref() # cwm api
return sym
def strconst(self, argstr, i, delim):
@@ -1514,35 +1512,39 @@ class SinkParser:
delim2, delim3, delim4, delim5 = delim1 * 2, delim1 * 3, delim1 * 4, delim1 * 5
j = i
- ustr = u"" # Empty unicode string
+ ustr = u"" # Empty unicode string
startline = self.lines # Remember where for error messages
while j < len(argstr):
if argstr[j] == delim1:
if delim == delim1: # done when delim is " or '
i = j + 1
return i, ustr
- if delim == delim3: # done when delim is """ or ''' and, respectively ...
- if argstr[j:j + 5] == delim5: # ... we have "" or '' before
+ if (
+ delim == delim3
+ ): # done when delim is """ or ''' and, respectively ...
+ if argstr[j: j + 5] == delim5: # ... we have "" or '' before
i = j + 5
ustr = ustr + delim2
return i, ustr
- if argstr[j:j + 4] == delim4: # ... we have " or ' before
+ if argstr[j: j + 4] == delim4: # ... we have " or ' before
i = j + 4
ustr = ustr + delim1
return i, ustr
- if argstr[j:j + 3] == delim3: # current " or ' is part of delim
+ if argstr[j: j + 3] == delim3: # current " or ' is part of delim
i = j + 3
return i, ustr
- # we are inside of the string and current char is " or '
+ # we are inside of the string and current char is " or '
j = j + 1
ustr = ustr + delim1
continue
- m = interesting.search(argstr, j) # was argstr[j:].
- # Note for pos param to work, MUST be compiled ... re bug?
+ m = interesting.search(argstr, j) # was argstr[j:].
+ # Note for pos param to work, MUST be compiled ... re bug?
assert m, "Quote expected in string at ^ in %s^%s" % (
- argstr[j - 20:j], argstr[j:j + 20]) # at least need a quote
+ argstr[j - 20: j],
+ argstr[j: j + 20],
+ ) # at least need a quote
i = m.start()
try:
@@ -1553,12 +1555,15 @@ class SinkParser:
err = err + (" %02x" % ord(c))
streason = sys.exc_info()[1].__str__()
raise BadSyntax(
- self._thisDoc, startline, argstr, j,
- "Unicode error appending characters" +
- " %s to string, because\n\t%s"
- % (err, streason))
+ self._thisDoc,
+ startline,
+ argstr,
+ j,
+ "Unicode error appending characters"
+ + " %s to string, because\n\t%s" % (err, streason),
+ )
- # print "@@@ i = ",i, " j=",j, "m.end=", m.end()
+ # print "@@@ i = ",i, " j=",j, "m.end=", m.end()
ch = argstr[i]
if ch == delim1:
@@ -1571,8 +1576,12 @@ class SinkParser:
elif ch in "\r\n":
if delim == delim1:
raise BadSyntax(
- self._thisDoc, startline, argstr, i,
- "newline found in string literal")
+ self._thisDoc,
+ startline,
+ argstr,
+ i,
+ "newline found in string literal",
+ )
self.lines = self.lines + 1
ustr = ustr + ch
j = i + 1
@@ -1580,14 +1589,18 @@ class SinkParser:
elif ch == "\\":
j = i + 1
- ch = argstr[j:j + 1] # Will be empty if string ends
+ ch = argstr[j: j + 1] # Will be empty if string ends
if not ch:
raise BadSyntax(
- self._thisDoc, startline, argstr, i,
- "unterminated string literal (2)")
- k = 'abfrtvn\\"\''.find(ch)
+ self._thisDoc,
+ startline,
+ argstr,
+ i,
+ "unterminated string literal (2)",
+ )
+ k = "abfrtvn\\\"'".find(ch)
if k >= 0:
- uch = '\a\b\f\r\t\v\n\\"\''[k]
+ uch = "\a\b\f\r\t\v\n\\\"'"[k]
ustr = ustr + uch
j = j + 1
elif ch == "u":
@@ -1597,41 +1610,43 @@ class SinkParser:
j, ch = self.UEscape(argstr, j + 1, startline)
ustr = ustr + ch
else:
- self.BadSyntax(argstr, i,
- "bad escape")
+ self.BadSyntax(argstr, i, "bad escape")
- self.BadSyntax(argstr, i,
- "unterminated string literal")
+ self.BadSyntax(argstr, i, "unterminated string literal")
def _unicodeEscape(self, argstr, i, startline, reg, n, prefix):
- if len(argstr)<i+n:
+ if len(argstr) < i + n:
raise BadSyntax(
- self._thisDoc, startline, argstr, i,
- "unterminated string literal(3)")
+ self._thisDoc, startline, argstr, i, "unterminated string literal(3)"
+ )
try:
- return i+n, reg.sub(unicodeExpand, '\\'+prefix+argstr[i:i+n])
+ return i + n, reg.sub(unicodeExpand, "\\" + prefix + argstr[i: i + n])
except:
raise BadSyntax(
- self._thisDoc, startline, argstr, i,
- "bad string literal hex escape: "+argstr[i:i+n])
+ self._thisDoc,
+ startline,
+ argstr,
+ i,
+ "bad string literal hex escape: " + argstr[i: i + n],
+ )
def uEscape(self, argstr, i, startline):
- return self._unicodeEscape(argstr, i, startline, unicodeEscape4, 4, 'u')
+ return self._unicodeEscape(argstr, i, startline, unicodeEscape4, 4, "u")
def UEscape(self, argstr, i, startline):
- return self._unicodeEscape(argstr, i, startline, unicodeEscape8, 8, 'U')
+ return self._unicodeEscape(argstr, i, startline, unicodeEscape8, 8, "U")
def BadSyntax(self, argstr, i, msg):
raise BadSyntax(self._thisDoc, self.lines, argstr, i, msg)
+
# If we are going to do operators then they should generate
# [ is operator:plus of ( \1 \2 ) ]
class BadSyntax(SyntaxError):
def __init__(self, uri, lines, argstr, i, why):
- self._str = argstr.encode(
- 'utf-8') # Better go back to strings for errors
+ self._str = argstr.encode("utf-8") # Better go back to strings for errors
self._i = i
self._why = why
self.lines = lines
@@ -1651,16 +1666,21 @@ class BadSyntax(SyntaxError):
else:
post = ""
- return 'at line %i of <%s>:\nBad syntax (%s) at ^ in:\n"%s%s^%s%s"' \
- % (self.lines + 1, self._uri, self._why, pre,
- argstr[st:i], argstr[i:i + 60], post)
+ return 'at line %i of <%s>:\nBad syntax (%s) at ^ in:\n"%s%s^%s%s"' % (
+ self.lines + 1,
+ self._uri,
+ self._why,
+ pre,
+ argstr[st:i],
+ argstr[i: i + 60],
+ post,
+ )
@property
def message(self):
return str(self)
-
###############################################################################
class Formula(object):
number = 0
@@ -1673,25 +1693,24 @@ class Formula(object):
self.existentials = {}
self.universals = {}
- self.quotedgraph = QuotedGraph(
- store=parent.store, identifier=self.id())
+ self.quotedgraph = QuotedGraph(store=parent.store, identifier=self.id())
def __str__(self):
- return '_:Formula%s' % self.number
+ return "_:Formula%s" % self.number
def id(self):
- return BNode('_:Formula%s' % self.number)
+ return BNode("_:Formula%s" % self.number)
def newBlankNode(self, uri=None, why=None):
if uri is None:
self.counter += 1
- bn = BNode('f%sb%s' % (self.uuid, self.counter))
+ bn = BNode("f%sb%s" % (self.uuid, self.counter))
else:
- bn = BNode(uri.split('#').pop().replace('_', 'b'))
+ bn = BNode(uri.split("#").pop().replace("_", "b"))
return bn
def newUniversal(self, uri, why=None):
- return Variable(uri.split('#').pop())
+ return Variable(uri.split("#").pop())
def declareExistential(self, x):
self.existentials[x] = self.newBlankNode()
@@ -1701,7 +1720,7 @@ class Formula(object):
return self.quotedgraph
-r_hibyte = re.compile(r'([\x80-\xff])')
+r_hibyte = re.compile(r"([\x80-\xff])")
class RDFSink(object):
@@ -1726,9 +1745,9 @@ class RDFSink(object):
return arg.newBlankNode(uri)
elif isinstance(arg, Graph) or arg is None:
self.counter += 1
- bn = BNode('n' + str(self.counter))
+ bn = BNode("n" + str(self.counter))
else:
- bn = BNode(str(arg[0]).split('#').pop().replace('_', 'b'))
+ bn = BNode(str(arg[0]).split("#").pop().replace("_", "b"))
return bn
def newLiteral(self, s, dt, lang):
@@ -1738,18 +1757,12 @@ class RDFSink(object):
return Literal(s, lang=lang)
def newList(self, n, f):
- nil = self.newSymbol(
- 'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil'
- )
+ nil = self.newSymbol("http://www.w3.org/1999/02/22-rdf-syntax-ns#nil")
if not n:
return nil
- first = self.newSymbol(
- 'http://www.w3.org/1999/02/22-rdf-syntax-ns#first'
- )
- rest = self.newSymbol(
- 'http://www.w3.org/1999/02/22-rdf-syntax-ns#rest'
- )
+ first = self.newSymbol("http://www.w3.org/1999/02/22-rdf-syntax-ns#first")
+ rest = self.newSymbol("http://www.w3.org/1999/02/22-rdf-syntax-ns#rest")
af = a = self.newBlankNode(f)
for ne in n[:-1]:
@@ -1765,12 +1778,12 @@ class RDFSink(object):
return set(args)
def setDefaultNamespace(self, *args):
- return ':'.join(repr(n) for n in args)
+ return ":".join(repr(n) for n in args)
def makeStatement(self, quadruple, why=None):
f, p, s, o = quadruple
- if hasattr(p, 'formula'):
+ if hasattr(p, "formula"):
raise Exception("Formula used as predicate")
s = self.normalise(f, s)
@@ -1778,31 +1791,31 @@ class RDFSink(object):
o = self.normalise(f, o)
if f == self.rootFormula:
- # print s, p, o, '.'
+ # print s, p, o, '.'
self.graph.add((s, p, o))
elif isinstance(f, Formula):
f.quotedgraph.add((s, p, o))
else:
- f.add((s,p,o))
+ f.add((s, p, o))
- # return str(quadruple)
+ # return str(quadruple)
def normalise(self, f, n):
if isinstance(n, tuple):
- return URIRef(text_type(n[1]))
+ return URIRef(str(n[1]))
if isinstance(n, bool):
s = Literal(str(n).lower(), datatype=BOOLEAN_DATATYPE)
return s
if isinstance(n, int) or isinstance(n, long_type):
- s = Literal(text_type(n), datatype=INTEGER_DATATYPE)
+ s = Literal(str(n), datatype=INTEGER_DATATYPE)
return s
if isinstance(n, Decimal):
value = str(n)
- if value == '-0':
- value = '0'
+ if value == "-0":
+ value = "0"
s = Literal(value, datatype=DECIMAL_DATATYPE)
return s
@@ -1814,11 +1827,11 @@ class RDFSink(object):
if n in f.existentials:
return f.existentials[n]
- # if isinstance(n, Var):
- # if f.universals.has_key(n):
- # return f.universals[n]
- # f.universals[n] = f.newBlankNode()
- # return f.universals[n]
+ # if isinstance(n, Var):
+ # if f.universals.has_key(n):
+ # return f.universals[n]
+ # f.universals[n] = f.newBlankNode()
+ # return f.universals[n]
return n
@@ -1841,7 +1854,6 @@ class RDFSink(object):
#
-
def hexify(ustr):
"""Use URL encoding to return an ASCII string
corresponding to the given UTF8 string
@@ -1850,15 +1862,15 @@ def hexify(ustr):
%(b)s'http://example/a%%20b'
"""
- # s1=ustr.encode('utf-8')
+ # s1=ustr.encode('utf-8')
s = ""
- for ch in ustr: # .encode('utf-8'):
+ for ch in ustr: # .encode('utf-8'):
if ord(ch) > 126 or ord(ch) < 33:
ch = "%%%02X" % ord(ch)
else:
ch = "%c" % ord(ch)
s = s + ch
- return b(s)
+ return s.encode("latin-1")
class TurtleParser(Parser):
@@ -1876,13 +1888,13 @@ class TurtleParser(Parser):
if encoding not in [None, "utf-8"]:
raise Exception(
- ("N3/Turtle files are always utf-8 encoded, ",
- "I was passed: %s") % encoding)
+ ("N3/Turtle files are always utf-8 encoded, ", "I was passed: %s")
+ % encoding
+ )
sink = RDFSink(graph)
- baseURI = graph.absolutize(
- source.getPublicId() or source.getSystemId() or "")
+ baseURI = graph.absolutize(source.getPublicId() or source.getSystemId() or "")
p = SinkParser(sink, baseURI=baseURI, turtle=turtle)
p.loadStream(source.getByteStream())
@@ -1904,38 +1916,40 @@ class N3Parser(TurtleParser):
pass
def parse(self, source, graph, encoding="utf-8"):
- # we're currently being handed a Graph, not a ConjunctiveGraph
+ # we're currently being handed a Graph, not a ConjunctiveGraph
assert graph.store.context_aware # is this implied by formula_aware
assert graph.store.formula_aware
conj_graph = ConjunctiveGraph(store=graph.store)
conj_graph.default_context = graph # TODO: CG __init__ should have a
- # default_context arg
- # TODO: update N3Processor so that it can use conj_graph as the sink
+ # default_context arg
+ # TODO: update N3Processor so that it can use conj_graph as the sink
conj_graph.namespace_manager = graph.namespace_manager
TurtleParser.parse(self, source, conj_graph, encoding, turtle=False)
-def _test(): # pragma: no cover
+def _test(): # pragma: no cover
import doctest
+
doctest.testmod()
# if __name__ == '__main__':
# _test()
-def main(): # pragma: no cover
+
+def main(): # pragma: no cover
g = ConjunctiveGraph()
sink = RDFSink(g)
- base_uri = 'file://' + os.path.join(os.getcwd(), sys.argv[1])
+ base_uri = "file://" + os.path.join(os.getcwd(), sys.argv[1])
p = SinkParser(sink, baseURI=base_uri)
- p._bindings[''] = p._baseURI + '#'
+ p._bindings[""] = p._baseURI + "#"
p.startDoc()
- f = open(sys.argv[1], 'rb')
+ f = open(sys.argv[1], "rb")
rdbytes = f.read()
f.close()
@@ -1945,7 +1959,8 @@ def main(): # pragma: no cover
print(t)
-if __name__ == '__main__':
+
+if __name__ == "__main__":
main()
# ends
diff --git a/rdflib/plugins/parsers/nquads.py b/rdflib/plugins/parsers/nquads.py
index 5857c9e4..0c29fc4c 100644
--- a/rdflib/plugins/parsers/nquads.py
+++ b/rdflib/plugins/parsers/nquads.py
@@ -28,8 +28,6 @@ from __future__ import print_function
from codecs import getreader
-from six import b
-
from rdflib import ConjunctiveGraph
# Build up from the NTriples parser:
@@ -37,28 +35,27 @@ from rdflib.plugins.parsers.ntriples import NTriplesParser
from rdflib.plugins.parsers.ntriples import ParseError
from rdflib.plugins.parsers.ntriples import r_tail
from rdflib.plugins.parsers.ntriples import r_wspace
-from rdflib.plugins.parsers.ntriples import r_wspaces
-__all__ = ['NQuadsParser']
+__all__ = ["NQuadsParser"]
class NQuadsParser(NTriplesParser):
-
def parse(self, inputsource, sink, **kwargs):
"""Parse f as an N-Triples file."""
- assert sink.store.context_aware, ("NQuadsParser must be given"
- " a context aware store.")
+ assert sink.store.context_aware, (
+ "NQuadsParser must be given" " a context aware store."
+ )
self.sink = ConjunctiveGraph(store=sink.store, identifier=sink.identifier)
source = inputsource.getByteStream()
- if not hasattr(source, 'read'):
+ if not hasattr(source, "read"):
raise ParseError("Item to parse must be a file-like object.")
- source = getreader('utf-8')(source)
+ source = getreader("utf-8")(source)
self.file = source
- self.buffer = ''
+ self.buffer = ""
while True:
self.line = __line = self.readline()
if self.line is None:
@@ -72,7 +69,7 @@ class NQuadsParser(NTriplesParser):
def parseline(self):
self.eat(r_wspace)
- if (not self.line) or self.line.startswith(('#')):
+ if (not self.line) or self.line.startswith(("#")):
return # The line is empty or a comment
subject = self.subject()
diff --git a/rdflib/plugins/parsers/nt.py b/rdflib/plugins/parsers/nt.py
index 783488af..d7d3b336 100644
--- a/rdflib/plugins/parsers/nt.py
+++ b/rdflib/plugins/parsers/nt.py
@@ -1,7 +1,7 @@
from rdflib.parser import Parser
from rdflib.plugins.parsers.ntriples import NTriplesParser
-__all__ = ['NTSink', 'NTParser']
+__all__ = ["NTSink", "NTParser"]
class NTSink(object):
diff --git a/rdflib/plugins/parsers/ntriples.py b/rdflib/plugins/parsers/ntriples.py
index 67dbe9d7..41c529a7 100644
--- a/rdflib/plugins/parsers/ntriples.py
+++ b/rdflib/plugins/parsers/ntriples.py
@@ -19,32 +19,28 @@ from rdflib.term import Literal
from rdflib.compat import cast_bytes
from rdflib.compat import decodeUnicodeEscape
-from rdflib.compat import ascii
-from six import BytesIO
-from six import string_types
-from six import text_type
-from six import unichr
+from io import BytesIO
-__all__ = ['unquote', 'uriquote', 'Sink', 'NTriplesParser']
+__all__ = ["unquote", "uriquote", "Sink", "NTriplesParser"]
uriref = r'<([^:]+:[^\s"<>]*)>'
literal = r'"([^"\\]*(?:\\.[^"\\]*)*)"'
-litinfo = r'(?:@([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)|\^\^' + uriref + r')?'
+litinfo = r"(?:@([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)|\^\^" + uriref + r")?"
-r_line = re.compile(r'([^\r\n]*)(?:\r\n|\r|\n)')
-r_wspace = re.compile(r'[ \t]*')
-r_wspaces = re.compile(r'[ \t]+')
-r_tail = re.compile(r'[ \t]*\.[ \t]*(#.*)?')
+r_line = re.compile(r"([^\r\n]*)(?:\r\n|\r|\n)")
+r_wspace = re.compile(r"[ \t]*")
+r_wspaces = re.compile(r"[ \t]+")
+r_tail = re.compile(r"[ \t]*\.[ \t]*(#.*)?")
r_uriref = re.compile(uriref)
-r_nodeid = re.compile(r'_:([A-Za-z0-9_:]([-A-Za-z0-9_:\.]*[-A-Za-z0-9_:])?)')
+r_nodeid = re.compile(r"_:([A-Za-z0-9_:]([-A-Za-z0-9_:\.]*[-A-Za-z0-9_:])?)")
r_literal = re.compile(literal + litinfo)
bufsiz = 2048
validate = False
-class Node(text_type):
+class Node(str):
pass
@@ -61,21 +57,20 @@ class Sink(object):
print(s, p, o)
-quot = {'t': u'\t', 'n': u'\n', 'r': u'\r', '"': u'"', '\\':
- u'\\'}
-r_safe = re.compile(r'([\x20\x21\x23-\x5B\x5D-\x7E]+)')
+quot = {"t": u"\t", "n": u"\n", "r": u"\r", '"': u'"', "\\": u"\\"}
+r_safe = re.compile(r"([\x20\x21\x23-\x5B\x5D-\x7E]+)")
r_quot = re.compile(r'\\(t|n|r|"|\\)')
-r_uniquot = re.compile(r'\\u([0-9A-F]{4})|\\U([0-9A-F]{8})')
+r_uniquot = re.compile(r"\\u([0-9A-F]{4})|\\U([0-9A-F]{8})")
def unquote(s):
"""Unquote an N-Triples string."""
if not validate:
- if isinstance(s, text_type): # nquads
+ if isinstance(s, str): # nquads
s = decodeUnicodeEscape(s)
else:
- s = s.decode('unicode-escape')
+ s = s.decode("unicode-escape")
return s
else:
@@ -100,23 +95,22 @@ def unquote(s):
codepoint = int(u or U, 16)
if codepoint > 0x10FFFF:
raise ParseError("Disallowed codepoint: %08X" % codepoint)
- result.append(unichr(codepoint))
- elif s.startswith('\\'):
+ result.append(chr(codepoint))
+ elif s.startswith("\\"):
raise ParseError("Illegal escape at: %s..." % s[:10])
else:
raise ParseError("Illegal literal character: %r" % s[0])
- return u''.join(result)
+ return u"".join(result)
-r_hibyte = re.compile(r'([\x80-\xFF])')
+r_hibyte = re.compile(r"([\x80-\xFF])")
def uriquote(uri):
if not validate:
return uri
else:
- return r_hibyte.sub(
- lambda m: '%%%02X' % ord(m.group(1)), uri)
+ return r_hibyte.sub(lambda m: "%%%02X" % ord(m.group(1)), uri)
class NTriplesParser(object):
@@ -138,14 +132,14 @@ class NTriplesParser(object):
def parse(self, f):
"""Parse f as an N-Triples file."""
- if not hasattr(f, 'read'):
+ if not hasattr(f, "read"):
raise ParseError("Item to parse must be a file-like object.")
# since N-Triples 1.1 files can and should be utf-8 encoded
- f = codecs.getreader('utf-8')(f)
+ f = codecs.getreader("utf-8")(f)
self.file = f
- self.buffer = ''
+ self.buffer = ""
while True:
self.line = self.readline()
if self.line is None:
@@ -158,7 +152,7 @@ class NTriplesParser(object):
def parsestring(self, s):
"""Parse s as an N-Triples string."""
- if not isinstance(s, string_types):
+ if not isinstance(s, str):
raise ParseError("Item to parse must be a string instance.")
f = BytesIO()
f.write(cast_bytes(s))
@@ -191,7 +185,7 @@ class NTriplesParser(object):
def parseline(self):
self.eat(r_wspace)
- if (not self.line) or self.line.startswith('#'):
+ if (not self.line) or self.line.startswith("#"):
return # The line is empty or a comment
subject = self.subject()
@@ -239,7 +233,7 @@ class NTriplesParser(object):
return objt
def uriref(self):
- if self.peek('<'):
+ if self.peek("<"):
uri = self.eat(r_uriref).group(1)
uri = unquote(uri)
uri = uriquote(uri)
@@ -247,7 +241,7 @@ class NTriplesParser(object):
return False
def nodeid(self):
- if self.peek('_'):
+ if self.peek("_"):
# Fix for https://github.com/RDFLib/rdflib/issues/204
bnode_id = self.eat(r_nodeid).group(1)
new_id = self._bnode_ids.get(bnode_id, None)
@@ -281,6 +275,7 @@ class NTriplesParser(object):
return Literal(lit, lang, dtype)
return False
+
# # Obsolete, unused
# def parseURI(uri):
# import urllib
diff --git a/rdflib/plugins/parsers/rdfxml.py b/rdflib/plugins/parsers/rdfxml.py
index 810f3550..76761edd 100644
--- a/rdflib/plugins/parsers/rdfxml.py
+++ b/rdflib/plugins/parsers/rdfxml.py
@@ -7,7 +7,7 @@ from xml.sax.handler import ErrorHandler
from xml.sax.saxutils import handler, quoteattr, escape
-from six.moves.urllib.parse import urldefrag, urljoin
+from urllib.parse import urldefrag, urljoin
from rdflib.namespace import RDF, is_ncname
from rdflib.term import URIRef
from rdflib.term import BNode
@@ -15,22 +15,30 @@ from rdflib.term import Literal
from rdflib.exceptions import ParserError, Error
from rdflib.parser import Parser
-__all__ = ['create_parser', 'BagID', 'ElementHandler',
- 'RDFXMLHandler', 'RDFXMLParser']
+__all__ = ["create_parser", "BagID", "ElementHandler", "RDFXMLHandler", "RDFXMLParser"]
RDFNS = RDF
# http://www.w3.org/TR/rdf-syntax-grammar/#eventterm-attribute-URI
# A mapping from unqualified terms to their qualified version.
-UNQUALIFIED = {"about": RDF.about,
- "ID": RDF.ID,
- "type": RDF.type,
- "resource": RDF.resource,
- "parseType": RDF.parseType}
+UNQUALIFIED = {
+ "about": RDF.about,
+ "ID": RDF.ID,
+ "type": RDF.type,
+ "resource": RDF.resource,
+ "parseType": RDF.parseType,
+}
# http://www.w3.org/TR/rdf-syntax-grammar/#coreSyntaxTerms
-CORE_SYNTAX_TERMS = [RDF.RDF, RDF.ID, RDF.about, RDF.parseType,
- RDF.resource, RDF.nodeID, RDF.datatype]
+CORE_SYNTAX_TERMS = [
+ RDF.RDF,
+ RDF.ID,
+ RDF.about,
+ RDF.parseType,
+ RDF.resource,
+ RDF.nodeID,
+ RDF.datatype,
+]
# http://www.w3.org/TR/rdf-syntax-grammar/#syntaxTerms
SYNTAX_TERMS = CORE_SYNTAX_TERMS + [RDF.Description, RDF.li]
@@ -39,15 +47,16 @@ SYNTAX_TERMS = CORE_SYNTAX_TERMS + [RDF.Description, RDF.li]
OLD_TERMS = [
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#aboutEach"),
URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#aboutEachPrefix"),
- URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#bagID")]
+ URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#bagID"),
+]
NODE_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.li, ] + OLD_TERMS
NODE_ELEMENT_ATTRIBUTES = [RDF.ID, RDF.nodeID, RDF.about]
-PROPERTY_ELEMENT_EXCEPTIONS = \
- CORE_SYNTAX_TERMS + [RDF.Description, ] + OLD_TERMS
-PROPERTY_ATTRIBUTE_EXCEPTIONS = \
+PROPERTY_ELEMENT_EXCEPTIONS = CORE_SYNTAX_TERMS + [RDF.Description, ] + OLD_TERMS
+PROPERTY_ATTRIBUTE_EXCEPTIONS = (
CORE_SYNTAX_TERMS + [RDF.Description, RDF.li] + OLD_TERMS
+)
PROPERTY_ELEMENT_ATTRIBUTES = [RDF.ID, RDF.resource, RDF.nodeID]
XMLNS = "http://www.w3.org/XML/1998/namespace"
@@ -56,7 +65,7 @@ LANG = (XMLNS, "lang")
class BagID(URIRef):
- __slots__ = ['li']
+ __slots__ = ["li"]
def __init__(self, val):
super(URIRef, self).__init__(val)
@@ -64,13 +73,26 @@ class BagID(URIRef):
def next_li(self):
self.li += 1
- return RDFNS['_%s' % self.li]
+ return RDFNS["_%s" % self.li]
class ElementHandler(object):
- __slots__ = ['start', 'char', 'end', 'li', 'id',
- 'base', 'subject', 'predicate', 'object',
- 'list', 'language', 'datatype', 'declared', 'data']
+ __slots__ = [
+ "start",
+ "char",
+ "end",
+ "li",
+ "id",
+ "base",
+ "subject",
+ "predicate",
+ "object",
+ "list",
+ "language",
+ "datatype",
+ "declared",
+ "data",
+ ]
def __init__(self):
self.start = None
@@ -89,11 +111,10 @@ class ElementHandler(object):
def next_li(self):
self.li += 1
- return RDFNS['_%s' % self.li]
+ return RDFNS["_%s" % self.li]
class RDFXMLHandler(handler.ContentHandler):
-
def __init__(self, store):
self.store = store
self.preserve_bnode_ids = False
@@ -103,7 +124,10 @@ class RDFXMLHandler(handler.ContentHandler):
document_element = ElementHandler()
document_element.start = self.document_element_start
document_element.end = lambda name, qname: None
- self.stack = [None, document_element, ]
+ self.stack = [
+ None,
+ document_element,
+ ]
self.ids = {} # remember IDs we have already seen
self.bnode = {}
self._ns_contexts = [{}] # contains uri -> prefix dicts
@@ -137,16 +161,14 @@ class RDFXMLHandler(handler.ContentHandler):
if parent and parent.base:
base = urljoin(parent.base, base)
else:
- systemId = self.locator.getPublicId() \
- or self.locator.getSystemId()
+ systemId = self.locator.getPublicId() or self.locator.getSystemId()
if systemId:
base = urljoin(systemId, base)
else:
if parent:
base = parent.base
if base is None:
- systemId = self.locator.getPublicId() \
- or self.locator.getSystemId()
+ systemId = self.locator.getPublicId() or self.locator.getSystemId()
if systemId:
base, frag = urldefrag(systemId)
current.base = base
@@ -181,25 +203,30 @@ class RDFXMLHandler(handler.ContentHandler):
def error(self, message):
locator = self.locator
- info = "%s:%s:%s: " % (locator.getSystemId(),
- locator.getLineNumber(),
- locator.getColumnNumber())
+ info = "%s:%s:%s: " % (
+ locator.getSystemId(),
+ locator.getLineNumber(),
+ locator.getColumnNumber(),
+ )
raise ParserError(info + message)
def get_current(self):
return self.stack[-2]
+
# Create a read only property called current so that self.current
# give the current element handler.
current = property(get_current)
def get_next(self):
return self.stack[-1]
+
# Create a read only property that gives the element handler to be
# used for the next element.
next = property(get_next)
def get_parent(self):
return self.stack[-3]
+
# Create a read only property that gives the current parent
# element handler
parent = property(get_parent)
@@ -233,7 +260,7 @@ class RDFXMLHandler(handler.ContentHandler):
def document_element_start(self, name, qname, attrs):
if name[0] and URIRef("".join(name)) == RDF.RDF:
# Cheap hack so 2to3 doesn't turn it into __next__
- next = getattr(self, 'next')
+ next = getattr(self, "next")
next.start = self.node_element_start
next.end = self.node_element_end
else:
@@ -248,7 +275,7 @@ class RDFXMLHandler(handler.ContentHandler):
absolutize = self.absolutize
# Cheap hack so 2to3 doesn't turn it into __next__
- next = getattr(self, 'next')
+ next = getattr(self, "next")
next.start = self.property_element_start
next.end = self.property_element_end
@@ -257,27 +284,21 @@ class RDFXMLHandler(handler.ContentHandler):
if RDF.ID in atts:
if RDF.about in atts or RDF.nodeID in atts:
- self.error(
- "Can have at most one of rdf:ID, rdf:about, and rdf:nodeID"
- )
+ self.error("Can have at most one of rdf:ID, rdf:about, and rdf:nodeID")
id = atts[RDF.ID]
if not is_ncname(id):
self.error("rdf:ID value is not a valid NCName: %s" % id)
subject = absolutize("#%s" % id)
if subject in self.ids:
- self.error(
- "two elements cannot use the same ID: '%s'" % subject)
+ self.error("two elements cannot use the same ID: '%s'" % subject)
self.ids[subject] = 1 # IDs can only appear once within a document
elif RDF.nodeID in atts:
if RDF.ID in atts or RDF.about in atts:
- self.error(
- "Can have at most one of rdf:ID, rdf:about, and rdf:nodeID"
- )
+ self.error("Can have at most one of rdf:ID, rdf:about, and rdf:nodeID")
nodeID = atts[RDF.nodeID]
if not is_ncname(nodeID):
- self.error(
- "rdf:nodeID value is not a valid NCName: %s" % nodeID)
+ self.error("rdf:nodeID value is not a valid NCName: %s" % nodeID)
if self.preserve_bnode_ids is False:
if nodeID in self.bnode:
subject = self.bnode[nodeID]
@@ -288,9 +309,7 @@ class RDFXMLHandler(handler.ContentHandler):
subject = BNode(nodeID)
elif RDF.about in atts:
if RDF.ID in atts or RDF.nodeID in atts:
- self.error(
- "Can have at most one of rdf:ID, rdf:about, and rdf:nodeID"
- )
+ self.error("Can have at most one of rdf:ID, rdf:about, and rdf:nodeID")
subject = absolutize(atts[RDF.about])
else:
subject = BNode()
@@ -330,7 +349,9 @@ class RDFXMLHandler(handler.ContentHandler):
if self.parent.object and self.current != self.stack[2]:
- self.error("Repeat node-elements inside property elements: %s"%"".join(name))
+ self.error(
+ "Repeat node-elements inside property elements: %s" % "".join(name)
+ )
self.parent.object = self.current.subject
@@ -340,7 +361,7 @@ class RDFXMLHandler(handler.ContentHandler):
absolutize = self.absolutize
# Cheap hack so 2to3 doesn't turn it into __next__
- next = getattr(self, 'next')
+ next = getattr(self, "next")
object = None
current.data = None
current.list = None
@@ -366,17 +387,14 @@ class RDFXMLHandler(handler.ContentHandler):
nodeID = atts.get(RDF.nodeID, None)
parse_type = atts.get(RDF.parseType, None)
if resource is not None and nodeID is not None:
- self.error(
- "Property element cannot have both rdf:nodeID and rdf:resource"
- )
+ self.error("Property element cannot have both rdf:nodeID and rdf:resource")
if resource is not None:
object = absolutize(resource)
next.start = self.node_element_start
next.end = self.node_element_end
elif nodeID is not None:
if not is_ncname(nodeID):
- self.error(
- "rdf:nodeID value is not a valid NCName: %s" % nodeID)
+ self.error("rdf:nodeID value is not a valid NCName: %s" % nodeID)
if self.preserve_bnode_ids is False:
if nodeID in self.bnode:
object = self.bnode[nodeID]
@@ -401,16 +419,16 @@ class RDFXMLHandler(handler.ContentHandler):
elif parse_type == "Collection":
current.char = None
object = current.list = RDF.nil # BNode()
- # self.parent.subject
+ # self.parent.subject
next.start = self.node_element_start
next.end = self.list_node_element_end
else: # if parse_type=="Literal":
- # All other values are treated as Literal
- # See: http://www.w3.org/TR/rdf-syntax-grammar/
- # parseTypeOtherPropertyElt
+ # All other values are treated as Literal
+ # See: http://www.w3.org/TR/rdf-syntax-grammar/
+ # parseTypeOtherPropertyElt
object = Literal("", datatype=RDF.XMLLiteral)
current.char = self.literal_element_char
- current.declared = {XMLNS: 'xml'}
+ current.declared = {XMLNS: "xml"}
next.start = self.literal_element_start
next.char = self.literal_element_char
next.end = self.literal_element_end
@@ -466,18 +484,17 @@ class RDFXMLHandler(handler.ContentHandler):
literalLang = current.language
if current.datatype is not None:
literalLang = None
- current.object = Literal(
- current.data, literalLang, current.datatype)
+ current.object = Literal(current.data, literalLang, current.datatype)
current.data = None
if self.next.end == self.list_node_element_end:
if current.object != RDF.nil:
self.store.add((current.list, RDF.rest, RDF.nil))
if current.object is not None:
- self.store.add(
- (self.parent.subject, current.predicate, current.object))
+ self.store.add((self.parent.subject, current.predicate, current.object))
if current.id is not None:
- self.add_reified(current.id, (self.parent.subject,
- current.predicate, current.object))
+ self.add_reified(
+ current.id, (self.parent.subject, current.predicate, current.object)
+ )
current.subject = None
def list_node_element_end(self, name, qname):
@@ -513,9 +530,9 @@ class RDFXMLHandler(handler.ContentHandler):
if not name[0] in current.declared:
current.declared[name[0]] = prefix
if prefix:
- current.object += (' xmlns:%s="%s"' % (prefix, name[0]))
+ current.object += ' xmlns:%s="%s"' % (prefix, name[0])
else:
- current.object += (' xmlns="%s"' % name[0])
+ current.object += ' xmlns="%s"' % name[0]
else:
current.object = "<%s" % name[1]
@@ -526,7 +543,7 @@ class RDFXMLHandler(handler.ContentHandler):
name = current.declared[name[0]] + ":" + name[1]
else:
name = name[1]
- current.object += (' %s=%s' % (name, quoteattr(value)))
+ current.object += " %s=%s" % (name, quoteattr(value))
current.object += ">"
def literal_element_char(self, data):
@@ -549,8 +566,7 @@ def create_parser(target, store):
try:
# Workaround for bug in expatreader.py. Needed when
# expatreader is trying to guess a prefix.
- parser.start_namespace_decl(
- "xml", "http://www.w3.org/XML/1998/namespace")
+ parser.start_namespace_decl("xml", "http://www.w3.org/XML/1998/namespace")
except AttributeError:
pass # Not present in Jython (at least)
parser.setFeature(handler.feature_namespaces, 1)
@@ -563,7 +579,6 @@ def create_parser(target, store):
class RDFXMLParser(Parser):
-
def __init__(self):
pass
diff --git a/rdflib/plugins/parsers/trig.py b/rdflib/plugins/parsers/trig.py
index f4c3ff1b..8f270de0 100644
--- a/rdflib/plugins/parsers/trig.py
+++ b/rdflib/plugins/parsers/trig.py
@@ -5,18 +5,18 @@ from rdflib.parser import Parser
from .notation3 import SinkParser, RDFSink
-def becauseSubGraph(*args, **kwargs): pass
+def becauseSubGraph(*args, **kwargs):
+ pass
class TrigSinkParser(SinkParser):
-
def directiveOrStatement(self, argstr, h):
- #import pdb; pdb.set_trace()
+ # import pdb; pdb.set_trace()
i = self.skipSpace(argstr, h)
if i < 0:
- return i # EOF
+ return i # EOF
j = self.graph(argstr, i)
if j >= 0:
@@ -46,12 +46,11 @@ class TrigSinkParser(SinkParser):
if j >= 0:
return j
- if argstr[i] == '[':
+ if argstr[i] == "[":
j = self.skipSpace(argstr, i + 1)
if j < 0:
- self.BadSyntax(argstr, i,
- "Expected ] got EOF")
- if argstr[j] == ']':
+ self.BadSyntax(argstr, i, "Expected ] got EOF")
+ if argstr[j] == "]":
res.append(self.blankNode())
return j + 1
return -1
@@ -66,8 +65,8 @@ class TrigSinkParser(SinkParser):
raise Exception if it looks like a graph, but isn't.
"""
- #import pdb; pdb.set_trace()
- j = self.sparqlTok('GRAPH', argstr, i) # optional GRAPH keyword
+ # import pdb; pdb.set_trace()
+ j = self.sparqlTok("GRAPH", argstr, i) # optional GRAPH keyword
if j >= 0:
i = j
@@ -81,10 +80,9 @@ class TrigSinkParser(SinkParser):
j = self.skipSpace(argstr, i)
if j < 0:
- self.BadSyntax(argstr, i,
- "EOF found when expected graph")
+ self.BadSyntax(argstr, i, "EOF found when expected graph")
- if argstr[j:j + 1] == "=": # optional = for legacy support
+ if argstr[j: j + 1] == "=": # optional = for legacy support
i = self.skipSpace(argstr, j + 1)
if i < 0:
@@ -92,7 +90,7 @@ class TrigSinkParser(SinkParser):
else:
i = j
- if argstr[i:i + 1] != "{":
+ if argstr[i: i + 1] != "{":
return -1 # the node wasn't part of a graph
j = i + 1
@@ -106,17 +104,15 @@ class TrigSinkParser(SinkParser):
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
- self.BadSyntax(
- argstr, i, "needed '}', found end.")
+ self.BadSyntax(argstr, i, "needed '}', found end.")
- if argstr[i:i + 1] == "}":
+ if argstr[i: i + 1] == "}":
j = i + 1
break
j = self.directiveOrStatement(argstr, i)
if j < 0:
- self.BadSyntax(
- argstr, i, "expected statement or '}'")
+ self.BadSyntax(argstr, i, "expected statement or '}'")
self._context = self._parentContext
self._reason2 = reason2
@@ -138,22 +134,23 @@ class TrigParser(Parser):
if encoding not in [None, "utf-8"]:
raise Exception(
- ("TriG files are always utf-8 encoded, ",
- "I was passed: %s") % encoding)
+ ("TriG files are always utf-8 encoded, ", "I was passed: %s") % encoding
+ )
# we're currently being handed a Graph, not a ConjunctiveGraph
assert graph.store.context_aware, "TriG Parser needs a context-aware store!"
conj_graph = ConjunctiveGraph(store=graph.store, identifier=graph.identifier)
conj_graph.default_context = graph # TODO: CG __init__ should have a
- # default_context arg
+ # default_context arg
# TODO: update N3Processor so that it can use conj_graph as the sink
conj_graph.namespace_manager = graph.namespace_manager
sink = RDFSink(conj_graph)
baseURI = conj_graph.absolutize(
- source.getPublicId() or source.getSystemId() or "")
+ source.getPublicId() or source.getSystemId() or ""
+ )
p = TrigSinkParser(sink, baseURI=baseURI, turtle=True)
p.loadStream(source.getByteStream())
diff --git a/rdflib/plugins/parsers/trix.py b/rdflib/plugins/parsers/trix.py
index 26e0e35b..3286aaeb 100644
--- a/rdflib/plugins/parsers/trix.py
+++ b/rdflib/plugins/parsers/trix.py
@@ -5,16 +5,16 @@ from rdflib.namespace import Namespace
from rdflib.term import URIRef
from rdflib.term import BNode
from rdflib.term import Literal
-from rdflib.graph import Graph, ConjunctiveGraph
+from rdflib.graph import Graph
from rdflib.exceptions import ParserError
from rdflib.parser import Parser
-from six import text_type
+
from xml.sax.saxutils import handler
from xml.sax import make_parser
from xml.sax.handler import ErrorHandler
-__all__ = ['create_parser', 'TriXHandler', 'TriXParser']
+__all__ = ["create_parser", "TriXHandler", "TriXParser"]
TRIXNS = Namespace("http://www.w3.org/2004/03/trix/trix-1/")
@@ -56,7 +56,8 @@ class TriXHandler(handler.ContentHandler):
if name[0] != str(TRIXNS):
self.error(
"Only elements in the TriX namespace are allowed. %s!=%s"
- % (name[0], TRIXNS))
+ % (name[0], TRIXNS)
+ )
if name[1] == "TriX":
if self.state == 0:
@@ -98,7 +99,7 @@ class TriXHandler(handler.ContentHandler):
self.datatype = None
try:
- self.lang = attrs.getValue((text_type(XMLNS), u"lang"))
+ self.lang = attrs.getValue((str(XMLNS), u"lang"))
except:
# language not required - ignore
pass
@@ -115,7 +116,7 @@ class TriXHandler(handler.ContentHandler):
self.lang = None
self.datatype = None
try:
- self.lang = attrs.getValue((text_type(XMLNS), u"lang"))
+ self.lang = attrs.getValue((str(XMLNS), u"lang"))
except:
# language not required - ignore
pass
@@ -143,46 +144,55 @@ class TriXHandler(handler.ContentHandler):
if name[0] != str(TRIXNS):
self.error(
"Only elements in the TriX namespace are allowed. %s!=%s"
- % (name[0], TRIXNS))
+ % (name[0], TRIXNS)
+ )
if name[1] == "uri":
if self.state == 3:
- self.graph = Graph(store=self.store,
- identifier=URIRef(self.chars.strip()))
+ self.graph = Graph(
+ store=self.store, identifier=URIRef(self.chars.strip())
+ )
self.state = 2
elif self.state == 4:
self.triple += [URIRef(self.chars.strip())]
else:
self.error(
- "Illegal internal self.state - This should never " +
- "happen if the SAX parser ensures XML syntax correctness")
+ "Illegal internal self.state - This should never "
+ + "happen if the SAX parser ensures XML syntax correctness"
+ )
elif name[1] == "id":
if self.state == 3:
- self.graph = Graph(self.store, identifier=self.get_bnode(
- self.chars.strip()))
+ self.graph = Graph(
+ self.store, identifier=self.get_bnode(self.chars.strip())
+ )
self.state = 2
elif self.state == 4:
self.triple += [self.get_bnode(self.chars.strip())]
else:
self.error(
- "Illegal internal self.state - This should never " +
- "happen if the SAX parser ensures XML syntax correctness")
+ "Illegal internal self.state - This should never "
+ + "happen if the SAX parser ensures XML syntax correctness"
+ )
elif name[1] == "plainLiteral" or name[1] == "typedLiteral":
if self.state == 4:
- self.triple += [Literal(
- self.chars, lang=self.lang, datatype=self.datatype)]
+ self.triple += [
+ Literal(self.chars, lang=self.lang, datatype=self.datatype)
+ ]
else:
self.error(
- "This should never happen if the SAX parser " +
- "ensures XML syntax correctness")
+ "This should never happen if the SAX parser "
+ + "ensures XML syntax correctness"
+ )
elif name[1] == "triple":
if self.state == 4:
if len(self.triple) != 3:
- self.error("Triple has wrong length, got %d elements: %s" %
- (len(self.triple), self.triple))
+ self.error(
+ "Triple has wrong length, got %d elements: %s"
+ % (len(self.triple), self.triple)
+ )
self.graph.add(self.triple)
# self.store.store.add(self.triple,context=self.graph)
@@ -190,8 +200,9 @@ class TriXHandler(handler.ContentHandler):
self.state = 2
else:
self.error(
- "This should never happen if the SAX parser " +
- "ensures XML syntax correctness")
+ "This should never happen if the SAX parser "
+ + "ensures XML syntax correctness"
+ )
elif name[1] == "graph":
self.graph = None
@@ -228,7 +239,8 @@ class TriXHandler(handler.ContentHandler):
info = "%s:%s:%s: " % (
locator.getSystemId(),
locator.getLineNumber(),
- locator.getColumnNumber())
+ locator.getColumnNumber(),
+ )
raise ParserError(info + message)
@@ -237,8 +249,7 @@ def create_parser(store):
try:
# Workaround for bug in expatreader.py. Needed when
# expatreader is trying to guess a prefix.
- parser.start_namespace_decl(
- "xml", "http://www.w3.org/XML/1998/namespace")
+ parser.start_namespace_decl("xml", "http://www.w3.org/XML/1998/namespace")
except AttributeError:
pass # Not present in Jython (at least)
parser.setFeature(handler.feature_namespaces, 1)
@@ -255,8 +266,9 @@ class TriXParser(Parser):
pass
def parse(self, source, sink, **args):
- assert sink.store.context_aware, (
- "TriXParser must be given a context aware store.")
+ assert (
+ sink.store.context_aware
+ ), "TriXParser must be given a context aware store."
self._parser = create_parser(sink.store)
content_handler = self._parser.getContentHandler()
diff --git a/rdflib/plugins/serializers/n3.py b/rdflib/plugins/serializers/n3.py
index c5efc735..6c4e2ec4 100644
--- a/rdflib/plugins/serializers/n3.py
+++ b/rdflib/plugins/serializers/n3.py
@@ -3,10 +3,9 @@ Notation 3 (N3) RDF graph serializer for RDFLib.
"""
from rdflib.graph import Graph
from rdflib.namespace import Namespace, OWL
-from rdflib.plugins.serializers.turtle import (
- TurtleSerializer, SUBJECT, OBJECT)
+from rdflib.plugins.serializers.turtle import TurtleSerializer, SUBJECT, OBJECT
-__all__ = ['N3Serializer']
+__all__ = ["N3Serializer"]
SWAP_LOG = Namespace("http://www.w3.org/2000/10/swap/log#")
@@ -17,10 +16,7 @@ class N3Serializer(TurtleSerializer):
def __init__(self, store, parent=None):
super(N3Serializer, self).__init__(store)
- self.keywords.update({
- OWL.sameAs: '=',
- SWAP_LOG.implies: '=>'
- })
+ self.keywords.update({OWL.sameAs: "=", SWAP_LOG.implies: "=>"})
self.parent = parent
def reset(self):
@@ -33,8 +29,9 @@ class N3Serializer(TurtleSerializer):
self.parent.subjectDone(subject)
def isDone(self, subject):
- return (super(N3Serializer, self).isDone(subject)
- and (not self.parent or self.parent.isDone(subject)))
+ return super(N3Serializer, self).isDone(subject) and (
+ not self.parent or self.parent.isDone(subject)
+ )
def startDocument(self):
super(N3Serializer, self).startDocument()
@@ -88,8 +85,7 @@ class N3Serializer(TurtleSerializer):
properties = self.buildPredicateHash(subject)
if len(properties) == 0:
return False
- return (self.s_clause(subject)
- or super(N3Serializer, self).statement(subject))
+ return self.s_clause(subject) or super(N3Serializer, self).statement(subject)
def path(self, node, position, newline=False):
if not self.p_clause(node, position):
@@ -97,10 +93,10 @@ class N3Serializer(TurtleSerializer):
def s_clause(self, subject):
if isinstance(subject, Graph):
- self.write('\n' + self.indent())
+ self.write("\n" + self.indent())
self.p_clause(subject, SUBJECT)
self.predicateList(subject)
- self.write(' .')
+ self.write(" .")
return True
else:
return False
@@ -109,13 +105,13 @@ class N3Serializer(TurtleSerializer):
if isinstance(node, Graph):
self.subjectDone(node)
if position is OBJECT:
- self.write(' ')
- self.write('{')
+ self.write(" ")
+ self.write("{")
self.depth += 1
serializer = N3Serializer(node, parent=self)
serializer.serialize(self.stream)
self.depth -= 1
- self.write(self.indent() + '}')
+ self.write(self.indent() + "}")
return True
else:
return False
diff --git a/rdflib/plugins/serializers/nquads.py b/rdflib/plugins/serializers/nquads.py
index 3785452b..70c414cd 100644
--- a/rdflib/plugins/serializers/nquads.py
+++ b/rdflib/plugins/serializers/nquads.py
@@ -2,20 +2,18 @@ import warnings
from rdflib.term import Literal
from rdflib.serializer import Serializer
-from six import b
from rdflib.plugins.serializers.nt import _quoteLiteral
-__all__ = ['NQuadsSerializer']
+__all__ = ["NQuadsSerializer"]
class NQuadsSerializer(Serializer):
-
def __init__(self, store):
if not store.context_aware:
raise Exception(
- "NQuads serialization only makes "
- "sense for context-aware stores!")
+ "NQuads serialization only makes " "sense for context-aware stores!"
+ )
super(NQuadsSerializer, self).__init__(store)
@@ -27,19 +25,24 @@ class NQuadsSerializer(Serializer):
encoding = self.encoding
for context in self.store.contexts():
for triple in context:
- stream.write(_nq_row(
- triple, context.identifier).encode(encoding, "replace"))
- stream.write(b("\n"))
+ stream.write(
+ _nq_row(triple, context.identifier).encode(encoding, "replace")
+ )
+ stream.write("\n".encode("latin-1"))
def _nq_row(triple, context):
if isinstance(triple[2], Literal):
- return u"%s %s %s %s .\n" % (triple[0].n3(),
- triple[1].n3(),
- _quoteLiteral(triple[2]),
- context.n3())
+ return u"%s %s %s %s .\n" % (
+ triple[0].n3(),
+ triple[1].n3(),
+ _quoteLiteral(triple[2]),
+ context.n3(),
+ )
else:
- return u"%s %s %s %s .\n" % (triple[0].n3(),
- triple[1].n3(),
- triple[2].n3(),
- context.n3())
+ return u"%s %s %s %s .\n" % (
+ triple[0].n3(),
+ triple[1].n3(),
+ triple[2].n3(),
+ context.n3(),
+ )
diff --git a/rdflib/plugins/serializers/nt.py b/rdflib/plugins/serializers/nt.py
index ea2e2f32..9ff72539 100644
--- a/rdflib/plugins/serializers/nt.py
+++ b/rdflib/plugins/serializers/nt.py
@@ -5,12 +5,11 @@ format.
"""
from rdflib.term import Literal
from rdflib.serializer import Serializer
-from six import b
import warnings
import codecs
-__all__ = ['NTSerializer']
+__all__ = ["NTSerializer"]
class NTSerializer(Serializer):
@@ -20,7 +19,7 @@ class NTSerializer(Serializer):
def __init__(self, store):
Serializer.__init__(self, store)
- self.encoding = 'ascii' # n-triples are ascii encoded
+ self.encoding = "ascii" # n-triples are ascii encoded
def serialize(self, stream, base=None, encoding=None, **args):
if base is not None:
@@ -30,7 +29,7 @@ class NTSerializer(Serializer):
encoding = self.encoding
for triple in self.store:
stream.write(_nt_row(triple).encode(self.encoding, "_rdflib_nt_escape"))
- stream.write(b("\n"))
+ stream.write("\n".encode("latin-1"))
class NT11Serializer(NTSerializer):
@@ -49,35 +48,33 @@ def _nt_row(triple):
return u"%s %s %s .\n" % (
triple[0].n3(),
triple[1].n3(),
- _quoteLiteral(triple[2]))
+ _quoteLiteral(triple[2]),
+ )
else:
- return u"%s %s %s .\n" % (triple[0].n3(),
- triple[1].n3(),
- triple[2].n3())
+ return u"%s %s %s .\n" % (triple[0].n3(), triple[1].n3(), triple[2].n3())
-def _quoteLiteral(l):
- '''
+def _quoteLiteral(l_):
+ """
a simpler version of term.Literal.n3()
- '''
+ """
- encoded = _quote_encode(l)
+ encoded = _quote_encode(l_)
- if l.language:
- if l.datatype:
+ if l_.language:
+ if l_.datatype:
raise Exception("Literal has datatype AND language!")
- return '%s@%s' % (encoded, l.language)
- elif l.datatype:
- return '%s^^<%s>' % (encoded, l.datatype)
+ return "%s@%s" % (encoded, l_.language)
+ elif l_.datatype:
+ return "%s^^<%s>" % (encoded, l_.datatype)
else:
- return '%s' % encoded
+ return "%s" % encoded
-def _quote_encode(l):
- return '"%s"' % l.replace('\\', '\\\\')\
- .replace('\n', '\\n')\
- .replace('"', '\\"')\
- .replace('\r', '\\r')
+def _quote_encode(l_):
+ return '"%s"' % l_.replace("\\", "\\\\").replace("\n", "\\n").replace(
+ '"', '\\"'
+ ).replace("\r", "\\r")
def _nt_unicode_error_resolver(err):
@@ -87,11 +84,11 @@ def _nt_unicode_error_resolver(err):
def _replace_single(c):
c = ord(c)
- fmt = u'\\u%04X' if c <= 0xFFFF else u'\\U%08X'
+ fmt = u"\\u%04X" if c <= 0xFFFF else u"\\U%08X"
return fmt % c
- string = err.object[err.start:err.end]
- return ("".join(_replace_single(c) for c in string), err.end)
+ string = err.object[err.start: err.end]
+ return "".join(_replace_single(c) for c in string), err.end
-codecs.register_error('_rdflib_nt_escape', _nt_unicode_error_resolver)
+codecs.register_error("_rdflib_nt_escape", _nt_unicode_error_resolver)
diff --git a/rdflib/plugins/serializers/rdfxml.py b/rdflib/plugins/serializers/rdfxml.py
index 631c8fe0..3f3c840d 100644
--- a/rdflib/plugins/serializers/rdfxml.py
+++ b/rdflib/plugins/serializers/rdfxml.py
@@ -9,20 +9,15 @@ from rdflib.util import first, more_than
from rdflib.collection import Collection
from rdflib.serializer import Serializer
-# from rdflib.exceptions import Error
-
-from six import b
-
from xml.sax.saxutils import quoteattr, escape
import xml.dom.minidom
from .xmlwriter import ESCAPE_ENTITIES
-__all__ = ['fix', 'XMLSerializer', 'PrettyXMLSerializer']
+__all__ = ["fix", "XMLSerializer", "PrettyXMLSerializer"]
class XMLSerializer(Serializer):
-
def __init__(self, store):
super(XMLSerializer, self).__init__(store)
@@ -46,22 +41,27 @@ class XMLSerializer(Serializer):
yield prefix, namespace
def serialize(self, stream, base=None, encoding=None, **args):
- self.base = base
+ # if base is given here, use that, if not and a base is set for the graph use that
+ if base is not None:
+ self.base = base
+ elif self.store.base is not None:
+ self.base = self.store.base
self.__stream = stream
self.__serialized = {}
encoding = self.encoding
- self.write = write = lambda uni: stream.write(
- uni.encode(encoding, 'replace'))
+ self.write = write = lambda uni: stream.write(uni.encode(encoding, "replace"))
# startDocument
write('<?xml version="1.0" encoding="%s"?>\n' % self.encoding)
# startRDF
- write('<rdf:RDF\n')
+ write("<rdf:RDF\n")
# If provided, write xml:base attribute for the RDF
if "xml_base" in args:
- write(' xml:base="%s"\n' % args['xml_base'])
+ write(' xml:base="%s"\n' % args["xml_base"])
+ elif self.base:
+ write(' xml:base="%s"\n' % self.base)
# TODO:
# assert(
# namespaces["http://www.w3.org/1999/02/22-rdf-syntax-ns#"]=='rdf')
@@ -73,7 +73,7 @@ class XMLSerializer(Serializer):
write(' xmlns:%s="%s"\n' % (prefix, namespace))
else:
write(' xmlns="%s"\n' % namespace)
- write('>\n')
+ write(">\n")
# write out triples by subject
for subject in self.store.subjects():
@@ -87,7 +87,7 @@ class XMLSerializer(Serializer):
del self.__serialized
def subject(self, subject, depth=1):
- if not subject in self.__serialized:
+ if subject not in self.__serialized:
self.__serialized[subject] = 1
if isinstance(subject, (BNode, URIRef)):
@@ -96,8 +96,7 @@ class XMLSerializer(Serializer):
element_name = "rdf:Description"
if isinstance(subject, BNode):
- write('%s<%s rdf:nodeID="%s"' % (
- indent, element_name, subject))
+ write('%s<%s rdf:nodeID="%s"' % (indent, element_name, subject))
else:
uri = quoteattr(self.relativize(subject))
write("%s<%s rdf:about=%s" % (indent, element_name, uri))
@@ -105,8 +104,7 @@ class XMLSerializer(Serializer):
if (subject, None, None) in self.store:
write(">\n")
- for predicate, object in self.store.predicate_objects(
- subject):
+ for predicate, object in self.store.predicate_objects(subject):
self.predicate(predicate, object, depth + 1)
write("%s</%s>\n" % (indent, element_name))
@@ -127,22 +125,24 @@ class XMLSerializer(Serializer):
if object.datatype:
attributes += ' rdf:datatype="%s"' % object.datatype
- write("%s<%s%s>%s</%s>\n" %
- (indent, qname, attributes,
- escape(object, ESCAPE_ENTITIES), qname))
+ write(
+ "%s<%s%s>%s</%s>\n"
+ % (indent, qname, attributes, escape(object, ESCAPE_ENTITIES), qname)
+ )
else:
if isinstance(object, BNode):
- write('%s<%s rdf:nodeID="%s"/>\n' %
- (indent, qname, object))
+ write('%s<%s rdf:nodeID="%s"/>\n' % (indent, qname, object))
else:
- write("%s<%s rdf:resource=%s/>\n" %
- (indent, qname, quoteattr(self.relativize(object))))
+ write(
+ "%s<%s rdf:resource=%s/>\n"
+ % (indent, qname, quoteattr(self.relativize(object)))
+ )
XMLLANG = "http://www.w3.org/XML/1998/namespacelang"
XMLBASE = "http://www.w3.org/XML/1998/namespacebase"
-OWL_NS = Namespace('http://www.w3.org/2002/07/owl#')
+OWL_NS = Namespace("http://www.w3.org/2002/07/owl#")
# TODO:
@@ -155,7 +155,6 @@ def fix(val):
class PrettyXMLSerializer(Serializer):
-
def __init__(self, store, max_depth=3):
super(PrettyXMLSerializer, self).__init__(store)
self.forceRDFAbout = set()
@@ -163,7 +162,11 @@ class PrettyXMLSerializer(Serializer):
def serialize(self, stream, base=None, encoding=None, **args):
self.__serialized = {}
store = self.store
- self.base = base
+ # if base is given here, use that, if not and a base is set for the graph use that
+ if base is not None:
+ self.base = base
+ elif store.base is not None:
+ self.base = store.base
self.max_depth = args.get("max_depth", 3)
assert self.max_depth > 0, "max_depth must be greater than 0"
@@ -171,8 +174,7 @@ class PrettyXMLSerializer(Serializer):
self.writer = writer = XMLWriter(stream, nm, encoding)
namespaces = {}
- possible = set(store.predicates()).union(
- store.objects(None, RDF.type))
+ possible = set(store.predicates()).union(store.objects(None, RDF.type))
for predicate in possible:
prefix, namespace, local = nm.compute_qname_strict(predicate)
@@ -184,6 +186,8 @@ class PrettyXMLSerializer(Serializer):
if "xml_base" in args:
writer.attribute(XMLBASE, args["xml_base"])
+ elif self.base:
+ writer.attribute(XMLBASE, self.base)
writer.namespaces(namespaces.items())
@@ -211,7 +215,7 @@ class PrettyXMLSerializer(Serializer):
self.subject(subject, 1)
writer.pop(RDF.RDF)
- stream.write(b("\n"))
+ stream.write("\n".encode("latin-1"))
# Set to None so that the memory can get garbage collected.
self.__serialized = None
@@ -226,7 +230,7 @@ class PrettyXMLSerializer(Serializer):
writer.pop(RDF.Description)
self.forceRDFAbout.remove(subject)
- elif not subject in self.__serialized:
+ elif subject not in self.__serialized:
self.__serialized[subject] = 1
type = first(store.objects(subject, RDF.type))
@@ -239,6 +243,7 @@ class PrettyXMLSerializer(Serializer):
writer.push(element)
if isinstance(subject, BNode):
+
def subj_as_obj_more_than(ceil):
return True
# more_than(store.triples((None, None, subject)), ceil)
@@ -274,8 +279,9 @@ class PrettyXMLSerializer(Serializer):
if object.language:
writer.attribute(XMLLANG, object.language)
- if (object.datatype == RDF.XMLLiteral
- and isinstance(object.value, xml.dom.minidom.Document)):
+ if object.datatype == RDF.XMLLiteral and isinstance(
+ object.value, xml.dom.minidom.Document
+ ):
writer.attribute(RDF.parseType, "Literal")
writer.text(u"")
writer.stream.write(object)
@@ -294,17 +300,20 @@ class PrettyXMLSerializer(Serializer):
else:
if first(store.objects(object, RDF.first)): # may not have type
- # RDF.List
+ # RDF.List
self.__serialized[object] = 1
# Warn that any assertions on object other than
# RDF.first and RDF.rest are ignored... including RDF.List
import warnings
+
warnings.warn(
- "Assertions on %s other than RDF.first " % repr(object) +
- "and RDF.rest are ignored ... including RDF.List",
- UserWarning, stacklevel=2)
+ "Assertions on %s other than RDF.first " % repr(object)
+ + "and RDF.rest are ignored ... including RDF.List",
+ UserWarning,
+ stacklevel=2,
+ )
writer.attribute(RDF.parseType, "Collection")
col = Collection(store, object)
@@ -318,9 +327,11 @@ class PrettyXMLSerializer(Serializer):
if not isinstance(item, URIRef):
self.__serialized[item] = 1
else:
- if first(store.triples_choices(
- (object, RDF.type, [OWL_NS.Class, RDFS.Class]))) \
- and isinstance(object, URIRef):
+ if first(
+ store.triples_choices(
+ (object, RDF.type, [OWL_NS.Class, RDFS.Class])
+ )
+ ) and isinstance(object, URIRef):
writer.attribute(RDF.resource, self.relativize(object))
elif depth <= self.max_depth:
@@ -328,9 +339,11 @@ class PrettyXMLSerializer(Serializer):
elif isinstance(object, BNode):
- if not object in self.__serialized \
- and (object, None, None) in store \
- and len(list(store.subjects(object=object))) == 1:
+ if (
+ object not in self.__serialized
+ and (object, None, None) in store
+ and len(list(store.subjects(object=object))) == 1
+ ):
# inline blank nodes if they haven't been serialized yet
# and are only referenced once (regardless of depth)
self.subject(object, depth + 1)
diff --git a/rdflib/plugins/serializers/trig.py b/rdflib/plugins/serializers/trig.py
index 6c05ad75..abf8d33e 100644
--- a/rdflib/plugins/serializers/trig.py
+++ b/rdflib/plugins/serializers/trig.py
@@ -5,17 +5,16 @@ See <http://www.w3.org/TR/trig/> for syntax specification.
from collections import defaultdict
-from rdflib.plugins.serializers.turtle import TurtleSerializer, _GEN_QNAME_FOR_DT, VERB
-from rdflib.term import BNode, Literal
-from six import b
+from rdflib.plugins.serializers.turtle import TurtleSerializer
+from rdflib.term import BNode
-__all__ = ['TrigSerializer']
+__all__ = ["TrigSerializer"]
class TrigSerializer(TurtleSerializer):
short_name = "trig"
- indentString = 4 * u' '
+ indentString = 4 * u" "
def __init__(self, store):
if store.context_aware:
@@ -39,17 +38,24 @@ class TrigSerializer(TurtleSerializer):
for triple in context:
self.preprocessTriple(triple)
- self._contexts[context]=(self.orderSubjects(), self._subjects, self._references)
+ self._contexts[context] = (
+ self.orderSubjects(),
+ self._subjects,
+ self._references,
+ )
def reset(self):
super(TrigSerializer, self).reset()
self._contexts = {}
- def serialize(self, stream, base=None, encoding=None,
- spacious=None, **args):
+ def serialize(self, stream, base=None, encoding=None, spacious=None, **args):
self.reset()
self.stream = stream
- self.base = base
+ # if base is given here, use that, if not and a base is set for the graph use that
+ if base is not None:
+ self.base = base
+ elif self.store.base is not None:
+ self.base = self.store.base
if spacious is not None:
self._spacious = spacious
@@ -69,7 +75,7 @@ class TrigSerializer(TurtleSerializer):
self._subjects = subjects
if self.default_context and store.identifier == self.default_context:
- self.write(self.indent() + '\n{')
+ self.write(self.indent() + "\n{")
else:
if isinstance(store.identifier, BNode):
iri = store.identifier.n3()
@@ -77,7 +83,7 @@ class TrigSerializer(TurtleSerializer):
iri = self.getQName(store.identifier)
if iri is None:
iri = store.identifier.n3()
- self.write(self.indent() + '\n%s {' % iri)
+ self.write(self.indent() + "\n%s {" % iri)
self.depth += 1
for subject in ordered_subjects:
@@ -86,9 +92,9 @@ class TrigSerializer(TurtleSerializer):
if firstTime:
firstTime = False
if self.statement(subject) and not firstTime:
- self.write('\n')
+ self.write("\n")
self.depth -= 1
- self.write('}\n')
+ self.write("}\n")
self.endDocument()
- stream.write(b("\n"))
+ stream.write("\n".encode("latin-1"))
diff --git a/rdflib/plugins/serializers/trix.py b/rdflib/plugins/serializers/trix.py
index fceec6bd..f6115bf8 100644
--- a/rdflib/plugins/serializers/trix.py
+++ b/rdflib/plugins/serializers/trix.py
@@ -6,9 +6,8 @@ from rdflib.namespace import Namespace
from rdflib.graph import Graph, ConjunctiveGraph
-from six import text_type, b
-__all__ = ['TriXSerializer']
+__all__ = ["TriXSerializer"]
# TODO: Move this somewhere central
TRIXNS = Namespace("http://www.w3.org/2004/03/trix/trix-1/")
@@ -20,7 +19,8 @@ class TriXSerializer(Serializer):
super(TriXSerializer, self).__init__(store)
if not store.context_aware:
raise Exception(
- "TriX serialization only makes sense for context-aware stores")
+ "TriX serialization only makes sense for context-aware stores"
+ )
def serialize(self, stream, base=None, encoding=None, **args):
@@ -29,6 +29,11 @@ class TriXSerializer(Serializer):
self.writer = XMLWriter(stream, nm, encoding, extra_ns={"": TRIXNS})
self.writer.push(TRIXNS[u"TriX"])
+ # if base is given here, use that, if not and a base is set for the graph use that
+ if base is None and self.store.base is not None:
+ base = self.store.base
+ if base is not None:
+ self.writer.attribute("http://www.w3.org/XML/1998/namespacebase", base)
self.writer.namespaces()
if isinstance(self.store, ConjunctiveGraph):
@@ -40,13 +45,16 @@ class TriXSerializer(Serializer):
raise Exception("Unknown graph type: " + type(self.store))
self.writer.pop()
- stream.write(b("\n"))
+ stream.write("\n".encode("latin-1"))
def _writeGraph(self, graph):
self.writer.push(TRIXNS[u"graph"])
+ if graph.base:
+ self.writer.attribute(
+ "http://www.w3.org/XML/1998/namespacebase", graph.base
+ )
if isinstance(graph.identifier, URIRef):
- self.writer.element(
- TRIXNS[u"uri"], content=text_type(graph.identifier))
+ self.writer.element(TRIXNS[u"uri"], content=str(graph.identifier))
for triple in graph.triples((None, None, None)):
self._writeTriple(triple)
@@ -56,23 +64,22 @@ class TriXSerializer(Serializer):
self.writer.push(TRIXNS[u"triple"])
for component in triple:
if isinstance(component, URIRef):
- self.writer.element(TRIXNS[u"uri"],
- content=text_type(component))
+ self.writer.element(TRIXNS[u"uri"], content=str(component))
elif isinstance(component, BNode):
- self.writer.element(TRIXNS[u"id"],
- content=text_type(component))
+ self.writer.element(TRIXNS[u"id"], content=str(component))
elif isinstance(component, Literal):
if component.datatype:
- self.writer.element(TRIXNS[u"typedLiteral"],
- content=text_type(component),
- attributes={TRIXNS[u"datatype"]:
- text_type(component.datatype)})
+ self.writer.element(
+ TRIXNS[u"typedLiteral"],
+ content=str(component),
+ attributes={TRIXNS[u"datatype"]: str(component.datatype)},
+ )
elif component.language:
- self.writer.element(TRIXNS[u"plainLiteral"],
- content=text_type(component),
- attributes={XMLNS[u"lang"]:
- text_type(component.language)})
+ self.writer.element(
+ TRIXNS[u"plainLiteral"],
+ content=str(component),
+ attributes={XMLNS[u"lang"]: str(component.language)},
+ )
else:
- self.writer.element(TRIXNS[u"plainLiteral"],
- content=text_type(component))
+ self.writer.element(TRIXNS[u"plainLiteral"], content=str(component))
self.writer.pop()
diff --git a/rdflib/plugins/serializers/turtle.py b/rdflib/plugins/serializers/turtle.py
index 1c58ba1b..a2270674 100644
--- a/rdflib/plugins/serializers/turtle.py
+++ b/rdflib/plugins/serializers/turtle.py
@@ -10,9 +10,8 @@ from rdflib.term import BNode, Literal, URIRef
from rdflib.exceptions import Error
from rdflib.serializer import Serializer
from rdflib.namespace import RDF, RDFS
-from six import b, text_type
-__all__ = ['RecursiveSerializer', 'TurtleSerializer']
+__all__ = ["RecursiveSerializer", "TurtleSerializer"]
def _object_comparator(a, b):
@@ -32,8 +31,8 @@ def _object_comparator(a, b):
return 0
except TypeError:
- a = text_type(a)
- b = text_type(b)
+ a = str(a)
+ b = str(b)
return (a > b) - (a < b)
@@ -53,16 +52,20 @@ class RecursiveSerializer(Serializer):
def addNamespace(self, prefix, uri):
if prefix in self.namespaces and self.namespaces[prefix] != uri:
- raise Exception("Trying to override namespace prefix %s => %s, but it's already bound to %s" % (prefix, uri, self.namespaces[prefix]))
+ raise Exception(
+ "Trying to override namespace prefix %s => %s, but it's already bound to %s"
+ % (prefix, uri, self.namespaces[prefix])
+ )
self.namespaces[prefix] = uri
def checkSubject(self, subject):
"""Check to see if the subject should be serialized yet"""
- if ((self.isDone(subject))
+ if (
+ (self.isDone(subject))
or (subject not in self._subjects)
or ((subject in self._topLevels) and (self.depth > 1))
- or (isinstance(subject, URIRef) and
- (self.depth >= self.maxDepth))):
+ or (isinstance(subject, URIRef) and (self.depth >= self.maxDepth))
+ ):
return False
return True
@@ -84,9 +87,10 @@ class RecursiveSerializer(Serializer):
seen[member] = True
recursable = [
- (isinstance(subject, BNode),
- self._references[subject], subject)
- for subject in self._subjects if subject not in seen]
+ (isinstance(subject, BNode), self._references[subject], subject)
+ for subject in self._subjects
+ if subject not in seen
+ ]
recursable.sort()
subjects.extend([subject for (isbnode, refs, subject) in recursable])
@@ -112,7 +116,7 @@ class RecursiveSerializer(Serializer):
self._topLevels = {}
if self.roundtrip_prefixes:
- if hasattr(self.roundtrip_prefixes, '__iter__'):
+ if hasattr(self.roundtrip_prefixes, "__iter__"):
for prefix, ns in self.store.namespaces():
if prefix in self.roundtrip_prefixes:
self.addNamespace(prefix, ns)
@@ -164,7 +168,7 @@ class RecursiveSerializer(Serializer):
def write(self, text):
"""Write text in given encoding."""
- self.stream.write(text.encode(self.encoding, 'replace'))
+ self.stream.write(text.encode(self.encoding, "replace"))
SUBJECT = 0
@@ -178,14 +182,12 @@ _SPACIOUS_OUTPUT = False
class TurtleSerializer(RecursiveSerializer):
short_name = "turtle"
- indentString = ' '
+ indentString = " "
def __init__(self, store):
self._ns_rewrite = {}
super(TurtleSerializer, self).__init__(store)
- self.keywords = {
- RDF.type: 'a'
- }
+ self.keywords = {RDF.type: "a"}
self.reset()
self.stream = None
self._spacious = _SPACIOUS_OUTPUT
@@ -200,8 +202,9 @@ class TurtleSerializer(RecursiveSerializer):
# so we need to keep track of ns rewrites we made so far.
- if (prefix > '' and prefix[0] == '_') \
- or self.namespaces.get(prefix, namespace) != namespace:
+ if (prefix > "" and prefix[0] == "_") or self.namespaces.get(
+ prefix, namespace
+ ) != namespace:
if prefix not in self._ns_rewrite:
p = "p" + prefix
@@ -220,11 +223,14 @@ class TurtleSerializer(RecursiveSerializer):
self._started = False
self._ns_rewrite = {}
- def serialize(self, stream, base=None, encoding=None,
- spacious=None, **args):
+ def serialize(self, stream, base=None, encoding=None, spacious=None, **args):
self.reset()
self.stream = stream
- self.base = base
+ # if base is given here, use that, if not and a base is set for the graph use that
+ if base is not None:
+ self.base = base
+ elif self.store.base is not None:
+ self.base = self.store.base
if spacious is not None:
self._spacious = spacious
@@ -241,10 +247,12 @@ class TurtleSerializer(RecursiveSerializer):
if firstTime:
firstTime = False
if self.statement(subject) and not firstTime:
- self.write('\n')
+ self.write("\n")
self.endDocument()
- stream.write(b("\n"))
+ stream.write("\n".encode("latin-1"))
+
+ self.base = None
def preprocessTriple(self, triple):
super(TurtleSerializer, self).preprocessTriple(triple)
@@ -273,7 +281,7 @@ class TurtleSerializer(RecursiveSerializer):
pfx = self.store.store.prefix(uri)
if pfx is not None:
- parts = (pfx, uri, '')
+ parts = (pfx, uri, "")
else:
# nothing worked
return None
@@ -286,119 +294,125 @@ class TurtleSerializer(RecursiveSerializer):
prefix = self.addNamespace(prefix, namespace)
- return u'%s:%s' % (prefix, local)
+ return u"%s:%s" % (prefix, local)
def startDocument(self):
self._started = True
ns_list = sorted(self.namespaces.items())
+
+ if self.base:
+ self.write(self.indent() + "@base <%s> .\n" % self.base)
for prefix, uri in ns_list:
- self.write(self.indent() + '@prefix %s: <%s> .\n' % (prefix, uri))
+ self.write(self.indent() + "@prefix %s: <%s> .\n" % (prefix, uri))
if ns_list and self._spacious:
- self.write('\n')
+ self.write("\n")
def endDocument(self):
if self._spacious:
- self.write('\n')
+ self.write("\n")
def statement(self, subject):
self.subjectDone(subject)
return self.s_squared(subject) or self.s_default(subject)
def s_default(self, subject):
- self.write('\n' + self.indent())
+ self.write("\n" + self.indent())
self.path(subject, SUBJECT)
self.predicateList(subject)
- self.write(' .')
+ self.write(" .")
return True
def s_squared(self, subject):
if (self._references[subject] > 0) or not isinstance(subject, BNode):
return False
- self.write('\n' + self.indent() + '[]')
+ self.write("\n" + self.indent() + "[]")
self.predicateList(subject)
- self.write(' .')
+ self.write(" .")
return True
def path(self, node, position, newline=False):
- if not (self.p_squared(node, position, newline)
- or self.p_default(node, position, newline)):
- raise Error("Cannot serialize node '%s'" % (node, ))
+ if not (
+ self.p_squared(node, position, newline)
+ or self.p_default(node, position, newline)
+ ):
+ raise Error("Cannot serialize node '%s'" % (node,))
def p_default(self, node, position, newline=False):
if position != SUBJECT and not newline:
- self.write(' ')
+ self.write(" ")
self.write(self.label(node, position))
return True
def label(self, node, position):
if node == RDF.nil:
- return '()'
+ return "()"
if position is VERB and node in self.keywords:
return self.keywords[node]
if isinstance(node, Literal):
return node._literal_n3(
use_plain=True,
- qname_callback=lambda dt: self.getQName(
- dt, _GEN_QNAME_FOR_DT))
+ qname_callback=lambda dt: self.getQName(dt, _GEN_QNAME_FOR_DT),
+ )
else:
node = self.relativize(node)
return self.getQName(node, position == VERB) or node.n3()
def p_squared(self, node, position, newline=False):
- if (not isinstance(node, BNode)
- or node in self._serialized
- or self._references[node] > 1
- or position == SUBJECT):
+ if (
+ not isinstance(node, BNode)
+ or node in self._serialized
+ or self._references[node] > 1
+ or position == SUBJECT
+ ):
return False
if not newline:
- self.write(' ')
+ self.write(" ")
if self.isValidList(node):
# this is a list
- self.write('(')
+ self.write("(")
self.depth += 1 # 2
self.doList(node)
self.depth -= 1 # 2
- self.write(' )')
+ self.write(" )")
else:
self.subjectDone(node)
self.depth += 2
# self.write('[\n' + self.indent())
- self.write('[')
+ self.write("[")
self.depth -= 1
# self.predicateList(node, newline=True)
self.predicateList(node, newline=False)
# self.write('\n' + self.indent() + ']')
- self.write(' ]')
+ self.write(" ]")
self.depth -= 1
return True
- def isValidList(self, l):
+ def isValidList(self, l_):
"""
Checks if l is a valid RDF list, i.e. no nodes have other properties.
"""
try:
- if self.store.value(l, RDF.first) is None:
+ if self.store.value(l_, RDF.first) is None:
return False
except:
return False
- while l:
- if l != RDF.nil and len(
- list(self.store.predicate_objects(l))) != 2:
+ while l_:
+ if l_ != RDF.nil and len(list(self.store.predicate_objects(l_))) != 2:
return False
- l = self.store.value(l, RDF.rest)
+ l_ = self.store.value(l_, RDF.rest)
return True
- def doList(self, l):
- while l:
- item = self.store.value(l, RDF.first)
+ def doList(self, l_):
+ while l_:
+ item = self.store.value(l_, RDF.first)
if item is not None:
self.path(item, OBJECT)
- self.subjectDone(l)
- l = self.store.value(l, RDF.rest)
+ self.subjectDone(l_)
+ l_ = self.store.value(l_, RDF.rest)
def predicateList(self, subject, newline=False):
properties = self.buildPredicateHash(subject)
@@ -408,7 +422,7 @@ class TurtleSerializer(RecursiveSerializer):
self.verb(propList[0], newline=newline)
self.objectList(properties[propList[0]])
for predicate in propList[1:]:
- self.write(' ;\n' + self.indent(1))
+ self.write(" ;\n" + self.indent(1))
self.verb(predicate, newline=True)
self.objectList(properties[predicate])
@@ -423,6 +437,6 @@ class TurtleSerializer(RecursiveSerializer):
self.depth += depthmod
self.path(objects[0], OBJECT)
for obj in objects[1:]:
- self.write(',\n' + self.indent(1))
+ self.write(",\n" + self.indent(1))
self.path(obj, OBJECT, newline=True)
self.depth -= depthmod
diff --git a/rdflib/plugins/serializers/xmlwriter.py b/rdflib/plugins/serializers/xmlwriter.py
index de720e8c..99d1e767 100644
--- a/rdflib/plugins/serializers/xmlwriter.py
+++ b/rdflib/plugins/serializers/xmlwriter.py
@@ -1,19 +1,15 @@
import codecs
from xml.sax.saxutils import quoteattr, escape
-__all__ = ['XMLWriter']
+__all__ = ["XMLWriter"]
-ESCAPE_ENTITIES = {
- '\r': '&#13;'
-}
+ESCAPE_ENTITIES = {"\r": "&#13;"}
class XMLWriter(object):
- def __init__(self, stream, namespace_manager, encoding=None,
- decl=1, extra_ns=None):
- encoding = encoding or 'utf-8'
- encoder, decoder, stream_reader, stream_writer = \
- codecs.lookup(encoding)
+ def __init__(self, stream, namespace_manager, encoding=None, decl=1, extra_ns=None):
+ encoding = encoding or "utf-8"
+ encoder, decoder, stream_reader, stream_writer = codecs.lookup(encoding)
self.stream = stream = stream_writer(stream)
if decl:
stream.write('<?xml version="1.0" encoding="%s"?>' % encoding)
@@ -24,6 +20,7 @@ class XMLWriter(object):
def __get_indent(self):
return " " * len(self.element_stack)
+
indent = property(__get_indent)
def __close_start_tag(self):
@@ -89,7 +86,7 @@ class XMLWriter(object):
def text(self, text):
self.__close_start_tag()
- if "<" in text and ">" in text and not "]]>" in text:
+ if "<" in text and ">" in text and "]]>" not in text:
self.stream.write("<![CDATA[")
self.stream.write(text)
self.stream.write("]]>")
diff --git a/rdflib/plugins/sleepycat.py b/rdflib/plugins/sleepycat.py
index 745e270a..735d3c3a 100644
--- a/rdflib/plugins/sleepycat.py
+++ b/rdflib/plugins/sleepycat.py
@@ -4,20 +4,21 @@ from os.path import exists, abspath
from os import mkdir
from rdflib.store import Store, VALID_STORE, NO_STORE
from rdflib.term import URIRef
-from six import b
-from six.moves.urllib.request import pathname2url
+from urllib.request import pathname2url
def bb(u):
- return u.encode('utf-8')
+ return u.encode("utf-8")
try:
from bsddb import db
+
has_bsddb = True
except ImportError:
try:
from bsddb3 import db
+
has_bsddb = True
except ImportError:
has_bsddb = False
@@ -37,7 +38,7 @@ if has_bsddb:
logger = logging.getLogger(__name__)
-__all__ = ['Sleepycat']
+__all__ = ["Sleepycat"]
class Sleepycat(Store):
@@ -49,8 +50,7 @@ class Sleepycat(Store):
def __init__(self, configuration=None, identifier=None):
if not has_bsddb:
- raise ImportError(
- "Unable to import bsddb/bsddb3, store is unusable.")
+ raise ImportError("Unable to import bsddb/bsddb3, store is unusable.")
self.__open = False
self.__identifier = identifier
super(Sleepycat, self).__init__(configuration)
@@ -59,6 +59,7 @@ class Sleepycat(Store):
def __get_identifier(self):
return self.__identifier
+
identifier = property(__get_identifier)
def _init_db_environment(self, homeDir, create=True):
@@ -112,8 +113,10 @@ class Sleepycat(Store):
self.__indicies = [None, ] * 3
self.__indicies_info = [None, ] * 3
for i in range(0, 3):
- index_name = to_key_func(
- i)((b("s"), b("p"), b("o")), b("c")).decode()
+ index_name = to_key_func(i)(
+ ("s".encode("latin-1"), "p".encode("latin-1"), "o".encode("latin-1")),
+ "c".encode("latin-1"),
+ ).decode()
index = db.DB(db_env)
index.set_flags(dbsetflags)
index.open(index_name, dbname, dbtype, dbopenflags, dbmode)
@@ -149,13 +152,15 @@ class Sleepycat(Store):
yield triple[i % 3]
i += 1
yield ""
+
return get_prefix
lookup[i] = (
self.__indicies[start],
get_prefix_func(start, start + len),
from_key_func(start),
- results_from_key_func(start, self._from_string))
+ results_from_key_func(start, self._from_string),
+ )
self.__lookup_dict = lookup
@@ -188,6 +193,7 @@ class Sleepycat(Store):
def __sync_run(self):
from time import sleep, time
+
try:
min_seconds, max_seconds = 10, 300
while self.__open:
@@ -195,12 +201,11 @@ class Sleepycat(Store):
t0 = t1 = time()
self.__needs_sync = False
while self.__open:
- sleep(.1)
+ sleep(0.1)
if self.__needs_sync:
t1 = time()
self.__needs_sync = False
- if time() - t1 > min_seconds \
- or time() - t0 > max_seconds:
+ if time() - t1 > min_seconds or time() - t0 > max_seconds:
self.__needs_sync = False
logger.debug("sync")
self.sync()
@@ -255,22 +260,20 @@ class Sleepycat(Store):
self.__contexts.put(bb(c), "", txn=txn)
contexts_value = cspo.get(
- bb("%s^%s^%s^%s^" % ("", s, p, o)), txn=txn) or b("")
- contexts = set(contexts_value.split(b("^")))
+ bb("%s^%s^%s^%s^" % ("", s, p, o)), txn=txn
+ ) or "".encode("latin-1")
+ contexts = set(contexts_value.split("^".encode("latin-1")))
contexts.add(bb(c))
- contexts_value = b("^").join(contexts)
+ contexts_value = "^".encode("latin-1").join(contexts)
assert contexts_value is not None
cspo.put(bb("%s^%s^%s^%s^" % (c, s, p, o)), "", txn=txn)
cpos.put(bb("%s^%s^%s^%s^" % (c, p, o, s)), "", txn=txn)
cosp.put(bb("%s^%s^%s^%s^" % (c, o, s, p)), "", txn=txn)
if not quoted:
- cspo.put(bb(
- "%s^%s^%s^%s^" % ("", s, p, o)), contexts_value, txn=txn)
- cpos.put(bb(
- "%s^%s^%s^%s^" % ("", p, o, s)), contexts_value, txn=txn)
- cosp.put(bb(
- "%s^%s^%s^%s^" % ("", o, s, p)), contexts_value, txn=txn)
+ cspo.put(bb("%s^%s^%s^%s^" % ("", s, p, o)), contexts_value, txn=txn)
+ cpos.put(bb("%s^%s^%s^%s^" % ("", p, o, s)), contexts_value, txn=txn)
+ cosp.put(bb("%s^%s^%s^%s^" % ("", o, s, p)), contexts_value, txn=txn)
self.__needs_sync = True
@@ -278,20 +281,28 @@ class Sleepycat(Store):
s, p, o = spo
cspo, cpos, cosp = self.__indicies
contexts_value = cspo.get(
- b("^").join([b(""), s, p, o, b("")]), txn=txn) or b("")
- contexts = set(contexts_value.split(b("^")))
+ "^".encode("latin-1").join(
+ ["".encode("latin-1"), s, p, o, "".encode("latin-1")]
+ ),
+ txn=txn,
+ ) or "".encode("latin-1")
+ contexts = set(contexts_value.split("^".encode("latin-1")))
contexts.discard(c)
- contexts_value = b("^").join(contexts)
+ contexts_value = "^".encode("latin-1").join(contexts)
for i, _to_key, _from_key in self.__indicies_info:
i.delete(_to_key((s, p, o), c), txn=txn)
if not quoted:
if contexts_value:
for i, _to_key, _from_key in self.__indicies_info:
- i.put(_to_key((s, p, o), b("")), contexts_value, txn=txn)
+ i.put(
+ _to_key((s, p, o), "".encode("latin-1")),
+ contexts_value,
+ txn=txn,
+ )
else:
for i, _to_key, _from_key in self.__indicies_info:
try:
- i.delete(_to_key((s, p, o), b("")), txn=txn)
+ i.delete(_to_key((s, p, o), "".encode("latin-1")), txn=txn)
except db.DBNotFoundError:
pass # TODO: is it okay to ignore these?
@@ -305,23 +316,25 @@ class Sleepycat(Store):
if context == self:
context = None
- if subject is not None \
- and predicate is not None \
- and object is not None \
- and context is not None:
+ if (
+ subject is not None
+ and predicate is not None
+ and object is not None
+ and context is not None
+ ):
s = _to_string(subject, txn=txn)
p = _to_string(predicate, txn=txn)
o = _to_string(object, txn=txn)
c = _to_string(context, txn=txn)
- value = self.__indicies[0].get(bb("%s^%s^%s^%s^" %
- (c, s, p, o)), txn=txn)
+ value = self.__indicies[0].get(bb("%s^%s^%s^%s^" % (c, s, p, o)), txn=txn)
if value is not None:
self.__remove((bb(s), bb(p), bb(o)), bb(c), txn=txn)
self.__needs_sync = True
else:
cspo, cpos, cosp = self.__indicies
index, prefix, from_key, results_from_key = self.__lookup(
- (subject, predicate, object), context, txn=txn)
+ (subject, predicate, object), context, txn=txn
+ )
cursor = index.cursor(txn=txn)
try:
@@ -337,18 +350,18 @@ class Sleepycat(Store):
try:
cursor.set_range(key)
# Hack to stop 2to3 converting this to next(cursor)
- current = getattr(cursor, 'next')()
+ current = getattr(cursor, "next")()
except db.DBNotFoundError:
current = None
cursor.close()
if key.startswith(prefix):
c, s, p, o = from_key(key)
if context is None:
- contexts_value = index.get(key, txn=txn) or b("")
+ contexts_value = index.get(key, txn=txn) or "".encode("latin-1")
# remove triple from all non quoted contexts
- contexts = set(contexts_value.split(b("^")))
+ contexts = set(contexts_value.split("^".encode("latin-1")))
# and from the conjunctive index
- contexts.add(b(""))
+ contexts.add("".encode("latin-1"))
for c in contexts:
for i, _to_key, _ in self.__indicies_info:
i.delete(_to_key((s, p, o), c), txn=txn)
@@ -363,7 +376,8 @@ class Sleepycat(Store):
# remove((None, None, None), c)
try:
self.__contexts.delete(
- bb(_to_string(context, txn=txn)), txn=txn)
+ bb(_to_string(context, txn=txn)), txn=txn
+ )
except db.DBNotFoundError:
pass
@@ -381,7 +395,8 @@ class Sleepycat(Store):
# _from_string = self._from_string ## UNUSED
index, prefix, from_key, results_from_key = self.__lookup(
- (subject, predicate, object), context, txn=txn)
+ (subject, predicate, object), context, txn=txn
+ )
cursor = index.cursor(txn=txn)
try:
@@ -395,14 +410,13 @@ class Sleepycat(Store):
try:
cursor.set_range(key)
# Cheap hack so 2to3 doesn't convert to next(cursor)
- current = getattr(cursor, 'next')()
+ current = getattr(cursor, "next")()
except db.DBNotFoundError:
current = None
cursor.close()
if key and key.startswith(prefix):
contexts_value = index.get(key, txn=txn)
- yield results_from_key(
- key, subject, predicate, object, contexts_value)
+ yield results_from_key(key, subject, predicate, object, contexts_value)
else:
break
@@ -413,7 +427,7 @@ class Sleepycat(Store):
context = None
if context is None:
- prefix = b("^")
+ prefix = "^".encode("latin-1")
else:
prefix = bb("%s^" % self._to_string(context))
@@ -426,7 +440,7 @@ class Sleepycat(Store):
if key.startswith(prefix):
count += 1
# Hack to stop 2to3 converting this to next(cursor)
- current = getattr(cursor, 'next')()
+ current = getattr(cursor, "next")()
else:
break
cursor.close()
@@ -445,14 +459,14 @@ class Sleepycat(Store):
prefix = prefix.encode("utf-8")
ns = self.__namespace.get(prefix, None)
if ns is not None:
- return URIRef(ns.decode('utf-8'))
+ return URIRef(ns.decode("utf-8"))
return None
def prefix(self, namespace):
namespace = namespace.encode("utf-8")
prefix = self.__prefix.get(namespace, None)
if prefix is not None:
- return prefix.decode('utf-8')
+ return prefix.decode("utf-8")
return None
def namespaces(self):
@@ -461,9 +475,9 @@ class Sleepycat(Store):
current = cursor.first()
while current:
prefix, namespace = current
- results.append((prefix.decode('utf-8'), namespace.decode('utf-8')))
+ results.append((prefix.decode("utf-8"), namespace.decode("utf-8")))
# Hack to stop 2to3 converting this to next(cursor)
- current = getattr(cursor, 'next')()
+ current = getattr(cursor, "next")()
cursor.close()
for prefix, namespace in results:
yield prefix, URIRef(namespace)
@@ -477,10 +491,9 @@ class Sleepycat(Store):
s = _to_string(s)
p = _to_string(p)
o = _to_string(o)
- contexts = self.__indicies[0].get(bb(
- "%s^%s^%s^%s^" % ("", s, p, o)))
+ contexts = self.__indicies[0].get(bb("%s^%s^%s^%s^" % ("", s, p, o)))
if contexts:
- for c in contexts.split(b("^")):
+ for c in contexts.split("^".encode("latin-1")):
if c:
yield _from_string(c)
else:
@@ -496,7 +509,7 @@ class Sleepycat(Store):
try:
cursor.set_range(key)
# Hack to stop 2to3 converting this to next(cursor)
- current = getattr(cursor, 'next')()
+ current = getattr(cursor, "next")()
except db.DBNotFoundError:
current = None
cursor.close()
@@ -545,38 +558,44 @@ class Sleepycat(Store):
index, prefix_func, from_key, results_from_key = self.__lookup_dict[i]
# print (subject, predicate, object), context, prefix_func, index
# #DEBUG
- prefix = bb(
- "^".join(prefix_func((subject, predicate, object), context)))
+ prefix = bb("^".join(prefix_func((subject, predicate, object), context)))
return index, prefix, from_key, results_from_key
def to_key_func(i):
def to_key(triple, context):
"Takes a string; returns key"
- return b("^").join(
- (context,
- triple[i % 3],
- triple[(i + 1) % 3],
- triple[(i + 2) % 3], b(""))) # "" to tac on the trailing ^
+ return "^".encode("latin-1").join(
+ (
+ context,
+ triple[i % 3],
+ triple[(i + 1) % 3],
+ triple[(i + 2) % 3],
+ "".encode("latin-1"),
+ )
+ ) # "" to tac on the trailing ^
+
return to_key
def from_key_func(i):
def from_key(key):
"Takes a key; returns string"
- parts = key.split(b("^"))
- return \
- parts[0], \
- parts[(3 - i + 0) % 3 + 1], \
- parts[(3 - i + 1) % 3 + 1], \
- parts[(3 - i + 2) % 3 + 1]
+ parts = key.split("^".encode("latin-1"))
+ return (
+ parts[0],
+ parts[(3 - i + 0) % 3 + 1],
+ parts[(3 - i + 1) % 3 + 1],
+ parts[(3 - i + 2) % 3 + 1],
+ )
+
return from_key
def results_from_key_func(i, from_string):
def from_key(key, subject, predicate, object, contexts_value):
"Takes a key and subject, predicate, object; returns tuple for yield"
- parts = key.split(b("^"))
+ parts = key.split("^".encode("latin-1"))
if subject is None:
# TODO: i & 1: # dis assemble and/or measure to see which is faster
# subject is None or i & 1
@@ -591,8 +610,11 @@ def results_from_key_func(i, from_string):
o = from_string(parts[(3 - i + 2) % 3 + 1])
else:
o = object
- return (s, p, o), (
- from_string(c) for c in contexts_value.split(b("^")) if c)
+ return (
+ (s, p, o),
+ (from_string(c) for c in contexts_value.split("^".encode("latin-1")) if c),
+ )
+
return from_key
diff --git a/rdflib/plugins/sparql/__init__.py b/rdflib/plugins/sparql/__init__.py
index bc1227f2..9efbd87f 100644
--- a/rdflib/plugins/sparql/__init__.py
+++ b/rdflib/plugins/sparql/__init__.py
@@ -28,7 +28,7 @@ NotImplementedError if they cannot handle a certain part
"""
-PLUGIN_ENTRY_POINT = 'rdf.plugins.sparqleval'
+PLUGIN_ENTRY_POINT = "rdf.plugins.sparqleval"
from . import parser
from . import operators
diff --git a/rdflib/plugins/sparql/aggregates.py b/rdflib/plugins/sparql/aggregates.py
index b63b6edb..8c70aeb1 100644
--- a/rdflib/plugins/sparql/aggregates.py
+++ b/rdflib/plugins/sparql/aggregates.py
@@ -1,6 +1,4 @@
from rdflib import Literal, XSD
-
-from six import text_type, itervalues
from rdflib.plugins.sparql.evalutils import _eval, NotBoundError, _val
from rdflib.plugins.sparql.operators import numeric
from rdflib.plugins.sparql.datatypes import type_promotion
@@ -41,7 +39,6 @@ class Accumulator(object):
class Counter(Accumulator):
-
def __init__(self, aggregation):
super(Counter, self).__init__(aggregation)
self.value = 0
@@ -73,16 +70,14 @@ class Counter(Accumulator):
def type_safe_numbers(*args):
- if (
- any(isinstance(arg, float) for arg in args) and
- any(isinstance(arg, Decimal) for arg in args)
+ if any(isinstance(arg, float) for arg in args) and any(
+ isinstance(arg, Decimal) for arg in args
):
return map(float, args)
return args
class Sum(Accumulator):
-
def __init__(self, aggregation):
super(Sum, self).__init__(aggregation)
self.value = 0
@@ -109,7 +104,6 @@ class Sum(Accumulator):
class Average(Accumulator):
-
def __init__(self, aggregation):
super(Average, self).__init__(aggregation)
self.counter = 0
@@ -173,13 +167,11 @@ class Extremum(Accumulator):
class Minimum(Extremum):
-
def compare(self, val1, val2):
return min(val1, val2, key=_val)
class Maximum(Extremum):
-
def compare(self, val1, val2):
return max(val1, val2, key=_val)
@@ -207,7 +199,6 @@ class Sample(Accumulator):
class GroupConcat(Accumulator):
-
def __init__(self, aggregation):
super(GroupConcat, self).__init__(aggregation)
# only GROUPCONCAT needs to have a list as accumlator
@@ -225,7 +216,7 @@ class GroupConcat(Accumulator):
pass
def get_value(self):
- return Literal(self.separator.join(text_type(v) for v in self.value))
+ return Literal(self.separator.join(str(v) for v in self.value))
class Aggregator(object):
@@ -255,12 +246,12 @@ class Aggregator(object):
# SAMPLE accumulators may delete themselves
# => iterate over list not generator
- for acc in list(itervalues(self.accumulators)):
+ for acc in list(self.accumulators.values()):
if acc.use_row(row):
acc.update(row, self)
def get_bindings(self):
"""calculate and set last values"""
- for acc in itervalues(self.accumulators):
+ for acc in self.accumulators.values():
acc.set_value(self.bindings)
return self.bindings
diff --git a/rdflib/plugins/sparql/algebra.py b/rdflib/plugins/sparql/algebra.py
index 00a6d0b2..cfd4f410 100644
--- a/rdflib/plugins/sparql/algebra.py
+++ b/rdflib/plugins/sparql/algebra.py
@@ -1,4 +1,3 @@
-
"""
Converting the 'parse-tree' output of pyparsing to a SPARQL Algebra expression
@@ -19,9 +18,11 @@ from rdflib import Literal, Variable, URIRef, BNode
from rdflib.plugins.sparql.sparql import Prologue, Query
from rdflib.plugins.sparql.parserutils import CompValue, Expr
from rdflib.plugins.sparql.operators import (
- and_, TrueFilter, simplify as simplifyFilters)
-from rdflib.paths import (
- InvPath, AlternativePath, SequencePath, MulPath, NegatedPath)
+ and_,
+ TrueFilter,
+ simplify as simplifyFilters,
+)
+from rdflib.paths import InvPath, AlternativePath, SequencePath, MulPath, NegatedPath
from pyparsing import ParseResults
@@ -29,66 +30,72 @@ from pyparsing import ParseResults
# ---------------------------
# Some convenience methods
def OrderBy(p, expr):
- return CompValue('OrderBy', p=p, expr=expr)
+ return CompValue("OrderBy", p=p, expr=expr)
def ToMultiSet(p):
- return CompValue('ToMultiSet', p=p)
+ return CompValue("ToMultiSet", p=p)
def Union(p1, p2):
- return CompValue('Union', p1=p1, p2=p2)
+ return CompValue("Union", p1=p1, p2=p2)
def Join(p1, p2):
- return CompValue('Join', p1=p1, p2=p2)
+ return CompValue("Join", p1=p1, p2=p2)
def Minus(p1, p2):
- return CompValue('Minus', p1=p1, p2=p2)
+ return CompValue("Minus", p1=p1, p2=p2)
def Graph(term, graph):
- return CompValue('Graph', term=term, p=graph)
+ return CompValue("Graph", term=term, p=graph)
def BGP(triples=None):
- return CompValue('BGP', triples=triples or [])
+ return CompValue("BGP", triples=triples or [])
def LeftJoin(p1, p2, expr):
- return CompValue('LeftJoin', p1=p1, p2=p2, expr=expr)
+ return CompValue("LeftJoin", p1=p1, p2=p2, expr=expr)
def Filter(expr, p):
- return CompValue('Filter', expr=expr, p=p)
+ return CompValue("Filter", expr=expr, p=p)
def Extend(p, expr, var):
- return CompValue('Extend', p=p, expr=expr, var=var)
+ return CompValue("Extend", p=p, expr=expr, var=var)
def Values(res):
- return CompValue('values', res=res)
+ return CompValue("values", res=res)
def Project(p, PV):
- return CompValue('Project', p=p, PV=PV)
+ return CompValue("Project", p=p, PV=PV)
def Group(p, expr=None):
- return CompValue('Group', p=p, expr=expr)
+ return CompValue("Group", p=p, expr=expr)
def _knownTerms(triple, varsknown, varscount):
- return (len([x for x in triple if x not in varsknown and
- isinstance(x, (Variable, BNode))]),
- -sum(varscount.get(x, 0) for x in triple),
- not isinstance(triple[2], Literal),
- )
+ return (
+ len(
+ [
+ x
+ for x in triple
+ if x not in varsknown and isinstance(x, (Variable, BNode))
+ ]
+ ),
+ -sum(varscount.get(x, 0) for x in triple),
+ not isinstance(triple[2], Literal),
+ )
-def reorderTriples(l):
+def reorderTriples(l_):
"""
Reorder triple patterns so that we execute the
ones with most bindings first
@@ -98,10 +105,10 @@ def reorderTriples(l):
if isinstance(term, (Variable, BNode)):
varsknown.add(term)
- l = [(None, x) for x in l]
+ l_ = [(None, x) for x in l_]
varsknown = set()
varscount = collections.defaultdict(int)
- for t in l:
+ for t in l_:
for c in t[1]:
if isinstance(c, (Variable, BNode)):
varscount[c] += 1
@@ -114,27 +121,25 @@ def reorderTriples(l):
# we sort by decorate/undecorate, since we need the value of the sort keys
- while i < len(l):
- l[i:] = sorted((_knownTerms(x[
- 1], varsknown, varscount), x[1]) for x in l[i:])
- t = l[i][0][0] # top block has this many terms bound
+ while i < len(l_):
+ l_[i:] = sorted((_knownTerms(x[1], varsknown, varscount), x[1]) for x in l_[i:])
+ t = l_[i][0][0] # top block has this many terms bound
j = 0
- while i + j < len(l) and l[i + j][0][0] == t:
- for c in l[i + j][1]:
+ while i + j < len(l_) and l_[i + j][0][0] == t:
+ for c in l_[i + j][1]:
_addvar(c, varsknown)
j += 1
i += 1
- return [x[1] for x in l]
+ return [x[1] for x in l_]
def triples(l):
l = reduce(lambda x, y: x + y, l)
if (len(l) % 3) != 0:
- raise Exception('these aint triples')
- return reorderTriples((l[x], l[x + 1], l[x + 2])
- for x in range(0, len(l), 3))
+ raise Exception("these aint triples")
+ return reorderTriples((l[x], l[x + 1], l[x + 2]) for x in range(0, len(l), 3))
def translatePName(p, prologue):
@@ -142,11 +147,12 @@ def translatePName(p, prologue):
Expand prefixed/relative URIs
"""
if isinstance(p, CompValue):
- if p.name == 'pname':
+ if p.name == "pname":
return prologue.absolutize(p)
- if p.name == 'literal':
- return Literal(p.string, lang=p.lang,
- datatype=prologue.absolutize(p.datatype))
+ if p.name == "literal":
+ return Literal(
+ p.string, lang=p.lang, datatype=prologue.absolutize(p.datatype)
+ )
elif isinstance(p, URIRef):
return prologue.absolutize(p)
@@ -157,39 +163,39 @@ def translatePath(p):
"""
if isinstance(p, CompValue):
- if p.name == 'PathAlternative':
+ if p.name == "PathAlternative":
if len(p.part) == 1:
return p.part[0]
else:
return AlternativePath(*p.part)
- elif p.name == 'PathSequence':
+ elif p.name == "PathSequence":
if len(p.part) == 1:
return p.part[0]
else:
return SequencePath(*p.part)
- elif p.name == 'PathElt':
+ elif p.name == "PathElt":
if not p.mod:
return p.part
else:
if isinstance(p.part, list):
if len(p.part) != 1:
- raise Exception('Denkfehler!')
+ raise Exception("Denkfehler!")
return MulPath(p.part[0], p.mod)
else:
return MulPath(p.part, p.mod)
- elif p.name == 'PathEltOrInverse':
+ elif p.name == "PathEltOrInverse":
if isinstance(p.part, list):
if len(p.part) != 1:
- raise Exception('Denkfehler!')
+ raise Exception("Denkfehler!")
return InvPath(p.part[0])
else:
return InvPath(p.part)
- elif p.name == 'PathNegatedPropertySet':
+ elif p.name == "PathNegatedPropertySet":
if isinstance(p.part, list):
return NegatedPath(AlternativePath(*p.part))
else:
@@ -204,9 +210,9 @@ def translateExists(e):
def _c(n):
if isinstance(n, CompValue):
- if n.name in ('Builtin_EXISTS', 'Builtin_NOTEXISTS'):
+ if n.name in ("Builtin_EXISTS", "Builtin_NOTEXISTS"):
n.graph = translateGroupGraphPattern(n.graph)
- if n.graph.name == 'Filter':
+ if n.graph.name == "Filter":
# filters inside (NOT) EXISTS can see vars bound outside
n.graph.no_isolated_scope = True
@@ -229,7 +235,7 @@ def collectAndRemoveFilters(parts):
i = 0
while i < len(parts):
p = parts[i]
- if p.name == 'Filter':
+ if p.name == "Filter":
filters.append(translateExists(p.expr))
parts.pop(i)
else:
@@ -254,8 +260,7 @@ def translateGroupOrUnionGraphPattern(graphPattern):
def translateGraphGraphPattern(graphPattern):
- return Graph(graphPattern.term,
- translateGroupGraphPattern(graphPattern.graph))
+ return Graph(graphPattern.term, translateGroupGraphPattern(graphPattern.graph))
def translateInlineData(graphPattern):
@@ -267,7 +272,7 @@ def translateGroupGraphPattern(graphPattern):
http://www.w3.org/TR/sparql11-query/#convertGraphPattern
"""
- if graphPattern.name == 'SubSelect':
+ if graphPattern.name == "SubSelect":
return ToMultiSet(translate(graphPattern)[0])
if not graphPattern.part:
@@ -277,9 +282,9 @@ def translateGroupGraphPattern(graphPattern):
g = []
for p in graphPattern.part:
- if p.name == 'TriplesBlock':
+ if p.name == "TriplesBlock":
# merge adjacent TripleBlocks
- if not (g and g[-1].name == 'BGP'):
+ if not (g and g[-1].name == "BGP"):
g.append(BGP())
g[-1]["triples"] += triples(p.triples)
else:
@@ -287,30 +292,31 @@ def translateGroupGraphPattern(graphPattern):
G = BGP()
for p in g:
- if p.name == 'OptionalGraphPattern':
+ if p.name == "OptionalGraphPattern":
A = translateGroupGraphPattern(p.graph)
- if A.name == 'Filter':
+ if A.name == "Filter":
G = LeftJoin(G, A.p, A.expr)
else:
G = LeftJoin(G, A, TrueFilter)
- elif p.name == 'MinusGraphPattern':
+ elif p.name == "MinusGraphPattern":
G = Minus(p1=G, p2=translateGroupGraphPattern(p.graph))
- elif p.name == 'GroupOrUnionGraphPattern':
+ elif p.name == "GroupOrUnionGraphPattern":
G = Join(p1=G, p2=translateGroupOrUnionGraphPattern(p))
- elif p.name == 'GraphGraphPattern':
+ elif p.name == "GraphGraphPattern":
G = Join(p1=G, p2=translateGraphGraphPattern(p))
- elif p.name == 'InlineData':
+ elif p.name == "InlineData":
G = Join(p1=G, p2=translateInlineData(p))
- elif p.name == 'ServiceGraphPattern':
+ elif p.name == "ServiceGraphPattern":
G = Join(p1=G, p2=p)
- elif p.name in ('BGP', 'Extend'):
+ elif p.name in ("BGP", "Extend"):
G = Join(p1=G, p2=p)
- elif p.name == 'Bind':
+ elif p.name == "Bind":
G = Extend(G, p.expr, p.var)
else:
- raise Exception('Unknown part in GroupGraphPattern: %s - %s' %
- (type(p), p.name))
+ raise Exception(
+ "Unknown part in GroupGraphPattern: %s - %s" % (type(p), p.name)
+ )
if filters:
G = Filter(expr=filters, p=G)
@@ -372,9 +378,7 @@ def _traverseAgg(e, visitor=lambda n, v: None):
return visitor(e, res)
-def traverse(
- tree, visitPre=lambda n: None,
- visitPost=lambda n: None, complete=None):
+def traverse(tree, visitPre=lambda n: None, visitPost=lambda n: None, complete=None):
"""
Traverse tree, visit each node with visit function
visit function may raise StopTraversal to stop traversal
@@ -397,7 +401,7 @@ def _hasAggregate(x):
"""
if isinstance(x, CompValue):
- if x.name.startswith('Aggregate_'):
+ if x.name.startswith("Aggregate_"):
raise StopTraversal(True)
@@ -409,9 +413,9 @@ def _aggs(e, A):
# TODO: nested Aggregates?
- if isinstance(e, CompValue) and e.name.startswith('Aggregate_'):
+ if isinstance(e, CompValue) and e.name.startswith("Aggregate_"):
A.append(e)
- aggvar = Variable('__agg_%d__' % len(A))
+ aggvar = Variable("__agg_%d__" % len(A))
e["res"] = aggvar
return aggvar
@@ -426,7 +430,7 @@ def _findVars(x, res):
if x.name == "Bind":
res.add(x.var)
return x # stop recursion and finding vars in the expr
- elif x.name == 'SubSelect':
+ elif x.name == "SubSelect":
if x.projection:
res.update(v.var or v.evar for v in x.projection)
return x
@@ -443,13 +447,16 @@ def _addVars(x, children):
x["_vars"] = set()
elif x.name == "Extend":
# vars only used in the expr for a bind should not be included
- x["_vars"] = reduce(operator.or_, [child for child,
- part in zip(children, x) if part != 'expr'], set())
+ x["_vars"] = reduce(
+ operator.or_,
+ [child for child, part in zip(children, x) if part != "expr"],
+ set(),
+ )
else:
x["_vars"] = set(reduce(operator.or_, children, set()))
- if x.name == 'SubSelect':
+ if x.name == "SubSelect":
if x.projection:
s = set(v.var or v.evar for v in x.projection)
else:
@@ -470,7 +477,7 @@ def _sample(e, v=None):
if isinstance(e, CompValue) and e.name.startswith("Aggregate_"):
return e # do not replace vars in aggregates
if isinstance(e, Variable) and v != e:
- return CompValue('Aggregate_Sample', vars=e)
+ return CompValue("Aggregate_Sample", vars=e)
def _simplifyFilters(e):
@@ -505,11 +512,11 @@ def translateAggregates(q, M):
if q.projection:
for v in q.projection:
if v.var:
- rv = Variable('__agg_%d__' % (len(A) + 1))
- A.append(CompValue('Aggregate_Sample', vars=v.var, res=rv))
+ rv = Variable("__agg_%d__" % (len(A) + 1))
+ A.append(CompValue("Aggregate_Sample", vars=v.var, res=rv))
E.append((rv, v.var))
- return CompValue('AggregateJoin', A=A, p=M), E
+ return CompValue("AggregateJoin", A=A, p=M), E
def translateValues(v):
@@ -554,17 +561,22 @@ def translate(q):
conditions = []
# convert "GROUP BY (?expr as ?var)" to an Extend
for c in q.groupby.condition:
- if isinstance(c, CompValue) and c.name == 'GroupAs':
+ if isinstance(c, CompValue) and c.name == "GroupAs":
M = Extend(M, c.expr, c.var)
c = c.var
conditions.append(c)
M = Group(p=M, expr=conditions)
aggregate = True
- elif traverse(q.having, _hasAggregate, complete=False) or \
- traverse(q.orderby, _hasAggregate, complete=False) or \
- any(traverse(x.expr, _hasAggregate, complete=False)
- for x in q.projection or [] if x.evar):
+ elif (
+ traverse(q.having, _hasAggregate, complete=False)
+ or traverse(q.orderby, _hasAggregate, complete=False)
+ or any(
+ traverse(x.expr, _hasAggregate, complete=False)
+ for x in q.projection or []
+ if x.evar
+ )
+ ):
# if any aggregate is used, implicit group by
M = Group(p=M)
aggregate = True
@@ -604,17 +616,22 @@ def translate(q):
# ORDER BY
if q.orderby:
- M = OrderBy(M, [CompValue('OrderCondition', expr=c.expr,
- order=c.order) for c in q.orderby.condition])
+ M = OrderBy(
+ M,
+ [
+ CompValue("OrderCondition", expr=c.expr, order=c.order)
+ for c in q.orderby.condition
+ ],
+ )
# PROJECT
M = Project(M, PV)
if q.modifier:
- if q.modifier == 'DISTINCT':
- M = CompValue('Distinct', p=M)
- elif q.modifier == 'REDUCED':
- M = CompValue('Reduced', p=M)
+ if q.modifier == "DISTINCT":
+ M = CompValue("Distinct", p=M)
+ elif q.modifier == "REDUCED":
+ M = CompValue("Reduced", p=M)
if q.limitoffset:
offset = 0
@@ -622,10 +639,11 @@ def translate(q):
offset = q.limitoffset.offset.toPython()
if q.limitoffset.limit is not None:
- M = CompValue('Slice', p=M, start=offset,
- length=q.limitoffset.limit.toPython())
+ M = CompValue(
+ "Slice", p=M, start=offset, length=q.limitoffset.limit.toPython()
+ )
else:
- M = CompValue('Slice', p=M, start=offset)
+ M = CompValue("Slice", p=M, start=offset)
return M, PV
@@ -633,12 +651,12 @@ def translate(q):
def simplify(n):
"""Remove joins to empty BGPs"""
if isinstance(n, CompValue):
- if n.name == 'Join':
- if n.p1.name == 'BGP' and len(n.p1.triples) == 0:
+ if n.name == "Join":
+ if n.p1.name == "BGP" and len(n.p1.triples) == 0:
return n.p2
- if n.p2.name == 'BGP' and len(n.p2.triples) == 0:
+ if n.p2.name == "BGP" and len(n.p2.triples) == 0:
return n.p1
- elif n.name == 'BGP':
+ elif n.name == "BGP":
n["triples"] = reorderTriples(n.triples)
return n
@@ -651,10 +669,10 @@ def analyse(n, children):
"""
if isinstance(n, CompValue):
- if n.name == 'Join':
+ if n.name == "Join":
n["lazy"] = all(children)
return False
- elif n.name in ('Slice', 'Distinct'):
+ elif n.name in ("Slice", "Distinct"):
return False
else:
return all(children)
@@ -674,9 +692,9 @@ def translatePrologue(p, base, initNs=None, prologue=None):
prologue.bind(k, v)
for x in p:
- if x.name == 'Base':
+ if x.name == "Base":
prologue.base = x.iri
- elif x.name == 'PrefixDecl':
+ elif x.name == "PrefixDecl":
prologue.bind(x.prefix, prologue.absolutize(x.iri))
return prologue
@@ -699,26 +717,24 @@ def translateQuads(quads):
def translateUpdate1(u, prologue):
- if u.name in ('Load', 'Clear', 'Drop', 'Create'):
+ if u.name in ("Load", "Clear", "Drop", "Create"):
pass # no translation needed
- elif u.name in ('Add', 'Move', 'Copy'):
+ elif u.name in ("Add", "Move", "Copy"):
pass
- elif u.name in ('InsertData', 'DeleteData', 'DeleteWhere'):
+ elif u.name in ("InsertData", "DeleteData", "DeleteWhere"):
t, q = translateQuads(u.quads)
u["quads"] = q
u["triples"] = t
- if u.name in ('DeleteWhere', 'DeleteData'):
+ if u.name in ("DeleteWhere", "DeleteData"):
pass # TODO: check for bnodes in triples
- elif u.name == 'Modify':
+ elif u.name == "Modify":
if u.delete:
- u.delete["triples"], u.delete[
- "quads"] = translateQuads(u.delete.quads)
+ u.delete["triples"], u.delete["quads"] = translateQuads(u.delete.quads)
if u.insert:
- u.insert["triples"], u.insert[
- "quads"] = translateQuads(u.insert.quads)
+ u.insert["triples"], u.insert["quads"] = translateQuads(u.insert.quads)
u["where"] = translateGroupGraphPattern(u.where)
else:
- raise Exception('Unknown type of update operation: %s' % u)
+ raise Exception("Unknown type of update operation: %s" % u)
u.prologue = prologue
return u
@@ -737,8 +753,7 @@ def translateUpdate(q, base=None, initNs=None):
prologue = translatePrologue(p, base, initNs, prologue)
# absolutize/resolve prefixes
- u = traverse(
- u, visitPost=functools.partial(translatePName, prologue=prologue))
+ u = traverse(u, visitPost=functools.partial(translatePName, prologue=prologue))
u = _traverse(u, _simplifyFilters)
u = traverse(u, visitPost=translatePath)
@@ -761,17 +776,16 @@ def translateQuery(q, base=None, initNs=None):
# absolutize/resolve prefixes
q[1] = traverse(
- q[1], visitPost=functools.partial(translatePName, prologue=prologue))
+ q[1], visitPost=functools.partial(translatePName, prologue=prologue)
+ )
P, PV = translate(q[1])
datasetClause = q[1].datasetClause
- if q[1].name == 'ConstructQuery':
+ if q[1].name == "ConstructQuery":
template = triples(q[1].template) if q[1].template else None
- res = CompValue(q[1].name, p=P,
- template=template,
- datasetClause=datasetClause)
+ res = CompValue(q[1].name, p=P, template=template, datasetClause=datasetClause)
else:
res = CompValue(q[1].name, p=P, datasetClause=datasetClause, PV=PV)
@@ -792,9 +806,9 @@ def pprintAlgebra(q):
if not isinstance(p, CompValue):
print(p)
return
- print("%s(" % (p.name, ))
+ print("%s(" % (p.name,))
for k in p:
- print("%s%s =" % (ind, k,), end=' ')
+ print("%s%s =" % (ind, k,), end=" ")
pp(p[k], ind + " ")
print("%s)" % ind)
@@ -806,13 +820,13 @@ def pprintAlgebra(q):
pp(x)
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
from rdflib.plugins.sparql import parser
import os.path
if os.path.exists(sys.argv[1]):
- q = file(sys.argv[1])
+ q = open(sys.argv[1]).read()
else:
q = sys.argv[1]
diff --git a/rdflib/plugins/sparql/datatypes.py b/rdflib/plugins/sparql/datatypes.py
index 1e8475e0..5ab8c92f 100644
--- a/rdflib/plugins/sparql/datatypes.py
+++ b/rdflib/plugins/sparql/datatypes.py
@@ -5,17 +5,45 @@ Utility functions for supporting the XML Schema Datatypes hierarchy
from rdflib import XSD
XSD_DTs = set(
- (XSD.integer, XSD.decimal, XSD.float, XSD.double, XSD.string,
- XSD.boolean, XSD.dateTime, XSD.nonPositiveInteger, XSD.negativeInteger,
- XSD.long, XSD.int, XSD.short, XSD.byte, XSD.nonNegativeInteger,
- XSD.unsignedLong, XSD.unsignedInt, XSD.unsignedShort, XSD.unsignedByte,
- XSD.positiveInteger, XSD.date))
+ (
+ XSD.integer,
+ XSD.decimal,
+ XSD.float,
+ XSD.double,
+ XSD.string,
+ XSD.boolean,
+ XSD.dateTime,
+ XSD.nonPositiveInteger,
+ XSD.negativeInteger,
+ XSD.long,
+ XSD.int,
+ XSD.short,
+ XSD.byte,
+ XSD.nonNegativeInteger,
+ XSD.unsignedLong,
+ XSD.unsignedInt,
+ XSD.unsignedShort,
+ XSD.unsignedByte,
+ XSD.positiveInteger,
+ XSD.date,
+ )
+)
_sub_types = {
XSD.integer: [
- XSD.nonPositiveInteger, XSD.negativeInteger, XSD.long, XSD.int,
- XSD.short, XSD.byte, XSD.nonNegativeInteger, XSD.positiveInteger,
- XSD.unsignedLong, XSD.unsignedInt, XSD.unsignedShort, XSD.unsignedByte],
+ XSD.nonPositiveInteger,
+ XSD.negativeInteger,
+ XSD.long,
+ XSD.int,
+ XSD.short,
+ XSD.byte,
+ XSD.nonNegativeInteger,
+ XSD.positiveInteger,
+ XSD.unsignedLong,
+ XSD.unsignedInt,
+ XSD.unsignedShort,
+ XSD.unsignedByte,
+ ],
}
_super_types = {}
@@ -25,21 +53,22 @@ for superdt in XSD_DTs:
# we only care about float, double, integer, decimal
_typePromotionMap = {
- XSD.float: {XSD.integer: XSD.float,
- XSD.decimal: XSD.float,
- XSD.double: XSD.double},
-
- XSD.double: {XSD.integer: XSD.double,
- XSD.float: XSD.double,
- XSD.decimal: XSD.double},
-
- XSD.decimal: {XSD.integer: XSD.decimal,
- XSD.float: XSD.float,
- XSD.double: XSD.double},
-
- XSD.integer: {XSD.decimal: XSD.decimal,
- XSD.float: XSD.float,
- XSD.double: XSD.double}
+ XSD.float: {XSD.integer: XSD.float, XSD.decimal: XSD.float, XSD.double: XSD.double},
+ XSD.double: {
+ XSD.integer: XSD.double,
+ XSD.float: XSD.double,
+ XSD.decimal: XSD.double,
+ },
+ XSD.decimal: {
+ XSD.integer: XSD.decimal,
+ XSD.float: XSD.float,
+ XSD.double: XSD.double,
+ },
+ XSD.integer: {
+ XSD.decimal: XSD.decimal,
+ XSD.float: XSD.float,
+ XSD.double: XSD.double,
+ },
}
@@ -53,5 +82,4 @@ def type_promotion(t1, t2):
try:
return _typePromotionMap[t1][t2]
except KeyError:
- raise TypeError(
- 'Operators cannot combine datatypes %s and %s' % (t1, t2))
+ raise TypeError("Operators cannot combine datatypes %s and %s" % (t1, t2))
diff --git a/rdflib/plugins/sparql/evaluate.py b/rdflib/plugins/sparql/evaluate.py
index d371017f..34fe41e9 100644
--- a/rdflib/plugins/sparql/evaluate.py
+++ b/rdflib/plugins/sparql/evaluate.py
@@ -21,17 +21,27 @@ import requests
from pyparsing import ParseException
from rdflib import Variable, Graph, BNode, URIRef, Literal
-from six import iteritems, itervalues
-
from rdflib.plugins.sparql import CUSTOM_EVALS
from rdflib.plugins.sparql.parserutils import value
from rdflib.plugins.sparql.sparql import (
- QueryContext, AlreadyBound, FrozenBindings, Bindings, SPARQLError)
+ QueryContext,
+ AlreadyBound,
+ FrozenBindings,
+ Bindings,
+ SPARQLError,
+)
from rdflib.plugins.sparql.evalutils import (
- _filter, _eval, _join, _diff, _minus, _fillTemplate, _ebv, _val)
+ _filter,
+ _eval,
+ _join,
+ _diff,
+ _minus,
+ _fillTemplate,
+ _ebv,
+ _val,
+)
from rdflib.plugins.sparql.aggregates import Aggregator
-from rdflib.plugins.sparql.algebra import Join, ToMultiSet, Values
from rdflib.plugins.sparql import parser
@@ -147,9 +157,10 @@ def evalLeftJoin(ctx, join):
# check that we would have had no OPTIONAL matches
# even without prior bindings...
p1_vars = join.p1._vars
- if p1_vars is None \
- or not any(_ebv(join.expr, b) for b in
- evalPart(ctx.thaw(a.remember(p1_vars)), join.p2)):
+ if p1_vars is None or not any(
+ _ebv(join.expr, b)
+ for b in evalPart(ctx.thaw(a.remember(p1_vars)), join.p2)
+ ):
yield a
@@ -157,7 +168,10 @@ def evalLeftJoin(ctx, join):
def evalFilter(ctx, part):
# TODO: Deal with dict returned from evalPart!
for c in evalPart(ctx, part.p):
- if _ebv(part.expr, c.forget(ctx, _except=part._vars) if not part.no_isolated_scope else c):
+ if _ebv(
+ part.expr,
+ c.forget(ctx, _except=part._vars) if not part.no_isolated_scope else c,
+ ):
yield c
@@ -165,8 +179,9 @@ def evalGraph(ctx, part):
if ctx.dataset is None:
raise Exception(
- "Non-conjunctive-graph doesn't know about " +
- "graphs. Try a query without GRAPH.")
+ "Non-conjunctive-graph doesn't know about "
+ + "graphs. Try a query without GRAPH."
+ )
ctx = ctx.clone()
graph = ctx[part.term]
@@ -195,7 +210,7 @@ def evalValues(ctx, part):
c = ctx.push()
try:
for k, v in r.items():
- if v != 'UNDEF':
+ if v != "UNDEF":
c[k] = v
except AlreadyBound:
continue
@@ -205,7 +220,7 @@ def evalValues(ctx, part):
def evalMultiset(ctx, part):
- if part.p.name == 'values':
+ if part.p.name == "values":
return evalValues(ctx, part)
return evalPart(ctx, part.p)
@@ -220,91 +235,102 @@ def evalPart(ctx, part):
except NotImplementedError:
pass # the given custome-function did not handle this part
- if part.name == 'BGP':
+ if part.name == "BGP":
# Reorder triples patterns by number of bound nodes in the current ctx
# Do patterns with more bound nodes first
- triples = sorted(part.triples, key=lambda t: len([n for n in t if ctx[n] is None]))
+ triples = sorted(
+ part.triples, key=lambda t: len([n for n in t if ctx[n] is None])
+ )
return evalBGP(ctx, triples)
- elif part.name == 'Filter':
+ elif part.name == "Filter":
return evalFilter(ctx, part)
- elif part.name == 'Join':
+ elif part.name == "Join":
return evalJoin(ctx, part)
- elif part.name == 'LeftJoin':
+ elif part.name == "LeftJoin":
return evalLeftJoin(ctx, part)
- elif part.name == 'Graph':
+ elif part.name == "Graph":
return evalGraph(ctx, part)
- elif part.name == 'Union':
+ elif part.name == "Union":
return evalUnion(ctx, part)
- elif part.name == 'ToMultiSet':
+ elif part.name == "ToMultiSet":
return evalMultiset(ctx, part)
- elif part.name == 'Extend':
+ elif part.name == "Extend":
return evalExtend(ctx, part)
- elif part.name == 'Minus':
+ elif part.name == "Minus":
return evalMinus(ctx, part)
- elif part.name == 'Project':
+ elif part.name == "Project":
return evalProject(ctx, part)
- elif part.name == 'Slice':
+ elif part.name == "Slice":
return evalSlice(ctx, part)
- elif part.name == 'Distinct':
+ elif part.name == "Distinct":
return evalDistinct(ctx, part)
- elif part.name == 'Reduced':
+ elif part.name == "Reduced":
return evalReduced(ctx, part)
- elif part.name == 'OrderBy':
+ elif part.name == "OrderBy":
return evalOrderBy(ctx, part)
- elif part.name == 'Group':
+ elif part.name == "Group":
return evalGroup(ctx, part)
- elif part.name == 'AggregateJoin':
+ elif part.name == "AggregateJoin":
return evalAggregateJoin(ctx, part)
- elif part.name == 'SelectQuery':
+ elif part.name == "SelectQuery":
return evalSelectQuery(ctx, part)
- elif part.name == 'AskQuery':
+ elif part.name == "AskQuery":
return evalAskQuery(ctx, part)
- elif part.name == 'ConstructQuery':
+ elif part.name == "ConstructQuery":
return evalConstructQuery(ctx, part)
- elif part.name == 'ServiceGraphPattern':
+ elif part.name == "ServiceGraphPattern":
return evalServiceQuery(ctx, part)
- #raise Exception('ServiceGraphPattern not implemented')
+ # raise Exception('ServiceGraphPattern not implemented')
- elif part.name == 'DescribeQuery':
- raise Exception('DESCRIBE not implemented')
+ elif part.name == "DescribeQuery":
+ raise Exception("DESCRIBE not implemented")
else:
- raise Exception('I dont know: %s' % part.name)
+ raise Exception("I dont know: %s" % part.name)
+
def evalServiceQuery(ctx, part):
res = {}
- match = re.match('^service <(.*)>[ \n]*{(.*)}[ \n]*$',
- part.get('service_string', ''), re.DOTALL)
+ match = re.match(
+ "^service <(.*)>[ \n]*{(.*)}[ \n]*$",
+ part.get("service_string", ""),
+ re.DOTALL | re.I,
+ )
if match:
service_url = match.group(1)
service_query = _buildQueryStringForServiceCall(ctx, match)
- query_settings = {'query': service_query,
- 'output': 'json'}
- headers = {'accept' : 'application/sparql-results+json',
- 'user-agent': 'rdflibForAnUser'}
+ query_settings = {"query": service_query, "output": "json"}
+ headers = {
+ "accept": "application/sparql-results+json",
+ "user-agent": "rdflibForAnUser",
+ }
# GET is easier to cache so prefer that if the query is not to long
if len(service_query) < 600:
response = requests.get(service_url, params=query_settings, headers=headers)
else:
- response = requests.post(service_url, params=query_settings, headers=headers)
+ response = requests.post(
+ service_url, params=query_settings, headers=headers
+ )
if response.status_code == 200:
- json = response.json();
- variables = res["vars_"] = json['head']['vars']
+ json = response.json()
+ variables = res["vars_"] = json["head"]["vars"]
# or just return the bindings?
- res = json['results']['bindings']
+ res = json["results"]["bindings"]
if len(res) > 0:
for r in res:
for bound in _yieldBindingsFromServiceCallResult(ctx, r, variables):
yield bound
else:
- raise Exception("Service: %s responded with code: %s", service_url, response.status_code);
+ raise Exception(
+ "Service: %s responded with code: %s", service_url, response.status_code
+ )
"""
@@ -313,6 +339,8 @@ def evalServiceQuery(ctx, part):
Re-adds prefixes if added and sets the base.
Wraps it in select if needed.
"""
+
+
def _buildQueryStringForServiceCall(ctx, match):
service_query = match.group(2)
@@ -320,18 +348,20 @@ def _buildQueryStringForServiceCall(ctx, match):
parser.parseQuery(service_query)
except ParseException:
# This could be because we don't have a select around the service call.
- service_query = 'SELECT REDUCED * WHERE {' + service_query + '}'
+ service_query = "SELECT REDUCED * WHERE {" + service_query + "}"
for p in ctx.prologue.namespace_manager.store.namespaces():
- service_query = 'PREFIX ' + p[0] + ':' + p[1].n3() + ' ' + service_query
+ service_query = "PREFIX " + p[0] + ":" + p[1].n3() + " " + service_query
# re add the base if one was defined
base = ctx.prologue.base
if base is not None and len(base) > 0:
- service_query = 'BASE <' + base + '> ' + service_query
- sol = ctx.solution();
+ service_query = "BASE <" + base + "> " + service_query
+ sol = ctx.solution()
if len(sol) > 0:
- variables = ' '.join(map(lambda v:v.n3(), sol))
- variables_bound = ' '.join(map(lambda v: ctx.get(v).n3(), sol))
- service_query = service_query + 'VALUES (' + variables + ') {(' + variables_bound + ')}'
+ variables = " ".join(map(lambda v: v.n3(), sol))
+ variables_bound = " ".join(map(lambda v: ctx.get(v).n3(), sol))
+ service_query = (
+ service_query + "VALUES (" + variables + ") {(" + variables_bound + ")}"
+ )
return service_query
@@ -339,14 +369,18 @@ def _yieldBindingsFromServiceCallResult(ctx, r, variables):
res_dict = {}
for var in variables:
if var in r and r[var]:
- if r[var]['type'] == "uri":
+ if r[var]["type"] == "uri":
res_dict[Variable(var)] = URIRef(r[var]["value"])
- elif r[var]['type'] == "bnode":
+ elif r[var]["type"] == "bnode":
res_dict[Variable(var)] = BNode(r[var]["value"])
- elif r[var]['type'] == "literal" and 'datatype' in r[var]:
- res_dict[Variable(var)] = Literal(r[var]["value"], datatype=r[var]['datatype'])
- elif r[var]['type'] == "literal" and 'xml:lang' in r[var]:
- res_dict[Variable(var)] = Literal(r[var]["value"], lang=r[var]['xml:lang'])
+ elif r[var]["type"] == "literal" and "datatype" in r[var]:
+ res_dict[Variable(var)] = Literal(
+ r[var]["value"], datatype=r[var]["datatype"]
+ )
+ elif r[var]["type"] == "literal" and "xml:lang" in r[var]:
+ res_dict[Variable(var)] = Literal(
+ r[var]["value"], lang=r[var]["xml:lang"]
+ )
yield FrozenBindings(ctx, res_dict)
@@ -379,7 +413,7 @@ def evalAggregateJoin(ctx, agg):
res[k].update(row)
# all rows are done; yield aggregated values
- for aggregator in itervalues(res):
+ for aggregator in res.values():
yield FrozenBindings(ctx, aggregator.get_bindings())
# there were no matches
@@ -393,8 +427,10 @@ def evalOrderBy(ctx, part):
for e in reversed(part.expr):
- reverse = bool(e.order and e.order == 'DESC')
- res = sorted(res, key=lambda x: _val(value(x, e.expr, variables=True)), reverse=reverse)
+ reverse = bool(e.order and e.order == "DESC")
+ res = sorted(
+ res, key=lambda x: _val(value(x, e.expr, variables=True)), reverse=reverse
+ )
return res
@@ -402,7 +438,11 @@ def evalOrderBy(ctx, part):
def evalSlice(ctx, slice):
res = evalPart(ctx, slice.p)
- return itertools.islice(res, slice.start, slice.start + slice.length if slice.length is not None else None)
+ return itertools.islice(
+ res,
+ slice.start,
+ slice.start + slice.length if slice.length is not None else None,
+ )
def evalReduced(ctx, part):
@@ -500,7 +540,7 @@ def evalConstructQuery(ctx, query):
def evalQuery(graph, query, initBindings, base=None):
- initBindings = dict((Variable(k), v) for k, v in iteritems(initBindings))
+ initBindings = dict((Variable(k), v) for k, v in initBindings.items())
ctx = QueryContext(graph, initBindings=initBindings)
@@ -510,8 +550,9 @@ def evalQuery(graph, query, initBindings, base=None):
if main.datasetClause:
if ctx.dataset is None:
raise Exception(
- "Non-conjunctive-graph doesn't know about " +
- "graphs! Try a query without FROM (NAMED).")
+ "Non-conjunctive-graph doesn't know about "
+ + "graphs! Try a query without FROM (NAMED)."
+ )
ctx = ctx.clone() # or push/pop?
diff --git a/rdflib/plugins/sparql/evalutils.py b/rdflib/plugins/sparql/evalutils.py
index 25353fe0..72b767a1 100644
--- a/rdflib/plugins/sparql/evalutils.py
+++ b/rdflib/plugins/sparql/evalutils.py
@@ -3,7 +3,7 @@ import collections
from rdflib.term import Variable, Literal, BNode, URIRef
from rdflib.plugins.sparql.operators import EBV
-from rdflib.plugins.sparql.parserutils import Expr, CompValue, value
+from rdflib.plugins.sparql.parserutils import Expr, CompValue
from rdflib.plugins.sparql.sparql import SPARQLError, NotBoundError
@@ -49,8 +49,7 @@ def _ebv(expr, ctx):
except SPARQLError:
return False # filter error == False
elif isinstance(expr, CompValue):
- raise Exception(
- "Weird - filter got a CompValue without evalfn! %r" % expr)
+ raise Exception("Weird - filter got a CompValue without evalfn! %r" % expr)
elif isinstance(expr, Variable):
try:
return EBV(ctx[expr])
@@ -73,8 +72,7 @@ def _eval(expr, ctx, raise_not_bound_error=True):
else:
return None
elif isinstance(expr, CompValue):
- raise Exception(
- "Weird - _eval got a CompValue without evalfn! %r" % expr)
+ raise Exception("Weird - _eval got a CompValue without evalfn! %r" % expr)
else:
raise Exception("Cannot eval thing: %s (%s)" % (expr, type(expr)))
@@ -101,12 +99,11 @@ def _fillTemplate(template, solution):
_o = solution.get(o)
# instantiate new bnodes for each solution
- _s, _p, _o = [bnodeMap[x] if isinstance(
- x, BNode) else y for x, y in zip(t, (_s, _p, _o))]
+ _s, _p, _o = [
+ bnodeMap[x] if isinstance(x, BNode) else y for x, y in zip(t, (_s, _p, _o))
+ ]
- if _s is not None and \
- _p is not None and \
- _o is not None:
+ if _s is not None and _p is not None and _o is not None:
yield (_s, _p, _o)
diff --git a/rdflib/plugins/sparql/operators.py b/rdflib/plugins/sparql/operators.py
index f904b82a..ef995ce0 100644
--- a/rdflib/plugins/sparql/operators.py
+++ b/rdflib/plugins/sparql/operators.py
@@ -25,8 +25,8 @@ from rdflib.plugins.sparql.parserutils import CompValue, Expr
from rdflib.plugins.sparql.datatypes import XSD_DTs, type_promotion
from rdflib import URIRef, BNode, Variable, Literal, XSD, RDF
from rdflib.term import Node
-from six import text_type
-from six.moves.urllib.parse import quote
+
+from urllib.parse import quote
from pyparsing import ParseResults
@@ -49,7 +49,7 @@ def Builtin_IRI(expr, ctx):
if isinstance(a, Literal):
return ctx.prologue.absolutize(URIRef(a))
- raise SPARQLError('IRI function only accepts URIRefs or Literals/Strings!')
+ raise SPARQLError("IRI function only accepts URIRefs or Literals/Strings!")
def Builtin_isBLANK(expr, ctx):
@@ -85,8 +85,7 @@ def Builtin_BNODE(expr, ctx):
if isinstance(a, Literal):
return ctx.bnodes[a] # defaultdict does the right thing
- raise SPARQLError(
- 'BNode function only accepts no argument or literal/string')
+ raise SPARQLError("BNode function only accepts no argument or literal/string")
def Builtin_ABS(expr, ctx):
@@ -158,11 +157,10 @@ def Builtin_COALESCE(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-coalesce
"""
- for x in expr.get('arg', variables=True):
+ for x in expr.get("arg", variables=True):
if x is not None and not isinstance(x, (SPARQLError, Variable)):
return x
- raise SPARQLError(
- "COALESCE got no arguments that did not evaluate to an error")
+ raise SPARQLError("COALESCE got no arguments that did not evaluate to an error")
def Builtin_CEIL(expr, ctx):
@@ -170,16 +168,16 @@ def Builtin_CEIL(expr, ctx):
http://www.w3.org/TR/sparql11-query/#func-ceil
"""
- l = expr.arg
- return Literal(int(math.ceil(numeric(l))), datatype=l.datatype)
+ l_ = expr.arg
+ return Literal(int(math.ceil(numeric(l_))), datatype=l_.datatype)
def Builtin_FLOOR(expr, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-floor
"""
- l = expr.arg
- return Literal(int(math.floor(numeric(l))), datatype=l.datatype)
+ l_ = expr.arg
+ return Literal(int(math.floor(numeric(l_))), datatype=l_.datatype)
def Builtin_ROUND(expr, ctx):
@@ -191,10 +189,10 @@ def Builtin_ROUND(expr, ctx):
# but in py3k bound was changed to
# "round-to-even" behaviour
# this is an ugly work-around
- l = expr.arg
- v = numeric(l)
+ l_ = expr.arg
+ v = numeric(l_)
v = int(Decimal(v).quantize(1, ROUND_HALF_UP))
- return Literal(v, datatype=l.datatype)
+ return Literal(v, datatype=l_.datatype)
def Builtin_REGEX(expr, ctx):
@@ -214,11 +212,10 @@ def Builtin_REGEX(expr, ctx):
if flags:
# Maps XPath REGEX flags (http://www.w3.org/TR/xpath-functions/#flags)
# to Python's re flags
- flagMap = dict(
- [('i', re.IGNORECASE), ('s', re.DOTALL), ('m', re.MULTILINE)])
+ flagMap = dict([("i", re.IGNORECASE), ("s", re.DOTALL), ("m", re.MULTILINE)])
cFlag = reduce(pyop.or_, [flagMap.get(f, 0) for f in flags])
- return Literal(bool(re.search(text_type(pattern), text, cFlag)))
+ return Literal(bool(re.search(str(pattern), text, cFlag)))
def Builtin_REPLACE(expr, ctx):
@@ -231,7 +228,7 @@ def Builtin_REPLACE(expr, ctx):
flags = expr.flags
# python uses \1, xpath/sparql uses $1
- replacement = re.sub('\\$([0-9]*)', r'\\\1', replacement)
+ replacement = re.sub("\\$([0-9]*)", r"\\\1", replacement)
def _r(m):
@@ -245,7 +242,7 @@ def Builtin_REPLACE(expr, ctx):
# the match object is replaced with a wrapper that
# returns "" instead of None for unmatched groups
- class _m():
+ class _m:
def __init__(self, m):
self.m = m
self.string = m.string
@@ -259,18 +256,20 @@ def Builtin_REPLACE(expr, ctx):
if flags:
# Maps XPath REGEX flags (http://www.w3.org/TR/xpath-functions/#flags)
# to Python's re flags
- flagMap = dict(
- [('i', re.IGNORECASE), ('s', re.DOTALL), ('m', re.MULTILINE)])
+ flagMap = dict([("i", re.IGNORECASE), ("s", re.DOTALL), ("m", re.MULTILINE)])
cFlag = reduce(pyop.or_, [flagMap.get(f, 0) for f in flags])
# @@FIXME@@ either datatype OR lang, NOT both
# this is necessary due to different treatment of unmatched groups in
# python versions. see comments above in _r(m).
- compat_r = text_type(replacement) if sys.version_info[:2] >= (3, 5) else _r
+ compat_r = str(replacement) if sys.version_info[:2] >= (3, 5) else _r
- return Literal(re.sub(text_type(pattern), compat_r, text, cFlag),
- datatype=text.datatype, lang=text.language)
+ return Literal(
+ re.sub(str(pattern), compat_r, text, cFlag),
+ datatype=text.datatype,
+ lang=text.language,
+ )
def Builtin_STRDT(expr, ctx):
@@ -278,7 +277,7 @@ def Builtin_STRDT(expr, ctx):
http://www.w3.org/TR/sparql11-query/#func-strdt
"""
- return Literal(text_type(expr.arg1), datatype=expr.arg2)
+ return Literal(str(expr.arg1), datatype=expr.arg2)
def Builtin_STRLANG(expr, ctx):
@@ -288,11 +287,11 @@ def Builtin_STRLANG(expr, ctx):
s = string(expr.arg1)
if s.language or s.datatype:
- raise SPARQLError('STRLANG expects a simple literal')
+ raise SPARQLError("STRLANG expects a simple literal")
# TODO: normalisation of lang tag to lower-case
# should probably happen in literal __init__
- return Literal(text_type(s), lang=str(expr.arg2).lower())
+ return Literal(str(s), lang=str(expr.arg2).lower())
def Builtin_CONCAT(expr, ctx):
@@ -308,8 +307,7 @@ def Builtin_CONCAT(expr, ctx):
lang = set(x.language for x in expr.arg)
lang = lang.pop() if len(lang) == 1 else None
- return Literal("".join(string(x)
- for x in expr.arg), datatype=dt, lang=lang)
+ return Literal("".join(string(x) for x in expr.arg), datatype=dt, lang=lang)
def _compatibleStrings(a, b):
@@ -317,7 +315,7 @@ def _compatibleStrings(a, b):
string(b)
if b.language and a.language != b.language:
- raise SPARQLError('incompatible arguments to str functions')
+ raise SPARQLError("incompatible arguments to str functions")
def Builtin_STRSTARTS(expr, ctx):
@@ -409,22 +407,22 @@ def Builtin_SUBSTR(expr, ctx):
def Builtin_STRLEN(e, ctx):
- l = string(e.arg)
+ l_ = string(e.arg)
- return Literal(len(l))
+ return Literal(len(l_))
def Builtin_STR(e, ctx):
arg = e.arg
if isinstance(arg, SPARQLError):
raise arg
- return Literal(text_type(arg)) # plain literal
+ return Literal(str(arg)) # plain literal
def Builtin_LCASE(e, ctx):
- l = string(e.arg)
+ l_ = string(e.arg)
- return Literal(l.lower(), datatype=l.datatype, lang=l.language)
+ return Literal(l_.lower(), datatype=l_.datatype, lang=l_.language)
def Builtin_LANGMATCHES(e, ctx):
@@ -436,7 +434,7 @@ def Builtin_LANGMATCHES(e, ctx):
langTag = string(e.arg1)
langRange = string(e.arg2)
- if text_type(langTag) == "":
+ if str(langTag) == "":
return Literal(False) # nothing matches empty!
return Literal(_lang_range_check(langRange, langTag))
@@ -491,7 +489,7 @@ def Builtin_TIMEZONE(e, ctx):
"""
dt = datetime(e.arg)
if not dt.tzinfo:
- raise SPARQLError('datatime has no timezone: %r' % dt)
+ raise SPARQLError("datatime has no timezone: %r" % dt)
delta = dt.tzinfo.utcoffset(ctx.now)
@@ -508,11 +506,13 @@ def Builtin_TIMEZONE(e, ctx):
m = (s - h * 60 * 60) / 60
s = s - h * 60 * 60 - m * 60
- tzdelta = "%sP%sT%s%s%s" % (neg,
- "%dD" % d if d else "",
- "%dH" % h if h else "",
- "%dM" % m if m else "",
- "%dS" % s if not d and not h and not m else "")
+ tzdelta = "%sP%sT%s%s%s" % (
+ neg,
+ "%dD" % d if d else "",
+ "%dH" % h if h else "",
+ "%dM" % m if m else "",
+ "%dS" % s if not d and not h and not m else "",
+ )
return Literal(tzdelta, datatype=XSD.dayTimeDuration)
@@ -528,9 +528,9 @@ def Builtin_TZ(e, ctx):
def Builtin_UCASE(e, ctx):
- l = string(e.arg)
+ l_ = string(e.arg)
- return Literal(l.upper(), datatype=l.datatype, lang=l.language)
+ return Literal(l_.upper(), datatype=l_.datatype, lang=l_.language)
def Builtin_LANG(e, ctx):
@@ -542,19 +542,19 @@ def Builtin_LANG(e, ctx):
with an empty language tag.
"""
- l = literal(e.arg)
- return Literal(l.language or "")
+ l_ = literal(e.arg)
+ return Literal(l_.language or "")
def Builtin_DATATYPE(e, ctx):
- l = e.arg
- if not isinstance(l, Literal):
- raise SPARQLError('Can only get datatype of literal: %r' % l)
- if l.language:
+ l_ = e.arg
+ if not isinstance(l_, Literal):
+ raise SPARQLError("Can only get datatype of literal: %r" % l_)
+ if l_.language:
return RDF_langString
- if not l.datatype and not l.language:
+ if not l_.datatype and not l_.language:
return XSD.string
- return l.datatype
+ return l_.datatype
def Builtin_sameTerm(e, ctx):
@@ -567,7 +567,7 @@ def Builtin_BOUND(e, ctx):
"""
http://www.w3.org/TR/sparql11-query/#func-bound
"""
- n = e.get('arg', variables=True)
+ n = e.get("arg", variables=True)
return Literal(not isinstance(n, Variable))
@@ -576,7 +576,7 @@ def Builtin_EXISTS(e, ctx):
# damn...
from rdflib.plugins.sparql.evaluate import evalPart
- exists = e.name == 'Builtin_EXISTS'
+ exists = e.name == "Builtin_EXISTS"
ctx = ctx.ctx.thaw(ctx) # hmm
for x in evalPart(ctx, e.graph):
@@ -605,9 +605,11 @@ def custom_function(uri, override=False, raw=False):
"""
Decorator version of :func:`register_custom_function`.
"""
+
def decorator(func):
register_custom_function(uri, func, override=override, raw=raw)
return func
+
return decorator
@@ -624,7 +626,7 @@ def Function(e, ctx):
pair = _CUSTOM_FUNCTIONS.get(e.iri)
if pair is None:
# no such function is registered
- raise SPARQLError('Unknown function %r' % e.iri)
+ raise SPARQLError("Unknown function %r" % e.iri)
func, raw = pair
if raw:
# function expects expression and context
@@ -658,21 +660,17 @@ def default_cast(e, ctx):
if isinstance(x, (URIRef, Literal)):
return Literal(x, datatype=XSD.string)
else:
- raise SPARQLError(
- "Cannot cast term %r of type %r" % (x, type(x)))
+ raise SPARQLError("Cannot cast term %r of type %r" % (x, type(x)))
if not isinstance(x, Literal):
- raise SPARQLError(
- "Can only cast Literals to non-string data-types")
+ raise SPARQLError("Can only cast Literals to non-string data-types")
if x.datatype and not x.datatype in XSD_DTs:
- raise SPARQLError(
- "Cannot cast literal with unknown datatype: %r" % x.datatype)
+ raise SPARQLError("Cannot cast literal with unknown datatype: %r" % x.datatype)
if e.iri == XSD.dateTime:
if x.datatype and x.datatype not in (XSD.dateTime, XSD.string):
- raise SPARQLError(
- "Cannot cast %r to XSD:dateTime" % x.datatype)
+ raise SPARQLError("Cannot cast %r to XSD:dateTime" % x.datatype)
try:
return Literal(isodate.parse_datetime(x), datatype=e.iri)
except:
@@ -742,12 +740,12 @@ def MultiplicativeExpression(e, ctx):
if type(f) == float:
res = float(res)
- if op == '*':
+ if op == "*":
res *= f
else:
res /= f
except (InvalidOperation, ZeroDivisionError):
- raise SPARQLError('divide by 0')
+ raise SPARQLError("divide by 0")
return Literal(res)
@@ -775,7 +773,7 @@ def AdditiveExpression(e, ctx):
dt = type_promotion(dt, term.datatype)
- if op == '+':
+ if op == "+":
res += n
else:
res -= n
@@ -794,18 +792,22 @@ def RelationalExpression(e, ctx):
if other is None:
return expr
- ops = dict([('>', lambda x, y: x.__gt__(y)),
- ('<', lambda x, y: x.__lt__(y)),
- ('=', lambda x, y: x.eq(y)),
- ('!=', lambda x, y: x.neq(y)),
- ('>=', lambda x, y: x.__ge__(y)),
- ('<=', lambda x, y: x.__le__(y)),
- ('IN', pyop.contains),
- ('NOT IN', lambda x, y: not pyop.contains(x, y))])
+ ops = dict(
+ [
+ (">", lambda x, y: x.__gt__(y)),
+ ("<", lambda x, y: x.__lt__(y)),
+ ("=", lambda x, y: x.eq(y)),
+ ("!=", lambda x, y: x.neq(y)),
+ (">=", lambda x, y: x.__ge__(y)),
+ ("<=", lambda x, y: x.__le__(y)),
+ ("IN", pyop.contains),
+ ("NOT IN", lambda x, y: not pyop.contains(x, y)),
+ ]
+ )
- if op in ('IN', 'NOT IN'):
+ if op in ("IN", "NOT IN"):
- res = (op == 'NOT IN')
+ res = op == "NOT IN"
error = False
@@ -823,33 +825,37 @@ def RelationalExpression(e, ctx):
else:
raise error
- if not op in ('=', '!=', 'IN', 'NOT IN'):
+ if op not in ("=", "!=", "IN", "NOT IN"):
if not isinstance(expr, Literal):
raise SPARQLError(
- "Compare other than =, != of non-literals is an error: %r" %
- expr)
+ "Compare other than =, != of non-literals is an error: %r" % expr
+ )
if not isinstance(other, Literal):
raise SPARQLError(
- "Compare other than =, != of non-literals is an error: %r" %
- other)
+ "Compare other than =, != of non-literals is an error: %r" % other
+ )
else:
if not isinstance(expr, Node):
- raise SPARQLError('I cannot compare this non-node: %r' % expr)
+ raise SPARQLError("I cannot compare this non-node: %r" % expr)
if not isinstance(other, Node):
- raise SPARQLError('I cannot compare this non-node: %r' % other)
+ raise SPARQLError("I cannot compare this non-node: %r" % other)
if isinstance(expr, Literal) and isinstance(other, Literal):
- if expr.datatype is not None and expr.datatype not in XSD_DTs and other.datatype is not None and other.datatype not in XSD_DTs:
+ if (
+ expr.datatype is not None
+ and expr.datatype not in XSD_DTs
+ and other.datatype is not None
+ and other.datatype not in XSD_DTs
+ ):
# in SPARQL for non-XSD DT Literals we can only do =,!=
- if op not in ('=', '!='):
- raise SPARQLError(
- 'Can only do =,!= comparisons of non-XSD Literals')
+ if op not in ("=", "!="):
+ raise SPARQLError("Can only do =,!= comparisons of non-XSD Literals")
try:
r = ops[op](expr, other)
if r == NotImplemented:
- raise SPARQLError('Error when comparing')
+ raise SPARQLError("Error when comparing")
except TypeError as te:
raise SPARQLError(*te.args)
return Literal(r)
@@ -897,18 +903,22 @@ def ConditionalOrExpression(e, ctx):
def not_(arg):
- return Expr('UnaryNot', UnaryNot, expr=arg)
+ return Expr("UnaryNot", UnaryNot, expr=arg)
def and_(*args):
if len(args) == 1:
return args[0]
- return Expr('ConditionalAndExpression', ConditionalAndExpression,
- expr=args[0], other=list(args[1:]))
+ return Expr(
+ "ConditionalAndExpression",
+ ConditionalAndExpression,
+ expr=args[0],
+ other=list(args[1:]),
+ )
-TrueFilter = Expr('TrueFilter', lambda _1, _2: Literal(True))
+TrueFilter = Expr("TrueFilter", lambda _1, _2: Literal(True))
def simplify(expr):
@@ -919,7 +929,7 @@ def simplify(expr):
return list(map(simplify, expr))
if not isinstance(expr, CompValue):
return expr
- if expr.name.endswith('Expression'):
+ if expr.name.endswith("Expression"):
if expr.other is None:
return simplify(expr.expr)
@@ -941,8 +951,7 @@ def datetime(e):
if not isinstance(e, Literal):
raise SPARQLError("Non-literal passed as datetime: %r" % e)
if not e.datatype == XSD.dateTime:
- raise SPARQLError(
- "Literal with wrong datatype passed as datetime: %r" % e)
+ raise SPARQLError("Literal with wrong datatype passed as datetime: %r" % e)
return e.toPython()
@@ -954,8 +963,7 @@ def string(s):
if not isinstance(s, Literal):
raise SPARQLError("Non-literal passes as string: %r" % s)
if s.datatype and s.datatype != XSD.string:
- raise SPARQLError(
- "Non-string datatype-literal passes as string: %r" % s)
+ raise SPARQLError("Non-string datatype-literal passes as string: %r" % s)
return s
@@ -970,13 +978,24 @@ def numeric(expr):
if not isinstance(expr, Literal):
raise SPARQLTypeError("%r is not a literal!" % expr)
- if expr.datatype not in (XSD.float, XSD.double,
- XSD.decimal, XSD.integer,
- XSD.nonPositiveInteger, XSD.negativeInteger,
- XSD.nonNegativeInteger, XSD.positiveInteger,
- XSD.unsignedLong, XSD.unsignedInt,
- XSD.unsignedShort, XSD.unsignedByte,
- XSD.long, XSD.int, XSD.short, XSD.byte):
+ if expr.datatype not in (
+ XSD.float,
+ XSD.double,
+ XSD.decimal,
+ XSD.integer,
+ XSD.nonPositiveInteger,
+ XSD.negativeInteger,
+ XSD.nonNegativeInteger,
+ XSD.positiveInteger,
+ XSD.unsignedLong,
+ XSD.unsignedInt,
+ XSD.unsignedShort,
+ XSD.unsignedByte,
+ XSD.long,
+ XSD.int,
+ XSD.short,
+ XSD.byte,
+ ):
raise SPARQLTypeError("%r does not have a numeric datatype!" % expr)
return expr.toPython()
@@ -1011,14 +1030,18 @@ def EBV(rt):
# Type error, see: http://www.w3.org/TR/rdf-sparql-query/#ebv
raise SPARQLTypeError(
"http://www.w3.org/TR/rdf-sparql-query/#ebv - ' + \
- 'Could not determine the EBV for : %r" % rt)
+ 'Could not determine the EBV for : %r"
+ % rt
+ )
else:
return bool(pyRT)
else:
raise SPARQLTypeError(
"http://www.w3.org/TR/rdf-sparql-query/#ebv - ' + \
- 'Only literals have Boolean values! %r" % rt)
+ 'Only literals have Boolean values! %r"
+ % rt
+ )
def _lang_range_check(range, lang):
@@ -1038,18 +1061,19 @@ def _lang_range_check(range, lang):
.. __:http://dev.w3.org/2004/PythonLib-IH/RDFClosure/RestrictedDatatype.py
"""
- def _match(r, l):
+
+ def _match(r, l_):
"""
Matching of a range and language item: either range is a wildcard
or the two are equal
@param r: language range item
- @param l: language tag item
+ @param l_: language tag item
@rtype: boolean
"""
- return r == '*' or r == l
+ return r == "*" or r == l_
- rangeList = range.strip().lower().split('-')
- langList = lang.strip().lower().split('-')
+ rangeList = range.strip().lower().split("-")
+ langList = lang.strip().lower().split("-")
if not _match(rangeList[0], langList[0]):
return False
if len(rangeList) > len(langList):
diff --git a/rdflib/plugins/sparql/parser.py b/rdflib/plugins/sparql/parser.py
index 881e638b..2124aad2 100644
--- a/rdflib/plugins/sparql/parser.py
+++ b/rdflib/plugins/sparql/parser.py
@@ -10,17 +10,28 @@ import sys
import re
from pyparsing import (
- Literal, Regex, Optional, OneOrMore, ZeroOrMore, Forward,
- ParseException, Suppress, Combine, restOfLine, Group,
- ParseResults, delimitedList)
+ Literal,
+ Regex,
+ Optional,
+ OneOrMore,
+ ZeroOrMore,
+ Forward,
+ ParseException,
+ Suppress,
+ Combine,
+ restOfLine,
+ Group,
+ ParseResults,
+ delimitedList,
+)
from pyparsing import CaselessKeyword as Keyword # watch out :)
+
# from pyparsing import Keyword as CaseSensitiveKeyword
from .parserutils import Comp, Param, ParamList
from . import operators as op
from rdflib.compat import decodeUnicodeEscape
-from six import binary_type, unichr
import rdflib
@@ -50,11 +61,11 @@ def expandTriples(terms):
res = []
if DEBUG:
print("Terms", terms)
- l = len(terms)
+ l_ = len(terms)
for i, t in enumerate(terms):
- if t == ',':
+ if t == ",":
res.extend([res[-3], res[-2]])
- elif t == ';':
+ elif t == ";":
if i + 1 == len(terms) or terms[i + 1] == ";" or terms[i + 1] == ".":
continue # this semicolon is spurious
res.append(res[0])
@@ -67,16 +78,17 @@ def expandTriples(terms):
if len(t) > 1:
res += t
# is this bnode the subject of more triples?
- if i + 1 < l and terms[i + 1] not in ".,;":
+ if i + 1 < l_ and terms[i + 1] not in ".,;":
res.append(t[0])
elif isinstance(t, ParseResults):
res += t.asList()
- elif t != '.':
+ elif t != ".":
res.append(t)
if DEBUG:
print(len(res), t)
if DEBUG:
import json
+
print(json.dumps(res, indent=2))
return res
@@ -88,6 +100,7 @@ def expandTriples(terms):
except:
if DEBUG:
import traceback
+
traceback.print_exc()
raise
@@ -140,13 +153,16 @@ def expandCollection(terms):
# SPARQL Grammar from http://www.w3.org/TR/sparql11-query/#grammar
# ------ TERMINALS --------------
# [139] IRIREF ::= '<' ([^<>"{}|^`\]-[#x00-#x20])* '>'
-IRIREF = Combine(Suppress('<') + Regex(r'[^<>"{}|^`\\%s]*' % ''.join(
- '\\x%02X' % i for i in range(33))) + Suppress('>'))
+IRIREF = Combine(
+ Suppress("<")
+ + Regex(r'[^<>"{}|^`\\%s]*' % "".join("\\x%02X" % i for i in range(33)))
+ + Suppress(">")
+)
IRIREF.setParseAction(lambda x: rdflib.URIRef(x[0]))
# [164] P_CHARS_BASE ::= [A-Z] | [a-z] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | [#x00F8-#x02FF] | [#x0370-#x037D] | [#x037F-#x1FFF] | [#x200C-#x200D] | [#x2070-#x218F] | [#x2C00-#x2FEF] | [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD] | [#x10000-#xEFFFF]
-if sys.maxunicode == 0xffff:
+if sys.maxunicode == 0xFFFF:
# this is narrow python build (default on windows/osx)
# this means that unicode code points over 0xffff are stored
# as several characters, which in turn means that regex character
@@ -163,127 +179,130 @@ if sys.maxunicode == 0xffff:
#
# in py3.3 this is fixed
- PN_CHARS_BASE_re = u'A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD'
+ PN_CHARS_BASE_re = u"A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD"
else:
# wide python build
- PN_CHARS_BASE_re = u'A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\U00010000-\U000EFFFF'
+ PN_CHARS_BASE_re = u"A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\U00010000-\U000EFFFF"
# [165] PN_CHARS_U ::= PN_CHARS_BASE | '_'
-PN_CHARS_U_re = '_' + PN_CHARS_BASE_re
+PN_CHARS_U_re = "_" + PN_CHARS_BASE_re
# [167] PN_CHARS ::= PN_CHARS_U | '-' | [0-9] | #x00B7 | [#x0300-#x036F] | [#x203F-#x2040]
-PN_CHARS_re = u'\\-0-9\u00B7\u0300-\u036F\u203F-\u2040' + PN_CHARS_U_re
+PN_CHARS_re = u"\\-0-9\u00B7\u0300-\u036F\u203F-\u2040" + PN_CHARS_U_re
# PN_CHARS = Regex(u'[%s]'%PN_CHARS_re, flags=re.U)
# [168] PN_PREFIX ::= PN_CHARS_BASE ((PN_CHARS|'.')* PN_CHARS)?
-PN_PREFIX = Regex(u'[%s](?:[%s\\.]*[%s])?' % (PN_CHARS_BASE_re,
- PN_CHARS_re, PN_CHARS_re), flags=re.U)
+PN_PREFIX = Regex(
+ u"[%s](?:[%s\\.]*[%s])?" % (PN_CHARS_BASE_re, PN_CHARS_re, PN_CHARS_re), flags=re.U
+)
# [140] PNAME_NS ::= PN_PREFIX? ':'
-PNAME_NS = Optional(
- Param('prefix', PN_PREFIX)) + Suppress(':').leaveWhitespace()
+PNAME_NS = Optional(Param("prefix", PN_PREFIX)) + Suppress(":").leaveWhitespace()
# [173] PN_LOCAL_ESC ::= '\' ( '_' | '~' | '.' | '-' | '!' | '$' | '&' | "'" | '(' | ')' | '*' | '+' | ',' | ';' | '=' | '/' | '?' | '#' | '@' | '%' )
-PN_LOCAL_ESC_re = '\\\\[_~\\.\\-!$&"\'()*+,;=/?#@%]'
+PN_LOCAL_ESC_re = "\\\\[_~\\.\\-!$&\"'()*+,;=/?#@%]"
# PN_LOCAL_ESC = Regex(PN_LOCAL_ESC_re) # regex'd
-#PN_LOCAL_ESC.setParseAction(lambda x: x[0][1:])
+# PN_LOCAL_ESC.setParseAction(lambda x: x[0][1:])
# [172] HEX ::= [0-9] | [A-F] | [a-f]
# HEX = Regex('[0-9A-Fa-f]') # not needed
# [171] PERCENT ::= '%' HEX HEX
-PERCENT_re = '%[0-9a-fA-F]{2}'
+PERCENT_re = "%[0-9a-fA-F]{2}"
# PERCENT = Regex(PERCENT_re) # regex'd
-#PERCENT.setParseAction(lambda x: unichr(int(x[0][1:], 16)))
+# PERCENT.setParseAction(lambda x: chr(int(x[0][1:], 16)))
# [170] PLX ::= PERCENT | PN_LOCAL_ESC
-PLX_re = '(%s|%s)' % (PN_LOCAL_ESC_re, PERCENT_re)
+PLX_re = "(%s|%s)" % (PN_LOCAL_ESC_re, PERCENT_re)
# PLX = PERCENT | PN_LOCAL_ESC # regex'd
# [169] PN_LOCAL ::= (PN_CHARS_U | ':' | [0-9] | PLX ) ((PN_CHARS | '.' | ':' | PLX)* (PN_CHARS | ':' | PLX) )?
-PN_LOCAL = Regex(u"""([%(PN_CHARS_U)s:0-9]|%(PLX)s)
+PN_LOCAL = Regex(
+ u"""([%(PN_CHARS_U)s:0-9]|%(PLX)s)
(([%(PN_CHARS)s\\.:]|%(PLX)s)*
- ([%(PN_CHARS)s:]|%(PLX)s) )?""" % dict(PN_CHARS_U=PN_CHARS_U_re,
- PN_CHARS=PN_CHARS_re,
- PLX=PLX_re), flags=re.X | re.UNICODE)
+ ([%(PN_CHARS)s:]|%(PLX)s) )?"""
+ % dict(PN_CHARS_U=PN_CHARS_U_re, PN_CHARS=PN_CHARS_re, PLX=PLX_re),
+ flags=re.X | re.UNICODE,
+)
def _hexExpand(match):
- return unichr(int(match.group(0)[1:], 16))
+ return chr(int(match.group(0)[1:], 16))
PN_LOCAL.setParseAction(lambda x: re.sub("(%s)" % PERCENT_re, _hexExpand, x[0]))
# [141] PNAME_LN ::= PNAME_NS PN_LOCAL
-PNAME_LN = PNAME_NS + Param('localname', PN_LOCAL.leaveWhitespace())
+PNAME_LN = PNAME_NS + Param("localname", PN_LOCAL.leaveWhitespace())
# [142] BLANK_NODE_LABEL ::= '_:' ( PN_CHARS_U | [0-9] ) ((PN_CHARS|'.')* PN_CHARS)?
-BLANK_NODE_LABEL = Regex(u'_:[0-9%s](?:[\\.%s]*[%s])?' % (
- PN_CHARS_U_re, PN_CHARS_re, PN_CHARS_re), flags=re.U)
+BLANK_NODE_LABEL = Regex(
+ u"_:[0-9%s](?:[\\.%s]*[%s])?" % (PN_CHARS_U_re, PN_CHARS_re, PN_CHARS_re),
+ flags=re.U,
+)
BLANK_NODE_LABEL.setParseAction(lambda x: rdflib.BNode(x[0][2:]))
# [166] VARNAME ::= ( PN_CHARS_U | [0-9] ) ( PN_CHARS_U | [0-9] | #x00B7 | [#x0300-#x036F] | [#x203F-#x2040] )*
-VARNAME = Regex(u'[%s0-9][%s0-9\u00B7\u0300-\u036F\u203F-\u2040]*' % (
- PN_CHARS_U_re, PN_CHARS_U_re), flags=re.U)
+VARNAME = Regex(
+ u"[%s0-9][%s0-9\u00B7\u0300-\u036F\u203F-\u2040]*" % (PN_CHARS_U_re, PN_CHARS_U_re),
+ flags=re.U,
+)
# [143] VAR1 ::= '?' VARNAME
-VAR1 = Combine(Suppress('?') + VARNAME)
+VAR1 = Combine(Suppress("?") + VARNAME)
# [144] VAR2 ::= '$' VARNAME
-VAR2 = Combine(Suppress('$') + VARNAME)
+VAR2 = Combine(Suppress("$") + VARNAME)
# [145] LANGTAG ::= '@' [a-zA-Z]+ ('-' [a-zA-Z0-9]+)*
-LANGTAG = Combine(Suppress('@') + Regex('[a-zA-Z]+(?:-[a-zA-Z0-9]+)*'))
+LANGTAG = Combine(Suppress("@") + Regex("[a-zA-Z]+(?:-[a-zA-Z0-9]+)*"))
# [146] INTEGER ::= [0-9]+
INTEGER = Regex(r"[0-9]+")
# INTEGER.setResultsName('integer')
-INTEGER.setParseAction(
- lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.integer))
+INTEGER.setParseAction(lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.integer))
# [155] EXPONENT ::= [eE] [+-]? [0-9]+
-EXPONENT_re = '[eE][+-]?[0-9]+'
+EXPONENT_re = "[eE][+-]?[0-9]+"
# [147] DECIMAL ::= [0-9]* '.' [0-9]+
-DECIMAL = Regex(r'[0-9]*\.[0-9]+') # (?![eE])
+DECIMAL = Regex(r"[0-9]*\.[0-9]+") # (?![eE])
# DECIMAL.setResultsName('decimal')
-DECIMAL.setParseAction(
- lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.decimal))
+DECIMAL.setParseAction(lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.decimal))
# [148] DOUBLE ::= [0-9]+ '.' [0-9]* EXPONENT | '.' ([0-9])+ EXPONENT | ([0-9])+ EXPONENT
-DOUBLE = Regex(
- r'[0-9]+\.[0-9]*%(e)s|\.([0-9])+%(e)s|[0-9]+%(e)s' % {'e': EXPONENT_re})
+DOUBLE = Regex(r"[0-9]+\.[0-9]*%(e)s|\.([0-9])+%(e)s|[0-9]+%(e)s" % {"e": EXPONENT_re})
# DOUBLE.setResultsName('double')
-DOUBLE.setParseAction(
- lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.double))
+DOUBLE.setParseAction(lambda x: rdflib.Literal(x[0], datatype=rdflib.XSD.double))
# [149] INTEGER_POSITIVE ::= '+' INTEGER
-INTEGER_POSITIVE = Suppress('+') + INTEGER.copy().leaveWhitespace()
-INTEGER_POSITIVE.setParseAction(lambda x: rdflib.Literal(
- "+" + x[0], datatype=rdflib.XSD.integer))
+INTEGER_POSITIVE = Suppress("+") + INTEGER.copy().leaveWhitespace()
+INTEGER_POSITIVE.setParseAction(
+ lambda x: rdflib.Literal("+" + x[0], datatype=rdflib.XSD.integer)
+)
# [150] DECIMAL_POSITIVE ::= '+' DECIMAL
-DECIMAL_POSITIVE = Suppress('+') + DECIMAL.copy().leaveWhitespace()
+DECIMAL_POSITIVE = Suppress("+") + DECIMAL.copy().leaveWhitespace()
# [151] DOUBLE_POSITIVE ::= '+' DOUBLE
-DOUBLE_POSITIVE = Suppress('+') + DOUBLE.copy().leaveWhitespace()
+DOUBLE_POSITIVE = Suppress("+") + DOUBLE.copy().leaveWhitespace()
# [152] INTEGER_NEGATIVE ::= '-' INTEGER
-INTEGER_NEGATIVE = Suppress('-') + INTEGER.copy().leaveWhitespace()
+INTEGER_NEGATIVE = Suppress("-") + INTEGER.copy().leaveWhitespace()
INTEGER_NEGATIVE.setParseAction(lambda x: neg(x[0]))
# [153] DECIMAL_NEGATIVE ::= '-' DECIMAL
-DECIMAL_NEGATIVE = Suppress('-') + DECIMAL.copy().leaveWhitespace()
+DECIMAL_NEGATIVE = Suppress("-") + DECIMAL.copy().leaveWhitespace()
DECIMAL_NEGATIVE.setParseAction(lambda x: neg(x[0]))
# [154] DOUBLE_NEGATIVE ::= '-' DOUBLE
-DOUBLE_NEGATIVE = Suppress('-') + DOUBLE.copy().leaveWhitespace()
+DOUBLE_NEGATIVE = Suppress("-") + DOUBLE.copy().leaveWhitespace()
DOUBLE_NEGATIVE.setParseAction(lambda x: neg(x[0]))
# [160] ECHAR ::= '\' [tbnrf\"']
@@ -295,57 +314,58 @@ DOUBLE_NEGATIVE.setParseAction(lambda x: neg(x[0]))
# ) + ZeroOrMore( ~ Literal("'\\") | ECHAR ) ) + "'''"
STRING_LITERAL_LONG1 = Regex(u"'''((?:'|'')?(?:[^'\\\\]|\\\\['ntbrf\\\\]))*'''")
STRING_LITERAL_LONG1.setParseAction(
- lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][3:-3])))
+ lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][3:-3]))
+)
# [159] STRING_LITERAL_LONG2 ::= '"""' ( ( '"' | '""' )? ( [^"\] | ECHAR ) )* '"""'
# STRING_LITERAL_LONG2 = Literal('"""') + ( Optional( Literal('"') | '""'
# ) + ZeroOrMore( ~ Literal('"\\') | ECHAR ) ) + '"""'
STRING_LITERAL_LONG2 = Regex(u'"""(?:(?:"|"")?(?:[^"\\\\]|\\\\["ntbrf\\\\]))*"""')
STRING_LITERAL_LONG2.setParseAction(
- lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][3:-3])))
+ lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][3:-3]))
+)
# [156] STRING_LITERAL1 ::= "'" ( ([^#x27#x5C#xA#xD]) | ECHAR )* "'"
# STRING_LITERAL1 = Literal("'") + ZeroOrMore(
# Regex(u'[^\u0027\u005C\u000A\u000D]',flags=re.U) | ECHAR ) + "'"
-STRING_LITERAL1 = Regex(
- u"'(?:[^'\\n\\r\\\\]|\\\\['ntbrf\\\\])*'(?!')", flags=re.U)
+STRING_LITERAL1 = Regex(u"'(?:[^'\\n\\r\\\\]|\\\\['ntbrf\\\\])*'(?!')", flags=re.U)
STRING_LITERAL1.setParseAction(
- lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][1:-1])))
+ lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][1:-1]))
+)
# [157] STRING_LITERAL2 ::= '"' ( ([^#x22#x5C#xA#xD]) | ECHAR )* '"'
# STRING_LITERAL2 = Literal('"') + ZeroOrMore (
# Regex(u'[^\u0022\u005C\u000A\u000D]',flags=re.U) | ECHAR ) + '"'
-STRING_LITERAL2 = Regex(
- u'"(?:[^"\\n\\r\\\\]|\\\\["ntbrf\\\\])*"(?!")', flags=re.U)
+STRING_LITERAL2 = Regex(u'"(?:[^"\\n\\r\\\\]|\\\\["ntbrf\\\\])*"(?!")', flags=re.U)
STRING_LITERAL2.setParseAction(
- lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][1:-1])))
+ lambda x: rdflib.Literal(decodeUnicodeEscape(x[0][1:-1]))
+)
# [161] NIL ::= '(' WS* ')'
-NIL = Literal('(') + ')'
+NIL = Literal("(") + ")"
NIL.setParseAction(lambda x: rdflib.RDF.nil)
# [162] WS ::= #x20 | #x9 | #xD | #xA
# Not needed?
# WS = #x20 | #x9 | #xD | #xA
# [163] ANON ::= '[' WS* ']'
-ANON = Literal('[') + ']'
+ANON = Literal("[") + "]"
ANON.setParseAction(lambda x: rdflib.BNode())
# A = CaseSensitiveKeyword('a')
-A = Literal('a')
+A = Literal("a")
A.setParseAction(lambda x: rdflib.RDF.type)
# ------ NON-TERMINALS --------------
# [5] BaseDecl ::= 'BASE' IRIREF
-BaseDecl = Comp('Base', Keyword('BASE') + Param('iri', IRIREF))
+BaseDecl = Comp("Base", Keyword("BASE") + Param("iri", IRIREF))
# [6] PrefixDecl ::= 'PREFIX' PNAME_NS IRIREF
-PrefixDecl = Comp(
- 'PrefixDecl', Keyword('PREFIX') + PNAME_NS + Param('iri', IRIREF))
+PrefixDecl = Comp("PrefixDecl", Keyword("PREFIX") + PNAME_NS + Param("iri", IRIREF))
# [4] Prologue ::= ( BaseDecl | PrefixDecl )*
Prologue = Group(ZeroOrMore(BaseDecl | PrefixDecl))
@@ -355,7 +375,7 @@ Var = VAR1 | VAR2
Var.setParseAction(lambda x: rdflib.term.Variable(x[0]))
# [137] PrefixedName ::= PNAME_LN | PNAME_NS
-PrefixedName = Comp('pname', PNAME_LN | PNAME_NS)
+PrefixedName = Comp("pname", PNAME_LN | PNAME_NS)
# [136] iri ::= IRIREF | PrefixedName
iri = IRIREF | PrefixedName
@@ -365,8 +385,14 @@ String = STRING_LITERAL_LONG1 | STRING_LITERAL_LONG2 | STRING_LITERAL1 | STRING_
# [129] RDFLiteral ::= String ( LANGTAG | ( '^^' iri ) )?
-RDFLiteral = Comp('literal', Param('string', String) + Optional(Param(
- 'lang', LANGTAG.leaveWhitespace()) | Literal('^^').leaveWhitespace() + Param('datatype', iri).leaveWhitespace()))
+RDFLiteral = Comp(
+ "literal",
+ Param("string", String)
+ + Optional(
+ Param("lang", LANGTAG.leaveWhitespace())
+ | Literal("^^").leaveWhitespace() + Param("datatype", iri).leaveWhitespace()
+ ),
+)
# [132] NumericLiteralPositive ::= INTEGER_POSITIVE | DECIMAL_POSITIVE | DOUBLE_POSITIVE
NumericLiteralPositive = DOUBLE_POSITIVE | DECIMAL_POSITIVE | INTEGER_POSITIVE
@@ -378,11 +404,14 @@ NumericLiteralNegative = DOUBLE_NEGATIVE | DECIMAL_NEGATIVE | INTEGER_NEGATIVE
NumericLiteralUnsigned = DOUBLE | DECIMAL | INTEGER
# [130] NumericLiteral ::= NumericLiteralUnsigned | NumericLiteralPositive | NumericLiteralNegative
-NumericLiteral = NumericLiteralUnsigned | NumericLiteralPositive | NumericLiteralNegative
+NumericLiteral = (
+ NumericLiteralUnsigned | NumericLiteralPositive | NumericLiteralNegative
+)
# [134] BooleanLiteral ::= 'true' | 'false'
-BooleanLiteral = Keyword('true').setParseAction(lambda: rdflib.Literal(True)) |\
- Keyword('false').setParseAction(lambda: rdflib.Literal(False))
+BooleanLiteral = Keyword("true").setParseAction(lambda: rdflib.Literal(True)) | Keyword(
+ "false"
+).setParseAction(lambda: rdflib.Literal(False))
# [138] BlankNode ::= BLANK_NODE_LABEL | ANON
BlankNode = BLANK_NODE_LABEL | ANON
@@ -397,19 +426,23 @@ VarOrTerm = Var | GraphTerm
VarOrIri = Var | iri
# [46] GraphRef ::= 'GRAPH' iri
-GraphRef = Keyword('GRAPH') + Param('graphiri', iri)
+GraphRef = Keyword("GRAPH") + Param("graphiri", iri)
# [47] GraphRefAll ::= GraphRef | 'DEFAULT' | 'NAMED' | 'ALL'
-GraphRefAll = GraphRef | Param('graphiri', Keyword('DEFAULT')) | Param(
- 'graphiri', Keyword('NAMED')) | Param('graphiri', Keyword('ALL'))
+GraphRefAll = (
+ GraphRef
+ | Param("graphiri", Keyword("DEFAULT"))
+ | Param("graphiri", Keyword("NAMED"))
+ | Param("graphiri", Keyword("ALL"))
+)
# [45] GraphOrDefault ::= 'DEFAULT' | 'GRAPH'? iri
-GraphOrDefault = ParamList('graph', Keyword(
- 'DEFAULT')) | Optional(Keyword('GRAPH')) + ParamList('graph', iri)
+GraphOrDefault = ParamList("graph", Keyword("DEFAULT")) | Optional(
+ Keyword("GRAPH")
+) + ParamList("graph", iri)
# [65] DataBlockValue ::= iri | RDFLiteral | NumericLiteral | BooleanLiteral | 'UNDEF'
-DataBlockValue = iri | RDFLiteral | NumericLiteral | BooleanLiteral | Keyword(
- 'UNDEF')
+DataBlockValue = iri | RDFLiteral | NumericLiteral | BooleanLiteral | Keyword("UNDEF")
# [78] Verb ::= VarOrIri | A
Verb = VarOrIri | A
@@ -433,37 +466,58 @@ GraphNodePath = VarOrTerm | TriplesNodePath
# [93] PathMod ::= '?' | '*' | '+'
-PathMod = Literal('?') | '*' | '+'
+PathMod = Literal("?") | "*" | "+"
# [96] PathOneInPropertySet ::= iri | A | '^' ( iri | A )
-PathOneInPropertySet = iri | A | Comp('InversePath', '^' + (iri | A))
+PathOneInPropertySet = iri | A | Comp("InversePath", "^" + (iri | A))
Path = Forward()
# [95] PathNegatedPropertySet ::= PathOneInPropertySet | '(' ( PathOneInPropertySet ( '|' PathOneInPropertySet )* )? ')'
-PathNegatedPropertySet = Comp('PathNegatedPropertySet', ParamList('part', PathOneInPropertySet) | '(' + Optional(
- ParamList('part', PathOneInPropertySet) + ZeroOrMore('|' + ParamList('part', PathOneInPropertySet))) + ')')
+PathNegatedPropertySet = Comp(
+ "PathNegatedPropertySet",
+ ParamList("part", PathOneInPropertySet)
+ | "("
+ + Optional(
+ ParamList("part", PathOneInPropertySet)
+ + ZeroOrMore("|" + ParamList("part", PathOneInPropertySet))
+ )
+ + ")",
+)
# [94] PathPrimary ::= iri | A | '!' PathNegatedPropertySet | '(' Path ')' | 'DISTINCT' '(' Path ')'
-PathPrimary = iri | A | Suppress('!') + PathNegatedPropertySet | Suppress('(') + Path + Suppress(
- ')') | Comp('DistinctPath', Keyword('DISTINCT') + '(' + Param('part', Path) + ')')
+PathPrimary = (
+ iri
+ | A
+ | Suppress("!") + PathNegatedPropertySet
+ | Suppress("(") + Path + Suppress(")")
+ | Comp("DistinctPath", Keyword("DISTINCT") + "(" + Param("part", Path) + ")")
+)
# [91] PathElt ::= PathPrimary Optional(PathMod)
-PathElt = Comp('PathElt', Param(
- 'part', PathPrimary) + Optional(Param('mod', PathMod.leaveWhitespace())))
+PathElt = Comp(
+ "PathElt",
+ Param("part", PathPrimary) + Optional(Param("mod", PathMod.leaveWhitespace())),
+)
# [92] PathEltOrInverse ::= PathElt | '^' PathElt
-PathEltOrInverse = PathElt | Suppress(
- '^') + Comp('PathEltOrInverse', Param('part', PathElt))
+PathEltOrInverse = PathElt | Suppress("^") + Comp(
+ "PathEltOrInverse", Param("part", PathElt)
+)
# [90] PathSequence ::= PathEltOrInverse ( '/' PathEltOrInverse )*
-PathSequence = Comp('PathSequence', ParamList('part', PathEltOrInverse) +
- ZeroOrMore('/' + ParamList('part', PathEltOrInverse)))
+PathSequence = Comp(
+ "PathSequence",
+ ParamList("part", PathEltOrInverse)
+ + ZeroOrMore("/" + ParamList("part", PathEltOrInverse)),
+)
# [89] PathAlternative ::= PathSequence ( '|' PathSequence )*
-PathAlternative = Comp('PathAlternative', ParamList('part', PathSequence) +
- ZeroOrMore('|' + ParamList('part', PathSequence)))
+PathAlternative = Comp(
+ "PathAlternative",
+ ParamList("part", PathSequence) + ZeroOrMore("|" + ParamList("part", PathSequence)),
+)
# [88] Path ::= PathAlternative
Path <<= PathAlternative
@@ -475,127 +529,172 @@ VerbPath = Path
ObjectPath = GraphNodePath
# [86] ObjectListPath ::= ObjectPath ( ',' ObjectPath )*
-ObjectListPath = ObjectPath + ZeroOrMore(',' + ObjectPath)
+ObjectListPath = ObjectPath + ZeroOrMore("," + ObjectPath)
GroupGraphPattern = Forward()
# [102] Collection ::= '(' OneOrMore(GraphNode) ')'
-Collection = Suppress('(') + OneOrMore(GraphNode) + Suppress(')')
+Collection = Suppress("(") + OneOrMore(GraphNode) + Suppress(")")
Collection.setParseAction(expandCollection)
# [103] CollectionPath ::= '(' OneOrMore(GraphNodePath) ')'
-CollectionPath = Suppress('(') + OneOrMore(GraphNodePath) + Suppress(')')
+CollectionPath = Suppress("(") + OneOrMore(GraphNodePath) + Suppress(")")
CollectionPath.setParseAction(expandCollection)
# [80] Object ::= GraphNode
Object = GraphNode
# [79] ObjectList ::= Object ( ',' Object )*
-ObjectList = Object + ZeroOrMore(',' + Object)
+ObjectList = Object + ZeroOrMore("," + Object)
# [83] PropertyListPathNotEmpty ::= ( VerbPath | VerbSimple ) ObjectListPath ( ';' ( ( VerbPath | VerbSimple ) ObjectList )? )*
-PropertyListPathNotEmpty = (VerbPath | VerbSimple) + ObjectListPath + ZeroOrMore(
- ';' + Optional((VerbPath | VerbSimple) + ObjectListPath))
+PropertyListPathNotEmpty = (
+ (VerbPath | VerbSimple)
+ + ObjectListPath
+ + ZeroOrMore(";" + Optional((VerbPath | VerbSimple) + ObjectListPath))
+)
# [82] PropertyListPath ::= Optional(PropertyListPathNotEmpty)
PropertyListPath = Optional(PropertyListPathNotEmpty)
# [77] PropertyListNotEmpty ::= Verb ObjectList ( ';' ( Verb ObjectList )? )*
-PropertyListNotEmpty = Verb + ObjectList + ZeroOrMore(';' + Optional(Verb +
- ObjectList))
+PropertyListNotEmpty = Verb + ObjectList + ZeroOrMore(";" + Optional(Verb + ObjectList))
# [76] PropertyList ::= Optional(PropertyListNotEmpty)
PropertyList = Optional(PropertyListNotEmpty)
# [99] BlankNodePropertyList ::= '[' PropertyListNotEmpty ']'
-BlankNodePropertyList = Group(
- Suppress('[') + PropertyListNotEmpty + Suppress(']'))
+BlankNodePropertyList = Group(Suppress("[") + PropertyListNotEmpty + Suppress("]"))
BlankNodePropertyList.setParseAction(expandBNodeTriples)
# [101] BlankNodePropertyListPath ::= '[' PropertyListPathNotEmpty ']'
BlankNodePropertyListPath = Group(
- Suppress('[') + PropertyListPathNotEmpty + Suppress(']'))
+ Suppress("[") + PropertyListPathNotEmpty + Suppress("]")
+)
BlankNodePropertyListPath.setParseAction(expandBNodeTriples)
# [98] TriplesNode ::= Collection | BlankNodePropertyList
-TriplesNode <<= (Collection | BlankNodePropertyList)
+TriplesNode <<= Collection | BlankNodePropertyList
# [100] TriplesNodePath ::= CollectionPath | BlankNodePropertyListPath
-TriplesNodePath <<= (CollectionPath | BlankNodePropertyListPath)
+TriplesNodePath <<= CollectionPath | BlankNodePropertyListPath
# [75] TriplesSameSubject ::= VarOrTerm PropertyListNotEmpty | TriplesNode PropertyList
-TriplesSameSubject = VarOrTerm + PropertyListNotEmpty | TriplesNode + \
- PropertyList
+TriplesSameSubject = VarOrTerm + PropertyListNotEmpty | TriplesNode + PropertyList
TriplesSameSubject.setParseAction(expandTriples)
# [52] TriplesTemplate ::= TriplesSameSubject ( '.' Optional(TriplesTemplate) )?
TriplesTemplate = Forward()
-TriplesTemplate <<= (ParamList('triples', TriplesSameSubject) + Optional(
- Suppress('.') + Optional(TriplesTemplate)))
+TriplesTemplate <<= ParamList("triples", TriplesSameSubject) + Optional(
+ Suppress(".") + Optional(TriplesTemplate)
+)
# [51] QuadsNotTriples ::= 'GRAPH' VarOrIri '{' Optional(TriplesTemplate) '}'
-QuadsNotTriples = Comp('QuadsNotTriples', Keyword('GRAPH') + Param(
- 'term', VarOrIri) + '{' + Optional(TriplesTemplate) + '}')
+QuadsNotTriples = Comp(
+ "QuadsNotTriples",
+ Keyword("GRAPH") + Param("term", VarOrIri) + "{" + Optional(TriplesTemplate) + "}",
+)
# [50] Quads ::= Optional(TriplesTemplate) ( QuadsNotTriples '.'? Optional(TriplesTemplate) )*
-Quads = Comp('Quads', Optional(TriplesTemplate) + ZeroOrMore(ParamList(
- 'quadsNotTriples', QuadsNotTriples) + Optional(Suppress('.')) + Optional(TriplesTemplate)))
+Quads = Comp(
+ "Quads",
+ Optional(TriplesTemplate)
+ + ZeroOrMore(
+ ParamList("quadsNotTriples", QuadsNotTriples)
+ + Optional(Suppress("."))
+ + Optional(TriplesTemplate)
+ ),
+)
# [48] QuadPattern ::= '{' Quads '}'
-QuadPattern = '{' + Param('quads', Quads) + '}'
+QuadPattern = "{" + Param("quads", Quads) + "}"
# [49] QuadData ::= '{' Quads '}'
-QuadData = '{' + Param('quads', Quads) + '}'
+QuadData = "{" + Param("quads", Quads) + "}"
# [81] TriplesSameSubjectPath ::= VarOrTerm PropertyListPathNotEmpty | TriplesNodePath PropertyListPath
-TriplesSameSubjectPath = VarOrTerm + \
- PropertyListPathNotEmpty | TriplesNodePath + PropertyListPath
+TriplesSameSubjectPath = (
+ VarOrTerm + PropertyListPathNotEmpty | TriplesNodePath + PropertyListPath
+)
TriplesSameSubjectPath.setParseAction(expandTriples)
# [55] TriplesBlock ::= TriplesSameSubjectPath ( '.' Optional(TriplesBlock) )?
TriplesBlock = Forward()
-TriplesBlock <<= (ParamList('triples', TriplesSameSubjectPath) + Optional(
- Suppress('.') + Optional(TriplesBlock)))
+TriplesBlock <<= ParamList("triples", TriplesSameSubjectPath) + Optional(
+ Suppress(".") + Optional(TriplesBlock)
+)
# [66] MinusGraphPattern ::= 'MINUS' GroupGraphPattern
MinusGraphPattern = Comp(
- 'MinusGraphPattern', Keyword('MINUS') + Param('graph', GroupGraphPattern))
+ "MinusGraphPattern", Keyword("MINUS") + Param("graph", GroupGraphPattern)
+)
# [67] GroupOrUnionGraphPattern ::= GroupGraphPattern ( 'UNION' GroupGraphPattern )*
-GroupOrUnionGraphPattern = Comp('GroupOrUnionGraphPattern', ParamList(
- 'graph', GroupGraphPattern) + ZeroOrMore(Keyword('UNION') + ParamList('graph', GroupGraphPattern)))
+GroupOrUnionGraphPattern = Comp(
+ "GroupOrUnionGraphPattern",
+ ParamList("graph", GroupGraphPattern)
+ + ZeroOrMore(Keyword("UNION") + ParamList("graph", GroupGraphPattern)),
+)
Expression = Forward()
# [72] ExpressionList ::= NIL | '(' Expression ( ',' Expression )* ')'
-ExpressionList = NIL | Group(
- Suppress('(') + delimitedList(Expression) + Suppress(')'))
+ExpressionList = NIL | Group(Suppress("(") + delimitedList(Expression) + Suppress(")"))
# [122] RegexExpression ::= 'REGEX' '(' Expression ',' Expression ( ',' Expression )? ')'
-RegexExpression = Comp('Builtin_REGEX', Keyword('REGEX') + '(' + Param('text', Expression) + ',' + Param(
- 'pattern', Expression) + Optional(',' + Param('flags', Expression)) + ')')
+RegexExpression = Comp(
+ "Builtin_REGEX",
+ Keyword("REGEX")
+ + "("
+ + Param("text", Expression)
+ + ","
+ + Param("pattern", Expression)
+ + Optional("," + Param("flags", Expression))
+ + ")",
+)
RegexExpression.setEvalFn(op.Builtin_REGEX)
# [123] SubstringExpression ::= 'SUBSTR' '(' Expression ',' Expression ( ',' Expression )? ')'
-SubstringExpression = Comp('Builtin_SUBSTR', Keyword('SUBSTR') + '(' + Param('arg', Expression) + ',' + Param(
- 'start', Expression) + Optional(',' + Param('length', Expression)) + ')').setEvalFn(op.Builtin_SUBSTR)
+SubstringExpression = Comp(
+ "Builtin_SUBSTR",
+ Keyword("SUBSTR")
+ + "("
+ + Param("arg", Expression)
+ + ","
+ + Param("start", Expression)
+ + Optional("," + Param("length", Expression))
+ + ")",
+).setEvalFn(op.Builtin_SUBSTR)
# [124] StrReplaceExpression ::= 'REPLACE' '(' Expression ',' Expression ',' Expression ( ',' Expression )? ')'
-StrReplaceExpression = Comp('Builtin_REPLACE', Keyword('REPLACE') + '(' + Param('arg', Expression) + ',' + Param(
- 'pattern', Expression) + ',' + Param('replacement', Expression) + Optional(',' + Param('flags', Expression)) + ')').setEvalFn(op.Builtin_REPLACE)
+StrReplaceExpression = Comp(
+ "Builtin_REPLACE",
+ Keyword("REPLACE")
+ + "("
+ + Param("arg", Expression)
+ + ","
+ + Param("pattern", Expression)
+ + ","
+ + Param("replacement", Expression)
+ + Optional("," + Param("flags", Expression))
+ + ")",
+).setEvalFn(op.Builtin_REPLACE)
# [125] ExistsFunc ::= 'EXISTS' GroupGraphPattern
-ExistsFunc = Comp('Builtin_EXISTS', Keyword('EXISTS') + Param(
- 'graph', GroupGraphPattern)).setEvalFn(op.Builtin_EXISTS)
+ExistsFunc = Comp(
+ "Builtin_EXISTS", Keyword("EXISTS") + Param("graph", GroupGraphPattern)
+).setEvalFn(op.Builtin_EXISTS)
# [126] NotExistsFunc ::= 'NOT' 'EXISTS' GroupGraphPattern
-NotExistsFunc = Comp('Builtin_NOTEXISTS', Keyword('NOT') + Keyword(
- 'EXISTS') + Param('graph', GroupGraphPattern)).setEvalFn(op.Builtin_EXISTS)
+NotExistsFunc = Comp(
+ "Builtin_NOTEXISTS",
+ Keyword("NOT") + Keyword("EXISTS") + Param("graph", GroupGraphPattern),
+).setEvalFn(op.Builtin_EXISTS)
# [127] Aggregate ::= 'COUNT' '(' 'DISTINCT'? ( '*' | Expression ) ')'
@@ -606,17 +705,33 @@ NotExistsFunc = Comp('Builtin_NOTEXISTS', Keyword('NOT') + Keyword(
# | 'SAMPLE' '(' Optional('DISTINCT') Expression ')'
# | 'GROUP_CONCAT' '(' Optional('DISTINCT') Expression ( ';' 'SEPARATOR' '=' String )? ')'
-_Distinct = Optional(Keyword('DISTINCT'))
-_AggregateParams = '(' + Param(
- 'distinct', _Distinct) + Param('vars', Expression) + ')'
-
-Aggregate = Comp('Aggregate_Count', Keyword('COUNT') + '(' + Param('distinct', _Distinct) + Param('vars', '*' | Expression) + ')')\
- | Comp('Aggregate_Sum', Keyword('SUM') + _AggregateParams)\
- | Comp('Aggregate_Min', Keyword('MIN') + _AggregateParams)\
- | Comp('Aggregate_Max', Keyword('MAX') + _AggregateParams)\
- | Comp('Aggregate_Avg', Keyword('AVG') + _AggregateParams)\
- | Comp('Aggregate_Sample', Keyword('SAMPLE') + _AggregateParams)\
- | Comp('Aggregate_GroupConcat', Keyword('GROUP_CONCAT') + '(' + Param('distinct', _Distinct) + Param('vars', Expression) + Optional(';' + Keyword('SEPARATOR') + '=' + Param('separator', String)) + ')')
+_Distinct = Optional(Keyword("DISTINCT"))
+_AggregateParams = "(" + Param("distinct", _Distinct) + Param("vars", Expression) + ")"
+
+Aggregate = (
+ Comp(
+ "Aggregate_Count",
+ Keyword("COUNT")
+ + "("
+ + Param("distinct", _Distinct)
+ + Param("vars", "*" | Expression)
+ + ")",
+ )
+ | Comp("Aggregate_Sum", Keyword("SUM") + _AggregateParams)
+ | Comp("Aggregate_Min", Keyword("MIN") + _AggregateParams)
+ | Comp("Aggregate_Max", Keyword("MAX") + _AggregateParams)
+ | Comp("Aggregate_Avg", Keyword("AVG") + _AggregateParams)
+ | Comp("Aggregate_Sample", Keyword("SAMPLE") + _AggregateParams)
+ | Comp(
+ "Aggregate_GroupConcat",
+ Keyword("GROUP_CONCAT")
+ + "("
+ + Param("distinct", _Distinct)
+ + Param("vars", Expression)
+ + Optional(";" + Keyword("SEPARATOR") + "=" + Param("separator", String))
+ + ")",
+ )
+)
# [121] BuiltInCall ::= Aggregate
# | 'STR' '(' + Expression + ')'
@@ -674,133 +789,330 @@ Aggregate = Comp('Aggregate_Count', Keyword('COUNT') + '(' + Param('distinct', _
# | ExistsFunc
# | NotExistsFunc
-BuiltInCall = Aggregate \
- | Comp('Builtin_STR', Keyword('STR') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_STR) \
- | Comp('Builtin_LANG', Keyword('LANG') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_LANG) \
- | Comp('Builtin_LANGMATCHES', Keyword('LANGMATCHES') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_LANGMATCHES) \
- | Comp('Builtin_DATATYPE', Keyword('DATATYPE') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_DATATYPE) \
- | Comp('Builtin_BOUND', Keyword('BOUND') + '(' + Param('arg', Var) + ')').setEvalFn(op.Builtin_BOUND) \
- | Comp('Builtin_IRI', Keyword('IRI') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_IRI) \
- | Comp('Builtin_URI', Keyword('URI') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_IRI) \
- | Comp('Builtin_BNODE', Keyword('BNODE') + ('(' + Param('arg', Expression) + ')' | NIL)).setEvalFn(op.Builtin_BNODE) \
- | Comp('Builtin_RAND', Keyword('RAND') + NIL).setEvalFn(op.Builtin_RAND) \
- | Comp('Builtin_ABS', Keyword('ABS') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_ABS) \
- | Comp('Builtin_CEIL', Keyword('CEIL') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_CEIL) \
- | Comp('Builtin_FLOOR', Keyword('FLOOR') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_FLOOR) \
- | Comp('Builtin_ROUND', Keyword('ROUND') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_ROUND) \
- | Comp('Builtin_CONCAT', Keyword('CONCAT') + Param('arg', ExpressionList)).setEvalFn(op.Builtin_CONCAT) \
- | SubstringExpression \
- | Comp('Builtin_STRLEN', Keyword('STRLEN') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_STRLEN) \
- | StrReplaceExpression \
- | Comp('Builtin_UCASE', Keyword('UCASE') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_UCASE) \
- | Comp('Builtin_LCASE', Keyword('LCASE') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_LCASE) \
- | Comp('Builtin_ENCODE_FOR_URI', Keyword('ENCODE_FOR_URI') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_ENCODE_FOR_URI) \
- | Comp('Builtin_CONTAINS', Keyword('CONTAINS') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_CONTAINS) \
- | Comp('Builtin_STRSTARTS', Keyword('STRSTARTS') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRSTARTS) \
- | Comp('Builtin_STRENDS', Keyword('STRENDS') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRENDS) \
- | Comp('Builtin_STRBEFORE', Keyword('STRBEFORE') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRBEFORE) \
- | Comp('Builtin_STRAFTER', Keyword('STRAFTER') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRAFTER) \
- | Comp('Builtin_YEAR', Keyword('YEAR') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_YEAR) \
- | Comp('Builtin_MONTH', Keyword('MONTH') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_MONTH) \
- | Comp('Builtin_DAY', Keyword('DAY') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_DAY) \
- | Comp('Builtin_HOURS', Keyword('HOURS') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_HOURS) \
- | Comp('Builtin_MINUTES', Keyword('MINUTES') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_MINUTES) \
- | Comp('Builtin_SECONDS', Keyword('SECONDS') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_SECONDS) \
- | Comp('Builtin_TIMEZONE', Keyword('TIMEZONE') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_TIMEZONE) \
- | Comp('Builtin_TZ', Keyword('TZ') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_TZ) \
- | Comp('Builtin_NOW', Keyword('NOW') + NIL).setEvalFn(op.Builtin_NOW) \
- | Comp('Builtin_UUID', Keyword('UUID') + NIL).setEvalFn(op.Builtin_UUID) \
- | Comp('Builtin_STRUUID', Keyword('STRUUID') + NIL).setEvalFn(op.Builtin_STRUUID) \
- | Comp('Builtin_MD5', Keyword('MD5') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_MD5) \
- | Comp('Builtin_SHA1', Keyword('SHA1') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_SHA1) \
- | Comp('Builtin_SHA256', Keyword('SHA256') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_SHA256) \
- | Comp('Builtin_SHA384', Keyword('SHA384') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_SHA384) \
- | Comp('Builtin_SHA512', Keyword('SHA512') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_SHA512) \
- | Comp('Builtin_COALESCE', Keyword('COALESCE') + Param('arg', ExpressionList)).setEvalFn(op.Builtin_COALESCE) \
- | Comp('Builtin_IF', Keyword('IF') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ',' + Param('arg3', Expression) + ')').setEvalFn(op.Builtin_IF) \
- | Comp('Builtin_STRLANG', Keyword('STRLANG') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRLANG) \
- | Comp('Builtin_STRDT', Keyword('STRDT') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_STRDT) \
- | Comp('Builtin_sameTerm', Keyword('sameTerm') + '(' + Param('arg1', Expression) + ',' + Param('arg2', Expression) + ')').setEvalFn(op.Builtin_sameTerm) \
- | Comp('Builtin_isIRI', Keyword('isIRI') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_isIRI) \
- | Comp('Builtin_isURI', Keyword('isURI') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_isIRI) \
- | Comp('Builtin_isBLANK', Keyword('isBLANK') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_isBLANK) \
- | Comp('Builtin_isLITERAL', Keyword('isLITERAL') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_isLITERAL) \
- | Comp('Builtin_isNUMERIC', Keyword('isNUMERIC') + '(' + Param('arg', Expression) + ')').setEvalFn(op.Builtin_isNUMERIC) \
- | RegexExpression \
- | ExistsFunc \
+BuiltInCall = (
+ Aggregate
+ | Comp(
+ "Builtin_STR", Keyword("STR") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_STR)
+ | Comp(
+ "Builtin_LANG", Keyword("LANG") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_LANG)
+ | Comp(
+ "Builtin_LANGMATCHES",
+ Keyword("LANGMATCHES")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_LANGMATCHES)
+ | Comp(
+ "Builtin_DATATYPE", Keyword("DATATYPE") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_DATATYPE)
+ | Comp("Builtin_BOUND", Keyword("BOUND") + "(" + Param("arg", Var) + ")").setEvalFn(
+ op.Builtin_BOUND
+ )
+ | Comp(
+ "Builtin_IRI", Keyword("IRI") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_IRI)
+ | Comp(
+ "Builtin_URI", Keyword("URI") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_IRI)
+ | Comp(
+ "Builtin_BNODE", Keyword("BNODE") + ("(" + Param("arg", Expression) + ")" | NIL)
+ ).setEvalFn(op.Builtin_BNODE)
+ | Comp("Builtin_RAND", Keyword("RAND") + NIL).setEvalFn(op.Builtin_RAND)
+ | Comp(
+ "Builtin_ABS", Keyword("ABS") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_ABS)
+ | Comp(
+ "Builtin_CEIL", Keyword("CEIL") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_CEIL)
+ | Comp(
+ "Builtin_FLOOR", Keyword("FLOOR") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_FLOOR)
+ | Comp(
+ "Builtin_ROUND", Keyword("ROUND") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_ROUND)
+ | Comp(
+ "Builtin_CONCAT", Keyword("CONCAT") + Param("arg", ExpressionList)
+ ).setEvalFn(op.Builtin_CONCAT)
+ | SubstringExpression
+ | Comp(
+ "Builtin_STRLEN", Keyword("STRLEN") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_STRLEN)
+ | StrReplaceExpression
+ | Comp(
+ "Builtin_UCASE", Keyword("UCASE") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_UCASE)
+ | Comp(
+ "Builtin_LCASE", Keyword("LCASE") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_LCASE)
+ | Comp(
+ "Builtin_ENCODE_FOR_URI",
+ Keyword("ENCODE_FOR_URI") + "(" + Param("arg", Expression) + ")",
+ ).setEvalFn(op.Builtin_ENCODE_FOR_URI)
+ | Comp(
+ "Builtin_CONTAINS",
+ Keyword("CONTAINS")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_CONTAINS)
+ | Comp(
+ "Builtin_STRSTARTS",
+ Keyword("STRSTARTS")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRSTARTS)
+ | Comp(
+ "Builtin_STRENDS",
+ Keyword("STRENDS")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRENDS)
+ | Comp(
+ "Builtin_STRBEFORE",
+ Keyword("STRBEFORE")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRBEFORE)
+ | Comp(
+ "Builtin_STRAFTER",
+ Keyword("STRAFTER")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRAFTER)
+ | Comp(
+ "Builtin_YEAR", Keyword("YEAR") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_YEAR)
+ | Comp(
+ "Builtin_MONTH", Keyword("MONTH") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_MONTH)
+ | Comp(
+ "Builtin_DAY", Keyword("DAY") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_DAY)
+ | Comp(
+ "Builtin_HOURS", Keyword("HOURS") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_HOURS)
+ | Comp(
+ "Builtin_MINUTES", Keyword("MINUTES") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_MINUTES)
+ | Comp(
+ "Builtin_SECONDS", Keyword("SECONDS") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_SECONDS)
+ | Comp(
+ "Builtin_TIMEZONE", Keyword("TIMEZONE") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_TIMEZONE)
+ | Comp(
+ "Builtin_TZ", Keyword("TZ") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_TZ)
+ | Comp("Builtin_NOW", Keyword("NOW") + NIL).setEvalFn(op.Builtin_NOW)
+ | Comp("Builtin_UUID", Keyword("UUID") + NIL).setEvalFn(op.Builtin_UUID)
+ | Comp("Builtin_STRUUID", Keyword("STRUUID") + NIL).setEvalFn(op.Builtin_STRUUID)
+ | Comp(
+ "Builtin_MD5", Keyword("MD5") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_MD5)
+ | Comp(
+ "Builtin_SHA1", Keyword("SHA1") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_SHA1)
+ | Comp(
+ "Builtin_SHA256", Keyword("SHA256") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_SHA256)
+ | Comp(
+ "Builtin_SHA384", Keyword("SHA384") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_SHA384)
+ | Comp(
+ "Builtin_SHA512", Keyword("SHA512") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_SHA512)
+ | Comp(
+ "Builtin_COALESCE", Keyword("COALESCE") + Param("arg", ExpressionList)
+ ).setEvalFn(op.Builtin_COALESCE)
+ | Comp(
+ "Builtin_IF",
+ Keyword("IF")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ","
+ + Param("arg3", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_IF)
+ | Comp(
+ "Builtin_STRLANG",
+ Keyword("STRLANG")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRLANG)
+ | Comp(
+ "Builtin_STRDT",
+ Keyword("STRDT")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_STRDT)
+ | Comp(
+ "Builtin_sameTerm",
+ Keyword("sameTerm")
+ + "("
+ + Param("arg1", Expression)
+ + ","
+ + Param("arg2", Expression)
+ + ")",
+ ).setEvalFn(op.Builtin_sameTerm)
+ | Comp(
+ "Builtin_isIRI", Keyword("isIRI") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_isIRI)
+ | Comp(
+ "Builtin_isURI", Keyword("isURI") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_isIRI)
+ | Comp(
+ "Builtin_isBLANK", Keyword("isBLANK") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_isBLANK)
+ | Comp(
+ "Builtin_isLITERAL", Keyword("isLITERAL") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_isLITERAL)
+ | Comp(
+ "Builtin_isNUMERIC", Keyword("isNUMERIC") + "(" + Param("arg", Expression) + ")"
+ ).setEvalFn(op.Builtin_isNUMERIC)
+ | RegexExpression
+ | ExistsFunc
| NotExistsFunc
+)
# [71] ArgList ::= NIL | '(' 'DISTINCT'? Expression ( ',' Expression )* ')'
-ArgList = NIL | '(' + Param('distinct', _Distinct) + delimitedList(
- ParamList('expr', Expression)) + ')'
+ArgList = (
+ NIL
+ | "("
+ + Param("distinct", _Distinct)
+ + delimitedList(ParamList("expr", Expression))
+ + ")"
+)
# [128] iriOrFunction ::= iri Optional(ArgList)
-iriOrFunction = (Comp(
- 'Function', Param('iri', iri) + ArgList).setEvalFn(op.Function)) | iri
+iriOrFunction = (
+ Comp("Function", Param("iri", iri) + ArgList).setEvalFn(op.Function)
+) | iri
# [70] FunctionCall ::= iri ArgList
-FunctionCall = Comp(
- 'Function', Param('iri', iri) + ArgList).setEvalFn(op.Function)
+FunctionCall = Comp("Function", Param("iri", iri) + ArgList).setEvalFn(op.Function)
# [120] BrackettedExpression ::= '(' Expression ')'
-BrackettedExpression = Suppress('(') + Expression + Suppress(')')
+BrackettedExpression = Suppress("(") + Expression + Suppress(")")
# [119] PrimaryExpression ::= BrackettedExpression | BuiltInCall | iriOrFunction | RDFLiteral | NumericLiteral | BooleanLiteral | Var
-PrimaryExpression = BrackettedExpression | BuiltInCall | iriOrFunction | RDFLiteral | NumericLiteral | BooleanLiteral | Var
+PrimaryExpression = (
+ BrackettedExpression
+ | BuiltInCall
+ | iriOrFunction
+ | RDFLiteral
+ | NumericLiteral
+ | BooleanLiteral
+ | Var
+)
# [118] UnaryExpression ::= '!' PrimaryExpression
# | '+' PrimaryExpression
# | '-' PrimaryExpression
# | PrimaryExpression
-UnaryExpression = Comp('UnaryNot', '!' + Param('expr', PrimaryExpression)).setEvalFn(op.UnaryNot) \
- | Comp('UnaryPlus', '+' + Param('expr', PrimaryExpression)).setEvalFn(op.UnaryPlus) \
- | Comp('UnaryMinus', '-' + Param('expr', PrimaryExpression)).setEvalFn(op.UnaryMinus) \
+UnaryExpression = (
+ Comp("UnaryNot", "!" + Param("expr", PrimaryExpression)).setEvalFn(op.UnaryNot)
+ | Comp("UnaryPlus", "+" + Param("expr", PrimaryExpression)).setEvalFn(op.UnaryPlus)
+ | Comp("UnaryMinus", "-" + Param("expr", PrimaryExpression)).setEvalFn(
+ op.UnaryMinus
+ )
| PrimaryExpression
+)
# [117] MultiplicativeExpression ::= UnaryExpression ( '*' UnaryExpression | '/' UnaryExpression )*
-MultiplicativeExpression = Comp('MultiplicativeExpression', Param('expr', UnaryExpression) + ZeroOrMore(ParamList('op', '*') + ParamList(
- 'other', UnaryExpression) | ParamList('op', '/') + ParamList('other', UnaryExpression))).setEvalFn(op.MultiplicativeExpression)
+MultiplicativeExpression = Comp(
+ "MultiplicativeExpression",
+ Param("expr", UnaryExpression)
+ + ZeroOrMore(
+ ParamList("op", "*") + ParamList("other", UnaryExpression)
+ | ParamList("op", "/") + ParamList("other", UnaryExpression)
+ ),
+).setEvalFn(op.MultiplicativeExpression)
# [116] AdditiveExpression ::= MultiplicativeExpression ( '+' MultiplicativeExpression | '-' MultiplicativeExpression | ( NumericLiteralPositive | NumericLiteralNegative ) ( ( '*' UnaryExpression ) | ( '/' UnaryExpression ) )* )*
# NOTE: The second part of this production is there because:
-### "In signed numbers, no white space is allowed between the sign and the number. The AdditiveExpression grammar rule allows for this by covering the two cases of an expression followed by a signed number. These produce an addition or subtraction of the unsigned number as appropriate."
+# "In signed numbers, no white space is allowed between the sign and the number. The AdditiveExpression grammar rule allows for this by covering the two cases of an expression followed by a signed number. These produce an addition or subtraction of the unsigned number as appropriate."
-# Here (I think) this is not nescessary since pyparsing doesn't separate
+# Here (I think) this is not necessary since pyparsing doesn't separate
# tokenizing and parsing
-AdditiveExpression = Comp('AdditiveExpression', Param('expr', MultiplicativeExpression) +
- ZeroOrMore(ParamList('op', '+') + ParamList('other', MultiplicativeExpression) |
- ParamList('op', '-') + ParamList('other', MultiplicativeExpression))).setEvalFn(op.AdditiveExpression)
+AdditiveExpression = Comp(
+ "AdditiveExpression",
+ Param("expr", MultiplicativeExpression)
+ + ZeroOrMore(
+ ParamList("op", "+") + ParamList("other", MultiplicativeExpression)
+ | ParamList("op", "-") + ParamList("other", MultiplicativeExpression)
+ ),
+).setEvalFn(op.AdditiveExpression)
# [115] NumericExpression ::= AdditiveExpression
NumericExpression = AdditiveExpression
# [114] RelationalExpression ::= NumericExpression ( '=' NumericExpression | '!=' NumericExpression | '<' NumericExpression | '>' NumericExpression | '<=' NumericExpression | '>=' NumericExpression | 'IN' ExpressionList | 'NOT' 'IN' ExpressionList )?
-RelationalExpression = Comp('RelationalExpression', Param('expr', NumericExpression) + Optional(
- Param('op', '=') + Param('other', NumericExpression) |
- Param('op', '!=') + Param('other', NumericExpression) |
- Param('op', '<') + Param('other', NumericExpression) |
- Param('op', '>') + Param('other', NumericExpression) |
- Param('op', '<=') + Param('other', NumericExpression) |
- Param('op', '>=') + Param('other', NumericExpression) |
- Param('op', Keyword('IN')) + Param('other', ExpressionList) |
- Param('op', Combine(Keyword('NOT') + Keyword('IN'), adjacent=False, joinString=" ")) + Param('other', ExpressionList))).setEvalFn(op.RelationalExpression)
+RelationalExpression = Comp(
+ "RelationalExpression",
+ Param("expr", NumericExpression)
+ + Optional(
+ Param("op", "=") + Param("other", NumericExpression)
+ | Param("op", "!=") + Param("other", NumericExpression)
+ | Param("op", "<") + Param("other", NumericExpression)
+ | Param("op", ">") + Param("other", NumericExpression)
+ | Param("op", "<=") + Param("other", NumericExpression)
+ | Param("op", ">=") + Param("other", NumericExpression)
+ | Param("op", Keyword("IN")) + Param("other", ExpressionList)
+ | Param(
+ "op",
+ Combine(Keyword("NOT") + Keyword("IN"), adjacent=False, joinString=" "),
+ )
+ + Param("other", ExpressionList)
+ ),
+).setEvalFn(op.RelationalExpression)
# [113] ValueLogical ::= RelationalExpression
ValueLogical = RelationalExpression
# [112] ConditionalAndExpression ::= ValueLogical ( '&&' ValueLogical )*
-ConditionalAndExpression = Comp('ConditionalAndExpression', Param('expr', ValueLogical) + ZeroOrMore(
- '&&' + ParamList('other', ValueLogical))).setEvalFn(op.ConditionalAndExpression)
+ConditionalAndExpression = Comp(
+ "ConditionalAndExpression",
+ Param("expr", ValueLogical) + ZeroOrMore("&&" + ParamList("other", ValueLogical)),
+).setEvalFn(op.ConditionalAndExpression)
# [111] ConditionalOrExpression ::= ConditionalAndExpression ( '||' ConditionalAndExpression )*
-ConditionalOrExpression = Comp('ConditionalOrExpression', Param('expr', ConditionalAndExpression) + ZeroOrMore(
- '||' + ParamList('other', ConditionalAndExpression))).setEvalFn(op.ConditionalOrExpression)
+ConditionalOrExpression = Comp(
+ "ConditionalOrExpression",
+ Param("expr", ConditionalAndExpression)
+ + ZeroOrMore("||" + ParamList("other", ConditionalAndExpression)),
+).setEvalFn(op.ConditionalOrExpression)
# [110] Expression ::= ConditionalOrExpression
Expression <<= ConditionalOrExpression
@@ -810,7 +1122,7 @@ Expression <<= ConditionalOrExpression
Constraint = BrackettedExpression | BuiltInCall | FunctionCall
# [68] Filter ::= 'FILTER' Constraint
-Filter = Comp('Filter', Keyword('FILTER') + Param('expr', Constraint))
+Filter = Comp("Filter", Keyword("FILTER") + Param("expr", Constraint))
# [16] SourceSelector ::= iri
@@ -820,128 +1132,217 @@ SourceSelector = iri
DefaultGraphClause = SourceSelector
# [15] NamedGraphClause ::= 'NAMED' SourceSelector
-NamedGraphClause = Keyword('NAMED') + Param('named', SourceSelector)
+NamedGraphClause = Keyword("NAMED") + Param("named", SourceSelector)
# [13] DatasetClause ::= 'FROM' ( DefaultGraphClause | NamedGraphClause )
-DatasetClause = Comp('DatasetClause', Keyword(
- 'FROM') + (Param('default', DefaultGraphClause) | NamedGraphClause))
+DatasetClause = Comp(
+ "DatasetClause",
+ Keyword("FROM") + (Param("default", DefaultGraphClause) | NamedGraphClause),
+)
# [20] GroupCondition ::= BuiltInCall | FunctionCall | '(' Expression ( 'AS' Var )? ')' | Var
-GroupCondition = BuiltInCall | FunctionCall | Comp('GroupAs', '(' + Param(
- 'expr', Expression) + Optional(Keyword('AS') + Param('var', Var)) + ')') | Var
+GroupCondition = (
+ BuiltInCall
+ | FunctionCall
+ | Comp(
+ "GroupAs",
+ "("
+ + Param("expr", Expression)
+ + Optional(Keyword("AS") + Param("var", Var))
+ + ")",
+ )
+ | Var
+)
# [19] GroupClause ::= 'GROUP' 'BY' GroupCondition+
-GroupClause = Comp('GroupClause', Keyword('GROUP') + Keyword(
- 'BY') + OneOrMore(ParamList('condition', GroupCondition)))
+GroupClause = Comp(
+ "GroupClause",
+ Keyword("GROUP")
+ + Keyword("BY")
+ + OneOrMore(ParamList("condition", GroupCondition)),
+)
-_Silent = Optional(Param('silent', Keyword('SILENT')))
+_Silent = Optional(Param("silent", Keyword("SILENT")))
# [31] Load ::= 'LOAD' 'SILENT'? iri ( 'INTO' GraphRef )?
-Load = Comp('Load', Keyword('LOAD') + _Silent + Param('iri', iri) +
- Optional(Keyword('INTO') + GraphRef))
+Load = Comp(
+ "Load",
+ Keyword("LOAD")
+ + _Silent
+ + Param("iri", iri)
+ + Optional(Keyword("INTO") + GraphRef),
+)
# [32] Clear ::= 'CLEAR' 'SILENT'? GraphRefAll
-Clear = Comp('Clear', Keyword('CLEAR') + _Silent + GraphRefAll)
+Clear = Comp("Clear", Keyword("CLEAR") + _Silent + GraphRefAll)
# [33] Drop ::= 'DROP' _Silent GraphRefAll
-Drop = Comp('Drop', Keyword('DROP') + _Silent + GraphRefAll)
+Drop = Comp("Drop", Keyword("DROP") + _Silent + GraphRefAll)
# [34] Create ::= 'CREATE' _Silent GraphRef
-Create = Comp('Create', Keyword('CREATE') + _Silent + GraphRef)
+Create = Comp("Create", Keyword("CREATE") + _Silent + GraphRef)
# [35] Add ::= 'ADD' _Silent GraphOrDefault 'TO' GraphOrDefault
-Add = Comp('Add', Keyword(
- 'ADD') + _Silent + GraphOrDefault + Keyword('TO') + GraphOrDefault)
+Add = Comp(
+ "Add", Keyword("ADD") + _Silent + GraphOrDefault + Keyword("TO") + GraphOrDefault
+)
# [36] Move ::= 'MOVE' _Silent GraphOrDefault 'TO' GraphOrDefault
-Move = Comp('Move', Keyword(
- 'MOVE') + _Silent + GraphOrDefault + Keyword('TO') + GraphOrDefault)
+Move = Comp(
+ "Move", Keyword("MOVE") + _Silent + GraphOrDefault + Keyword("TO") + GraphOrDefault
+)
# [37] Copy ::= 'COPY' _Silent GraphOrDefault 'TO' GraphOrDefault
-Copy = Comp('Copy', Keyword(
- 'COPY') + _Silent + GraphOrDefault + Keyword('TO') + GraphOrDefault)
+Copy = Comp(
+ "Copy", Keyword("COPY") + _Silent + GraphOrDefault + Keyword("TO") + GraphOrDefault
+)
# [38] InsertData ::= 'INSERT DATA' QuadData
-InsertData = Comp('InsertData', Keyword('INSERT') + Keyword('DATA') + QuadData)
+InsertData = Comp("InsertData", Keyword("INSERT") + Keyword("DATA") + QuadData)
# [39] DeleteData ::= 'DELETE DATA' QuadData
-DeleteData = Comp('DeleteData', Keyword('DELETE') + Keyword('DATA') + QuadData)
+DeleteData = Comp("DeleteData", Keyword("DELETE") + Keyword("DATA") + QuadData)
# [40] DeleteWhere ::= 'DELETE WHERE' QuadPattern
-DeleteWhere = Comp(
- 'DeleteWhere', Keyword('DELETE') + Keyword('WHERE') + QuadPattern)
+DeleteWhere = Comp("DeleteWhere", Keyword("DELETE") + Keyword("WHERE") + QuadPattern)
# [42] DeleteClause ::= 'DELETE' QuadPattern
-DeleteClause = Comp('DeleteClause', Keyword('DELETE') + QuadPattern)
+DeleteClause = Comp("DeleteClause", Keyword("DELETE") + QuadPattern)
# [43] InsertClause ::= 'INSERT' QuadPattern
-InsertClause = Comp('InsertClause', Keyword('INSERT') + QuadPattern)
+InsertClause = Comp("InsertClause", Keyword("INSERT") + QuadPattern)
# [44] UsingClause ::= 'USING' ( iri | 'NAMED' iri )
-UsingClause = Comp('UsingClause', Keyword('USING') + (
- Param('default', iri) | Keyword('NAMED') + Param('named', iri)))
+UsingClause = Comp(
+ "UsingClause",
+ Keyword("USING") + (Param("default", iri) | Keyword("NAMED") + Param("named", iri)),
+)
# [41] Modify ::= ( 'WITH' iri )? ( DeleteClause Optional(InsertClause) | InsertClause ) ZeroOrMore(UsingClause) 'WHERE' GroupGraphPattern
-Modify = Comp('Modify', Optional(Keyword('WITH') + Param('withClause', iri)) + (Param('delete', DeleteClause) + Optional(Param(
- 'insert', InsertClause)) | Param('insert', InsertClause)) + ZeroOrMore(ParamList('using', UsingClause)) + Keyword('WHERE') + Param('where', GroupGraphPattern))
+Modify = Comp(
+ "Modify",
+ Optional(Keyword("WITH") + Param("withClause", iri))
+ + (
+ Param("delete", DeleteClause) + Optional(Param("insert", InsertClause))
+ | Param("insert", InsertClause)
+ )
+ + ZeroOrMore(ParamList("using", UsingClause))
+ + Keyword("WHERE")
+ + Param("where", GroupGraphPattern),
+)
# [30] Update1 ::= Load | Clear | Drop | Add | Move | Copy | Create | InsertData | DeleteData | DeleteWhere | Modify
-Update1 = Load | Clear | Drop | Add | Move | Copy | Create | InsertData | DeleteData | DeleteWhere | Modify
+Update1 = (
+ Load
+ | Clear
+ | Drop
+ | Add
+ | Move
+ | Copy
+ | Create
+ | InsertData
+ | DeleteData
+ | DeleteWhere
+ | Modify
+)
# [63] InlineDataOneVar ::= Var '{' ZeroOrMore(DataBlockValue) '}'
-InlineDataOneVar = ParamList(
- 'var', Var) + '{' + ZeroOrMore(ParamList('value', DataBlockValue)) + '}'
+InlineDataOneVar = (
+ ParamList("var", Var) + "{" + ZeroOrMore(ParamList("value", DataBlockValue)) + "}"
+)
# [64] InlineDataFull ::= ( NIL | '(' ZeroOrMore(Var) ')' ) '{' ( '(' ZeroOrMore(DataBlockValue) ')' | NIL )* '}'
-InlineDataFull = (NIL | '(' + ZeroOrMore(ParamList('var', Var)) + ')') + '{' + ZeroOrMore(
- ParamList('value', Group(Suppress('(') + ZeroOrMore(DataBlockValue) + Suppress(')') | NIL))) + '}'
+InlineDataFull = (
+ (NIL | "(" + ZeroOrMore(ParamList("var", Var)) + ")")
+ + "{"
+ + ZeroOrMore(
+ ParamList(
+ "value",
+ Group(Suppress("(") + ZeroOrMore(DataBlockValue) + Suppress(")") | NIL),
+ )
+ )
+ + "}"
+)
# [62] DataBlock ::= InlineDataOneVar | InlineDataFull
DataBlock = InlineDataOneVar | InlineDataFull
# [28] ValuesClause ::= ( 'VALUES' DataBlock )?
-ValuesClause = Optional(Param(
- 'valuesClause', Comp('ValuesClause', Keyword('VALUES') + DataBlock)))
+ValuesClause = Optional(
+ Param("valuesClause", Comp("ValuesClause", Keyword("VALUES") + DataBlock))
+)
# [74] ConstructTriples ::= TriplesSameSubject ( '.' Optional(ConstructTriples) )?
ConstructTriples = Forward()
-ConstructTriples <<= (ParamList('template', TriplesSameSubject) + Optional(
- Suppress('.') + Optional(ConstructTriples)))
+ConstructTriples <<= ParamList("template", TriplesSameSubject) + Optional(
+ Suppress(".") + Optional(ConstructTriples)
+)
# [73] ConstructTemplate ::= '{' Optional(ConstructTriples) '}'
-ConstructTemplate = Suppress('{') + Optional(ConstructTriples) + Suppress('}')
+ConstructTemplate = Suppress("{") + Optional(ConstructTriples) + Suppress("}")
# [57] OptionalGraphPattern ::= 'OPTIONAL' GroupGraphPattern
-OptionalGraphPattern = Comp('OptionalGraphPattern', Keyword(
- 'OPTIONAL') + Param('graph', GroupGraphPattern))
+OptionalGraphPattern = Comp(
+ "OptionalGraphPattern", Keyword("OPTIONAL") + Param("graph", GroupGraphPattern)
+)
# [58] GraphGraphPattern ::= 'GRAPH' VarOrIri GroupGraphPattern
-GraphGraphPattern = Comp('GraphGraphPattern', Keyword(
- 'GRAPH') + Param('term', VarOrIri) + Param('graph', GroupGraphPattern))
+GraphGraphPattern = Comp(
+ "GraphGraphPattern",
+ Keyword("GRAPH") + Param("term", VarOrIri) + Param("graph", GroupGraphPattern),
+)
# [59] ServiceGraphPattern ::= 'SERVICE' _Silent VarOrIri GroupGraphPattern
-ServiceGraphPattern = Comp('ServiceGraphPattern', Keyword(
- 'SERVICE') + _Silent + Param('term', VarOrIri) + Param('graph', GroupGraphPattern))
+ServiceGraphPattern = Comp(
+ "ServiceGraphPattern",
+ Keyword("SERVICE")
+ + _Silent
+ + Param("term", VarOrIri)
+ + Param("graph", GroupGraphPattern),
+)
# [60] Bind ::= 'BIND' '(' Expression 'AS' Var ')'
-Bind = Comp('Bind', Keyword('BIND') + '(' + Param(
- 'expr', Expression) + Keyword('AS') + Param('var', Var) + ')')
+Bind = Comp(
+ "Bind",
+ Keyword("BIND")
+ + "("
+ + Param("expr", Expression)
+ + Keyword("AS")
+ + Param("var", Var)
+ + ")",
+)
# [61] InlineData ::= 'VALUES' DataBlock
-InlineData = Comp('InlineData', Keyword('VALUES') + DataBlock)
+InlineData = Comp("InlineData", Keyword("VALUES") + DataBlock)
# [56] GraphPatternNotTriples ::= GroupOrUnionGraphPattern | OptionalGraphPattern | MinusGraphPattern | GraphGraphPattern | ServiceGraphPattern | Filter | Bind | InlineData
-GraphPatternNotTriples = GroupOrUnionGraphPattern | OptionalGraphPattern | MinusGraphPattern | GraphGraphPattern | ServiceGraphPattern | Filter | Bind | InlineData
+GraphPatternNotTriples = (
+ GroupOrUnionGraphPattern
+ | OptionalGraphPattern
+ | MinusGraphPattern
+ | GraphGraphPattern
+ | ServiceGraphPattern
+ | Filter
+ | Bind
+ | InlineData
+)
# [54] GroupGraphPatternSub ::= Optional(TriplesBlock) ( GraphPatternNotTriples '.'? Optional(TriplesBlock) )*
-GroupGraphPatternSub = Comp('GroupGraphPatternSub', Optional(ParamList('part', Comp('TriplesBlock', TriplesBlock))) + ZeroOrMore(
- ParamList('part', GraphPatternNotTriples) + Optional('.') + Optional(ParamList('part', Comp('TriplesBlock', TriplesBlock)))))
+GroupGraphPatternSub = Comp(
+ "GroupGraphPatternSub",
+ Optional(ParamList("part", Comp("TriplesBlock", TriplesBlock)))
+ + ZeroOrMore(
+ ParamList("part", GraphPatternNotTriples)
+ + Optional(".")
+ + Optional(ParamList("part", Comp("TriplesBlock", TriplesBlock)))
+ ),
+)
# ----------------
@@ -949,70 +1350,151 @@ GroupGraphPatternSub = Comp('GroupGraphPatternSub', Optional(ParamList('part', C
HavingCondition = Constraint
# [21] HavingClause ::= 'HAVING' HavingCondition+
-HavingClause = Comp('HavingClause', Keyword(
- 'HAVING') + OneOrMore(ParamList('condition', HavingCondition)))
+HavingClause = Comp(
+ "HavingClause",
+ Keyword("HAVING") + OneOrMore(ParamList("condition", HavingCondition)),
+)
# [24] OrderCondition ::= ( ( 'ASC' | 'DESC' ) BrackettedExpression )
# | ( Constraint | Var )
-OrderCondition = Comp('OrderCondition', Param('order', Keyword('ASC') | Keyword(
- 'DESC')) + Param('expr', BrackettedExpression) | Param('expr', Constraint | Var))
+OrderCondition = Comp(
+ "OrderCondition",
+ Param("order", Keyword("ASC") | Keyword("DESC"))
+ + Param("expr", BrackettedExpression)
+ | Param("expr", Constraint | Var),
+)
# [23] OrderClause ::= 'ORDER' 'BY' OneOrMore(OrderCondition)
-OrderClause = Comp('OrderClause', Keyword('ORDER') + Keyword(
- 'BY') + OneOrMore(ParamList('condition', OrderCondition)))
+OrderClause = Comp(
+ "OrderClause",
+ Keyword("ORDER")
+ + Keyword("BY")
+ + OneOrMore(ParamList("condition", OrderCondition)),
+)
# [26] LimitClause ::= 'LIMIT' INTEGER
-LimitClause = Keyword('LIMIT') + Param('limit', INTEGER)
+LimitClause = Keyword("LIMIT") + Param("limit", INTEGER)
# [27] OffsetClause ::= 'OFFSET' INTEGER
-OffsetClause = Keyword('OFFSET') + Param('offset', INTEGER)
+OffsetClause = Keyword("OFFSET") + Param("offset", INTEGER)
# [25] LimitOffsetClauses ::= LimitClause Optional(OffsetClause) | OffsetClause Optional(LimitClause)
-LimitOffsetClauses = Comp('LimitOffsetClauses', LimitClause + Optional(
- OffsetClause) | OffsetClause + Optional(LimitClause))
+LimitOffsetClauses = Comp(
+ "LimitOffsetClauses",
+ LimitClause + Optional(OffsetClause) | OffsetClause + Optional(LimitClause),
+)
# [18] SolutionModifier ::= GroupClause? HavingClause? OrderClause? LimitOffsetClauses?
-SolutionModifier = Optional(Param('groupby', GroupClause)) + Optional(Param('having', HavingClause)) + Optional(
- Param('orderby', OrderClause)) + Optional(Param('limitoffset', LimitOffsetClauses))
+SolutionModifier = (
+ Optional(Param("groupby", GroupClause))
+ + Optional(Param("having", HavingClause))
+ + Optional(Param("orderby", OrderClause))
+ + Optional(Param("limitoffset", LimitOffsetClauses))
+)
# [9] SelectClause ::= 'SELECT' ( 'DISTINCT' | 'REDUCED' )? ( ( Var | ( '(' Expression 'AS' Var ')' ) )+ | '*' )
-SelectClause = Keyword('SELECT') + Optional(Param('modifier', Keyword('DISTINCT') | Keyword('REDUCED'))) + (OneOrMore(ParamList('projection', Comp('vars',
- Param('var', Var) | (Literal('(') + Param('expr', Expression) + Keyword('AS') + Param('evar', Var) + ')')))) | '*')
+SelectClause = (
+ Keyword("SELECT")
+ + Optional(Param("modifier", Keyword("DISTINCT") | Keyword("REDUCED")))
+ + (
+ OneOrMore(
+ ParamList(
+ "projection",
+ Comp(
+ "vars",
+ Param("var", Var)
+ | (
+ Literal("(")
+ + Param("expr", Expression)
+ + Keyword("AS")
+ + Param("evar", Var)
+ + ")"
+ ),
+ ),
+ )
+ )
+ | "*"
+ )
+)
# [17] WhereClause ::= 'WHERE'? GroupGraphPattern
-WhereClause = Optional(Keyword('WHERE')) + Param('where', GroupGraphPattern)
+WhereClause = Optional(Keyword("WHERE")) + Param("where", GroupGraphPattern)
# [8] SubSelect ::= SelectClause WhereClause SolutionModifier ValuesClause
-SubSelect = Comp('SubSelect', SelectClause + WhereClause +
- SolutionModifier + ValuesClause)
+SubSelect = Comp(
+ "SubSelect", SelectClause + WhereClause + SolutionModifier + ValuesClause
+)
# [53] GroupGraphPattern ::= '{' ( SubSelect | GroupGraphPatternSub ) '}'
-GroupGraphPattern <<= (
- Suppress('{') + (SubSelect | GroupGraphPatternSub) + Suppress('}'))
+GroupGraphPattern <<= Suppress("{") + (SubSelect | GroupGraphPatternSub) + Suppress("}")
# [7] SelectQuery ::= SelectClause DatasetClause* WhereClause SolutionModifier
-SelectQuery = Comp('SelectQuery', SelectClause + ZeroOrMore(ParamList(
- 'datasetClause', DatasetClause)) + WhereClause + SolutionModifier + ValuesClause)
+SelectQuery = Comp(
+ "SelectQuery",
+ SelectClause
+ + ZeroOrMore(ParamList("datasetClause", DatasetClause))
+ + WhereClause
+ + SolutionModifier
+ + ValuesClause,
+)
# [10] ConstructQuery ::= 'CONSTRUCT' ( ConstructTemplate DatasetClause* WhereClause SolutionModifier | DatasetClause* 'WHERE' '{' TriplesTemplate? '}' SolutionModifier )
# NOTE: The CONSTRUCT WHERE alternative has unnescessarily many Comp/Param pairs
# to allow it to through the same algebra translation process
-ConstructQuery = Comp('ConstructQuery', Keyword('CONSTRUCT') + (ConstructTemplate + ZeroOrMore(ParamList('datasetClause', DatasetClause)) + WhereClause + SolutionModifier + ValuesClause | ZeroOrMore(ParamList(
- 'datasetClause', DatasetClause)) + Keyword('WHERE') + '{' + Optional(Param('where', Comp('FakeGroupGraphPatten', ParamList('part', Comp('TriplesBlock', TriplesTemplate))))) + '}' + SolutionModifier + ValuesClause))
+ConstructQuery = Comp(
+ "ConstructQuery",
+ Keyword("CONSTRUCT")
+ + (
+ ConstructTemplate
+ + ZeroOrMore(ParamList("datasetClause", DatasetClause))
+ + WhereClause
+ + SolutionModifier
+ + ValuesClause
+ | ZeroOrMore(ParamList("datasetClause", DatasetClause))
+ + Keyword("WHERE")
+ + "{"
+ + Optional(
+ Param(
+ "where",
+ Comp(
+ "FakeGroupGraphPatten",
+ ParamList("part", Comp("TriplesBlock", TriplesTemplate)),
+ ),
+ )
+ )
+ + "}"
+ + SolutionModifier
+ + ValuesClause
+ ),
+)
# [12] AskQuery ::= 'ASK' DatasetClause* WhereClause SolutionModifier
-AskQuery = Comp('AskQuery', Keyword('ASK') + Param('datasetClause', ZeroOrMore(
- DatasetClause)) + WhereClause + SolutionModifier + ValuesClause)
+AskQuery = Comp(
+ "AskQuery",
+ Keyword("ASK")
+ + Param("datasetClause", ZeroOrMore(DatasetClause))
+ + WhereClause
+ + SolutionModifier
+ + ValuesClause,
+)
# [11] DescribeQuery ::= 'DESCRIBE' ( VarOrIri+ | '*' ) DatasetClause* WhereClause? SolutionModifier
-DescribeQuery = Comp('DescribeQuery', Keyword('DESCRIBE') + (OneOrMore(ParamList('var', VarOrIri)) | '*') + Param(
- 'datasetClause', ZeroOrMore(DatasetClause)) + Optional(WhereClause) + SolutionModifier + ValuesClause)
+DescribeQuery = Comp(
+ "DescribeQuery",
+ Keyword("DESCRIBE")
+ + (OneOrMore(ParamList("var", VarOrIri)) | "*")
+ + Param("datasetClause", ZeroOrMore(DatasetClause))
+ + Optional(WhereClause)
+ + SolutionModifier
+ + ValuesClause,
+)
# [29] Update ::= Prologue ( Update1 ( ';' Update )? )?
Update = Forward()
-Update <<= (ParamList('prologue', Prologue) + Optional(ParamList('request',
- Update1) + Optional(';' + Update)))
+Update <<= ParamList("prologue", Prologue) + Optional(
+ ParamList("request", Update1) + Optional(";" + Update)
+)
# [2] Query ::= Prologue
@@ -1022,17 +1504,16 @@ Update <<= (ParamList('prologue', Prologue) + Optional(ParamList('request',
Query = Prologue + (SelectQuery | ConstructQuery | DescribeQuery | AskQuery)
# [3] UpdateUnit ::= Update
-UpdateUnit = Comp('Update', Update)
+UpdateUnit = Comp("Update", Update)
# [1] QueryUnit ::= Query
QueryUnit = Query
-QueryUnit.ignore('#' + restOfLine)
-UpdateUnit.ignore('#' + restOfLine)
+QueryUnit.ignore("#" + restOfLine)
+UpdateUnit.ignore("#" + restOfLine)
-expandUnicodeEscapes_re = re.compile(
- r'\\u([0-9a-f]{4}(?:[0-9a-f]{4})?)', flags=re.I)
+expandUnicodeEscapes_re = re.compile(r"\\u([0-9a-f]{4}(?:[0-9a-f]{4})?)", flags=re.I)
def expandUnicodeEscapes(q):
@@ -1043,7 +1524,7 @@ def expandUnicodeEscapes(q):
def expand(m):
try:
- return unichr(int(m.group(1), 16))
+ return chr(int(m.group(1), 16))
except:
raise Exception("Invalid unicode code point: " + m)
@@ -1051,28 +1532,29 @@ def expandUnicodeEscapes(q):
def parseQuery(q):
- if hasattr(q, 'read'):
+ if hasattr(q, "read"):
q = q.read()
- if isinstance(q, binary_type):
- q = q.decode('utf-8')
+ if isinstance(q, bytes):
+ q = q.decode("utf-8")
q = expandUnicodeEscapes(q)
return Query.parseString(q, parseAll=True)
def parseUpdate(q):
- if hasattr(q, 'read'):
+ if hasattr(q, "read"):
q = q.read()
- if isinstance(q, binary_type):
- q = q.decode('utf-8')
+ if isinstance(q, bytes):
+ q = q.decode("utf-8")
q = expandUnicodeEscapes(q)
return UpdateUnit.parseString(q, parseAll=True)[0]
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
+
DEBUG = True
try:
q = Query.parseString(sys.argv[1])
diff --git a/rdflib/plugins/sparql/parserutils.py b/rdflib/plugins/sparql/parserutils.py
index 29804eea..c30e10d6 100644
--- a/rdflib/plugins/sparql/parserutils.py
+++ b/rdflib/plugins/sparql/parserutils.py
@@ -1,11 +1,10 @@
-
from types import MethodType
from collections import OrderedDict
from pyparsing import TokenConverter, ParseResults, originalTextFor
-from rdflib import BNode, Variable, URIRef
+from rdflib import BNode, Variable
DEBUG = True
DEBUG = False
@@ -44,6 +43,7 @@ the resulting CompValue
# Comp('Sum')( Param('x')(Number) + '+' + Param('y')(Number) )
+
def value(ctx, val, variables=False, errors=False):
"""
utility function for evaluating something...
@@ -172,7 +172,7 @@ class CompValue(OrderedDict):
def __getattr__(self, a):
# Hack hack: OrderedDict relies on this
- if a in ('_OrderedDict__root', '_OrderedDict__end'):
+ if a in ("_OrderedDict__root", "_OrderedDict__end"):
raise AttributeError
try:
return self[a]
@@ -224,18 +224,18 @@ class Comp(TokenConverter):
res._evalfn = MethodType(self.evalfn, res)
else:
res = CompValue(self.name)
- if self.name == 'ServiceGraphPattern':
+ if self.name == "ServiceGraphPattern":
# Then this must be a service graph pattern and have
# already matched.
# lets assume there is one, for now, then test for two later.
sgp = originalTextFor(self.expr)
service_string = sgp.searchString(instring)[0][0]
- res['service_string'] = service_string
+ res["service_string"] = service_string
for t in tokenList:
if isinstance(t, ParamValue):
if t.isList:
- if not t.name in res:
+ if t.name not in res:
res[t.name] = plist()
res[t.name].append(t.tokenList)
else:
@@ -250,38 +250,38 @@ class Comp(TokenConverter):
return self
-def prettify_parsetree(t, indent='', depth=0):
+def prettify_parsetree(t, indent="", depth=0):
out = []
if isinstance(t, ParseResults):
for e in t.asList():
out.append(prettify_parsetree(e, indent, depth + 1))
for k, v in sorted(t.items()):
- out.append("%s%s- %s:\n" % (indent, ' ' * depth, k))
+ out.append("%s%s- %s:\n" % (indent, " " * depth, k))
out.append(prettify_parsetree(v, indent, depth + 1))
elif isinstance(t, CompValue):
- out.append("%s%s> %s:\n" % (indent, ' ' * depth, t.name))
+ out.append("%s%s> %s:\n" % (indent, " " * depth, t.name))
for k, v in t.items():
- out.append("%s%s- %s:\n" % (indent, ' ' * (depth + 1), k))
+ out.append("%s%s- %s:\n" % (indent, " " * (depth + 1), k))
out.append(prettify_parsetree(v, indent, depth + 2))
elif isinstance(t, dict):
for k, v in t.items():
- out.append("%s%s- %s:\n" % (indent, ' ' * (depth + 1), k))
+ out.append("%s%s- %s:\n" % (indent, " " * (depth + 1), k))
out.append(prettify_parsetree(v, indent, depth + 2))
elif isinstance(t, list):
for e in t:
out.append(prettify_parsetree(e, indent, depth + 1))
else:
- out.append("%s%s- %r\n" % (indent, ' ' * depth, t))
+ out.append("%s%s- %r\n" % (indent, " " * depth, t))
return "".join(out)
-if __name__ == '__main__':
+if __name__ == "__main__":
from pyparsing import Word, nums
import sys
Number = Word(nums)
Number.setParseAction(lambda x: int(x[0]))
- Plus = Comp('plus', Param('a', Number) + '+' + Param('b', Number))
+ Plus = Comp("plus", Param("a", Number) + "+" + Param("b", Number))
Plus.setEvalFn(lambda self, ctx: self.a + self.b)
r = Plus.parseString(sys.argv[1])
diff --git a/rdflib/plugins/sparql/processor.py b/rdflib/plugins/sparql/processor.py
index 14f70f1b..84e8c823 100644
--- a/rdflib/plugins/sparql/processor.py
+++ b/rdflib/plugins/sparql/processor.py
@@ -1,4 +1,3 @@
-
"""
Code for tying SPARQL Engine into RDFLib
@@ -6,7 +5,6 @@ These should be automatically registered with RDFLib
"""
-from six import string_types
from rdflib.query import Processor, Result, UpdateProcessor
@@ -33,12 +31,12 @@ def processUpdate(graph, updateString, initBindings={}, initNs={}, base=None):
Process a SPARQL Update Request
returns Nothing on success or raises Exceptions on error
"""
- evalUpdate(graph, translateUpdate(
- parseUpdate(updateString), base, initNs), initBindings)
+ evalUpdate(
+ graph, translateUpdate(parseUpdate(updateString), base, initNs), initBindings
+ )
class SPARQLResult(Result):
-
def __init__(self, res):
Result.__init__(self, res["type_"])
self.vars = res.get("vars_")
@@ -52,20 +50,17 @@ class SPARQLUpdateProcessor(UpdateProcessor):
self.graph = graph
def update(self, strOrQuery, initBindings={}, initNs={}):
- if isinstance(strOrQuery, string_types):
+ if isinstance(strOrQuery, str):
strOrQuery = translateUpdate(parseUpdate(strOrQuery), initNs=initNs)
return evalUpdate(self.graph, strOrQuery, initBindings)
class SPARQLProcessor(Processor):
-
def __init__(self, graph):
self.graph = graph
- def query(
- self, strOrQuery, initBindings={},
- initNs={}, base=None, DEBUG=False):
+ def query(self, strOrQuery, initBindings={}, initNs={}, base=None, DEBUG=False):
"""
Evaluate a query with the given initial bindings, and initial
namespaces. The given base is used to resolve relative URIs in
diff --git a/rdflib/plugins/sparql/results/csvresults.py b/rdflib/plugins/sparql/results/csvresults.py
index 2cbeea05..c87b6ea7 100644
--- a/rdflib/plugins/sparql/results/csvresults.py
+++ b/rdflib/plugins/sparql/results/csvresults.py
@@ -10,8 +10,6 @@ http://www.w3.org/TR/sparql11-results-csv-tsv/
import codecs
import csv
-from six import binary_type, PY3
-
from rdflib import Variable, BNode, URIRef, Literal
from rdflib.query import Result, ResultSerializer, ResultParser
@@ -23,11 +21,11 @@ class CSVResultParser(ResultParser):
def parse(self, source, content_type=None):
- r = Result('SELECT')
+ r = Result("SELECT")
- if isinstance(source.read(0), binary_type):
+ if isinstance(source.read(0), bytes):
# if reading from source returns bytes do utf-8 decoding
- source = codecs.getreader('utf-8')(source)
+ source = codecs.getreader("utf-8")(source)
reader = csv.reader(source, delimiter=self.delim)
r.vars = [Variable(x) for x in next(reader)]
@@ -39,9 +37,11 @@ class CSVResultParser(ResultParser):
return r
def parseRow(self, row, v):
- return dict((var, val)
- for var, val in zip(v, [self.convertTerm(t)
- for t in row]) if val is not None)
+ return dict(
+ (var, val)
+ for var, val in zip(v, [self.convertTerm(t) for t in row])
+ if val is not None
+ )
def convertTerm(self, t):
if t == "":
@@ -54,39 +54,34 @@ class CSVResultParser(ResultParser):
class CSVResultSerializer(ResultSerializer):
-
def __init__(self, result):
ResultSerializer.__init__(self, result)
self.delim = ","
if result.type != "SELECT":
- raise Exception(
- "CSVSerializer can only serialize select query results")
+ raise Exception("CSVSerializer can only serialize select query results")
+
+ def serialize(self, stream, encoding="utf-8", **kwargs):
- def serialize(self, stream, encoding='utf-8'):
+ # the serialiser writes bytes in the given encoding
+ # in py3 csv.writer is unicode aware and writes STRINGS,
+ # so we encode afterwards
- if PY3:
- # the serialiser writes bytes in the given encoding
- # in py3 csv.writer is unicode aware and writes STRINGS,
- # so we encode afterwards
- # in py2 it breaks when passed unicode strings,
- # and must be passed utf8, so we encode before
+ import codecs
- import codecs
- stream = codecs.getwriter(encoding)(stream)
+ stream = codecs.getwriter(encoding)(stream)
out = csv.writer(stream, delimiter=self.delim)
vs = [self.serializeTerm(v, encoding) for v in self.result.vars]
out.writerow(vs)
for row in self.result.bindings:
- out.writerow([self.serializeTerm(
- row.get(v), encoding) for v in self.result.vars])
+ out.writerow(
+ [self.serializeTerm(row.get(v), encoding) for v in self.result.vars]
+ )
def serializeTerm(self, term, encoding):
if term is None:
return ""
- if not PY3:
- return term.encode(encoding)
else:
return term
diff --git a/rdflib/plugins/sparql/results/graph.py b/rdflib/plugins/sparql/results/graph.py
index c47daa72..77715d07 100644
--- a/rdflib/plugins/sparql/results/graph.py
+++ b/rdflib/plugins/sparql/results/graph.py
@@ -1,18 +1,12 @@
from rdflib import Graph
-from rdflib.query import (
- Result,
- ResultParser,
- ResultSerializer,
- ResultException
-)
+from rdflib.query import Result, ResultParser
class GraphResultParser(ResultParser):
-
def parse(self, source, content_type):
- res = Result('CONSTRUCT') # hmm - or describe?type_)
+ res = Result("CONSTRUCT") # hmm - or describe?type_)
res.graph = Graph()
res.graph.parse(source, format=content_type)
diff --git a/rdflib/plugins/sparql/results/jsonresults.py b/rdflib/plugins/sparql/results/jsonresults.py
index 1f45ce27..13a8da5e 100644
--- a/rdflib/plugins/sparql/results/jsonresults.py
+++ b/rdflib/plugins/sparql/results/jsonresults.py
@@ -1,11 +1,8 @@
import json
-from rdflib.query import (
- Result, ResultException, ResultSerializer, ResultParser)
+from rdflib.query import Result, ResultException, ResultSerializer, ResultParser
from rdflib import Literal, URIRef, BNode, Variable
-from six import binary_type, text_type
-
"""A Serializer for SPARQL results in JSON:
@@ -20,23 +17,21 @@ Authors: Drew Perttula, Gunnar Aastrand Grimnes
class JSONResultParser(ResultParser):
-
def parse(self, source, content_type=None):
inp = source.read()
- if isinstance(inp, binary_type):
- inp = inp.decode('utf-8')
+ if isinstance(inp, bytes):
+ inp = inp.decode("utf-8")
return JSONResult(json.loads(inp))
class JSONResultSerializer(ResultSerializer):
-
def __init__(self, result):
ResultSerializer.__init__(self, result)
def serialize(self, stream, encoding=None):
res = {}
- if self.result.type == 'ASK':
+ if self.result.type == "ASK":
res["head"] = {}
res["boolean"] = self.result.askAnswer
else:
@@ -44,8 +39,9 @@ class JSONResultSerializer(ResultSerializer):
res["results"] = {}
res["head"] = {}
res["head"]["vars"] = self.result.vars
- res["results"]["bindings"] = [self._bindingToJSON(
- x) for x in self.result.bindings]
+ res["results"]["bindings"] = [
+ self._bindingToJSON(x) for x in self.result.bindings
+ ]
r = json.dumps(res, allow_nan=False, ensure_ascii=False)
if encoding is not None:
@@ -63,27 +59,26 @@ class JSONResultSerializer(ResultSerializer):
class JSONResult(Result):
-
def __init__(self, json):
self.json = json
if "boolean" in json:
- type_ = 'ASK'
+ type_ = "ASK"
elif "results" in json:
- type_ = 'SELECT'
+ type_ = "SELECT"
else:
- raise ResultException('No boolean or results in json!')
+ raise ResultException("No boolean or results in json!")
Result.__init__(self, type_)
- if type_ == 'ASK':
- self.askAnswer = bool(json['boolean'])
+ if type_ == "ASK":
+ self.askAnswer = bool(json["boolean"])
else:
self.bindings = self._get_bindings()
self.vars = [Variable(x) for x in json["head"]["vars"]]
def _get_bindings(self):
ret = []
- for row in self.json['results']['bindings']:
+ for row in self.json["results"]["bindings"]:
outRow = {}
for k, v in row.items():
outRow[Variable(k)] = parseJsonTerm(v)
@@ -99,36 +94,34 @@ def parseJsonTerm(d):
{ 'type': 'literal', 'value': 'drewp' }
"""
- t = d['type']
- if t == 'uri':
- return URIRef(d['value'])
- elif t == 'literal':
- return Literal(d['value'], datatype=d.get('datatype'), lang=d.get('xml:lang'))
- elif t == 'typed-literal':
- return Literal(d['value'], datatype=URIRef(d['datatype']))
- elif t == 'bnode':
- return BNode(d['value'])
+ t = d["type"]
+ if t == "uri":
+ return URIRef(d["value"])
+ elif t == "literal":
+ return Literal(d["value"], datatype=d.get("datatype"), lang=d.get("xml:lang"))
+ elif t == "typed-literal":
+ return Literal(d["value"], datatype=URIRef(d["datatype"]))
+ elif t == "bnode":
+ return BNode(d["value"])
else:
raise NotImplementedError("json term type %r" % t)
def termToJSON(self, term):
if isinstance(term, URIRef):
- return {'type': 'uri', 'value': text_type(term)}
+ return {"type": "uri", "value": str(term)}
elif isinstance(term, Literal):
- r = {'type': 'literal',
- 'value': text_type(term)}
+ r = {"type": "literal", "value": str(term)}
if term.datatype is not None:
- r['datatype'] = text_type(term.datatype)
+ r["datatype"] = str(term.datatype)
if term.language is not None:
- r['xml:lang'] = term.language
+ r["xml:lang"] = term.language
return r
elif isinstance(term, BNode):
- return {'type': 'bnode', 'value': str(term)}
+ return {"type": "bnode", "value": str(term)}
elif term is None:
return None
else:
- raise ResultException(
- 'Unknown term type: %s (%s)' % (term, type(term)))
+ raise ResultException("Unknown term type: %s (%s)" % (term, type(term)))
diff --git a/rdflib/plugins/sparql/results/rdfresults.py b/rdflib/plugins/sparql/results/rdfresults.py
index ac71ff1d..7f64bbf4 100644
--- a/rdflib/plugins/sparql/results/rdfresults.py
+++ b/rdflib/plugins/sparql/results/rdfresults.py
@@ -2,7 +2,7 @@ from rdflib import Graph, Namespace, RDF, Variable
from rdflib.query import Result, ResultParser
-RS = Namespace('http://www.w3.org/2001/sw/DataAccess/tests/result-set#')
+RS = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/result-set#")
class RDFResultParser(ResultParser):
@@ -11,7 +11,6 @@ class RDFResultParser(ResultParser):
class RDFResult(Result):
-
def __init__(self, source, **kwargs):
if not isinstance(source, Graph):
@@ -24,7 +23,7 @@ class RDFResult(Result):
# there better be only one :)
if rs is None:
- type_ = 'CONSTRUCT'
+ type_ = "CONSTRUCT"
# use a new graph
g = Graph()
@@ -35,27 +34,27 @@ class RDFResult(Result):
askAnswer = graph.value(rs, RS.boolean)
if askAnswer is not None:
- type_ = 'ASK'
+ type_ = "ASK"
else:
- type_ = 'SELECT'
+ type_ = "SELECT"
Result.__init__(self, type_)
- if type_ == 'SELECT':
- self.vars = [Variable(v) for v in graph.objects(rs,
- RS.resultVariable)]
+ if type_ == "SELECT":
+ self.vars = [Variable(v) for v in graph.objects(rs, RS.resultVariable)]
self.bindings = []
for s in graph.objects(rs, RS.solution):
sol = {}
for b in graph.objects(s, RS.binding):
- sol[Variable(graph.value(
- b, RS.variable))] = graph.value(b, RS.value)
+ sol[Variable(graph.value(b, RS.variable))] = graph.value(
+ b, RS.value
+ )
self.bindings.append(sol)
- elif type_ == 'ASK':
+ elif type_ == "ASK":
self.askAnswer = askAnswer.value
if askAnswer.value is None:
- raise Exception('Malformed boolean in ask answer!')
- elif type_ == 'CONSTRUCT':
+ raise Exception("Malformed boolean in ask answer!")
+ elif type_ == "CONSTRUCT":
self.graph = g
diff --git a/rdflib/plugins/sparql/results/tsvresults.py b/rdflib/plugins/sparql/results/tsvresults.py
index 6e9366f3..2406cf4e 100644
--- a/rdflib/plugins/sparql/results/tsvresults.py
+++ b/rdflib/plugins/sparql/results/tsvresults.py
@@ -1,4 +1,3 @@
-
"""
This implements the Tab Separated SPARQL Result Format
@@ -8,29 +7,45 @@ It is implemented with pyparsing, reusing the elements from the SPARQL Parser
import codecs
from pyparsing import (
- Optional, ZeroOrMore, Literal, ParserElement, ParseException, Suppress,
- FollowedBy, LineEnd)
+ Optional,
+ ZeroOrMore,
+ Literal,
+ ParserElement,
+ ParseException,
+ Suppress,
+ FollowedBy,
+ LineEnd,
+)
from rdflib.query import Result, ResultParser
from rdflib.plugins.sparql.parser import (
- Var, STRING_LITERAL1, STRING_LITERAL2, IRIREF, BLANK_NODE_LABEL,
- NumericLiteral, BooleanLiteral, LANGTAG)
+ Var,
+ STRING_LITERAL1,
+ STRING_LITERAL2,
+ IRIREF,
+ BLANK_NODE_LABEL,
+ NumericLiteral,
+ BooleanLiteral,
+ LANGTAG,
+)
from rdflib.plugins.sparql.parserutils import Comp, Param, CompValue
from rdflib import Literal as RDFLiteral
-from six import binary_type
-
ParserElement.setDefaultWhitespaceChars(" \n")
String = STRING_LITERAL1 | STRING_LITERAL2
-RDFLITERAL = Comp('literal', Param('string', String) + Optional(
- Param('lang', LANGTAG.leaveWhitespace()
- ) | Literal('^^').leaveWhitespace(
- ) + Param('datatype', IRIREF).leaveWhitespace()))
+RDFLITERAL = Comp(
+ "literal",
+ Param("string", String)
+ + Optional(
+ Param("lang", LANGTAG.leaveWhitespace())
+ | Literal("^^").leaveWhitespace() + Param("datatype", IRIREF).leaveWhitespace()
+ ),
+)
NONE_VALUE = object()
@@ -49,12 +64,12 @@ HEADER.parseWithTabs()
class TSVResultParser(ResultParser):
def parse(self, source, content_type=None):
- if isinstance(source.read(0), binary_type):
+ if isinstance(source.read(0), bytes):
# if reading from source returns bytes do utf-8 decoding
- source = codecs.getreader('utf-8')(source)
+ source = codecs.getreader("utf-8")(source)
try:
- r = Result('SELECT')
+ r = Result("SELECT")
header = source.readline()
@@ -64,13 +79,12 @@ class TSVResultParser(ResultParser):
line = source.readline()
if not line:
break
- line = line.strip('\n')
+ line = line.strip("\n")
if line == "":
continue
row = ROW.parseString(line, parseAll=True)
- r.bindings.append(
- dict(zip(r.vars, (self.convertTerm(x) for x in row))))
+ r.bindings.append(dict(zip(r.vars, (self.convertTerm(x) for x in row))))
return r
@@ -83,7 +97,7 @@ class TSVResultParser(ResultParser):
if t is NONE_VALUE:
return None
if isinstance(t, CompValue):
- if t.name == 'literal':
+ if t.name == "literal":
return RDFLiteral(t.string, lang=t.lang, datatype=t.datatype)
else:
raise Exception("I dont know how to handle this: %s" % (t,))
@@ -91,9 +105,10 @@ class TSVResultParser(ResultParser):
return t
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
- r = Result.parse(file(sys.argv[1]), format='tsv')
+
+ r = Result.parse(source=sys.argv[1], format="tsv")
print(r.vars)
print(r.bindings)
# print r.serialize(format='json')
diff --git a/rdflib/plugins/sparql/results/txtresults.py b/rdflib/plugins/sparql/results/txtresults.py
index c42f24c4..426dd9a1 100644
--- a/rdflib/plugins/sparql/results/txtresults.py
+++ b/rdflib/plugins/sparql/results/txtresults.py
@@ -1,4 +1,3 @@
-
from rdflib import URIRef, BNode, Literal
from rdflib.query import ResultSerializer
@@ -37,7 +36,7 @@ class TXTResultSerializer(ResultSerializer):
h2 += 1
return " " * h1 + s + " " * h2
- if self.result.type != 'SELECT':
+ if self.result.type != "SELECT":
raise Exception("Can only pretty print SELECT results!")
if not self.result:
@@ -46,14 +45,17 @@ class TXTResultSerializer(ResultSerializer):
keys = sorted(self.result.vars)
maxlen = [0] * len(keys)
- b = [[_termString(r[k], namespace_manager) for k in keys] for r in self.result]
+ b = [
+ [_termString(r[k], namespace_manager) for k in keys]
+ for r in self.result
+ ]
for r in b:
for i in range(len(keys)):
maxlen[i] = max(maxlen[i], len(r[i]))
- stream.write(
- "|".join([c(k, maxlen[i]) for i, k in enumerate(keys)]) + "\n")
+ stream.write("|".join([c(k, maxlen[i]) for i, k in enumerate(keys)]) + "\n")
stream.write("-" * (len(maxlen) + sum(maxlen)) + "\n")
for r in sorted(b):
- stream.write("|".join(
- [t + " " * (i - len(t)) for i, t in zip(maxlen, r)]) + "\n")
+ stream.write(
+ "|".join([t + " " * (i - len(t)) for i, t in zip(maxlen, r)]) + "\n"
+ )
diff --git a/rdflib/plugins/sparql/results/xmlresults.py b/rdflib/plugins/sparql/results/xmlresults.py
index e1daff92..1511783f 100644
--- a/rdflib/plugins/sparql/results/xmlresults.py
+++ b/rdflib/plugins/sparql/results/xmlresults.py
@@ -1,25 +1,17 @@
import logging
-from io import BytesIO
from xml.sax.saxutils import XMLGenerator
from xml.dom import XML_NAMESPACE
from xml.sax.xmlreader import AttributesNSImpl
from rdflib.compat import etree
-from six import iteritems
-from rdflib import Literal, URIRef, BNode, Graph, Variable
-from rdflib.query import (
- Result,
- ResultParser,
- ResultSerializer,
- ResultException
-)
+from rdflib import Literal, URIRef, BNode, Variable
+from rdflib.query import Result, ResultParser, ResultSerializer, ResultException
-from six import text_type
-SPARQL_XML_NAMESPACE = u'http://www.w3.org/2005/sparql-results#'
-RESULTS_NS_ET = '{%s}' % SPARQL_XML_NAMESPACE
+SPARQL_XML_NAMESPACE = u"http://www.w3.org/2005/sparql-results#"
+RESULTS_NS_ET = "{%s}" % SPARQL_XML_NAMESPACE
log = logging.getLogger(__name__)
@@ -36,7 +28,6 @@ Authors: Drew Perttula, Gunnar Aastrand Grimnes
class XMLResultParser(ResultParser):
-
def parse(self, source, content_type=None):
return XMLResult(source)
@@ -50,31 +41,32 @@ class XMLResult(Result):
except TypeError:
tree = etree.parse(source)
- boolean = tree.find(RESULTS_NS_ET + 'boolean')
- results = tree.find(RESULTS_NS_ET + 'results')
+ boolean = tree.find(RESULTS_NS_ET + "boolean")
+ results = tree.find(RESULTS_NS_ET + "results")
if boolean is not None:
- type_ = 'ASK'
+ type_ = "ASK"
elif results is not None:
- type_ = 'SELECT'
+ type_ = "SELECT"
else:
- raise ResultException(
- "No RDF result-bindings or boolean answer found!")
+ raise ResultException("No RDF result-bindings or boolean answer found!")
Result.__init__(self, type_)
- if type_ == 'SELECT':
+ if type_ == "SELECT":
self.bindings = []
for result in results:
r = {}
for binding in result:
- r[Variable(binding.get('name'))] = parseTerm(binding[0])
+ r[Variable(binding.get("name"))] = parseTerm(binding[0])
self.bindings.append(r)
- self.vars = [Variable(x.get("name"))
- for x in tree.findall(
- './%shead/%svariable' % (
- RESULTS_NS_ET, RESULTS_NS_ET))]
+ self.vars = [
+ Variable(x.get("name"))
+ for x in tree.findall(
+ "./%shead/%svariable" % (RESULTS_NS_ET, RESULTS_NS_ET)
+ )
+ ]
else:
self.askAnswer = boolean.text.lower().strip() == "true"
@@ -84,36 +76,35 @@ def parseTerm(element):
"""rdflib object (Literal, URIRef, BNode) for the given
elementtree element"""
tag, text = element.tag, element.text
- if tag == RESULTS_NS_ET + 'literal':
+ if tag == RESULTS_NS_ET + "literal":
if text is None:
- text = ''
+ text = ""
datatype = None
lang = None
- if element.get('datatype', None):
- datatype = URIRef(element.get('datatype'))
+ if element.get("datatype", None):
+ datatype = URIRef(element.get("datatype"))
elif element.get("{%s}lang" % XML_NAMESPACE, None):
lang = element.get("{%s}lang" % XML_NAMESPACE)
ret = Literal(text, datatype=datatype, lang=lang)
return ret
- elif tag == RESULTS_NS_ET + 'uri':
+ elif tag == RESULTS_NS_ET + "uri":
return URIRef(text)
- elif tag == RESULTS_NS_ET + 'bnode':
+ elif tag == RESULTS_NS_ET + "bnode":
return BNode(text)
else:
raise TypeError("unknown binding type %r" % element)
class XMLResultSerializer(ResultSerializer):
-
def __init__(self, result):
ResultSerializer.__init__(self, result)
def serialize(self, stream, encoding="utf-8"):
writer = SPARQLXMLWriter(stream, encoding)
- if self.result.type == 'ASK':
+ if self.result.type == "ASK":
writer.write_header([])
writer.write_ask(self.result.askAnswer)
else:
@@ -121,7 +112,7 @@ class XMLResultSerializer(ResultSerializer):
writer.write_results_header()
for b in self.result.bindings:
writer.write_start_result()
- for key, val in iteritems(b):
+ for key, val in b.items():
writer.write_binding(key, val)
writer.write_end_result()
@@ -135,14 +126,14 @@ class SPARQLXMLWriter:
Python saxutils-based SPARQL XML Writer
"""
- def __init__(self, output, encoding='utf-8'):
+ def __init__(self, output, encoding="utf-8"):
writer = XMLGenerator(output, encoding)
writer.startDocument()
- writer.startPrefixMapping(u'', SPARQL_XML_NAMESPACE)
- writer.startPrefixMapping(u'xml', XML_NAMESPACE)
+ writer.startPrefixMapping(u"", SPARQL_XML_NAMESPACE)
+ writer.startPrefixMapping(u"xml", XML_NAMESPACE)
writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'sparql'),
- u'sparql', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"sparql"), u"sparql", AttributesNSImpl({}, {})
+ )
self.writer = writer
self._output = output
self._encoding = encoding
@@ -150,102 +141,99 @@ class SPARQLXMLWriter:
def write_header(self, allvarsL):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'head'),
- u'head', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"head"), u"head", AttributesNSImpl({}, {})
+ )
for i in range(0, len(allvarsL)):
attr_vals = {
- (None, u'name'): text_type(allvarsL[i]),
+ (None, u"name"): str(allvarsL[i]),
}
attr_qnames = {
- (None, u'name'): u'name',
+ (None, u"name"): u"name",
}
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'variable'),
- u'variable', AttributesNSImpl(attr_vals, attr_qnames))
- self.writer.endElementNS((SPARQL_XML_NAMESPACE,
- u'variable'), u'variable')
- self.writer.endElementNS((SPARQL_XML_NAMESPACE, u'head'), u'head')
+ (SPARQL_XML_NAMESPACE, u"variable"),
+ u"variable",
+ AttributesNSImpl(attr_vals, attr_qnames),
+ )
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"variable"), u"variable")
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"head"), u"head")
def write_ask(self, val):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'boolean'),
- u'boolean', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"boolean"), u"boolean", AttributesNSImpl({}, {})
+ )
self.writer.characters(str(val).lower())
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'boolean'), u'boolean')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"boolean"), u"boolean")
def write_results_header(self):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'results'),
- u'results', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"results"), u"results", AttributesNSImpl({}, {})
+ )
self._results = True
def write_start_result(self):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'result'),
- u'result', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"result"), u"result", AttributesNSImpl({}, {})
+ )
self._resultStarted = True
def write_end_result(self):
assert self._resultStarted
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'result'), u'result')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"result"), u"result")
self._resultStarted = False
def write_binding(self, name, val):
assert self._resultStarted
attr_vals = {
- (None, u'name'): text_type(name),
+ (None, u"name"): str(name),
}
attr_qnames = {
- (None, u'name'): u'name',
+ (None, u"name"): u"name",
}
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'binding'),
- u'binding', AttributesNSImpl(attr_vals, attr_qnames))
+ (SPARQL_XML_NAMESPACE, u"binding"),
+ u"binding",
+ AttributesNSImpl(attr_vals, attr_qnames),
+ )
if isinstance(val, URIRef):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'uri'),
- u'uri', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"uri"), u"uri", AttributesNSImpl({}, {})
+ )
self.writer.characters(val)
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'uri'), u'uri')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"uri"), u"uri")
elif isinstance(val, BNode):
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'bnode'),
- u'bnode', AttributesNSImpl({}, {}))
+ (SPARQL_XML_NAMESPACE, u"bnode"), u"bnode", AttributesNSImpl({}, {})
+ )
self.writer.characters(val)
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'bnode'), u'bnode')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"bnode"), u"bnode")
elif isinstance(val, Literal):
attr_vals = {}
attr_qnames = {}
if val.language:
- attr_vals[(XML_NAMESPACE, u'lang')] = val.language
- attr_qnames[(XML_NAMESPACE, u'lang')] = u"xml:lang"
+ attr_vals[(XML_NAMESPACE, u"lang")] = val.language
+ attr_qnames[(XML_NAMESPACE, u"lang")] = u"xml:lang"
elif val.datatype:
- attr_vals[(None, u'datatype')] = val.datatype
- attr_qnames[(None, u'datatype')] = u'datatype'
+ attr_vals[(None, u"datatype")] = val.datatype
+ attr_qnames[(None, u"datatype")] = u"datatype"
self.writer.startElementNS(
- (SPARQL_XML_NAMESPACE, u'literal'),
- u'literal', AttributesNSImpl(attr_vals, attr_qnames))
+ (SPARQL_XML_NAMESPACE, u"literal"),
+ u"literal",
+ AttributesNSImpl(attr_vals, attr_qnames),
+ )
self.writer.characters(val)
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'literal'), u'literal')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"literal"), u"literal")
else:
raise Exception("Unsupported RDF term: %s" % val)
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'binding'), u'binding')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"binding"), u"binding")
def close(self):
if self._results:
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'results'), u'results')
- self.writer.endElementNS(
- (SPARQL_XML_NAMESPACE, u'sparql'), u'sparql')
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"results"), u"results")
+ self.writer.endElementNS((SPARQL_XML_NAMESPACE, u"sparql"), u"sparql")
self.writer.endDocument()
diff --git a/rdflib/plugins/sparql/sparql.py b/rdflib/plugins/sparql/sparql.py
index d6d445e8..417edc03 100644
--- a/rdflib/plugins/sparql/sparql.py
+++ b/rdflib/plugins/sparql/sparql.py
@@ -5,7 +5,6 @@ import itertools
import datetime
import isodate
-from six import text_type, iteritems
from rdflib.compat import Mapping, MutableMapping
from rdflib.namespace import NamespaceManager
@@ -92,7 +91,7 @@ class Bindings(MutableMapping):
return "Bindings({" + ", ".join((k, self[k]) for k in self) + "})"
def __repr__(self):
- return text_type(self)
+ return str(self)
class FrozenDict(Mapping):
@@ -118,20 +117,19 @@ class FrozenDict(Mapping):
def __hash__(self):
# It would have been simpler and maybe more obvious to
- # use hash(tuple(sorted(self._d.iteritems()))) from this discussion
+ # use hash(tuple(sorted(self._d.items()))) from this discussion
# so far, but this solution is O(n). I don't know what kind of
# n we are going to run into, but sometimes it's hard to resist the
# urge to optimize when it will gain improved algorithmic performance.
if self._hash is None:
self._hash = 0
- for key, value in iteritems(self):
+ for key, value in self.items():
self._hash ^= hash(key)
self._hash ^= hash(value)
return self._hash
def project(self, vars):
- return FrozenDict(
- (x for x in iteritems(self) if x[0] in vars))
+ return FrozenDict((x for x in self.items() if x[0] in vars))
def disjointDomain(self, other):
return not bool(set(self).intersection(other))
@@ -147,8 +145,7 @@ class FrozenDict(Mapping):
return True
def merge(self, other):
- res = FrozenDict(
- itertools.chain(iteritems(self), iteritems(other)))
+ res = FrozenDict(itertools.chain(self.items(), other.items()))
return res
@@ -160,7 +157,6 @@ class FrozenDict(Mapping):
class FrozenBindings(FrozenDict):
-
def __init__(self, ctx, *args, **kwargs):
FrozenDict.__init__(self, *args, **kwargs)
self.ctx = ctx
@@ -173,15 +169,16 @@ class FrozenBindings(FrozenDict):
if not type(key) in (BNode, Variable):
return key
- return self._d[key]
+ if key not in self._d:
+ return self.ctx.initBindings[key]
+ else:
+ return self._d[key]
def project(self, vars):
- return FrozenBindings(
- self.ctx, (x for x in iteritems(self) if x[0] in vars))
+ return FrozenBindings(self.ctx, (x for x in self.items() if x[0] in vars))
def merge(self, other):
- res = FrozenBindings(
- self.ctx, itertools.chain(iteritems(self), iteritems(other)))
+ res = FrozenBindings(self.ctx, itertools.chain(self.items(), other.items()))
return res
@@ -208,18 +205,23 @@ class FrozenBindings(FrozenDict):
# bindings from initBindings are newer forgotten
return FrozenBindings(
- self.ctx, (
- x for x in iteritems(self) if (
- x[0] in _except or
- x[0] in self.ctx.initBindings or
- before[x[0]] is None)))
+ self.ctx,
+ (
+ x
+ for x in self.items()
+ if (
+ x[0] in _except
+ or x[0] in self.ctx.initBindings
+ or before[x[0]] is None
+ )
+ ),
+ )
def remember(self, these):
"""
return a frozen dict only of bindings in these
"""
- return FrozenBindings(
- self.ctx, (x for x in iteritems(self) if x[0] in these))
+ return FrozenBindings(self.ctx, (x for x in self.items() if x[0] in these))
class QueryContext(object):
@@ -251,7 +253,10 @@ class QueryContext(object):
def clone(self, bindings=None):
r = QueryContext(
- self._dataset if self._dataset is not None else self.graph, bindings or self.bindings, initBindings=self.initBindings)
+ self._dataset if self._dataset is not None else self.graph,
+ bindings or self.bindings,
+ initBindings=self.initBindings,
+ )
r.prologue = self.prologue
r.graph = self.graph
r.bnodes = self.bnodes
@@ -260,30 +265,30 @@ class QueryContext(object):
def _get_dataset(self):
if self._dataset is None:
raise Exception(
- 'You performed a query operation requiring ' +
- 'a dataset (i.e. ConjunctiveGraph), but ' +
- 'operating currently on a single graph.')
+ "You performed a query operation requiring "
+ + "a dataset (i.e. ConjunctiveGraph), but "
+ + "operating currently on a single graph."
+ )
return self._dataset
dataset = property(_get_dataset, doc="current dataset")
def load(self, source, default=False, **kwargs):
-
def _load(graph, source):
try:
return graph.load(source, **kwargs)
except:
pass
try:
- return graph.load(source, format='n3', **kwargs)
+ return graph.load(source, format="n3", **kwargs)
except:
pass
try:
- return graph.load(source, format='nt', **kwargs)
+ return graph.load(source, format="nt", **kwargs)
except:
raise Exception(
- "Could not load %s as either RDF/XML, N3 or NTriples" % (
- source))
+ "Could not load %s as either RDF/XML, N3 or NTriples" % source
+ )
if not rdflib.plugins.sparql.SPARQL_LOAD_GRAPHS:
# we are not loading - if we already know the graph
@@ -318,11 +323,10 @@ class QueryContext(object):
"""
if vars:
return FrozenBindings(
- self, ((k, v)
- for k, v in iteritems(self.bindings)
- if k in vars))
+ self, ((k, v) for k, v in self.bindings.items() if k in vars)
+ )
else:
- return FrozenBindings(self, iteritems(self.bindings))
+ return FrozenBindings(self, self.bindings.items())
def __setitem__(self, key, value):
if key in self.bindings and self.bindings[key] != value:
@@ -364,13 +368,12 @@ class Prologue(object):
def __init__(self):
self.base = None
- self.namespace_manager = NamespaceManager(
- Graph()) # ns man needs a store
+ self.namespace_manager = NamespaceManager(Graph()) # ns man needs a store
def resolvePName(self, prefix, localname):
ns = self.namespace_manager.store.namespace(prefix or "")
if ns is None:
- raise Exception('Unknown namespace prefix : %s' % prefix)
+ raise Exception("Unknown namespace prefix : %s" % prefix)
return URIRef(ns + (localname or ""))
def bind(self, prefix, uri):
@@ -385,13 +388,13 @@ class Prologue(object):
"""
if isinstance(iri, CompValue):
- if iri.name == 'pname':
+ if iri.name == "pname":
return self.resolvePName(iri.prefix, iri.localname)
- if iri.name == 'literal':
+ if iri.name == "literal":
return Literal(
- iri.string, lang=iri.lang,
- datatype=self.absolutize(iri.datatype))
- elif isinstance(iri, URIRef) and not ':' in iri:
+ iri.string, lang=iri.lang, datatype=self.absolutize(iri.datatype)
+ )
+ elif isinstance(iri, URIRef) and not ":" in iri:
return URIRef(iri, base=self.base)
return iri
diff --git a/rdflib/plugins/sparql/update.py b/rdflib/plugins/sparql/update.py
index daf380a3..f979c387 100644
--- a/rdflib/plugins/sparql/update.py
+++ b/rdflib/plugins/sparql/update.py
@@ -5,16 +5,13 @@ Code for carrying out Update Operations
"""
from rdflib import Graph, Variable
-
-from six import iteritems
-
from rdflib.plugins.sparql.sparql import QueryContext
from rdflib.plugins.sparql.evalutils import _fillTemplate, _join
from rdflib.plugins.sparql.evaluate import evalBGP, evalPart
def _graphOrDefault(ctx, g):
- if g == 'DEFAULT':
+ if g == "DEFAULT":
return ctx.graph
else:
return ctx.dataset.get_context(g)
@@ -24,12 +21,13 @@ def _graphAll(ctx, g):
"""
return a list of graphs
"""
- if g == 'DEFAULT':
+ if g == "DEFAULT":
return [ctx.graph]
- elif g == 'NAMED':
- return [c for c in ctx.dataset.contexts()
- if c.identifier != ctx.graph.identifier]
- elif g == 'ALL':
+ elif g == "NAMED":
+ return [
+ c for c in ctx.dataset.contexts() if c.identifier != ctx.graph.identifier
+ ]
+ elif g == "ALL":
return list(ctx.dataset.contexts())
else:
return [ctx.dataset.get_context(g)]
@@ -175,14 +173,14 @@ def evalModify(ctx, u):
if u.delete:
dg -= _fillTemplate(u.delete.triples, c)
- for g, q in iteritems(u.delete.quads):
+ for g, q in u.delete.quads.items():
cg = ctx.dataset.get_context(c.get(g))
cg -= _fillTemplate(q, c)
if u.insert:
dg += _fillTemplate(u.insert.triples, c)
- for g, q in iteritems(u.insert.quads):
+ for g, q in u.insert.quads.items():
cg = ctx.dataset.get_context(c.get(g))
cg += _fillTemplate(q, c)
@@ -277,36 +275,36 @@ def evalUpdate(graph, update, initBindings={}):
for u in update:
- initBindings = dict((Variable(k), v) for k, v in iteritems(initBindings))
+ initBindings = dict((Variable(k), v) for k, v in initBindings.items())
ctx = QueryContext(graph, initBindings=initBindings)
ctx.prologue = u.prologue
try:
- if u.name == 'Load':
+ if u.name == "Load":
evalLoad(ctx, u)
- elif u.name == 'Clear':
+ elif u.name == "Clear":
evalClear(ctx, u)
- elif u.name == 'Drop':
+ elif u.name == "Drop":
evalDrop(ctx, u)
- elif u.name == 'Create':
+ elif u.name == "Create":
evalCreate(ctx, u)
- elif u.name == 'Add':
+ elif u.name == "Add":
evalAdd(ctx, u)
- elif u.name == 'Move':
+ elif u.name == "Move":
evalMove(ctx, u)
- elif u.name == 'Copy':
+ elif u.name == "Copy":
evalCopy(ctx, u)
- elif u.name == 'InsertData':
+ elif u.name == "InsertData":
evalInsertData(ctx, u)
- elif u.name == 'DeleteData':
+ elif u.name == "DeleteData":
evalDeleteData(ctx, u)
- elif u.name == 'DeleteWhere':
+ elif u.name == "DeleteWhere":
evalDeleteWhere(ctx, u)
- elif u.name == 'Modify':
+ elif u.name == "Modify":
evalModify(ctx, u)
else:
- raise Exception('Unknown update operation: %s' % (u,))
+ raise Exception("Unknown update operation: %s" % (u,))
except:
if not u.silent:
raise
diff --git a/rdflib/plugins/stores/auditable.py b/rdflib/plugins/stores/auditable.py
index 7a3492b7..ff21716b 100644
--- a/rdflib/plugins/stores/auditable.py
+++ b/rdflib/plugins/stores/auditable.py
@@ -20,8 +20,8 @@ from rdflib import Graph, ConjunctiveGraph
import threading
destructiveOpLocks = {
- 'add': None,
- 'remove': None,
+ "add": None,
+ "remove": None,
}
@@ -50,59 +50,79 @@ class AuditableStore(Store):
def add(self, triple, context, quoted=False):
(s, p, o) = triple
- lock = destructiveOpLocks['add']
+ lock = destructiveOpLocks["add"]
lock = lock if lock else threading.RLock()
with lock:
- context = context.__class__(self.store, context.identifier) if context is not None else None
+ context = (
+ context.__class__(self.store, context.identifier)
+ if context is not None
+ else None
+ )
ctxId = context.identifier if context is not None else None
if list(self.store.triples(triple, context)):
return # triple already in store, do nothing
- self.reverseOps.append((s, p, o, ctxId, 'remove'))
+ self.reverseOps.append((s, p, o, ctxId, "remove"))
try:
- self.reverseOps.remove((s, p, o, ctxId, 'add'))
+ self.reverseOps.remove((s, p, o, ctxId, "add"))
except ValueError:
pass
self.store.add((s, p, o), context, quoted)
def remove(self, spo, context=None):
subject, predicate, object_ = spo
- lock = destructiveOpLocks['remove']
+ lock = destructiveOpLocks["remove"]
lock = lock if lock else threading.RLock()
with lock:
# Need to determine which quads will be removed if any term is a
# wildcard
- context = context.__class__(self.store, context.identifier) if context is not None else None
+ context = (
+ context.__class__(self.store, context.identifier)
+ if context is not None
+ else None
+ )
ctxId = context.identifier if context is not None else None
if None in [subject, predicate, object_, context]:
if ctxId:
for s, p, o in context.triples((subject, predicate, object_)):
try:
- self.reverseOps.remove((s, p, o, ctxId, 'remove'))
+ self.reverseOps.remove((s, p, o, ctxId, "remove"))
except ValueError:
- self.reverseOps.append((s, p, o, ctxId, 'add'))
+ self.reverseOps.append((s, p, o, ctxId, "add"))
else:
- for s, p, o, ctx in ConjunctiveGraph(self.store).quads((subject, predicate, object_)):
+ for s, p, o, ctx in ConjunctiveGraph(self.store).quads(
+ (subject, predicate, object_)
+ ):
try:
- self.reverseOps.remove((s, p, o, ctx.identifier, 'remove'))
+ self.reverseOps.remove((s, p, o, ctx.identifier, "remove"))
except ValueError:
- self.reverseOps.append((s, p, o, ctx.identifier, 'add'))
+ self.reverseOps.append((s, p, o, ctx.identifier, "add"))
else:
if not list(self.triples((subject, predicate, object_), context)):
return # triple not present in store, do nothing
try:
- self.reverseOps.remove((subject, predicate, object_, ctxId, 'remove'))
+ self.reverseOps.remove(
+ (subject, predicate, object_, ctxId, "remove")
+ )
except ValueError:
- self.reverseOps.append((subject, predicate, object_, ctxId, 'add'))
+ self.reverseOps.append((subject, predicate, object_, ctxId, "add"))
self.store.remove((subject, predicate, object_), context)
def triples(self, triple, context=None):
(su, pr, ob) = triple
- context = context.__class__(self.store, context.identifier) if context is not None else None
+ context = (
+ context.__class__(self.store, context.identifier)
+ if context is not None
+ else None
+ )
for (s, p, o), cg in self.store.triples((su, pr, ob), context):
yield (s, p, o), cg
def __len__(self, context=None):
- context = context.__class__(self.store, context.identifier) if context is not None else None
+ context = (
+ context.__class__(self.store, context.identifier)
+ if context is not None
+ else None
+ )
return self.store.__len__(context)
def contexts(self, triple=None):
@@ -129,11 +149,13 @@ class AuditableStore(Store):
# order
with self.rollbackLock:
for subject, predicate, obj, context, op in self.reverseOps:
- if op == 'add':
+ if op == "add":
self.store.add(
- (subject, predicate, obj), Graph(self.store, context))
+ (subject, predicate, obj), Graph(self.store, context)
+ )
else:
self.store.remove(
- (subject, predicate, obj), Graph(self.store, context))
+ (subject, predicate, obj), Graph(self.store, context)
+ )
self.reverseOps = []
diff --git a/rdflib/plugins/stores/concurrent.py b/rdflib/plugins/stores/concurrent.py
index 40747fb1..a258e778 100644
--- a/rdflib/plugins/stores/concurrent.py
+++ b/rdflib/plugins/stores/concurrent.py
@@ -4,7 +4,7 @@ from threading import Lock
class ResponsibleGenerator(object):
"""A generator that will help clean up when it is done being used."""
- __slots__ = ['cleanup', 'gen']
+ __slots__ = ["cleanup", "gen"]
def __init__(self, gen, cleanup):
self.cleanup = cleanup
@@ -21,7 +21,6 @@ class ResponsibleGenerator(object):
class ConcurrentStore(object):
-
def __init__(self, store):
self.store = store
@@ -60,9 +59,11 @@ class ConcurrentStore(object):
yield s, p, o
for (s, p, o) in self.__pending_adds:
- if (su is None or su == s) \
- and (pr is None or pr == p) \
- and (ob is None or ob == o):
+ if (
+ (su is None or su == s)
+ and (pr is None or pr == p)
+ and (ob is None or ob == o)
+ ):
yield s, p, o
def __len__(self):
diff --git a/rdflib/plugins/stores/regexmatching.py b/rdflib/plugins/stores/regexmatching.py
index 773dfab3..f890405d 100644
--- a/rdflib/plugins/stores/regexmatching.py
+++ b/rdflib/plugins/stores/regexmatching.py
@@ -11,7 +11,6 @@ matching against the results from the store it's wrapping.
from rdflib.store import Store
from rdflib.graph import Graph
-from six import text_type
import re
@@ -21,7 +20,7 @@ NATIVE_REGEX = 0
PYTHON_REGEX = 1
-class REGEXTerm(text_type):
+class REGEXTerm(str):
"""
REGEXTerm can be used in any term slot and is interpreted as a request to
perform a REGEX match (not a string comparison) using the value
@@ -32,13 +31,14 @@ class REGEXTerm(text_type):
self.compiledExpr = re.compile(expr)
def __reduce__(self):
- return (REGEXTerm, (text_type(''),))
+ return (REGEXTerm, (str(""),))
def regexCompareQuad(quad, regexQuad):
for index in range(4):
- if isinstance(regexQuad[index], REGEXTerm) and not \
- regexQuad[index].compiledExpr.match(quad[index]):
+ if isinstance(regexQuad[index], REGEXTerm) and not regexQuad[
+ index
+ ].compiledExpr.match(quad[index]):
return False
return True
@@ -67,29 +67,36 @@ class REGEXMatching(Store):
def remove(self, triple, context=None):
(subject, predicate, object_) = triple
- if isinstance(subject, REGEXTerm) or \
- isinstance(predicate, REGEXTerm) or \
- isinstance(object_, REGEXTerm) or \
- (context is not None and
- isinstance(context.identifier, REGEXTerm)):
+ if (
+ isinstance(subject, REGEXTerm)
+ or isinstance(predicate, REGEXTerm)
+ or isinstance(object_, REGEXTerm)
+ or (context is not None and isinstance(context.identifier, REGEXTerm))
+ ):
# One or more of the terms is a REGEX expression, so we must
# replace it / them with wildcard(s)and match after we query.
s = not isinstance(subject, REGEXTerm) and subject or None
p = not isinstance(predicate, REGEXTerm) and predicate or None
o = not isinstance(object_, REGEXTerm) and object_ or None
- c = (context is not None and
- not isinstance(context.identifier, REGEXTerm)) \
- and context \
+ c = (
+ (context is not None and not isinstance(context.identifier, REGEXTerm))
+ and context
or None
+ )
removeQuadList = []
for (s1, p1, o1), cg in self.storage.triples((s, p, o), c):
for ctx in cg:
ctx = ctx.identifier
if regexCompareQuad(
- (s1, p1, o1, ctx),
- (subject, predicate, object_, context
- is not None and context.identifier or context)):
+ (s1, p1, o1, ctx),
+ (
+ subject,
+ predicate,
+ object_,
+ context is not None and context.identifier or context,
+ ),
+ ):
removeQuadList.append((s1, p1, o1, ctx))
for s, p, o, c in removeQuadList:
self.storage.remove((s, p, o), c and Graph(self, c) or c)
@@ -98,37 +105,40 @@ class REGEXMatching(Store):
def triples(self, triple, context=None):
(subject, predicate, object_) = triple
- if isinstance(subject, REGEXTerm) or \
- isinstance(predicate, REGEXTerm) or \
- isinstance(object_, REGEXTerm) or \
- (context is not None and
- isinstance(context.identifier, REGEXTerm)):
+ if (
+ isinstance(subject, REGEXTerm)
+ or isinstance(predicate, REGEXTerm)
+ or isinstance(object_, REGEXTerm)
+ or (context is not None and isinstance(context.identifier, REGEXTerm))
+ ):
# One or more of the terms is a REGEX expression, so we must
# replace it / them with wildcard(s) and match after we query.
s = not isinstance(subject, REGEXTerm) and subject or None
p = not isinstance(predicate, REGEXTerm) and predicate or None
o = not isinstance(object_, REGEXTerm) and object_ or None
- c = (context is not None and
- not isinstance(context.identifier, REGEXTerm)) \
- and context \
+ c = (
+ (context is not None and not isinstance(context.identifier, REGEXTerm))
+ and context
or None
+ )
for (s1, p1, o1), cg in self.storage.triples((s, p, o), c):
matchingCtxs = []
for ctx in cg:
if c is None:
- if context is None \
- or context.identifier.compiledExpr.match(
- ctx.identifier):
+ if context is None or context.identifier.compiledExpr.match(
+ ctx.identifier
+ ):
matchingCtxs.append(ctx)
else:
matchingCtxs.append(ctx)
- if matchingCtxs \
- and regexCompareQuad((s1, p1, o1, None),
- (subject, predicate, object_, None)):
+ if matchingCtxs and regexCompareQuad(
+ (s1, p1, o1, None), (subject, predicate, object_, None)
+ ):
yield (s1, p1, o1), (c for c in matchingCtxs)
else:
for (s1, p1, o1), cg in self.storage.triples(
- (subject, predicate, object_), context):
+ (subject, predicate, object_), context
+ ):
yield (s1, p1, o1), cg
def __len__(self, context=None):
diff --git a/rdflib/plugins/stores/sparqlconnector.py b/rdflib/plugins/stores/sparqlconnector.py
index ee981419..abec85a8 100644
--- a/rdflib/plugins/stores/sparqlconnector.py
+++ b/rdflib/plugins/stores/sparqlconnector.py
@@ -14,13 +14,14 @@ log = logging.getLogger(__name__)
class SPARQLConnectorException(Exception):
pass
+
# TODO: Pull in these from the result implementation plugins?
_response_mime_types = {
- 'xml': 'application/sparql-results+xml, application/rdf+xml',
- 'json': 'application/sparql-results+json',
- 'csv': 'text/csv',
- 'tsv': 'text/tab-separated-values',
- 'application/rdf+xml': 'application/rdf+xml',
+ "xml": "application/sparql-results+xml, application/rdf+xml",
+ "json": "application/sparql-results+json",
+ "csv": "text/csv",
+ "tsv": "text/tab-separated-values",
+ "application/rdf+xml": "application/rdf+xml",
}
@@ -30,7 +31,14 @@ class SPARQLConnector(object):
this class deals with nitty gritty details of talking to a SPARQL server
"""
- def __init__(self, query_endpoint=None, update_endpoint=None, returnFormat='xml', method='GET', **kwargs):
+ def __init__(
+ self,
+ query_endpoint=None,
+ update_endpoint=None,
+ returnFormat="xml",
+ method="GET",
+ **kwargs
+ ):
"""
Any additional keyword arguments will be passed to requests, and can be used to setup timesouts, basic auth, etc.
"""
@@ -48,9 +56,9 @@ class SPARQLConnector(object):
@property
def session(self):
- k = 'session_%d' % os.getpid()
+ k = "session_%d" % os.getpid()
self._session.__dict__.setdefault(k, requests.Session())
- log.debug('Session %s %s', os.getpid(), id(self._session.__dict__[k]))
+ log.debug("Session %s %s", os.getpid(), id(self._session.__dict__[k]))
return self._session.__dict__[k]
@property
@@ -59,7 +67,7 @@ class SPARQLConnector(object):
@method.setter
def method(self, method):
- if method not in ('GET', 'POST'):
+ if method not in ("GET", "POST"):
raise SPARQLConnectorException('Method must be "GET" or "POST"')
self._method = method
@@ -69,25 +77,26 @@ class SPARQLConnector(object):
if not self.query_endpoint:
raise SPARQLConnectorException("Query endpoint not set!")
- params = {'query': query}
+ params = {"query": query}
if default_graph:
params["default-graph-uri"] = default_graph
- headers = {'Accept': _response_mime_types[self.returnFormat]}
+ headers = {"Accept": _response_mime_types[self.returnFormat]}
args = dict(self.kwargs)
args.update(url=self.query_endpoint)
# merge params/headers dicts
- args.setdefault('params', {})
+ args.setdefault("params", {})
- args.setdefault('headers', {})
- args['headers'].update(headers)
+ args.setdefault("headers", {})
+ args["headers"].update(headers)
- if self.method == 'GET':
- args['params'].update(params)
- elif self.method == 'POST':
- args['data'] = params
+ if self.method == "GET":
+ args["params"].update(params)
+ elif self.method == "POST":
+ args["headers"].update({"Content-Type": "application/sparql-query"})
+ args["data"] = params
else:
raise SPARQLConnectorException("Unknown method %s" % self.method)
@@ -95,7 +104,9 @@ class SPARQLConnector(object):
res.raise_for_status()
- return Result.parse(BytesIO(res.content), content_type=res.headers['Content-type'])
+ return Result.parse(
+ BytesIO(res.content), content_type=res.headers["Content-type"]
+ )
def update(self, update, default_graph=None):
if not self.update_endpoint:
@@ -106,18 +117,20 @@ class SPARQLConnector(object):
if default_graph:
params["using-graph-uri"] = default_graph
- headers = {'Accept': _response_mime_types[self.returnFormat]}
+ headers = {
+ "Accept": _response_mime_types[self.returnFormat],
+ "Content-Type": "application/sparql-update",
+ }
args = dict(self.kwargs)
- args.update(url=self.update_endpoint,
- data=update.encode('utf-8'))
+ args.update(url=self.update_endpoint, data=update.encode("utf-8"))
# merge params/headers dicts
- args.setdefault('params', {})
- args['params'].update(params)
- args.setdefault('headers', {})
- args['headers'].update(headers)
+ args.setdefault("params", {})
+ args["params"].update(params)
+ args.setdefault("headers", {})
+ args["headers"].update(headers)
res = self.session.post(**args)
diff --git a/rdflib/plugins/stores/sparqlstore.py b/rdflib/plugins/stores/sparqlstore.py
index 5f7446ce..1bdf2d32 100644
--- a/rdflib/plugins/stores/sparqlstore.py
+++ b/rdflib/plugins/stores/sparqlstore.py
@@ -5,12 +5,6 @@ This is an RDFLib store around Ivan Herman et al.'s SPARQL service wrapper.
This was first done in layer-cake, and then ported to RDFLib
"""
-
-# Defines some SPARQL keywords
-LIMIT = 'LIMIT'
-OFFSET = 'OFFSET'
-ORDERBY = 'ORDER BY'
-
import re
import collections
@@ -23,9 +17,12 @@ from rdflib import Variable, BNode
from rdflib.graph import DATASET_DEFAULT_GRAPH_ID
from rdflib.term import Node
-from six import string_types
+# Defines some SPARQL keywords
+LIMIT = "LIMIT"
+OFFSET = "OFFSET"
+ORDERBY = "ORDER BY"
-BNODE_IDENT_PATTERN = re.compile('(?P<label>_\:[^\s]+)')
+BNODE_IDENT_PATTERN = re.compile("(?P<label>_\:[^\s]+)")
def _node_to_sparql(node):
@@ -88,21 +85,26 @@ class SPARQLStore(SPARQLConnector, Store):
will use HTTP basic auth.
"""
+
formula_aware = False
transaction_aware = False
graph_aware = True
regex_matching = NATIVE_REGEX
- def __init__(self,
- endpoint=None,
- sparql11=True, context_aware=True,
- node_to_sparql=_node_to_sparql,
- returnFormat='xml',
- **sparqlconnector_kwargs):
+ def __init__(
+ self,
+ endpoint=None,
+ sparql11=True,
+ context_aware=True,
+ node_to_sparql=_node_to_sparql,
+ returnFormat="xml",
+ **sparqlconnector_kwargs
+ ):
"""
"""
super(SPARQLStore, self).__init__(
- endpoint, returnFormat=returnFormat, **sparqlconnector_kwargs)
+ endpoint, returnFormat=returnFormat, **sparqlconnector_kwargs
+ )
self.node_to_sparql = node_to_sparql
self.nsBindings = {}
@@ -113,7 +115,7 @@ class SPARQLStore(SPARQLConnector, Store):
# Database Management Methods
def create(self, configuration):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def open(self, configuration, create=False):
"""
@@ -126,23 +128,23 @@ class SPARQLStore(SPARQLConnector, Store):
self.query_endpoint = configuration
def destroy(self, configuration):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
# Transactional interfaces
def commit(self):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def rollback(self):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def add(self, _, context=None, quoted=False):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def addN(self, quads):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def remove(self, _, context):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def _query(self, *args, **kwargs):
self._queries += 1
@@ -153,38 +155,37 @@ class SPARQLStore(SPARQLConnector, Store):
bindings = list(self.nsBindings.items()) + list(extra_bindings.items())
if not bindings:
return query
- return '\n'.join([
- '\n'.join(['PREFIX %s: <%s>' % (k, v) for k, v in bindings]),
- '', # separate ns_bindings from query with an empty line
- query
- ])
+ return "\n".join(
+ [
+ "\n".join(["PREFIX %s: <%s>" % (k, v) for k, v in bindings]),
+ "", # separate ns_bindings from query with an empty line
+ query,
+ ]
+ )
def _preprocess_query(self, query):
return self._inject_prefixes(query)
- def query(self, query,
- initNs={},
- initBindings={},
- queryGraph=None,
- DEBUG=False):
+ def query(self, query, initNs={}, initBindings={}, queryGraph=None, DEBUG=False):
self.debug = DEBUG
- assert isinstance(query, string_types)
+ assert isinstance(query, str)
query = self._inject_prefixes(query, initNs)
if initBindings:
if not self.sparql11:
- raise Exception(
- "initBindings not supported for SPARQL 1.0 Endpoints.")
+ raise Exception("initBindings not supported for SPARQL 1.0 Endpoints.")
v = list(initBindings)
# VALUES was added to SPARQL 1.1 on 2012/07/24
- query += "\nVALUES ( %s )\n{ ( %s ) }\n"\
- % (" ".join("?" + str(x) for x in v),
- " ".join(self.node_to_sparql(initBindings[x]) for x in v))
+ query += "\nVALUES ( %s )\n{ ( %s ) }\n" % (
+ " ".join("?" + str(x) for x in v),
+ " ".join(self.node_to_sparql(initBindings[x]) for x in v),
+ )
- return self._query(query,
- default_graph=queryGraph if self._is_contextual(queryGraph) else None)
+ return self._query(
+ query, default_graph=queryGraph if self._is_contextual(queryGraph) else None
+ )
def triples(self, spo, context=None):
"""
@@ -226,28 +227,31 @@ class SPARQLStore(SPARQLConnector, Store):
vars = []
if not s:
- s = Variable('s')
+ s = Variable("s")
vars.append(s)
if not p:
- p = Variable('p')
+ p = Variable("p")
vars.append(p)
if not o:
- o = Variable('o')
+ o = Variable("o")
vars.append(o)
if vars:
- v = ' '.join([term.n3() for term in vars])
- verb = 'SELECT %s ' % v
+ v = " ".join([term.n3() for term in vars])
+ verb = "SELECT %s " % v
else:
- verb = 'ASK'
+ verb = "ASK"
nts = self.node_to_sparql
query = "%s { %s %s %s }" % (verb, nts(s), nts(p), nts(o))
# The ORDER BY is necessary
- if hasattr(context, LIMIT) or hasattr(context, OFFSET) \
- or hasattr(context, ORDERBY):
+ if (
+ hasattr(context, LIMIT)
+ or hasattr(context, OFFSET)
+ or hasattr(context, ORDERBY)
+ ):
var = None
if isinstance(s, Variable):
var = s
@@ -255,28 +259,33 @@ class SPARQLStore(SPARQLConnector, Store):
var = p
elif isinstance(o, Variable):
var = o
- elif hasattr(context, ORDERBY) \
- and isinstance(getattr(context, ORDERBY), Variable):
+ elif hasattr(context, ORDERBY) and isinstance(
+ getattr(context, ORDERBY), Variable
+ ):
var = getattr(context, ORDERBY)
- query = query + ' %s %s' % (ORDERBY, var.n3())
+ query = query + " %s %s" % (ORDERBY, var.n3())
try:
- query = query + ' LIMIT %s' % int(getattr(context, LIMIT))
+ query = query + " LIMIT %s" % int(getattr(context, LIMIT))
except (ValueError, TypeError, AttributeError):
pass
try:
- query = query + ' OFFSET %s' % int(getattr(context, OFFSET))
+ query = query + " OFFSET %s" % int(getattr(context, OFFSET))
except (ValueError, TypeError, AttributeError):
pass
- result = self._query(query,
- default_graph=context.identifier if self._is_contextual(context) else None)
+ result = self._query(
+ query,
+ default_graph=context.identifier if self._is_contextual(context) else None,
+ )
if vars:
for row in result:
- yield (row.get(s, s),
- row.get(p, p),
- row.get(o, o)), None # why is the context here not the passed in graph 'context'?
+ yield (
+ row.get(s, s),
+ row.get(p, p),
+ row.get(o, o),
+ ), None # why is the context here not the passed in graph 'context'?
else:
if result.askAnswer:
yield (s, p, o), None
@@ -289,18 +298,23 @@ class SPARQLStore(SPARQLConnector, Store):
which will iterate over each term in the list and dispatch to
triples.
"""
- raise NotImplementedError('Triples choices currently not supported')
+ raise NotImplementedError("Triples choices currently not supported")
def __len__(self, context=None):
if not self.sparql11:
raise NotImplementedError(
- "For performance reasons, this is not" +
- "supported for sparql1.0 endpoints")
+ "For performance reasons, this is not"
+ + "supported for sparql1.0 endpoints"
+ )
else:
q = "SELECT (count(*) as ?c) WHERE {?s ?p ?o .}"
- result = self._query(q,
- default_graph=context.identifier if self._is_contextual(context) else None)
+ result = self._query(
+ q,
+ default_graph=context.identifier
+ if self._is_contextual(context)
+ else None,
+ )
return int(next(iter(result)).c)
@@ -322,12 +336,14 @@ class SPARQLStore(SPARQLConnector, Store):
if triple:
nts = self.node_to_sparql
s, p, o = triple
- params = (nts(s if s else Variable('s')),
- nts(p if p else Variable('p')),
- nts(o if o else Variable('o')))
- q = 'SELECT ?name WHERE { GRAPH ?name { %s %s %s }}' % params
+ params = (
+ nts(s if s else Variable("s")),
+ nts(p if p else Variable("p")),
+ nts(o if o else Variable("o")),
+ )
+ q = "SELECT ?name WHERE { GRAPH ?name { %s %s %s }}" % params
else:
- q = 'SELECT ?name WHERE { GRAPH ?name {} }'
+ q = "SELECT ?name WHERE { GRAPH ?name {} }"
result = self._query(q)
@@ -339,9 +355,7 @@ class SPARQLStore(SPARQLConnector, Store):
def prefix(self, namespace):
""" """
- return dict(
- [(v, k) for k, v in self.nsBindings.items()]
- ).get(namespace)
+ return dict([(v, k) for k, v in self.nsBindings.items()]).get(namespace)
def namespace(self, prefix):
return self.nsBindings.get(prefix)
@@ -351,10 +365,10 @@ class SPARQLStore(SPARQLConnector, Store):
yield prefix, ns
def add_graph(self, graph):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def remove_graph(self, graph):
- raise TypeError('The SPARQL store is read only')
+ raise TypeError("The SPARQL store is read only")
def _is_contextual(self, graph):
""" Returns `True` if the "GRAPH" keyword must appear
@@ -362,8 +376,8 @@ class SPARQLStore(SPARQLConnector, Store):
"""
if (not self.context_aware) or (graph is None):
return False
- if isinstance(graph, string_types):
- return graph != '__UNION__'
+ if isinstance(graph, str):
+ return graph != "__UNION__"
else:
return graph.identifier != DATASET_DEFAULT_GRAPH_ID
@@ -390,9 +404,9 @@ class SPARQLUpdateStore(SPARQLStore):
where_pattern = re.compile(r"""(?P<where>WHERE\s*\{)""", re.IGNORECASE)
- ##################################################################
- ### Regex for injecting GRAPH blocks into updates on a context ###
- ##################################################################
+ ##############################################################
+ # Regex for injecting GRAPH blocks into updates on a context #
+ ##############################################################
# Observations on the SPARQL grammar (http://www.w3.org/TR/2013/REC-sparql11-query-20130321/):
# 1. Only the terminals STRING_LITERAL1, STRING_LITERAL2,
@@ -415,19 +429,27 @@ class SPARQLUpdateStore(SPARQLStore):
STRING_LITERAL2 = u'"([^"\\\\]|\\\\.)*"'
STRING_LITERAL_LONG1 = u"'''(('|'')?([^'\\\\]|\\\\.))*'''"
STRING_LITERAL_LONG2 = u'"""(("|"")?([^"\\\\]|\\\\.))*"""'
- String = u'(%s)|(%s)|(%s)|(%s)' % (STRING_LITERAL1, STRING_LITERAL2,
- STRING_LITERAL_LONG1, STRING_LITERAL_LONG2)
+ String = u"(%s)|(%s)|(%s)|(%s)" % (
+ STRING_LITERAL1,
+ STRING_LITERAL2,
+ STRING_LITERAL_LONG1,
+ STRING_LITERAL_LONG2,
+ )
IRIREF = u'<([^<>"{}|^`\\]\\\\\[\\x00-\\x20])*>'
- COMMENT = u'#[^\\x0D\\x0A]*([\\x0D\\x0A]|\\Z)'
+ COMMENT = u"#[^\\x0D\\x0A]*([\\x0D\\x0A]|\\Z)"
# Simplified grammar to find { at beginning and } at end of blocks
- BLOCK_START = u'{'
- BLOCK_END = u'}'
- ESCAPED = u'\\\\.'
+ BLOCK_START = u"{"
+ BLOCK_END = u"}"
+ ESCAPED = u"\\\\."
# Match anything that doesn't start or end a block:
- BlockContent = u'(%s)|(%s)|(%s)|(%s)' % (String, IRIREF, COMMENT, ESCAPED)
- BlockFinding = u'(?P<block_start>%s)|(?P<block_end>%s)|(?P<block_content>%s)' % (BLOCK_START, BLOCK_END, BlockContent)
+ BlockContent = u"(%s)|(%s)|(%s)|(%s)" % (String, IRIREF, COMMENT, ESCAPED)
+ BlockFinding = u"(?P<block_start>%s)|(?P<block_end>%s)|(?P<block_content>%s)" % (
+ BLOCK_START,
+ BLOCK_END,
+ BlockContent,
+ )
BLOCK_FINDING_PATTERN = re.compile(BlockFinding)
# Note that BLOCK_FINDING_PATTERN.finditer() will not cover the whole
@@ -436,15 +458,17 @@ class SPARQLUpdateStore(SPARQLStore):
##################################################################
- def __init__(self,
- queryEndpoint=None, update_endpoint=None,
- sparql11=True,
- context_aware=True,
- postAsEncoded=True,
- autocommit=True,
- dirty_reads=False,
- **kwds
- ):
+ def __init__(
+ self,
+ queryEndpoint=None,
+ update_endpoint=None,
+ sparql11=True,
+ context_aware=True,
+ postAsEncoded=True,
+ autocommit=True,
+ dirty_reads=False,
+ **kwds
+ ):
"""
:param autocommit if set, the store will commit after every
writing operations. If False, we only make queries on the
@@ -493,8 +517,8 @@ class SPARQLUpdateStore(SPARQLStore):
def open(self, configuration, create=False):
"""
sets the endpoint URLs for this SPARQLStore
- :param configuration: either a tuple of (queryEndpoint, update_endpoint),
- or a string with the query endpoint
+ :param configuration: either a tuple of (query_endpoint, update_endpoint),
+ or a string with the endpoint which is configured as query and update endpoint
:param create: if True an exception is thrown.
"""
@@ -507,9 +531,7 @@ class SPARQLUpdateStore(SPARQLStore):
self.update_endpoint = configuration[1]
else:
self.query_endpoint = configuration
-
- if not self.update_endpoint:
- self.update_endpoint = self.endpoint
+ self.update_endpoint = configuration
def _transaction(self):
if self._edits is None:
@@ -524,7 +546,7 @@ class SPARQLUpdateStore(SPARQLStore):
and reads can degenerate to the original call-per-triple situation that originally existed.
"""
if self._edits and len(self._edits) > 0:
- self._update('\n;\n'.join(self._edits))
+ self._update("\n;\n".join(self._edits))
self._edits = None
def rollback(self):
@@ -542,8 +564,7 @@ class SPARQLUpdateStore(SPARQLStore):
nts = self.node_to_sparql
triple = "%s %s %s ." % (nts(subject), nts(predicate), nts(obj))
if self._is_contextual(context):
- q = "INSERT DATA { GRAPH %s { %s } }" % (
- nts(context.identifier), triple)
+ q = "INSERT DATA { GRAPH %s { %s } }" % (nts(context.identifier), triple)
else:
q = "INSERT DATA { %s }" % triple
self._transaction().append(q)
@@ -562,12 +583,13 @@ class SPARQLUpdateStore(SPARQLStore):
nts = self.node_to_sparql
for context in contexts:
triples = [
- "%s %s %s ." % (
- nts(subject), nts(predicate), nts(obj)
- ) for subject, predicate, obj in contexts[context]
+ "%s %s %s ." % (nts(subject), nts(predicate), nts(obj))
+ for subject, predicate, obj in contexts[context]
]
- data.append("INSERT DATA { GRAPH %s { %s } }\n" % (
- nts(context.identifier), '\n'.join(triples)))
+ data.append(
+ "INSERT DATA { GRAPH %s { %s } }\n"
+ % (nts(context.identifier), "\n".join(triples))
+ )
self._transaction().extend(data)
if self.autocommit:
self.commit()
@@ -589,7 +611,10 @@ class SPARQLUpdateStore(SPARQLStore):
triple = "%s %s %s ." % (nts(subject), nts(predicate), nts(obj))
if self._is_contextual(context):
cid = nts(context.identifier)
- q = "WITH %(graph)s DELETE { %(triple)s } WHERE { %(triple)s }" % { 'graph': cid, 'triple': triple }
+ q = "WITH %(graph)s DELETE { %(triple)s } WHERE { %(triple)s }" % {
+ "graph": cid,
+ "triple": triple,
+ }
else:
q = "DELETE { %s } WHERE { %s } " % (triple, triple)
self._transaction().append(q)
@@ -605,11 +630,7 @@ class SPARQLUpdateStore(SPARQLStore):
SPARQLConnector.update(self, update)
- def update(self, query,
- initNs={},
- initBindings={},
- queryGraph=None,
- DEBUG=False):
+ def update(self, query, initNs={}, initBindings={}, queryGraph=None, DEBUG=False):
"""
Perform a SPARQL Update Query against the endpoint,
INSERT, LOAD, DELETE etc.
@@ -646,7 +667,7 @@ class SPARQLUpdateStore(SPARQLStore):
raise Exception("UpdateEndpoint is not set - call 'open'")
self.debug = DEBUG
- assert isinstance(query, string_types)
+ assert isinstance(query, str)
query = self._inject_prefixes(query, initNs)
if self._is_contextual(queryGraph):
@@ -659,9 +680,10 @@ class SPARQLUpdateStore(SPARQLStore):
# have a WHERE clause. This also works for updates with
# more than one INSERT/DELETE.
v = list(initBindings)
- values = "\nVALUES ( %s )\n{ ( %s ) }\n"\
- % (" ".join("?" + str(x) for x in v),
- " ".join(self.node_to_sparql(initBindings[x]) for x in v))
+ values = "\nVALUES ( %s )\n{ ( %s ) }\n" % (
+ " ".join("?" + str(x) for x in v),
+ " ".join(self.node_to_sparql(initBindings[x]) for x in v),
+ )
query = self.where_pattern.sub("WHERE { " + values, query)
@@ -680,7 +702,7 @@ class SPARQLUpdateStore(SPARQLStore):
if isinstance(query_graph, Node):
query_graph = self.node_to_sparql(query_graph)
else:
- query_graph = '<%s>' % query_graph
+ query_graph = "<%s>" % query_graph
graph_block_open = " GRAPH %s {" % query_graph
graph_block_close = "} "
@@ -699,16 +721,18 @@ class SPARQLUpdateStore(SPARQLStore):
modified_query = []
pos = 0
for match in self.BLOCK_FINDING_PATTERN.finditer(query):
- if match.group('block_start') is not None:
+ if match.group("block_start") is not None:
level += 1
if level == 1:
- modified_query.append(query[pos:match.end()])
+ modified_query.append(query[pos: match.end()])
modified_query.append(graph_block_open)
pos = match.end()
- elif match.group('block_end') is not None:
+ elif match.group("block_end") is not None:
if level == 1:
- since_previous_pos = query[pos:match.start()]
- if modified_query[-1] is graph_block_open and (since_previous_pos == "" or since_previous_pos.isspace()):
+ since_previous_pos = query[pos: match.start()]
+ if modified_query[-1] is graph_block_open and (
+ since_previous_pos == "" or since_previous_pos.isspace()
+ ):
# In this case, adding graph_block_start and
# graph_block_end results in an empty GRAPH block. Some
# enpoints (e.g. TDB) can not handle this. Therefore
@@ -728,8 +752,7 @@ class SPARQLUpdateStore(SPARQLStore):
if not self.graph_aware:
Store.add_graph(self, graph)
elif graph.identifier != DATASET_DEFAULT_GRAPH_ID:
- self.update(
- "CREATE GRAPH %s" % self.node_to_sparql(graph.identifier))
+ self.update("CREATE GRAPH %s" % self.node_to_sparql(graph.identifier))
def close(self, commit_pending_transaction=False):
@@ -744,5 +767,4 @@ class SPARQLUpdateStore(SPARQLStore):
elif graph.identifier == DATASET_DEFAULT_GRAPH_ID:
self.update("DROP DEFAULT")
else:
- self.update(
- "DROP GRAPH %s" % self.node_to_sparql(graph.identifier))
+ self.update("DROP GRAPH %s" % self.node_to_sparql(graph.identifier))
diff --git a/rdflib/query.py b/rdflib/query.py
index d04440fb..3e21632f 100644
--- a/rdflib/query.py
+++ b/rdflib/query.py
@@ -9,13 +9,11 @@ import tempfile
import warnings
import types
-from six import BytesIO
-from six import PY2
-from six import text_type
-from six.moves.urllib.parse import urlparse
+from io import BytesIO
-__all__ = ['Processor', 'Result', 'ResultParser', 'ResultSerializer',
- 'ResultException']
+from urllib.parse import urlparse
+
+__all__ = ["Processor", "Result", "ResultParser", "ResultSerializer", "ResultException"]
class Processor(object):
@@ -71,7 +69,7 @@ class EncodeOnlyUnicode(object):
self.__stream = stream
def write(self, arg):
- if isinstance(arg, text_type):
+ if isinstance(arg, str):
self.__stream.write(arg.encode("utf-8"))
else:
self.__stream.write(arg)
@@ -118,10 +116,8 @@ class ResultRow(tuple):
def __new__(cls, values, labels):
- instance = super(ResultRow, cls).__new__(
- cls, (values.get(v) for v in labels))
- instance.labels = dict((text_type(x[1]), x[0])
- for x in enumerate(labels))
+ instance = super(ResultRow, cls).__new__(cls, (values.get(v) for v in labels))
+ instance.labels = dict((str(x[1]), x[0]) for x in enumerate(labels))
return instance
def __getattr__(self, name):
@@ -135,8 +131,8 @@ class ResultRow(tuple):
except TypeError:
if name in self.labels:
return tuple.__getitem__(self, self.labels[name])
- if text_type(name) in self.labels: # passing in variable object
- return tuple.__getitem__(self, self.labels[text_type(name)])
+ if str(name) in self.labels: # passing in variable object
+ return tuple.__getitem__(self, self.labels[str(name)])
raise KeyError(name)
def get(self, name, default=None):
@@ -170,8 +166,8 @@ class Result(object):
def __init__(self, type_):
- if type_ not in ('CONSTRUCT', 'DESCRIBE', 'SELECT', 'ASK'):
- raise ResultException('Unknown Result type: %s' % type_)
+ if type_ not in ("CONSTRUCT", "DESCRIBE", "SELECT", "ASK"):
+ raise ResultException("Unknown Result type: %s" % type_)
self.type = type_
self.vars = None
@@ -195,7 +191,8 @@ class Result(object):
self._bindings = b
bindings = property(
- _get_bindings, _set_bindings, doc="a list of variable bindings as dicts")
+ _get_bindings, _set_bindings, doc="a list of variable bindings as dicts"
+ )
@staticmethod
def parse(source=None, format=None, content_type=None, **kwargs):
@@ -206,21 +203,22 @@ class Result(object):
elif content_type:
plugin_key = content_type.split(";", 1)[0]
else:
- plugin_key = 'xml'
+ plugin_key = "xml"
parser = plugin.get(plugin_key, ResultParser)()
return parser.parse(source, content_type=content_type, **kwargs)
- def serialize(
- self, destination=None, encoding="utf-8", format='xml', **args):
+ def serialize(self, destination=None, encoding="utf-8", format="xml", **args):
- if self.type in ('CONSTRUCT', 'DESCRIBE'):
+ if self.type in ("CONSTRUCT", "DESCRIBE"):
return self.graph.serialize(
- destination, encoding=encoding, format=format, **args)
+ destination, encoding=encoding, format=format, **args
+ )
"""stolen wholesale from graph.serialize"""
from rdflib import plugin
+
serializer = plugin.get(format, ResultSerializer)(self)
if destination is None:
stream = BytesIO()
@@ -234,11 +232,12 @@ class Result(object):
location = destination
scheme, netloc, path, params, query, fragment = urlparse(location)
if netloc != "":
- print("WARNING: not saving as location" +
- "is not a local file reference")
+ print(
+ "WARNING: not saving as location" + "is not a local file reference"
+ )
return
fd, name = tempfile.mkstemp()
- stream = os.fdopen(fd, 'wb')
+ stream = os.fdopen(fd, "wb")
serializer.serialize(stream, encoding=encoding, **args)
stream.close()
if hasattr(shutil, "move"):
@@ -248,29 +247,26 @@ class Result(object):
os.remove(name)
def __len__(self):
- if self.type == 'ASK':
+ if self.type == "ASK":
return 1
- elif self.type == 'SELECT':
+ elif self.type == "SELECT":
return len(self.bindings)
else:
return len(self.graph)
def __bool__(self):
- if self.type == 'ASK':
+ if self.type == "ASK":
return self.askAnswer
else:
return len(self) > 0
- if PY2:
- __nonzero__ = __bool__
-
def __iter__(self):
if self.type in ("CONSTRUCT", "DESCRIBE"):
for t in self.graph:
yield t
- elif self.type == 'ASK':
+ elif self.type == "ASK":
yield self.askAnswer
- elif self.type == 'SELECT':
+ elif self.type == "SELECT":
# this iterates over ResultRows of variable bindings
if self._genbindings:
@@ -287,26 +283,26 @@ class Result(object):
def __getattr__(self, name):
if self.type in ("CONSTRUCT", "DESCRIBE") and self.graph is not None:
return self.graph.__getattr__(self, name)
- elif self.type == 'SELECT' and name == 'result':
+ elif self.type == "SELECT" and name == "result":
warnings.warn(
"accessing the 'result' attribute is deprecated."
" Iterate over the object instead.",
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning,
+ stacklevel=2,
+ )
# copied from __iter__, above
return [(tuple(b[v] for v in self.vars)) for b in self.bindings]
else:
- raise AttributeError(
- "'%s' object has no attribute '%s'" % (self, name))
+ raise AttributeError("'%s' object has no attribute '%s'" % (self, name))
def __eq__(self, other):
try:
if self.type != other.type:
return False
- if self.type == 'ASK':
+ if self.type == "ASK":
return self.askAnswer == other.askAnswer
- elif self.type == 'SELECT':
- return self.vars == other.vars \
- and self.bindings == other.bindings
+ elif self.type == "SELECT":
+ return self.vars == other.vars and self.bindings == other.bindings
else:
return self.graph == other.graph
@@ -315,7 +311,6 @@ class Result(object):
class ResultParser(object):
-
def __init__(self):
pass
@@ -325,7 +320,6 @@ class ResultParser(object):
class ResultSerializer(object):
-
def __init__(self, result):
self.result = result
diff --git a/rdflib/resource.py b/rdflib/resource.py
index 7b43b394..691a07f1 100644
--- a/rdflib/resource.py
+++ b/rdflib/resource.py
@@ -3,8 +3,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from six import text_type, PY3
-
__doc__ = """
The :class:`~rdflib.resource.Resource` class wraps a
@@ -320,11 +318,10 @@ from rdflib.term import Node, BNode, URIRef
from rdflib.namespace import RDF
from rdflib.paths import Path
-__all__ = ['Resource']
+__all__ = ["Resource"]
class Resource(object):
-
def __init__(self, graph, subject):
self._graph = graph
self._identifier = subject
@@ -337,11 +334,14 @@ class Resource(object):
return hash(Resource) ^ hash(self._graph) ^ hash(self._identifier)
def __eq__(self, other):
- return (isinstance(other, Resource) and
- self._graph == other._graph and
- self._identifier == other._identifier)
+ return (
+ isinstance(other, Resource)
+ and self._graph == other._graph
+ and self._identifier == other._identifier
+ )
- def __ne__(self, other): return not self == other
+ def __ne__(self, other):
+ return not self == other
def __lt__(self, other):
if isinstance(other, Resource):
@@ -349,17 +349,19 @@ class Resource(object):
else:
return False
- def __gt__(self, other): return not (self < other or self == other)
+ def __gt__(self, other):
+ return not (self < other or self == other)
- def __le__(self, other): return self < other or self == other
+ def __le__(self, other):
+ return self < other or self == other
- def __ge__(self, other): return not self < other
+ def __ge__(self, other):
+ return not self < other
def __unicode__(self):
- return text_type(self._identifier)
+ return str(self._identifier)
- if PY3:
- __str__ = __unicode__
+ __str__ = __unicode__
def add(self, p, o):
if isinstance(o, Resource):
@@ -380,38 +382,31 @@ class Resource(object):
self._graph.set((self._identifier, p, o))
def subjects(self, predicate=None): # rev
- return self._resources(
- self._graph.subjects(predicate, self._identifier))
+ return self._resources(self._graph.subjects(predicate, self._identifier))
def predicates(self, o=None):
if isinstance(o, Resource):
o = o._identifier
- return self._resources(
- self._graph.predicates(self._identifier, o))
+ return self._resources(self._graph.predicates(self._identifier, o))
def objects(self, predicate=None):
- return self._resources(
- self._graph.objects(self._identifier, predicate))
+ return self._resources(self._graph.objects(self._identifier, predicate))
def subject_predicates(self):
- return self._resource_pairs(
- self._graph.subject_predicates(self._identifier))
+ return self._resource_pairs(self._graph.subject_predicates(self._identifier))
def subject_objects(self):
- return self._resource_pairs(
- self._graph.subject_objects(self._identifier))
+ return self._resource_pairs(self._graph.subject_objects(self._identifier))
def predicate_objects(self):
- return self._resource_pairs(
- self._graph.predicate_objects(self._identifier))
+ return self._resource_pairs(self._graph.predicate_objects(self._identifier))
def value(self, p=RDF.value, o=None, default=None, any=True):
if isinstance(o, Resource):
o = o._identifier
- return self._cast(
- self._graph.value(self._identifier, p, o, default, any))
+ return self._cast(self._graph.value(self._identifier, p, o, default, any))
def label(self):
return self._graph.label(self._identifier)
@@ -423,12 +418,14 @@ class Resource(object):
return self._resources(self._graph.items(self._identifier))
def transitive_objects(self, predicate, remember=None):
- return self._resources(self._graph.transitive_objects(
- self._identifier, predicate, remember))
+ return self._resources(
+ self._graph.transitive_objects(self._identifier, predicate, remember)
+ )
def transitive_subjects(self, predicate, remember=None):
- return self._resources(self._graph.transitive_subjects(
- predicate, self._identifier, remember))
+ return self._resources(
+ self._graph.transitive_subjects(predicate, self._identifier, remember)
+ )
def seq(self):
return self._resources(self._graph.seq(self._identifier))
@@ -455,12 +452,16 @@ class Resource(object):
return node
def __iter__(self):
- return self._resource_triples(self._graph.triples((self.identifier, None, None)))
+ return self._resource_triples(
+ self._graph.triples((self.identifier, None, None))
+ )
def __getitem__(self, item):
if isinstance(item, slice):
if item.step:
- raise TypeError("Resources fix the subject for slicing, and can only be sliced by predicate/object. ")
+ raise TypeError(
+ "Resources fix the subject for slicing, and can only be sliced by predicate/object. "
+ )
p, o = item.start, item.stop
if isinstance(p, Resource):
p = p._identifier
@@ -477,7 +478,10 @@ class Resource(object):
elif isinstance(item, (Node, Path)):
return self.objects(item)
else:
- raise TypeError("You can only index a resource by a single rdflib term, a slice of rdflib terms, not %s (%s)"%(item, type(item)))
+ raise TypeError(
+ "You can only index a resource by a single rdflib term, a slice of rdflib terms, not %s (%s)"
+ % (item, type(item))
+ )
def __setitem__(self, item, value):
self.set(item, value)
@@ -486,7 +490,7 @@ class Resource(object):
return type(self)(self._graph, subject)
def __str__(self):
- return 'Resource(%s)' % self._identifier
+ return "Resource(%s)" % self._identifier
def __repr__(self):
- return 'Resource(%s,%s)' % (self._graph, self._identifier)
+ return "Resource(%s,%s)" % (self._graph, self._identifier)
diff --git a/rdflib/serializer.py b/rdflib/serializer.py
index e5a31989..ecb8da0a 100644
--- a/rdflib/serializer.py
+++ b/rdflib/serializer.py
@@ -12,11 +12,10 @@ See also rdflib.plugin
from rdflib.term import URIRef
-__all__ = ['Serializer']
+__all__ = ["Serializer"]
class Serializer(object):
-
def __init__(self, store):
self.store = store
self.encoding = "UTF-8"
diff --git a/rdflib/store.py b/rdflib/store.py
index 32c3f650..ead1c2e7 100644
--- a/rdflib/store.py
+++ b/rdflib/store.py
@@ -2,9 +2,10 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
-from six import BytesIO
-from six.moves import cPickle
+from io import BytesIO
+import pickle
from rdflib.events import Dispatcher, Event
+
"""
============
rdflib.store
@@ -40,12 +41,17 @@ NO_STORE = -1
UNKNOWN = None
-Pickler = cPickle.Pickler
-Unpickler = cPickle.Unpickler
-UnpicklingError = cPickle.UnpicklingError
+Pickler = pickle.Pickler
+Unpickler = pickle.Unpickler
+UnpicklingError = pickle.UnpicklingError
-__all__ = ['StoreCreatedEvent', 'TripleAddedEvent', 'TripleRemovedEvent',
- 'NodePickler', 'Store']
+__all__ = [
+ "StoreCreatedEvent",
+ "TripleAddedEvent",
+ "TripleRemovedEvent",
+ "NodePickler",
+ "Store",
+]
class StoreCreatedEvent(Event):
@@ -113,11 +119,10 @@ class NodePickler(object):
def __getstate__(self):
state = self.__dict__.copy()
- del state['_get_object']
- state.update({
- '_ids': tuple(self._ids.items()),
- '_objects': tuple(self._objects.items())
- })
+ del state["_get_object"]
+ state.update(
+ {"_ids": tuple(self._ids.items()), "_objects": tuple(self._objects.items())}
+ )
return state
def __setstate__(self, state):
@@ -153,6 +158,7 @@ class Store(object):
from rdflib.graph import Graph, QuotedGraph
from rdflib.term import Variable
from rdflib.term import Statement
+
self.__node_pickler = np = NodePickler()
np.register(self, "S")
np.register(URIRef, "U")
@@ -163,12 +169,12 @@ class Store(object):
np.register(Variable, "V")
np.register(Statement, "s")
return self.__node_pickler
+
node_pickler = property(__get_node_pickler)
# Database management methods
def create(self, configuration):
- self.dispatcher.dispatch(
- StoreCreatedEvent(configuration=configuration))
+ self.dispatcher.dispatch(StoreCreatedEvent(configuration=configuration))
def open(self, configuration, create=False):
"""
@@ -211,9 +217,7 @@ class Store(object):
be an error for the quoted argument to be True when the store is not
formula-aware.
"""
- self.dispatcher.dispatch(
- TripleAddedEvent(
- triple=triple, context=context))
+ self.dispatcher.dispatch(TripleAddedEvent(triple=triple, context=context))
def addN(self, quads):
"""
@@ -223,15 +227,16 @@ class Store(object):
is a redirect to add
"""
for s, p, o, c in quads:
- assert c is not None, \
- "Context associated with %s %s %s is None!" % (s, p, o)
+ assert c is not None, "Context associated with %s %s %s is None!" % (
+ s,
+ p,
+ o,
+ )
self.add((s, p, o), c)
def remove(self, triple, context=None):
""" Remove the set of triples matching the pattern from the store """
- self.dispatcher.dispatch(
- TripleRemovedEvent(
- triple=triple, context=context))
+ self.dispatcher.dispatch(TripleRemovedEvent(triple=triple, context=context))
def triples_choices(self, triple, context=None):
"""
@@ -242,44 +247,44 @@ class Store(object):
"""
subject, predicate, object_ = triple
if isinstance(object_, list):
- assert not isinstance(
- subject, list), "object_ / subject are both lists"
- assert not isinstance(
- predicate, list), "object_ / predicate are both lists"
+ assert not isinstance(subject, list), "object_ / subject are both lists"
+ assert not isinstance(predicate, list), "object_ / predicate are both lists"
if object_:
for obj in object_:
for (s1, p1, o1), cg in self.triples(
- (subject, predicate, obj), context):
+ (subject, predicate, obj), context
+ ):
yield (s1, p1, o1), cg
else:
for (s1, p1, o1), cg in self.triples(
- (subject, predicate, None), context):
+ (subject, predicate, None), context
+ ):
yield (s1, p1, o1), cg
elif isinstance(subject, list):
- assert not isinstance(
- predicate, list), "subject / predicate are both lists"
+ assert not isinstance(predicate, list), "subject / predicate are both lists"
if subject:
for subj in subject:
for (s1, p1, o1), cg in self.triples(
- (subj, predicate, object_), context):
+ (subj, predicate, object_), context
+ ):
yield (s1, p1, o1), cg
else:
for (s1, p1, o1), cg in self.triples(
- (None, predicate, object_), context):
+ (None, predicate, object_), context
+ ):
yield (s1, p1, o1), cg
elif isinstance(predicate, list):
- assert not isinstance(
- subject, list), "predicate / subject are both lists"
+ assert not isinstance(subject, list), "predicate / subject are both lists"
if predicate:
for pred in predicate:
for (s1, p1, o1), cg in self.triples(
- (subject, pred, object_), context):
+ (subject, pred, object_), context
+ ):
yield (s1, p1, o1), cg
else:
- for (s1, p1, o1), cg in self.triples(
- (subject, None, object_), context):
+ for (s1, p1, o1), cg in self.triples((subject, None, object_), context):
yield (s1, p1, o1), cg
def triples(self, triple_pattern, context=None):
@@ -290,8 +295,8 @@ class Store(object):
QuotedGraph, Date? DateRange?
:param context: A conjunctive query can be indicated by either
- providing a value of None, or a specific context can be
- queries by passing a Graph instance (if store is context aware).
+ providing a value of None, or a specific context can be
+ queries by passing a Graph instance (if store is context aware).
"""
subject, predicate, object = triple_pattern
diff --git a/rdflib/term.py b/rdflib/term.py
index 3d290258..a93e9c50 100644
--- a/rdflib/term.py
+++ b/rdflib/term.py
@@ -23,25 +23,22 @@ underlying Graph:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
+
# from __future__ import unicode_literals
from fractions import Fraction
__all__ = [
- 'bind',
-
- 'Node',
- 'Identifier',
-
- 'URIRef',
- 'BNode',
- 'Literal',
-
- 'Variable',
- 'Statement',
+ "bind",
+ "Node",
+ "Identifier",
+ "URIRef",
+ "BNode",
+ "Literal",
+ "Variable",
+ "Statement",
]
import logging
-logger = logging.getLogger(__name__)
import warnings
import math
@@ -51,22 +48,27 @@ import xml.dom.minidom
from datetime import date, time, datetime, timedelta
from re import sub, compile
from collections import defaultdict
-from unicodedata import category
-from isodate import parse_time, parse_date, parse_datetime, Duration, parse_duration, duration_isoformat
+from isodate import (
+ parse_time,
+ parse_date,
+ parse_datetime,
+ Duration,
+ parse_duration,
+ duration_isoformat,
+)
from binascii import hexlify, unhexlify
import rdflib
-from six import PY2
-from six import PY3
-from six import b
from rdflib.compat import long_type
-from six import string_types
-from six import text_type
-from six.moves.urllib.parse import urldefrag
-from six.moves.urllib.parse import urljoin
-from six.moves.urllib.parse import urlparse
+from urllib.parse import urldefrag
+from urllib.parse import urljoin
+from urllib.parse import urlparse
+
+from decimal import Decimal
+
+logger = logging.getLogger(__name__)
skolem_genid = "/.well-known/genid/"
rdflib_skolem_genid = "/.well-known/genid/rdflib/"
skolems = {}
@@ -76,10 +78,10 @@ _invalid_uri_chars = '<>" {}|\\^`'
def _is_valid_uri(uri):
- return all(map(lambda c: ord(c) > 256 or not c in _invalid_uri_chars, uri))
+ return all(map(lambda c: ord(c) > 256 or c not in _invalid_uri_chars, uri))
-_lang_tag_regex = compile('^[a-zA-Z]+(?:-[a-zA-Z0-9]+)*$')
+_lang_tag_regex = compile("^[a-zA-Z]+(?:-[a-zA-Z0-9]+)*$")
def _is_valid_langtag(tag):
@@ -92,11 +94,9 @@ def _is_valid_unicode(value):
unicode object.
"""
if isinstance(value, bytes):
- coding_func, param = getattr(value, 'decode'), 'utf-8'
- elif PY3:
- coding_func, param = str, value
+ coding_func, param = getattr(value, "decode"), "utf-8"
else:
- coding_func, param = unicode, value
+ coding_func, param = str, value
# try to convert value into unicode
try:
@@ -114,7 +114,7 @@ class Node(object):
__slots__ = ()
-class Identifier(Node, text_type): # allow Identifiers to be Nodes in the Graph
+class Identifier(Node, str): # allow Identifiers to be Nodes in the Graph
"""
See http://www.w3.org/2002/07/rdf-identifer-terminology/
regarding choice of terminology.
@@ -123,7 +123,7 @@ class Identifier(Node, text_type): # allow Identifiers to be Nodes in the Graph
__slots__ = ()
def __new__(cls, value):
- return text_type.__new__(cls, value)
+ return str.__new__(cls, value)
def eq(self, other):
"""A "semantic"/interpreted equality function,
@@ -159,7 +159,7 @@ class Identifier(Node, text_type): # allow Identifiers to be Nodes in the Graph
"""
if type(self) == type(other):
- return text_type(self) == text_type(other)
+ return str(self) == str(other)
else:
return False
@@ -177,7 +177,7 @@ class Identifier(Node, text_type): # allow Identifiers to be Nodes in the Graph
if other is None:
return True # everything bigger than None
elif type(self) == type(other):
- return text_type(self) > text_type(other)
+ return str(self) > str(other)
elif isinstance(other, Node):
return _ORDERING[type(self)] > _ORDERING[type(other)]
@@ -187,7 +187,7 @@ class Identifier(Node, text_type): # allow Identifiers to be Nodes in the Graph
if other is None:
return False # Nothing is less than None
elif type(self) == type(other):
- return text_type(self) < text_type(other)
+ return str(self) < str(other)
elif isinstance(other, Node):
return _ORDERING[type(self)] < _ORDERING[type(other)]
@@ -209,7 +209,7 @@ class Identifier(Node, text_type): # allow Identifiers to be Nodes in the Graph
# clashes of 'foo', URIRef('foo') and Literal('foo') are typically so rare
# that they don't justify additional overhead. Notice that even in case of
# clash __eq__ is still the fallback and very quick in those cases.
- __hash__ = text_type.__hash__
+ __hash__ = str.__hash__
class URIRef(Identifier):
@@ -228,17 +228,19 @@ class URIRef(Identifier):
value += "#"
if not _is_valid_uri(value):
- logger.warning('%s does not look like a valid URI, trying to serialize this will break.'%value)
-
+ logger.warning(
+ "%s does not look like a valid URI, trying to serialize this will break."
+ % value
+ )
try:
- rt = text_type.__new__(cls, value)
+ rt = str.__new__(cls, value)
except UnicodeDecodeError:
- rt = text_type.__new__(cls, value, 'utf-8')
+ rt = str.__new__(cls, value, "utf-8")
return rt
def toPython(self):
- return text_type(self)
+ return str(self)
def n3(self, namespace_manager=None):
"""
@@ -251,7 +253,10 @@ class URIRef(Identifier):
"""
if not _is_valid_uri(self):
- raise Exception('"%s" does not look like a valid URI, I cannot serialize this as N3/Turtle. Perhaps you wanted to urlencode it?'%self)
+ raise Exception(
+ '"%s" does not look like a valid URI, I cannot serialize this as N3/Turtle. Perhaps you wanted to urlencode it?'
+ % self
+ )
if namespace_manager:
return namespace_manager.normalizeUri(self)
@@ -266,14 +271,10 @@ class URIRef(Identifier):
return self
def __reduce__(self):
- return (URIRef, (text_type(self),))
+ return (URIRef, (str(self),))
def __getnewargs__(self):
- return (text_type(self), )
-
- if PY2:
- def __str__(self):
- return self.encode()
+ return (str(self),)
def __repr__(self):
if self.__class__ is URIRef:
@@ -284,13 +285,13 @@ class URIRef(Identifier):
return """%s(%s)""" % (clsName, super(URIRef, self).__repr__())
def __add__(self, other):
- return self.__class__(text_type(self) + other)
+ return self.__class__(str(self) + other)
def __radd__(self, other):
- return self.__class__(other + text_type(self))
+ return self.__class__(other + str(self))
def __mod__(self, other):
- return self.__class__(text_type(self) % other)
+ return self.__class__(str(self) % other)
def de_skolemize(self):
""" Create a Blank Node from a skolem URI, in accordance
@@ -302,8 +303,7 @@ class URIRef(Identifier):
"""
if isinstance(self, RDFLibGenid):
parsed_uri = urlparse("%s" % self)
- return BNode(
- value=parsed_uri.path[len(rdflib_skolem_genid):])
+ return BNode(value=parsed_uri.path[len(rdflib_skolem_genid):])
elif isinstance(self, Genid):
bnode_id = "%s" % self
if bnode_id in skolems:
@@ -321,7 +321,7 @@ class Genid(URIRef):
@staticmethod
def _is_external_skolem(uri):
- if not isinstance(uri, string_types):
+ if not isinstance(uri, str):
uri = str(uri)
parsed_uri = urlparse(uri)
gen_id = parsed_uri.path.rfind(skolem_genid)
@@ -335,12 +335,14 @@ class RDFLibGenid(Genid):
@staticmethod
def _is_rdflib_skolem(uri):
- if not isinstance(uri, string_types):
+ if not isinstance(uri, str):
uri = str(uri)
parsed_uri = urlparse(uri)
- if parsed_uri.params != "" \
- or parsed_uri.query != "" \
- or parsed_uri.fragment != "":
+ if (
+ parsed_uri.params != ""
+ or parsed_uri.query != ""
+ or parsed_uri.fragment != ""
+ ):
return False
gen_id = parsed_uri.path.rfind(rdflib_skolem_genid)
if gen_id != 0:
@@ -378,10 +380,12 @@ class BNode(Identifier):
Blank Node: http://www.w3.org/TR/rdf-concepts/#section-blank-nodes
"""
+
__slots__ = ()
- def __new__(cls, value=None,
- _sn_gen=_serial_number_generator(), _prefix=_unique_id()):
+ def __new__(
+ cls, value=None, _sn_gen=_serial_number_generator(), _prefix=_unique_id()
+ ):
"""
# only store implementations should pass in a value
"""
@@ -395,26 +399,22 @@ class BNode(Identifier):
# for RDF/XML needs to be something that can be serialzed
# as a nodeID for N3 ?? Unless we require these
# constraints be enforced elsewhere?
- pass # assert is_ncname(text_type(value)), "BNode identifiers
+ pass # assert is_ncname(str(value)), "BNode identifiers
# must be valid NCNames" _:[A-Za-z][A-Za-z0-9]*
# http://www.w3.org/TR/2004/REC-rdf-testcases-20040210/#nodeID
return Identifier.__new__(cls, value)
def toPython(self):
- return text_type(self)
+ return str(self)
def n3(self, namespace_manager=None):
return "_:%s" % self
def __getnewargs__(self):
- return (text_type(self), )
+ return (str(self),)
def __reduce__(self):
- return (BNode, (text_type(self),))
-
- if PY2:
- def __str__(self):
- return self.encode()
+ return (BNode, (str(self),))
def __repr__(self):
if self.__class__ is BNode:
@@ -433,7 +433,7 @@ class BNode(Identifier):
authority = "http://rdlib.net/"
if basepath is None:
basepath = rdflib_skolem_genid
- skolem = "%s%s" % (basepath, text_type(self))
+ skolem = "%s%s" % (basepath, str(self))
return URIRef(urljoin(authority, skolem))
@@ -490,8 +490,6 @@ class Literal(Identifier):
is None < BNode < URIRef < Literal
Any comparison with non-rdflib Node are "NotImplemented"
- In PY2.X some stable order will be made up by python
-
In PY3 this is an error.
>>> from rdflib import Literal, XSD
@@ -515,8 +513,7 @@ class Literal(Identifier):
>>> Literal(1) > URIRef('foo') # by node-type
True
- The > < operators will eat this NotImplemented and either make up
- an ordering (py2.x) or throw a TypeError (py3k):
+ The > < operators will eat this NotImplemented and throw a TypeError (py3k):
>>> Literal(1).__gt__(2.0)
NotImplemented
@@ -524,15 +521,11 @@ class Literal(Identifier):
"""
- if not PY3:
- __slots__ = ("language", "datatype", "value", "_language",
- "_datatype", "_value")
- else:
- __slots__ = ("_language", "_datatype", "_value")
+ __slots__ = ("_language", "_datatype", "_value")
def __new__(cls, lexical_or_value, lang=None, datatype=None, normalize=None):
- if lang == '':
+ if lang == "":
lang = None # no empty lang-tags in RDF
normalize = normalize if normalize is not None else rdflib.NORMALIZE_LITERALS
@@ -540,7 +533,8 @@ class Literal(Identifier):
if lang is not None and datatype is not None:
raise TypeError(
"A Literal can only have one of lang or datatype, "
- "per http://www.w3.org/TR/rdf-concepts/#section-Graph-Literal")
+ "per http://www.w3.org/TR/rdf-concepts/#section-Graph-Literal"
+ )
if lang and not _is_valid_langtag(lang):
raise Exception("'%s' is not a valid language tag!" % lang)
@@ -560,9 +554,9 @@ class Literal(Identifier):
datatype = lexical_or_value.datatype
value = lexical_or_value.value
- elif isinstance(lexical_or_value, string_types) or (PY3 and isinstance(lexical_or_value, bytes)):
- # passed a string
- # try parsing lexical form of datatyped literal
+ elif isinstance(lexical_or_value, str) or isinstance(lexical_or_value, bytes):
+ # passed a string
+ # try parsing lexical form of datatyped literal
value = _castLexicalToPython(lexical_or_value, datatype)
if value is not None and normalize:
@@ -581,13 +575,13 @@ class Literal(Identifier):
if datatype:
lang = None
- if PY3 and isinstance(lexical_or_value, bytes):
- lexical_or_value = lexical_or_value.decode('utf-8')
+ if isinstance(lexical_or_value, bytes):
+ lexical_or_value = lexical_or_value.decode("utf-8")
try:
- inst = text_type.__new__(cls, lexical_or_value)
+ inst = str.__new__(cls, lexical_or_value)
except UnicodeDecodeError:
- inst = text_type.__new__(cls, lexical_or_value, 'utf-8')
+ inst = str.__new__(cls, lexical_or_value, "utf-8")
inst._language = lang
inst._datatype = datatype
@@ -626,7 +620,10 @@ class Literal(Identifier):
return self._datatype
def __reduce__(self):
- return (Literal, (text_type(self), self.language, self.datatype),)
+ return (
+ Literal,
+ (str(self), self.language, self.datatype),
+ )
def __getstate__(self):
return (None, dict(language=self.language, datatype=self.datatype))
@@ -654,24 +651,30 @@ class Literal(Identifier):
# if the datatypes are the same, just add the Python values and convert back
if self.datatype == val.datatype:
- return Literal(self.toPython() + val.toPython(), self.language, datatype=self.datatype)
+ return Literal(
+ self.toPython() + val.toPython(), self.language, datatype=self.datatype
+ )
# if the datatypes are not the same but are both numeric, add the Python values and strip off decimal junk
# (i.e. tiny numbers (more than 17 decimal places) and trailing zeros) and return as a decimal
elif (
- self.datatype in _NUMERIC_LITERAL_TYPES
- and
- val.datatype in _NUMERIC_LITERAL_TYPES
+ self.datatype in _NUMERIC_LITERAL_TYPES
+ and val.datatype in _NUMERIC_LITERAL_TYPES
):
return Literal(
Decimal(
- ('%f' % round(Decimal(self.toPython()) + Decimal(val.toPython()), 15)).rstrip('0').rstrip('.')
+ (
+ "%f"
+ % round(Decimal(self.toPython()) + Decimal(val.toPython()), 15)
+ )
+ .rstrip("0")
+ .rstrip(".")
),
- datatype=_XSD_DECIMAL
+ datatype=_XSD_DECIMAL,
)
# in all other cases, perform string concatenation
else:
try:
- s = text_type.__add__(self, val)
+ s = str.__add__(self, val)
except TypeError:
s = str(self.value) + str(val)
@@ -693,9 +696,6 @@ class Literal(Identifier):
return bool(self.value)
return len(self) != 0
- if PY2:
- __nonzero__ = __bool__
-
def __neg__(self):
"""
>>> (- Literal(1))
@@ -818,8 +818,10 @@ class Literal(Identifier):
return True # Everything is greater than None
if isinstance(other, Literal):
- if self.datatype in _NUMERIC_LITERAL_TYPES and \
- other.datatype in _NUMERIC_LITERAL_TYPES:
+ if (
+ self.datatype in _NUMERIC_LITERAL_TYPES
+ and other.datatype in _NUMERIC_LITERAL_TYPES
+ ):
return self.value > other.value
# plain-literals and xsd:string literals
@@ -851,8 +853,8 @@ class Literal(Identifier):
except TypeError:
pass
- if text_type(self) != text_type(other):
- return text_type(self) > text_type(other)
+ if str(self) != str(other):
+ return str(self) > str(other)
# same language, same lexical form, check real dt
# plain-literals come before xsd:string!
@@ -914,17 +916,21 @@ class Literal(Identifier):
rich-compare with this literal
"""
if isinstance(other, Literal):
- if (self.datatype and other.datatype):
+ if self.datatype and other.datatype:
# two datatyped literals
- if not self.datatype in XSDToPython or not other.datatype in XSDToPython:
+ if (
+ self.datatype not in XSDToPython
+ or other.datatype not in XSDToPython
+ ):
# non XSD DTs must match
if self.datatype != other.datatype:
return False
else:
# xsd:string may be compared with plain literals
- if not (self.datatype == _XSD_STRING and not other.datatype) or \
- (other.datatype == _XSD_STRING and not self.datatype):
+ if not (self.datatype == _XSD_STRING and not other.datatype) or (
+ other.datatype == _XSD_STRING and not self.datatype
+ ):
return False
# if given lang-tag has to be case insensitive equal
@@ -963,7 +969,7 @@ class Literal(Identifier):
"""
# don't use super()... for efficiency reasons, see Identifier.__hash__
- res = text_type.__hash__(self)
+ res = str.__hash__(self)
if self.language:
res ^= hash(self.language.lower())
if self.datatype:
@@ -1011,9 +1017,12 @@ class Literal(Identifier):
if other is None:
return False
if isinstance(other, Literal):
- return self.datatype == other.datatype \
- and (self.language.lower() if self.language else None) == (other.language.lower() if other.language else None) \
- and text_type.__eq__(self, other)
+ return (
+ self.datatype == other.datatype
+ and (self.language.lower() if self.language else None)
+ == (other.language.lower() if other.language else None)
+ and str.__eq__(self, other)
+ )
return False
@@ -1041,29 +1050,35 @@ class Literal(Identifier):
"""
if isinstance(other, Literal):
- if self.datatype in _NUMERIC_LITERAL_TYPES \
- and other.datatype in _NUMERIC_LITERAL_TYPES:
+ if (
+ self.datatype in _NUMERIC_LITERAL_TYPES
+ and other.datatype in _NUMERIC_LITERAL_TYPES
+ ):
if self.value is not None and other.value is not None:
return self.value == other.value
else:
- if text_type.__eq__(self, other):
+ if str.__eq__(self, other):
return True
raise TypeError(
- 'I cannot know that these two lexical forms do not map to the same value: %s and %s' % (self, other))
+ "I cannot know that these two lexical forms do not map to the same value: %s and %s"
+ % (self, other)
+ )
if (self.language or "").lower() != (other.language or "").lower():
return False
dtself = self.datatype or _XSD_STRING
dtother = other.datatype or _XSD_STRING
- if (dtself == _XSD_STRING and dtother == _XSD_STRING):
+ if dtself == _XSD_STRING and dtother == _XSD_STRING:
# string/plain literals, compare on lexical form
- return text_type.__eq__(self, other)
+ return str.__eq__(self, other)
if dtself != dtother:
if rdflib.DAWG_LITERAL_COLLATION:
- raise TypeError("I don't know how to compare literals with datatypes %s and %s" % (
- self.datatype, other.datatype))
+ raise TypeError(
+ "I don't know how to compare literals with datatypes %s and %s"
+ % (self.datatype, other.datatype)
+ )
else:
return False
@@ -1079,7 +1094,7 @@ class Literal(Identifier):
return self.value == other.value
else:
- if text_type.__eq__(self, other):
+ if str.__eq__(self, other):
return True
if self.datatype == _XSD_STRING:
@@ -1087,20 +1102,22 @@ class Literal(Identifier):
# matching DTs, but not matching, we cannot compare!
raise TypeError(
- 'I cannot know that these two lexical forms do not map to the same value: %s and %s' % (self, other))
+ "I cannot know that these two lexical forms do not map to the same value: %s and %s"
+ % (self, other)
+ )
elif isinstance(other, Node):
return False # no non-Literal nodes are equal to a literal
- elif isinstance(other, string_types):
+ elif isinstance(other, str):
# only plain-literals can be directly compared to strings
# TODO: Is "blah"@en eq "blah" ?
if self.language is not None:
return False
- if (self.datatype == _XSD_STRING or self.datatype is None):
- return text_type(self) == other
+ if self.datatype == _XSD_STRING or self.datatype is None:
+ return str(self) == other
elif isinstance(other, (int, long_type, float)):
if self.datatype in _NUMERIC_LITERAL_TYPES:
@@ -1109,7 +1126,11 @@ class Literal(Identifier):
if self.datatype in (_XSD_DATETIME, _XSD_DATE, _XSD_TIME):
return self.value == other
elif isinstance(other, (timedelta, Duration)):
- if self.datatype in (_XSD_DURATION, _XSD_DAYTIMEDURATION, _XSD_YEARMONTHDURATION):
+ if self.datatype in (
+ _XSD_DURATION,
+ _XSD_DAYTIMEDURATION,
+ _XSD_YEARMONTHDURATION,
+ ):
return self.value == other
elif isinstance(other, bool):
if self.datatype == _XSD_BOOLEAN:
@@ -1179,7 +1200,7 @@ class Literal(Identifier):
return self._literal_n3()
def _literal_n3(self, use_plain=False, qname_callback=None):
- '''
+ """
Using plain literal (shorthand) output::
>>> from rdflib.namespace import XSD
@@ -1222,7 +1243,7 @@ class Literal(Identifier):
... qname_callback=lambda uri: "xsd:integer")
u'"1"^^xsd:integer'
- '''
+ """
if use_plain and self.datatype in _PLAIN_LITERAL_TYPES:
if self.value is not None:
# If self is inf or NaN, we need a datatype
@@ -1239,17 +1260,17 @@ class Literal(Identifier):
# in py >=2.6 the string.format function makes this easier
# we try to produce "pretty" output
if self.datatype == _XSD_DOUBLE:
- return sub("\\.?0*e", "e", u'%e' % float(self))
+ return sub("\\.?0*e", "e", u"%e" % float(self))
elif self.datatype == _XSD_DECIMAL:
- s = '%s' % self
- if '.' not in s:
- s += '.0'
+ s = "%s" % self
+ if "." not in s:
+ s += ".0"
return s
elif self.datatype == _XSD_BOOLEAN:
- return (u'%s' % self).lower()
+ return (u"%s" % self).lower()
else:
- return u'%s' % self
+ return u"%s" % self
encoded = self._quote_encode()
@@ -1266,10 +1287,11 @@ class Literal(Identifier):
if math.isinf(v):
# py string reps: float: 'inf', Decimal: 'Infinity"
# both need to become "INF" in xsd datatypes
- encoded = encoded.replace('inf', 'INF').replace(
- 'Infinity', 'INF')
+ encoded = encoded.replace("inf", "INF").replace(
+ "Infinity", "INF"
+ )
if math.isnan(v):
- encoded = encoded.replace('nan', 'NaN')
+ encoded = encoded.replace("nan", "NaN")
except ValueError:
# if we can't cast to float something is wrong, but we can
# still serialize. Warn user about it
@@ -1277,11 +1299,11 @@ class Literal(Identifier):
language = self.language
if language:
- return '%s@%s' % (encoded, language)
+ return "%s@%s" % (encoded, language)
elif datatype:
- return '%s^^%s' % (encoded, quoted_dt)
+ return "%s^^%s" % (encoded, quoted_dt)
else:
- return '%s' % encoded
+ return "%s" % encoded
def _quote_encode(self):
# This simpler encoding doesn't work; a newline gets encoded as "\\n",
@@ -1295,24 +1317,18 @@ class Literal(Identifier):
if "\n" in self:
# Triple quote this string.
- encoded = self.replace('\\', '\\\\')
+ encoded = self.replace("\\", "\\\\")
if '"""' in self:
# is this ok?
encoded = encoded.replace('"""', '\\"\\"\\"')
- if encoded[-1] == '"' and encoded[-2] != '\\':
- encoded = encoded[:-1] + '\\' + '"'
+ if encoded[-1] == '"' and encoded[-2] != "\\":
+ encoded = encoded[:-1] + "\\" + '"'
- return '"""%s"""' % encoded.replace('\r', '\\r')
+ return '"""%s"""' % encoded.replace("\r", "\\r")
else:
- return '"%s"' % self.replace(
- '\n', '\\n').replace(
- '\\', '\\\\').replace(
- '"', '\\"').replace(
- '\r', '\\r')
-
- if PY2:
- def __str__(self):
- return self.encode()
+ return '"%s"' % self.replace("\n", "\\n").replace("\\", "\\\\").replace(
+ '"', '\\"'
+ ).replace("\r", "\\r")
def __repr__(self):
args = [super(Literal, self).__repr__()]
@@ -1337,10 +1353,9 @@ class Literal(Identifier):
def _parseXML(xmlstring):
- if PY2:
- xmlstring = xmlstring.encode('utf-8')
retval = xml.dom.minidom.parseString(
- "<rdflibtoplevelelement>%s</rdflibtoplevelelement>" % xmlstring)
+ "<rdflibtoplevelelement>%s</rdflibtoplevelelement>" % xmlstring
+ )
retval.normalize()
return retval
@@ -1348,15 +1363,16 @@ def _parseXML(xmlstring):
def _parseHTML(htmltext):
try:
import html5lib
- parser = html5lib.HTMLParser(
- tree=html5lib.treebuilders.getTreeBuilder("dom"))
+
+ parser = html5lib.HTMLParser(tree=html5lib.treebuilders.getTreeBuilder("dom"))
retval = parser.parseFragment(htmltext)
retval.normalize()
return retval
except ImportError:
raise ImportError(
- "HTML5 parser not available. Try installing" +
- " html5lib <http://code.google.com/p/html5lib>")
+ "HTML5 parser not available. Try installing"
+ + " html5lib <http://code.google.com/p/html5lib>"
+ )
def _writeXML(xmlnode):
@@ -1364,70 +1380,83 @@ def _writeXML(xmlnode):
d = xml.dom.minidom.Document()
d.childNodes += xmlnode.childNodes
xmlnode = d
- s = xmlnode.toxml('utf-8')
+ s = xmlnode.toxml("utf-8")
# for clean round-tripping, remove headers -- I have great and
# specific worries that this will blow up later, but this margin
# is too narrow to contain them
- if s.startswith(b('<?xml version="1.0" encoding="utf-8"?>')):
+ if s.startswith('<?xml version="1.0" encoding="utf-8"?>'.encode("latin-1")):
s = s[38:]
- if s.startswith(b('<rdflibtoplevelelement>')):
+ if s.startswith("<rdflibtoplevelelement>".encode("latin-1")):
s = s[23:-24]
- if s == b('<rdflibtoplevelelement/>'):
- s = b('')
+ if s == "<rdflibtoplevelelement/>".encode("latin-1"):
+ s = "".encode("latin-1")
return s
def _unhexlify(value):
# In Python 3.2, unhexlify does not support str (only bytes)
- if PY3 and isinstance(value, str):
+ if isinstance(value, str):
value = value.encode()
return unhexlify(value)
+
+def _parseBoolean(value):
+ true_accepted_values = ["1", "true"]
+ false_accepted_values = ["0", "false"]
+ new_value = value.lower()
+ if new_value in true_accepted_values:
+ return True
+ if new_value not in false_accepted_values:
+ warnings.warn(
+ "Parsing weird boolean, %r does not map to True or False" % value,
+ category=DeprecationWarning,
+ )
+ return False
+
+
# Cannot import Namespace/XSD because of circular dependencies
-_XSD_PFX = 'http://www.w3.org/2001/XMLSchema#'
-_RDF_PFX = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
+_XSD_PFX = "http://www.w3.org/2001/XMLSchema#"
+_RDF_PFX = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-_RDF_XMLLITERAL = URIRef(_RDF_PFX + 'XMLLiteral')
-_RDF_HTMLLITERAL = URIRef(_RDF_PFX + 'HTML')
+_RDF_XMLLITERAL = URIRef(_RDF_PFX + "XMLLiteral")
+_RDF_HTMLLITERAL = URIRef(_RDF_PFX + "HTML")
-_XSD_STRING = URIRef(_XSD_PFX + 'string')
+_XSD_STRING = URIRef(_XSD_PFX + "string")
-_XSD_FLOAT = URIRef(_XSD_PFX + 'float')
-_XSD_DOUBLE = URIRef(_XSD_PFX + 'double')
-_XSD_DECIMAL = URIRef(_XSD_PFX + 'decimal')
-_XSD_INTEGER = URIRef(_XSD_PFX + 'integer')
-_XSD_BOOLEAN = URIRef(_XSD_PFX + 'boolean')
+_XSD_FLOAT = URIRef(_XSD_PFX + "float")
+_XSD_DOUBLE = URIRef(_XSD_PFX + "double")
+_XSD_DECIMAL = URIRef(_XSD_PFX + "decimal")
+_XSD_INTEGER = URIRef(_XSD_PFX + "integer")
+_XSD_BOOLEAN = URIRef(_XSD_PFX + "boolean")
-_XSD_DATETIME = URIRef(_XSD_PFX + 'dateTime')
-_XSD_DATE = URIRef(_XSD_PFX + 'date')
-_XSD_TIME = URIRef(_XSD_PFX + 'time')
-_XSD_DURATION = URIRef(_XSD_PFX + 'duration')
-_XSD_DAYTIMEDURATION = URIRef(_XSD_PFX + 'dayTimeDuration')
-_XSD_YEARMONTHDURATION = URIRef(_XSD_PFX + 'yearMonthDuration')
+_XSD_DATETIME = URIRef(_XSD_PFX + "dateTime")
+_XSD_DATE = URIRef(_XSD_PFX + "date")
+_XSD_TIME = URIRef(_XSD_PFX + "time")
+_XSD_DURATION = URIRef(_XSD_PFX + "duration")
+_XSD_DAYTIMEDURATION = URIRef(_XSD_PFX + "dayTimeDuration")
+_XSD_YEARMONTHDURATION = URIRef(_XSD_PFX + "yearMonthDuration")
-_OWL_RATIONAL = URIRef('http://www.w3.org/2002/07/owl#rational')
-_XSD_HEXBINARY = URIRef(_XSD_PFX + 'hexBinary')
+_OWL_RATIONAL = URIRef("http://www.w3.org/2002/07/owl#rational")
+_XSD_HEXBINARY = URIRef(_XSD_PFX + "hexBinary")
# TODO: gYearMonth, gYear, gMonthDay, gDay, gMonth
_NUMERIC_LITERAL_TYPES = (
_XSD_INTEGER,
_XSD_DECIMAL,
_XSD_DOUBLE,
- URIRef(_XSD_PFX + 'float'),
-
- URIRef(_XSD_PFX + 'byte'),
- URIRef(_XSD_PFX + 'int'),
- URIRef(_XSD_PFX + 'long'),
- URIRef(_XSD_PFX + 'negativeInteger'),
- URIRef(_XSD_PFX + 'nonNegativeInteger'),
- URIRef(_XSD_PFX + 'nonPositiveInteger'),
- URIRef(_XSD_PFX + 'positiveInteger'),
- URIRef(_XSD_PFX + 'short'),
- URIRef(_XSD_PFX + 'unsignedByte'),
- URIRef(_XSD_PFX + 'unsignedInt'),
- URIRef(_XSD_PFX + 'unsignedLong'),
- URIRef(_XSD_PFX + 'unsignedShort'),
-
+ URIRef(_XSD_PFX + "float"),
+ URIRef(_XSD_PFX + "byte"),
+ URIRef(_XSD_PFX + "int"),
+ URIRef(_XSD_PFX + "long"),
+ URIRef(_XSD_PFX + "negativeInteger"),
+ URIRef(_XSD_PFX + "nonNegativeInteger"),
+ URIRef(_XSD_PFX + "nonPositiveInteger"),
+ URIRef(_XSD_PFX + "positiveInteger"),
+ URIRef(_XSD_PFX + "short"),
+ URIRef(_XSD_PFX + "unsignedByte"),
+ URIRef(_XSD_PFX + "unsignedInt"),
+ URIRef(_XSD_PFX + "unsignedLong"),
+ URIRef(_XSD_PFX + "unsignedShort"),
)
# these have "native" syntax in N3/SPARQL
@@ -1436,12 +1465,12 @@ _PLAIN_LITERAL_TYPES = (
_XSD_BOOLEAN,
_XSD_DOUBLE,
_XSD_DECIMAL,
- _OWL_RATIONAL
+ _OWL_RATIONAL,
)
# these have special INF and NaN XSD representations
_NUMERIC_INF_NAN_LITERAL_TYPES = (
- URIRef(_XSD_PFX + 'float'),
+ URIRef(_XSD_PFX + "float"),
_XSD_DOUBLE,
_XSD_DECIMAL,
)
@@ -1453,12 +1482,12 @@ _TOTAL_ORDER_CASTERS = {
datetime: lambda value: (
# naive vs. aware
value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None,
- value
+ value,
),
time: lambda value: (
# naive vs. aware
value.tzinfo is not None and value.tzinfo.utcoffset(None) is not None,
- value
+ value,
),
xml.dom.minidom.Document: lambda value: value.toxml(),
}
@@ -1468,8 +1497,8 @@ _STRING_LITERAL_TYPES = (
_XSD_STRING,
_RDF_XMLLITERAL,
_RDF_HTMLLITERAL,
- URIRef(_XSD_PFX + 'normalizedString'),
- URIRef(_XSD_PFX + 'token')
+ URIRef(_XSD_PFX + "normalizedString"),
+ URIRef(_XSD_PFX + "token"),
)
@@ -1497,8 +1526,6 @@ def _castPythonToLiteral(obj, datatype):
return obj, None # TODO: is this right for the fall through case?
-from decimal import Decimal
-
# Mappings from Python types to XSD datatypes and back (borrowed from sparta)
# datetime instances are also instances of date... so we need to order these.
@@ -1511,68 +1538,67 @@ from decimal import Decimal
# both map to the abstract integer type,
# rather than some concrete bit-limited datatype
_GenericPythonToXSDRules = [
- (string_types, (None, None)),
+ (str, (None, None)),
(float, (None, _XSD_DOUBLE)),
- (bool, (lambda i:str(i).lower(), _XSD_BOOLEAN)),
+ (bool, (lambda i: str(i).lower(), _XSD_BOOLEAN)),
(int, (None, _XSD_INTEGER)),
(long_type, (None, _XSD_INTEGER)),
(Decimal, (None, _XSD_DECIMAL)),
- (datetime, (lambda i:i.isoformat(), _XSD_DATETIME)),
- (date, (lambda i:i.isoformat(), _XSD_DATE)),
- (time, (lambda i:i.isoformat(), _XSD_TIME)),
- (Duration, (lambda i:duration_isoformat(i), _XSD_DURATION)),
- (timedelta, (lambda i:duration_isoformat(i), _XSD_DAYTIMEDURATION)),
+ (datetime, (lambda i: i.isoformat(), _XSD_DATETIME)),
+ (date, (lambda i: i.isoformat(), _XSD_DATE)),
+ (time, (lambda i: i.isoformat(), _XSD_TIME)),
+ (Duration, (lambda i: duration_isoformat(i), _XSD_DURATION)),
+ (timedelta, (lambda i: duration_isoformat(i), _XSD_DAYTIMEDURATION)),
(xml.dom.minidom.Document, (_writeXML, _RDF_XMLLITERAL)),
# this is a bit dirty - by accident the html5lib parser produces
# DocumentFragments, and the xml parser Documents, letting this
# decide what datatype to use makes roundtripping easier, but it a
# bit random
(xml.dom.minidom.DocumentFragment, (_writeXML, _RDF_HTMLLITERAL)),
- (Fraction, (None, _OWL_RATIONAL))
+ (Fraction, (None, _OWL_RATIONAL)),
]
_SpecificPythonToXSDRules = [
- ((string_types, _XSD_HEXBINARY), hexlify),
+ ((str, _XSD_HEXBINARY), hexlify),
+ ((bytes, _XSD_HEXBINARY), hexlify),
]
-if PY3:
- _SpecificPythonToXSDRules.append(((bytes, _XSD_HEXBINARY), hexlify))
XSDToPython = {
None: None, # plain literals map directly to value space
- URIRef(_XSD_PFX + 'time'): parse_time,
- URIRef(_XSD_PFX + 'date'): parse_date,
- URIRef(_XSD_PFX + 'gYear'): parse_date,
- URIRef(_XSD_PFX + 'gYearMonth'): parse_date,
- URIRef(_XSD_PFX + 'dateTime'): parse_datetime,
- URIRef(_XSD_PFX + 'duration'): parse_duration,
- URIRef(_XSD_PFX + 'dayTimeDuration'): parse_duration,
- URIRef(_XSD_PFX + 'yearMonthDuration'): parse_duration,
- URIRef(_XSD_PFX + 'hexBinary'): _unhexlify,
- URIRef(_XSD_PFX + 'string'): None,
- URIRef(_XSD_PFX + 'normalizedString'): None,
- URIRef(_XSD_PFX + 'token'): None,
- URIRef(_XSD_PFX + 'language'): None,
- URIRef(_XSD_PFX + 'boolean'): lambda i: i.lower() == 'true',
- URIRef(_XSD_PFX + 'decimal'): Decimal,
- URIRef(_XSD_PFX + 'integer'): long_type,
- URIRef(_XSD_PFX + 'nonPositiveInteger'): int,
- URIRef(_XSD_PFX + 'long'): long_type,
- URIRef(_XSD_PFX + 'nonNegativeInteger'): int,
- URIRef(_XSD_PFX + 'negativeInteger'): int,
- URIRef(_XSD_PFX + 'int'): long_type,
- URIRef(_XSD_PFX + 'unsignedLong'): long_type,
- URIRef(_XSD_PFX + 'positiveInteger'): int,
- URIRef(_XSD_PFX + 'short'): int,
- URIRef(_XSD_PFX + 'unsignedInt'): long_type,
- URIRef(_XSD_PFX + 'byte'): int,
- URIRef(_XSD_PFX + 'unsignedShort'): int,
- URIRef(_XSD_PFX + 'unsignedByte'): int,
- URIRef(_XSD_PFX + 'float'): float,
- URIRef(_XSD_PFX + 'double'): float,
- URIRef(_XSD_PFX + 'base64Binary'): lambda s: base64.b64decode(s),
- URIRef(_XSD_PFX + 'anyURI'): None,
+ URIRef(_XSD_PFX + "time"): parse_time,
+ URIRef(_XSD_PFX + "date"): parse_date,
+ URIRef(_XSD_PFX + "gYear"): parse_date,
+ URIRef(_XSD_PFX + "gYearMonth"): parse_date,
+ URIRef(_XSD_PFX + "dateTime"): parse_datetime,
+ URIRef(_XSD_PFX + "duration"): parse_duration,
+ URIRef(_XSD_PFX + "dayTimeDuration"): parse_duration,
+ URIRef(_XSD_PFX + "yearMonthDuration"): parse_duration,
+ URIRef(_XSD_PFX + "hexBinary"): _unhexlify,
+ URIRef(_XSD_PFX + "string"): None,
+ URIRef(_XSD_PFX + "normalizedString"): None,
+ URIRef(_XSD_PFX + "token"): None,
+ URIRef(_XSD_PFX + "language"): None,
+ URIRef(_XSD_PFX + "boolean"): _parseBoolean,
+ URIRef(_XSD_PFX + "decimal"): Decimal,
+ URIRef(_XSD_PFX + "integer"): long_type,
+ URIRef(_XSD_PFX + "nonPositiveInteger"): int,
+ URIRef(_XSD_PFX + "long"): long_type,
+ URIRef(_XSD_PFX + "nonNegativeInteger"): int,
+ URIRef(_XSD_PFX + "negativeInteger"): int,
+ URIRef(_XSD_PFX + "int"): long_type,
+ URIRef(_XSD_PFX + "unsignedLong"): long_type,
+ URIRef(_XSD_PFX + "positiveInteger"): int,
+ URIRef(_XSD_PFX + "short"): int,
+ URIRef(_XSD_PFX + "unsignedInt"): long_type,
+ URIRef(_XSD_PFX + "byte"): int,
+ URIRef(_XSD_PFX + "unsignedShort"): int,
+ URIRef(_XSD_PFX + "unsignedByte"): int,
+ URIRef(_XSD_PFX + "float"): float,
+ URIRef(_XSD_PFX + "double"): float,
+ URIRef(_XSD_PFX + "base64Binary"): lambda s: base64.b64decode(s),
+ URIRef(_XSD_PFX + "anyURI"): None,
_RDF_XMLLITERAL: _parseXML,
- _RDF_HTMLLITERAL: _parseHTML
+ _RDF_HTMLLITERAL: _parseHTML,
}
_toPythonMapping = {}
@@ -1595,15 +1621,17 @@ def _castLexicalToPython(lexical, datatype):
elif convFunc is None:
# no conv func means 1-1 lexical<->value-space mapping
try:
- return text_type(lexical)
+ return str(lexical)
except UnicodeDecodeError:
- return text_type(lexical, 'utf-8')
+ return str(lexical, "utf-8")
else:
# no convFunc - unknown data-type
return None
-def bind(datatype, pythontype, constructor=None, lexicalizer=None, datatype_specific=False):
+def bind(
+ datatype, pythontype, constructor=None, lexicalizer=None, datatype_specific=False
+):
"""
register a new datatype<->pythontype binding
@@ -1622,8 +1650,7 @@ def bind(datatype, pythontype, constructor=None, lexicalizer=None, datatype_spec
raise Exception("No datatype given for a datatype-specific binding")
if datatype in _toPythonMapping:
- logger.warning("datatype '%s' was already bound. Rebinding." %
- datatype)
+ logger.warning("datatype '%s' was already bound. Rebinding." % datatype)
if constructor is None:
constructor = pythontype
@@ -1639,15 +1666,15 @@ class Variable(Identifier):
A Variable - this is used for querying, or in Formula aware
graphs, where Variables can stored in the graph
"""
+
__slots__ = ()
def __new__(cls, value):
if len(value) == 0:
- raise Exception(
- "Attempted to create variable with empty string as name!")
- if value[0] == '?':
+ raise Exception("Attempted to create variable with empty string as name!")
+ if value[0] == "?":
value = value[1:]
- return text_type.__new__(cls, value)
+ return str.__new__(cls, value)
def __repr__(self):
if self.__class__ is Variable:
@@ -1664,17 +1691,18 @@ class Variable(Identifier):
return "?%s" % self
def __reduce__(self):
- return (Variable, (text_type(self),))
+ return (Variable, (str(self),))
class Statement(Node, tuple):
-
def __new__(cls, triple, context):
subject, predicate, object = triple
warnings.warn(
- "Class Statement is deprecated, and will be removed in " +
- "the future. If you use this please let rdflib-dev know!",
- category=DeprecationWarning, stacklevel=2)
+ "Class Statement is deprecated, and will be removed in "
+ + "the future. If you use this please let rdflib-dev know!",
+ category=DeprecationWarning,
+ stacklevel=2,
+ )
return tuple.__new__(cls, ((subject, predicate, object), context))
def __reduce__(self):
@@ -1689,12 +1717,7 @@ class Statement(Node, tuple):
# we leave "space" for more subclasses of Node elsewhere
# default-dict to grazefully fail for new subclasses
_ORDERING = defaultdict(int)
-_ORDERING.update({
- BNode: 10,
- Variable: 20,
- URIRef: 30,
- Literal: 40
-})
+_ORDERING.update({BNode: 10, Variable: 20, URIRef: 30, Literal: 40})
def _isEqualXMLNode(node, other):
@@ -1708,8 +1731,7 @@ def _isEqualXMLNode(node, other):
# for the length becomes necessary...
if len(node.childNodes) != len(other.childNodes):
return False
- for (nc, oc) in map(
- lambda x, y: (x, y), node.childNodes, other.childNodes):
+ for (nc, oc) in map(lambda x, y: (x, y), node.childNodes, other.childNodes):
if not _isEqualXMLNode(nc, oc):
return False
# if we got here then everything is fine:
@@ -1726,8 +1748,9 @@ def _isEqualXMLNode(node, other):
elif node.nodeType == Node.ELEMENT_NODE:
# Get the basics right
- if not (node.tagName == other.tagName and
- node.namespaceURI == other.namespaceURI):
+ if not (
+ node.tagName == other.tagName and node.namespaceURI == other.namespaceURI
+ ):
return False
# Handle the (namespaced) attributes; the namespace setting key
@@ -1735,17 +1758,22 @@ def _isEqualXMLNode(node, other):
# Note that the minidom orders the keys already, so we do not have
# to worry about that, which is a bonus...
n_keys = [
- k for k in node.attributes.keysNS()
- if k[0] != 'http://www.w3.org/2000/xmlns/']
+ k
+ for k in node.attributes.keysNS()
+ if k[0] != "http://www.w3.org/2000/xmlns/"
+ ]
o_keys = [
- k for k in other.attributes.keysNS()
- if k[0] != 'http://www.w3.org/2000/xmlns/']
+ k
+ for k in other.attributes.keysNS()
+ if k[0] != "http://www.w3.org/2000/xmlns/"
+ ]
if len(n_keys) != len(o_keys):
return False
for k in n_keys:
- if not (k in o_keys
- and node.getAttributeNS(k[0], k[1]) ==
- other.getAttributeNS(k[0], k[1])):
+ if not (
+ k in o_keys
+ and node.getAttributeNS(k[0], k[1]) == other.getAttributeNS(k[0], k[1])
+ ):
return False
# if we got here, the attributes are all right, we can go down
@@ -1753,8 +1781,11 @@ def _isEqualXMLNode(node, other):
return recurse()
elif node.nodeType in [
- Node.TEXT_NODE, Node.COMMENT_NODE, Node.CDATA_SECTION_NODE,
- Node.NOTATION_NODE]:
+ Node.TEXT_NODE,
+ Node.COMMENT_NODE,
+ Node.CDATA_SECTION_NODE,
+ Node.NOTATION_NODE,
+ ]:
return node.data == other.data
elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
@@ -1764,15 +1795,14 @@ def _isEqualXMLNode(node, other):
return node.nodeValue == other.nodeValue
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
- return node.publicId == other.publicId \
- and node.systemId == other.system.Id
+ return node.publicId == other.publicId and node.systemId == other.systemId
else:
# should not happen, in fact
- raise Exception(
- 'I dont know how to compare XML Node type: %s' % node.nodeType)
+ raise Exception("I dont know how to compare XML Node type: %s" % node.nodeType)
-if __name__ == '__main__':
+if __name__ == "__main__":
import doctest
+
doctest.testmod()
diff --git a/rdflib/tools/csv2rdf.py b/rdflib/tools/csv2rdf.py
index a4e55c4e..812ffadc 100644
--- a/rdflib/tools/csv2rdf.py
+++ b/rdflib/tools/csv2rdf.py
@@ -17,17 +17,14 @@ import time
import datetime
import warnings
+import configparser
+from urllib.parse import quote
import rdflib
-
-from six.moves import configparser
-from six.moves.urllib.parse import quote
-from six import text_type
-
from rdflib import RDF, RDFS
from rdflib.namespace import split_uri
-__all__ = ['CSV2RDF']
+__all__ = ["CSV2RDF"]
HELP = """
csv2rdf.py \
@@ -117,33 +114,31 @@ def toPropertyLabel(label):
return label
-def index(l, i):
+def index(l_, i):
"""return a set of indexes from a list
>>> index([1,2,3],(0,2))
(1, 3)
"""
- return tuple([l[x] for x in i])
+ return tuple([l_[x] for x in i])
def csv_reader(csv_data, dialect=csv.excel, **kwargs):
- csv_reader = csv.reader(csv_data,
- dialect=dialect, **kwargs)
+ csv_reader = csv.reader(csv_data, dialect=dialect, **kwargs)
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
- yield [text_type(cell, 'utf-8', errors='replace') for cell in row]
+ yield row  # py3 csv already yields str; str(cell, "utf-8", ...) would raise TypeError
def prefixuri(x, prefix, class_=None):
if prefix:
- r = rdflib.URIRef(
- prefix + quote(
- x.encode("utf8").replace(" ", "_"), safe=""))
+ r = rdflib.URIRef(prefix + quote(x.encode("utf8").replace(" ", "_"), safe=""))
else:
r = rdflib.URIRef(x)
uris[x] = (r, class_)
return r
+
# meta-language for config
@@ -238,8 +233,7 @@ class NodeSplit(NodeMaker):
self.f = rdflib.Literal
if not callable(self.f):
raise Exception("Function passed to split is not callable!")
- return [
- self.f(y.strip()) for y in x.split(self.sep) if y.strip() != ""]
+ return [self.f(y.strip()) for y in x.split(self.sep) if y.strip() != ""]
def range(self):
if self.f and isinstance(self.f, NodeMaker):
@@ -286,16 +280,17 @@ def _config_split(sep=None, f=None):
return NodeSplit(sep, f)
-config_functions = {"ignore": _config_ignore,
- "uri": _config_uri,
- "literal": _config_literal,
- "float": _config_float,
- "int": _config_int,
- "date": _config_date,
- "split": _config_split,
- "replace": _config_replace,
- "bool": _config_bool,
- }
+config_functions = {
+ "ignore": _config_ignore,
+ "uri": _config_uri,
+ "literal": _config_literal,
+ "float": _config_float,
+ "int": _config_int,
+ "date": _config_date,
+ "split": _config_split,
+ "replace": _config_replace,
+ "bool": _config_bool,
+}
def column(v):
@@ -310,7 +305,7 @@ class CSV2RDF(object):
self.CLASS = None
self.BASE = None
self.PROPBASE = None
- self.IDENT = 'auto'
+ self.IDENT = "auto"
self.LABEL = None
self.DEFINECLASS = False
self.SKIP = 0
@@ -320,7 +315,7 @@ class CSV2RDF(object):
self.COLUMNS = {}
self.PROPS = {}
- self.OUT = codecs.getwriter("utf-8")(sys.stdout, errors='replace')
+ self.OUT = codecs.getwriter("utf-8")(sys.stdout, errors="replace")
self.triples = 0
@@ -343,8 +338,7 @@ class CSV2RDF(object):
self.BASE = rdflib.Namespace("http://example.org/instances/")
if not self.PROPBASE:
- warnings.warn(
- "No property base given, using http://example.org/property/")
+ warnings.warn("No property base given, using http://example.org/property/")
self.PROPBASE = rdflib.Namespace("http://example.org/props/")
# skip lines at the start
@@ -353,8 +347,7 @@ class CSV2RDF(object):
# read header line
header_labels = list(csvreader.next())
- headers = dict(
- enumerate([self.PROPBASE[toProperty(x)] for x in header_labels]))
+ headers = dict(enumerate([self.PROPBASE[toProperty(x)] for x in header_labels]))
# override header properties if some are given
for k, v in self.PROPS.items():
headers[k] = v
@@ -364,39 +357,46 @@ class CSV2RDF(object):
# output class/property definitions
self.triple(self.CLASS, RDF.type, RDFS.Class)
for i in range(len(headers)):
- h, l = headers[i], header_labels[i]
- if h == "" or l == "":
+ h, l_ = headers[i], header_labels[i]
+ if h == "" or l_ == "":
continue
- if self.COLUMNS.get(i, self.DEFAULT) == 'ignore':
+ if self.COLUMNS.get(i, self.DEFAULT) == "ignore":
continue
self.triple(h, RDF.type, RDF.Property)
- self.triple(h, RDFS.label, rdflib.Literal(toPropertyLabel(l)))
+ self.triple(h, RDFS.label, rdflib.Literal(toPropertyLabel(l_)))
self.triple(h, RDFS.domain, self.CLASS)
- self.triple(h, RDFS.range,
- self.COLUMNS.get(i, default_node_make).range())
+ self.triple(
+ h, RDFS.range, self.COLUMNS.get(i, default_node_make).range()
+ )
rows = 0
- for l in csvreader:
+ for l_ in csvreader:
try:
- if self.IDENT == 'auto':
+ if self.IDENT == "auto":
uri = self.BASE["%d" % rows]
else:
- uri = self.BASE["_".join([quote(x.encode(
- "utf8").replace(" ", "_"), safe="")
- for x in index(l, self.IDENT)])]
+ uri = self.BASE[
+ "_".join(
+ [
+ quote(x.encode("utf8").replace(" ", "_"), safe="")
+ for x in index(l_, self.IDENT)
+ ]
+ )
+ ]
if self.LABEL:
- self.triple(uri, RDFS.label, rdflib.Literal(
- " ".join(index(l, self.LABEL))))
+ self.triple(
+ uri, RDFS.label, rdflib.Literal(" ".join(index(l_, self.LABEL)))
+ )
if self.CLASS:
# type triple
self.triple(uri, RDF.type, self.CLASS)
- for i, x in enumerate(l):
+ for i, x in enumerate(l_):
x = x.strip()
- if x != '':
- if self.COLUMNS.get(i, self.DEFAULT) == 'ignore':
+ if x != "":
+ if self.COLUMNS.get(i, self.DEFAULT) == "ignore":
continue
try:
o = self.COLUMNS.get(i, rdflib.Literal)(x)
@@ -408,24 +408,26 @@ class CSV2RDF(object):
except Exception as e:
warnings.warn(
- "Could not process value for column " +
- "%d:%s in row %d, ignoring: %s " % (
- i, headers[i], rows, e.message))
+ "Could not process value for column "
+ + "%d:%s in row %d, ignoring: %s "
+ % (i, headers[i], rows, e)
+ )
rows += 1
if rows % 100000 == 0:
sys.stderr.write(
- "%d rows, %d triples, elapsed %.2fs.\n" % (
- rows, self.triples, time.time() - start))
+ "%d rows, %d triples, elapsed %.2fs.\n"
+ % (rows, self.triples, time.time() - start)
+ )
except:
sys.stderr.write("Error processing line: %d\n" % rows)
raise
# output types/labels for generated URIs
classes = set()
- for l, x in uris.items():
+ for l_, x in uris.items():
u, c = x
- self.triple(u, RDFS.label, rdflib.Literal(l))
+ self.triple(u, RDFS.label, rdflib.Literal(l_))
if c:
c = rdflib.URIRef(c)
classes.add(c)
@@ -435,8 +437,7 @@ class CSV2RDF(object):
self.triple(c, RDF.type, RDFS.Class)
self.OUT.close()
- sys.stderr.write(
- "Converted %d rows into %d triples.\n" % (rows, self.triples))
+ sys.stderr.write("Converted %d rows into %d triples.\n" % (rows, self.triples))
sys.stderr.write("Took %.2f seconds.\n" % (time.time() - start))
@@ -446,8 +447,19 @@ def main():
opts, files = getopt.getopt(
sys.argv[1:],
"hc:b:p:i:o:Cf:l:s:d:D:",
- ["out=", "base=", "delim=", "propbase=", "class=", "default="
- "ident=", "label=", "skip=", "defineclass", "help"])
+ [
+ "out=",
+ "base=",
+ "delim=",
+ "propbase=",
+ "class=",
+ "default=" "ident=",
+ "label=",
+ "skip=",
+ "defineclass",
+ "help",
+ ],
+ )
opts = dict(opts)
if "-h" in opts or "--help" in opts:
@@ -537,9 +549,8 @@ def main():
if csv2rdf.CLASS and ("-C" in opts or "--defineclass" in opts):
csv2rdf.DEFINECLASS = True
- csv2rdf.convert(
- csv_reader(fileinput.input(files), delimiter=csv2rdf.DELIM))
+ csv2rdf.convert(csv_reader(fileinput.input(files), delimiter=csv2rdf.DELIM))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/rdflib/tools/graphisomorphism.py b/rdflib/tools/graphisomorphism.py
index abc84fa1..a073d7d9 100644
--- a/rdflib/tools/graphisomorphism.py
+++ b/rdflib/tools/graphisomorphism.py
@@ -69,39 +69,47 @@ class IsomorphicTestableGraph(Graph):
def main():
import sys
from optparse import OptionParser
- usage = '''usage: %prog [options] file1 file2 ... fileN'''
+
+ usage = """usage: %prog [options] file1 file2 ... fileN"""
op = OptionParser(usage=usage)
- op.add_option('-s', '--stdin', action='store_true', default=False,
- help='Load from STDIN as well')
- op.add_option('--format',
- default='xml',
- dest='inputFormat',
- metavar='RDF_FORMAT',
- choices=['xml', 'trix', 'n3', 'nt', 'rdfa'],
- help="The format of the RDF document(s) to compare" +
- "One of 'xml','n3','trix', 'nt', " +
- "or 'rdfa'. The default is %default")
+ op.add_option(
+ "-s",
+ "--stdin",
+ action="store_true",
+ default=False,
+ help="Load from STDIN as well",
+ )
+ op.add_option(
+ "--format",
+ default="xml",
+ dest="inputFormat",
+ metavar="RDF_FORMAT",
+ choices=["xml", "trix", "n3", "nt", "rdfa"],
+ help="The format of the RDF document(s) to compare"
+ + "One of 'xml','n3','trix', 'nt', "
+ + "or 'rdfa'. The default is %default",
+ )
(options, args) = op.parse_args()
graphs = []
graph2FName = {}
if options.stdin:
- graph = IsomorphicTestableGraph().parse(
- sys.stdin, format=options.inputFormat)
+ graph = IsomorphicTestableGraph().parse(sys.stdin, format=options.inputFormat)
graphs.append(graph)
- graph2FName[graph] = '(STDIN)'
+ graph2FName[graph] = "(STDIN)"
for fn in args:
- graph = IsomorphicTestableGraph().parse(
- fn, format=options.inputFormat)
+ graph = IsomorphicTestableGraph().parse(fn, format=options.inputFormat)
graphs.append(graph)
graph2FName[graph] = fn
checked = set()
for graph1, graph2 in combinations(graphs, 2):
if (graph1, graph2) not in checked and (graph2, graph1) not in checked:
assert graph1 == graph2, "%s != %s" % (
- graph2FName[graph1], graph2FName[graph2])
+ graph2FName[graph1],
+ graph2FName[graph2],
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/rdflib/tools/rdf2dot.py b/rdflib/tools/rdf2dot.py
index 84b251a1..9b622b66 100644
--- a/rdflib/tools/rdf2dot.py
+++ b/rdflib/tools/rdf2dot.py
@@ -20,23 +20,60 @@ import collections
from rdflib import XSD
-LABEL_PROPERTIES = [rdflib.RDFS.label,
- rdflib.URIRef("http://purl.org/dc/elements/1.1/title"),
- rdflib.URIRef("http://xmlns.com/foaf/0.1/name"),
- rdflib.URIRef("http://www.w3.org/2006/vcard/ns#fn"),
- rdflib.URIRef("http://www.w3.org/2006/vcard/ns#org")
- ]
+LABEL_PROPERTIES = [
+ rdflib.RDFS.label,
+ rdflib.URIRef("http://purl.org/dc/elements/1.1/title"),
+ rdflib.URIRef("http://xmlns.com/foaf/0.1/name"),
+ rdflib.URIRef("http://www.w3.org/2006/vcard/ns#fn"),
+ rdflib.URIRef("http://www.w3.org/2006/vcard/ns#org"),
+]
XSDTERMS = [
- XSD[x] for x in (
- "anyURI", "base64Binary", "boolean", "byte", "date",
- "dateTime", "decimal", "double", "duration", "float", "gDay", "gMonth",
- "gMonthDay", "gYear", "gYearMonth", "hexBinary", "ID", "IDREF",
- "IDREFS", "int", "integer", "language", "long", "Name", "NCName",
- "negativeInteger", "NMTOKEN", "NMTOKENS", "nonNegativeInteger",
- "nonPositiveInteger", "normalizedString", "positiveInteger", "QName",
- "short", "string", "time", "token", "unsignedByte", "unsignedInt",
- "unsignedLong", "unsignedShort")]
+ XSD[x]
+ for x in (
+ "anyURI",
+ "base64Binary",
+ "boolean",
+ "byte",
+ "date",
+ "dateTime",
+ "decimal",
+ "double",
+ "duration",
+ "float",
+ "gDay",
+ "gMonth",
+ "gMonthDay",
+ "gYear",
+ "gYearMonth",
+ "hexBinary",
+ "ID",
+ "IDREF",
+ "IDREFS",
+ "int",
+ "integer",
+ "language",
+ "long",
+ "Name",
+ "NCName",
+ "negativeInteger",
+ "NMTOKEN",
+ "NMTOKENS",
+ "nonNegativeInteger",
+ "nonPositiveInteger",
+ "normalizedString",
+ "positiveInteger",
+ "QName",
+ "short",
+ "string",
+ "time",
+ "token",
+ "unsignedByte",
+ "unsignedInt",
+ "unsignedLong",
+ "unsignedShort",
+ )
+]
EDGECOLOR = "blue"
NODECOLOR = "black"
@@ -73,10 +110,10 @@ def rdf2dot(g, stream, opts={}):
def formatliteral(l, g):
v = cgi.escape(l)
if l.datatype:
- return u'&quot;%s&quot;^^%s' % (v, qname(l.datatype, g))
+ return u"&quot;%s&quot;^^%s" % (v, qname(l.datatype, g))
elif l.language:
- return u'&quot;%s&quot;@%s' % (v, l.language)
- return u'&quot;%s&quot;' % v
+ return u"&quot;%s&quot;@%s" % (v, l.language)
+ return u"&quot;%s&quot;" % v
def qname(x, g):
try:
@@ -88,7 +125,7 @@ def rdf2dot(g, stream, opts={}):
def color(p):
return "BLACK"
- stream.write(u"digraph { \n node [ fontname=\"DejaVu Sans\" ] ; \n")
+ stream.write(u'digraph { \n node [ fontname="DejaVu Sans" ] ; \n')
for s, p, o in g:
sn = node(s)
@@ -96,40 +133,48 @@ def rdf2dot(g, stream, opts={}):
continue
if isinstance(o, (rdflib.URIRef, rdflib.BNode)):
on = node(o)
- opstr = u"\t%s -> %s [ color=%s, label=< <font point-size='10' " + \
- u"color='#336633'>%s</font> > ] ;\n"
+ opstr = (
+ u"\t%s -> %s [ color=%s, label=< <font point-size='10' "
+ + u"color='#336633'>%s</font> > ] ;\n"
+ )
stream.write(opstr % (sn, on, color(p), qname(p, g)))
else:
fields[sn].add((qname(p, g), formatliteral(o, g)))
for u, n in nodes.items():
stream.write(u"# %s %s\n" % (u, n))
- f = [u"<tr><td align='left'>%s</td><td align='left'>%s</td></tr>" %
- x for x in sorted(fields[n])]
- opstr = u"%s [ shape=none, color=%s label=< <table color='#666666'" + \
- u" cellborder='0' cellspacing='0' border='1'><tr>" + \
- u"<td colspan='2' bgcolor='grey'><B>%s</B></td></tr><tr>" + \
- u"<td href='%s' bgcolor='#eeeeee' colspan='2'>" + \
- u"<font point-size='10' color='#6666ff'>%s</font></td>" + \
- u"</tr>%s</table> > ] \n"
+ f = [
+ u"<tr><td align='left'>%s</td><td align='left'>%s</td></tr>" % x
+ for x in sorted(fields[n])
+ ]
+ opstr = (
+ u"%s [ shape=none, color=%s label=< <table color='#666666'"
+ + u" cellborder='0' cellspacing='0' border='1'><tr>"
+ + u"<td colspan='2' bgcolor='grey'><B>%s</B></td></tr><tr>"
+ + u"<td href='%s' bgcolor='#eeeeee' colspan='2'>"
+ + u"<font point-size='10' color='#6666ff'>%s</font></td>"
+ + u"</tr>%s</table> > ] \n"
+ )
stream.write(opstr % (n, NODECOLOR, label(u, g), u, u, u"".join(f)))
stream.write("}\n")
def _help():
- sys.stderr.write("""
+ sys.stderr.write(
+ """
rdf2dot.py [-f <format>] files...
Read RDF files given on STDOUT, writes a graph of the RDFS schema in DOT
language to stdout
-f specifies parser to use, if not given,
-""")
+"""
+ )
def main():
rdflib.extras.cmdlineutils.main(rdf2dot, _help)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/rdflib/tools/rdfpipe.py b/rdflib/tools/rdfpipe.py
index 7bd63fc9..6ec9e6c2 100644
--- a/rdflib/tools/rdfpipe.py
+++ b/rdflib/tools/rdfpipe.py
@@ -15,21 +15,26 @@ import rdflib
from rdflib import plugin
from rdflib.store import Store
from rdflib.graph import ConjunctiveGraph
-from rdflib.namespace import RDF, RDFS, OWL, XSD
from rdflib.parser import Parser
from rdflib.serializer import Serializer
from rdflib.util import guess_format
-from six import PY3
-DEFAULT_INPUT_FORMAT = 'xml'
-DEFAULT_OUTPUT_FORMAT = 'n3'
+DEFAULT_INPUT_FORMAT = "xml"
+DEFAULT_OUTPUT_FORMAT = "n3"
-def parse_and_serialize(input_files, input_format, guess,
- outfile, output_format, ns_bindings,
- store_conn="", store_type=None):
+def parse_and_serialize(
+ input_files,
+ input_format,
+ guess,
+ outfile,
+ output_format,
+ ns_bindings,
+ store_conn="",
+ store_type=None,
+):
if store_type:
store = plugin.get(store_type, Store)()
@@ -44,7 +49,7 @@ def parse_and_serialize(input_files, input_format, guess,
for fpath in input_files:
use_format, kws = _format_and_kws(input_format)
- if fpath == '-':
+ if fpath == "-":
fpath = sys.stdin
elif not input_format and guess:
use_format = guess_format(fpath) or DEFAULT_INPUT_FORMAT
@@ -52,7 +57,7 @@ def parse_and_serialize(input_files, input_format, guess,
if outfile:
output_format, kws = _format_and_kws(output_format)
- kws.setdefault('base', None)
+ kws.setdefault("base", None)
graph.serialize(destination=outfile, format=output_format, **kws)
if store:
@@ -75,15 +80,15 @@ def _format_and_kws(fmt):
('fmt', {'a': 'b:c'})
"""
fmt, kws = fmt, {}
- if fmt and ':' in fmt:
- fmt, kwrepr = fmt.split(':', 1)
- for kw in kwrepr.split(','):
- if '=' in kw:
- k, v = kw.split('=')
+ if fmt and ":" in fmt:
+ fmt, kwrepr = fmt.split(":", 1)
+ for kw in kwrepr.split(","):
+ if "=" in kw:
+ k, v = kw.split("=")
kws[k] = v
- elif kw.startswith('-'):
+ elif kw.startswith("-"):
kws[kw[1:]] = False
- elif kw.startswith('+'):
+ elif kw.startswith("+"):
kws[kw[1:]] = True
else: # same as "+"
kws[kw] = True
@@ -96,62 +101,78 @@ def make_option_parser():
kw_example = "FORMAT:(+)KW1,-KW2,KW3=VALUE"
oparser = OptionParser(
- "%prog [-h] [-i INPUT_FORMAT] [-o OUTPUT_FORMAT] " +
- "[--ns=PFX=NS ...] [-] [FILE ...]",
- description=__doc__.strip() + (
+ "%prog [-h] [-i INPUT_FORMAT] [-o OUTPUT_FORMAT] "
+ + "[--ns=PFX=NS ...] [-] [FILE ...]",
+ description=__doc__.strip()
+ + (
" Reads file system paths, URLs or from stdin if '-' is given."
- " The result is serialized to stdout."),
- version="%prog " + "(using rdflib %s)" % rdflib.__version__)
+ " The result is serialized to stdout."
+ ),
+ version="%prog " + "(using rdflib %s)" % rdflib.__version__,
+ )
oparser.add_option(
- '-i', '--input-format',
+ "-i",
+ "--input-format",
type=str, # default=DEFAULT_INPUT_FORMAT,
help="Format of the input document(s)."
- " Available input formats are: %s." % parser_names +
- " If no format is given, it will be " +
- "guessed from the file name extension." +
- " Keywords to parser can be given after format like: %s." % kw_example,
- metavar="INPUT_FORMAT")
+ " Available input formats are: %s." % parser_names
+ + " If no format is given, it will be "
+ + "guessed from the file name extension."
+ + " Keywords to parser can be given after format like: %s." % kw_example,
+ metavar="INPUT_FORMAT",
+ )
oparser.add_option(
- '-o', '--output-format',
- type=str, default=DEFAULT_OUTPUT_FORMAT,
+ "-o",
+ "--output-format",
+ type=str,
+ default=DEFAULT_OUTPUT_FORMAT,
help="Format of the graph serialization."
- " Available output formats are: %s."
- % serializer_names +
- " Default format is: '%default'." +
- " Keywords to serializer can be given after format like: %s." %
- kw_example,
- metavar="OUTPUT_FORMAT")
+ " Available output formats are: %s." % serializer_names
+ + " Default format is: '%default'."
+ + " Keywords to serializer can be given after format like: %s." % kw_example,
+ metavar="OUTPUT_FORMAT",
+ )
oparser.add_option(
- '--ns',
- action="append", type=str,
+ "--ns",
+ action="append",
+ type=str,
help="Register a namespace binding (QName prefix to a base URI). "
"This can be used more than once.",
- metavar="PREFIX=NAMESPACE")
+ metavar="PREFIX=NAMESPACE",
+ )
oparser.add_option(
- '--no-guess', dest='guess',
- action='store_false', default=True,
- help="Don't guess format based on file suffix.")
+ "--no-guess",
+ dest="guess",
+ action="store_false",
+ default=True,
+ help="Don't guess format based on file suffix.",
+ )
oparser.add_option(
- '--no-out',
- action='store_true', default=False,
- help="Don't output the resulting graph " +
- "(useful for checking validity of input).")
+ "--no-out",
+ action="store_true",
+ default=False,
+ help="Don't output the resulting graph "
+ + "(useful for checking validity of input).",
+ )
oparser.add_option(
- '-w', '--warn',
- action='store_true', default=False,
- help="Output warnings to stderr (by default only critical errors).")
+ "-w",
+ "--warn",
+ action="store_true",
+ default=False,
+ help="Output warnings to stderr (by default only critical errors).",
+ )
return oparser
-def _get_plugin_names(kind): return ", ".join(
- p.name for p in plugin.plugins(kind=kind))
+def _get_plugin_names(kind):
+ return ", ".join(p.name for p in plugin.plugins(kind=kind))
def main():
@@ -170,18 +191,17 @@ def main():
ns_bindings = {}
if opts.ns:
for ns_kw in opts.ns:
- pfx, uri = ns_kw.split('=')
+ pfx, uri = ns_kw.split("=")
ns_bindings[pfx] = uri
- outfile = sys.stdout
- if PY3:
- outfile = sys.stdout.buffer
+ outfile = sys.stdout.buffer
if opts.no_out:
outfile = None
- parse_and_serialize(args, opts.input_format, opts.guess,
- outfile, opts.output_format, ns_bindings)
+ parse_and_serialize(
+ args, opts.input_format, opts.guess, outfile, opts.output_format, ns_bindings
+ )
if __name__ == "__main__":
diff --git a/rdflib/tools/rdfs2dot.py b/rdflib/tools/rdfs2dot.py
index 7135fe62..e8cf5813 100644
--- a/rdflib/tools/rdfs2dot.py
+++ b/rdflib/tools/rdfs2dot.py
@@ -21,14 +21,52 @@ import collections
from rdflib import XSD, RDF, RDFS
-XSDTERMS = [XSD[x] for x in (
- "anyURI", "base64Binary", "boolean", "byte", "date", "dateTime", "decimal",
- "double", "duration", "float", "gDay", "gMonth", "gMonthDay", "gYear",
- "gYearMonth", "hexBinary", "ID", "IDREF", "IDREFS", "int", "integer",
- "language", "long", "Name", "NCName", "negativeInteger", "NMTOKEN",
- "NMTOKENS", "nonNegativeInteger", "nonPositiveInteger", "normalizedString",
- "positiveInteger", "QName", "short", "string", "time", "token",
- "unsignedByte", "unsignedInt", "unsignedLong", "unsignedShort")]
+XSDTERMS = [
+ XSD[x]
+ for x in (
+ "anyURI",
+ "base64Binary",
+ "boolean",
+ "byte",
+ "date",
+ "dateTime",
+ "decimal",
+ "double",
+ "duration",
+ "float",
+ "gDay",
+ "gMonth",
+ "gMonthDay",
+ "gYear",
+ "gYearMonth",
+ "hexBinary",
+ "ID",
+ "IDREF",
+ "IDREFS",
+ "int",
+ "integer",
+ "language",
+ "long",
+ "Name",
+ "NCName",
+ "negativeInteger",
+ "NMTOKEN",
+ "NMTOKENS",
+ "nonNegativeInteger",
+ "nonPositiveInteger",
+ "normalizedString",
+ "positiveInteger",
+ "QName",
+ "short",
+ "string",
+ "time",
+ "token",
+ "unsignedByte",
+ "unsignedInt",
+ "unsignedLong",
+ "unsignedShort",
+ )
+]
EDGECOLOR = "blue"
NODECOLOR = "black"
@@ -52,15 +90,15 @@ def rdfs2dot(g, stream, opts={}):
def label(x, g):
- l = g.value(x, RDFS.label)
- if l is None:
+ l_ = g.value(x, RDFS.label)
+ if l_ is None:
try:
- l = g.namespace_manager.compute_qname(x)[2]
+ l_ = g.namespace_manager.compute_qname(x)[2]
except:
pass # bnodes and some weird URIs cannot be split
- return l
+ return l_
- stream.write(u"digraph { \n node [ fontname=\"DejaVu Sans\" ] ; \n")
+ stream.write(u'digraph { \n node [ fontname="DejaVu Sans" ] ; \n')
for x in g.subjects(RDF.type, RDFS.Class):
n = node(x)
@@ -72,44 +110,52 @@ def rdfs2dot(g, stream, opts={}):
for x in g.subjects(RDF.type, RDF.Property):
for a, b in itertools.product(
- g.objects(x, RDFS.domain), g.objects(x, RDFS.range)):
+ g.objects(x, RDFS.domain), g.objects(x, RDFS.range)
+ ):
if b in XSDTERMS or b == RDFS.Literal:
- l = label(b, g)
+ l_ = label(b, g)
if b == RDFS.Literal:
- l = "literal"
- fields[node(a)].add((label(x, g), l))
+ l_ = "literal"
+ fields[node(a)].add((label(x, g), l_))
else:
# if a in nodes and b in nodes:
stream.write(
- "\t%s -> %s [ color=%s, label=\"%s\" ];\n" % (
- node(a), node(b), EDGECOLOR, label(x, g)))
+ '\t%s -> %s [ color=%s, label="%s" ];\n'
+ % (node(a), node(b), EDGECOLOR, label(x, g))
+ )
for u, n in nodes.items():
stream.write(u"# %s %s\n" % (u, n))
- f = [u"<tr><td align='left'>%s</td><td>%s</td></tr>" %
- x for x in sorted(fields[n])]
- opstr = u"%s [ shape=none, color=%s label=< <table color='#666666'" + \
- u" cellborder=\"0\" cellspacing='0' border=\"1\"><tr>" + \
- u"<td colspan=\"2\" bgcolor='grey'><B>%s</B></td>" + \
- u"</tr>%s</table> > ] \n"
+ f = [
+ u"<tr><td align='left'>%s</td><td>%s</td></tr>" % x
+ for x in sorted(fields[n])
+ ]
+ opstr = (
+ u"%s [ shape=none, color=%s label=< <table color='#666666'"
+ + u' cellborder="0" cellspacing=\'0\' border="1"><tr>'
+ + u"<td colspan=\"2\" bgcolor='grey'><B>%s</B></td>"
+ + u"</tr>%s</table> > ] \n"
+ )
stream.write(opstr % (n, NODECOLOR, label(u, g), u"".join(f)))
stream.write("}\n")
def _help():
- sys.stderr.write("""
+ sys.stderr.write(
+ """
rdfs2dot.py [-f <format>] files...
Read RDF files given on STDOUT, writes a graph of the RDFS schema in
DOT language to stdout
-f specifies parser to use, if not given,
-""")
+"""
+ )
def main():
rdflib.extras.cmdlineutils.main(rdfs2dot, _help)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/rdflib/util.py b/rdflib/util.py
index 1789aa70..57b20915 100644
--- a/rdflib/util.py
+++ b/rdflib/util.py
@@ -34,6 +34,7 @@ from __future__ import print_function
from calendar import timegm
from time import altzone
+
# from time import daylight
from time import gmtime
from time import localtime
@@ -56,10 +57,24 @@ from rdflib.term import URIRef
from rdflib.compat import sign
__all__ = [
- 'list2set', 'first', 'uniq', 'more_than', 'to_term', 'from_n3',
- 'date_time', 'parse_date_time', 'check_context', 'check_subject',
- 'check_predicate', 'check_object', 'check_statement', 'check_pattern',
- 'guess_format', 'find_roots', 'get_tree']
+ "list2set",
+ "first",
+ "uniq",
+ "more_than",
+ "to_term",
+ "from_n3",
+ "date_time",
+ "parse_date_time",
+ "check_context",
+ "check_subject",
+ "check_predicate",
+ "check_object",
+ "check_statement",
+ "check_pattern",
+ "guess_format",
+ "find_roots",
+ "get_tree",
+]
def list2set(seq):
@@ -155,7 +170,7 @@ def from_n3(s, default=None, backend=None, nsm=None):
'''
if not s:
return default
- if s.startswith('<'):
+ if s.startswith("<"):
# Hack: this should correctly handle strings with either native unicode
# characters, or \u1234 unicode escapes.
return URIRef(s[1:-1].encode("raw-unicode-escape").decode("unicode-escape"))
@@ -170,7 +185,7 @@ def from_n3(s, default=None, backend=None, nsm=None):
language = None
# as a given datatype overrules lang-tag check for it first
- dtoffset = rest.rfind('^^')
+ dtoffset = rest.rfind("^^")
if dtoffset >= 0:
# found a datatype
# datatype has to come after lang-tag so ignore everything before
@@ -181,28 +196,28 @@ def from_n3(s, default=None, backend=None, nsm=None):
if rest.startswith("@"):
language = rest[1:] # strip leading at sign
- value = value.replace(r'\"', '"')
+ value = value.replace(r"\"", '"')
# Hack: this should correctly handle strings with either native unicode
# characters, or \u1234 unicode escapes.
value = value.encode("raw-unicode-escape").decode("unicode-escape")
return Literal(value, language, datatype)
- elif s == 'true' or s == 'false':
- return Literal(s == 'true')
+ elif s == "true" or s == "false":
+ return Literal(s == "true")
elif s.isdigit():
return Literal(int(s))
- elif s.startswith('{'):
+ elif s.startswith("{"):
identifier = from_n3(s[1:-1])
return QuotedGraph(backend, identifier)
- elif s.startswith('['):
+ elif s.startswith("["):
identifier = from_n3(s[1:-1])
return Graph(backend, identifier)
elif s.startswith("_:"):
return BNode(s[2:])
- elif ':' in s:
+ elif ":" in s:
if nsm is None:
# instantiate default NamespaceManager and rely on its defaults
nsm = NamespaceManager(Graph())
- prefix, last_part = s.split(':', 1)
+ prefix, last_part = s.split(":", 1)
ns = dict(nsm.namespaces())[prefix]
return Namespace(ns)[last_part]
else:
@@ -210,8 +225,7 @@ def from_n3(s, default=None, backend=None, nsm=None):
def check_context(c):
- if not (isinstance(c, URIRef) or
- isinstance(c, BNode)):
+ if not (isinstance(c, URIRef) or isinstance(c, BNode)):
raise ContextTypeError("%s:%s" % (c, type(c)))
@@ -229,9 +243,7 @@ def check_predicate(p):
def check_object(o):
""" Test that o is a valid object identifier."""
- if not (isinstance(o, URIRef) or
- isinstance(o, Literal) or
- isinstance(o, BNode)):
+ if not (isinstance(o, URIRef) or isinstance(o, Literal) or isinstance(o, BNode)):
raise ObjectTypeError(o)
@@ -243,9 +255,7 @@ def check_statement(triple):
if not isinstance(p, URIRef):
raise PredicateTypeError(p)
- if not (isinstance(o, URIRef) or
- isinstance(o, Literal) or
- isinstance(o, BNode)):
+ if not (isinstance(o, URIRef) or isinstance(o, Literal) or isinstance(o, BNode)):
raise ObjectTypeError(o)
@@ -257,9 +267,9 @@ def check_pattern(triple):
if p and not isinstance(p, URIRef):
raise PredicateTypeError(p)
- if o and not (isinstance(o, URIRef) or
- isinstance(o, Literal) or
- isinstance(o, BNode)):
+ if o and not (
+ isinstance(o, URIRef) or isinstance(o, Literal) or isinstance(o, BNode)
+ ):
raise ObjectTypeError(o)
@@ -294,8 +304,7 @@ def date_time(t=None, local_time_zone=False):
tzd = "Z"
year, month, day, hh, mm, ss, wd, y, z = time_tuple
- s = "%0004d-%02d-%02dT%02d:%02d:%02d%s" % (
- year, month, day, hh, mm, ss, tzd)
+ s = "%0004d-%02d-%02dT%02d:%02d:%02d%s" % (year, month, day, hh, mm, ss, tzd)
return s
@@ -336,25 +345,26 @@ def parse_date_time(val):
year, month, day = ymd.split("-")
hour, minute, second = hms.split(":")
- t = timegm((int(year), int(month), int(day), int(hour),
- int(minute), int(second), 0, 0, 0))
+ t = timegm(
+ (int(year), int(month), int(day), int(hour), int(minute), int(second), 0, 0, 0)
+ )
t = t + tz_offset
return t
SUFFIX_FORMAT_MAP = {
- 'rdf': 'xml',
- 'rdfs': 'xml',
- 'owl': 'xml',
- 'n3': 'n3',
- 'ttl': 'turtle',
- 'nt': 'nt',
- 'trix': 'trix',
- 'xhtml': 'rdfa',
- 'html': 'rdfa',
- 'svg': 'rdfa',
- 'nq': 'nquads',
- 'trig': 'trig'
+ "rdf": "xml",
+ "rdfs": "xml",
+ "owl": "xml",
+ "n3": "n3",
+ "ttl": "turtle",
+ "nt": "nt",
+ "trix": "trix",
+ "xhtml": "rdfa",
+ "html": "rdfa",
+ "svg": "rdfa",
+ "nq": "nquads",
+ "trig": "trig",
}
@@ -405,11 +415,11 @@ def _get_ext(fpath, lower=True):
'rdf'
"""
ext = splitext(fpath)[-1]
- if ext == '' and fpath.startswith("."):
+ if ext == "" and fpath.startswith("."):
ext = fpath
if lower:
ext = ext.lower()
- if ext.startswith('.'):
+ if ext.startswith("."):
ext = ext[1:]
return ext
@@ -438,13 +448,9 @@ def find_roots(graph, prop, roots=None):
return roots
-def get_tree(graph,
- root,
- prop,
- mapper=lambda x: x,
- sortkey=None,
- done=None,
- dir='down'):
+def get_tree(
+ graph, root, prop, mapper=lambda x: x, sortkey=None, done=None, dir="down"
+):
"""
Return a nested list/tuple structure representing the tree
built by the transitive property given, starting from the root given
@@ -470,7 +476,7 @@ def get_tree(graph,
done.add(root)
tree = []
- if dir == 'down':
+ if dir == "down":
branches = graph.subjects(prop, root)
else:
branches = graph.objects(root, prop)
@@ -485,6 +491,7 @@ def get_tree(graph,
def test():
import doctest
+
doctest.testmod()
@@ -496,7 +503,7 @@ if __name__ == "__main__":
# time.tzset()
# except AttributeError, e:
# print e
- # pass
- # tzset missing! see
- # http://mail.python.org/pipermail/python-dev/2003-April/034480.html
+ # pass
+ # tzset missing! see
+ # http://mail.python.org/pipermail/python-dev/2003-April/034480.html
test() # pragma: no cover
diff --git a/rdflib/void.py b/rdflib/void.py
index db9bcc32..92a0e0be 100644
--- a/rdflib/void.py
+++ b/rdflib/void.py
@@ -93,10 +93,8 @@ def generateVoID(g, dataset=None, res=None, distinctForPartitions=True):
res.add((part, VOID.distinctSubjects, Literal(len(classes[c]))))
if distinctForPartitions:
- res.add(
- (part, VOID.properties, Literal(len(classProps[c]))))
- res.add((part, VOID.distinctObjects,
- Literal(len(classObjects[c]))))
+ res.add((part, VOID.properties, Literal(len(classProps[c]))))
+ res.add((part, VOID.distinctObjects, Literal(len(classObjects[c]))))
for i, p in enumerate(properties):
part = URIRef(dataset + "_property%d" % i)
@@ -121,9 +119,7 @@ def generateVoID(g, dataset=None, res=None, distinctForPartitions=True):
res.add((part, VOID.entities, Literal(entities)))
res.add((part, VOID.classes, Literal(len(propClasses))))
- res.add((part, VOID.distinctSubjects,
- Literal(len(propSubjects[p]))))
- res.add((part, VOID.distinctObjects,
- Literal(len(propObjects[p]))))
+ res.add((part, VOID.distinctSubjects, Literal(len(propSubjects[p]))))
+ res.add((part, VOID.distinctObjects, Literal(len(propObjects[p]))))
return res, dataset
diff --git a/requirements.dev.txt b/requirements.dev.txt
new file mode 100644
index 00000000..7e6aaf68
--- /dev/null
+++ b/requirements.dev.txt
@@ -0,0 +1,3 @@
+sphinx
+sphinxcontrib-apidoc
+black
diff --git a/requirements.txt b/requirements.txt
index 215e257a..97125175 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,5 +3,4 @@ html5lib
isodate
pyparsing
requests
-six
doctest-ignore-unicode
diff --git a/run_tests.py b/run_tests.py
index 77bc9fec..ab68f792 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -42,56 +42,58 @@ from __future__ import print_function
NOSE_ARGS = [
- '--with-doctest',
- '--doctest-extension=.doctest',
- '--doctest-tests',
+ "--with-doctest",
+ "--doctest-extension=.doctest",
+ "--doctest-tests",
# '--with-EARL',
]
COVERAGE_EXTRA_ARGS = [
- '--cover-package=rdflib',
- '--cover-inclusive',
+ "--cover-package=rdflib",
+ "--cover-inclusive",
]
-DEFAULT_LOCATION = '--where=./'
+DEFAULT_LOCATION = "--where=./"
DEFAULT_ATTRS = [] # ['!known_issue', '!sparql']
-DEFAULT_DIRS = ['test', 'rdflib']
+DEFAULT_DIRS = ["test", "rdflib"]
-if __name__ == '__main__':
+if __name__ == "__main__":
from sys import argv, exit, stderr
+
try:
import nose
except ImportError:
- print("""\
+ print(
+ """\
Requires Nose. Try:
$ sudo easy_install nose
- Exiting. """, file=stderr)
+ Exiting. """,
+ file=stderr,
+ )
exit(1)
-
- if '--with-coverage' in argv:
+ if "--with-coverage" in argv:
try:
import coverage
except ImportError:
print("No coverage module found, skipping code coverage.", file=stderr)
- argv.remove('--with-coverage')
+ argv.remove("--with-coverage")
else:
NOSE_ARGS += COVERAGE_EXTRA_ARGS
+ if True not in [a.startswith("-a") or a.startswith("--attr=") for a in argv]:
+ argv.append("--attr=" + ",".join(DEFAULT_ATTRS))
- if True not in [a.startswith('-a') or a.startswith('--attr=') for a in argv]:
- argv.append('--attr=' + ','.join(DEFAULT_ATTRS))
-
- if not [a for a in argv[1:] if not a.startswith('-')]:
+ if not [a for a in argv[1:] if not a.startswith("-")]:
argv += DEFAULT_DIRS # since nose doesn't look here by default..
- if not [a for a in argv if a.startswith('--where=')]:
+ if not [a for a in argv if a.startswith("--where=")]:
argv += [DEFAULT_LOCATION]
finalArgs = argv + NOSE_ARGS
diff --git a/setup.py b/setup.py
index 0203f299..7e4d4e21 100644
--- a/setup.py
+++ b/setup.py
@@ -5,10 +5,22 @@ import re
from setuptools import setup, find_packages
kwargs = {}
-kwargs['install_requires'] = [ 'six', 'isodate', 'pyparsing']
-kwargs['tests_require'] = ['html5lib', 'networkx']
-kwargs['test_suite'] = "nose.collector"
-kwargs['extras_require'] = {'html': ['html5lib'], 'sparql': ['requests']}
+kwargs["install_requires"] = ["isodate", "pyparsing"]
+kwargs["tests_require"] = [
+ "html5lib",
+ "networkx",
+ "nose",
+ "doctest-ignore-unicode",
+ "requests",
+]
+kwargs["test_suite"] = "nose.collector"
+kwargs["extras_require"] = {
+ "html": ["html5lib"],
+ "sparql": ["requests"],
+ "tests": kwargs["tests_require"],
+ "docs": ["sphinx < 4", "sphinxcontrib-apidoc"],
+}
+
def find_version(filename):
_version_re = re.compile(r'__version__ = "(.*)"')
@@ -17,20 +29,21 @@ def find_version(filename):
if version_match:
return version_match.group(1)
-version = find_version('rdflib/__init__.py')
-packages = find_packages(exclude=('examples*', 'test*'))
+version = find_version("rdflib/__init__.py")
+
+packages = find_packages(exclude=("examples*", "test*"))
-if os.environ.get('READTHEDOCS', None):
+if os.environ.get("READTHEDOCS", None):
# if building docs for RTD
# install examples, to get docstrings
packages.append("examples")
setup(
- name='rdflib',
+ name="rdflib",
version=version,
description="RDFLib is a Python library for working with RDF, a "
- "simple yet powerful language for representing information.",
+ "simple yet powerful language for representing information.",
author="Daniel 'eikeon' Krech",
author_email="eikeon@eikeon.com",
maintainer="RDFLib Team",
@@ -38,20 +51,19 @@ setup(
url="https://github.com/RDFLib/rdflib",
license="BSD-3-Clause",
platforms=["any"],
+ python_requires=">=3.5",
classifiers=[
- "Programming Language :: Python",
- "Programming Language :: Python :: 2",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 2.7",
- "Programming Language :: Python :: 3.4",
- "Programming Language :: Python :: 3.5",
- "Programming Language :: Python :: 3.6",
- "Programming Language :: Python :: 3.7",
- "License :: OSI Approved :: BSD License",
- "Topic :: Software Development :: Libraries :: Python Modules",
- "Operating System :: OS Independent",
- "Natural Language :: English",
- ],
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "License :: OSI Approved :: BSD License",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Operating System :: OS Independent",
+ "Natural Language :: English",
+ ],
long_description="""\
RDFLib is a Python library for working with
RDF, a simple yet powerful language for representing information.
@@ -76,16 +88,15 @@ Read the docs at:
http://rdflib.readthedocs.io
""",
- packages = packages,
- entry_points = {
- 'console_scripts': [
- 'rdfpipe = rdflib.tools.rdfpipe:main',
- 'csv2rdf = rdflib.tools.csv2rdf:main',
- 'rdf2dot = rdflib.tools.rdf2dot:main',
- 'rdfs2dot = rdflib.tools.rdfs2dot:main',
- 'rdfgraphisomorphism = rdflib.tools.graphisomorphism:main',
- ],
- },
-
+ packages=packages,
+ entry_points={
+ "console_scripts": [
+ "rdfpipe = rdflib.tools.rdfpipe:main",
+ "csv2rdf = rdflib.tools.csv2rdf:main",
+ "rdf2dot = rdflib.tools.rdf2dot:main",
+ "rdfs2dot = rdflib.tools.rdfs2dot:main",
+ "rdfgraphisomorphism = rdflib.tools.graphisomorphism:main",
+ ],
+ },
**kwargs
- )
+)
diff --git a/test/earl.py b/test/earl.py
index 9e4d0413..54df7d3e 100644
--- a/test/earl.py
+++ b/test/earl.py
@@ -9,17 +9,17 @@ EARL = Namespace("http://www.w3.org/ns/earl#")
report = Graph()
-report.bind('foaf', FOAF)
-report.bind('earl', EARL)
-report.bind('doap', DOAP)
-report.bind('dc', DC)
+report.bind("foaf", FOAF)
+report.bind("earl", EARL)
+report.bind("doap", DOAP)
+report.bind("dc", DC)
-me = URIRef('http://gromgull.net/me')
+me = URIRef("http://gromgull.net/me")
report.add((me, RDF.type, FOAF.Person))
report.add((me, FOAF.homepage, URIRef("http://gromgull.net")))
report.add((me, FOAF.name, Literal("Gunnar Aastrand Grimnes")))
-rdflib = URIRef('https://github.com/RDFLib/rdflib')
+rdflib = URIRef("https://github.com/RDFLib/rdflib")
report.add((rdflib, DOAP.homepage, rdflib))
report.add((rdflib, DOAP.name, Literal("rdflib")))
diff --git a/test/manifest.py b/test/manifest.py
index 2c95e4d7..107b9422 100644
--- a/test/manifest.py
+++ b/test/manifest.py
@@ -4,28 +4,28 @@ from collections import namedtuple
from nose.tools import nottest
from rdflib import Graph, RDF, RDFS, Namespace
-from six import text_type
-MF = Namespace('http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#')
-QT = Namespace('http://www.w3.org/2001/sw/DataAccess/tests/test-query#')
-UP = Namespace('http://www.w3.org/2009/sparql/tests/test-update#')
-RDFT = Namespace('http://www.w3.org/ns/rdftest#')
+MF = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#")
+QT = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-query#")
+UP = Namespace("http://www.w3.org/2009/sparql/tests/test-update#")
+RDFT = Namespace("http://www.w3.org/ns/rdftest#")
-DAWG = Namespace('http://www.w3.org/2001/sw/DataAccess/tests/test-dawg#')
+DAWG = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-dawg#")
-RDFTest = namedtuple('RDFTest', ['uri', 'name', 'comment', 'data',
- 'graphdata', 'action', 'result', 'syntax'])
+RDFTest = namedtuple(
+ "RDFTest",
+ ["uri", "name", "comment", "data", "graphdata", "action", "result", "syntax"],
+)
def read_manifest(f, base=None, legacy=False):
-
def _str(x):
if x is not None:
- return text_type(x)
+ return str(x)
return None
g = Graph()
- g.load(f, publicID=base, format='turtle')
+ g.load(f, publicID=base, format="turtle")
for m in g.subjects(RDF.type, MF.Manifest):
@@ -37,17 +37,22 @@ def read_manifest(f, base=None, legacy=False):
for col in g.objects(m, MF.entries):
for e in g.items(col):
- approved = ((e, DAWG.approval, DAWG.Approved) in g or
- (e, DAWG.approval, DAWG.NotClassified) in g or
- (e, RDFT.approval, RDFT.Approved) in g)
+ approved = (
+ (e, DAWG.approval, DAWG.Approved) in g
+ or (e, DAWG.approval, DAWG.NotClassified) in g
+ or (e, RDFT.approval, RDFT.Approved) in g
+ )
# run proposed tests
# approved |= (e, RDFT.approval, RDFT.Proposed) in g
# run legacy tests with no approval set
if legacy:
- approved |= ((e, DAWG.approval, None) not in g and
- (e, RDFT.approval, None) not in g)
+ approved |= (e, DAWG.approval, None) not in g and (
+ e,
+ RDFT.approval,
+ None,
+ ) not in g
if not approved:
continue
@@ -76,15 +81,17 @@ def read_manifest(f, base=None, legacy=False):
data = g.value(a, UP.data)
graphdata = []
for gd in g.objects(a, UP.graphData):
- graphdata.append((g.value(gd, UP.graph),
- g.value(gd, RDFS.label)))
+ graphdata.append(
+ (g.value(gd, UP.graph), g.value(gd, RDFS.label))
+ )
r = g.value(e, MF.result)
resdata = g.value(r, UP.data)
resgraphdata = []
for gd in g.objects(r, UP.graphData):
- resgraphdata.append((g.value(gd, UP.graph),
- g.value(gd, RDFS.label)))
+ resgraphdata.append(
+ (g.value(gd, UP.graph), g.value(gd, RDFS.label))
+ )
res = resdata, resgraphdata
@@ -92,28 +99,37 @@ def read_manifest(f, base=None, legacy=False):
query = g.value(e, MF.action)
syntax = _type == MF.PositiveSyntaxTest11
- elif _type in (MF.PositiveUpdateSyntaxTest11,
- MF.NegativeUpdateSyntaxTest11):
+ elif _type in (
+ MF.PositiveUpdateSyntaxTest11,
+ MF.NegativeUpdateSyntaxTest11,
+ ):
query = g.value(e, MF.action)
syntax = _type == MF.PositiveUpdateSyntaxTest11
- elif _type in (RDFT.TestNQuadsPositiveSyntax,
- RDFT.TestNQuadsNegativeSyntax,
- RDFT.TestTrigPositiveSyntax,
- RDFT.TestTrigNegativeSyntax,
- RDFT.TestNTriplesPositiveSyntax,
- RDFT.TestNTriplesNegativeSyntax,
- RDFT.TestTurtlePositiveSyntax,
- RDFT.TestTurtleNegativeSyntax,
- ):
+ elif _type in (
+ RDFT.TestNQuadsPositiveSyntax,
+ RDFT.TestNQuadsNegativeSyntax,
+ RDFT.TestTrigPositiveSyntax,
+ RDFT.TestTrigNegativeSyntax,
+ RDFT.TestNTriplesPositiveSyntax,
+ RDFT.TestNTriplesNegativeSyntax,
+ RDFT.TestTurtlePositiveSyntax,
+ RDFT.TestTurtleNegativeSyntax,
+ ):
query = g.value(e, MF.action)
- syntax = _type in (RDFT.TestNQuadsPositiveSyntax,
- RDFT.TestNTriplesPositiveSyntax,
- RDFT.TestTrigPositiveSyntax,
- RDFT.TestTurtlePositiveSyntax)
-
- elif _type in (RDFT.TestTurtleEval, RDFT.TestTurtleNegativeEval,
- RDFT.TestTrigEval, RDFT.TestTrigNegativeEval):
+ syntax = _type in (
+ RDFT.TestNQuadsPositiveSyntax,
+ RDFT.TestNTriplesPositiveSyntax,
+ RDFT.TestTrigPositiveSyntax,
+ RDFT.TestTurtlePositiveSyntax,
+ )
+
+ elif _type in (
+ RDFT.TestTurtleEval,
+ RDFT.TestTurtleNegativeEval,
+ RDFT.TestTrigEval,
+ RDFT.TestTrigNegativeEval,
+ ):
query = g.value(e, MF.action)
res = g.value(e, MF.result)
syntax = _type in (RDFT.TestTurtleEval, RDFT.TestTrigEval)
@@ -123,9 +139,16 @@ def read_manifest(f, base=None, legacy=False):
print("I dont know DAWG Test Type %s" % _type)
continue
- yield _type, RDFTest(e, _str(name), _str(comment),
- _str(data), graphdata, _str(query),
- res, syntax)
+ yield _type, RDFTest(
+ e,
+ _str(name),
+ _str(comment),
+ _str(data),
+ graphdata,
+ _str(query),
+ res,
+ syntax,
+ )
@nottest
diff --git a/test/store_performance.py b/test/store_performance.py
index 578a51e5..9e55d654 100644
--- a/test/store_performance.py
+++ b/test/store_performance.py
@@ -24,7 +24,8 @@ class StoreTestCase(unittest.TestCase):
something other than a unit test... but for now we'll add it as a
unit test.
"""
- store = 'default'
+
+ store = "default"
tmppath = None
configString = os.environ.get("DBURI", "dburi")
@@ -36,6 +37,7 @@ class StoreTestCase(unittest.TestCase):
if self.store == "MySQL":
# from test.mysql import configString
from rdflib.store.MySQL import MySQL
+
path = self.configString
MySQL().destroy(path)
else:
@@ -54,10 +56,10 @@ class StoreTestCase(unittest.TestCase):
def testTime(self):
number = 1
print(self.store)
- print("input:", end=' ')
+ print("input:", end=" ")
for i in itertools.repeat(None, number):
self._testInput()
- print("random:", end=' ')
+ print("random:", end=" ")
for i in itertools.repeat(None, number):
self._testRandom()
print(".")
@@ -77,7 +79,7 @@ class StoreTestCase(unittest.TestCase):
for _i in it:
add_random()
t1 = time()
- print("%.3g" % (t1 - t0), end=' ')
+ print("%.3g" % (t1 - t0), end=" ")
def _testInput(self):
number = 1
@@ -92,12 +94,12 @@ class StoreTestCase(unittest.TestCase):
for _i in it:
add_from_input()
t1 = time()
- print("%.3g" % (t1 - t0), end=' ')
+ print("%.3g" % (t1 - t0), end=" ")
class MemoryStoreTestCase(StoreTestCase):
store = "IOMemory"
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_aggregate_graphs.py b/test/test_aggregate_graphs.py
index 4dace1cc..5d58f4d3 100644
--- a/test/test_aggregate_graphs.py
+++ b/test/test_aggregate_graphs.py
@@ -1,7 +1,7 @@
import unittest
-from rdflib.namespace import Namespace, RDF, RDFS
+from rdflib.namespace import RDF, RDFS
from rdflib import plugin
-from six import StringIO
+from io import StringIO
from rdflib.term import URIRef
from rdflib.store import Store
from rdflib.graph import Graph
@@ -36,8 +36,7 @@ testGraph3N3 = """
<> a log:N3Document.
"""
-sparqlQ = \
- """
+sparqlQ = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT *
FROM NAMED <http://example.com/graph1>
@@ -47,14 +46,12 @@ FROM <http://www.w3.org/2000/01/rdf-schema#>
WHERE {?sub ?pred rdfs:Class }"""
-sparqlQ2 =\
- """
+sparqlQ2 = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?class
WHERE { GRAPH ?graph { ?member a ?class } }"""
-sparqlQ3 =\
- """
+sparqlQ3 = """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX log: <http://www.w3.org/2000/10/swap/log#>
SELECT ?n3Doc
@@ -63,15 +60,17 @@ WHERE {?n3Doc a log:N3Document }"""
class GraphAggregates1(unittest.TestCase):
def setUp(self):
- memStore = plugin.get('IOMemory', Store)()
+ memStore = plugin.get("IOMemory", Store)()
self.graph1 = Graph(memStore)
self.graph2 = Graph(memStore)
self.graph3 = Graph(memStore)
- for n3Str, graph in [(testGraph1N3, self.graph1),
- (testGraph2N3, self.graph2),
- (testGraph3N3, self.graph3)]:
- graph.parse(StringIO(n3Str), format='n3')
+ for n3Str, graph in [
+ (testGraph1N3, self.graph1),
+ (testGraph2N3, self.graph2),
+ (testGraph3N3, self.graph3),
+ ]:
+ graph.parse(StringIO(n3Str), format="n3")
self.G = ReadOnlyGraphAggregate([self.graph1, self.graph2, self.graph3])
@@ -92,7 +91,16 @@ class GraphAggregates1(unittest.TestCase):
assert (URIRef("http://test/foo"), RDF.type, RDFS.Resource) in self.G
barPredicates = [URIRef("http://test/d"), RDFS.isDefinedBy]
- assert len(list(self.G.triples_choices((URIRef("http://test/bar"), barPredicates, None)))) == 2
+ assert (
+ len(
+ list(
+ self.G.triples_choices(
+ (URIRef("http://test/bar"), barPredicates, None)
+ )
+ )
+ )
+ == 2
+ )
class GraphAggregates2(unittest.TestCase):
@@ -101,20 +109,22 @@ class GraphAggregates2(unittest.TestCase):
sparql = True
def setUp(self):
- memStore = plugin.get('IOMemory', Store)()
+ memStore = plugin.get("IOMemory", Store)()
self.graph1 = Graph(memStore, URIRef("http://example.com/graph1"))
self.graph2 = Graph(memStore, URIRef("http://example.com/graph2"))
self.graph3 = Graph(memStore, URIRef("http://example.com/graph3"))
- for n3Str, graph in [(testGraph1N3, self.graph1),
- (testGraph2N3, self.graph2),
- (testGraph3N3, self.graph3)]:
- graph.parse(StringIO(n3Str), format='n3')
+ for n3Str, graph in [
+ (testGraph1N3, self.graph1),
+ (testGraph2N3, self.graph2),
+ (testGraph3N3, self.graph3),
+ ]:
+ graph.parse(StringIO(n3Str), format="n3")
self.graph4 = Graph(memStore, RDFS)
self.graph4.parse(RDFS.uri)
self.G = ConjunctiveGraph(memStore)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_auditable.py b/test/test_auditable.py
index 63c7c5e5..e5aff715 100644
--- a/test/test_auditable.py
+++ b/test/test_auditable.py
@@ -9,7 +9,6 @@ EX = Namespace("http://example.org/")
class BaseTestAuditableStore(unittest.TestCase):
-
def assert_graph_equal(self, g1, g2):
try:
return self.assertSetEqual(set(g1), set(g2))
@@ -19,192 +18,157 @@ class BaseTestAuditableStore(unittest.TestCase):
class TestAuditableStore(BaseTestAuditableStore):
-
def setUp(self):
self.g = Graph()
self.g.add((EX.s0, EX.p0, EX.o0))
self.g.add((EX.s0, EX.p0, EX.o0bis))
- self.t = Graph(AuditableStore(self.g.store),
- self.g.identifier)
+ self.t = Graph(AuditableStore(self.g.store), self.g.identifier)
def test_add_commit(self):
self.t.add((EX.s1, EX.p1, EX.o1))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(
+ self.t,
+ [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis), (EX.s1, EX.p1, EX.o1),],
+ )
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(
+ self.g,
+ [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis), (EX.s1, EX.p1, EX.o1),],
+ )
def test_remove_commit(self):
self.t.remove((EX.s0, EX.p0, EX.o0))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(self.t, [(EX.s0, EX.p0, EX.o0bis),])
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(self.g, [(EX.s0, EX.p0, EX.o0bis),])
def test_multiple_remove_commit(self):
self.t.remove((EX.s0, EX.p0, None))
- self.assert_graph_equal(self.t, [
- ])
+ self.assert_graph_equal(self.t, [])
self.t.commit()
- self.assert_graph_equal(self.g, [
- ])
+ self.assert_graph_equal(self.g, [])
def test_noop_add_commit(self):
self.t.add((EX.s0, EX.p0, EX.o0))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.t, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_noop_remove_commit(self):
self.t.add((EX.s0, EX.p0, EX.o0))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.t, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_add_remove_commit(self):
self.t.add((EX.s1, EX.p1, EX.o1))
self.t.remove((EX.s1, EX.p1, EX.o1))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.t, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_remove_add_commit(self):
self.t.remove((EX.s1, EX.p1, EX.o1))
self.t.add((EX.s1, EX.p1, EX.o1))
- self.assert_graph_equal(self.t, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(
+ self.t,
+ [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis), (EX.s1, EX.p1, EX.o1),],
+ )
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(
+ self.g,
+ [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis), (EX.s1, EX.p1, EX.o1),],
+ )
def test_add_rollback(self):
self.t.add((EX.s1, EX.p1, EX.o1))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_remove_rollback(self):
self.t.remove((EX.s0, EX.p0, EX.o0))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_multiple_remove_rollback(self):
self.t.remove((EX.s0, EX.p0, None))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_noop_add_rollback(self):
self.t.add((EX.s0, EX.p0, EX.o0))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_noop_remove_rollback(self):
self.t.add((EX.s0, EX.p0, EX.o0))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_add_remove_rollback(self):
self.t.add((EX.s1, EX.p1, EX.o1))
self.t.remove((EX.s1, EX.p1, EX.o1))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_remove_add_rollback(self):
self.t.remove((EX.s1, EX.p1, EX.o1))
self.t.add((EX.s1, EX.p1, EX.o1))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
class TestAuditableStoreEmptyGraph(BaseTestAuditableStore):
-
def setUp(self):
self.g = Graph()
- self.t = Graph(AuditableStore(self.g.store),
- self.g.identifier)
+ self.t = Graph(AuditableStore(self.g.store), self.g.identifier)
def test_add_commit(self):
self.t.add((EX.s1, EX.p1, EX.o1))
- self.assert_graph_equal(self.t, [
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(self.t, [(EX.s1, EX.p1, EX.o1),])
self.t.commit()
- self.assert_graph_equal(self.g, [
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(self.g, [(EX.s1, EX.p1, EX.o1),])
def test_add_rollback(self):
self.t.add((EX.s1, EX.p1, EX.o1))
self.t.rollback()
- self.assert_graph_equal(self.g, [
- ])
+ self.assert_graph_equal(self.g, [])
class TestAuditableStoreConccurent(BaseTestAuditableStore):
-
def setUp(self):
self.g = Graph()
self.g.add((EX.s0, EX.p0, EX.o0))
self.g.add((EX.s0, EX.p0, EX.o0bis))
- self.t1 = Graph(AuditableStore(self.g.store),
- self.g.identifier)
- self.t2 = Graph(AuditableStore(self.g.store),
- self.g.identifier)
+ self.t1 = Graph(AuditableStore(self.g.store), self.g.identifier)
+ self.t2 = Graph(AuditableStore(self.g.store), self.g.identifier)
self.t1.add((EX.s1, EX.p1, EX.o1))
self.t2.add((EX.s2, EX.p2, EX.o2))
self.t1.remove((EX.s0, EX.p0, EX.o0))
@@ -213,93 +177,71 @@ class TestAuditableStoreConccurent(BaseTestAuditableStore):
def test_commit_commit(self):
self.t1.commit()
self.t2.commit()
- self.assert_graph_equal(self.g, [
- (EX.s1, EX.p1, EX.o1),
- (EX.s2, EX.p2, EX.o2),
- ])
+ self.assert_graph_equal(self.g, [(EX.s1, EX.p1, EX.o1), (EX.s2, EX.p2, EX.o2),])
def test_commit_rollback(self):
self.t1.commit()
self.t2.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s1, EX.p1, EX.o1),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s1, EX.p1, EX.o1), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_rollback_commit(self):
self.t1.rollback()
self.t2.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s2, EX.p2, EX.o2),
- ])
+ self.assert_graph_equal(self.g, [(EX.s0, EX.p0, EX.o0), (EX.s2, EX.p2, EX.o2),])
def test_rollback_rollback(self):
self.t1.rollback()
self.t2.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
class TestAuditableStoreEmbeded(BaseTestAuditableStore):
-
def setUp(self):
self.g = Graph()
self.g.add((EX.s0, EX.p0, EX.o0))
self.g.add((EX.s0, EX.p0, EX.o0bis))
- self.t1 = Graph(AuditableStore(self.g.store),
- self.g.identifier)
+ self.t1 = Graph(AuditableStore(self.g.store), self.g.identifier)
self.t1.add((EX.s1, EX.p1, EX.o1))
self.t1.remove((EX.s0, EX.p0, EX.o0bis))
- self.t2 = Graph(AuditableStore(self.t1.store),
- self.t1.identifier)
+ self.t2 = Graph(AuditableStore(self.t1.store), self.t1.identifier)
self.t2.add((EX.s2, EX.p2, EX.o2))
self.t2.remove((EX.s1, EX.p1, EX.o1))
def test_commit_commit(self):
- self.assert_graph_equal(self.t2, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s2, EX.p2, EX.o2),
- ])
+ self.assert_graph_equal(
+ self.t2, [(EX.s0, EX.p0, EX.o0), (EX.s2, EX.p2, EX.o2),]
+ )
self.t2.commit()
- self.assert_graph_equal(self.t1, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s2, EX.p2, EX.o2),
- ])
+ self.assert_graph_equal(
+ self.t1, [(EX.s0, EX.p0, EX.o0), (EX.s2, EX.p2, EX.o2),]
+ )
self.t1.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s2, EX.p2, EX.o2),
- ])
+ self.assert_graph_equal(self.g, [(EX.s0, EX.p0, EX.o0), (EX.s2, EX.p2, EX.o2),])
def test_commit_rollback(self):
self.t2.commit()
self.t1.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
def test_rollback_commit(self):
self.t2.rollback()
- self.assert_graph_equal(self.t1, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(
+ self.t1, [(EX.s0, EX.p0, EX.o0), (EX.s1, EX.p1, EX.o1),]
+ )
self.t1.commit()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s1, EX.p1, EX.o1),
- ])
+ self.assert_graph_equal(self.g, [(EX.s0, EX.p0, EX.o0), (EX.s1, EX.p1, EX.o1),])
def test_rollback_rollback(self):
self.t2.rollback()
self.t1.rollback()
- self.assert_graph_equal(self.g, [
- (EX.s0, EX.p0, EX.o0),
- (EX.s0, EX.p0, EX.o0bis),
- ])
+ self.assert_graph_equal(
+ self.g, [(EX.s0, EX.p0, EX.o0), (EX.s0, EX.p0, EX.o0bis),]
+ )
diff --git a/test/test_batch_add.py b/test/test_batch_add.py
new file mode 100644
index 00000000..43457e5e
--- /dev/null
+++ b/test/test_batch_add.py
@@ -0,0 +1,88 @@
+import unittest
+from rdflib.graph import Graph, BatchAddGraph
+from rdflib.term import URIRef
+
+
+class TestBatchAddGraph(unittest.TestCase):
+ def test_batch_size_zero_denied(self):
+ with self.assertRaises(ValueError):
+ BatchAddGraph(Graph(), batch_size=0)
+
+ def test_batch_size_none_denied(self):
+ with self.assertRaises(ValueError):
+ BatchAddGraph(Graph(), batch_size=None)
+
+ def test_batch_size_one_denied(self):
+ with self.assertRaises(ValueError):
+ BatchAddGraph(Graph(), batch_size=1)
+
+ def test_batch_size_negative_denied(self):
+ with self.assertRaises(ValueError):
+ BatchAddGraph(Graph(), batch_size=-12)
+
+ def test_exit_submits_partial_batch(self):
+ trip = (URIRef("a"), URIRef("b"), URIRef("c"))
+ g = Graph()
+ with BatchAddGraph(g, batch_size=10) as cut:
+ cut.add(trip)
+ self.assertIn(trip, g)
+
+ def test_add_more_than_batch_size(self):
+ trips = [(URIRef("a"), URIRef("b%d" % i), URIRef("c%d" % i)) for i in range(12)]
+ g = Graph()
+ with BatchAddGraph(g, batch_size=10) as cut:
+ for trip in trips:
+ cut.add(trip)
+ self.assertEqual(12, len(g))
+
+ def test_add_quad_for_non_conjunctive_empty(self):
+ """
+ Graph drops quads that don't match our graph. Make sure we do the same
+ """
+ g = Graph(identifier="http://example.org/g")
+ badg = Graph(identifier="http://example.org/badness")
+ with BatchAddGraph(g) as cut:
+ cut.add((URIRef("a"), URIRef("b"), URIRef("c"), badg))
+ self.assertEqual(0, len(g))
+
+ def test_add_quad_for_non_conjunctive_pass_on_context_matches(self):
+ g = Graph()
+ with BatchAddGraph(g) as cut:
+ cut.add((URIRef("a"), URIRef("b"), URIRef("c"), g))
+ self.assertEqual(1, len(g))
+
+ def test_no_addN_on_exception(self):
+ """
+ Even if we've added triples so far, it may be that attempting to add the last
+ batch is the cause of our exception, so we don't want to attempt again
+ """
+ g = Graph()
+ trips = [(URIRef("a"), URIRef("b%d" % i), URIRef("c%d" % i)) for i in range(12)]
+
+ try:
+ with BatchAddGraph(g, batch_size=10) as cut:
+ for i, trip in enumerate(trips):
+ cut.add(trip)
+ if i == 11:
+ raise Exception("myexc")
+ except Exception as e:
+ if str(e) != "myexc":
+ pass
+ self.assertEqual(10, len(g))
+
+ def test_addN_batching_addN(self):
+ class MockGraph(object):
+ def __init__(self):
+ self.counts = []
+
+ def addN(self, quads):
+ self.counts.append(sum(1 for _ in quads))
+
+ g = MockGraph()
+ quads = [
+ (URIRef("a"), URIRef("b%d" % i), URIRef("c%d" % i), g) for i in range(12)
+ ]
+
+ with BatchAddGraph(g, batch_size=10, batch_addn=True) as cut:
+ cut.addN(quads)
+ self.assertEqual(g.counts, [10, 2])
diff --git a/test/test_bnode_ncname.py b/test/test_bnode_ncname.py
index 78d96567..3e621579 100644
--- a/test/test_bnode_ncname.py
+++ b/test/test_bnode_ncname.py
@@ -1,14 +1,12 @@
# -*- coding: utf-8 -*-
import re
-import sys
-try:
- from hashlib import md5
-except ImportError:
- from md5 import md5
+from hashlib import md5
+
try:
from uuid import uuid4
except ImportError:
+
def uuid4():
"""
Generates a uuid on behalf of Python 2.4
@@ -17,12 +15,13 @@ except ImportError:
import os
import time
import socket
+
try:
preseed = os.urandom(16)
except NotImplementedError:
- preseed = ''
+ preseed = ""
# Have doubts about this. random.seed will just hash the string
- random.seed('%s%s%s' % (preseed, os.getpid(), time.time()))
+ random.seed("%s%s%s" % (preseed, os.getpid(), time.time()))
del preseed
t = int(time.time() * 1000.0)
r = int(random.random() * 100000000000000000)
@@ -31,10 +30,11 @@ except ImportError:
except:
# if we can't get a network address, just imagine one
a = random.random() * 100000000000000000
- strdata = str(t) + ' ' + str(r) + ' ' + str(a)
- data = md5(strdata.encode('ascii')).hexdigest()
+ strdata = str(t) + " " + str(r) + " " + str(a)
+ data = md5(strdata.encode("ascii")).hexdigest()
yield data
+
# Adapted from http://icodesnip.com/snippet/python/simple-universally-unique-id-uuid-or-guid
@@ -72,13 +72,14 @@ def is_ncname(value):
>>> from rdflib import BNode
>>> assert is_ncname(BNode(_sn_gen=bnode_uuid, _prefix="urn:uuid:")) == True
"""
- ncnameexp = re.compile('[A-Za-z][A-Za-z0-9]*')
+ ncnameexp = re.compile("[A-Za-z][A-Za-z0-9]*")
if ncnameexp.match(value):
return True
else:
return False
-if __name__ == '__main__':
+if __name__ == "__main__":
import doctest
+
doctest.testmod()
diff --git a/test/test_canonicalization.py b/test/test_canonicalization.py
index 3ed8ce96..12dd657f 100644
--- a/test/test_canonicalization.py
+++ b/test/test_canonicalization.py
@@ -5,7 +5,6 @@ from rdflib.compare import to_isomorphic, to_canonical_graph
import rdflib
from rdflib.plugins.memory import IOMemory
-from six import text_type
from io import StringIO
@@ -20,39 +19,56 @@ def get_digest_value(rdf, mimetype):
def negative_graph_match_test():
- '''Test of FRIR identifiers against tricky RDF graphs with blank nodes.'''
+ """Test of FRIR identifiers against tricky RDF graphs with blank nodes."""
testInputs = [
- [text_type('''@prefix : <http://example.org/ns#> .
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
<http://example.org> :rel
[ :label "Same" ].
- '''),
- text_type('''@prefix : <http://example.org/ns#> .
+ """
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
<http://example.org> :rel
[ :label "Same" ],
[ :label "Same" ].
- '''),
- False
- ],
- [text_type('''@prefix : <http://example.org/ns#> .
+ """
+ ),
+ False,
+ ],
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
<http://example.org> :rel
<http://example.org/a>.
- '''),
- text_type('''@prefix : <http://example.org/ns#> .
+ """
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
<http://example.org> :rel
<http://example.org/a>,
<http://example.org/a>.
- '''),
- True
- ],
- [text_type('''@prefix : <http://example.org/ns#> .
+ """
+ ),
+ True,
+ ],
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
:linear_two_step_symmetry_start :related [ :related [ :related :linear_two_step_symmatry_end]],
- [ :related [ :related :linear_two_step_symmatry_end]].'''),
- text_type('''@prefix : <http://example.org/ns#> .
+ [ :related [ :related :linear_two_step_symmatry_end]]."""
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
:linear_two_step_symmetry_start :related [ :related [ :related :linear_two_step_symmatry_end]],
- [ :related [ :related :linear_two_step_symmatry_end]].'''),
- True
- ],
- [text_type('''@prefix : <http://example.org/ns#> .
+ [ :related [ :related :linear_two_step_symmatry_end]]."""
+ ),
+ True,
+ ],
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:rel [
@@ -61,8 +77,10 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- text_type('''@prefix : <http://example.org/ns#> .
+ ]."""
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:rel [
@@ -73,11 +91,14 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- False
- ],
+ ]."""
+ ),
+ False,
+ ],
# This test fails because the algorithm purposefully breaks the symmetry of symetric
- [text_type('''@prefix : <http://example.org/ns#> .
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:rel [
@@ -86,8 +107,10 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- text_type('''@prefix : <http://example.org/ns#> .
+ ]."""
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:rel [
@@ -96,10 +119,13 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- True
- ],
- [text_type('''@prefix : <http://example.org/ns#> .
+ ]."""
+ ),
+ True,
+ ],
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:label "foo";
@@ -109,8 +135,10 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- text_type('''@prefix : <http://example.org/ns#> .
+ ]."""
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
_:a :rel [
:rel [
:rel [
@@ -119,10 +147,13 @@ def negative_graph_match_test():
];
];
];
- ].'''),
- False
- ],
- [text_type('''@prefix : <http://example.org/ns#> .
+ ]."""
+ ),
+ False,
+ ],
+ [
+ str(
+ """@prefix : <http://example.org/ns#> .
_:0001 :rel _:0003, _:0004.
_:0002 :rel _:0005, _:0006.
_:0003 :rel _:0001, _:0007, _:0010.
@@ -133,8 +164,10 @@ def negative_graph_match_test():
_:0008 :rel _:0004, _:0006, _:0010.
_:0009 :rel _:0004, _:0005, _:0007.
_:0010 :rel _:0003, _:0006, _:0008.
- '''),
- text_type('''@prefix : <http://example.org/ns#> .
+ """
+ ),
+ str(
+ """@prefix : <http://example.org/ns#> .
_:0001 :rel _:0003, _:0004.
_:0002 :rel _:0005, _:0006.
_:0003 :rel _:0001, _:0007, _:0010.
@@ -145,9 +178,10 @@ def negative_graph_match_test():
_:0005 :rel _:0002, _:0007, _:0009.
_:0006 :rel _:0002, _:0008, _:0010.
_:0007 :rel _:0003, _:0005, _:0009.
- '''),
- True
- ],
+ """
+ ),
+ True,
+ ],
]
def fn(rdf1, rdf2, identical):
@@ -158,6 +192,7 @@ def negative_graph_match_test():
print(rdf2)
print(digest2)
assert (digest1 == digest2) == identical
+
for inputs in testInputs:
yield fn, inputs[0], inputs[1], inputs[2]
@@ -166,66 +201,30 @@ def test_issue494_collapsing_bnodes():
"""Test for https://github.com/RDFLib/rdflib/issues/494 collapsing BNodes"""
g = Graph()
g += [
- (BNode('Na1a8fbcf755f41c1b5728f326be50994'),
- RDF['object'],
- URIRef(u'source')),
- (BNode('Na1a8fbcf755f41c1b5728f326be50994'),
- RDF['predicate'],
- BNode('vcb3')),
- (BNode('Na1a8fbcf755f41c1b5728f326be50994'),
- RDF['subject'],
- BNode('vcb2')),
- (BNode('Na1a8fbcf755f41c1b5728f326be50994'),
- RDF['type'],
- RDF['Statement']),
- (BNode('Na713b02f320d409c806ff0190db324f4'),
- RDF['object'],
- URIRef(u'target')),
- (BNode('Na713b02f320d409c806ff0190db324f4'),
- RDF['predicate'],
- BNode('vcb0')),
- (BNode('Na713b02f320d409c806ff0190db324f4'),
- RDF['subject'],
- URIRef(u'source')),
- (BNode('Na713b02f320d409c806ff0190db324f4'),
- RDF['type'],
- RDF['Statement']),
- (BNode('Ndb804ba690a64b3dbb9063c68d5e3550'),
- RDF['object'],
- BNode('vr0KcS4')),
- (BNode('Ndb804ba690a64b3dbb9063c68d5e3550'),
- RDF['predicate'],
- BNode('vrby3JV')),
- (BNode('Ndb804ba690a64b3dbb9063c68d5e3550'),
- RDF['subject'],
- URIRef(u'source')),
- (BNode('Ndb804ba690a64b3dbb9063c68d5e3550'),
- RDF['type'],
- RDF['Statement']),
- (BNode('Ndfc47fb1cd2d4382bcb8d5eb7835a636'),
- RDF['object'],
- URIRef(u'source')),
- (BNode('Ndfc47fb1cd2d4382bcb8d5eb7835a636'),
- RDF['predicate'],
- BNode('vcb5')),
- (BNode('Ndfc47fb1cd2d4382bcb8d5eb7835a636'),
- RDF['subject'],
- URIRef(u'target')),
- (BNode('Ndfc47fb1cd2d4382bcb8d5eb7835a636'),
- RDF['type'],
- RDF['Statement']),
- (BNode('Nec6864ef180843838aa9805bac835c98'),
- RDF['object'],
- URIRef(u'source')),
- (BNode('Nec6864ef180843838aa9805bac835c98'),
- RDF['predicate'],
- BNode('vcb4')),
- (BNode('Nec6864ef180843838aa9805bac835c98'),
- RDF['subject'],
- URIRef(u'source')),
- (BNode('Nec6864ef180843838aa9805bac835c98'),
- RDF['type'],
- RDF['Statement']),
+ (BNode("Na1a8fbcf755f41c1b5728f326be50994"), RDF["object"], URIRef(u"source")),
+ (BNode("Na1a8fbcf755f41c1b5728f326be50994"), RDF["predicate"], BNode("vcb3")),
+ (BNode("Na1a8fbcf755f41c1b5728f326be50994"), RDF["subject"], BNode("vcb2")),
+ (BNode("Na1a8fbcf755f41c1b5728f326be50994"), RDF["type"], RDF["Statement"]),
+ (BNode("Na713b02f320d409c806ff0190db324f4"), RDF["object"], URIRef(u"target")),
+ (BNode("Na713b02f320d409c806ff0190db324f4"), RDF["predicate"], BNode("vcb0")),
+ (BNode("Na713b02f320d409c806ff0190db324f4"), RDF["subject"], URIRef(u"source")),
+ (BNode("Na713b02f320d409c806ff0190db324f4"), RDF["type"], RDF["Statement"]),
+ (BNode("Ndb804ba690a64b3dbb9063c68d5e3550"), RDF["object"], BNode("vr0KcS4")),
+ (
+ BNode("Ndb804ba690a64b3dbb9063c68d5e3550"),
+ RDF["predicate"],
+ BNode("vrby3JV"),
+ ),
+ (BNode("Ndb804ba690a64b3dbb9063c68d5e3550"), RDF["subject"], URIRef(u"source")),
+ (BNode("Ndb804ba690a64b3dbb9063c68d5e3550"), RDF["type"], RDF["Statement"]),
+ (BNode("Ndfc47fb1cd2d4382bcb8d5eb7835a636"), RDF["object"], URIRef(u"source")),
+ (BNode("Ndfc47fb1cd2d4382bcb8d5eb7835a636"), RDF["predicate"], BNode("vcb5")),
+ (BNode("Ndfc47fb1cd2d4382bcb8d5eb7835a636"), RDF["subject"], URIRef(u"target")),
+ (BNode("Ndfc47fb1cd2d4382bcb8d5eb7835a636"), RDF["type"], RDF["Statement"]),
+ (BNode("Nec6864ef180843838aa9805bac835c98"), RDF["object"], URIRef(u"source")),
+ (BNode("Nec6864ef180843838aa9805bac835c98"), RDF["predicate"], BNode("vcb4")),
+ (BNode("Nec6864ef180843838aa9805bac835c98"), RDF["subject"], URIRef(u"source")),
+ (BNode("Nec6864ef180843838aa9805bac835c98"), RDF["type"], RDF["Statement"]),
]
# print('graph length: %d, nodes: %d' % (len(g), len(g.all_nodes())))
@@ -233,10 +232,10 @@ def test_issue494_collapsing_bnodes():
# for triple_bnode in g.subjects(RDF['type'], RDF['Statement']):
# print(len(list(g.triples([triple_bnode, None, None]))))
# print('all node degrees:')
- g_node_degs = sorted([
- len(list(g.triples([node, None, None])))
- for node in g.all_nodes()
- ], reverse=True)
+ g_node_degs = sorted(
+ [len(list(g.triples([node, None, None]))) for node in g.all_nodes()],
+ reverse=True,
+ )
# print(g_node_degs)
cg = to_canonical_graph(g)
@@ -245,21 +244,20 @@ def test_issue494_collapsing_bnodes():
# for triple_bnode in cg.subjects(RDF['type'], RDF['Statement']):
# print(len(list(cg.triples([triple_bnode, None, None]))))
# print('all node degrees:')
- cg_node_degs = sorted([
- len(list(cg.triples([node, None, None])))
- for node in cg.all_nodes()
- ], reverse=True)
+ cg_node_degs = sorted(
+ [len(list(cg.triples([node, None, None]))) for node in cg.all_nodes()],
+ reverse=True,
+ )
# print(cg_node_degs)
- assert len(g) == len(cg), \
- 'canonicalization changed number of triples in graph'
- assert len(g.all_nodes()) == len(cg.all_nodes()), \
- 'canonicalization changed number of nodes in graph'
- assert len(list(g.subjects(RDF['type'], RDF['Statement']))) == \
- len(list(cg.subjects(RDF['type'], RDF['Statement']))), \
- 'canonicalization changed number of statements'
- assert g_node_degs == cg_node_degs, \
- 'canonicalization changed node degrees'
+ assert len(g) == len(cg), "canonicalization changed number of triples in graph"
+ assert len(g.all_nodes()) == len(
+ cg.all_nodes()
+ ), "canonicalization changed number of nodes in graph"
+ assert len(list(g.subjects(RDF["type"], RDF["Statement"]))) == len(
+ list(cg.subjects(RDF["type"], RDF["Statement"]))
+ ), "canonicalization changed number of statements"
+ assert g_node_degs == cg_node_degs, "canonicalization changed node degrees"
# counter for subject, predicate and object nodes
g_pos_counts = Counter(), Counter(), Counter()
@@ -275,8 +273,9 @@ def test_issue494_collapsing_bnodes():
cg_pos_counts[i][t] += 1
cg_count_signature = [sorted(c.values()) for c in cg_pos_counts]
- assert g_count_signature == cg_count_signature, \
- 'canonicalization changed node position counts'
+ assert (
+ g_count_signature == cg_count_signature
+ ), "canonicalization changed node position counts"
def test_issue682_signing_named_graphs():
@@ -295,11 +294,11 @@ def test_issue682_signing_named_graphs():
gmary = Graph(store=store, identifier=cmary)
- gmary.add((mary, ns['hasName'], Literal("Mary")))
- gmary.add((mary, ns['loves'], john))
+ gmary.add((mary, ns["hasName"], Literal("Mary")))
+ gmary.add((mary, ns["loves"], john))
gjohn = Graph(store=store, identifier=cjohn)
- gjohn.add((john, ns['hasName'], Literal("John")))
+ gjohn.add((john, ns["hasName"], Literal("John")))
ig = to_isomorphic(g)
igmary = to_isomorphic(gmary)
@@ -313,69 +312,109 @@ def test_issue682_signing_named_graphs():
def test_issue725_collapsing_bnodes_2():
g = Graph()
g += [
- (BNode('N0a76d42406b84fe4b8029d0a7fa04244'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#object'),
- BNode('v2')),
- (BNode('N0a76d42406b84fe4b8029d0a7fa04244'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate'),
- BNode('v0')),
- (BNode('N0a76d42406b84fe4b8029d0a7fa04244'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#subject'),
- URIRef(u'urn:gp_learner:fixed_var:target')),
- (BNode('N0a76d42406b84fe4b8029d0a7fa04244'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement')),
- (BNode('N2f62af5936b94a8eb4b1e4bfa8e11d95'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#object'),
- BNode('v1')),
- (BNode('N2f62af5936b94a8eb4b1e4bfa8e11d95'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate'),
- BNode('v0')),
- (BNode('N2f62af5936b94a8eb4b1e4bfa8e11d95'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#subject'),
- URIRef(u'urn:gp_learner:fixed_var:target')),
- (BNode('N2f62af5936b94a8eb4b1e4bfa8e11d95'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement')),
- (BNode('N5ae541f93e1d4e5880450b1bdceb6404'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#object'),
- BNode('v5')),
- (BNode('N5ae541f93e1d4e5880450b1bdceb6404'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate'),
- BNode('v4')),
- (BNode('N5ae541f93e1d4e5880450b1bdceb6404'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#subject'),
- URIRef(u'urn:gp_learner:fixed_var:target')),
- (BNode('N5ae541f93e1d4e5880450b1bdceb6404'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement')),
- (BNode('N86ac7ca781f546ae939b8963895f672e'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#object'),
- URIRef(u'urn:gp_learner:fixed_var:source')),
- (BNode('N86ac7ca781f546ae939b8963895f672e'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate'),
- BNode('v0')),
- (BNode('N86ac7ca781f546ae939b8963895f672e'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#subject'),
- URIRef(u'urn:gp_learner:fixed_var:target')),
- (BNode('N86ac7ca781f546ae939b8963895f672e'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement')),
- (BNode('Nac82b883ca3849b5ab6820b7ac15e490'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#object'),
- BNode('v1')),
- (BNode('Nac82b883ca3849b5ab6820b7ac15e490'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate'),
- BNode('v3')),
- (BNode('Nac82b883ca3849b5ab6820b7ac15e490'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#subject'),
- URIRef(u'urn:gp_learner:fixed_var:target')),
- (BNode('Nac82b883ca3849b5ab6820b7ac15e490'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'),
- URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement'))
+ (
+ BNode("N0a76d42406b84fe4b8029d0a7fa04244"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#object"),
+ BNode("v2"),
+ ),
+ (
+ BNode("N0a76d42406b84fe4b8029d0a7fa04244"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate"),
+ BNode("v0"),
+ ),
+ (
+ BNode("N0a76d42406b84fe4b8029d0a7fa04244"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#subject"),
+ URIRef(u"urn:gp_learner:fixed_var:target"),
+ ),
+ (
+ BNode("N0a76d42406b84fe4b8029d0a7fa04244"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement"),
+ ),
+ (
+ BNode("N2f62af5936b94a8eb4b1e4bfa8e11d95"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#object"),
+ BNode("v1"),
+ ),
+ (
+ BNode("N2f62af5936b94a8eb4b1e4bfa8e11d95"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate"),
+ BNode("v0"),
+ ),
+ (
+ BNode("N2f62af5936b94a8eb4b1e4bfa8e11d95"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#subject"),
+ URIRef(u"urn:gp_learner:fixed_var:target"),
+ ),
+ (
+ BNode("N2f62af5936b94a8eb4b1e4bfa8e11d95"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement"),
+ ),
+ (
+ BNode("N5ae541f93e1d4e5880450b1bdceb6404"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#object"),
+ BNode("v5"),
+ ),
+ (
+ BNode("N5ae541f93e1d4e5880450b1bdceb6404"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate"),
+ BNode("v4"),
+ ),
+ (
+ BNode("N5ae541f93e1d4e5880450b1bdceb6404"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#subject"),
+ URIRef(u"urn:gp_learner:fixed_var:target"),
+ ),
+ (
+ BNode("N5ae541f93e1d4e5880450b1bdceb6404"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement"),
+ ),
+ (
+ BNode("N86ac7ca781f546ae939b8963895f672e"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#object"),
+ URIRef(u"urn:gp_learner:fixed_var:source"),
+ ),
+ (
+ BNode("N86ac7ca781f546ae939b8963895f672e"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate"),
+ BNode("v0"),
+ ),
+ (
+ BNode("N86ac7ca781f546ae939b8963895f672e"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#subject"),
+ URIRef(u"urn:gp_learner:fixed_var:target"),
+ ),
+ (
+ BNode("N86ac7ca781f546ae939b8963895f672e"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement"),
+ ),
+ (
+ BNode("Nac82b883ca3849b5ab6820b7ac15e490"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#object"),
+ BNode("v1"),
+ ),
+ (
+ BNode("Nac82b883ca3849b5ab6820b7ac15e490"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#predicate"),
+ BNode("v3"),
+ ),
+ (
+ BNode("Nac82b883ca3849b5ab6820b7ac15e490"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#subject"),
+ URIRef(u"urn:gp_learner:fixed_var:target"),
+ ),
+ (
+ BNode("Nac82b883ca3849b5ab6820b7ac15e490"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#type"),
+ URIRef(u"http://www.w3.org/1999/02/22-rdf-syntax-ns#Statement"),
+ ),
]
- turtle = '''
+ turtle = """
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix xml: <http://www.w3.org/XML/1998/namespace> .
@@ -404,7 +443,7 @@ def test_issue725_collapsing_bnodes_2():
[] a rdf:Statement ;
rdf:object _:v1 ;
rdf:predicate [ ] ;
- rdf:subject <urn:gp_learner:fixed_var:target> .'''
+ rdf:subject <urn:gp_learner:fixed_var:target> ."""
# g = Graph()
# g.parse(data=turtle, format='turtle')
@@ -437,16 +476,16 @@ def test_issue725_collapsing_bnodes_2():
# [len(list(cg.triples([None, None, node]))) for node in cg.all_nodes()]))
# print(cg.serialize(format='n3'))
- assert (len(g.all_nodes()) == len(cg.all_nodes()))
+ assert len(g.all_nodes()) == len(cg.all_nodes())
cg = to_canonical_graph(g)
- assert len(g) == len(cg), \
- 'canonicalization changed number of triples in graph'
- assert len(g.all_nodes()) == len(cg.all_nodes()), \
- 'canonicalization changed number of nodes in graph'
- assert len(list(g.subjects(RDF['type'], RDF['Statement']))) == \
- len(list(cg.subjects(RDF['type'], RDF['Statement']))), \
- 'canonicalization changed number of statements'
+ assert len(g) == len(cg), "canonicalization changed number of triples in graph"
+ assert len(g.all_nodes()) == len(
+ cg.all_nodes()
+ ), "canonicalization changed number of nodes in graph"
+ assert len(list(g.subjects(RDF["type"], RDF["Statement"]))) == len(
+ list(cg.subjects(RDF["type"], RDF["Statement"]))
+ ), "canonicalization changed number of statements"
# counter for subject, predicate and object nodes
g_pos_counts = Counter(), Counter(), Counter()
@@ -461,5 +500,6 @@ def test_issue725_collapsing_bnodes_2():
cg_pos_counts[i][t] += 1
cg_count_signature = [sorted(c.values()) for c in cg_pos_counts]
- assert g_count_signature == cg_count_signature, \
- 'canonicalization changed node position counts'
+ assert (
+ g_count_signature == cg_count_signature
+ ), "canonicalization changed node position counts"
diff --git a/test/test_comparison.py b/test/test_comparison.py
index 3c8e50d4..8455598c 100644
--- a/test/test_comparison.py
+++ b/test/test_comparison.py
@@ -33,7 +33,6 @@ Ah... it's coming back to me...
class IdentifierEquality(unittest.TestCase):
-
def setUp(self):
self.uriref = URIRef("http://example.org/")
self.bnode = BNode()
@@ -66,7 +65,11 @@ class IdentifierEquality(unittest.TestCase):
self.assertEqual("foo" in CORE_SYNTAX_TERMS, False)
def testH(self):
- self.assertEqual(URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#RDF") in CORE_SYNTAX_TERMS, True)
+ self.assertEqual(
+ URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#RDF")
+ in CORE_SYNTAX_TERMS,
+ True,
+ )
def testI(self):
g = Graph()
diff --git a/test/test_conjunctive_graph.py b/test/test_conjunctive_graph.py
index 5c686027..41bf432f 100644
--- a/test/test_conjunctive_graph.py
+++ b/test/test_conjunctive_graph.py
@@ -19,9 +19,9 @@ def test_bnode_publicid():
g = ConjunctiveGraph()
b = BNode()
- data = '<d:d> <e:e> <f:f> .'
+ data = "<d:d> <e:e> <f:f> ."
print("Parsing %r into %r" % (data, b))
- g.parse(data=data, format='turtle', publicID=b)
+ g.parse(data=data, format="turtle", publicID=b)
triples = list(g.get_context(b).triples((None, None, None)))
if not triples:
@@ -36,8 +36,8 @@ def test_bnode_publicid():
def test_quad_contexts():
g = ConjunctiveGraph()
- a = URIRef('urn:a')
- b = URIRef('urn:b')
+ a = URIRef("urn:a")
+ b = URIRef("urn:b")
g.get_context(a).add((a, a, a))
g.addN([(b, b, b, b)])
@@ -57,11 +57,12 @@ def test_graph_ids():
yield check, dict(data=DATA, publicID=PUBLIC_ID, format="turtle")
- source = StringInputSource(DATA.encode('utf8'))
+ source = StringInputSource(DATA.encode("utf8"))
source.setPublicId(PUBLIC_ID)
- yield check, dict(source=source, format='turtle')
+ yield check, dict(source=source, format="turtle")
-if __name__ == '__main__':
+if __name__ == "__main__":
import nose
+
nose.main(defaultTest=__name__)
diff --git a/test/test_conneg.py b/test/test_conneg.py
index 91d15d34..b8eee3bc 100644
--- a/test/test_conneg.py
+++ b/test/test_conneg.py
@@ -3,8 +3,8 @@ import time
from rdflib import Graph
-from six.moves import _thread
-from six.moves.BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
+import _thread
+from http.server import HTTPServer, BaseHTTPRequestHandler
"""
Test that correct content negoation headers are passed
@@ -56,16 +56,15 @@ class TestHTTPHandler(BaseHTTPRequestHandler):
self.send_header("Content-type", rct)
self.end_headers()
- self.wfile.write(content.encode('utf-8'))
+ self.wfile.write(content.encode("utf-8"))
def log_message(self, *args):
pass
-def runHttpServer(server_class=HTTPServer,
- handler_class=TestHTTPHandler):
+def runHttpServer(server_class=HTTPServer, handler_class=TestHTTPHandler):
"""Start a server than can handle 3 requests :)"""
- server_address = ('localhost', 12345)
+ server_address = ("localhost", 12345)
httpd = server_class(server_address, handler_class)
httpd.handle_request()
@@ -87,5 +86,6 @@ if __name__ == "__main__":
import sys
import nose
+
if len(sys.argv) == 1:
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_container.py b/test/test_container.py
new file mode 100644
index 00000000..ab98114d
--- /dev/null
+++ b/test/test_container.py
@@ -0,0 +1,77 @@
+from rdflib.term import BNode
+from rdflib.term import Literal
+from rdflib import Graph
+from rdflib.container import *
+import unittest
+
+
+class TestContainer(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.g = Graph()
+ cls.c1 = Bag(cls.g, BNode())
+ cls.c2 = Bag(cls.g, BNode(), [Literal("1"), Literal("2"), Literal("3"), Literal("4")])
+ cls.c3 = Alt(cls.g, BNode(), [Literal("1"), Literal("2"), Literal("3"), Literal("4")])
+ cls.c4 = Seq(cls.g, BNode(), [Literal("1"), Literal("2"), Literal("3"), Literal("4")])
+
+ def testA(self):
+ self.assertEqual(len(self.c1) == 0, True)
+
+ def testB(self):
+ self.assertEqual(len(self.c2) == 4, True)
+
+ def testC(self):
+ self.c2.append(Literal("5"))
+ del self.c2[2]
+ self.assertEqual(len(self.c2) == 4, True)
+
+ def testD(self):
+ self.assertEqual(self.c2.index(Literal("5")) == 4, True)
+
+ def testE(self):
+ self.assertEqual(self.c2[2] == Literal("3"), True)
+
+ def testF(self):
+ self.c2[2] = Literal("9")
+ self.assertEqual(self.c2[2] == Literal("9"), True)
+
+ def testG(self):
+ self.c2.clear()
+ self.assertEqual(len(self.c2) == 0, True)
+
+ def testH(self):
+ self.c2.append_multiple([Literal("80"), Literal("90")])
+ self.assertEqual(self.c2[1] == Literal("80"), True)
+
+ def testI(self):
+ self.assertEqual(self.c2[2] == Literal("90"), True)
+
+ def testJ(self):
+ self.assertEqual(len(self.c2) == 2, True)
+
+ def testK(self):
+ self.assertEqual(self.c2.end() == 2, True)
+
+ def testL(self):
+ self.assertEqual(self.c3.anyone() in [Literal("1"), Literal("2"), Literal("3"), Literal("4")], True)
+
+ def testM(self):
+ self.c4.add_at_position(3, Literal("60"))
+ self.assertEqual(len(self.c4) == 5, True)
+
+ def testN(self):
+ self.assertEqual(self.c4.index(Literal("60")) == 3, True)
+
+ def testO(self):
+ self.assertEqual(self.c4.index(Literal("3")) == 4, True)
+
+ def testP(self):
+ self.assertEqual(self.c4.index(Literal("4")) == 5, True)
+
+ def testQ(self):
+ self.assertEqual(self.c2.index(Literal("1000")) == 3, False) # there is no Literal("1000") in the Bag
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/test/test_conventions.py b/test/test_conventions.py
index 268047d7..11d7636a 100644
--- a/test/test_conventions.py
+++ b/test/test_conventions.py
@@ -12,10 +12,9 @@ modules should all be lower-case initial
class A(unittest.TestCase):
-
def module_names(self, path=None, names=None):
- skip_as_ignorably_private = ['embeddedRDF', 'OpenID', 'DublinCore']
+ skip_as_ignorably_private = ["embeddedRDF", "OpenID", "DublinCore"]
if path is None:
path = rdflib.__path__
@@ -23,13 +22,14 @@ class A(unittest.TestCase):
names = set()
# TODO: handle cases where len(path) is not 1
- assert len(path) == 1, "We're assuming the path has exactly one item in it for now"
+ assert (
+ len(path) == 1
+ ), "We're assuming the path has exactly one item in it for now"
path = path[0]
for importer, name, ispkg in pkgutil.iter_modules([path]):
if ispkg:
- result = self.module_names(path=os.path.join(path, name),
- names=names)
+ result = self.module_names(path=os.path.join(path, name), names=names)
names.union(result)
else:
if name != name.lower() and name not in skip_as_ignorably_private:
@@ -38,8 +38,7 @@ class A(unittest.TestCase):
def test_module_names(self):
names = self.module_names()
- self.assertTrue(
- names == set(), "module names '%s' are not lower case" % names)
+ self.assertTrue(names == set(), "module names '%s' are not lower case" % names)
if __name__ == "__main__":
diff --git a/test/test_core_sparqlstore.py b/test/test_core_sparqlstore.py
index 26c7554d..622e4a24 100644
--- a/test/test_core_sparqlstore.py
+++ b/test/test_core_sparqlstore.py
@@ -1,9 +1,10 @@
import unittest
from rdflib.graph import Graph
+
class TestSPARQLStoreGraphCore(unittest.TestCase):
- store_name = 'SPARQLStore'
+ store_name = "SPARQLStore"
path = "http://dbpedia.org/sparql"
storetest = True
create = False
@@ -21,5 +22,5 @@ class TestSPARQLStoreGraphCore(unittest.TestCase):
print("Done")
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_dataset.py b/test/test_dataset.py
index 51ba887a..ef7eda76 100644
--- a/test/test_dataset.py
+++ b/test/test_dataset.py
@@ -4,7 +4,7 @@ import unittest
from tempfile import mkdtemp, mkstemp
import shutil
-from rdflib import Graph, Dataset, URIRef, BNode, plugin
+from rdflib import Dataset, URIRef, plugin
from rdflib.graph import DATASET_DEFAULT_GRAPH_ID
from nose.exc import SkipTest
@@ -22,12 +22,12 @@ from nose.exc import SkipTest
# THIS WILL DELETE ALL DATA IN THE /db dataset
-HOST = 'http://localhost:3030'
-DB = '/db/'
+HOST = "http://localhost:3030"
+DB = "/db/"
class DatasetTestCase(unittest.TestCase):
- store = 'default'
+ store = "default"
slow = True
tmppath = None
@@ -35,11 +35,9 @@ class DatasetTestCase(unittest.TestCase):
try:
self.graph = Dataset(store=self.store)
except ImportError:
- raise SkipTest(
- "Dependencies for store '%s' not available!" % self.store)
+ raise SkipTest("Dependencies for store '%s' not available!" % self.store)
if self.store == "SQLite":
- _, self.tmppath = mkstemp(
- prefix='test', dir='/tmp', suffix='.sqlite')
+ _, self.tmppath = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
elif self.store == "SPARQLUpdateStore":
root = HOST + DB
self.graph.open((root + "sparql", root + "update"))
@@ -48,17 +46,17 @@ class DatasetTestCase(unittest.TestCase):
if self.store != "SPARQLUpdateStore":
self.graph.open(self.tmppath, create=True)
- self.michel = URIRef(u'urn:michel')
- self.tarek = URIRef(u'urn:tarek')
- self.bob = URIRef(u'urn:bob')
- self.likes = URIRef(u'urn:likes')
- self.hates = URIRef(u'urn:hates')
- self.pizza = URIRef(u'urn:pizza')
- self.cheese = URIRef(u'urn:cheese')
+ self.michel = URIRef(u"urn:michel")
+ self.tarek = URIRef(u"urn:tarek")
+ self.bob = URIRef(u"urn:bob")
+ self.likes = URIRef(u"urn:likes")
+ self.hates = URIRef(u"urn:hates")
+ self.pizza = URIRef(u"urn:pizza")
+ self.cheese = URIRef(u"urn:cheese")
# Use regular URIs because SPARQL endpoints like Fuseki alter short names
- self.c1 = URIRef(u'urn:context-1')
- self.c2 = URIRef(u'urn:context-2')
+ self.c1 = URIRef(u"urn:context-1")
+ self.c2 = URIRef(u"urn:context-2")
# delete the graph for each test!
self.graph.remove((None, None, None))
@@ -89,8 +87,10 @@ class DatasetTestCase(unittest.TestCase):
# empty named graphs
if self.store != "SPARQLUpdateStore":
# added graph exists
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([self.c1, DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([self.c1, DATASET_DEFAULT_GRAPH_ID]),
+ )
# added graph is empty
self.assertEqual(len(g1), 0)
@@ -98,8 +98,10 @@ class DatasetTestCase(unittest.TestCase):
g1.add((self.tarek, self.likes, self.pizza))
# added graph still exists
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([self.c1, DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([self.c1, DATASET_DEFAULT_GRAPH_ID]),
+ )
# added graph contains one triple
self.assertEqual(len(g1), 1)
@@ -113,77 +115,89 @@ class DatasetTestCase(unittest.TestCase):
# empty named graphs
if self.store != "SPARQLUpdateStore":
# graph still exists, although empty
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([self.c1, DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([self.c1, DATASET_DEFAULT_GRAPH_ID]),
+ )
g.remove_graph(self.c1)
# graph is gone
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([DATASET_DEFAULT_GRAPH_ID]),
+ )
def testDefaultGraph(self):
# Something the default graph is read-only (e.g. TDB in union mode)
if self.store == "SPARQLUpdateStore":
- print("Please make sure updating the default graph "
- "is supported by your SPARQL endpoint")
+ print(
+ "Please make sure updating the default graph "
+ "is supported by your SPARQL endpoint"
+ )
self.graph.add((self.tarek, self.likes, self.pizza))
self.assertEqual(len(self.graph), 1)
# only default exists
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([DATASET_DEFAULT_GRAPH_ID]),
+ )
# removing default graph removes triples but not actual graph
self.graph.remove_graph(DATASET_DEFAULT_GRAPH_ID)
self.assertEqual(len(self.graph), 0)
# default still exists
- self.assertEqual(set(x.identifier for x in self.graph.contexts()),
- set([DATASET_DEFAULT_GRAPH_ID]))
+ self.assertEqual(
+ set(x.identifier for x in self.graph.contexts()),
+ set([DATASET_DEFAULT_GRAPH_ID]),
+ )
def testNotUnion(self):
# Union depends on the SPARQL endpoint configuration
if self.store == "SPARQLUpdateStore":
- print("Please make sure your SPARQL endpoint has not configured "
- "its default graph as the union of the named graphs")
+ print(
+ "Please make sure your SPARQL endpoint has not configured "
+ "its default graph as the union of the named graphs"
+ )
g1 = self.graph.graph(self.c1)
g1.add((self.tarek, self.likes, self.pizza))
- self.assertEqual(list(self.graph.objects(self.tarek, None)),
- [])
+ self.assertEqual(list(self.graph.objects(self.tarek, None)), [])
self.assertEqual(list(g1.objects(self.tarek, None)), [self.pizza])
# dynamically create classes for each registered Store
pluginname = None
-if __name__ == '__main__':
+if __name__ == "__main__":
if len(sys.argv) > 1:
pluginname = sys.argv[1]
tests = 0
for s in plugin.plugins(pluginname, plugin.Store):
- if s.name in ('default', 'IOMemory', 'Auditable',
- 'Concurrent', 'SPARQLStore'):
+ if s.name in ("default", "IOMemory", "Auditable", "Concurrent", "SPARQLStore"):
continue # these are tested by default
if not s.getClass().graph_aware:
continue
if s.name == "SPARQLUpdateStore":
- from six.moves.urllib.request import urlopen
+ from urllib.request import urlopen
+
try:
assert len(urlopen(HOST).read()) > 0
except:
sys.stderr.write("No SPARQL endpoint for %s (tests skipped)\n" % s.name)
continue
- locals()["t%d" % tests] = type("%sContextTestCase" % s.name, (
- DatasetTestCase,), {"store": s.name})
+ locals()["t%d" % tests] = type(
+ "%sContextTestCase" % s.name, (DatasetTestCase,), {"store": s.name}
+ )
tests += 1
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_datetime.py b/test/test_datetime.py
index c2cad4b6..d71fc392 100644
--- a/test/test_datetime.py
+++ b/test/test_datetime.py
@@ -9,13 +9,14 @@ from isodate.isostrf import DATE_EXT_COMPLETE, TZ_EXT
from rdflib.term import URIRef
from rdflib.term import Literal
from rdflib.namespace import XSD
-from six import text_type
class TestRelativeBase(unittest.TestCase):
def test_equality(self):
- x = Literal("2008-12-01T18:02:00Z",
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ x = Literal(
+ "2008-12-01T18:02:00Z",
+ datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"),
+ )
self.assertEqual(x == x, True)
def test_microseconds(self):
@@ -24,7 +25,7 @@ class TestRelativeBase(unittest.TestCase):
# datetime with microseconds should be cast as a literal with using
# XML Schema dateTime as the literal datatype
- self.assertEqual(text_type(l), '2009-06-15T23:37:06.522630')
+ self.assertEqual(str(l), "2009-06-15T23:37:06.522630")
self.assertEqual(l.datatype, XSD.dateTime)
dt2 = l.toPython()
@@ -32,45 +33,41 @@ class TestRelativeBase(unittest.TestCase):
def test_to_python(self):
dt = "2008-12-01T18:02:00"
- l = Literal(dt,
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ l = Literal(dt, datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"))
self.assertTrue(isinstance(l.toPython(), datetime))
self.assertEqual(l.toPython().isoformat(), dt)
def test_timezone_z(self):
dt = "2008-12-01T18:02:00.522630Z"
- l = Literal(dt,
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ l = Literal(dt, datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"))
self.assertTrue(isinstance(l.toPython(), datetime))
- self.assertEqual(datetime_isoformat(l.toPython(),
- DATE_EXT_COMPLETE + 'T' + '%H:%M:%S.%f' + TZ_EXT),
- dt)
- self.assertEqual(l.toPython().isoformat(),
- "2008-12-01T18:02:00.522630+00:00")
+ self.assertEqual(
+ datetime_isoformat(
+ l.toPython(), DATE_EXT_COMPLETE + "T" + "%H:%M:%S.%f" + TZ_EXT
+ ),
+ dt,
+ )
+ self.assertEqual(l.toPython().isoformat(), "2008-12-01T18:02:00.522630+00:00")
def test_timezone_offset(self):
dt = "2010-02-10T12:36:00+03:00"
- l = Literal(dt,
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ l = Literal(dt, datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"))
self.assertTrue(isinstance(l.toPython(), datetime))
self.assertEqual(l.toPython().isoformat(), dt)
def test_timezone_offset_to_utc(self):
dt = "2010-02-10T12:36:00+03:00"
- l = Literal(dt,
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ l = Literal(dt, datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"))
utc_dt = l.toPython().astimezone(UTC)
- self.assertEqual(datetime_isoformat(utc_dt),
- "2010-02-10T09:36:00Z")
+ self.assertEqual(datetime_isoformat(utc_dt), "2010-02-10T09:36:00Z")
def test_timezone_offset_millisecond(self):
dt = "2011-01-16T19:39:18.239743+01:00"
- l = Literal(dt,
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ l = Literal(dt, datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"))
self.assertTrue(isinstance(l.toPython(), datetime))
self.assertEqual(l.toPython().isoformat(), dt)
diff --git a/test/test_dawg.py b/test/test_dawg.py
index 77c4e419..80289738 100644
--- a/test/test_dawg.py
+++ b/test/test_dawg.py
@@ -21,30 +21,28 @@ except:
defaultdict.__init__(self, int)
def most_common(self, N):
- return [x[0] for x in sorted(self.items(),
- key=itemgetter(1),
- reverse=True)[:10]]
+ return [
+ x[0] for x in sorted(self.items(), key=itemgetter(1), reverse=True)[:10]
+ ]
import datetime
import isodate
-from rdflib import (
- Dataset, Graph, URIRef, BNode)
+from rdflib import Dataset, Graph, URIRef, BNode
from rdflib.query import Result
from rdflib.compare import isomorphic
from rdflib.plugins import sparql as rdflib_sparql_module
-from rdflib.plugins.sparql.algebra import (
- pprintAlgebra, translateQuery, translateUpdate)
+from rdflib.plugins.sparql.algebra import pprintAlgebra, translateQuery, translateUpdate
from rdflib.plugins.sparql.parser import parseQuery, parseUpdate
from rdflib.plugins.sparql.results.rdfresults import RDFResultParser
from rdflib.plugins.sparql.update import evalUpdate
from rdflib.compat import decodeStringEscape, bopen
-from six.moves.urllib.parse import urljoin
-from six import BytesIO
+from urllib.parse import urljoin
+from io import BytesIO
from nose.tools import nottest, eq_
from nose import SkipTest
@@ -52,12 +50,15 @@ from nose import SkipTest
from .manifest import nose_tests, MF, UP
from .earl import report, add_test
+
+
def eq(a, b, msg):
- return eq_(a, b, msg + ': (%r!=%r)' % (a, b))
+ return eq_(a, b, msg + ": (%r!=%r)" % (a, b))
def setFlags():
import rdflib
+
# Several tests rely on lexical form of literals being kept!
rdflib.NORMALIZE_LITERALS = False
@@ -70,6 +71,7 @@ def setFlags():
def resetFlags():
import rdflib
+
# Several tests rely on lexical form of literals being kept!
rdflib.NORMALIZE_LITERALS = True
@@ -114,8 +116,12 @@ def bopen_read_close(fn):
try:
with open("skiptests.list") as skip_tests_f:
- skiptests = dict([(URIRef(x.strip().split(
- "\t")[0]), x.strip().split("\t")[1]) for x in skip_tests_f])
+ skiptests = dict(
+ [
+ (URIRef(x.strip().split("\t")[0]), x.strip().split("\t")[1])
+ for x in skip_tests_f
+ ]
+ )
except IOError:
skiptests = set()
@@ -163,8 +169,8 @@ def bindingsCompatible(a, b):
else:
m[b1] = y[v1]
else:
- # if y[v1]!=b1:
- # return False
+ # if y[v1]!=b1:
+ # return False
try:
if y[v1].neq(b1):
return False
@@ -191,9 +197,14 @@ def pp_binding(solutions):
"""
Pretty print a single binding - for less eye-strain when debugging
"""
- return "\n[" + ",\n\t".join("{" + ", ".join("%s:%s" % (
- x[0], x[1].n3()) for x in bindings.items()) + "}"
- for bindings in solutions) + "]\n"
+ return (
+ "\n["
+ + ",\n\t".join(
+ "{" + ", ".join("%s:%s" % (x[0], x[1].n3()) for x in bindings.items()) + "}"
+ for bindings in solutions
+ )
+ + "]\n"
+ )
@nottest
@@ -246,17 +257,21 @@ def update_test(t):
for x, l in resgraphdata:
resg.load(x, publicID=URIRef(l), format=_fmt(x))
- eq(set(x.identifier for x in g.contexts() if x != g.default_context),
- set(x.identifier for x in resg.contexts()
- if x != resg.default_context), 'named graphs in datasets do not match')
- assert isomorphic(g.default_context, resg.default_context), \
- 'Default graphs are not isomorphic'
+ eq(
+ set(x.identifier for x in g.contexts() if x != g.default_context),
+ set(x.identifier for x in resg.contexts() if x != resg.default_context),
+ "named graphs in datasets do not match",
+ )
+ assert isomorphic(
+ g.default_context, resg.default_context
+ ), "Default graphs are not isomorphic"
for x in g.contexts():
if x == g.default_context:
continue
- assert isomorphic(x, resg.get_context(x.identifier)), \
+ assert isomorphic(x, resg.get_context(x.identifier)), (
"Graphs with ID %s are not isomorphic" % x.identifier
+ )
except Exception as e:
@@ -305,7 +320,7 @@ def update_test(t):
print(bopen_read_close(x[7:]))
print("------------- MY RESULT ----------")
- print(g.serialize(format='trig'))
+ print(g.serialize(format="trig"))
try:
pq = translateUpdate(parseUpdate(bopen_read_close(query[7:])))
@@ -318,6 +333,7 @@ def update_test(t):
print(decodeStringEscape(str(e)))
import pdb
+
pdb.post_mortem(sys.exc_info()[2])
raise
@@ -332,7 +348,7 @@ def query_test(t):
if uri in skiptests:
raise SkipTest()
- def skip(reason='(none)'):
+ def skip(reason="(none)"):
print("Skipping %s from now on." % uri)
with bopen("skiptests.list", "a") as f:
f.write("%s\t%s\n" % (uri, reason))
@@ -350,91 +366,102 @@ def query_test(t):
# no result - syntax test
if syntax:
- translateQuery(parseQuery(
- bopen_read_close(query[7:])), base=urljoin(query, '.'))
+ translateQuery(
+ parseQuery(bopen_read_close(query[7:])), base=urljoin(query, ".")
+ )
else:
# negative syntax test
try:
- translateQuery(parseQuery(
- bopen_read_close(query[7:])), base=urljoin(query, '.'))
+ translateQuery(
+ parseQuery(bopen_read_close(query[7:])),
+ base=urljoin(query, "."),
+ )
- assert False, 'Query should not have parsed!'
+ assert False, "Query should not have parsed!"
except:
pass # it's fine - the query should not parse
return
# eval test - carry out query
- res2 = g.query(bopen_read_close(query[7:]), base=urljoin(query, '.'))
+ res2 = g.query(bopen_read_close(query[7:]), base=urljoin(query, "."))
- if resfile.endswith('ttl'):
+ if resfile.endswith("ttl"):
resg = Graph()
- resg.load(resfile, format='turtle', publicID=resfile)
+ resg.load(resfile, format="turtle", publicID=resfile)
res = RDFResultParser().parse(resg)
- elif resfile.endswith('rdf'):
+ elif resfile.endswith("rdf"):
resg = Graph()
resg.load(resfile, publicID=resfile)
res = RDFResultParser().parse(resg)
else:
with bopen(resfile[7:]) as f:
- if resfile.endswith('srj'):
- res = Result.parse(f, format='json')
- elif resfile.endswith('tsv'):
- res = Result.parse(f, format='tsv')
+ if resfile.endswith("srj"):
+ res = Result.parse(f, format="json")
+ elif resfile.endswith("tsv"):
+ res = Result.parse(f, format="tsv")
- elif resfile.endswith('csv'):
- res = Result.parse(f, format='csv')
+ elif resfile.endswith("csv"):
+ res = Result.parse(f, format="csv")
# CSV is lossy, round-trip our own resultset to
# lose the same info :)
# write bytes, read strings...
s = BytesIO()
- res2.serialize(s, format='csv')
+ res2.serialize(s, format="csv")
s.seek(0)
- res2 = Result.parse(s, format='csv')
+ res2 = Result.parse(s, format="csv")
s.close()
else:
- res = Result.parse(f, format='xml')
+ res = Result.parse(f, format="xml")
if not DETAILEDASSERT:
- eq(res.type, res2.type, 'Types do not match')
- if res.type == 'SELECT':
- eq(set(res.vars), set(res2.vars), 'Vars do not match')
- comp = bindingsCompatible(
- set(res),
- set(res2)
- )
- assert comp, 'Bindings do not match'
- elif res.type == 'ASK':
- eq(res.askAnswer, res2.askAnswer, 'Ask answer does not match')
- elif res.type in ('DESCRIBE', 'CONSTRUCT'):
- assert isomorphic(
- res.graph, res2.graph), 'graphs are not isomorphic!'
+ eq(res.type, res2.type, "Types do not match")
+ if res.type == "SELECT":
+ eq(set(res.vars), set(res2.vars), "Vars do not match")
+ comp = bindingsCompatible(set(res), set(res2))
+ assert comp, "Bindings do not match"
+ elif res.type == "ASK":
+ eq(res.askAnswer, res2.askAnswer, "Ask answer does not match")
+ elif res.type in ("DESCRIBE", "CONSTRUCT"):
+ assert isomorphic(res.graph, res2.graph), "graphs are not isomorphic!"
else:
- raise Exception('Unknown result type: %s' % res.type)
+ raise Exception("Unknown result type: %s" % res.type)
else:
- eq(res.type, res2.type,
- 'Types do not match: %r != %r' % (res.type, res2.type))
- if res.type == 'SELECT':
- eq(set(res.vars),
- set(res2.vars), 'Vars do not match: %r != %r' % (
- set(res.vars), set(res2.vars)))
- assert bindingsCompatible(
- set(res),
- set(res2)
- ), 'Bindings do not match: \nexpected:\n%s\n!=\ngot:\n%s' % (
- res.serialize(format='txt', namespace_manager=g.namespace_manager),
- res2.serialize(format='txt', namespace_manager=g.namespace_manager))
- elif res.type == 'ASK':
- eq(res.askAnswer,
- res2.askAnswer, "Ask answer does not match: %r != %r" % (
- res.askAnswer, res2.askAnswer))
- elif res.type in ('DESCRIBE', 'CONSTRUCT'):
- assert isomorphic(
- res.graph, res2.graph), 'graphs are not isomorphic!'
+ eq(
+ res.type,
+ res2.type,
+ "Types do not match: %r != %r" % (res.type, res2.type),
+ )
+ if res.type == "SELECT":
+ eq(
+ set(res.vars),
+ set(res2.vars),
+ "Vars do not match: %r != %r" % (set(res.vars), set(res2.vars)),
+ )
+ assert bindingsCompatible(set(res), set(res2)), (
+ "Bindings do not match: \nexpected:\n%s\n!=\ngot:\n%s"
+ % (
+ res.serialize(
+ format="txt", namespace_manager=g.namespace_manager
+ ),
+ res2.serialize(
+ format="txt", namespace_manager=g.namespace_manager
+ ),
+ )
+ )
+ elif res.type == "ASK":
+ eq(
+ res.askAnswer,
+ res2.askAnswer,
+ "Ask answer does not match: %r != %r"
+ % (res.askAnswer, res2.askAnswer),
+ )
+ elif res.type in ("DESCRIBE", "CONSTRUCT"):
+ assert isomorphic(res.graph, res2.graph), "graphs are not isomorphic!"
else:
- raise Exception('Unknown result type: %s' % res.type)
+ raise Exception("Unknown result type: %s" % res.type)
except Exception as e:
@@ -478,13 +505,14 @@ def query_test(t):
try:
pq = parseQuery(bopen_read_close(query[7:]))
print("----------------- Parsed ------------------")
- pprintAlgebra(translateQuery(pq, base=urljoin(query, '.')))
+ pprintAlgebra(translateQuery(pq, base=urljoin(query, ".")))
except:
print("(parser error)")
print(decodeStringEscape(str(e)))
import pdb
+
pdb.post_mortem(sys.exc_info()[2])
# pdb.set_trace()
# nose.tools.set_trace()
@@ -496,7 +524,6 @@ testers = {
MF.UpdateEvaluationTest: update_test,
MF.PositiveUpdateSyntaxTest11: update_test,
MF.NegativeUpdateSyntaxTest11: update_test,
-
MF.QueryEvaluationTest: query_test,
MF.NegativeSyntaxTest11: query_test,
MF.PositiveSyntaxTest11: query_test,
@@ -523,10 +550,11 @@ def test_dawg():
resetFlags()
-if __name__ == '__main__':
+if __name__ == "__main__":
import sys
import time
+
start = time.time()
if len(sys.argv) > 1:
NAME = sys.argv[1]
@@ -561,6 +589,7 @@ if __name__ == '__main__':
except:
add_test(t[0], "failed", "error")
import traceback
+
traceback.print_exc()
sys.stderr.write("%s\n" % t[0])
@@ -594,12 +623,13 @@ if __name__ == '__main__':
e_sum = sum(errors.values())
if success + f_sum + e_sum + skip != i:
- print("(Something is wrong, %d!=%d)" % (
- success + f_sum + e_sum + skip, i))
+ print("(Something is wrong, %d!=%d)" % (success + f_sum + e_sum + skip, i))
- print("\n%d tests, %d passed, %d failed, %d errors, \
- %d skipped (%.2f%% success)" % (
- i, success, f_sum, e_sum, skip, 100. * success / i))
+ print(
+ "\n%d tests, %d passed, %d failed, %d errors, \
+ %d skipped (%.2f%% success)"
+ % (i, success, f_sum, e_sum, skip, 100.0 * success / i)
+ )
print("Took %.2fs" % (time.time() - start))
if not NAME:
@@ -609,12 +639,12 @@ if __name__ == '__main__':
with open("testruns.txt", "a") as tf:
tf.write(
"%s\n%d tests, %d passed, %d failed, %d errors, %d "
- "skipped (%.2f%% success)\n\n" % (
- now, i, success, f_sum, e_sum, skip, 100. * success / i)
+ "skipped (%.2f%% success)\n\n"
+ % (now, i, success, f_sum, e_sum, skip, 100.0 * success / i)
)
- earl_report = 'test_reports/rdflib_sparql-%s.ttl' % now.replace(":", "")
+ earl_report = "test_reports/rdflib_sparql-%s.ttl" % now.replace(":", "")
- report.serialize(earl_report, format='n3')
- report.serialize('test_reports/rdflib_sparql-latest.ttl', format='n3')
+ report.serialize(earl_report, format="n3")
+ report.serialize("test_reports/rdflib_sparql-latest.ttl", format="n3")
print("Wrote EARL-report to '%s'" % earl_report)
diff --git a/test/test_diff.py b/test/test_diff.py
index bf49dd9d..7e4db728 100644
--- a/test/test_diff.py
+++ b/test/test_diff.py
@@ -13,7 +13,7 @@ class TestDiff(unittest.TestCase):
def testA(self):
"""with bnode"""
g = rdflib.Graph()
- g.add((rdflib.BNode(), rdflib.URIRef("urn:p"), rdflib.Literal(u'\xe9')))
+ g.add((rdflib.BNode(), rdflib.URIRef("urn:p"), rdflib.Literal(u"\xe9")))
diff = graph_diff(g, g)
@@ -21,7 +21,7 @@ class TestDiff(unittest.TestCase):
"""Curiously, this one passes, even before the fix in issue 151"""
g = rdflib.Graph()
- g.add((rdflib.URIRef("urn:a"), rdflib.URIRef("urn:p"), rdflib.Literal(u'\xe9')))
+ g.add((rdflib.URIRef("urn:a"), rdflib.URIRef("urn:p"), rdflib.Literal(u"\xe9")))
diff = graph_diff(g, g)
diff --git a/test/test_duration.py b/test/test_duration.py
index 07542a45..cdea7ab7 100644
--- a/test/test_duration.py
+++ b/test/test_duration.py
@@ -30,13 +30,15 @@ class TestDuration(unittest.TestCase):
def test_duration_le(self):
self.assertTrue(
- Literal("P4DT5H6M7S", datatype=XSD.duration) < Literal("P8DT10H12M14S", datatype=XSD.duration)
+ Literal("P4DT5H6M7S", datatype=XSD.duration)
+ < Literal("P8DT10H12M14S", datatype=XSD.duration)
)
def test_duration_sum(self):
self.assertEqual(
- Literal("P1Y2M4DT5H6M7S", datatype=XSD.duration) + Literal("P1Y2M4DT5H6M7S", datatype=XSD.duration).toPython(),
- Literal("P2Y4M8DT10H12M14S", datatype=XSD.duration)
+ Literal("P1Y2M4DT5H6M7S", datatype=XSD.duration)
+ + Literal("P1Y2M4DT5H6M7S", datatype=XSD.duration).toPython(),
+ Literal("P2Y4M8DT10H12M14S", datatype=XSD.duration),
)
diff --git a/test/test_empty_xml_base.py b/test/test_empty_xml_base.py
index a2714ee7..2f3364b8 100644
--- a/test/test_empty_xml_base.py
+++ b/test/test_empty_xml_base.py
@@ -9,11 +9,11 @@ from rdflib.graph import ConjunctiveGraph
from rdflib.term import URIRef
from rdflib.namespace import Namespace
from rdflib.namespace import RDF
-from six import StringIO
+from io import StringIO
import unittest
-FOAF = Namespace('http://xmlns.com/foaf/0.1/')
+FOAF = Namespace("http://xmlns.com/foaf/0.1/")
test_data = """
<rdf:RDF
@@ -36,8 +36,8 @@ test_data2 = """
</rdf:RDF>"""
-baseUri = URIRef('http://example.com/')
-baseUri2 = URIRef('http://example.com/foo/bar')
+baseUri = URIRef("http://example.com/")
+baseUri2 = URIRef("http://example.com/foo/bar")
class TestEmptyBase(unittest.TestCase):
@@ -46,10 +46,13 @@ class TestEmptyBase(unittest.TestCase):
self.graph.parse(StringIO(test_data), publicID=baseUri)
def test_base_ref(self):
- self.assertTrue(len(self.graph) == 1,
- "There should be at least one statement in the graph")
- self.assertTrue((baseUri, RDF.type, FOAF.Document) in self.graph,
- "There should be a triple with %s as the subject" % baseUri)
+ self.assertTrue(
+ len(self.graph) == 1, "There should be at least one statement in the graph"
+ )
+ self.assertTrue(
+ (baseUri, RDF.type, FOAF.Document) in self.graph,
+ "There should be a triple with %s as the subject" % baseUri,
+ )
class TestRelativeBase(unittest.TestCase):
@@ -58,11 +61,14 @@ class TestRelativeBase(unittest.TestCase):
self.graph.parse(StringIO(test_data2), publicID=baseUri2)
def test_base_ref(self):
- self.assertTrue(len(self.graph) == 1,
- "There should be at least one statement in the graph")
- resolvedBase = URIRef('http://example.com/baz')
- self.assertTrue((resolvedBase, RDF.type, FOAF.Document) in self.graph,
- "There should be a triple with %s as the subject" % resolvedBase)
+ self.assertTrue(
+ len(self.graph) == 1, "There should be at least one statement in the graph"
+ )
+ resolvedBase = URIRef("http://example.com/baz")
+ self.assertTrue(
+ (resolvedBase, RDF.type, FOAF.Document) in self.graph,
+ "There should be a triple with %s as the subject" % resolvedBase,
+ )
if __name__ == "__main__":
diff --git a/test/test_evaluate_bind.py b/test/test_evaluate_bind.py
index bd4ea440..382b4ed5 100644
--- a/test/test_evaluate_bind.py
+++ b/test/test_evaluate_bind.py
@@ -8,19 +8,29 @@ from rdflib import Graph, URIRef, Literal, Variable
def test_bind():
base = "http://example.org/"
g = Graph()
- g.add((URIRef(
- base + "thing"), URIRef(base + "ns#comment"), Literal("anything")))
+ g.add((URIRef(base + "thing"), URIRef(base + "ns#comment"), Literal("anything")))
def check(expr, var, obj):
- r = g.query("""
+ r = g.query(
+ """
prefix : <http://example.org/ns#>
- select * where { ?s ?p ?o . %s } """ % expr)
+ select * where { ?s ?p ?o . %s } """
+ % expr
+ )
assert r.bindings[0][Variable(var)] == obj
- yield (check, 'bind("thing" as ?name)', 'name', Literal("thing"))
+ yield (check, 'bind("thing" as ?name)', "name", Literal("thing"))
- yield (check, 'bind(<http://example.org/other> as ?other)', 'other',
- URIRef("http://example.org/other"))
+ yield (
+ check,
+ "bind(<http://example.org/other> as ?other)",
+ "other",
+ URIRef("http://example.org/other"),
+ )
- yield (check, "bind(:Thing as ?type)", 'type',
- URIRef("http://example.org/ns#Thing"))
+ yield (
+ check,
+ "bind(:Thing as ?type)",
+ "type",
+ URIRef("http://example.org/ns#Thing"),
+ )
diff --git a/test/test_events.py b/test/test_events.py
index f7f706a9..6b413781 100644
--- a/test/test_events.py
+++ b/test/test_events.py
@@ -1,4 +1,3 @@
-
import unittest
from rdflib import events
@@ -24,7 +23,6 @@ def subscribe_all(caches):
class Cache(events.Dispatcher):
-
def __init__(self, data=None):
if data is None:
data = {}
@@ -54,18 +52,17 @@ class Cache(events.Dispatcher):
class EventTestCase(unittest.TestCase):
-
def testEvents(self):
c1 = Cache()
c2 = Cache()
c3 = Cache()
subscribe_all([c1, c2, c3])
- c1['bob'] = 'uncle'
- assert c2['bob'] == 'uncle'
- assert c3['bob'] == 'uncle'
- del c3['bob']
- assert ('bob' in c1) == False
- assert ('bob' in c2) == False
+ c1["bob"] = "uncle"
+ assert c2["bob"] == "uncle"
+ assert c3["bob"] == "uncle"
+ del c3["bob"]
+ assert ("bob" in c1) == False
+ assert ("bob" in c2) == False
if __name__ == "__main__":
diff --git a/test/test_expressions.py b/test/test_expressions.py
index d88d7766..1323e4fc 100644
--- a/test/test_expressions.py
+++ b/test/test_expressions.py
@@ -24,88 +24,89 @@ def _eval(e, ctx=None):
def _translate(e):
- return simplify(traverse(
- e, visitPost=partial(translatePName, prologue=Prologue())))
+ return simplify(traverse(e, visitPost=partial(translatePName, prologue=Prologue())))
def testRegex():
- assert _eval(
- _translate((p.Expression.parseString('REGEX("zxcabczxc","abc")')[0])))
+ assert _eval(_translate((p.Expression.parseString('REGEX("zxcabczxc","abc")')[0])))
- eq(bool(_eval(_translate(
- (p.Expression.parseString('REGEX("zxczxc","abc")')[0])))), False)
+ eq(
+ bool(_eval(_translate((p.Expression.parseString('REGEX("zxczxc","abc")')[0])))),
+ False,
+ )
- assert _eval(_translate(
- (p.Expression.parseString('REGEX("bbbaaaaabbb","ba*b")')[0])))
+ assert _eval(
+ _translate((p.Expression.parseString('REGEX("bbbaaaaabbb","ba*b")')[0]))
+ )
def test_arithmetic():
- eq(_eval(_translate((p.Expression.parseString('2+3')[0]))).value, 5)
- eq(_eval(_translate((p.Expression.parseString('3-2')[0]))).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("2+3")[0]))).value, 5)
+ eq(_eval(_translate((p.Expression.parseString("3-2")[0]))).value, 1)
- eq(_eval(_translate((p.Expression.parseString('2*3')[0]))).value, 6)
- eq(_eval(_translate((p.Expression.parseString('4/2')[0]))).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("2*3")[0]))).value, 6)
+ eq(_eval(_translate((p.Expression.parseString("4/2")[0]))).value, 2)
- eq(_eval(_translate((p.Expression.parseString('2+2+2')[0]))).value, 6)
- eq(_eval(_translate((p.Expression.parseString('2-2+2')[0]))).value, 2)
- eq(_eval(_translate((p.Expression.parseString('(2-2)+2')[0]))).value, 2)
- eq(_eval(_translate((p.Expression.parseString('2-(2+2)')[0]))).value, -2)
+ eq(_eval(_translate((p.Expression.parseString("2+2+2")[0]))).value, 6)
+ eq(_eval(_translate((p.Expression.parseString("2-2+2")[0]))).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("(2-2)+2")[0]))).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("2-(2+2)")[0]))).value, -2)
- eq(_eval(_translate((p.Expression.parseString('2*2*2')[0]))).value, 8)
- eq(_eval(_translate((p.Expression.parseString('4/2*2')[0]))).value, 4)
- eq(_eval(_translate((p.Expression.parseString('8/4*2')[0]))).value, 4)
- eq(_eval(_translate((p.Expression.parseString('8/(4*2)')[0]))).value, 1)
- eq(_eval(_translate((p.Expression.parseString('(2/2)*2')[0]))).value, 2)
- eq(_eval(_translate((p.Expression.parseString('4/(2*2)')[0]))).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("2*2*2")[0]))).value, 8)
+ eq(_eval(_translate((p.Expression.parseString("4/2*2")[0]))).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("8/4*2")[0]))).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("8/(4*2)")[0]))).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("(2/2)*2")[0]))).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("4/(2*2)")[0]))).value, 1)
- eq(_eval(_translate((p.Expression.parseString('2+3*2')[0]))).value, 8)
- eq(_eval(_translate((p.Expression.parseString('(2+3)*2')[0]))).value, 10)
- eq(_eval(_translate((p.Expression.parseString('2+4/2')[0]))).value, 4)
- eq(_eval(_translate((p.Expression.parseString('(2+4)/2')[0]))).value, 3)
+ eq(_eval(_translate((p.Expression.parseString("2+3*2")[0]))).value, 8)
+ eq(_eval(_translate((p.Expression.parseString("(2+3)*2")[0]))).value, 10)
+ eq(_eval(_translate((p.Expression.parseString("2+4/2")[0]))).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("(2+4)/2")[0]))).value, 3)
def test_arithmetic_var():
ctx = QueryContext()
- ctx[Variable('x')] = Literal(2)
+ ctx[Variable("x")] = Literal(2)
- eq(_eval(_translate((p.Expression.parseString('2+?x')[0])), ctx).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("2+?x")[0])), ctx).value, 4)
- eq(_eval(_translate((p.Expression.parseString('?x+3')[0])), ctx).value, 5)
- eq(_eval(_translate((p.Expression.parseString('3-?x')[0])), ctx).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("?x+3")[0])), ctx).value, 5)
+ eq(_eval(_translate((p.Expression.parseString("3-?x")[0])), ctx).value, 1)
- eq(_eval(_translate((p.Expression.parseString('?x*3')[0])), ctx).value, 6)
- eq(_eval(_translate((p.Expression.parseString('4/?x')[0])), ctx).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("?x*3")[0])), ctx).value, 6)
+ eq(_eval(_translate((p.Expression.parseString("4/?x")[0])), ctx).value, 2)
- eq(_eval(_translate((p.Expression.parseString('?x+?x+?x')[0])), ctx).value, 6)
- eq(_eval(_translate((p.Expression.parseString('?x-?x+?x')[0])), ctx).value, 2)
- eq(_eval(_translate((p.Expression.parseString('(?x-?x)+?x')[0])), ctx).value, 2)
- eq(_eval(_translate((p.Expression.parseString('?x-(?x+?x)')[0])), ctx).value, -2)
+ eq(_eval(_translate((p.Expression.parseString("?x+?x+?x")[0])), ctx).value, 6)
+ eq(_eval(_translate((p.Expression.parseString("?x-?x+?x")[0])), ctx).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("(?x-?x)+?x")[0])), ctx).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("?x-(?x+?x)")[0])), ctx).value, -2)
- eq(_eval(_translate((p.Expression.parseString('?x*?x*?x')[0])), ctx).value, 8)
- eq(_eval(_translate((p.Expression.parseString('4/?x*?x')[0])), ctx).value, 4)
- eq(_eval(_translate((p.Expression.parseString('8/4*?x')[0])), ctx).value, 4)
- eq(_eval(_translate((p.Expression.parseString('8/(4*?x)')[0])), ctx).value, 1)
- eq(_eval(_translate((p.Expression.parseString('(?x/?x)*?x')[0])), ctx).value, 2)
- eq(_eval(_translate((p.Expression.parseString('4/(?x*?x)')[0])), ctx).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("?x*?x*?x")[0])), ctx).value, 8)
+ eq(_eval(_translate((p.Expression.parseString("4/?x*?x")[0])), ctx).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("8/4*?x")[0])), ctx).value, 4)
+ eq(_eval(_translate((p.Expression.parseString("8/(4*?x)")[0])), ctx).value, 1)
+ eq(_eval(_translate((p.Expression.parseString("(?x/?x)*?x")[0])), ctx).value, 2)
+ eq(_eval(_translate((p.Expression.parseString("4/(?x*?x)")[0])), ctx).value, 1)
def test_comparisons():
- eq(bool(_eval(_translate((p.Expression.parseString('2<3')[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('2<3.0')[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('2<3e0')[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2<3")[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2<3.0")[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2<3e0")[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('4<3')[0])))), False)
- eq(bool(_eval(_translate((p.Expression.parseString('4<3.0')[0])))), False)
- eq(bool(_eval(_translate((p.Expression.parseString('4<3e0')[0])))), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("4<3")[0])))), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("4<3.0")[0])))), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("4<3e0")[0])))), False)
- eq(bool(_eval(_translate((p.Expression.parseString('2<2.1')[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('2<21e-1')[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2<2.1")[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2<21e-1")[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('2=2.0')[0])))), True)
- eq(bool(_eval(_translate((p.Expression.parseString('2=2e0')[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2=2.0")[0])))), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("2=2e0")[0])))), True)
eq(bool(_eval(_translate((p.Expression.parseString('2="cake"')[0])))), False)
@@ -113,39 +114,46 @@ def test_comparisons():
def test_comparisons_var():
ctx = QueryContext()
- ctx[Variable('x')] = Literal(2)
+ ctx[Variable("x")] = Literal(2)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3')[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3.0')[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3e0')[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3")[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3.0")[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3e0")[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<2.1')[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<21e-1')[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<2.1")[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<21e-1")[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x=2.0')[0])), ctx)), True)
- eq(bool(_eval(_translate((p.Expression.parseString('?x=2e0')[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x=2.0")[0])), ctx)), True)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x=2e0")[0])), ctx)), True)
eq(bool(_eval(_translate((p.Expression.parseString('?x="cake"')[0])), ctx)), False)
ctx = QueryContext()
- ctx[Variable('x')] = Literal(4)
+ ctx[Variable("x")] = Literal(4)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3')[0])), ctx)), False)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3.0')[0])), ctx)), False)
- eq(bool(_eval(_translate((p.Expression.parseString('?x<3e0')[0])), ctx)), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3")[0])), ctx)), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3.0")[0])), ctx)), False)
+ eq(bool(_eval(_translate((p.Expression.parseString("?x<3e0")[0])), ctx)), False)
def test_and_or():
- eq(bool(_eval(_translate((p.Expression.parseString('3>2 && 3>1')[0])))), True)
- eq(bool(_eval(
- _translate((p.Expression.parseString('3>2 && 3>4 || 2>1')[0])))), True)
- eq(bool(_eval(
- _translate((p.Expression.parseString('2>1 || 3>2 && 3>4')[0])))), True)
- eq(bool(_eval(_translate(
- (p.Expression.parseString('(2>1 || 3>2) && 3>4')[0])))), False)
-
-
-if __name__ == '__main__':
+ eq(bool(_eval(_translate((p.Expression.parseString("3>2 && 3>1")[0])))), True)
+ eq(
+ bool(_eval(_translate((p.Expression.parseString("3>2 && 3>4 || 2>1")[0])))),
+ True,
+ )
+ eq(
+ bool(_eval(_translate((p.Expression.parseString("2>1 || 3>2 && 3>4")[0])))),
+ True,
+ )
+ eq(
+ bool(_eval(_translate((p.Expression.parseString("(2>1 || 3>2) && 3>4")[0])))),
+ False,
+ )
+
+
+if __name__ == "__main__":
import nose
import sys
+
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_extras_external_graph_libs.py b/test/test_extras_external_graph_libs.py
index 5ccd67bc..25b69298 100644
--- a/test/test_extras_external_graph_libs.py
+++ b/test/test_extras_external_graph_libs.py
@@ -1,6 +1,5 @@
from nose import SkipTest
from rdflib import Graph, URIRef, Literal
-from six import text_type
def test_rdflib_to_networkx():
@@ -11,9 +10,10 @@ def test_rdflib_to_networkx():
from rdflib.extras.external_graph_libs import rdflib_to_networkx_multidigraph
from rdflib.extras.external_graph_libs import rdflib_to_networkx_digraph
from rdflib.extras.external_graph_libs import rdflib_to_networkx_graph
+
g = Graph()
- a, b, l = URIRef('a'), URIRef('b'), Literal('l')
- p, q = URIRef('p'), URIRef('q')
+ a, b, l = URIRef("a"), URIRef("b"), Literal("l")
+ p, q = URIRef("p"), URIRef("q")
edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)]
for t in edges:
g.add(t)
@@ -29,26 +29,26 @@ def test_rdflib_to_networkx():
assert mdg.has_edge(a, b, key=1)
dg = rdflib_to_networkx_digraph(g)
- assert dg[a][b]['weight'] == 2
- assert sorted(dg[a][b]['triples']) == [(a, p, b), (a, q, b)]
+ assert dg[a][b]["weight"] == 2
+ assert sorted(dg[a][b]["triples"]) == [(a, p, b), (a, q, b)]
assert len(dg.edges()) == 3
assert dg.size() == 3
- assert dg.size(weight='weight') == 4.0
+ assert dg.size(weight="weight") == 4.0
dg = rdflib_to_networkx_graph(g, False, edge_attrs=lambda s, p, o: {})
- assert 'weight' not in dg[a][b]
- assert 'triples' not in dg[a][b]
+ assert "weight" not in dg[a][b]
+ assert "triples" not in dg[a][b]
ug = rdflib_to_networkx_graph(g)
- assert ug[a][b]['weight'] == 3
- assert sorted(ug[a][b]['triples']) == [(a, p, b), (a, q, b), (b, p, a)]
+ assert ug[a][b]["weight"] == 3
+ assert sorted(ug[a][b]["triples"]) == [(a, p, b), (a, q, b), (b, p, a)]
assert len(ug.edges()) == 2
assert ug.size() == 2
- assert ug.size(weight='weight') == 4.0
+ assert ug.size(weight="weight") == 4.0
ug = rdflib_to_networkx_graph(g, False, edge_attrs=lambda s, p, o: {})
- assert 'weight' not in ug[a][b]
- assert 'triples' not in ug[a][b]
+ assert "weight" not in ug[a][b]
+ assert "triples" not in ug[a][b]
def test_rdflib_to_graphtool():
@@ -57,9 +57,10 @@ def test_rdflib_to_graphtool():
except ImportError:
raise SkipTest("couldn't find graph_tool")
from rdflib.extras.external_graph_libs import rdflib_to_graphtool
+
g = Graph()
- a, b, l = URIRef('a'), URIRef('b'), Literal('l')
- p, q = URIRef('p'), URIRef('q')
+ a, b, l = URIRef("a"), URIRef("b"), Literal("l")
+ p, q = URIRef("p"), URIRef("q")
edges = [(a, p, b), (a, q, b), (b, p, a), (b, p, l)]
for t in edges:
g.add(t)
@@ -67,26 +68,26 @@ def test_rdflib_to_graphtool():
mdg = rdflib_to_graphtool(g)
assert len(list(mdg.edges())) == 4
- vpterm = mdg.vertex_properties['term']
+ vpterm = mdg.vertex_properties["term"]
va = gt_util.find_vertex(mdg, vpterm, a)[0]
vb = gt_util.find_vertex(mdg, vpterm, b)[0]
vl = gt_util.find_vertex(mdg, vpterm, l)[0]
assert (va, vb) in [(e.source(), e.target()) for e in list(mdg.edges())]
- epterm = mdg.edge_properties['term']
+ epterm = mdg.edge_properties["term"]
assert len(list(gt_util.find_edge(mdg, epterm, p))) == 3
assert len(list(gt_util.find_edge(mdg, epterm, q))) == 1
mdg = rdflib_to_graphtool(
- g,
- e_prop_names=[text_type('name')],
- transform_p=lambda s, p, o: {text_type('name'): text_type(p)})
- epterm = mdg.edge_properties['name']
- assert len(list(gt_util.find_edge(mdg, epterm, text_type(p)))) == 3
- assert len(list(gt_util.find_edge(mdg, epterm, text_type(q)))) == 1
+ g, e_prop_names=[str("name")], transform_p=lambda s, p, o: {str("name"): str(p)}
+ )
+ epterm = mdg.edge_properties["name"]
+ assert len(list(gt_util.find_edge(mdg, epterm, str(p)))) == 3
+ assert len(list(gt_util.find_edge(mdg, epterm, str(q)))) == 1
if __name__ == "__main__":
import sys
import nose
+
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_finalnewline.py b/test/test_finalnewline.py
index 956d6c0a..c78ac247 100644
--- a/test/test_finalnewline.py
+++ b/test/test_finalnewline.py
@@ -1,9 +1,6 @@
-
from rdflib import ConjunctiveGraph, URIRef
import rdflib.plugin
-from six import b
-
def testFinalNewline():
"""
@@ -12,15 +9,19 @@ def testFinalNewline():
import sys
graph = ConjunctiveGraph()
- graph.add((URIRef("http://ex.org/a"),
- URIRef("http://ex.org/b"),
- URIRef("http://ex.org/c")))
+ graph.add(
+ (
+ URIRef("http://ex.org/a"),
+ URIRef("http://ex.org/b"),
+ URIRef("http://ex.org/c"),
+ )
+ )
failed = set()
for p in rdflib.plugin.plugins(None, rdflib.plugin.Serializer):
v = graph.serialize(format=p.name)
- lines = v.split(b("\n"))
- if b("\n") not in v or (lines[-1] != b('')):
+ lines = v.split("\n".encode("latin-1"))
+ if "\n".encode("latin-1") not in v or (lines[-1] != "".encode("latin-1")):
failed.add(p.name)
assert len(failed) == 0, "No final newline for formats: '%s'" % failed
@@ -29,5 +30,6 @@ if __name__ == "__main__":
import sys
import nose
+
if len(sys.argv) == 1:
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_graph.py b/test/test_graph.py
index 228550ec..0032213e 100644
--- a/test/test_graph.py
+++ b/test/test_graph.py
@@ -11,29 +11,27 @@ from nose.exc import SkipTest
class GraphTestCase(unittest.TestCase):
- store = 'default'
+ store = "default"
tmppath = None
def setUp(self):
try:
self.graph = Graph(store=self.store)
except ImportError:
- raise SkipTest(
- "Dependencies for store '%s' not available!" % self.store)
+ raise SkipTest("Dependencies for store '%s' not available!" % self.store)
if self.store == "SQLite":
- _, self.tmppath = mkstemp(
- prefix='test', dir='/tmp', suffix='.sqlite')
+ _, self.tmppath = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
else:
self.tmppath = mkdtemp()
self.graph.open(self.tmppath, create=True)
- self.michel = URIRef(u'michel')
- self.tarek = URIRef(u'tarek')
- self.bob = URIRef(u'bob')
- self.likes = URIRef(u'likes')
- self.hates = URIRef(u'hates')
- self.pizza = URIRef(u'pizza')
- self.cheese = URIRef(u'cheese')
+ self.michel = URIRef(u"michel")
+ self.tarek = URIRef(u"tarek")
+ self.bob = URIRef(u"bob")
+ self.likes = URIRef(u"likes")
+ self.hates = URIRef(u"hates")
+ self.pizza = URIRef(u"pizza")
+ self.cheese = URIRef(u"cheese")
def tearDown(self):
self.graph.close()
@@ -254,21 +252,27 @@ class GraphTestCase(unittest.TestCase):
# dynamically create classes for each registered Store
pluginname = None
-if __name__ == '__main__':
+if __name__ == "__main__":
if len(sys.argv) > 1:
pluginname = sys.argv[1]
tests = 0
for s in plugin.plugins(pluginname, plugin.Store):
- if s.name in ('default', 'IOMemory', 'Auditable',
- 'Concurrent', 'SPARQLStore',
- 'SPARQLUpdateStore'):
+ if s.name in (
+ "default",
+ "IOMemory",
+ "Auditable",
+ "Concurrent",
+ "SPARQLStore",
+ "SPARQLUpdateStore",
+ ):
continue # these are tested by default
- locals()["t%d" % tests] = type("%sGraphTestCase" %
- s.name, (GraphTestCase,), {"store": s.name})
+ locals()["t%d" % tests] = type(
+ "%sGraphTestCase" % s.name, (GraphTestCase,), {"store": s.name}
+ )
tests += 1
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main(argv=sys.argv[:1])
diff --git a/test/test_graph_context.py b/test/test_graph_context.py
index cc5786dd..0a7ac8a3 100644
--- a/test/test_graph_context.py
+++ b/test/test_graph_context.py
@@ -10,7 +10,7 @@ from nose.exc import SkipTest
class ContextTestCase(unittest.TestCase):
- store = 'default'
+ store = "default"
slow = True
tmppath = None
@@ -18,24 +18,22 @@ class ContextTestCase(unittest.TestCase):
try:
self.graph = ConjunctiveGraph(store=self.store)
except ImportError:
- raise SkipTest(
- "Dependencies for store '%s' not available!" % self.store)
+ raise SkipTest("Dependencies for store '%s' not available!" % self.store)
if self.store == "SQLite":
- _, self.tmppath = mkstemp(
- prefix='test', dir='/tmp', suffix='.sqlite')
+ _, self.tmppath = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
else:
self.tmppath = mkdtemp()
self.graph.open(self.tmppath, create=True)
- self.michel = URIRef(u'michel')
- self.tarek = URIRef(u'tarek')
- self.bob = URIRef(u'bob')
- self.likes = URIRef(u'likes')
- self.hates = URIRef(u'hates')
- self.pizza = URIRef(u'pizza')
- self.cheese = URIRef(u'cheese')
+ self.michel = URIRef(u"michel")
+ self.tarek = URIRef(u"tarek")
+ self.bob = URIRef(u"bob")
+ self.likes = URIRef(u"likes")
+ self.hates = URIRef(u"hates")
+ self.pizza = URIRef(u"pizza")
+ self.cheese = URIRef(u"cheese")
- self.c1 = URIRef(u'context-1')
- self.c2 = URIRef(u'context-2')
+ self.c1 = URIRef(u"context-1")
+ self.c2 = URIRef(u"context-2")
# delete the graph for each test!
self.graph.remove((None, None, None))
@@ -176,6 +174,7 @@ class ContextTestCase(unittest.TestCase):
def cid(c):
return c.identifier
+
self.assertTrue(self.c1 in map(cid, self.graph.contexts()))
self.assertTrue(self.c2 in map(cid, self.graph.contexts()))
@@ -305,32 +304,55 @@ class ContextTestCase(unittest.TestCase):
asserte(set(c.predicates(bob, pizza)), set([hates]))
asserte(set(c.predicates(bob, michel)), set([hates]))
- asserte(set(
- c.subject_objects(hates)), set([(bob, pizza), (bob, michel)]))
+ asserte(set(c.subject_objects(hates)), set([(bob, pizza), (bob, michel)]))
+ asserte(
+ set(c.subject_objects(likes)),
+ set(
+ [
+ (tarek, cheese),
+ (michel, cheese),
+ (michel, pizza),
+ (bob, cheese),
+ (tarek, pizza),
+ ]
+ ),
+ )
+
+ asserte(
+ set(c.predicate_objects(michel)), set([(likes, cheese), (likes, pizza)])
+ )
+ asserte(
+ set(c.predicate_objects(bob)),
+ set([(likes, cheese), (hates, pizza), (hates, michel)]),
+ )
asserte(
- set(c.subject_objects(likes)), set(
- [(tarek, cheese), (michel, cheese),
- (michel, pizza), (bob, cheese),
- (tarek, pizza)]))
-
- asserte(set(c.predicate_objects(
- michel)), set([(likes, cheese), (likes, pizza)]))
- asserte(set(c.predicate_objects(bob)), set([(likes,
- cheese), (hates, pizza), (hates, michel)]))
- asserte(set(c.predicate_objects(
- tarek)), set([(likes, cheese), (likes, pizza)]))
-
- asserte(set(c.subject_predicates(
- pizza)), set([(bob, hates), (tarek, likes), (michel, likes)]))
- asserte(set(c.subject_predicates(cheese)), set([(
- bob, likes), (tarek, likes), (michel, likes)]))
+ set(c.predicate_objects(tarek)), set([(likes, cheese), (likes, pizza)])
+ )
+
+ asserte(
+ set(c.subject_predicates(pizza)),
+ set([(bob, hates), (tarek, likes), (michel, likes)]),
+ )
+ asserte(
+ set(c.subject_predicates(cheese)),
+ set([(bob, likes), (tarek, likes), (michel, likes)]),
+ )
asserte(set(c.subject_predicates(michel)), set([(bob, hates)]))
- asserte(set(c), set(
- [(bob, hates, michel), (bob, likes, cheese),
- (tarek, likes, pizza), (michel, likes, pizza),
- (michel, likes, cheese), (bob, hates, pizza),
- (tarek, likes, cheese)]))
+ asserte(
+ set(c),
+ set(
+ [
+ (bob, hates, michel),
+ (bob, likes, cheese),
+ (tarek, likes, pizza),
+ (michel, likes, pizza),
+ (michel, likes, cheese),
+ (bob, hates, pizza),
+ (tarek, likes, cheese),
+ ]
+ ),
+ )
# remove stuff and make sure the graph is empty again
self.removeStuff()
@@ -340,22 +362,29 @@ class ContextTestCase(unittest.TestCase):
# dynamically create classes for each registered Store
pluginname = None
-if __name__ == '__main__':
+if __name__ == "__main__":
if len(sys.argv) > 1:
pluginname = sys.argv[1]
tests = 0
for s in plugin.plugins(pluginname, plugin.Store):
- if s.name in ('default', 'IOMemory', 'Auditable',
- 'Concurrent', 'SPARQLStore', 'SPARQLUpdateStore'):
+ if s.name in (
+ "default",
+ "IOMemory",
+ "Auditable",
+ "Concurrent",
+ "SPARQLStore",
+ "SPARQLUpdateStore",
+ ):
continue # these are tested by default
if not s.getClass().context_aware:
continue
- locals()["t%d" % tests] = type("%sContextTestCase" % s.name, (
- ContextTestCase,), {"store": s.name})
+ locals()["t%d" % tests] = type(
+ "%sContextTestCase" % s.name, (ContextTestCase,), {"store": s.name}
+ )
tests += 1
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_graph_formula.py b/test/test_graph_formula.py
index 412e7a77..52764628 100644
--- a/test/test_graph_formula.py
+++ b/test/test_graph_formula.py
@@ -31,8 +31,8 @@ def testFormulaStore(store="default", configString=None):
g.destroy(configString)
g.open(configString)
else:
- if store == 'SQLite':
- _, path = mkstemp(prefix='test', dir='/tmp', suffix='.sqlite')
+ if store == "SQLite":
+ _, path = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
g.open(path, create=True)
else:
g.open(mkdtemp(), create=True)
@@ -45,10 +45,10 @@ def testFormulaStore(store="default", configString=None):
assert type(formulaA) == QuotedGraph and type(formulaB) == QuotedGraph
# a = URIRef('http://test/a')
- b = URIRef('http://test/b')
- c = URIRef('http://test/c')
- d = URIRef('http://test/d')
- v = Variable('y')
+ b = URIRef("http://test/b")
+ c = URIRef("http://test/c")
+ d = URIRef("http://test/d")
+ v = Variable("y")
universe = ConjunctiveGraph(g.store)
@@ -69,10 +69,8 @@ def testFormulaStore(store="default", configString=None):
assert len(list(formulaA.triples((None, None, None)))) == 2
assert len(list(formulaB.triples((None, None, None)))) == 2
assert len(list(universe.triples((None, None, None)))) == 3
- assert len(list(formulaB.triples(
- (None, URIRef('http://test/d'), None)))) == 2
- assert len(list(universe.triples(
- (None, URIRef('http://test/d'), None)))) == 1
+ assert len(list(formulaB.triples((None, URIRef("http://test/d"), None)))) == 2
+ assert len(list(universe.triples((None, URIRef("http://test/d"), None)))) == 1
# #context tests
# #test contexts with triple argument
@@ -115,13 +113,13 @@ def testFormulaStore(store="default", configString=None):
assert len(universe) == 0
g.close()
- if store == 'SQLite':
+ if store == "SQLite":
os.unlink(path)
else:
g.store.destroy(configString)
except:
g.close()
- if store == 'SQLite':
+ if store == "SQLite":
os.unlink(path)
else:
g.store.destroy(configString)
@@ -130,21 +128,19 @@ def testFormulaStore(store="default", configString=None):
def testFormulaStores():
pluginname = None
- if __name__ == '__main__':
+ if __name__ == "__main__":
if len(sys.argv) > 1:
pluginname = sys.argv[1]
for s in plugin.plugins(pluginname, plugin.Store):
- if s.name in (
- 'Auditable', 'Concurrent',
- 'SPARQLStore', 'SPARQLUpdateStore',
- ):
+ if s.name in ("Auditable", "Concurrent", "SPARQLStore", "SPARQLUpdateStore",):
continue
if not s.getClass().formula_aware:
continue
yield testFormulaStore, s.name
-if __name__ == '__main__':
+if __name__ == "__main__":
import nose
+
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_graph_items.py b/test/test_graph_items.py
index b6cb2529..bc13c367 100644
--- a/test/test_graph_items.py
+++ b/test/test_graph_items.py
@@ -2,7 +2,8 @@ from rdflib import Graph, RDF
def test_recursive_list_detection():
- g = Graph().parse(data="""
+ g = Graph().parse(
+ data="""
@prefix : <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
<> :value _:a .
@@ -14,7 +15,9 @@ def test_recursive_list_detection():
<> :value [ :first "turtles"; :rest _:c ] .
_:c :first "all the way down"; :rest _:a .
- """, format="turtle")
+ """,
+ format="turtle",
+ )
for v in g.objects(None, RDF.value):
try:
diff --git a/test/test_hex_binary.py b/test/test_hex_binary.py
index 419a47e2..5f46bda5 100644
--- a/test/test_hex_binary.py
+++ b/test/test_hex_binary.py
@@ -3,11 +3,9 @@
import unittest
import binascii
from rdflib import Literal, XSD
-import six
class HexBinaryTestCase(unittest.TestCase):
-
def test_int(self):
self._test_integer(5)
self._test_integer(3452)
@@ -23,40 +21,28 @@ class HexBinaryTestCase(unittest.TestCase):
bin_i = l.toPython()
self.assertEquals(int(binascii.hexlify(bin_i), 16), i)
- if six.PY2:
- self.assertEquals(unicode(l), hex_i)
- else:
- self.assertEquals(str(l), hex_i)
+ self.assertEquals(str(l), hex_i)
self.assertEquals(int(hex_i, 16), i)
- if six.PY2:
- self.assertEquals(int(unicode(l), 16), i)
- else:
- self.assertEquals(int(l, 16), i)
+ self.assertEquals(int(l, 16), i)
self.assertEquals(int(str(l), 16), i)
def test_unicode(self):
str1 = u"Test utf-8 string éàë"
# u hexstring
- hex_str1 = binascii.hexlify(str1.encode('utf-8')).decode()
+ hex_str1 = binascii.hexlify(str1.encode("utf-8")).decode()
l1 = Literal(hex_str1, datatype=XSD.hexBinary)
b_str1 = l1.toPython()
- self.assertEquals(b_str1.decode('utf-8'), str1)
- if six.PY2:
- self.assertEquals(unicode(l1), hex_str1)
- else:
- self.assertEquals(str(l1), hex_str1)
+ self.assertEquals(b_str1.decode("utf-8"), str1)
+ self.assertEquals(str(l1), hex_str1)
# b hexstring
- hex_str1b = binascii.hexlify(str1.encode('utf-8'))
+ hex_str1b = binascii.hexlify(str1.encode("utf-8"))
l1b = Literal(hex_str1b, datatype=XSD.hexBinary)
b_str1b = l1b.toPython()
self.assertEquals(b_str1, b_str1b)
- self.assertEquals(b_str1b.decode('utf-8'), str1)
- if six.PY2:
- self.assertEquals(unicode(l1b), hex_str1)
- else:
- self.assertEquals(str(l1b), hex_str1)
+ self.assertEquals(b_str1b.decode("utf-8"), str1)
+ self.assertEquals(str(l1b), hex_str1)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_initbindings.py b/test/test_initbindings.py
index efa94191..138041b2 100644
--- a/test/test_initbindings.py
+++ b/test/test_initbindings.py
@@ -1,181 +1,349 @@
-
from nose import SkipTest
from rdflib.plugins.sparql import prepareQuery
from rdflib import ConjunctiveGraph, URIRef, Literal, Namespace, Variable
+
g = ConjunctiveGraph()
def testStr():
- a = set(g.query("SELECT (STR(?target) AS ?r) WHERE { }", initBindings={'target': URIRef('example:a')}))
- b = set(g.query("SELECT (STR(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"))
+ a = set(
+ g.query(
+ "SELECT (STR(?target) AS ?r) WHERE { }",
+ initBindings={"target": URIRef("example:a")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (STR(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"
+ )
+ )
assert a == b, "STR: %r != %r" % (a, b)
def testIsIRI():
- a = set(g.query("SELECT (isIRI(?target) AS ?r) WHERE { }", initBindings={'target': URIRef('example:a')}))
- b = set(g.query("SELECT (isIRI(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"))
+ a = set(
+ g.query(
+ "SELECT (isIRI(?target) AS ?r) WHERE { }",
+ initBindings={"target": URIRef("example:a")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (isIRI(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"
+ )
+ )
assert a == b, "isIRI: %r != %r" % (a, b)
def testIsBlank():
- a = set(g.query("SELECT (isBlank(?target) AS ?r) WHERE { }", initBindings={'target': URIRef('example:a')}))
- b = set(g.query("SELECT (isBlank(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"))
+ a = set(
+ g.query(
+ "SELECT (isBlank(?target) AS ?r) WHERE { }",
+ initBindings={"target": URIRef("example:a")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (isBlank(?target) AS ?r) WHERE { } VALUES (?target) {(<example:a>)}"
+ )
+ )
assert a == b, "isBlank: %r != %r" % (a, b)
def testIsLiteral():
- a = set(g.query("SELECT (isLiteral(?target) AS ?r) WHERE { }", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT (isLiteral(?target) AS ?r) WHERE { } VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT (isLiteral(?target) AS ?r) WHERE { }",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (isLiteral(?target) AS ?r) WHERE { } VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "isLiteral: %r != %r" % (a, b)
def testUCase():
- a = set(g.query("SELECT (UCASE(?target) AS ?r) WHERE { }", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT (UCASE(?target) AS ?r) WHERE { } VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT (UCASE(?target) AS ?r) WHERE { }",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (UCASE(?target) AS ?r) WHERE { } VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "UCASE: %r != %r" % (a, b)
def testNoFunc():
- a = set(g.query("SELECT ?target WHERE { }", initBindings={'target': Literal('example')}))
+ a = set(
+ g.query("SELECT ?target WHERE { }", initBindings={"target": Literal("example")})
+ )
b = set(g.query("SELECT ?target WHERE { } VALUES (?target) {('example')}"))
assert a == b, "no func: %r != %r" % (a, b)
def testOrderBy():
- a = set(g.query("SELECT ?target WHERE { } ORDER BY ?target", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT ?target WHERE { } ORDER BY ?target VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "orderby: %r != %r" % (a, b)
def testOrderByFunc():
- a = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target VALUES (?target) {('example')} "))
+ a = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target VALUES (?target) {('example')} "
+ )
+ )
assert a == b, "orderbyFunc: %r != %r" % (a, b)
def testNoFuncLimit():
- a = set(g.query("SELECT ?target WHERE { } LIMIT 1", initBindings={'target': Literal('example')}))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } LIMIT 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
b = set(g.query("SELECT ?target WHERE { } LIMIT 1 VALUES (?target) {('example')}"))
assert a == b, "limit: %r != %r" % (a, b)
def testOrderByLimit():
- a = set(g.query("SELECT ?target WHERE { } ORDER BY ?target LIMIT 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target LIMIT 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "orderbyLimit: %r != %r" % (a, b)
def testOrderByFuncLimit():
- a = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "orderbyFuncLimit: %r != %r" % (a, b)
def testNoFuncOffset():
- a = set(g.query("SELECT ?target WHERE { } OFFSET 1", initBindings={'target': Literal('example')}))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } OFFSET 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
b = set(g.query("SELECT ?target WHERE { } OFFSET 1 VALUES (?target) {('example')}"))
assert a == b, "offset: %r != %r" % (a, b)
def testNoFuncLimitOffset():
- a = set(g.query("SELECT ?target WHERE { } LIMIT 1 OFFSET 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT ?target WHERE { } LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } LIMIT 1 OFFSET 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT ?target WHERE { } LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "limitOffset: %r != %r" % (a, b)
def testOrderByLimitOffset():
- a = set(g.query("SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT ?target WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "orderbyLimitOffset: %r != %r" % (a, b)
def testOrderByFuncLimitOffset():
- a = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT (UCASE(?target) as ?r) WHERE { } ORDER BY ?target LIMIT 1 OFFSET 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "orderbyFuncLimitOffset: %r != %r" % (a, b)
def testDistinct():
- a = set(g.query("SELECT DISTINCT ?target WHERE { }", initBindings={'target': Literal('example')}))
+ a = set(
+ g.query(
+ "SELECT DISTINCT ?target WHERE { }",
+ initBindings={"target": Literal("example")},
+ )
+ )
b = set(g.query("SELECT DISTINCT ?target WHERE { } VALUES (?target) {('example')}"))
assert a == b, "distinct: %r != %r" % (a, b)
def testDistinctOrderBy():
- a = set(g.query("SELECT DISTINCT ?target WHERE { } ORDER BY ?target", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT DISTINCT ?target WHERE { } ORDER BY ?target VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT DISTINCT ?target WHERE { } ORDER BY ?target",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT DISTINCT ?target WHERE { } ORDER BY ?target VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "distinctOrderby: %r != %r" % (a, b)
def testDistinctOrderByLimit():
- a = set(g.query("SELECT DISTINCT ?target WHERE { } ORDER BY ?target LIMIT 1", initBindings={'target': Literal('example')}))
- b = set(g.query("SELECT DISTINCT ?target WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"))
+ a = set(
+ g.query(
+ "SELECT DISTINCT ?target WHERE { } ORDER BY ?target LIMIT 1",
+ initBindings={"target": Literal("example")},
+ )
+ )
+ b = set(
+ g.query(
+ "SELECT DISTINCT ?target WHERE { } ORDER BY ?target LIMIT 1 VALUES (?target) {('example')}"
+ )
+ )
assert a == b, "distinctOrderbyLimit: %r != %r" % (a, b)
def testPrepare():
- q = prepareQuery('SELECT ?target WHERE { }')
+ q = prepareQuery("SELECT ?target WHERE { }")
r = list(g.query(q))
e = []
- assert r == e, 'prepare: %r != %r' % (r, e)
+ assert r == e, "prepare: %r != %r" % (r, e)
- r = list(g.query(q, initBindings={'target': Literal('example')}))
- e = [(Literal('example'),)]
- assert r == e, 'prepare: %r != %r' % (r, e)
+ r = list(g.query(q, initBindings={"target": Literal("example")}))
+ e = [(Literal("example"),)]
+ assert r == e, "prepare: %r != %r" % (r, e)
r = list(g.query(q))
e = []
- assert r == e, 'prepare: %r != %r' % (r, e)
+ assert r == e, "prepare: %r != %r" % (r, e)
def testData():
data = ConjunctiveGraph()
- data += [(URIRef('urn:a'), URIRef('urn:p'), Literal('a')),
- (URIRef('urn:b'), URIRef('urn:p'), Literal('b'))]
-
- a = set(g.query("SELECT ?target WHERE { ?target <urn:p> ?val }", initBindings={'val': Literal('a')}))
- b = set(g.query("SELECT ?target WHERE { ?target <urn:p> ?val } VALUES (?val) {('a')}"))
+ data += [
+ (URIRef("urn:a"), URIRef("urn:p"), Literal("a")),
+ (URIRef("urn:b"), URIRef("urn:p"), Literal("b")),
+ ]
+
+ a = set(
+ g.query(
+ "SELECT ?target WHERE { ?target <urn:p> ?val }",
+ initBindings={"val": Literal("a")},
+ )
+ )
+ b = set(
+ g.query("SELECT ?target WHERE { ?target <urn:p> ?val } VALUES (?val) {('a')}")
+ )
assert a == b, "data: %r != %r" % (a, b)
def testAsk():
- a = set(g.query("ASK { }", initBindings={'target': Literal('example')}))
+ a = set(g.query("ASK { }", initBindings={"target": Literal("example")}))
b = set(g.query("ASK { } VALUES (?target) {('example')}"))
assert a == b, "ask: %r != %r" % (a, b)
EX = Namespace("http://example.com/")
g2 = ConjunctiveGraph()
-g2.bind('', EX)
-g2.add((EX['s1'], EX['p'], EX['o1']))
-g2.add((EX['s2'], EX['p'], EX['o2']))
+g2.bind("", EX)
+g2.add((EX["s1"], EX["p"], EX["o1"]))
+g2.add((EX["s2"], EX["p"], EX["o2"]))
def testStringKey():
- results = list(g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={"s": EX['s1']}))
+ results = list(
+ g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={"s": EX["s1"]})
+ )
assert len(results) == 1, results
def testStringKeyWithQuestionMark():
- results = list(g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={"?s": EX['s1']}))
+ results = list(
+ g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={"?s": EX["s1"]})
+ )
assert len(results) == 1, results
def testVariableKey():
- results = list(g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={Variable("s"): EX['s1']}))
+ results = list(
+ g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={Variable("s"): EX["s1"]})
+ )
assert len(results) == 1, results
+
def testVariableKeyWithQuestionMark():
- results = list(g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings={Variable("?s"): EX['s1']}))
+ results = list(
+ g2.query(
+ "SELECT ?o WHERE { ?s :p ?o }", initBindings={Variable("?s"): EX["s1"]}
+ )
+ )
assert len(results) == 1, results
def testFilter():
- results = list(g2.query("SELECT ?o WHERE { ?s :p ?o FILTER (?s = ?x)}", initBindings={Variable("?x"): EX['s1']}))
+ results = list(
+ g2.query(
+ "SELECT ?o WHERE { ?s :p ?o FILTER (?s = ?x)}",
+ initBindings={Variable("?x"): EX["s1"]},
+ )
+ )
assert len(results) == 1, results
@@ -183,5 +351,6 @@ if __name__ == "__main__":
import sys
import nose
+
if len(sys.argv) == 1:
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_iomemory.py b/test/test_iomemory.py
index 897cc8b2..4239fc3c 100644
--- a/test/test_iomemory.py
+++ b/test/test_iomemory.py
@@ -1,4 +1,3 @@
-
"""
Iteration and update conflict with set based IOMemory store
@@ -63,6 +62,6 @@ def test_concurrent2():
assert i == n
-if __name__ == '__main__':
+if __name__ == "__main__":
test_concurrent1()
test_concurrent2()
diff --git a/test/test_issue084.py b/test/test_issue084.py
index 75da7615..23536550 100644
--- a/test/test_issue084.py
+++ b/test/test_issue084.py
@@ -1,5 +1,5 @@
from codecs import getreader
-from six import BytesIO, StringIO
+from io import BytesIO, StringIO
from rdflib import URIRef, Literal
from rdflib.graph import Graph
@@ -20,55 +20,65 @@ rdf = u"""@prefix skos:
"""
-rdf_utf8 = rdf.encode('utf-8')
+rdf_utf8 = rdf.encode("utf-8")
-rdf_reader = getreader('utf-8')(BytesIO(rdf.encode('utf-8')))
+rdf_reader = getreader("utf-8")(BytesIO(rdf.encode("utf-8")))
def test_a():
"""Test reading N3 from a unicode objects as data"""
g = Graph()
- g.parse(data=rdf, format='n3')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(data=rdf, format="n3")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
def test_b():
"""Test reading N3 from a utf8 encoded string as data"""
g = Graph()
- g.parse(data=rdf_utf8, format='n3')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(data=rdf_utf8, format="n3")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
def test_c():
"""Test reading N3 from a codecs.StreamReader, outputting unicode"""
g = Graph()
-# rdf_reader.seek(0)
- g.parse(source=rdf_reader, format='n3')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ # rdf_reader.seek(0)
+ g.parse(source=rdf_reader, format="n3")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
def test_d():
"""Test reading N3 from a StringIO over the unicode object"""
g = Graph()
- g.parse(source=StringIO(rdf), format='n3')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(source=StringIO(rdf), format="n3")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
def test_e():
"""Test reading N3 from a BytesIO over the string object"""
g = Graph()
- g.parse(source=BytesIO(rdf_utf8), format='n3')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(source=BytesIO(rdf_utf8), format="n3")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
# this is unicode
@@ -86,27 +96,32 @@ rdfxml = u"""<?xml version="1.0" encoding="UTF-8"?>
"""
# this is a str
-rdfxml_utf8 = rdfxml.encode('utf-8')
+rdfxml_utf8 = rdfxml.encode("utf-8")
-rdfxml_reader = getreader('utf-8')(BytesIO(rdfxml.encode('utf-8')))
+rdfxml_reader = getreader("utf-8")(BytesIO(rdfxml.encode("utf-8")))
def test_xml_a():
"""Test reading XML from a unicode object as data"""
g = Graph()
- g.parse(data=rdfxml, format='xml')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(data=rdfxml, format="xml")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
def test_xml_b():
"""Test reading XML from a utf8 encoded string object as data"""
g = Graph()
- g.parse(data=rdfxml_utf8, format='xml')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(data=rdfxml_utf8, format="xml")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
+
# The following two cases are currently not supported by Graph.parse
# def test_xml_c():
@@ -127,7 +142,9 @@ def test_xml_b():
def test_xml_e():
"""Test reading XML from a BytesIO created from utf8 encoded string"""
g = Graph()
- g.parse(source=BytesIO(rdfxml_utf8), format='xml')
- v = g.value(subject=URIRef("http://www.test.org/#CI"),
- predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"))
- assert v == Literal(u"C\u00f4te d'Ivoire", lang='fr')
+ g.parse(source=BytesIO(rdfxml_utf8), format="xml")
+ v = g.value(
+ subject=URIRef("http://www.test.org/#CI"),
+ predicate=URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ )
+ assert v == Literal(u"C\u00f4te d'Ivoire", lang="fr")
diff --git a/test/test_issue1003.py b/test/test_issue1003.py
new file mode 100644
index 00000000..d59caf3d
--- /dev/null
+++ b/test/test_issue1003.py
@@ -0,0 +1,133 @@
+from rdflib import Graph, Dataset, Literal, Namespace, RDF, URIRef
+from rdflib.namespace import SKOS, DCTERMS
+
+"""
+Testing scenarios:
+ 1. no base set
+ 2. base set at graph creation
+ 3. base set at serialization
+ 4. base set at both graph creation & serialization, serialization overrides
+ 5. multiple serialization side effect checking
+ 6. checking results for RDF/XML
+ 7. checking results for N3
+ 8. checking results for TriX & TriG
+"""
+
+# variables
+base_one = Namespace("http://one.org/")
+base_two = Namespace("http://two.org/")
+title = Literal("Title", lang="en")
+description = Literal("Test Description", lang="en")
+creator = URIRef("https://creator.com")
+cs = URIRef("")
+
+# starting graph
+g = Graph()
+g.add((cs, RDF.type, SKOS.ConceptScheme))
+g.add((cs, DCTERMS.creator, creator))
+g.add((cs, DCTERMS.source, URIRef("nick")))
+g.bind("dct", DCTERMS)
+g.bind("skos", SKOS)
+
+
+# 1. no base set for graph, no base set for serialization
+g1 = Graph()
+g1 += g
+# @base should not be in output
+assert "@base" not in g.serialize(format="turtle").decode("utf-8")
+
+
+# 2. base one set for graph, no base set for serialization
+g2 = Graph(base=base_one)
+g2 += g
+# @base should be in output, from Graph (one)
+assert "@base <http://one.org/> ." in g2.serialize(format="turtle").decode("utf-8")
+
+
+# 3. no base set for graph, base two set for serialization
+g3 = Graph()
+g3 += g
+# @base should be in output, from serialization (two)
+assert "@base <http://two.org/> ." in g3.serialize(
+ format="turtle", base=base_two
+).decode("utf-8")
+
+
+# 4. base one set for graph, base two set for serialization, serialization (two) overrides
+g4 = Graph(base=base_one)
+g4 += g
+# @base should be in output, from serialization (two)
+assert "@base <http://two.org/> ." in g4.serialize(
+ format="turtle", base=base_two
+).decode("utf-8")
+# just checking that the serialization setting (two) hasn't snuck through
+assert "@base <http://one.org/> ." not in g4.serialize(
+ format="turtle", base=base_two
+).decode("utf-8")
+
+
+# 5. multiple serialization side effect checking
+g5 = Graph()
+g5 += g
+# @base should be in output, from serialization (two)
+assert "@base <http://two.org/> ." in g5.serialize(
+ format="turtle", base=base_two
+).decode("utf-8")
+
+# checking for side effects - no base now set for this serialization
+# @base should not be in output
+assert "@base" not in g5.serialize(format="turtle").decode("utf-8")
+
+
+# 6. checking results for RDF/XML
+g6 = Graph()
+g6 += g
+g6.bind("dct", DCTERMS)
+g6.bind("skos", SKOS)
+assert "@xml:base" not in g6.serialize(format="xml").decode("utf-8")
+assert 'xml:base="http://one.org/"' in g6.serialize(format="xml", base=base_one).decode(
+ "utf-8"
+)
+g6.base = base_two
+assert 'xml:base="http://two.org/"' in g6.serialize(format="xml").decode("utf-8")
+assert 'xml:base="http://one.org/"' in g6.serialize(format="xml", base=base_one).decode(
+ "utf-8"
+)
+
+# 7. checking results for N3
+g7 = Graph()
+g7 += g
+g7.bind("dct", DCTERMS)
+g7.bind("skos", SKOS)
+assert "@xml:base" not in g7.serialize(format="xml").decode("utf-8")
+assert "@base <http://one.org/> ." in g7.serialize(format="n3", base=base_one).decode(
+ "utf-8"
+)
+g7.base = base_two
+assert "@base <http://two.org/> ." in g7.serialize(format="n3").decode("utf-8")
+assert "@base <http://one.org/> ." in g7.serialize(format="n3", base=base_one).decode(
+ "utf-8"
+)
+
+# 8. checking results for TriX & TriG
+# TriX can specify a base per graph but setting a base for the whole dataset is also possible
+base_three = Namespace("http://three.org/")
+ds1 = Dataset()
+ds1.bind("dct", DCTERMS)
+ds1.bind("skos", SKOS)
+g8 = ds1.graph(URIRef("http://g8.com/"), base=base_one)
+g9 = ds1.graph(URIRef("http://g9.com/"))
+g8 += g
+g9 += g
+g9.base = base_two
+ds1.base = base_three
+
+trix = ds1.serialize(format="trix", base=Namespace("http://two.org/")).decode("utf-8")
+assert '<graph xml:base="http://one.org/">' in trix
+assert '<graph xml:base="http://two.org/">' in trix
+assert '<TriX xml:base="http://two.org/"' in trix
+
+trig = ds1.serialize(format="trig", base=Namespace("http://two.org/")).decode("utf-8")
+assert "@base <http://one.org/> ." not in trig
+assert "@base <http://three.org/> ." not in trig
+assert "@base <http://two.org/> ." in trig
diff --git a/test/test_issue160.py b/test/test_issue160.py
index 17ae18c5..b3c7b422 100644
--- a/test/test_issue160.py
+++ b/test/test_issue160.py
@@ -43,11 +43,10 @@ target2xml = """\
class CollectionTest(TestCase):
-
def test_collection_render(self):
- foo = Namespace('http://www.example.org/foo/ns/')
- ex = Namespace('http://www.example.org/example/foo/')
- rdf = Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
+ foo = Namespace("http://www.example.org/foo/ns/")
+ ex = Namespace("http://www.example.org/example/foo/")
+ rdf = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
# Works: x a rdf:List, a foo:Other ;
# Fails: y a foo:Wrapper, foo:wraps x; x a rdf:List, a foo:Other ;
@@ -58,14 +57,14 @@ class CollectionTest(TestCase):
target2.parse(data=target2xml)
g = ConjunctiveGraph()
- bits = [ex['a'], ex['b'], ex['c']]
- l = Collection(g, ex['thing'], bits)
- triple = (ex['thing'], rdf['type'], foo['Other'])
+ bits = [ex["a"], ex["b"], ex["c"]]
+ l = Collection(g, ex["thing"], bits)
+ triple = (ex["thing"], rdf["type"], foo["Other"])
g.add(triple)
- triple = (ex['thing'], foo['property'], Literal('Some Value'))
+ triple = (ex["thing"], foo["property"], Literal("Some Value"))
g.add(triple)
for b in bits:
- triple = (b, rdf['type'], foo['Item'])
+ triple = (b, rdf["type"], foo["Item"])
g.add(triple)
self.assertEqual(g.isomorphic(target1), True)
diff --git a/test/test_issue161.py b/test/test_issue161.py
index db8c2857..fa7529dc 100644
--- a/test/test_issue161.py
+++ b/test/test_issue161.py
@@ -1,15 +1,12 @@
from unittest import TestCase
-from six import b
from rdflib.graph import ConjunctiveGraph
class EntityTest(TestCase):
-
def test_turtle_namespace_prefixes(self):
g = ConjunctiveGraph()
- n3 = \
- """
+ n3 = """
@prefix _9: <http://data.linkedmdb.org/resource/movie/> .
@prefix p_9: <urn:test:> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@@ -21,13 +18,13 @@ class EntityTest(TestCase):
rdfs:label "Cecil B. DeMille (Director)";
_9:director_name "Cecil B. DeMille" ."""
- g.parse(data=n3, format='n3')
+ g.parse(data=n3, format="n3")
turtle = g.serialize(format="turtle")
# Check round-tripping, just for kicks.
g = ConjunctiveGraph()
- g.parse(data=turtle, format='turtle')
+ g.parse(data=turtle, format="turtle")
# Shouldn't have got to here
s = g.serialize(format="turtle")
- self.assertTrue(b('@prefix _9') not in s)
+ self.assertTrue("@prefix _9".encode("latin-1") not in s)
diff --git a/test/test_issue184.py b/test/test_issue184.py
index b4fba8d3..7693dd1c 100644
--- a/test/test_issue184.py
+++ b/test/test_issue184.py
@@ -12,8 +12,8 @@ def test_escaping_of_triple_doublequotes():
is emitted by the serializer, which in turn cannot be parsed correctly.
"""
g = ConjunctiveGraph()
- g.add((URIRef('http://foobar'), URIRef('http://fooprop'), Literal('abc\ndef"""""')))
+ g.add((URIRef("http://foobar"), URIRef("http://fooprop"), Literal('abc\ndef"""""')))
# assert g.serialize(format='n3') == '@prefix ns1: <http:// .\n\nns1:foobar ns1:fooprop """abc\ndef\\"\\"\\"\\"\\"""" .\n\n'
g2 = ConjunctiveGraph()
- g2.parse(data=g.serialize(format='n3'), format='n3')
+ g2.parse(data=g.serialize(format="n3"), format="n3")
assert g.isomorphic(g2) is True
diff --git a/test/test_issue190.py b/test/test_issue190.py
index e5173eff..f8ab37e7 100644
--- a/test/test_issue190.py
+++ b/test/test_issue190.py
@@ -4,7 +4,8 @@ from rdflib.graph import ConjunctiveGraph
from rdflib.parser import StringInputSource
import textwrap
-prefix = textwrap.dedent('''\
+prefix = textwrap.dedent(
+ """\
@prefix nie: <http://www.semanticdesktop.org/ontologies/2007/01/19/nie#> .
@prefix nfo: <http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#> .
@prefix nco: <http://www.semanticdesktop.org/ontologies/2007/03/22/nco#> .
@@ -15,9 +16,11 @@ prefix = textwrap.dedent('''\
@prefix dc: <http://dublincore.org/documents/2010/10/11/dces/#> .
@prefix nmm: <http://library.gnome.org/devel/ontology/unstable/nmm-classes.html#> .
@prefix nao: <http://www.semanticdesktop.org/ontologies/2007/08/15/nao#> .
- ''')
+ """
+)
-meta = textwrap.dedent(u"""\
+meta = textwrap.dedent(
+ u"""\
a nfo:PaginatedTextDocument ;
nie:title "SV Meldung" ;
nco:creator [ a nco:Contact ;
@@ -30,16 +33,20 @@ a nfo:PaginatedTextDocument ;
nie:plainTextContent "%s" .
} } WHERE { {
?tag1 a nao:Tag ; nao:prefLabel "()" .
-""")
+"""
+)
test_string1 = u"""\
Betriebsnummer der Einzugsstelle:\nKnappschaft\n980 0000 6\nWICHTIGES DOKUMENT - SORGFÄLTIG AUFBEWAHREN!\n """
def test1():
- meta1 = meta.encode('utf-8') % test_string1.encode('utf-8')
+ meta1 = meta.encode("utf-8") % test_string1.encode("utf-8")
graph = ConjunctiveGraph()
- graph.parse(StringInputSource(prefix + '<http://example.org/>' + meta1), format='n3')
+ graph.parse(
+ StringInputSource(prefix + "<http://example.org/>" + meta1), format="n3"
+ )
+
test_string2 = u"""\
Betriebsnummer der Einzugsstelle:
@@ -50,8 +57,11 @@ WICHTIGES DOKUMENT - SORGFÄLTIG AUFBEWAHREN!
def test2():
- meta2 = meta.encode('utf-8') % test_string2.encode('utf-8')
+ meta2 = meta.encode("utf-8") % test_string2.encode("utf-8")
graph = ConjunctiveGraph()
- graph.parse(StringInputSource(prefix + '<http://example.org/>' + meta2), format='n3')
+ graph.parse(
+ StringInputSource(prefix + "<http://example.org/>" + meta2), format="n3"
+ )
+
raise SkipTest("Known issue, with newlines in text")
diff --git a/test/test_issue200.py b/test/test_issue200.py
index 80ce3f31..3fb76894 100644
--- a/test/test_issue200.py
+++ b/test/test_issue200.py
@@ -9,11 +9,11 @@ try:
import os.pipe
except ImportError:
from nose import SkipTest
- raise SkipTest('No os.fork() and/or os.pipe() on this platform, skipping')
+ raise SkipTest("No os.fork() and/or os.pipe() on this platform, skipping")
-class TestRandomSeedInFork(unittest.TestCase):
+class TestRandomSeedInFork(unittest.TestCase):
def test_bnode_id_differs_in_fork(self):
"""Checks that os.fork()ed child processes produce a
different sequence of BNode ids from the parent process.
@@ -28,14 +28,15 @@ class TestRandomSeedInFork(unittest.TestCase):
os.waitpid(pid, 0) # make sure the child process gets cleaned up
else:
os.close(r)
- w = os.fdopen(w, 'w')
+ w = os.fdopen(w, "w")
cb = rdflib.term.BNode()
w.write(cb)
w.close()
os._exit(0)
- assert txt != str(pb1), "Parent process BNode id: " + \
- "%s, child process BNode id: %s" % (
- txt, str(pb1))
+ assert txt != str(pb1), (
+ "Parent process BNode id: "
+ + "%s, child process BNode id: %s" % (txt, str(pb1))
+ )
if __name__ == "__main__":
diff --git a/test/test_issue209.py b/test/test_issue209.py
index 1feb0615..083d763d 100644
--- a/test/test_issue209.py
+++ b/test/test_issue209.py
@@ -11,7 +11,6 @@ def makeNode():
class TestRandomSeedInThread(unittest.TestCase):
-
def test_bnode_id_gen_in_thread(self):
"""
"""
diff --git a/test/test_issue223.py b/test/test_issue223.py
index e1981a30..ab61d9d8 100644
--- a/test/test_issue223.py
+++ b/test/test_issue223.py
@@ -11,12 +11,14 @@ ttl = """
def test_collection_with_duplicates():
g = Graph().parse(data=ttl, format="turtle")
- for _, _, o in g.triples((URIRef("http://example.org/s"), URIRef("http://example.org/p"), None)):
+ for _, _, o in g.triples(
+ (URIRef("http://example.org/s"), URIRef("http://example.org/p"), None)
+ ):
break
c = g.collection(o)
assert list(c) == list(URIRef("http://example.org/" + x) for x in ["a", "b", "a"])
assert len(c) == 3
-if __name__ == '__main__':
+if __name__ == "__main__":
test_collection_with_duplicates()
diff --git a/test/test_issue247.py b/test/test_issue247.py
index 780d578b..747dd1e0 100644
--- a/test/test_issue247.py
+++ b/test/test_issue247.py
@@ -31,7 +31,6 @@ passxml = """\
class TestXMLLiteralwithLangAttr(unittest.TestCase):
-
def test_successful_parse_of_literal_without_xmllang_attr(self):
"""
Test parse of Literal without xmllang attr passes
diff --git a/test/test_issue248.py b/test/test_issue248.py
index 4cc490a6..528e81a2 100644
--- a/test/test_issue248.py
+++ b/test/test_issue248.py
@@ -3,7 +3,6 @@ import unittest
class TestSerialization(unittest.TestCase):
-
def test_issue_248(self):
"""
Ed Summers Thu, 24 May 2007 12:21:17 -0700
@@ -63,31 +62,22 @@ class TestSerialization(unittest.TestCase):
"""
graph = rdflib.Graph()
- DC = rdflib.Namespace('http://purl.org/dc/terms/')
- SKOS = rdflib.Namespace('http://www.w3.org/2004/02/skos/core#')
- LCCO = rdflib.Namespace('http://loc.gov/catdir/cpso/lcco/')
-
- graph.bind('dc', DC)
- graph.bind('skos', SKOS)
- graph.bind('lcco', LCCO)
-
- concept = rdflib.URIRef(LCCO['1'])
- graph.add(
- (concept,
- rdflib.RDF.type,
- SKOS['Concept']))
- graph.add(
- (concept,
- SKOS['prefLabel'],
- rdflib.Literal('Scrapbooks')))
- graph.add(
- (concept,
- DC['LCC'],
- rdflib.Literal('AC999.0999 - AC999999.Z9999')))
- sg = graph.serialize(format='n3', base=LCCO).decode('utf8')
+ DC = rdflib.Namespace("http://purl.org/dc/terms/")
+ SKOS = rdflib.Namespace("http://www.w3.org/2004/02/skos/core#")
+ LCCO = rdflib.Namespace("http://loc.gov/catdir/cpso/lcco/")
+
+ graph.bind("dc", DC)
+ graph.bind("skos", SKOS)
+ graph.bind("lcco", LCCO)
+
+ concept = rdflib.URIRef(LCCO["1"])
+ graph.add((concept, rdflib.RDF.type, SKOS["Concept"]))
+ graph.add((concept, SKOS["prefLabel"], rdflib.Literal("Scrapbooks")))
+ graph.add((concept, DC["LCC"], rdflib.Literal("AC999.0999 - AC999999.Z9999")))
+ sg = graph.serialize(format="n3", base=LCCO).decode("utf8")
# See issue 248
# Actual test should be the inverse of the below ...
- self.assertTrue('<1> a skos:Concept ;' in sg, sg)
+ self.assertTrue("<1> a skos:Concept ;" in sg, sg)
if __name__ == "__main__":
diff --git a/test/test_issue274.py b/test/test_issue274.py
index 288d7857..79fc4d15 100644
--- a/test/test_issue274.py
+++ b/test/test_issue274.py
@@ -3,15 +3,18 @@ from nose.tools import eq_
from unittest import TestCase
from rdflib import BNode, Graph, Literal, Namespace, RDFS, XSD
-from rdflib.plugins.sparql.operators import register_custom_function, unregister_custom_function
+from rdflib.plugins.sparql.operators import (
+ register_custom_function,
+ unregister_custom_function,
+)
-EX = Namespace('http://example.org/')
+EX = Namespace("http://example.org/")
G = Graph()
G.add((BNode(), RDFS.label, Literal("bnode")))
NS = {
- 'ex': EX,
- 'rdfs': RDFS,
- 'xsd': XSD,
+ "ex": EX,
+ "rdfs": RDFS,
+ "xsd": XSD,
}
@@ -28,142 +31,145 @@ def teardown():
def test_cast_string_to_string():
- res = query('''SELECT (xsd:string("hello") as ?x) {}''')
+ res = query("""SELECT (xsd:string("hello") as ?x) {}""")
eq_(list(res)[0][0], Literal("hello", datatype=XSD.string))
def test_cast_int_to_string():
- res = query('''SELECT (xsd:string(42) as ?x) {}''')
+ res = query("""SELECT (xsd:string(42) as ?x) {}""")
eq_(list(res)[0][0], Literal("42", datatype=XSD.string))
def test_cast_float_to_string():
- res = query('''SELECT (xsd:string(3.14) as ?x) {}''')
+ res = query("""SELECT (xsd:string(3.14) as ?x) {}""")
eq_(list(res)[0][0], Literal("3.14", datatype=XSD.string))
def test_cast_bool_to_string():
- res = query('''SELECT (xsd:string(true) as ?x) {}''')
+ res = query("""SELECT (xsd:string(true) as ?x) {}""")
eq_(list(res)[0][0], Literal("true", datatype=XSD.string))
def test_cast_iri_to_string():
- res = query('''SELECT (xsd:string(<http://example.org/>) as ?x) {}''')
+ res = query("""SELECT (xsd:string(<http://example.org/>) as ?x) {}""")
eq_(list(res)[0][0], Literal("http://example.org/", datatype=XSD.string))
def test_cast_datetime_to_datetime():
- res = query('''SELECT (xsd:dateTime("1970-01-01T00:00:00Z"^^xsd:dateTime) as ?x) {}''')
+ res = query(
+ """SELECT (xsd:dateTime("1970-01-01T00:00:00Z"^^xsd:dateTime) as ?x) {}"""
+ )
eq_(list(res)[0][0], Literal("1970-01-01T00:00:00Z", datatype=XSD.dateTime))
def test_cast_string_to_datetime():
- res = query('''SELECT (xsd:dateTime("1970-01-01T00:00:00Z"^^xsd:string) as ?x) {}''')
+ res = query(
+ """SELECT (xsd:dateTime("1970-01-01T00:00:00Z"^^xsd:string) as ?x) {}"""
+ )
eq_(list(res)[0][0], Literal("1970-01-01T00:00:00Z", datatype=XSD.dateTime))
def test_cast_string_to_float():
- res = query('''SELECT (xsd:float("0.5") as ?x) {}''')
+ res = query("""SELECT (xsd:float("0.5") as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.float))
def test_cast_int_to_float():
- res = query('''SELECT (xsd:float(1) as ?x) {}''')
+ res = query("""SELECT (xsd:float(1) as ?x) {}""")
eq_(list(res)[0][0], Literal("1", datatype=XSD.float))
def test_cast_float_to_float():
- res = query('''SELECT (xsd:float("0.5"^^xsd:float) as ?x) {}''')
+ res = query("""SELECT (xsd:float("0.5"^^xsd:float) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.float))
def test_cast_double_to_float():
- res = query('''SELECT (xsd:float("0.5"^^xsd:double) as ?x) {}''')
+ res = query("""SELECT (xsd:float("0.5"^^xsd:double) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.float))
def test_cast_decimal_to_float():
- res = query('''SELECT (xsd:float("0.5"^^xsd:decimal) as ?x) {}''')
+ res = query("""SELECT (xsd:float("0.5"^^xsd:decimal) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.float))
def test_cast_string_to_double():
- res = query('''SELECT (xsd:double("0.5") as ?x) {}''')
+ res = query("""SELECT (xsd:double("0.5") as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.double))
def test_cast_int_to_double():
- res = query('''SELECT (xsd:double(1) as ?x) {}''')
+ res = query("""SELECT (xsd:double(1) as ?x) {}""")
eq_(list(res)[0][0], Literal("1", datatype=XSD.double))
def test_cast_float_to_double():
- res = query('''SELECT (xsd:double("0.5"^^xsd:float) as ?x) {}''')
+ res = query("""SELECT (xsd:double("0.5"^^xsd:float) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.double))
def test_cast_double_to_double():
- res = query('''SELECT (xsd:double("0.5"^^xsd:double) as ?x) {}''')
+ res = query("""SELECT (xsd:double("0.5"^^xsd:double) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.double))
def test_cast_decimal_to_double():
- res = query('''SELECT (xsd:double("0.5"^^xsd:decimal) as ?x) {}''')
+ res = query("""SELECT (xsd:double("0.5"^^xsd:decimal) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.double))
def test_cast_string_to_decimal():
- res = query('''SELECT (xsd:decimal("0.5") as ?x) {}''')
+ res = query("""SELECT (xsd:decimal("0.5") as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.decimal))
def test_cast_int_to_decimal():
- res = query('''SELECT (xsd:decimal(1) as ?x) {}''')
+ res = query("""SELECT (xsd:decimal(1) as ?x) {}""")
eq_(list(res)[0][0], Literal("1", datatype=XSD.decimal))
def test_cast_float_to_decimal():
- res = query('''SELECT (xsd:decimal("0.5"^^xsd:float) as ?x) {}''')
+ res = query("""SELECT (xsd:decimal("0.5"^^xsd:float) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.decimal))
def test_cast_double_to_decimal():
- res = query('''SELECT (xsd:decimal("0.5"^^xsd:double) as ?x) {}''')
+ res = query("""SELECT (xsd:decimal("0.5"^^xsd:double) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.decimal))
def test_cast_decimal_to_decimal():
- res = query('''SELECT (xsd:decimal("0.5"^^xsd:decimal) as ?x) {}''')
+ res = query("""SELECT (xsd:decimal("0.5"^^xsd:decimal) as ?x) {}""")
eq_(list(res)[0][0], Literal("0.5", datatype=XSD.decimal))
def test_cast_string_to_int():
- res = query('''SELECT (xsd:integer("42") as ?x) {}''')
+ res = query("""SELECT (xsd:integer("42") as ?x) {}""")
eq_(list(res)[0][0], Literal("42", datatype=XSD.integer))
def test_cast_int_to_int():
- res = query('''SELECT (xsd:integer(42) as ?x) {}''')
+ res = query("""SELECT (xsd:integer(42) as ?x) {}""")
eq_(list(res)[0][0], Literal("42", datatype=XSD.integer))
def test_cast_string_to_bool():
- res = query('''SELECT (xsd:boolean("TRUE") as ?x) {}''')
+ res = query("""SELECT (xsd:boolean("TRUE") as ?x) {}""")
eq_(list(res)[0][0], Literal("true", datatype=XSD.boolean))
def test_cast_bool_to_bool():
- res = query('''SELECT (xsd:boolean(true) as ?x) {}''')
+ res = query("""SELECT (xsd:boolean(true) as ?x) {}""")
eq_(list(res)[0][0], Literal("true", datatype=XSD.boolean))
def test_cast_bool_to_bool():
- res = query('''SELECT (ex:f(42, "hello") as ?x) {}''')
+ res = query("""SELECT (ex:f(42, "hello") as ?x) {}""")
eq_(len(list(res)), 0)
class TestCustom(TestCase):
-
@staticmethod
def f(x, y):
return Literal("%s %s" % (x, y), datatype=XSD.string)
@@ -186,13 +192,13 @@ class TestCustom(TestCase):
unregister_custom_function(EX.f, lambda x, y: None)
def test_f(self):
- res = query('''SELECT (ex:f(42, "hello") as ?x) {}''')
+ res = query("""SELECT (ex:f(42, "hello") as ?x) {}""")
eq_(list(res)[0][0], Literal("42 hello", datatype=XSD.string))
def test_f_too_few_args(self):
- res = query('''SELECT (ex:f(42) as ?x) {}''')
+ res = query("""SELECT (ex:f(42) as ?x) {}""")
eq_(len(list(res)), 0)
def test_f_too_many_args(self):
- res = query('''SELECT (ex:f(42, "hello", "world") as ?x) {}''')
+ res = query("""SELECT (ex:f(42, "hello", "world") as ?x) {}""")
eq_(len(list(res)), 0)
diff --git a/test/test_issue363.py b/test/test_issue363.py
index 7fc6cb26..792c2441 100644
--- a/test/test_issue363.py
+++ b/test/test_issue363.py
@@ -1,7 +1,7 @@
import rdflib
from nose.tools import assert_raises
-data = '''<?xml version="1.0" encoding="utf-8"?>
+data = """<?xml version="1.0" encoding="utf-8"?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:http="http://www.w3.org/2011/http#">
@@ -13,9 +13,9 @@ data = '''<?xml version="1.0" encoding="utf-8"?>
</http:HeaderElement>
</rdf:RDF>
-'''
+"""
-data2 = '''<?xml version="1.0" encoding="utf-8"?>
+data2 = """<?xml version="1.0" encoding="utf-8"?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns="http://www.example.org/meeting_organization#">
@@ -26,11 +26,11 @@ data2 = '''<?xml version="1.0" encoding="utf-8"?>
</Location>
</rdf:Description>
</rdf:RDF>
-'''
+"""
def test_broken_rdfxml():
- #import ipdb; ipdb.set_trace()
+ # import ipdb; ipdb.set_trace()
def p():
rdflib.Graph().parse(data=data)
@@ -39,9 +39,9 @@ def test_broken_rdfxml():
def test_parsetype_resource():
g = rdflib.Graph().parse(data=data2)
- print(g.serialize(format='n3'))
+ print(g.serialize(format="n3"))
-if __name__ == '__main__':
+if __name__ == "__main__":
test_broken_rdfxml()
test_parsetype_resource()
diff --git a/test/test_issue379.py b/test/test_issue379.py
index 31dfce2b..348e3d0f 100644
--- a/test/test_issue379.py
+++ b/test/test_issue379.py
@@ -41,7 +41,7 @@ class TestBaseAllowsHash(TestCase):
permitted for an IRIREF:
http://www.w3.org/TR/2014/REC-turtle-20140225/#grammar-production-prefixID
"""
- self.g.parse(data=prefix_data, format='n3')
+ self.g.parse(data=prefix_data, format="n3")
self.assertIsInstance(next(self.g.subjects()), rdflib.URIRef)
def test_parse_successful_base_with_hash(self):
@@ -50,7 +50,7 @@ class TestBaseAllowsHash(TestCase):
permitted for an '@prefix' since both allow an IRIREF:
http://www.w3.org/TR/2014/REC-turtle-20140225/#grammar-production-base
"""
- self.g.parse(data=base_data, format='n3')
+ self.g.parse(data=base_data, format="n3")
self.assertIsInstance(next(self.g.subjects()), rdflib.URIRef)
diff --git a/test/test_issue381.py b/test/test_issue381.py
index 3ab21d88..a48cafe7 100644
--- a/test/test_issue381.py
+++ b/test/test_issue381.py
@@ -12,10 +12,9 @@ def test_no_spurious_semicolon():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- ])
+ expected.addN(
+ t + (expected,) for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -28,10 +27,9 @@ def test_one_spurious_semicolon():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- ])
+ expected.addN(
+ t + (expected,) for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -44,10 +42,9 @@ def test_one_spurious_semicolon_no_perdiod():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- ])
+ expected.addN(
+ t + (expected,) for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -60,10 +57,9 @@ def test_two_spurious_semicolons_no_period():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- ])
+ expected.addN(
+ t + (expected,) for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -76,10 +72,9 @@ def test_one_spurious_semicolons_bnode():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (BNode("a"), NS.b, NS.c),
- (BNode("a"), NS.d, NS.e),
- ])
+ expected.addN(
+ t + (expected,) for t in [(BNode("a"), NS.b, NS.c), (BNode("a"), NS.d, NS.e),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -98,11 +93,10 @@ def test_pathological():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- (NS.a, NS.f, NS.g),
- ])
+ expected.addN(
+ t + (expected,)
+ for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e), (NS.a, NS.f, NS.g),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
@@ -116,10 +110,9 @@ def test_mixing_spurious_semicolons_and_commas():
} WHERE {}
"""
expected = Graph()
- expected.addN(t + (expected,) for t in [
- (NS.a, NS.b, NS.c),
- (NS.a, NS.d, NS.e),
- (NS.a, NS.d, NS.f),
- ])
+ expected.addN(
+ t + (expected,)
+ for t in [(NS.a, NS.b, NS.c), (NS.a, NS.d, NS.e), (NS.a, NS.d, NS.f),]
+ )
got = Graph().query(sparql).graph
assert isomorphic(got, expected), got.serialize(format="turtle")
diff --git a/test/test_issue432.py b/test/test_issue432.py
index 05d8258a..c0731eb0 100644
--- a/test/test_issue432.py
+++ b/test/test_issue432.py
@@ -12,7 +12,7 @@ def test_trig_default_graph():
<g1> { <d> <e> <f> . }
<g2> { <g> <h> <i> . }
"""
- ds.parse(data=data, format='trig', publicID=ds.default_context.identifier)
+ ds.parse(data=data, format="trig", publicID=ds.default_context.identifier)
assert len(list(ds.contexts())) == 3
assert len(list(ds.default_context)) == 2
diff --git a/test/test_issue446.py b/test/test_issue446.py
index 79cd41be..98c46578 100644
--- a/test/test_issue446.py
+++ b/test/test_issue446.py
@@ -7,16 +7,15 @@ from rdflib import Graph, URIRef, Literal
def test_sparql_unicode():
g = Graph()
trip = (
- URIRef('http://example.org/foo'),
- URIRef('http://example.org/bar'),
- URIRef(u'http://example.org/jörn')
+ URIRef("http://example.org/foo"),
+ URIRef("http://example.org/bar"),
+ URIRef(u"http://example.org/jörn"),
)
g.add(trip)
q = 'select ?s ?p ?o where { ?s ?p ?o . FILTER(lang(?o) = "") }'
r = list(g.query(q))
- assert r == [], \
- 'sparql query %r should return nothing but returns %r' % (q, r)
+ assert r == [], "sparql query %r should return nothing but returns %r" % (q, r)
-if __name__ == '__main__':
+if __name__ == "__main__":
test_sparql_unicode()
diff --git a/test/test_issue492.py b/test/test_issue492.py
index 754e5cbf..713ce7ac 100644
--- a/test/test_issue492.py
+++ b/test/test_issue492.py
@@ -6,7 +6,7 @@ import rdflib
def test_issue492():
- query = '''
+ query = """
prefix owl: <http://www.w3.org/2002/07/owl#>
prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
select ?x
@@ -15,7 +15,7 @@ def test_issue492():
?x rdf:rest/rdf:first _:6.
?x rdf:rest/rdf:first _:5.
}
- '''
+ """
print(rdflib.__version__)
g = rdflib.Graph()
diff --git a/test/test_issue523.py b/test/test_issue523.py
index 774167f3..2910cdd7 100644
--- a/test/test_issue523.py
+++ b/test/test_issue523.py
@@ -5,10 +5,12 @@ import rdflib
def test_issue523():
g = rdflib.Graph()
- r = g.query("SELECT (<../baz> as ?test) WHERE {}",
- base=rdflib.URIRef("http://example.org/foo/bar"))
+ r = g.query(
+ "SELECT (<../baz> as ?test) WHERE {}",
+ base=rdflib.URIRef("http://example.org/foo/bar"),
+ )
res = r.serialize(format="csv")
- assert res == b'test\r\nhttp://example.org/baz\r\n', repr(res)
+ assert res == b"test\r\nhttp://example.org/baz\r\n", repr(res)
# expected result:
# test
diff --git a/test/test_issue532.py b/test/test_issue532.py
index 422dd507..0e9fa89f 100644
--- a/test/test_issue532.py
+++ b/test/test_issue532.py
@@ -32,7 +32,7 @@ def test_issue532():
"""
g = Graph()
- g.parse(data=data, format='n3')
+ g.parse(data=data, format="n3")
getnewMeps = """
PREFIX lpv: <http://purl.org/linkedpolitics/vocabulary/>
diff --git a/test/test_issue545.py b/test/test_issue545.py
index 86c8723a..ea9f185b 100644
--- a/test/test_issue545.py
+++ b/test/test_issue545.py
@@ -1,4 +1,3 @@
-
from rdflib.plugins import sparql
from rdflib.namespace import RDFS, OWL, DC, SKOS
@@ -15,4 +14,5 @@ def test_issue():
?property rdfs:label | skos:altLabel ?label .
}
""",
- initNs={"rdfs": RDFS, "owl": OWL, "dc": DC, "skos": SKOS})
+ initNs={"rdfs": RDFS, "owl": OWL, "dc": DC, "skos": SKOS},
+ )
diff --git a/test/test_issue554.py b/test/test_issue554.py
index ba946cf4..4ea83d21 100644
--- a/test/test_issue554.py
+++ b/test/test_issue554.py
@@ -5,11 +5,10 @@ import rdflib
def test_sparql_empty_no_row():
g = rdflib.Graph()
- q = 'select ?whatever { }'
+ q = "select ?whatever { }"
r = list(g.query(q))
- assert r == [], \
- 'sparql query %s should return empty list but returns %s' % (q, r)
+ assert r == [], "sparql query %s should return empty list but returns %s" % (q, r)
-if __name__ == '__main__':
+if __name__ == "__main__":
test_sparql_empty_no_row()
diff --git a/test/test_issue563.py b/test/test_issue563.py
index 1ae8460d..58721236 100644
--- a/test/test_issue563.py
+++ b/test/test_issue563.py
@@ -25,22 +25,26 @@ def test_sample():
g = Graph()
results = set(tuple(i) for i in g.query(QUERY % ("SAMPLE", "SAMPLE")))
- assert results == set([
- (Literal(2), Literal(6), Literal(10)),
- (Literal(3), Literal(9), Literal(15)),
- (Literal(5), None, Literal(25)),
- ])
+ assert results == set(
+ [
+ (Literal(2), Literal(6), Literal(10)),
+ (Literal(3), Literal(9), Literal(15)),
+ (Literal(5), None, Literal(25)),
+ ]
+ )
def test_count():
g = Graph()
results = set(tuple(i) for i in g.query(QUERY % ("COUNT", "COUNT")))
- assert results == set([
- (Literal(2), Literal(1), Literal(1)),
- (Literal(3), Literal(1), Literal(1)),
- (Literal(5), Literal(0), Literal(1)),
- ])
+ assert results == set(
+ [
+ (Literal(2), Literal(1), Literal(1)),
+ (Literal(3), Literal(1), Literal(1)),
+ (Literal(5), Literal(0), Literal(1)),
+ ]
+ )
if __name__ == "__main__":
diff --git a/test/test_issue579.py b/test/test_issue579.py
index 9ba326b3..2420e077 100644
--- a/test/test_issue579.py
+++ b/test/test_issue579.py
@@ -6,9 +6,9 @@ from rdflib.namespace import FOAF, RDF
def test_issue579():
g = Graph()
- g.bind('foaf', FOAF)
+ g.bind("foaf", FOAF)
n = Namespace("http://myname/")
- g.add((n.bob, FOAF.name, Literal('bb')))
+ g.add((n.bob, FOAF.name, Literal("bb")))
# query is successful.
assert len(g.query("select ?n where { ?n foaf:name 'bb' . }")) == 1
# update is not.
diff --git a/test/test_issue604.py b/test/test_issue604.py
index aef19b8c..7a827241 100644
--- a/test/test_issue604.py
+++ b/test/test_issue604.py
@@ -6,7 +6,7 @@ from rdflib.collection import Collection
def test_issue604():
- EX = Namespace('http://ex.co/')
+ EX = Namespace("http://ex.co/")
g = Graph()
bn = BNode()
g.add((EX.s, EX.p, bn))
diff --git a/test/test_issue655.py b/test/test_issue655.py
index 1c640709..cac449f1 100644
--- a/test/test_issue655.py
+++ b/test/test_issue655.py
@@ -5,53 +5,27 @@ from rdflib.compare import to_isomorphic
class TestIssue655(unittest.TestCase):
-
def test_issue655(self):
# make sure that inf and nan are serialized correctly
- dt = XSD['double'].n3()
- self.assertEqual(
- Literal(float("inf"))._literal_n3(True),
- '"INF"^^%s' % dt
- )
- self.assertEqual(
- Literal(float("-inf"))._literal_n3(True),
- '"-INF"^^%s' % dt
- )
- self.assertEqual(
- Literal(float("nan"))._literal_n3(True),
- '"NaN"^^%s' % dt
- )
+ dt = XSD["double"].n3()
+ self.assertEqual(Literal(float("inf"))._literal_n3(True), '"INF"^^%s' % dt)
+ self.assertEqual(Literal(float("-inf"))._literal_n3(True), '"-INF"^^%s' % dt)
+ self.assertEqual(Literal(float("nan"))._literal_n3(True), '"NaN"^^%s' % dt)
- dt = XSD['decimal'].n3()
- self.assertEqual(
- Literal(Decimal("inf"))._literal_n3(True),
- '"INF"^^%s' % dt
- )
- self.assertEqual(
- Literal(Decimal("-inf"))._literal_n3(True),
- '"-INF"^^%s' % dt
- )
- self.assertEqual(
- Literal(Decimal("nan"))._literal_n3(True),
- '"NaN"^^%s' % dt
- )
+ dt = XSD["decimal"].n3()
+ self.assertEqual(Literal(Decimal("inf"))._literal_n3(True), '"INF"^^%s' % dt)
+ self.assertEqual(Literal(Decimal("-inf"))._literal_n3(True), '"-INF"^^%s' % dt)
+ self.assertEqual(Literal(Decimal("nan"))._literal_n3(True), '"NaN"^^%s' % dt)
self.assertEqual(
- Literal("inf", datatype=XSD['decimal'])._literal_n3(True),
- '"INF"^^%s' % dt
+ Literal("inf", datatype=XSD["decimal"])._literal_n3(True), '"INF"^^%s' % dt
)
# assert that non-numerical aren't changed
- self.assertEqual(
- Literal('inf')._literal_n3(True),
- '"inf"'
- )
- self.assertEqual(
- Literal('nan')._literal_n3(True),
- '"nan"'
- )
+ self.assertEqual(Literal("inf")._literal_n3(True), '"inf"')
+ self.assertEqual(Literal("nan")._literal_n3(True), '"nan"')
- PROV = Namespace('http://www.w3.org/ns/prov#')
+ PROV = Namespace("http://www.w3.org/ns/prov#")
bob = URIRef("http://example.org/object/Bob")
@@ -62,7 +36,7 @@ class TestIssue655(unittest.TestCase):
# Build g2 out of the deserialisation of g1 serialisation
g2 = Graph()
- g2.parse(data=g1.serialize(format='turtle'), format='turtle')
+ g2.parse(data=g1.serialize(format="turtle"), format="turtle")
self.assertTrue(to_isomorphic(g1) == to_isomorphic(g2))
diff --git a/test/test_issue715.py b/test/test_issue715.py
index 121e05fd..a2e21169 100644
--- a/test/test_issue715.py
+++ b/test/test_issue715.py
@@ -11,19 +11,18 @@ from rdflib import URIRef, Graph
def test_issue_715():
g = Graph()
a, b, x, y, z = [URIRef(s) for s in "abxyz"]
- isa = URIRef('isa')
+ isa = URIRef("isa")
g.add((a, isa, x))
g.add((a, isa, y))
g.add((b, isa, x))
- l1 = list(g.query('SELECT ?child ?parent WHERE {?child <isa> ?parent .}'))
- l2 = list(g.query('SELECT ?child ?parent WHERE {?child <isa>+ ?parent .}'))
+ l1 = list(g.query("SELECT ?child ?parent WHERE {?child <isa> ?parent .}"))
+ l2 = list(g.query("SELECT ?child ?parent WHERE {?child <isa>+ ?parent .}"))
assert len(l1) == len(l2)
assert set(l1) == set(l2)
- l3 = list(g.query('SELECT ?child ?parent WHERE {?child <isa>* ?parent .}'))
+ l3 = list(g.query("SELECT ?child ?parent WHERE {?child <isa>* ?parent .}"))
assert len(l3) == 7
- assert set(l3) == set(l1).union({(URIRef(n), URIRef(n)) for
- n in (a, b, x, y)})
+ assert set(l3) == set(l1).union({(URIRef(n), URIRef(n)) for n in (a, b, x, y)})
g.add((y, isa, z))
- l4 = list(g.query('SELECT ?child ?parent WHERE {?child <isa>* ?parent .}'))
+ l4 = list(g.query("SELECT ?child ?parent WHERE {?child <isa>* ?parent .}"))
assert len(l4) == 10
assert (a, z) in l4
diff --git a/test/test_issue733.py b/test/test_issue733.py
index bffeb400..2a6b612a 100644
--- a/test/test_issue733.py
+++ b/test/test_issue733.py
@@ -12,13 +12,12 @@ from rdflib.namespace import RDF, RDFS, NamespaceManager, Namespace
class TestIssue733(unittest.TestCase):
-
def test_issue_733(self):
g = Graph()
- example = Namespace('http://example.org/')
+ example = Namespace("http://example.org/")
g.add((example.S, example.P, example.O1))
g.add((example.S, example.P, example.O2))
- q = '''
+ q = """
prefix ex:<http://example.org/>
select ?st ?ot ?gt where {
{SELECT (count(*) as ?st) where {
@@ -34,20 +33,20 @@ class TestIssue733(unittest.TestCase):
FILTER (?o!=ex:O1 && ?s!=ex:O2)
}}
}
- '''
+ """
res = g.query(q)
assert len(res) == 1
results = [[lit.toPython() for lit in line] for line in res]
- assert results[0][0]== 2
+ assert results[0][0] == 2
assert results[0][1] == 1
assert results[0][2] == 1
def test_issue_733_independant(self):
g = Graph()
- example = Namespace('http://example.org/')
+ example = Namespace("http://example.org/")
g.add((example.S, example.P, example.O1))
g.add((example.S, example.P, example.O2))
- q = '''
+ q = """
prefix ex:<http://example.org/>
select ?st where {
{SELECT (count(*) as ?st) where {
@@ -55,12 +54,12 @@ class TestIssue733(unittest.TestCase):
FILTER (?s=ex:S)
}}
}
- '''
+ """
res = g.query(q)
assert len(res) == 1
results = [[lit.toPython() for lit in line] for line in res]
assert results[0][0] == 2
- q = '''
+ q = """
prefix ex:<http://example.org/>
select ?st where {
{SELECT (count(*) as ?st) where {
@@ -68,7 +67,7 @@ class TestIssue733(unittest.TestCase):
FILTER (?o=ex:O1)
}}
}
- '''
+ """
res = g.query(q)
results = [[lit.toPython() for lit in line] for line in res]
assert results[0][0] == 1
diff --git a/test/test_issue920.py b/test/test_issue920.py
index eb12edc4..7aafa794 100644
--- a/test/test_issue920.py
+++ b/test/test_issue920.py
@@ -14,22 +14,21 @@ import unittest
class TestIssue920(unittest.TestCase):
-
def test_issue_920(self):
g = Graph()
# NT tests
- g.parse(data='<a:> <b:> <c:> .', format='nt')
- g.parse(data='<http://a> <http://b> <http://c> .', format='nt')
- g.parse(data='<https://a> <http://> <http://c> .', format='nt')
+ g.parse(data="<a:> <b:> <c:> .", format="nt")
+ g.parse(data="<http://a> <http://b> <http://c> .", format="nt")
+ g.parse(data="<https://a> <http://> <http://c> .", format="nt")
# related parser tests
- g.parse(data='<a:> <b:> <c:> .', format='turtle')
- g.parse(data='<http://a> <http://b> <http://c> .', format='turtle')
- g.parse(data='<https://a> <http://> <http://c> .', format='turtle')
+ g.parse(data="<a:> <b:> <c:> .", format="turtle")
+ g.parse(data="<http://a> <http://b> <http://c> .", format="turtle")
+ g.parse(data="<https://a> <http://> <http://c> .", format="turtle")
- g.parse(data='<a:> <b:> <c:> .', format='n3')
- g.parse(data='<http://a> <http://b> <http://c> .', format='n3')
- g.parse(data='<https://a> <http://> <http://c> .', format='n3')
+ g.parse(data="<a:> <b:> <c:> .", format="n3")
+ g.parse(data="<http://a> <http://b> <http://c> .", format="n3")
+ g.parse(data="<https://a> <http://> <http://c> .", format="n3")
if __name__ == "__main__":
diff --git a/test/test_issue923.py b/test/test_issue923.py
index 3becb6f8..48f2e4de 100644
--- a/test/test_issue923.py
+++ b/test/test_issue923.py
@@ -32,4 +32,7 @@ RESULT_SOURCE = u"""\
def test_issue_923():
with StringIO(RESULT_SOURCE) as result_source:
- Result.parse(source=result_source, content_type="application/sparql-results+json;charset=utf-8")
+ Result.parse(
+ source=result_source,
+ content_type="application/sparql-results+json;charset=utf-8",
+ )
diff --git a/test/test_issue953.py b/test/test_issue953.py
index 1e211e12..879486d8 100644
--- a/test/test_issue953.py
+++ b/test/test_issue953.py
@@ -5,11 +5,11 @@ import unittest
class TestIssue953(unittest.TestCase):
-
def test_issue_939(self):
- lit = Literal(Fraction('2/3'))
- assert lit.datatype == URIRef('http://www.w3.org/2002/07/owl#rational')
+ lit = Literal(Fraction("2/3"))
+ assert lit.datatype == URIRef("http://www.w3.org/2002/07/owl#rational")
assert lit.n3() == '"2/3"^^<http://www.w3.org/2002/07/owl#rational>'
+
if __name__ == "__main__":
unittest.main()
diff --git a/test/test_issue_git_200.py b/test/test_issue_git_200.py
index 32a4ba9f..84e06b1a 100644
--- a/test/test_issue_git_200.py
+++ b/test/test_issue_git_200.py
@@ -10,7 +10,8 @@ def test_broken_add():
nose.tools.assert_raises(AssertionError, lambda: g.addN([(1, 2, 3, g)]))
-if __name__ == '__main__':
+if __name__ == "__main__":
import nose
import sys
+
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_issue_git_336.py b/test/test_issue_git_336.py
index f3250107..6a8abb7c 100644
--- a/test/test_issue_git_336.py
+++ b/test/test_issue_git_336.py
@@ -8,7 +8,7 @@ import nose.tools
# stripped-down culprit:
-'''\
+"""\
@prefix fs: <http://freesurfer.net/fswiki/terms/> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@@ -17,21 +17,24 @@ import nose.tools
prov:Entity ;
fs:mrisurf.c-cvs_version
"$Id: mrisurf.c,v 1.693.2.2 2011/04/27 19:21:05 nicks Exp $" .
-'''
+"""
def test_ns_localname_roundtrip():
- XNS = rdflib.Namespace('http://example.net/fs')
+ XNS = rdflib.Namespace("http://example.net/fs")
g = rdflib.Graph()
- g.bind('xns', str(XNS))
- g.add((
- rdflib.URIRef('http://example.com/thingy'),
- XNS['lowecase.xxx-xxx_xxx'], # <- not round trippable
- rdflib.Literal("Junk")))
- turtledump = g.serialize(format="turtle").decode('utf-8')
- xmldump = g.serialize().decode('utf-8')
+ g.bind("xns", str(XNS))
+ g.add(
+ (
+ rdflib.URIRef("http://example.com/thingy"),
+ XNS["lowecase.xxx-xxx_xxx"], # <- not round trippable
+ rdflib.Literal("Junk"),
+ )
+ )
+ turtledump = g.serialize(format="turtle").decode("utf-8")
+ xmldump = g.serialize().decode("utf-8")
g1 = rdflib.Graph()
g1.parse(data=xmldump)
@@ -39,7 +42,8 @@ def test_ns_localname_roundtrip():
g1.parse(data=turtledump, format="turtle")
-if __name__ == '__main__':
+if __name__ == "__main__":
import nose
import sys
+
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_literal.py b/test/test_literal.py
index dae2d187..8124f99d 100644
--- a/test/test_literal.py
+++ b/test/test_literal.py
@@ -1,14 +1,11 @@
import unittest
import rdflib # needed for eval(repr(...)) below
-from rdflib.term import Literal, URIRef, _XSD_DOUBLE, bind
-from six import integer_types, PY3, string_types
+from rdflib.term import Literal, URIRef, _XSD_DOUBLE, bind, _XSD_BOOLEAN
def uformat(s):
- if PY3:
- return s.replace("u'", "'")
- return s
+ return s.replace("u'", "'")
class TestLiteral(unittest.TestCase):
@@ -37,7 +34,7 @@ class TestLiteral(unittest.TestCase):
"""
g = rdflib.Graph()
g.parse(data=d)
- a = rdflib.Literal('a\\b')
+ a = rdflib.Literal("a\\b")
b = list(g.objects())[0]
self.assertEqual(a, b)
@@ -48,8 +45,9 @@ class TestLiteral(unittest.TestCase):
class TestNew(unittest.TestCase):
def testCantPassLangAndDatatype(self):
- self.assertRaises(TypeError,
- Literal, 'foo', lang='en', datatype=URIRef("http://example.com/"))
+ self.assertRaises(
+ TypeError, Literal, "foo", lang="en", datatype=URIRef("http://example.com/")
+ )
def testFromOtherLiteral(self):
l = Literal(1)
@@ -60,7 +58,7 @@ class TestNew(unittest.TestCase):
# change datatype
l = Literal("1")
l2 = Literal(l, datatype=rdflib.XSD.integer)
- self.assertTrue(isinstance(l2.value, integer_types))
+ self.assertTrue(isinstance(l2.value, int))
def testDatatypeGetsAutoURIRefConversion(self):
# drewp disapproves of this behavior, but it should be
@@ -74,21 +72,26 @@ class TestNew(unittest.TestCase):
class TestRepr(unittest.TestCase):
def testOmitsMissingDatatypeAndLang(self):
- self.assertEqual(repr(Literal("foo")),
- uformat("rdflib.term.Literal(u'foo')"))
+ self.assertEqual(repr(Literal("foo")), uformat("rdflib.term.Literal(u'foo')"))
def testOmitsMissingDatatype(self):
- self.assertEqual(repr(Literal("foo", lang='en')),
- uformat("rdflib.term.Literal(u'foo', lang='en')"))
+ self.assertEqual(
+ repr(Literal("foo", lang="en")),
+ uformat("rdflib.term.Literal(u'foo', lang='en')"),
+ )
def testOmitsMissingLang(self):
self.assertEqual(
- repr(Literal("foo", datatype=URIRef('http://example.com/'))),
- uformat("rdflib.term.Literal(u'foo', datatype=rdflib.term.URIRef(u'http://example.com/'))"))
+ repr(Literal("foo", datatype=URIRef("http://example.com/"))),
+ uformat(
+ "rdflib.term.Literal(u'foo', datatype=rdflib.term.URIRef(u'http://example.com/'))"
+ ),
+ )
def testSubclassNameAppearsInRepr(self):
class MyLiteral(Literal):
pass
+
x = MyLiteral(u"foo")
self.assertEqual(repr(x), uformat("MyLiteral(u'foo')"))
@@ -101,18 +104,40 @@ class TestDoubleOutput(unittest.TestCase):
self.assertTrue(out in ["8.8e-01", "0.88"], out)
-class TestBindings(unittest.TestCase):
+class TestParseBoolean(unittest.TestCase):
+ """confirms the fix for https://github.com/RDFLib/rdflib/issues/913"""
- def testBinding(self):
+ def testTrueBoolean(self):
+ test_value = Literal("tRue", datatype=_XSD_BOOLEAN)
+ self.assertTrue(test_value.value)
+ test_value = Literal("1", datatype=_XSD_BOOLEAN)
+ self.assertTrue(test_value.value)
+
+ def testFalseBoolean(self):
+ test_value = Literal("falsE", datatype=_XSD_BOOLEAN)
+ self.assertFalse(test_value.value)
+ test_value = Literal("0", datatype=_XSD_BOOLEAN)
+ self.assertFalse(test_value.value)
+ def testNonFalseBoolean(self):
+ test_value = Literal("abcd", datatype=_XSD_BOOLEAN)
+ self.assertRaises(DeprecationWarning)
+ self.assertFalse(test_value.value)
+ test_value = Literal("10", datatype=_XSD_BOOLEAN)
+ self.assertRaises(DeprecationWarning)
+ self.assertFalse(test_value.value)
+
+
+class TestBindings(unittest.TestCase):
+ def testBinding(self):
class a:
def __init__(self, v):
self.v = v[3:-3]
def __str__(self):
- return '<<<%s>>>' % self.v
+ return "<<<%s>>>" % self.v
- dtA = rdflib.URIRef('urn:dt:a')
+ dtA = rdflib.URIRef("urn:dt:a")
bind(dtA, a)
va = a("<<<2>>>")
@@ -129,10 +154,10 @@ class TestBindings(unittest.TestCase):
self.v = v[3:-3]
def __str__(self):
- return 'B%s' % self.v
+ return "B%s" % self.v
- dtB = rdflib.URIRef('urn:dt:b')
- bind(dtB, b, None, lambda x: '<<<%s>>>' % x)
+ dtB = rdflib.URIRef("urn:dt:b")
+ bind(dtB, b, None, lambda x: "<<<%s>>>" % x)
vb = b("<<<3>>>")
lb = Literal(vb, normalize=True)
@@ -140,17 +165,16 @@ class TestBindings(unittest.TestCase):
self.assertEqual(lb.datatype, dtB)
def testSpecificBinding(self):
-
def lexify(s):
return "--%s--" % s
def unlexify(s):
return s[2:-2]
- datatype = rdflib.URIRef('urn:dt:mystring')
+ datatype = rdflib.URIRef("urn:dt:mystring")
- #Datatype-specific rule
- bind(datatype, string_types, unlexify, lexify, datatype_specific=True)
+ # Datatype-specific rule
+ bind(datatype, str, unlexify, lexify, datatype_specific=True)
s = "Hello"
normal_l = Literal(s)
diff --git a/test/test_memory_store.py b/test/test_memory_store.py
index f579250e..546d12ad 100644
--- a/test/test_memory_store.py
+++ b/test/test_memory_store.py
@@ -1,21 +1,21 @@
import unittest
import rdflib
-rdflib.plugin.register('Memory', rdflib.store.Store,
- 'rdflib.plugins.memory', 'Memory')
+rdflib.plugin.register("Memory", rdflib.store.Store, "rdflib.plugins.memory", "Memory")
class StoreTestCase(unittest.TestCase):
-
def test_memory_store(self):
g = rdflib.Graph("Memory")
subj1 = rdflib.URIRef("http://example.org/foo#bar1")
pred1 = rdflib.URIRef("http://example.org/foo#bar2")
obj1 = rdflib.URIRef("http://example.org/foo#bar3")
triple1 = (subj1, pred1, obj1)
- triple2 = (subj1,
- rdflib.URIRef("http://example.org/foo#bar4"),
- rdflib.URIRef("http://example.org/foo#bar5"))
+ triple2 = (
+ subj1,
+ rdflib.URIRef("http://example.org/foo#bar4"),
+ rdflib.URIRef("http://example.org/foo#bar5"),
+ )
g.add(triple1)
self.assertTrue(len(g) == 1)
g.add(triple2)
@@ -27,5 +27,5 @@ class StoreTestCase(unittest.TestCase):
g.serialize()
-if __name__ == '__main__':
- unittest.main(defaultTest='test_suite')
+if __name__ == "__main__":
+ unittest.main(defaultTest="test_suite")
diff --git a/test/test_mulpath_n3.py b/test/test_mulpath_n3.py
index f0bbda73..f4f26dc4 100644
--- a/test/test_mulpath_n3.py
+++ b/test/test_mulpath_n3.py
@@ -4,6 +4,6 @@ from rdflib import RDFS, URIRef
def test_mulpath_n3():
- uri = 'http://example.com/foo'
+ uri = "http://example.com/foo"
n3 = (URIRef(uri) * ZeroOrMore).n3()
- assert n3 == '<' + uri + '>*'
+ assert n3 == "<" + uri + ">*"
diff --git a/test/test_n3.py b/test/test_n3.py
index 5d447732..9a378843 100644
--- a/test/test_n3.py
+++ b/test/test_n3.py
@@ -1,10 +1,9 @@
from rdflib.graph import Graph, ConjunctiveGraph
import unittest
from rdflib.term import Literal, URIRef
-from rdflib.plugins.parsers.notation3 import BadSyntax
-
-from six import b
-from six.moves.urllib.error import URLError
+from rdflib.plugins.parsers.notation3 import BadSyntax, exponent_syntax
+import itertools
+from urllib.error import URLError
test_data = """
# Definitions of terms describing the n3 model
@@ -62,7 +61,6 @@ n3:context a rdf:Property; rdfs:domain n3:statement;
class TestN3Case(unittest.TestCase):
-
def setUp(self):
pass
@@ -93,12 +91,10 @@ class TestN3Case(unittest.TestCase):
g = Graph()
g.parse(data=input, format="n3")
print(list(g))
- self.assertTrue((None, None, Literal('Foo')) in g)
- self.assertTrue(
- (URIRef('http://example.com/doc/bar'), None, None) in g)
- self.assertTrue(
- (URIRef('http://example.com/doc/doc2/bing'), None, None) in g)
- self.assertTrue((URIRef('http://test.com/bong'), None, None) in g)
+ self.assertTrue((None, None, Literal("Foo")) in g)
+ self.assertTrue((URIRef("http://example.com/doc/bar"), None, None) in g)
+ self.assertTrue((URIRef("http://example.com/doc/doc2/bing"), None, None) in g)
+ self.assertTrue((URIRef("http://test.com/bong"), None, None) in g)
def testBaseExplicit(self):
"""
@@ -115,21 +111,24 @@ class TestN3Case(unittest.TestCase):
<bar> :name "Bar" .
"""
g = Graph()
- g.parse(data=input, publicID='http://blah.com/', format="n3")
+ g.parse(data=input, publicID="http://blah.com/", format="n3")
print(list(g))
- self.assertTrue(
- (URIRef('http://blah.com/foo'), None, Literal('Foo')) in g)
- self.assertTrue(
- (URIRef('http://example.com/doc/bar'), None, None) in g)
+ self.assertTrue((URIRef("http://blah.com/foo"), None, Literal("Foo")) in g)
+ self.assertTrue((URIRef("http://example.com/doc/bar"), None, None) in g)
def testBaseSerialize(self):
g = Graph()
- g.add((URIRef('http://example.com/people/Bob'), URIRef(
- 'urn:knows'), URIRef('http://example.com/people/Linda')))
- s = g.serialize(base='http://example.com/', format='n3')
- self.assertTrue(b('<people/Bob>') in s)
+ g.add(
+ (
+ URIRef("http://example.com/people/Bob"),
+ URIRef("urn:knows"),
+ URIRef("http://example.com/people/Linda"),
+ )
+ )
+ s = g.serialize(base="http://example.com/", format="n3")
+ self.assertTrue("<people/Bob>".encode("latin-1") in s)
g2 = ConjunctiveGraph()
- g2.parse(data=s, publicID='http://example.com/', format='n3')
+ g2.parse(data=s, publicID="http://example.com/", format="n3")
self.assertEqual(list(g), list(g2))
def testIssue23(self):
@@ -164,11 +163,37 @@ foo-bar:Ex foo-bar:name "Test" . """
g = Graph()
g.parse("test/n3/issue156.n3", format="n3")
+ def testIssue999(self):
+ """
+ Make sure the n3 parser does recognize exponent and leading dot in ".171e-11"
+ """
+ data = """
+@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
+
+<http://qudt.org/vocab/unit/MilliM-PER-YR>
+ a <http://qudt.org/schema/qudt/Unit> ;
+ <http://qudt.org/schema/qudt/conversionMultiplier> .171e-11 ;
+ <http://qudt.org/schema/qudt/conversionOffset> 0e+00 ;
+ <http://qudt.org/schema/qudt/description> "0.001-fold of the SI base unit metre divided by the unit year" ;
+ <http://qudt.org/schema/qudt/hasQuantityKind> <http://qudt.org/vocab/quantitykind/Velocity> ;
+ <http://qudt.org/schema/qudt/iec61360Code> "0112/2///62720#UAA868" ;
+ <http://qudt.org/schema/qudt/uneceCommonCode> "H66" ;
+ rdfs:isDefinedBy <http://qudt.org/2.1/vocab/unit> ;
+ rdfs:isDefinedBy <http://qudt.org/vocab/unit> ;
+ rdfs:label "MilliM PER YR" ;
+ <http://www.w3.org/2004/02/skos/core#prefLabel> "millimetre per year" ;
+.
+ """
+ g = Graph()
+ g.parse(data=data, format="n3")
+ g.parse(data=data, format="turtle")
+
def testDotInPrefix(self):
g = Graph()
g.parse(
data="@prefix a.1: <http://example.org/> .\n a.1:cake <urn:x> <urn:y> . \n",
- format='n3')
+ format="n3",
+ )
def testModel(self):
g = ConjunctiveGraph()
@@ -191,40 +216,74 @@ foo-bar:Ex foo-bar:name "Test" . """
g = ConjunctiveGraph()
try:
g.parse(
- "http://groups.csail.mit.edu/dig/2005/09/rein/examples/troop42-policy.n3", format="n3")
+ "http://groups.csail.mit.edu/dig/2005/09/rein/examples/troop42-policy.n3",
+ format="n3",
+ )
except URLError:
from nose import SkipTest
- raise SkipTest(
- 'No network to retrieve the information, skipping test')
+
+ raise SkipTest("No network to retrieve the information, skipping test")
def testSingleQuotedLiterals(self):
- test_data = ["""@prefix : <#> . :s :p 'o' .""",
- """@prefix : <#> . :s :p '''o''' ."""]
+ test_data = [
+ """@prefix : <#> . :s :p 'o' .""",
+ """@prefix : <#> . :s :p '''o''' .""",
+ ]
for data in test_data:
# N3 doesn't accept single quotes around string literals
g = ConjunctiveGraph()
- self.assertRaises(BadSyntax, g.parse,
- data=data, format='n3')
+ self.assertRaises(BadSyntax, g.parse, data=data, format="n3")
g = ConjunctiveGraph()
- g.parse(data=data, format='turtle')
+ g.parse(data=data, format="turtle")
self.assertEqual(len(g), 1)
for _, _, o in g:
- self.assertEqual(o, Literal('o'))
+ self.assertEqual(o, Literal("o"))
def testEmptyPrefix(self):
# this is issue https://github.com/RDFLib/rdflib/issues/312
g1 = Graph()
- g1.parse(data=":a :b :c .", format='n3')
+ g1.parse(data=":a :b :c .", format="n3")
g2 = Graph()
- g2.parse(data="@prefix : <#> . :a :b :c .", format='n3')
+ g2.parse(data="@prefix : <#> . :a :b :c .", format="n3")
assert set(g1) == set(
- g2), 'Document with declared empty prefix must match default #'
-
-
-if __name__ == '__main__':
+ g2
+ ), "Document with declared empty prefix must match default #"
+
+
+class TestRegularExpressions(unittest.TestCase):
+ def testExponents(self):
+ signs = ("", "+", "-")
+ mantissas = (
+ "1",
+ "1.",
+ ".1",
+ "12",
+ "12.",
+ "1.2",
+ ".12",
+ "123",
+ "123.",
+ "12.3",
+ "1.23",
+ ".123",
+ )
+ es = "eE"
+ exps = ("1", "12", "+1", "-1", "+12", "-12")
+ for parts in itertools.product(signs, mantissas, es, exps):
+ expstring = "".join(parts)
+ self.assertTrue(exponent_syntax.match(expstring))
+
+ def testInvalidExponents(self):
+ # Add test cases as needed
+ invalid = (".e1",)
+ for expstring in invalid:
+ self.assertFalse(exponent_syntax.match(expstring))
+
+
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_n3_suite.py b/test/test_n3_suite.py
index 21e6bcba..f2ab1ab6 100644
--- a/test/test_n3_suite.py
+++ b/test/test_n3_suite.py
@@ -11,19 +11,19 @@ except:
def _get_test_files_formats():
- skiptests = [
- ]
- for f in os.listdir('test/n3'):
+ skiptests = []
+ for f in os.listdir("test/n3"):
if f not in skiptests:
fpath = "test/n3/" + f
- if f.endswith('.rdf'):
- yield fpath, 'xml'
- elif f.endswith('.n3'):
- yield fpath, 'n3'
+ if f.endswith(".rdf"):
+ yield fpath, "xml"
+ elif f.endswith(".n3"):
+ yield fpath, "n3"
+
def all_n3_files():
skiptests = [
- 'test/n3/example-lots_of_graphs.n3', # only n3 can serialize QuotedGraph, no point in testing roundtrip
+ "test/n3/example-lots_of_graphs.n3", # only n3 can serialize QuotedGraph, no point in testing roundtrip
]
for fpath, fmt in _get_test_files_formats():
if fpath in skiptests:
@@ -31,15 +31,17 @@ def all_n3_files():
else:
yield fpath, fmt
+
def test_n3_writing():
for fpath, fmt in _get_test_files_formats():
- yield check_serialize_parse, fpath, fmt, 'n3'
+ yield check_serialize_parse, fpath, fmt, "n3"
if __name__ == "__main__":
if len(sys.argv) > 1:
- check_serialize_parse(sys.argv[1], 'n3', 'n3', True)
+ check_serialize_parse(sys.argv[1], "n3", "n3", True)
sys.exit()
else:
import nose
+
nose.main(defaultTest=__name__)
diff --git a/test/test_namespace.py b/test/test_namespace.py
index 4041433e..48896fdc 100644
--- a/test/test_namespace.py
+++ b/test/test_namespace.py
@@ -3,73 +3,99 @@ import unittest
from rdflib.graph import Graph
from rdflib.namespace import FOAF
from rdflib.term import URIRef
-from six import b
class NamespacePrefixTest(unittest.TestCase):
-
def test_compute_qname(self):
"""Test sequential assignment of unknown prefixes"""
g = Graph()
- self.assertEqual(g.compute_qname(URIRef("http://foo/bar/baz")),
- ("ns1", URIRef("http://foo/bar/"), "baz"))
+ self.assertEqual(
+ g.compute_qname(URIRef("http://foo/bar/baz")),
+ ("ns1", URIRef("http://foo/bar/"), "baz"),
+ )
- self.assertEqual(g.compute_qname(URIRef("http://foo/bar#baz")),
- ("ns2", URIRef("http://foo/bar#"), "baz"))
+ self.assertEqual(
+ g.compute_qname(URIRef("http://foo/bar#baz")),
+ ("ns2", URIRef("http://foo/bar#"), "baz"),
+ )
# should skip to ns4 when ns3 is already assigned
g.bind("ns3", URIRef("http://example.org/"))
- self.assertEqual(g.compute_qname(URIRef("http://blip/blop")),
- ("ns4", URIRef("http://blip/"), "blop"))
+ self.assertEqual(
+ g.compute_qname(URIRef("http://blip/blop")),
+ ("ns4", URIRef("http://blip/"), "blop"),
+ )
# should return empty qnames correctly
- self.assertEqual(g.compute_qname(URIRef("http://foo/bar/")),
- ("ns1", URIRef("http://foo/bar/"), ""))
+ self.assertEqual(
+ g.compute_qname(URIRef("http://foo/bar/")),
+ ("ns1", URIRef("http://foo/bar/"), ""),
+ )
def test_reset(self):
- data = ('@prefix a: <http://example.org/a> .\n'
- 'a: <http://example.org/b> <http://example.org/c> .')
- graph = Graph().parse(data=data, format='turtle')
+ data = (
+ "@prefix a: <http://example.org/a> .\n"
+ "a: <http://example.org/b> <http://example.org/c> ."
+ )
+ graph = Graph().parse(data=data, format="turtle")
for p, n in tuple(graph.namespaces()):
graph.store._IOMemory__namespace.pop(p)
graph.store._IOMemory__prefix.pop(n)
graph.namespace_manager.reset()
self.assertFalse(tuple(graph.namespaces()))
- u = URIRef('http://example.org/a')
- prefix, namespace, name = graph.namespace_manager.compute_qname(u, generate=True)
+ u = URIRef("http://example.org/a")
+ prefix, namespace, name = graph.namespace_manager.compute_qname(
+ u, generate=True
+ )
self.assertNotEqual(namespace, u)
def test_reset_preserve_prefixes(self):
- data = ('@prefix a: <http://example.org/a> .\n'
- 'a: <http://example.org/b> <http://example.org/c> .')
- graph = Graph().parse(data=data, format='turtle')
+ data = (
+ "@prefix a: <http://example.org/a> .\n"
+ "a: <http://example.org/b> <http://example.org/c> ."
+ )
+ graph = Graph().parse(data=data, format="turtle")
graph.namespace_manager.reset()
self.assertTrue(tuple(graph.namespaces()))
- u = URIRef('http://example.org/a')
- prefix, namespace, name = graph.namespace_manager.compute_qname(u, generate=True)
+ u = URIRef("http://example.org/a")
+ prefix, namespace, name = graph.namespace_manager.compute_qname(
+ u, generate=True
+ )
self.assertEqual(namespace, u)
def test_n3(self):
g = Graph()
- g.add((URIRef("http://example.com/foo"),
- URIRef("http://example.com/bar"),
- URIRef("http://example.com/baz")))
+ g.add(
+ (
+ URIRef("http://example.com/foo"),
+ URIRef("http://example.com/bar"),
+ URIRef("http://example.com/baz"),
+ )
+ )
n3 = g.serialize(format="n3")
# Gunnar disagrees that this is right:
# self.assertTrue("<http://example.com/foo> ns1:bar <http://example.com/baz> ." in n3)
# as this is much prettier, and ns1 is already defined:
- self.assertTrue(b("ns1:foo ns1:bar ns1:baz .") in n3)
+ self.assertTrue("ns1:foo ns1:bar ns1:baz .".encode("latin-1") in n3)
def test_n32(self):
# this test not generating prefixes for subjects/objects
g = Graph()
- g.add((URIRef("http://example1.com/foo"),
- URIRef("http://example2.com/bar"),
- URIRef("http://example3.com/baz")))
+ g.add(
+ (
+ URIRef("http://example1.com/foo"),
+ URIRef("http://example2.com/bar"),
+ URIRef("http://example3.com/baz"),
+ )
+ )
n3 = g.serialize(format="n3")
self.assertTrue(
- b("<http://example1.com/foo> ns1:bar <http://example3.com/baz> .") in n3)
+ "<http://example1.com/foo> ns1:bar <http://example3.com/baz> .".encode(
+ "latin-1"
+ )
+ in n3
+ )
def test_closed_namespace(self):
"""Tests terms both in an out of the ClosedNamespace FOAF"""
@@ -84,4 +110,7 @@ class NamespacePrefixTest(unittest.TestCase):
self.assertRaises(KeyError, add_not_in_namespace, "firstName")
# a property name within the core FOAF namespace
- self.assertEqual(add_not_in_namespace("givenName"), URIRef("http://xmlns.com/foaf/0.1/givenName"))
+ self.assertEqual(
+ add_not_in_namespace("givenName"),
+ URIRef("http://xmlns.com/foaf/0.1/givenName"),
+ )
diff --git a/test/test_nodepickler.py b/test/test_nodepickler.py
index 31a667da..970ec232 100644
--- a/test/test_nodepickler.py
+++ b/test/test_nodepickler.py
@@ -7,7 +7,7 @@ from rdflib.store import NodePickler
# same as nt/more_literals.nt
cases = [
- 'no quotes',
+ "no quotes",
"single ' quote",
'double " quote',
'triple """ quotes',
@@ -15,7 +15,7 @@ cases = [
'"',
"'",
'"\'"',
- '\\', # len 1
+ "\\", # len 1
'\\"', # len 2
'\\\\"', # len 3
'\\"\\', # len 3
@@ -24,12 +24,13 @@ cases = [
class UtilTestCase(unittest.TestCase):
-
def test_to_bits_from_bits_round_trip(self):
np = NodePickler()
- a = Literal(u'''A test with a \\n (backslash n), "\u00a9" , and newline \n and a second line.
-''')
+ a = Literal(
+ u"""A test with a \\n (backslash n), "\u00a9" , and newline \n and a second line.
+"""
+ )
b = np.loads(np.dumps(a))
self.assertEqual(a, b)
@@ -49,5 +50,5 @@ class UtilTestCase(unittest.TestCase):
self.assertEqual(np._objects, np2._objects)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_nquads.py b/test/test_nquads.py
index 559f754f..c25bc7ed 100644
--- a/test/test_nquads.py
+++ b/test/test_nquads.py
@@ -1,12 +1,10 @@
import unittest
from rdflib import ConjunctiveGraph, URIRef, Namespace
-from six import b
-TEST_BASE = 'test/nquads.rdflib'
+TEST_BASE = "test/nquads.rdflib"
class NQuadsParserTest(unittest.TestCase):
-
def _load_example(self):
g = ConjunctiveGraph()
with open("test/nquads.rdflib/example.nquads", "rb") as data:
@@ -47,22 +45,26 @@ class NQuadsParserTest(unittest.TestCase):
uri1 = URIRef("http://example.org/mygraph1")
uri2 = URIRef("http://example.org/mygraph2")
- bob = URIRef(u'urn:bob')
- likes = URIRef(u'urn:likes')
- pizza = URIRef(u'urn:pizza')
+ bob = URIRef(u"urn:bob")
+ likes = URIRef(u"urn:likes")
+ pizza = URIRef(u"urn:pizza")
g.get_context(uri1).add((bob, likes, pizza))
g.get_context(uri2).add((bob, likes, pizza))
- s = g.serialize(format='nquads')
- self.assertEqual(len([x for x in s.split(b("\n")) if x.strip()]), 2)
+ s = g.serialize(format="nquads")
+ self.assertEqual(
+ len([x for x in s.split("\n".encode("latin-1")) if x.strip()]), 2
+ )
g2 = ConjunctiveGraph()
- g2.parse(data=s, format='nquads')
+ g2.parse(data=s, format="nquads")
self.assertEqual(len(g), len(g2))
- self.assertEqual(sorted(x.identifier for x in g.contexts()),
- sorted(x.identifier for x in g2.contexts()))
+ self.assertEqual(
+ sorted(x.identifier for x in g.contexts()),
+ sorted(x.identifier for x in g2.contexts()),
+ )
if __name__ == "__main__":
diff --git a/test/test_nquads_w3c.py b/test/test_nquads_w3c.py
index f12850d2..02d79576 100644
--- a/test/test_nquads_w3c.py
+++ b/test/test_nquads_w3c.py
@@ -13,7 +13,7 @@ def nquads(test):
g = ConjunctiveGraph()
try:
- g.parse(test.action, format='nquads')
+ g.parse(test.action, format="nquads")
if not test.syntax:
raise AssertionError("Input shouldn't have parsed!")
except:
@@ -21,14 +21,11 @@ def nquads(test):
raise
-testers = {
- RDFT.TestNQuadsPositiveSyntax: nquads,
- RDFT.TestNQuadsNegativeSyntax: nquads
-}
+testers = {RDFT.TestNQuadsPositiveSyntax: nquads, RDFT.TestNQuadsNegativeSyntax: nquads}
def test_nquads(tests=None):
- for t in nose_tests(testers, 'test/w3c/nquads/manifest.ttl'):
+ for t in nose_tests(testers, "test/w3c/nquads/manifest.ttl"):
if tests:
for test in tests:
if test in t[1].uri:
@@ -39,7 +36,7 @@ def test_nquads(tests=None):
yield t
-if __name__ == '__main__':
+if __name__ == "__main__":
verbose = True
- nose_tst_earl_report(test_nquads, 'rdflib_nquads')
+ nose_tst_earl_report(test_nquads, "rdflib_nquads")
diff --git a/test/test_nt_misc.py b/test/test_nt_misc.py
index d9f9e1f9..7934f70e 100644
--- a/test/test_nt_misc.py
+++ b/test/test_nt_misc.py
@@ -4,14 +4,12 @@ import os
import re
from rdflib import Graph, Literal, URIRef
from rdflib.plugins.parsers import ntriples
-from six import binary_type, text_type, b
-from six.moves.urllib.request import urlopen
+from urllib.request import urlopen
log = logging.getLogger(__name__)
class NTTestCase(unittest.TestCase):
-
def testIssue859(self):
graphA = Graph()
graphB = Graph()
@@ -26,15 +24,15 @@ class NTTestCase(unittest.TestCase):
def testIssue78(self):
g = Graph()
g.add((URIRef("foo"), URIRef("foo"), Literal(u"R\u00E4ksm\u00F6rg\u00E5s")))
- s = g.serialize(format='nt')
- self.assertEqual(type(s), binary_type)
- self.assertTrue(b(r"R\u00E4ksm\u00F6rg\u00E5s") in s)
+ s = g.serialize(format="nt")
+ self.assertEqual(type(s), bytes)
+ self.assertTrue(r"R\u00E4ksm\u00F6rg\u00E5s".encode("latin-1") in s)
def testIssue146(self):
g = Graph()
g.add((URIRef("foo"), URIRef("foo"), Literal("test\n", lang="en")))
s = g.serialize(format="nt").strip()
- self.assertEqual(s, b('<foo> <foo> "test\\n"@en .'))
+ self.assertEqual(s, '<foo> <foo> "test\\n"@en .'.encode("latin-1"))
def test_sink(self):
s = ntriples.Sink()
@@ -46,7 +44,7 @@ class NTTestCase(unittest.TestCase):
safe = """<http://example.org/alice/foaf.rdf#me> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://xmlns.com/foaf/0.1/Person> <http://example.org/alice/foaf1.rdf> ."""
ntriples.validate = False
res = ntriples.unquote(safe)
- self.assertTrue(isinstance(res, text_type))
+ self.assertTrue(isinstance(res, str))
def test_validating_unquote(self):
quot = """<http://example.org/alice/foaf.rdf#me> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://xmlns.com/foaf/0.1/Person> <http://example.org/alice/foaf1.rdf> ."""
@@ -80,7 +78,7 @@ class NTTestCase(unittest.TestCase):
self.assertEqual(res, uniquot)
def test_NTriplesParser_fpath(self):
- fpath = "test/nt/" + os.listdir('test/nt')[0]
+ fpath = "test/nt/" + os.listdir("test/nt")[0]
p = ntriples.NTriplesParser()
self.assertRaises(ntriples.ParseError, p.parse, fpath)
@@ -89,7 +87,7 @@ class NTTestCase(unittest.TestCase):
data = 3
self.assertRaises(ntriples.ParseError, p.parsestring, data)
fname = "test/nt/lists-02.nt"
- with open(fname, 'r') as f:
+ with open(fname, "r") as f:
data = f.read()
p = ntriples.NTriplesParser()
res = p.parsestring(data)
@@ -106,15 +104,21 @@ class NTTestCase(unittest.TestCase):
self.assertTrue(sink is not None)
def test_bad_line(self):
- data = '''<http://example.org/resource32> 3 <http://example.org/datatype1> .\n'''
+ data = (
+ """<http://example.org/resource32> 3 <http://example.org/datatype1> .\n"""
+ )
p = ntriples.NTriplesParser()
self.assertRaises(ntriples.ParseError, p.parsestring, data)
def test_cover_eat(self):
- data = '''<http://example.org/resource32> 3 <http://example.org/datatype1> .\n'''
+ data = (
+ """<http://example.org/resource32> 3 <http://example.org/datatype1> .\n"""
+ )
p = ntriples.NTriplesParser()
p.line = data
- self.assertRaises(ntriples.ParseError, p.eat, re.compile('<http://example.org/datatype1>'))
+ self.assertRaises(
+ ntriples.ParseError, p.eat, re.compile("<http://example.org/datatype1>")
+ )
def test_cover_subjectobjectliteral(self):
# data = '''<http://example.org/resource32> 3 <http://example.org/datatype1> .\n'''
diff --git a/test/test_nt_suite.py b/test/test_nt_suite.py
index c9175320..753d2897 100644
--- a/test/test_nt_suite.py
+++ b/test/test_nt_suite.py
@@ -12,41 +12,39 @@ The actual tests are done in test_roundtrip
def _get_test_files_formats():
- for f in os.listdir('test/nt'):
+ for f in os.listdir("test/nt"):
fpath = "test/nt/" + f
- if f.endswith('.rdf'):
- yield fpath, 'xml'
- elif f.endswith('.nt'):
- yield fpath, 'nt'
+ if f.endswith(".rdf"):
+ yield fpath, "xml"
+ elif f.endswith(".nt"):
+ yield fpath, "nt"
def all_nt_files():
skiptests = [
# illegal literal as subject
- 'test/nt/literals-01.nt',
- 'test/nt/keywords-08.nt',
- 'test/nt/paths-04.nt',
- 'test/nt/numeric-01.nt',
- 'test/nt/numeric-02.nt',
- 'test/nt/numeric-03.nt',
- 'test/nt/numeric-04.nt',
- 'test/nt/numeric-05.nt',
-
+ "test/nt/literals-01.nt",
+ "test/nt/keywords-08.nt",
+ "test/nt/paths-04.nt",
+ "test/nt/numeric-01.nt",
+ "test/nt/numeric-02.nt",
+ "test/nt/numeric-03.nt",
+ "test/nt/numeric-04.nt",
+ "test/nt/numeric-05.nt",
# illegal variables
- 'test/nt/formulae-01.nt',
- 'test/nt/formulae-02.nt',
- 'test/nt/formulae-03.nt',
- 'test/nt/formulae-05.nt',
- 'test/nt/formulae-06.nt',
- 'test/nt/formulae-10.nt',
-
+ "test/nt/formulae-01.nt",
+ "test/nt/formulae-02.nt",
+ "test/nt/formulae-03.nt",
+ "test/nt/formulae-05.nt",
+ "test/nt/formulae-06.nt",
+ "test/nt/formulae-10.nt",
# illegal bnode as predicate
- 'test/nt/paths-06.nt',
- 'test/nt/anons-02.nt',
- 'test/nt/anons-03.nt',
- 'test/nt/qname-01.nt',
- 'test/nt/lists-06.nt',
- ]
+ "test/nt/paths-06.nt",
+ "test/nt/anons-02.nt",
+ "test/nt/anons-03.nt",
+ "test/nt/qname-01.nt",
+ "test/nt/lists-06.nt",
+ ]
for fpath, fmt in _get_test_files_formats():
if fpath in skiptests:
log.debug("Skipping %s, known issue" % fpath)
diff --git a/test/test_nt_w3c.py b/test/test_nt_w3c.py
index 65166f5e..8294e8ff 100644
--- a/test/test_nt_w3c.py
+++ b/test/test_nt_w3c.py
@@ -13,7 +13,7 @@ def nt(test):
g = Graph()
try:
- g.parse(test.action, format='nt')
+ g.parse(test.action, format="nt")
if not test.syntax:
raise AssertionError("Input shouldn't have parsed!")
except:
@@ -21,14 +21,11 @@ def nt(test):
raise
-testers = {
- RDFT.TestNTriplesPositiveSyntax: nt,
- RDFT.TestNTriplesNegativeSyntax: nt
-}
+testers = {RDFT.TestNTriplesPositiveSyntax: nt, RDFT.TestNTriplesNegativeSyntax: nt}
def test_nt(tests=None):
- for t in nose_tests(testers, 'test/w3c/nt/manifest.ttl', legacy=True):
+ for t in nose_tests(testers, "test/w3c/nt/manifest.ttl", legacy=True):
if tests:
for test in tests:
if test in t[1].uri:
@@ -39,7 +36,7 @@ def test_nt(tests=None):
yield t
-if __name__ == '__main__':
+if __name__ == "__main__":
verbose = True
- nose_tst_earl_report(test_nt, 'rdflib_nt')
+ nose_tst_earl_report(test_nt, "rdflib_nt")
diff --git a/test/test_parser.py b/test/test_parser.py
index d311a89b..3aaf5658 100644
--- a/test/test_parser.py
+++ b/test/test_parser.py
@@ -7,8 +7,8 @@ from rdflib.graph import Graph
class ParserTestCase(unittest.TestCase):
- backend = 'default'
- path = 'store'
+ backend = "default"
+ path = "store"
def setUp(self):
self.graph = Graph(store=self.backend)
@@ -19,7 +19,8 @@ class ParserTestCase(unittest.TestCase):
def testNoPathWithHash(self):
g = self.graph
- g.parse(data="""\
+ g.parse(
+ data="""\
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<rdf:RDF
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
@@ -31,7 +32,9 @@ class ParserTestCase(unittest.TestCase):
</rdfs:Class>
</rdf:RDF>
-""", publicID="http://example.org")
+""",
+ publicID="http://example.org",
+ )
subject = URIRef("http://example.org#")
label = g.value(subject, RDFS.label)
diff --git a/test/test_parser_helpers.py b/test/test_parser_helpers.py
index 58d083cb..090a8a49 100644
--- a/test/test_parser_helpers.py
+++ b/test/test_parser_helpers.py
@@ -1,4 +1,5 @@
from rdflib.plugins.sparql.parser import TriplesSameSubject
+
# from rdflib.plugins.sparql.algebra import triples
diff --git a/test/test_prefixTypes.py b/test/test_prefixTypes.py
index 415f0459..8a785094 100644
--- a/test/test_prefixTypes.py
+++ b/test/test_prefixTypes.py
@@ -1,17 +1,18 @@
import unittest
-
from rdflib import Graph
-from six import b
-graph = Graph().parse(format='n3', data="""
+graph = Graph().parse(
+ format="n3",
+ data="""
@prefix dct: <http://purl.org/dc/terms/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
<http://example.org/doc> a foaf:Document;
dct:created "2011-03-20"^^xsd:date .
-""")
+""",
+)
class PrefixTypesTest(unittest.TestCase):
@@ -24,11 +25,11 @@ class PrefixTypesTest(unittest.TestCase):
"""
def test(self):
- s = graph.serialize(format='n3')
+ s = graph.serialize(format="n3")
print(s)
- self.assertTrue(b("foaf:Document") in s)
- self.assertTrue(b("xsd:date") in s)
+ self.assertTrue("foaf:Document".encode("latin-1") in s)
+ self.assertTrue("xsd:date".encode("latin-1") in s)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_preflabel.py b/test/test_preflabel.py
index 76c4131e..b35c626c 100644
--- a/test/test_preflabel.py
+++ b/test/test_preflabel.py
@@ -8,50 +8,73 @@ from rdflib import URIRef
class TestPrefLabel(unittest.TestCase):
-
def setUp(self):
self.g = ConjunctiveGraph()
- self.u = URIRef('http://example.com/foo')
- self.g.add([self.u, RDFS.label, Literal('foo')])
- self.g.add([self.u, RDFS.label, Literal('bar')])
+ self.u = URIRef("http://example.com/foo")
+ self.g.add([self.u, RDFS.label, Literal("foo")])
+ self.g.add([self.u, RDFS.label, Literal("bar")])
def test_default_label_sorting(self):
res = sorted(self.g.preferredLabel(self.u))
- tgt = [(rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label'),
- rdflib.term.Literal(u'bar')),
- (rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label'),
- rdflib.term.Literal(u'foo'))]
+ tgt = [
+ (
+ rdflib.term.URIRef("http://www.w3.org/2000/01/rdf-schema#label"),
+ rdflib.term.Literal(u"bar"),
+ ),
+ (
+ rdflib.term.URIRef("http://www.w3.org/2000/01/rdf-schema#label"),
+ rdflib.term.Literal(u"foo"),
+ ),
+ ]
self.assertEqual(res, tgt)
def test_default_preflabel_sorting(self):
- self.g.add([self.u, SKOS.prefLabel, Literal('bla')])
+ self.g.add([self.u, SKOS.prefLabel, Literal("bla")])
res = self.g.preferredLabel(self.u)
- tgt = [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'bla'))]
+ tgt = [
+ (
+ rdflib.term.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ rdflib.term.Literal(u"bla"),
+ )
+ ]
self.assertEqual(res, tgt)
def test_preflabel_lang_sorting_no_lang_attr(self):
- self.g.add([self.u, SKOS.prefLabel, Literal('bla')])
- self.g.add([self.u, SKOS.prefLabel, Literal('blubb', lang='en')])
+ self.g.add([self.u, SKOS.prefLabel, Literal("bla")])
+ self.g.add([self.u, SKOS.prefLabel, Literal("blubb", lang="en")])
res = sorted(self.g.preferredLabel(self.u))
- tgt = [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'bla')),
- (rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'blubb', lang='en'))]
+ tgt = [
+ (
+ rdflib.term.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ rdflib.term.Literal(u"bla"),
+ ),
+ (
+ rdflib.term.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ rdflib.term.Literal(u"blubb", lang="en"),
+ ),
+ ]
self.assertEqual(res, tgt)
def test_preflabel_lang_sorting_empty_lang_attr(self):
- self.g.add([self.u, SKOS.prefLabel, Literal('bla')])
- self.g.add([self.u, SKOS.prefLabel, Literal('blubb', lang='en')])
- res = self.g.preferredLabel(self.u, lang='')
- tgt = [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'bla'))]
+ self.g.add([self.u, SKOS.prefLabel, Literal("bla")])
+ self.g.add([self.u, SKOS.prefLabel, Literal("blubb", lang="en")])
+ res = self.g.preferredLabel(self.u, lang="")
+ tgt = [
+ (
+ rdflib.term.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ rdflib.term.Literal(u"bla"),
+ )
+ ]
self.assertEqual(res, tgt)
def test_preflabel_lang_sorting_en_lang_attr(self):
- self.g.add([self.u, SKOS.prefLabel, Literal('blubb', lang='en')])
- res = self.g.preferredLabel(self.u, lang='en')
- tgt = [(rdflib.term.URIRef('http://www.w3.org/2004/02/skos/core#prefLabel'),
- rdflib.term.Literal(u'blubb', lang='en'))]
+ self.g.add([self.u, SKOS.prefLabel, Literal("blubb", lang="en")])
+ res = self.g.preferredLabel(self.u, lang="en")
+ tgt = [
+ (
+ rdflib.term.URIRef("http://www.w3.org/2004/02/skos/core#prefLabel"),
+ rdflib.term.Literal(u"blubb", lang="en"),
+ )
+ ]
self.assertEqual(res, tgt)
diff --git a/test/test_prettyxml.py b/test/test_prettyxml.py
index 996e6e54..4a033fa4 100644
--- a/test/test_prettyxml.py
+++ b/test/test_prettyxml.py
@@ -1,8 +1,7 @@
# -*- coding: UTF-8 -*-
from rdflib.term import URIRef, BNode, Literal
from rdflib.namespace import RDF, RDFS
-from six import b, BytesIO
-
+from io import BytesIO
from rdflib.plugins.serializers.rdfxml import PrettyXMLSerializer
from rdflib.graph import ConjunctiveGraph
@@ -46,7 +45,9 @@ def _mangled_copy(g):
"Makes a copy of the graph, replacing all bnodes with the bnode ``_blank``."
gcopy = ConjunctiveGraph()
- def isbnode(v): return isinstance(v, BNode)
+ def isbnode(v):
+ return isinstance(v, BNode)
+
for s, p, o in g:
if isbnode(s):
s = _blank
@@ -117,56 +118,101 @@ class TestPrettyXmlSerializer(SerializerTestBase):
rdfs:seeAlso _:bnode2 .
"""
- testContentFormat = 'n3'
+ testContentFormat = "n3"
def test_result_fragments(self):
rdfXml = serialize(self.sourceGraph, self.serializer)
- assert b('<Test rdf:about="http://example.org/data/a">') in rdfXml
- assert b('<rdf:Description rdf:about="http://example.org/data/b">') in rdfXml
- assert b('<name xml:lang="en">Bee</name>') in rdfXml
- assert b('<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>') in rdfXml
- assert b('<BNode rdf:nodeID="') in rdfXml, "expected one identified bnode in serialized graph"
- #onlyBNodesMsg = "expected only inlined subClassOf-bnodes in serialized graph"
- #assert '<rdfs:subClassOf>' in rdfXml, onlyBNodesMsg
- #assert not '<rdfs:subClassOf ' in rdfXml, onlyBNodesMsg
+ assert (
+ '<Test rdf:about="http://example.org/data/a">'.encode("latin-1") in rdfXml
+ )
+ assert (
+ '<rdf:Description rdf:about="http://example.org/data/b">'.encode("latin-1")
+ in rdfXml
+ )
+ assert '<name xml:lang="en">Bee</name>'.encode("latin-1") in rdfXml
+ assert (
+ '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode(
+ "latin-1"
+ )
+ in rdfXml
+ )
+ assert (
+ '<BNode rdf:nodeID="'.encode("latin-1") in rdfXml
+ ), "expected one identified bnode in serialized graph"
+ # onlyBNodesMsg = "expected only inlined subClassOf-bnodes in serialized graph"
+ # assert '<rdfs:subClassOf>' in rdfXml, onlyBNodesMsg
+ # assert not '<rdfs:subClassOf ' in rdfXml, onlyBNodesMsg
def test_result_fragments_with_base(self):
- rdfXml = serialize(self.sourceGraph, self.serializer,
- extra_args={'base': "http://example.org/", 'xml_base': "http://example.org/"})
- assert b('xml:base="http://example.org/"') in rdfXml
- assert b('<Test rdf:about="data/a">') in rdfXml
- assert b('<rdf:Description rdf:about="data/b">') in rdfXml
- assert b('<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>') in rdfXml
- assert b('<BNode rdf:nodeID="') in rdfXml, "expected one identified bnode in serialized graph"
+ rdfXml = serialize(
+ self.sourceGraph,
+ self.serializer,
+ extra_args={
+ "base": "http://example.org/",
+ "xml_base": "http://example.org/",
+ },
+ )
+ assert 'xml:base="http://example.org/"'.encode("latin-1") in rdfXml
+ assert '<Test rdf:about="data/a">'.encode("latin-1") in rdfXml
+ assert '<rdf:Description rdf:about="data/b">'.encode("latin-1") in rdfXml
+ assert (
+ '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode(
+ "latin-1"
+ )
+ in rdfXml
+ )
+ assert (
+ '<BNode rdf:nodeID="'.encode("latin-1") in rdfXml
+ ), "expected one identified bnode in serialized graph"
def test_subClassOf_objects(self):
reparsedGraph = serialize_and_load(self.sourceGraph, self.serializer)
- _assert_expected_object_types_for_predicates(reparsedGraph,
- [RDFS.seeAlso, RDFS.subClassOf],
- [URIRef, BNode])
+ _assert_expected_object_types_for_predicates(
+ reparsedGraph, [RDFS.seeAlso, RDFS.subClassOf], [URIRef, BNode]
+ )
def test_pretty_xmlliteral(self):
# given:
g = ConjunctiveGraph()
- g.add((BNode(), RDF.value, Literal(u'''<p xmlns="http://www.w3.org/1999/xhtml">See also <a href="#aring">Å</a></p>''', datatype=RDF.XMLLiteral)))
+ g.add(
+ (
+ BNode(),
+ RDF.value,
+ Literal(
+ u"""<p xmlns="http://www.w3.org/1999/xhtml">See also <a href="#aring">Å</a></p>""",
+ datatype=RDF.XMLLiteral,
+ ),
+ )
+ )
# when:
- xmlrepr = g.serialize(format='pretty-xml')
+ xmlrepr = g.serialize(format="pretty-xml")
# then:
- assert u'''<rdf:value rdf:parseType="Literal"><p xmlns="http://www.w3.org/1999/xhtml">See also <a href="#aring">Å</a></p></rdf:value>'''.encode('utf-8') in xmlrepr
+ assert (
+ u"""<rdf:value rdf:parseType="Literal"><p xmlns="http://www.w3.org/1999/xhtml">See also <a href="#aring">Å</a></p></rdf:value>""".encode(
+ "utf-8"
+ )
+ in xmlrepr
+ )
def test_pretty_broken_xmlliteral(self):
# given:
g = ConjunctiveGraph()
- g.add((BNode(), RDF.value, Literal(u'''<p ''', datatype=RDF.XMLLiteral)))
+ g.add((BNode(), RDF.value, Literal(u"""<p """, datatype=RDF.XMLLiteral)))
# when:
- xmlrepr = g.serialize(format='pretty-xml')
+ xmlrepr = g.serialize(format="pretty-xml")
# then:
- assert u'''<rdf:value rdf:datatype="http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral">&lt;p '''.encode('utf-8') in xmlrepr
+ assert (
+ u"""<rdf:value rdf:datatype="http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral">&lt;p """.encode(
+ "utf-8"
+ )
+ in xmlrepr
+ )
def _assert_expected_object_types_for_predicates(graph, predicates, types):
for s, p, o in graph:
if p in predicates:
someTrue = [isinstance(o, t) for t in types]
- assert True in someTrue, \
- "Bad type %s for object when predicate is <%s>." % (type(o), p)
+ assert (
+ True in someTrue
+ ), "Bad type %s for object when predicate is <%s>." % (type(o), p)
diff --git a/test/test_rdf_lists.py b/test/test_rdf_lists.py
index a73d14d8..466b4847 100644
--- a/test/test_rdf_lists.py
+++ b/test/test_rdf_lists.py
@@ -5,8 +5,7 @@ from rdflib.graph import Graph
from rdflib.term import URIRef
-DATA =\
- """<http://example.com#C> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Class>.
+DATA = """<http://example.com#C> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Class>.
<http://example.com#B> <http://www.w3.org/2000/01/rdf-schema#subClassOf> _:fIYNVPxd4.
<http://example.com#B> <http://www.w3.org/2000/01/rdf-schema#subClassOf> <http://example.com#A>.
<http://example.com#B> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/2002/07/owl#Class>.
@@ -20,8 +19,7 @@ _:fIYNVPxd3 <http://www.w3.org/1999/02/22-rdf-syntax-ns#first> <http://example.c
_:fIYNVPxd3 <http://www.w3.org/1999/02/22-rdf-syntax-ns#rest> <http://www.w3.org/1999/02/22-rdf-syntax-ns#nil>.
"""
-DATA_FALSE_ELEMENT =\
- """
+DATA_FALSE_ELEMENT = """
<http://example.org/#ThreeMemberList> <http://www.w3.org/1999/02/22-rdf-syntax-ns#first> <http://example.org/#p> .
<http://example.org/#ThreeMemberList> <http://www.w3.org/1999/02/22-rdf-syntax-ns#rest> _:list2 .
_:list2 <http://www.w3.org/1999/02/22-rdf-syntax-ns#first> "false"^^<http://www.w3.org/2001/XMLSchema#boolean> .
@@ -36,19 +34,19 @@ def main():
class OWLCollectionTest(unittest.TestCase):
-
def testCollectionRDFXML(self):
- g = Graph().parse(data=DATA, format='nt')
- g.namespace_manager.bind('owl', URIRef('http://www.w3.org/2002/07/owl#'))
- print(g.serialize(format='pretty-xml'))
+ g = Graph().parse(data=DATA, format="nt")
+ g.namespace_manager.bind("owl", URIRef("http://www.w3.org/2002/07/owl#"))
+ print(g.serialize(format="pretty-xml"))
class ListTest(unittest.TestCase):
def testFalseElement(self):
- g = Graph().parse(data=DATA_FALSE_ELEMENT, format='nt')
+ g = Graph().parse(data=DATA_FALSE_ELEMENT, format="nt")
self.assertEqual(
- len(list(g.items(URIRef('http://example.org/#ThreeMemberList')))), 3)
+ len(list(g.items(URIRef("http://example.org/#ThreeMemberList")))), 3
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/test/test_rdfxml.py b/test/test_rdfxml.py
index 94275e72..845a9a7d 100644
--- a/test/test_rdfxml.py
+++ b/test/test_rdfxml.py
@@ -8,7 +8,7 @@ import unittest
import os
import os.path
-from six.moves.urllib.request import url2pathname, urlopen
+from urllib.request import url2pathname, urlopen
from rdflib import RDF, RDFS, URIRef, BNode, Literal, Namespace, Graph
from rdflib.exceptions import ParserError
@@ -42,7 +42,10 @@ class TestStore(Graph):
if not isinstance(s, BNode) and not isinstance(o, BNode):
if not (s, p, o) in self.expected:
m = "Triple not in expected result: %s, %s, %s" % (
- s.n3(), p.n3(), o.n3())
+ s.n3(),
+ p.n3(),
+ o.n3(),
+ )
if verbose:
write(m)
# raise Exception(m)
@@ -73,7 +76,7 @@ def cached_file(url):
folder = os.path.dirname(fpath)
if not os.path.exists(folder):
os.makedirs(folder)
- f = open(fpath, 'w')
+ f = open(fpath, "w")
try:
f.write(urlopen(url).read())
finally:
@@ -85,7 +88,7 @@ RDFCOREBASE = "http://www.w3.org/2000/10/rdf-tests/rdfcore/"
def relative(url):
- return url[len(RDFCOREBASE):]
+ return url[len(RDFCOREBASE) :]
def resolve(rel):
@@ -164,15 +167,16 @@ def _testNegative(uri, manifest):
class ParserTestCase(unittest.TestCase):
- store = 'default'
- path = 'store'
+ store = "default"
+ path = "store"
slow = True
def setUp(self):
self.manifest = manifest = Graph(store=self.store)
manifest.open(self.path)
- manifest.load(cached_file(
- "http://www.w3.org/2000/10/rdf-tests/rdfcore/Manifest.rdf"))
+ manifest.load(
+ cached_file("http://www.w3.org/2000/10/rdf-tests/rdfcore/Manifest.rdf")
+ )
def tearDown(self):
self.manifest.close()
@@ -188,8 +192,7 @@ class ParserTestCase(unittest.TestCase):
result = _testNegative(neg, manifest)
total += 1
num_failed += result
- self.assertEqual(
- num_failed, 0, "Failed: %s of %s." % (num_failed, total))
+ self.assertEqual(num_failed, 0, "Failed: %s of %s." % (num_failed, total))
def testPositive(self):
manifest = self.manifest
@@ -213,8 +216,7 @@ class ParserTestCase(unittest.TestCase):
results.add((test, RDF.type, RESULT["FailingRun"]))
total += 1
num_failed += result
- self.assertEqual(
- num_failed, 0, "Failed: %s of %s." % (num_failed, total))
+ self.assertEqual(num_failed, 0, "Failed: %s of %s." % (num_failed, total))
RESULT = Namespace("http://www.w3.org/2002/03owlt/resultsOntology#")
@@ -231,12 +233,14 @@ results.add((system, RDFS.comment, Literal("")))
if __name__ == "__main__":
manifest = Graph()
- manifest.load(cached_file(
- "http://www.w3.org/2000/10/rdf-tests/rdfcore/Manifest.rdf"))
+ manifest.load(
+ cached_file("http://www.w3.org/2000/10/rdf-tests/rdfcore/Manifest.rdf")
+ )
import sys
import getopt
+
try:
- optlist, args = getopt.getopt(sys.argv[1:], 'h:', ["help"])
+ optlist, args = getopt.getopt(sys.argv[1:], "h:", ["help"])
except getopt.GetoptError as msg:
write(msg)
# usage()
diff --git a/test/test_roundtrip.py b/test/test_roundtrip.py
index 9dfed952..149e9eb5 100644
--- a/test/test_roundtrip.py
+++ b/test/test_roundtrip.py
@@ -4,8 +4,10 @@ import rdflib.compare
try:
from .test_nt_suite import all_nt_files
+
assert all_nt_files
from .test_n3_suite import all_n3_files
+
assert all_n3_files
except:
from test.test_nt_suite import all_nt_files
@@ -28,10 +30,13 @@ tests roundtripping through rdf/xml with only the literals-02 file
SKIP = [
- ('xml', 'test/n3/n3-writer-test-29.n3'), # has predicates that cannot be shortened to strict qnames
- ('xml', 'test/nt/qname-02.nt'), # uses a property that cannot be qname'd
- ('trix', 'test/n3/strquot.n3'), # contains charachters forbidden by the xml spec
- ('xml', 'test/n3/strquot.n3'), # contains charachters forbidden by the xml spec
+ (
+ "xml",
+ "test/n3/n3-writer-test-29.n3",
+ ), # has predicates that cannot be shortened to strict qnames
+ ("xml", "test/nt/qname-02.nt"), # uses a property that cannot be qname'd
+ ("trix", "test/n3/strquot.n3"),  # contains characters forbidden by the xml spec
+ ("xml", "test/n3/strquot.n3"),  # contains characters forbidden by the xml spec
]
@@ -78,11 +83,9 @@ def test_cases():
global formats
if not formats:
serializers = set(
- x.name for x in rdflib.plugin.plugins(
- None, rdflib.plugin.Serializer))
- parsers = set(
- x.name for x in rdflib.plugin.plugins(
- None, rdflib.plugin.Parser))
+ x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Serializer)
+ )
+ parsers = set(x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Parser))
formats = parsers.intersection(serializers)
for testfmt in formats:
@@ -97,15 +100,14 @@ def test_n3():
global formats
if not formats:
serializers = set(
- x.name for x in rdflib.plugin.plugins(
- None, rdflib.plugin.Serializer))
- parsers = set(
- x.name for x in rdflib.plugin.plugins(
- None, rdflib.plugin.Parser))
+ x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Serializer)
+ )
+ parsers = set(x.name for x in rdflib.plugin.plugins(None, rdflib.plugin.Parser))
formats = parsers.intersection(serializers)
for testfmt in formats:
- if "/" in testfmt: continue # skip double testing
+ if "/" in testfmt:
+ continue # skip double testing
for f, infmt in all_n3_files():
if (testfmt, f) not in SKIP:
yield roundtrip, (infmt, testfmt, f)
@@ -113,12 +115,13 @@ def test_n3():
if __name__ == "__main__":
import nose
+
if len(sys.argv) == 1:
nose.main(defaultTest=sys.argv[0])
elif len(sys.argv) == 2:
import test.test_roundtrip
+
test.test_roundtrip.formats = [sys.argv[1]]
nose.main(defaultTest=sys.argv[0], argv=sys.argv[:1])
else:
- roundtrip(
- (sys.argv[2], sys.argv[1], sys.argv[3]), verbose=True)
+ roundtrip((sys.argv[2], sys.argv[1], sys.argv[3]), verbose=True)
diff --git a/test/test_rules.py b/test/test_rules.py
index 008104da..c2496760 100644
--- a/test/test_rules.py
+++ b/test/test_rules.py
@@ -36,11 +36,15 @@ try:
def facts(g):
for s, p, o in g:
- if p != LOG.implies and not isinstance(s, BNode) and not isinstance(o, BNode):
+ if (
+ p != LOG.implies
+ and not isinstance(s, BNode)
+ and not isinstance(o, BNode)
+ ):
yield terms.Fact(_convert(s), _convert(p), _convert(o))
class PychinkoTestCase(unittest.TestCase):
- backend = 'default'
+ backend = "default"
tmppath = None
def setUp(self):
@@ -66,7 +70,8 @@ try:
source = self.g
interp.addFacts(set(facts(source)), initialSet=True)
interp.run()
- #_logger.debug("inferred facts: %s" % interp.inferredFacts)
+ # _logger.debug("inferred facts: %s" % interp.inferredFacts)
+
except ImportError as e:
print("Could not test Pychinko: %s" % e)
diff --git a/test/test_seq.py b/test/test_seq.py
index a1411649..7f177574 100644
--- a/test/test_seq.py
+++ b/test/test_seq.py
@@ -23,8 +23,8 @@ s = """\
class SeqTestCase(unittest.TestCase):
- backend = 'default'
- path = 'store'
+ backend = "default"
+ path = "store"
def setUp(self):
store = self.store = Graph(store=self.backend)
@@ -47,5 +47,5 @@ def test_suite():
return unittest.makeSuite(SeqTestCase)
-if __name__ == '__main__':
- unittest.main(defaultTest='test_suite')
+if __name__ == "__main__":
+ unittest.main(defaultTest="test_suite")
diff --git a/test/test_serializexml.py b/test/test_serializexml.py
index 0dfa5e69..6ca25a92 100644
--- a/test/test_serializexml.py
+++ b/test/test_serializexml.py
@@ -1,7 +1,6 @@
from rdflib.term import URIRef, BNode
from rdflib.namespace import RDFS
-from six import b, BytesIO
-
+from io import BytesIO
from rdflib.plugins.serializers.rdfxml import XMLSerializer
from rdflib.graph import ConjunctiveGraph
@@ -45,7 +44,9 @@ def _mangled_copy(g):
"Makes a copy of the graph, replacing all bnodes with the bnode ``_blank``."
gcopy = ConjunctiveGraph()
- def isbnode(v): return isinstance(v, BNode)
+ def isbnode(v):
+ return isinstance(v, BNode)
+
for s, p, o in g:
if isbnode(s):
s = _blank
@@ -116,43 +117,75 @@ class TestXMLSerializer(SerializerTestBase):
rdfs:seeAlso _:bnode2 .
"""
- testContentFormat = 'n3'
+ testContentFormat = "n3"
def test_result_fragments(self):
rdfXml = serialize(self.sourceGraph, self.serializer)
# print "--------"
# print rdfXml
# print "--------"
- assert b('<rdf:Description rdf:about="http://example.org/data/a">') in rdfXml
- assert b('<rdf:type rdf:resource="http://example.org/model/test#Test"/>') in rdfXml
- assert b('<rdf:Description rdf:about="http://example.org/data/b">') in rdfXml
- assert b('<name xml:lang="en">Bee</name>') in rdfXml
- assert b('<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>') in rdfXml
- assert b('<rdf:Description rdf:nodeID="') in rdfXml, "expected one identified bnode in serialized graph"
+ assert (
+ '<rdf:Description rdf:about="http://example.org/data/a">'.encode("latin-1")
+ in rdfXml
+ )
+ assert (
+ '<rdf:type rdf:resource="http://example.org/model/test#Test"/>'.encode(
+ "latin-1"
+ )
+ in rdfXml
+ )
+ assert (
+ '<rdf:Description rdf:about="http://example.org/data/b">'.encode("latin-1")
+ in rdfXml
+ )
+ assert '<name xml:lang="en">Bee</name>'.encode("latin-1") in rdfXml
+ assert (
+ '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode(
+ "latin-1"
+ )
+ in rdfXml
+ )
+ assert (
+ '<rdf:Description rdf:nodeID="'.encode("latin-1") in rdfXml
+ ), "expected one identified bnode in serialized graph"
def test_result_fragments_with_base(self):
- rdfXml = serialize(self.sourceGraph, self.serializer,
- extra_args={'base': "http://example.org/", 'xml_base': "http://example.org/"})
+ rdfXml = serialize(
+ self.sourceGraph,
+ self.serializer,
+ extra_args={
+ "base": "http://example.org/",
+ "xml_base": "http://example.org/",
+ },
+ )
# print "--------"
# print rdfXml
# print "--------"
- assert b('xml:base="http://example.org/"') in rdfXml
- assert b('<rdf:Description rdf:about="data/a">') in rdfXml
- assert b('<rdf:type rdf:resource="model/test#Test"/>') in rdfXml
- assert b('<rdf:Description rdf:about="data/b">') in rdfXml
- assert b('<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>') in rdfXml
- assert b('<rdf:Description rdf:nodeID="') in rdfXml, "expected one identified bnode in serialized graph"
+ assert 'xml:base="http://example.org/"'.encode("latin-1") in rdfXml
+ assert '<rdf:Description rdf:about="data/a">'.encode("latin-1") in rdfXml
+ assert '<rdf:type rdf:resource="model/test#Test"/>'.encode("latin-1") in rdfXml
+ assert '<rdf:Description rdf:about="data/b">'.encode("latin-1") in rdfXml
+ assert (
+ '<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>'.encode(
+ "latin-1"
+ )
+ in rdfXml
+ )
+ assert (
+ '<rdf:Description rdf:nodeID="'.encode("latin-1") in rdfXml
+ ), "expected one identified bnode in serialized graph"
def test_subClassOf_objects(self):
reparsedGraph = serialize_and_load(self.sourceGraph, self.serializer)
- _assert_expected_object_types_for_predicates(reparsedGraph,
- [RDFS.seeAlso, RDFS.subClassOf],
- [URIRef, BNode])
+ _assert_expected_object_types_for_predicates(
+ reparsedGraph, [RDFS.seeAlso, RDFS.subClassOf], [URIRef, BNode]
+ )
def _assert_expected_object_types_for_predicates(graph, predicates, types):
for s, p, o in graph:
if p in predicates:
someTrue = [isinstance(o, t) for t in types]
- assert True in someTrue, \
- "Bad type %s for object when predicate is <%s>." % (type(o), p)
+ assert (
+ True in someTrue
+ ), "Bad type %s for object when predicate is <%s>." % (type(o), p)
diff --git a/test/test_slice.py b/test/test_slice.py
index 27e6e49a..36c72ca8 100644
--- a/test/test_slice.py
+++ b/test/test_slice.py
@@ -1,10 +1,8 @@
-
from rdflib import Graph, URIRef
import unittest
class GraphSlice(unittest.TestCase):
-
def testSlice(self):
"""
We pervert the slice object,
@@ -13,10 +11,12 @@ class GraphSlice(unittest.TestCase):
all operations return generators over full triples
"""
- def sl(x, y): return self.assertEqual(len(list(x)), y)
+ def sl(x, y):
+ return self.assertEqual(len(list(x)), y)
+
+ def soe(x, y):
+ return self.assertEqual(set([a[2] for a in x]), set(y)) # equals objects
- def soe(x, y): return self.assertEqual(
- set([a[2] for a in x]), set(y)) # equals objects
g = self.graph
# Single terms are all trivial:
@@ -27,35 +27,35 @@ class GraphSlice(unittest.TestCase):
# single slice slices by s,p,o, with : used to split
# tell me everything about "tarek" (same as above)
- sl(g[self.tarek::], 2)
+ sl(g[self.tarek : :], 2)
# give me every "likes" relationship
- sl(g[:self.likes:], 5)
+ sl(g[: self.likes :], 5)
# give me every relationship to pizza
- sl(g[::self.pizza], 3)
+ sl(g[:: self.pizza], 3)
# give me everyone who likes pizza
- sl(g[:self.likes:self.pizza], 2)
+ sl(g[: self.likes : self.pizza], 2)
# does tarek like pizza?
- self.assertTrue(g[self.tarek:self.likes:self.pizza])
+ self.assertTrue(g[self.tarek : self.likes : self.pizza])
# More intesting is using paths
# everything hated or liked
- sl(g[:self.hates | self.likes], 7)
+ sl(g[: self.hates | self.likes], 7)
def setUp(self):
self.graph = Graph()
- self.michel = URIRef(u'michel')
- self.tarek = URIRef(u'tarek')
- self.bob = URIRef(u'bob')
- self.likes = URIRef(u'likes')
- self.hates = URIRef(u'hates')
- self.pizza = URIRef(u'pizza')
- self.cheese = URIRef(u'cheese')
+ self.michel = URIRef(u"michel")
+ self.tarek = URIRef(u"tarek")
+ self.bob = URIRef(u"bob")
+ self.likes = URIRef(u"likes")
+ self.hates = URIRef(u"hates")
+ self.pizza = URIRef(u"pizza")
+ self.cheese = URIRef(u"cheese")
self.addStuff()
@@ -77,5 +77,5 @@ class GraphSlice(unittest.TestCase):
self.graph.add((bob, hates, michel)) # gasp!
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_sparql.py b/test/test_sparql.py
index c3f289fc..fdf29c3c 100644
--- a/test/test_sparql.py
+++ b/test/test_sparql.py
@@ -11,24 +11,30 @@ def test_graph_prefix():
"""
g1 = Graph()
- g1.parse(data="""
+ g1.parse(
+ data="""
@prefix : <urn:ns1:> .
:foo <p> 42.
- """, format="n3")
+ """,
+ format="n3",
+ )
g2 = Graph()
- g2.parse(data="""
+ g2.parse(
+ data="""
@prefix : <urn:somethingelse:> .
<urn:ns1:foo> <p> 42.
- """, format="n3")
+ """,
+ format="n3",
+ )
assert isomorphic(g1, g2)
- q_str = ("""
+ q_str = """
PREFIX : <urn:ns1:>
SELECT ?val
WHERE { :foo ?p ?val }
- """)
+ """
q_prepared = prepareQuery(q_str)
expected = [(Literal(42),)]
@@ -61,21 +67,21 @@ def test_sparql_bnodelist():
"""
- prepareQuery('select * where { ?s ?p ( [] ) . }')
- prepareQuery('select * where { ?s ?p ( [ ?p2 ?o2 ] ) . }')
- prepareQuery('select * where { ?s ?p ( [ ?p2 ?o2 ] [] ) . }')
- prepareQuery('select * where { ?s ?p ( [] [ ?p2 ?o2 ] [] ) . }')
+ prepareQuery("select * where { ?s ?p ( [] ) . }")
+ prepareQuery("select * where { ?s ?p ( [ ?p2 ?o2 ] ) . }")
+ prepareQuery("select * where { ?s ?p ( [ ?p2 ?o2 ] [] ) . }")
+ prepareQuery("select * where { ?s ?p ( [] [ ?p2 ?o2 ] [] ) . }")
def test_complex_sparql_construct():
g = Graph()
- q = '''select ?subject ?study ?id where {
+ q = """select ?subject ?study ?id where {
?s a <urn:Person>;
<urn:partOf> ?c;
<urn:hasParent> ?mother, ?father;
<urn:id> [ a <urn:Identifier>; <urn:has-value> ?id].
- }'''
+ }"""
g.query(q)
@@ -84,8 +90,7 @@ def test_sparql_update_with_bnode():
Test if the blank node is inserted correctly.
"""
graph = Graph()
- graph.update(
- "INSERT DATA { _:blankA <urn:type> <urn:Blank> }")
+ graph.update("INSERT DATA { _:blankA <urn:type> <urn:Blank> }")
for t in graph.triples((None, None, None)):
assert isinstance(t[0], BNode)
eq_(t[1].n3(), "<urn:type>")
@@ -97,9 +102,8 @@ def test_sparql_update_with_bnode_serialize_parse():
Test if the blank node is inserted correctly, can be serialized and parsed.
"""
graph = Graph()
- graph.update(
- "INSERT DATA { _:blankA <urn:type> <urn:Blank> }")
- string = graph.serialize(format='ntriples').decode('utf-8')
+ graph.update("INSERT DATA { _:blankA <urn:type> <urn:Blank> }")
+ string = graph.serialize(format="ntriples").decode("utf-8")
raised = False
try:
Graph().parse(data=string, format="ntriples")
@@ -108,6 +112,7 @@ def test_sparql_update_with_bnode_serialize_parse():
assert not raised
-if __name__ == '__main__':
+if __name__ == "__main__":
import nose
+
nose.main(defaultTest=__name__)
diff --git a/test/test_sparql_agg_distinct.py b/test/test_sparql_agg_distinct.py
index 7ab0f58a..39d6eb95 100644
--- a/test/test_sparql_agg_distinct.py
+++ b/test/test_sparql_agg_distinct.py
@@ -1,6 +1,6 @@
from rdflib import Graph
-query_tpl = '''
+query_tpl = """
SELECT ?x (MIN(?y_) as ?y) (%s(DISTINCT ?z_) as ?z) {
VALUES (?x ?y_ ?z_) {
("x1" 10 1)
@@ -8,42 +8,37 @@ SELECT ?x (MIN(?y_) as ?y) (%s(DISTINCT ?z_) as ?z) {
("x2" 20 2)
}
} GROUP BY ?x ORDER BY ?x
-'''
+"""
def test_group_concat_distinct():
g = Graph()
- results = g.query(query_tpl % 'GROUP_CONCAT')
+ results = g.query(query_tpl % "GROUP_CONCAT")
results = [[lit.toPython() for lit in line] for line in results]
# this is the tricky part
assert results[0][2] == "1", results[0][2]
# still check the whole result, to be on the safe side
- assert results == [
- ["x1", 10, "1"],
- ["x2", 20, "2"],
- ], results
+ assert results == [["x1", 10, "1"], ["x2", 20, "2"],], results
def test_sum_distinct():
g = Graph()
- results = g.query(query_tpl % 'SUM')
+ results = g.query(query_tpl % "SUM")
results = [[lit.toPython() for lit in line] for line in results]
# this is the tricky part
assert results[0][2] == 1, results[0][2]
# still check the whole result, to be on the safe side
- assert results == [
- ["x1", 10, 1],
- ["x2", 20, 2],
- ], results
+ assert results == [["x1", 10, 1], ["x2", 20, 2],], results
def test_avg_distinct():
g = Graph()
- results = g.query("""
+ results = g.query(
+ """
SELECT ?x (MIN(?y_) as ?y) (AVG(DISTINCT ?z_) as ?z) {
VALUES (?x ?y_ ?z_) {
("x1" 10 1)
@@ -52,23 +47,24 @@ def test_avg_distinct():
("x2" 20 2)
}
} GROUP BY ?x ORDER BY ?x
- """)
+ """
+ )
results = [[lit.toPython() for lit in line] for line in results]
# this is the tricky part
assert results[0][2] == 2, results[0][2]
# still check the whole result, to be on the safe side
- assert results == [
- ["x1", 10, 2],
- ["x2", 20, 2],
- ], results
+ assert results == [["x1", 10, 2], ["x2", 20, 2],], results
def test_count_distinct():
g = Graph()
- g.parse(format="turtle", publicID="http://example.org/", data="""
+ g.parse(
+ format="turtle",
+ publicID="http://example.org/",
+ data="""
@prefix : <> .
<#a>
@@ -83,26 +79,31 @@ def test_count_distinct():
:knows <#b>, <#c> ;
:age 20 .
- """)
+ """,
+ )
# Query 1: people knowing someone younger
- results = g.query("""
+ results = g.query(
+ """
PREFIX : <http://example.org/>
SELECT DISTINCT ?x {
?x :age ?ax ; :knows [ :age ?ay ].
FILTER( ?ax > ?ay )
}
- """)
+ """
+ )
assert len(results) == 2
# nQuery 2: count people knowing someone younger
- results = g.query("""
+ results = g.query(
+ """
PREFIX : <http://example.org/>
SELECT (COUNT(DISTINCT ?x) as ?cx) {
?x :age ?ax ; :knows [ :age ?ay ].
FILTER( ?ax > ?ay )
}
- """)
+ """
+ )
assert list(results)[0][0].toPython() == 2
diff --git a/test/test_sparql_agg_undef.py b/test/test_sparql_agg_undef.py
index 649a5a8c..f36e9eb5 100644
--- a/test/test_sparql_agg_undef.py
+++ b/test/test_sparql_agg_undef.py
@@ -1,6 +1,6 @@
from rdflib import Graph, Literal, Variable
-query_tpl = '''
+query_tpl = """
SELECT ?x (%s(?y_) as ?y) {
VALUES (?x ?y_ ?z) {
("x1" undef 1)
@@ -9,7 +9,7 @@ SELECT ?x (%s(?y_) as ?y) {
("x2" 42 4)
}
} GROUP BY ?x ORDER BY ?x
-'''
+"""
Y = Variable("y")
@@ -24,18 +24,20 @@ def template_tst(agg_func, first, second):
def test_aggregates():
- yield template_tst, 'SUM', Literal(0), Literal(42)
- yield template_tst, 'MIN', None, Literal(42)
- yield template_tst, 'MAX', None, Literal(42)
+ yield template_tst, "SUM", Literal(0), Literal(42)
+ yield template_tst, "MIN", None, Literal(42)
+ yield template_tst, "MAX", None, Literal(42)
# yield template_tst, 'AVG', Literal(0), Literal(42)
- yield template_tst, 'SAMPLE', None, Literal(42)
- yield template_tst, 'COUNT', Literal(0), Literal(1)
- yield template_tst, 'GROUP_CONCAT', Literal(''), Literal("42")
+ yield template_tst, "SAMPLE", None, Literal(42)
+ yield template_tst, "COUNT", Literal(0), Literal(1)
+ yield template_tst, "GROUP_CONCAT", Literal(""), Literal("42")
def test_group_by_null():
g = Graph()
- results = list(g.query("""
+ results = list(
+ g.query(
+ """
SELECT ?x ?y (AVG(?z) as ?az) {
VALUES (?x ?y ?z) {
(1 undef 10)
@@ -46,7 +48,9 @@ def test_group_by_null():
}
} GROUP BY ?x ?y
ORDER BY ?x
- """))
+ """
+ )
+ )
assert len(results) == 2
assert results[0][0] == Literal(1)
assert results[1][0] == Literal(2)
diff --git a/test/test_sparql_construct_bindings.py b/test/test_sparql_construct_bindings.py
new file mode 100644
index 00000000..8f8240b2
--- /dev/null
+++ b/test/test_sparql_construct_bindings.py
@@ -0,0 +1,39 @@
+from rdflib import Graph, URIRef, Literal, BNode
+from rdflib.plugins.sparql import prepareQuery
+from rdflib.compare import isomorphic
+
+import unittest
+from nose.tools import eq_
+
+
+class TestConstructInitBindings(unittest.TestCase):
+ def test_construct_init_bindings(self):
+ """
+ This is issue https://github.com/RDFLib/rdflib/issues/1001
+ """
+
+ g1 = Graph()
+
+ q_str = """
+ PREFIX : <urn:ns1:>
+ CONSTRUCT {
+ ?uri :prop1 ?val1;
+ :prop2 ?c .
+ }
+ WHERE {
+ bind(uri(concat("urn:ns1:", ?a)) as ?uri)
+ bind(?b as ?val1)
+ }
+ """
+ q_prepared = prepareQuery(q_str)
+
+ expected = [
+ (URIRef("urn:ns1:A"), URIRef("urn:ns1:prop1"), Literal("B")),
+ (URIRef("urn:ns1:A"), URIRef("urn:ns1:prop2"), Literal("C")),
+ ]
+ results = g1.query(
+ q_prepared,
+ initBindings={"a": Literal("A"), "b": Literal("B"), "c": Literal("C")},
+ )
+
+ eq_(sorted(results, key=lambda x: str(x[1])), expected)
diff --git a/test/test_sparql_service.py b/test/test_sparql_service.py
index 45cb84a0..550bfcb2 100644
--- a/test/test_sparql_service.py
+++ b/test/test_sparql_service.py
@@ -5,7 +5,7 @@ from rdflib.compare import isomorphic
def test_service():
g = Graph()
- q = '''select ?dbpHypernym ?dbpComment
+ q = """select ?dbpHypernym ?dbpComment
where
{ service <http://DBpedia.org/sparql>
{ select ?dbpHypernym ?dbpComment
@@ -15,7 +15,7 @@ def test_service():
<http://purl.org/linguistics/gold/hypernym> ?dbpHypernym ;
<http://www.w3.org/2000/01/rdf-schema#comment> ?dbpComment .
- } } } limit 2'''
+ } } } limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -25,7 +25,7 @@ def test_service():
def test_service_with_bind():
g = Graph()
- q = '''select ?dbpHypernym ?dbpComment ?dbpDeathPlace
+ q = """select ?dbpHypernym ?dbpComment ?dbpDeathPlace
where
{ bind (<http://dbpedia.org/resource/Eltham> as ?dbpDeathPlace)
service <http://DBpedia.org/sparql>
@@ -37,7 +37,7 @@ def test_service_with_bind():
<http://www.w3.org/2000/01/rdf-schema#comment> ?dbpComment ;
<http://dbpedia.org/ontology/deathPlace> ?dbpDeathPlace .
- } } } limit 2'''
+ } } } limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -47,7 +47,7 @@ def test_service_with_bind():
def test_service_with_values():
g = Graph()
- q = '''select ?dbpHypernym ?dbpComment ?dbpDeathPlace
+ q = """select ?dbpHypernym ?dbpComment ?dbpDeathPlace
where
{ values (?dbpHypernym ?dbpDeathPlace) {(<http://dbpedia.org/resource/Leveller> <http://dbpedia.org/resource/London>) (<http://dbpedia.org/resource/Leveller> <http://dbpedia.org/resource/Eltham>)}
service <http://DBpedia.org/sparql>
@@ -59,7 +59,7 @@ def test_service_with_values():
<http://www.w3.org/2000/01/rdf-schema#comment> ?dbpComment ;
<http://dbpedia.org/ontology/deathPlace> ?dbpDeathPlace .
- } } } limit 2'''
+ } } } limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -69,13 +69,13 @@ def test_service_with_values():
def test_service_with_implicit_select():
g = Graph()
- q = '''select ?s ?p ?o
+ q = """select ?s ?p ?o
where
{
service <http://DBpedia.org/sparql>
{
values (?s ?p ?o) {(<http://example.org/a> <http://example.org/b> 1) (<http://example.org/a> <http://example.org/b> 2)}
- }} limit 2'''
+ }} limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -85,30 +85,31 @@ def test_service_with_implicit_select():
def test_service_with_implicit_select_and_prefix():
g = Graph()
- q = '''prefix ex:<http://example.org/>
+ q = """prefix ex:<http://example.org/>
select ?s ?p ?o
where
{
service <http://DBpedia.org/sparql>
{
values (?s ?p ?o) {(ex:a ex:b 1) (<http://example.org/a> <http://example.org/b> 2)}
- }} limit 2'''
+ }} limit 2"""
results = g.query(q)
assert len(results) == 2
for r in results:
assert len(r) == 3
+
def test_service_with_implicit_select_and_base():
g = Graph()
- q = '''base <http://example.org/>
+ q = """base <http://example.org/>
select ?s ?p ?o
where
{
service <http://DBpedia.org/sparql>
{
values (?s ?p ?o) {(<a> <b> 1) (<a> <b> 2)}
- }} limit 2'''
+ }} limit 2"""
results = g.query(q)
assert len(results) == 2
@@ -116,7 +117,21 @@ def test_service_with_implicit_select_and_base():
assert len(r) == 3
-#def test_with_fixture(httpserver):
+def test_service_with_implicit_select_and_allcaps():
+ g = Graph()
+ q = """SELECT ?s
+ WHERE
+ {
+ SERVICE <http://dbpedia.org/sparql>
+ {
+ ?s <http://purl.org/linguistics/gold/hypernym> <http://dbpedia.org/resource/Leveller> .
+ }
+ } LIMIT 3"""
+ results = g.query(q)
+ assert len(results) == 3
+
+
+# def test_with_fixture(httpserver):
# httpserver.expect_request("/sparql/?query=SELECT * WHERE ?s ?p ?o").respond_with_json({"vars": ["s","p","o"], "bindings":[]})
# test_server = httpserver.url_for('/sparql')
# g = Graph()
@@ -125,7 +140,7 @@ def test_service_with_implicit_select_and_base():
# assert len(results) == 0
-if __name__ == '__main__':
+if __name__ == "__main__":
# import nose
# nose.main(defaultTest=__name__)
test_service()
diff --git a/test/test_sparqlstore.py b/test/test_sparqlstore.py
index 26a69460..38a8b481 100644
--- a/test/test_sparqlstore.py
+++ b/test/test_sparqlstore.py
@@ -1,10 +1,12 @@
from rdflib import Graph, URIRef, Literal
-from six.moves.urllib.request import urlopen
-import os
+from urllib.request import urlopen
import unittest
from nose import SkipTest
from requests import HTTPError
-
+from http.server import BaseHTTPRequestHandler, HTTPServer
+import socket
+from threading import Thread
+import requests
try:
assert len(urlopen("http://dbpedia.org/sparql").read()) > 0
@@ -13,7 +15,7 @@ except:
class SPARQLStoreDBPediaTestCase(unittest.TestCase):
- store_name = 'SPARQLStore'
+ store_name = "SPARQLStore"
path = "http://dbpedia.org/sparql"
storetest = True
create = False
@@ -39,8 +41,8 @@ class SPARQLStoreDBPediaTestCase(unittest.TestCase):
{ ?s a xyzzy:Concept ; xyzzy:prefLabel ?label . } LIMIT 10
"""
res = self.graph.query(
- query,
- initNs={"xyzzy": "http://www.w3.org/2004/02/skos/core#"})
+ query, initNs={"xyzzy": "http://www.w3.org/2004/02/skos/core#"}
+ )
for i in res:
assert type(i[0]) == Literal, i[0].n3()
@@ -49,10 +51,7 @@ class SPARQLStoreDBPediaTestCase(unittest.TestCase):
SELECT ?label WHERE
{ ?s a xyzzy:Concept ; xyzzy:prefLabel ?label . } LIMIT 10
"""
- self.assertRaises(
- HTTPError,
- self.graph.query,
- query)
+ self.assertRaises(HTTPError, self.graph.query, query)
def test_query_with_added_prolog(self):
prologue = """\
@@ -67,5 +66,92 @@ class SPARQLStoreDBPediaTestCase(unittest.TestCase):
assert type(i[0]) == Literal, i[0].n3()
-if __name__ == '__main__':
+class SPARQLStoreUpdateTestCase(unittest.TestCase):
+ def setUp(self):
+ port = self.setup_mocked_endpoint()
+ self.graph = Graph(store="SPARQLUpdateStore", identifier=URIRef("urn:ex"))
+ self.graph.open(
+ (
+ "http://localhost:{port}/query".format(port=port),
+ "http://localhost:{port}/update".format(port=port),
+ ),
+ create=False,
+ )
+ ns = list(self.graph.namespaces())
+ assert len(ns) > 0, ns
+
+ def setup_mocked_endpoint(self):
+ # Configure mock server.
+ s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
+ s.bind(("localhost", 0))
+ address, port = s.getsockname()
+ s.close()
+ mock_server = HTTPServer(("localhost", port), SPARQL11ProtocolStoreMock)
+
+ # Start running mock server in a separate thread.
+ # Daemon threads automatically shut down when the main process exits.
+ mock_server_thread = Thread(target=mock_server.serve_forever)
+ mock_server_thread.setDaemon(True)
+ mock_server_thread.start()
+ print(
+ "Started mocked sparql endpoint on http://localhost:{port}/".format(
+ port=port
+ )
+ )
+ return port
+
+ def tearDown(self):
+ self.graph.close()
+
+ def test_Query(self):
+ query = "insert data {<urn:s> <urn:p> <urn:o>}"
+ res = self.graph.update(query)
+ print(res)
+
+
+class SPARQL11ProtocolStoreMock(BaseHTTPRequestHandler):
+ def do_POST(self):
+ """
+ If the body should be analysed as well, just use:
+ ```
+ body = self.rfile.read(int(self.headers['Content-Length'])).decode()
+ print(body)
+ ```
+ """
+ contenttype = self.headers.get("Content-Type")
+ if self.path == "/query":
+ if self.headers.get("Content-Type") == "application/sparql-query":
+ pass
+ elif (
+ self.headers.get("Content-Type") == "application/x-www-form-urlencoded"
+ ):
+ pass
+ else:
+ self.send_response(requests.codes.not_acceptable)
+ self.end_headers()
+ elif self.path == "/update":
+ if self.headers.get("Content-Type") == "application/sparql-update":
+ pass
+ elif (
+ self.headers.get("Content-Type") == "application/x-www-form-urlencoded"
+ ):
+ pass
+ else:
+ self.send_response(requests.codes.not_acceptable)
+ self.end_headers()
+ else:
+ self.send_response(requests.codes.not_found)
+ self.end_headers()
+ self.send_response(requests.codes.ok)
+ self.end_headers()
+ return
+
+ def do_GET(self):
+ # Process an HTTP GET request and return a response with an HTTP 200 status.
+ self.send_response(requests.codes.ok)
+ self.end_headers()
+ return
+
+
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_sparqlupdatestore.py b/test/test_sparqlupdatestore.py
index 161ed6a0..b6dcedbd 100644
--- a/test/test_sparqlupdatestore.py
+++ b/test/test_sparqlupdatestore.py
@@ -7,11 +7,10 @@ import unittest
import re
from rdflib import ConjunctiveGraph, URIRef, Literal, BNode, Graph
-from six import text_type
-from six.moves.urllib.request import urlopen
+from urllib.request import urlopen
-HOST = 'http://localhost:3031'
-DB = '/db/'
+HOST = "http://localhost:3031"
+DB = "/db/"
# this assumes SPARQL1.1 query/update endpoints running locally at
# http://localhost:3031/db/
@@ -25,23 +24,22 @@ DB = '/db/'
# THIS WILL DELETE ALL DATA IN THE /db dataset
-michel = URIRef(u'urn:michel')
-tarek = URIRef(u'urn:tarek')
-bob = URIRef(u'urn:bob')
-likes = URIRef(u'urn:likes')
-hates = URIRef(u'urn:hates')
-pizza = URIRef(u'urn:pizza')
-cheese = URIRef(u'urn:cheese')
+michel = URIRef("urn:michel")
+tarek = URIRef("urn:tarek")
+bob = URIRef("urn:bob")
+likes = URIRef("urn:likes")
+hates = URIRef("urn:hates")
+pizza = URIRef("urn:pizza")
+cheese = URIRef("urn:cheese")
-graphuri = URIRef('urn:graph')
-othergraphuri = URIRef('urn:othergraph')
+graphuri = URIRef("urn:graph")
+othergraphuri = URIRef("urn:othergraph")
class TestSparql11(unittest.TestCase):
-
def setUp(self):
self.longMessage = True
- self.graph = ConjunctiveGraph('SPARQLUpdateStore')
+ self.graph = ConjunctiveGraph("SPARQLUpdateStore")
root = HOST + DB
self.graph.open((root + "sparql", root + "update"))
@@ -63,8 +61,8 @@ class TestSparql11(unittest.TestCase):
g2 = self.graph.get_context(othergraphuri)
g2.add((michel, likes, pizza))
- self.assertEqual(3, len(g), 'graph contains 3 triples')
- self.assertEqual(1, len(g2), 'other graph contains 1 triple')
+ self.assertEqual(3, len(g), "graph contains 3 triples")
+ self.assertEqual(1, len(g2), "other graph contains 1 triple")
r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
self.assertEqual(2, len(list(r)), "two people like pizza")
@@ -73,8 +71,9 @@ class TestSparql11(unittest.TestCase):
self.assertEqual(2, len(list(r)), "two people like pizza")
# Test initBindings
- r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
- initBindings={'s': tarek})
+ r = g.query(
+ "SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }", initBindings={"s": tarek}
+ )
self.assertEqual(1, len(list(r)), "i was asking only about tarek")
r = g.triples((tarek, likes, pizza))
@@ -95,7 +94,7 @@ class TestSparql11(unittest.TestCase):
g2.add((bob, likes, pizza))
g.add((tarek, hates, cheese))
- self.assertEqual(2, len(g), 'graph contains 2 triples')
+ self.assertEqual(2, len(g), "graph contains 2 triples")
# the following are actually bad tests as they depend on your endpoint,
# as pointed out in the sparqlstore.py code:
@@ -107,15 +106,19 @@ class TestSparql11(unittest.TestCase):
##
# Fuseki/TDB has a flag for specifying that the default graph
# is the union of all graphs (tdb:unionDefaultGraph in the Fuseki config).
- self.assertEqual(3, len(self.graph),
- 'default union graph should contain three triples but contains:\n'
- '%s' % list(self.graph))
+ self.assertEqual(
+ 3,
+ len(self.graph),
+ "default union graph should contain three triples but contains:\n"
+ "%s" % list(self.graph),
+ )
r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
self.assertEqual(2, len(list(r)), "two people like pizza")
- r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
- initBindings={'s': tarek})
+ r = self.graph.query(
+ "SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }", initBindings={"s": tarek}
+ )
self.assertEqual(1, len(list(r)), "i was asking only about tarek")
r = self.graph.triples((tarek, likes, pizza))
@@ -130,44 +133,47 @@ class TestSparql11(unittest.TestCase):
self.assertEqual(1, len(list(r)), "only tarek likes pizza")
def testUpdate(self):
- self.graph.update("INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> . } }")
+ self.graph.update(
+ "INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> . } }"
+ )
g = self.graph.get_context(graphuri)
- self.assertEqual(1, len(g), 'graph contains 1 triples')
+ self.assertEqual(1, len(g), "graph contains 1 triples")
def testUpdateWithInitNs(self):
self.graph.update(
"INSERT DATA { GRAPH ns:graph { ns:michel ns:likes ns:pizza . } }",
- initNs={'ns': URIRef('urn:')}
+ initNs={"ns": URIRef("urn:")},
)
g = self.graph.get_context(graphuri)
self.assertEqual(
set(g.triples((None, None, None))),
set([(michel, likes, pizza)]),
- 'only michel likes pizza'
+ "only michel likes pizza",
)
def testUpdateWithInitBindings(self):
self.graph.update(
"INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WherE { }",
initBindings={
- 'a': URIRef('urn:michel'),
- 'b': URIRef('urn:likes'),
- 'c': URIRef('urn:pizza'),
- }
+ "a": URIRef("urn:michel"),
+ "b": URIRef("urn:likes"),
+ "c": URIRef("urn:pizza"),
+ },
)
g = self.graph.get_context(graphuri)
self.assertEqual(
set(g.triples((None, None, None))),
set([(michel, likes, pizza)]),
- 'only michel likes pizza'
+ "only michel likes pizza",
)
def testUpdateWithBlankNode(self):
self.graph.update(
- "INSERT DATA { GRAPH <urn:graph> { _:blankA <urn:type> <urn:Blank> } }")
+ "INSERT DATA { GRAPH <urn:graph> { _:blankA <urn:type> <urn:Blank> } }"
+ )
g = self.graph.get_context(graphuri)
for t in g.triples((None, None, None)):
self.assertTrue(isinstance(t[0], BNode))
@@ -176,33 +182,34 @@ class TestSparql11(unittest.TestCase):
def testUpdateWithBlankNodeSerializeAndParse(self):
self.graph.update(
- "INSERT DATA { GRAPH <urn:graph> { _:blankA <urn:type> <urn:Blank> } }")
+ "INSERT DATA { GRAPH <urn:graph> { _:blankA <urn:type> <urn:Blank> } }"
+ )
g = self.graph.get_context(graphuri)
- string = g.serialize(format='ntriples').decode('utf-8')
+ string = g.serialize(format="ntriples").decode("utf-8")
raised = False
try:
Graph().parse(data=string, format="ntriples")
except Exception as e:
raised = True
- self.assertFalse(raised, 'Exception raised when parsing: ' + string)
+ self.assertFalse(raised, "Exception raised when parsing: " + string)
def testMultipleUpdateWithInitBindings(self):
self.graph.update(
"INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WHERE { };"
"INSERT { GRAPH <urn:graph> { ?d ?b ?c . } } WHERE { }",
initBindings={
- 'a': URIRef('urn:michel'),
- 'b': URIRef('urn:likes'),
- 'c': URIRef('urn:pizza'),
- 'd': URIRef('urn:bob'),
- }
+ "a": URIRef("urn:michel"),
+ "b": URIRef("urn:likes"),
+ "c": URIRef("urn:pizza"),
+ "d": URIRef("urn:bob"),
+ },
)
g = self.graph.get_context(graphuri)
self.assertEqual(
set(g.triples((None, None, None))),
set([(michel, likes, pizza), (bob, likes, pizza)]),
- 'michel and bob like pizza'
+ "michel and bob like pizza",
)
def testNamedGraphUpdate(self):
@@ -212,25 +219,31 @@ class TestSparql11(unittest.TestCase):
self.assertEqual(
set(g.triples((None, None, None))),
set([(michel, likes, pizza)]),
- 'only michel likes pizza'
+ "only michel likes pizza",
)
- r2 = "DELETE { <urn:michel> <urn:likes> <urn:pizza> } " + \
- "INSERT { <urn:bob> <urn:likes> <urn:pizza> } WHERE {}"
+ r2 = (
+ "DELETE { <urn:michel> <urn:likes> <urn:pizza> } "
+ + "INSERT { <urn:bob> <urn:likes> <urn:pizza> } WHERE {}"
+ )
g.update(r2)
self.assertEqual(
set(g.triples((None, None, None))),
set([(bob, likes, pizza)]),
- 'only bob likes pizza'
+ "only bob likes pizza",
)
says = URIRef("urn:says")
# Strings with unbalanced curly braces
- tricky_strs = ["With an unbalanced curly brace %s " % brace
- for brace in ["{", "}"]]
+ tricky_strs = [
+ "With an unbalanced curly brace %s " % brace for brace in ["{", "}"]
+ ]
for tricky_str in tricky_strs:
- r3 = """INSERT { ?b <urn:says> "%s" }
- WHERE { ?b <urn:likes> <urn:pizza>} """ % tricky_str
+ r3 = (
+ """INSERT { ?b <urn:says> "%s" }
+ WHERE { ?b <urn:likes> <urn:pizza>} """
+ % tricky_str
+ )
g.update(r3)
values = set()
@@ -254,16 +267,26 @@ class TestSparql11(unittest.TestCase):
r4strings.append(r"""'''9: adfk } <foo> #éï \\'''""")
r4strings.append("'''10: ad adsfj \n { \n sadfj'''")
- r4 = "\n".join([
- u'INSERT DATA { <urn:michel> <urn:says> %s } ;' % s
- for s in r4strings
- ])
+ r4 = "\n".join(
+ ["INSERT DATA { <urn:michel> <urn:says> %s } ;" % s for s in r4strings]
+ )
g.update(r4)
values = set()
for v in g.objects(michel, says):
- values.add(text_type(v))
- self.assertEqual(values, set([re.sub(r"\\(.)", r"\1", re.sub(
- r"^'''|'''$|^'|'$|" + r'^"""|"""$|^"|"$', r"", s)) for s in r4strings]))
+ values.add(str(v))
+ self.assertEqual(
+ values,
+ set(
+ [
+ re.sub(
+ r"\\(.)",
+ r"\1",
+ re.sub(r"^'''|'''$|^'|'$|" + r'^"""|"""$|^"|"$', r"", s),
+ )
+ for s in r4strings
+ ]
+ ),
+ )
# IRI Containing ' or #
# The fragment identifier must not be misinterpreted as a comment
@@ -275,11 +298,11 @@ class TestSparql11(unittest.TestCase):
g.update(r5)
values = set()
for v in g.objects(michel, hates):
- values.add(text_type(v))
- self.assertEqual(values, set([u"urn:foo'bar?baz;a=1&b=2#fragment", u"'}"]))
+ values.add(str(v))
+ self.assertEqual(values, set(["urn:foo'bar?baz;a=1&b=2#fragment", "'}"]))
# Comments
- r6 = u"""
+ r6 = """
INSERT DATA {
<urn:bob> <urn:hates> <urn:bob> . # No closing brace: }
<urn:bob> <urn:hates> <urn:michel>.
@@ -295,39 +318,40 @@ class TestSparql11(unittest.TestCase):
def testNamedGraphUpdateWithInitBindings(self):
g = self.graph.get_context(graphuri)
r = "INSERT { ?a ?b ?c } WHERE {}"
- g.update(r, initBindings={
- 'a': michel,
- 'b': likes,
- 'c': pizza
- })
+ g.update(r, initBindings={"a": michel, "b": likes, "c": pizza})
self.assertEqual(
set(g.triples((None, None, None))),
set([(michel, likes, pizza)]),
- 'only michel likes pizza'
+ "only michel likes pizza",
)
def testEmptyNamedGraph(self):
empty_graph_iri = "urn:empty-graph-1"
self.graph.update("CREATE GRAPH <%s>" % empty_graph_iri)
- named_graphs = [text_type(r[0]) for r in self.graph.query(
- "SELECT ?name WHERE { GRAPH ?name {} }")]
+ named_graphs = [
+ str(r[0]) for r in self.graph.query("SELECT ?name WHERE { GRAPH ?name {} }")
+ ]
# Some SPARQL endpoint backends (like TDB) are not able to find empty named graphs
# (at least with this query)
if empty_graph_iri in named_graphs:
- self.assertTrue(empty_graph_iri in [text_type(g.identifier)
- for g in self.graph.contexts()])
+ self.assertTrue(
+ empty_graph_iri in [str(g.identifier) for g in self.graph.contexts()]
+ )
def testEmptyLiteral(self):
# test for https://github.com/RDFLib/rdflib/issues/457
# also see test_issue457.py which is sparql store independent!
g = self.graph.get_context(graphuri)
- g.add((
- URIRef('http://example.com/s'),
- URIRef('http://example.com/p'),
- Literal('')))
+ g.add(
+ (
+ URIRef("http://example.com/s"),
+ URIRef("http://example.com/p"),
+ Literal(""),
+ )
+ )
o = tuple(g)[0][2]
- self.assertEqual(o, Literal(''), repr(o))
+ self.assertEqual(o, Literal(""), repr(o))
try:
@@ -336,5 +360,5 @@ except:
raise SkipTest(HOST + " is unavailable.")
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_swap_n3.py b/test/test_swap_n3.py
index c81378e1..f7071bec 100644
--- a/test/test_swap_n3.py
+++ b/test/test_swap_n3.py
@@ -2,6 +2,7 @@ from nose.exc import SkipTest
import os
import sys
import unittest
+
try:
maketrans = str.maketrans
except AttributeError:
@@ -42,22 +43,22 @@ qt = rdflib.Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-query#")
skiptests = [
- 'syntax_neg_single_quote',
- 'syntax_neg_literal_predicate',
- 'syntax_this_quantifiers',
- 'syntax_trailing_semicolon',
- 'syntax_neg_thisadoc',
- 'syntax_equals1',
- 'syntax_equals2',
- 'syntax_this_rules',
- 'syntax_neg_keywords3',
- 'syntax_zero_objects',
- 'syntax_neg_formula_predicate',
- 'syntax_zero_predicates',
+ "syntax_neg_single_quote",
+ "syntax_neg_literal_predicate",
+ "syntax_this_quantifiers",
+ "syntax_trailing_semicolon",
+ "syntax_neg_thisadoc",
+ "syntax_equals1",
+ "syntax_equals2",
+ "syntax_this_rules",
+ "syntax_neg_keywords3",
+ "syntax_zero_objects",
+ "syntax_neg_formula_predicate",
+ "syntax_zero_predicates",
# 'syntax_qvars1',
# 'syntax_qvars2',
# 'contexts',
- 'syntax_too_nested'
+ "syntax_too_nested",
]
@@ -81,7 +82,7 @@ def generictest(e):
def dir_to_uri(directory, sep=os.path.sep):
- '''
+ """
Convert a local path to a File URI.
>>> dir_to_uri('c:\\\\temp\\\\foo\\\\file.txt', sep='\\\\')
@@ -89,28 +90,36 @@ def dir_to_uri(directory, sep=os.path.sep):
>>> dir_to_uri('/tmp/foo/file.txt', sep='/')
'file:///tmp/foo/file.txt'
- '''
+ """
items = directory.split(sep)
- path = '/'.join(items)
- if path.startswith('/'):
+ path = "/".join(items)
+ if path.startswith("/"):
path = path[1:]
- return 'file:///%s' % (path,)
+ return "file:///%s" % (path,)
def test_cases():
from copy import deepcopy
+
g = rdflib.Graph()
- swap_dir = os.path.join(os.getcwd(), 'test', 'swap-n3')
- g.parse(os.path.join(swap_dir, 'n3-rdf.tests'), format="n3")
- g.parse(os.path.join(swap_dir, 'n3-full.tests'), format="n3")
+ swap_dir = os.path.join(os.getcwd(), "test", "swap-n3")
+ g.parse(os.path.join(swap_dir, "n3-rdf.tests"), format="n3")
+ g.parse(os.path.join(swap_dir, "n3-full.tests"), format="n3")
tfiles = []
- swap_dir_uri = dir_to_uri(swap_dir) + '/'
+ swap_dir_uri = dir_to_uri(swap_dir) + "/"
for tst in g.subjects():
- files = [str(tfile).replace('http://www.w3.org/2000/10/', swap_dir_uri)
- for tfile in g.objects(tst, rdflib.URIRef("http://www.w3.org/2004/11/n3test#inputDocument")) if tfile.endswith('n3')]
+ files = [
+ str(tfile).replace("http://www.w3.org/2000/10/", swap_dir_uri)
+ for tfile in g.objects(
+ tst, rdflib.URIRef("http://www.w3.org/2004/11/n3test#inputDocument")
+ )
+ if tfile.endswith("n3")
+ ]
tfiles += files
for tfile in set(tfiles):
- gname = tfile.split('/swap-n3/swap/test/')[1][:-3].translate(maketrans('-/','__'))
+ gname = tfile.split("/swap-n3/swap/test/")[1][:-3].translate(
+ maketrans("-/", "__")
+ )
e = Envelope(gname, tfile)
if gname in skiptests:
e.skip = True
@@ -119,6 +128,7 @@ def test_cases():
# e.skip = True
if sys.version_info[:2] == (2, 4):
import pickle
+
gjt = pickle.dumps(generictest)
gt = pickle.loads(gjt)
else:
@@ -130,25 +140,3 @@ def test_cases():
if __name__ == "__main__":
test_cases()
# unittest.main()
-
-
-"""
-Interesting failure in Python 2.4 ...
-
-======================================================================
-ERROR: Failure: TypeError (function() takes at least 2 arguments (0 given))
-----------------------------------------------------------------------
-Traceback (most recent call last):
- File ".../python2.4/site-packages/nose/loader.py", line 231, in generate
- for test in g():
- File ".../rdflib/test/test_swap_n3.py", line 95, in test_cases
- gt = deepcopy(generictest)
- File "/usr/local/python2.4/lib/python2.4/copy.py", line 204, in deepcopy
- y = _reconstruct(x, rv, 1, memo)
- File "/usr/local/python2.4/lib/python2.4/copy.py", line 336, in _reconstruct
- y = callable(*args)
- File "...py24/lib/python2.4/copy_reg.py", line 92, in __newobj__
- return cls.__new__(cls, *args)
-TypeError: function() takes at least 2 arguments (0 given)
-
-"""
diff --git a/test/test_term.py b/test/test_term.py
index 4dea9c3c..0363baba 100644
--- a/test/test_term.py
+++ b/test/test_term.py
@@ -10,13 +10,9 @@ from rdflib.term import URIRef, BNode, Literal, _is_valid_unicode
from rdflib.graph import QuotedGraph, Graph
from rdflib.namespace import XSD
-from six import PY3
-
def uformat(s):
- if PY3:
- return s.replace("u'", "'")
- return s
+ return s.replace("u'", "'")
class TestURIRefRepr(unittest.TestCase):
@@ -27,11 +23,12 @@ class TestURIRefRepr(unittest.TestCase):
def testSubclassNameAppearsInRepr(self):
class MyURIRef(URIRef):
pass
- x = MyURIRef('http://example.com/')
+
+ x = MyURIRef("http://example.com/")
self.assertEqual(repr(x), uformat("MyURIRef(u'http://example.com/')"))
def testGracefulOrdering(self):
- u = URIRef('cake')
+ u = URIRef("cake")
g = Graph()
a = u > u
a = u > BNode()
@@ -40,18 +37,17 @@ class TestURIRefRepr(unittest.TestCase):
class TestBNodeRepr(unittest.TestCase):
-
def testSubclassNameAppearsInRepr(self):
class MyBNode(BNode):
pass
+
x = MyBNode()
self.assertTrue(repr(x).startswith("MyBNode("))
class TestLiteral(unittest.TestCase):
-
def test_base64_values(self):
- b64msg = 'cmRmbGliIGlzIGNvb2whIGFsc28gaGVyZSdzIHNvbWUgYmluYXJ5IAAR83UC'
+ b64msg = "cmRmbGliIGlzIGNvb2whIGFsc28gaGVyZSdzIHNvbWUgYmluYXJ5IAAR83UC"
decoded_b64msg = base64.b64decode(b64msg)
lit = Literal(b64msg, datatype=XSD.base64Binary)
self.assertEqual(lit.value, decoded_b64msg)
@@ -60,30 +56,14 @@ class TestLiteral(unittest.TestCase):
def test_total_order(self):
types = {
XSD.dateTime: (
- '2001-01-01T00:00:00',
- '2001-01-01T00:00:00Z',
- '2001-01-01T00:00:00-00:00'
- ),
- XSD.date: (
- '2001-01-01',
- '2001-01-01Z',
- '2001-01-01-00:00'
- ),
- XSD.time: (
- '00:00:00',
- '00:00:00Z',
- '00:00:00-00:00'
- ),
- XSD.gYear: (
- '2001',
- '2001Z',
- '2001-00:00'
- ), # interval
- XSD.gYearMonth: (
- '2001-01',
- '2001-01Z',
- '2001-01-00:00'
+ "2001-01-01T00:00:00",
+ "2001-01-01T00:00:00Z",
+ "2001-01-01T00:00:00-00:00",
),
+ XSD.date: ("2001-01-01", "2001-01-01Z", "2001-01-01-00:00"),
+ XSD.time: ("00:00:00", "00:00:00Z", "00:00:00-00:00"),
+ XSD.gYear: ("2001", "2001Z", "2001-00:00"), # interval
+ XSD.gYearMonth: ("2001-01", "2001-01Z", "2001-01-00:00"),
}
literals = [
Literal(literal, datatype=t)
@@ -104,19 +84,19 @@ class TestLiteral(unittest.TestCase):
l1 = [
Literal(l, datatype=XSD.dateTime)
for l in [
- '2001-01-01T00:00:00',
- '2001-01-01T01:00:00',
- '2001-01-01T01:00:01',
- '2001-01-02T01:00:01',
- '2001-01-01T00:00:00Z',
- '2001-01-01T00:00:00-00:00',
- '2001-01-01T01:00:00Z',
- '2001-01-01T01:00:00-00:00',
- '2001-01-01T00:00:00-01:30',
- '2001-01-01T01:00:00-01:30',
- '2001-01-02T01:00:01Z',
- '2001-01-02T01:00:01-00:00',
- '2001-01-02T01:00:01-01:30'
+ "2001-01-01T00:00:00",
+ "2001-01-01T01:00:00",
+ "2001-01-01T01:00:01",
+ "2001-01-02T01:00:01",
+ "2001-01-01T00:00:00Z",
+ "2001-01-01T00:00:00-00:00",
+ "2001-01-01T01:00:00Z",
+ "2001-01-01T01:00:00-00:00",
+ "2001-01-01T00:00:00-01:30",
+ "2001-01-01T01:00:00-01:30",
+ "2001-01-02T01:00:01Z",
+ "2001-01-02T01:00:01-00:00",
+ "2001-01-02T01:00:01-01:30",
]
]
l2 = list(l1)
@@ -138,7 +118,12 @@ class TestLiteral(unittest.TestCase):
(3, Literal(float(1)), Literal(float(1)), Literal(float(2))),
(4, Literal(1), Literal(1.1), Literal(2.1, datatype=XSD.decimal)),
(5, Literal(1.1), Literal(1.1), Literal(2.2)),
- (6, Literal(Decimal(1)), Literal(Decimal(1.1)), Literal(Decimal(2.1), datatype=XSD.decimal)),
+ (
+ 6,
+ Literal(Decimal(1)),
+ Literal(Decimal(1.1)),
+ Literal(Decimal(2.1), datatype=XSD.decimal),
+ ),
(7, Literal(Decimal(1.1)), Literal(Decimal(1.1)), Literal(Decimal(2.2))),
(8, Literal(float(1)), Literal(float(1.1)), Literal(float(2.1))),
(9, Literal(float(1.1)), Literal(float(1.1)), Literal(float(2.2))),
@@ -148,27 +133,74 @@ class TestLiteral(unittest.TestCase):
(14, Literal(-1), Literal(-1.1), Literal(-2.1)),
(15, Literal(-1.1), Literal(-1.1), Literal(-2.2)),
(16, Literal(Decimal(-1)), Literal(Decimal(-1.1)), Literal(Decimal(-2.1))),
- (17, Literal(Decimal(-1.1)), Literal(Decimal(-1.1)), Literal(Decimal(-2.2))),
+ (
+ 17,
+ Literal(Decimal(-1.1)),
+ Literal(Decimal(-1.1)),
+ Literal(Decimal(-2.2)),
+ ),
(18, Literal(float(-1)), Literal(float(-1.1)), Literal(float(-2.1))),
(19, Literal(float(-1.1)), Literal(float(-1.1)), Literal(float(-2.2))),
-
(20, Literal(1), Literal(1.0), Literal(2.0)),
(21, Literal(1.0), Literal(1.0), Literal(2.0)),
(22, Literal(Decimal(1)), Literal(Decimal(1.0)), Literal(Decimal(2.0))),
(23, Literal(Decimal(1.0)), Literal(Decimal(1.0)), Literal(Decimal(2.0))),
(24, Literal(float(1)), Literal(float(1.0)), Literal(float(2.0))),
(25, Literal(float(1.0)), Literal(float(1.0)), Literal(float(2.0))),
-
- (26, Literal(1, datatype=XSD.integer), Literal(1, datatype=XSD.integer), Literal(2, datatype=XSD.integer)),
- (27, Literal(1, datatype=XSD.integer), Literal("1", datatype=XSD.integer), Literal("2", datatype=XSD.integer)),
- (28, Literal("1", datatype=XSD.integer), Literal("1", datatype=XSD.integer), Literal("2", datatype=XSD.integer)),
- (29, Literal("1"), Literal("1", datatype=XSD.integer), Literal("11", datatype=XSD.string)),
- (30, Literal(1), Literal("1", datatype=XSD.integer), Literal("2", datatype=XSD.integer)),
- (31, Literal(Decimal(1), datatype=XSD.decimal), Literal(Decimal(1), datatype=XSD.decimal), Literal(Decimal(2), datatype=XSD.decimal)),
- (32, Literal(Decimal(1)), Literal(Decimal(1), datatype=XSD.decimal), Literal(Decimal(2), datatype=XSD.decimal)),
- (33, Literal(float(1)), Literal(float(1), datatype=XSD.float), Literal(float(2), datatype=XSD.float)),
- (34, Literal(float(1), datatype=XSD.float), Literal(float(1), datatype=XSD.float), Literal(float(2), datatype=XSD.float)),
-
+ (
+ 26,
+ Literal(1, datatype=XSD.integer),
+ Literal(1, datatype=XSD.integer),
+ Literal(2, datatype=XSD.integer),
+ ),
+ (
+ 27,
+ Literal(1, datatype=XSD.integer),
+ Literal("1", datatype=XSD.integer),
+ Literal("2", datatype=XSD.integer),
+ ),
+ (
+ 28,
+ Literal("1", datatype=XSD.integer),
+ Literal("1", datatype=XSD.integer),
+ Literal("2", datatype=XSD.integer),
+ ),
+ (
+ 29,
+ Literal("1"),
+ Literal("1", datatype=XSD.integer),
+ Literal("11", datatype=XSD.string),
+ ),
+ (
+ 30,
+ Literal(1),
+ Literal("1", datatype=XSD.integer),
+ Literal("2", datatype=XSD.integer),
+ ),
+ (
+ 31,
+ Literal(Decimal(1), datatype=XSD.decimal),
+ Literal(Decimal(1), datatype=XSD.decimal),
+ Literal(Decimal(2), datatype=XSD.decimal),
+ ),
+ (
+ 32,
+ Literal(Decimal(1)),
+ Literal(Decimal(1), datatype=XSD.decimal),
+ Literal(Decimal(2), datatype=XSD.decimal),
+ ),
+ (
+ 33,
+ Literal(float(1)),
+ Literal(float(1), datatype=XSD.float),
+ Literal(float(2), datatype=XSD.float),
+ ),
+ (
+ 34,
+ Literal(float(1), datatype=XSD.float),
+ Literal(float(1), datatype=XSD.float),
+ Literal(float(2), datatype=XSD.float),
+ ),
(35, Literal(1), 1, Literal(2)),
(36, Literal(1), 1.0, Literal(2, datatype=XSD.decimal)),
(37, Literal(1.0), 1, Literal(2, datatype=XSD.decimal)),
@@ -177,14 +209,42 @@ class TestLiteral(unittest.TestCase):
(40, Literal(Decimal(1.0)), Decimal(1.0), Literal(Decimal(2.0))),
(41, Literal(float(1.0)), float(1), Literal(float(2.0))),
(42, Literal(float(1.0)), float(1.0), Literal(float(2.0))),
-
- (43, Literal(1, datatype=XSD.integer), "+1.1", Literal("1+1.1", datatype=XSD.string)),
- (44, Literal(1, datatype=XSD.integer), Literal("+1.1", datatype=XSD.string), Literal("1+1.1", datatype=XSD.string)),
- (45, Literal(Decimal(1.0), datatype=XSD.integer), Literal(u"1", datatype=XSD.string), Literal("11", datatype=XSD.string)),
- (46, Literal(1.1, datatype=XSD.integer), Literal("1", datatype=XSD.string), Literal("1.11", datatype=XSD.string)),
-
- (47, Literal(1, datatype=XSD.integer), None, Literal(1, datatype=XSD.integer)),
- (48, Literal("1", datatype=XSD.string), None, Literal("1", datatype=XSD.string)),
+ (
+ 43,
+ Literal(1, datatype=XSD.integer),
+ "+1.1",
+ Literal("1+1.1", datatype=XSD.string),
+ ),
+ (
+ 44,
+ Literal(1, datatype=XSD.integer),
+ Literal("+1.1", datatype=XSD.string),
+ Literal("1+1.1", datatype=XSD.string),
+ ),
+ (
+ 45,
+ Literal(Decimal(1.0), datatype=XSD.integer),
+ Literal(u"1", datatype=XSD.string),
+ Literal("11", datatype=XSD.string),
+ ),
+ (
+ 46,
+ Literal(1.1, datatype=XSD.integer),
+ Literal("1", datatype=XSD.string),
+ Literal("1.11", datatype=XSD.string),
+ ),
+ (
+ 47,
+ Literal(1, datatype=XSD.integer),
+ None,
+ Literal(1, datatype=XSD.integer),
+ ),
+ (
+ 48,
+ Literal("1", datatype=XSD.string),
+ None,
+ Literal("1", datatype=XSD.string),
+ ),
]
for case in cases:
@@ -200,22 +260,26 @@ class TestLiteral(unittest.TestCase):
if not case_passed:
print(case[1], case[2])
print("expected: " + case[3] + ", " + case[3].datatype)
- print("actual: " + (case[1] + case[2]) + ", " + (case[1] + case[2]).datatype)
+ print(
+ "actual: "
+ + (case[1] + case[2])
+ + ", "
+ + (case[1] + case[2]).datatype
+ )
self.assertTrue(case_passed, "Case " + str(case[0]) + " failed")
class TestValidityFunctions(unittest.TestCase):
-
def test_is_valid_unicode(self):
testcase_list = (
(None, True),
(1, True),
- (['foo'], True),
- ({'foo': b'bar'}, True),
- ('foo', True),
- (b'foo\x00', True),
- (b'foo\xf3\x02', False)
+ (["foo"], True),
+ ({"foo": b"bar"}, True),
+ ("foo", True),
+ (b"foo\x00", True),
+ (b"foo\xf3\x02", False),
)
for val, expected in testcase_list:
self.assertEqual(_is_valid_unicode(val), expected)
diff --git a/test/test_trig.py b/test/test_trig.py
index dff6269d..90321c5c 100644
--- a/test/test_trig.py
+++ b/test/test_trig.py
@@ -3,87 +3,87 @@ import rdflib
import re
from nose import SkipTest
-from six import b
-TRIPLE = (rdflib.URIRef("http://example.com/s"),
- rdflib.RDFS.label,
- rdflib.Literal("example 1"))
+TRIPLE = (
+ rdflib.URIRef("http://example.com/s"),
+ rdflib.RDFS.label,
+ rdflib.Literal("example 1"),
+)
class TestTrig(unittest.TestCase):
-
def testEmpty(self):
g = rdflib.Graph()
- s = g.serialize(format='trig')
+ s = g.serialize(format="trig")
self.assertTrue(s is not None)
def testRepeatTriples(self):
g = rdflib.ConjunctiveGraph()
- g.get_context('urn:a').add((rdflib.URIRef('urn:1'),
- rdflib.URIRef('urn:2'),
- rdflib.URIRef('urn:3')))
+ g.get_context("urn:a").add(
+ (rdflib.URIRef("urn:1"), rdflib.URIRef("urn:2"), rdflib.URIRef("urn:3"))
+ )
- g.get_context('urn:b').add((rdflib.URIRef('urn:1'),
- rdflib.URIRef('urn:2'),
- rdflib.URIRef('urn:3')))
+ g.get_context("urn:b").add(
+ (rdflib.URIRef("urn:1"), rdflib.URIRef("urn:2"), rdflib.URIRef("urn:3"))
+ )
- self.assertEqual(len(g.get_context('urn:a')), 1)
- self.assertEqual(len(g.get_context('urn:b')), 1)
+ self.assertEqual(len(g.get_context("urn:a")), 1)
+ self.assertEqual(len(g.get_context("urn:b")), 1)
- s = g.serialize(format='trig')
- self.assertTrue(b('{}') not in s) # no empty graphs!
+ s = g.serialize(format="trig")
+ self.assertTrue("{}".encode("latin-1") not in s) # no empty graphs!
def testSameSubject(self):
g = rdflib.ConjunctiveGraph()
- g.get_context('urn:a').add((rdflib.URIRef('urn:1'),
- rdflib.URIRef('urn:p1'),
- rdflib.URIRef('urn:o1')))
+ g.get_context("urn:a").add(
+ (rdflib.URIRef("urn:1"), rdflib.URIRef("urn:p1"), rdflib.URIRef("urn:o1"))
+ )
- g.get_context('urn:b').add((rdflib.URIRef('urn:1'),
- rdflib.URIRef('urn:p2'),
- rdflib.URIRef('urn:o2')))
+ g.get_context("urn:b").add(
+ (rdflib.URIRef("urn:1"), rdflib.URIRef("urn:p2"), rdflib.URIRef("urn:o2"))
+ )
- self.assertEqual(len(g.get_context('urn:a')), 1)
- self.assertEqual(len(g.get_context('urn:b')), 1)
+ self.assertEqual(len(g.get_context("urn:a")), 1)
+ self.assertEqual(len(g.get_context("urn:b")), 1)
- s = g.serialize(format='trig')
+ s = g.serialize(format="trig")
- self.assertEqual(len(re.findall(b("p1"), s)), 1)
- self.assertEqual(len(re.findall(b("p2"), s)), 1)
+ self.assertEqual(len(re.findall("p1".encode("latin-1"), s)), 1)
+ self.assertEqual(len(re.findall("p2".encode("latin-1"), s)), 1)
- self.assertTrue(b('{}') not in s) # no empty graphs!
+ self.assertTrue("{}".encode("latin-1") not in s) # no empty graphs!
def testRememberNamespace(self):
g = rdflib.ConjunctiveGraph()
g.add(TRIPLE + (rdflib.URIRef("http://example.com/graph1"),))
# In 4.2.0 the first serialization would fail to include the
# prefix for the graph but later serialize() calls would work.
- first_out = g.serialize(format='trig')
- second_out = g.serialize(format='trig')
- self.assertTrue(b'@prefix ns1: <http://example.com/> .' in second_out)
- self.assertTrue(b'@prefix ns1: <http://example.com/> .' in first_out)
+ first_out = g.serialize(format="trig")
+ second_out = g.serialize(format="trig")
+ self.assertTrue(b"@prefix ns1: <http://example.com/> ." in second_out)
+ self.assertTrue(b"@prefix ns1: <http://example.com/> ." in first_out)
def testGraphQnameSyntax(self):
g = rdflib.ConjunctiveGraph()
g.add(TRIPLE + (rdflib.URIRef("http://example.com/graph1"),))
- out = g.serialize(format='trig')
- self.assertTrue(b'ns1:graph1 {' in out)
+ out = g.serialize(format="trig")
+ self.assertTrue(b"ns1:graph1 {" in out)
def testGraphUriSyntax(self):
g = rdflib.ConjunctiveGraph()
# getQName will not abbreviate this, so it should serialize as
# a '<...>' term.
g.add(TRIPLE + (rdflib.URIRef("http://example.com/foo."),))
- out = g.serialize(format='trig')
- self.assertTrue(b'<http://example.com/foo.> {' in out)
+ out = g.serialize(format="trig")
+ self.assertTrue(b"<http://example.com/foo.> {" in out)
def testBlankGraphIdentifier(self):
g = rdflib.ConjunctiveGraph()
g.add(TRIPLE + (rdflib.BNode(),))
- out = g.serialize(format='trig')
+ out = g.serialize(format="trig")
graph_label_line = out.splitlines()[-4]
- self.assertTrue(re.match(br'^_:[a-zA-Z0-9]+ \{', graph_label_line))
+ self.assertTrue(re.match(br"^_:[a-zA-Z0-9]+ \{", graph_label_line))
def testGraphParsing(self):
# should parse into single default graph context
@@ -91,7 +91,7 @@ class TestTrig(unittest.TestCase):
<http://example.com/thing#thing_a> <http://example.com/knows> <http://example.com/thing#thing_b> .
"""
g = rdflib.ConjunctiveGraph()
- g.parse(data=data, format='trig')
+ g.parse(data=data, format="trig")
self.assertEqual(len(list(g.contexts())), 1)
# should parse into single default graph context
@@ -101,7 +101,7 @@ class TestTrig(unittest.TestCase):
{ <http://example.com/thing#thing_c> <http://example.com/knows> <http://example.com/thing#thing_d> . }
"""
g = rdflib.ConjunctiveGraph()
- g.parse(data=data, format='trig')
+ g.parse(data=data, format="trig")
self.assertEqual(len(list(g.contexts())), 1)
# should parse into 2 contexts, one default, one named
@@ -115,12 +115,12 @@ class TestTrig(unittest.TestCase):
}
"""
g = rdflib.ConjunctiveGraph()
- g.parse(data=data, format='trig')
+ g.parse(data=data, format="trig")
self.assertEqual(len(list(g.contexts())), 2)
def testRoundTrips(self):
- raise SkipTest('skipped until 5.0')
+ raise SkipTest("skipped until 5.0")
data = """
<http://example.com/thing#thing_a> <http://example.com/knows> <http://example.com/thing#thing_b> .
@@ -133,17 +133,17 @@ class TestTrig(unittest.TestCase):
"""
g = rdflib.ConjunctiveGraph()
for i in range(5):
- g.parse(data=data, format='trig')
- data = g.serialize(format='trig')
+ g.parse(data=data, format="trig")
+ data = g.serialize(format="trig")
# output should only contain 1 mention of each resource/graph name
- self.assertEqual(data.count('thing_a'), 1)
- self.assertEqual(data.count('thing_b'), 1)
- self.assertEqual(data.count('thing_c'), 1)
- self.assertEqual(data.count('thing_d'), 1)
- self.assertEqual(data.count('thing_e'), 1)
- self.assertEqual(data.count('thing_f'), 1)
- self.assertEqual(data.count('graph_a'), 1)
+ self.assertEqual(data.count("thing_a"), 1)
+ self.assertEqual(data.count("thing_b"), 1)
+ self.assertEqual(data.count("thing_c"), 1)
+ self.assertEqual(data.count("thing_d"), 1)
+ self.assertEqual(data.count("thing_e"), 1)
+ self.assertEqual(data.count("thing_f"), 1)
+ self.assertEqual(data.count("graph_a"), 1)
def testDefaultGraphSerializesWithoutName(self):
data = """
@@ -152,10 +152,10 @@ class TestTrig(unittest.TestCase):
{ <http://example.com/thing#thing_c> <http://example.com/knows> <http://example.com/thing#thing_d> . }
"""
g = rdflib.ConjunctiveGraph()
- g.parse(data=data, format='trig')
- data = g.serialize(format='trig')
+ g.parse(data=data, format="trig")
+ data = g.serialize(format="trig")
- self.assertTrue(b('None') not in data)
+ self.assertTrue("None".encode("latin-1") not in data)
def testPrefixes(self):
@@ -172,9 +172,9 @@ class TestTrig(unittest.TestCase):
"""
cg = rdflib.ConjunctiveGraph()
- cg.parse(data=data, format='trig')
- data = cg.serialize(format='trig')
+ cg.parse(data=data, format="trig")
+ data = cg.serialize(format="trig")
- self.assert_(b('ns2: <http://ex.org/docs/') in data, data)
- self.assert_(b('<ns2:document1>') not in data, data)
- self.assert_(b('ns2:document1') in data, data)
+ self.assert_("ns2: <http://ex.org/docs/".encode("latin-1") in data, data)
+ self.assert_("<ns2:document1>".encode("latin-1") not in data, data)
+ self.assert_("ns2:document1".encode("latin-1") in data, data)
diff --git a/test/test_trig_w3c.py b/test/test_trig_w3c.py
index bb8588e0..d59a2f08 100644
--- a/test/test_trig_w3c.py
+++ b/test/test_trig_w3c.py
@@ -16,15 +16,15 @@ def trig(test):
g = ConjunctiveGraph()
try:
- base = 'http://www.w3.org/2013/TriGTests/' + split_uri(test.action)[1]
+ base = "http://www.w3.org/2013/TriGTests/" + split_uri(test.action)[1]
- g.parse(test.action, publicID=base, format='trig')
+ g.parse(test.action, publicID=base, format="trig")
if not test.syntax:
raise AssertionError("Input shouldn't have parsed!")
if test.result: # eval test
res = ConjunctiveGraph()
- res.parse(test.result, format='nquads')
+ res.parse(test.result, format="nquads")
if verbose:
@@ -32,13 +32,13 @@ def trig(test):
if not first and not second:
return
- print('===============================')
- print('TriG')
- print(g.serialize(format='nquads'))
- print('===============================')
- print('NQuads')
- print(res.serialize(format='nquads'))
- print('===============================')
+ print("===============================")
+ print("TriG")
+ print(g.serialize(format="nquads"))
+ print("===============================")
+ print("NQuads")
+ print(res.serialize(format="nquads"))
+ print("===============================")
print("Diff:")
# print "%d triples in both"%len(both)
@@ -50,9 +50,9 @@ def trig(test):
print("NQuads Only")
for t in second:
print(t)
- raise Exception('Graphs do not match!')
+ raise Exception("Graphs do not match!")
- assert isomorphic(g, res), 'graphs must be the same'
+ assert isomorphic(g, res), "graphs must be the same"
except:
if test.syntax:
@@ -63,12 +63,12 @@ testers = {
RDFT.TestTrigPositiveSyntax: trig,
RDFT.TestTrigNegativeSyntax: trig,
RDFT.TestTrigEval: trig,
- RDFT.TestTrigNegativeEval: trig
+ RDFT.TestTrigNegativeEval: trig,
}
def test_trig(tests=None):
- for t in nose_tests(testers, 'test/w3c/trig/manifest.ttl'):
+ for t in nose_tests(testers, "test/w3c/trig/manifest.ttl"):
if tests:
for test in tests:
if test in t[1].uri:
@@ -79,7 +79,7 @@ def test_trig(tests=None):
yield t
-if __name__ == '__main__':
+if __name__ == "__main__":
verbose = True
- nose_tst_earl_report(test_trig, 'rdflib_trig')
+ nose_tst_earl_report(test_trig, "rdflib_trig")
diff --git a/test/test_trix_parse.py b/test/test_trix_parse.py
index 1b0f9fb9..290ce0b6 100644
--- a/test/test_trix_parse.py
+++ b/test/test_trix_parse.py
@@ -6,7 +6,6 @@ import unittest
class TestTrixParse(unittest.TestCase):
-
def setUp(self):
pass
@@ -45,5 +44,5 @@ class TestTrixParse(unittest.TestCase):
# print "Parsed %d triples"%len(g)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_trix_serialize.py b/test/test_trix_serialize.py
index f1b68de4..4fe78a18 100644
--- a/test/test_trix_serialize.py
+++ b/test/test_trix_serialize.py
@@ -5,11 +5,10 @@ import unittest
from rdflib.graph import ConjunctiveGraph
from rdflib.term import URIRef, Literal
from rdflib.graph import Graph
-from six import BytesIO
+from io import BytesIO
class TestTrixSerialize(unittest.TestCase):
-
def setUp(self):
pass
@@ -18,17 +17,17 @@ class TestTrixSerialize(unittest.TestCase):
def testSerialize(self):
- s1 = URIRef('store:1')
- r1 = URIRef('resource:1')
- r2 = URIRef('resource:2')
+ s1 = URIRef("store:1")
+ r1 = URIRef("resource:1")
+ r2 = URIRef("resource:2")
- label = URIRef('predicate:label')
+ label = URIRef("predicate:label")
g1 = Graph(identifier=s1)
g1.add((r1, label, Literal("label 1", lang="en")))
g1.add((r1, label, Literal("label 2")))
- s2 = URIRef('store:2')
+ s2 = URIRef("store:2")
g2 = Graph(identifier=s2)
g2.add((r2, label, Literal("label 3")))
@@ -37,13 +36,13 @@ class TestTrixSerialize(unittest.TestCase):
g.addN([(s, p, o, g1)])
for s, p, o in g2.triples((None, None, None)):
g.addN([(s, p, o, g2)])
- r3 = URIRef('resource:3')
+ r3 = URIRef("resource:3")
g.add((r3, label, Literal(4)))
- r = g.serialize(format='trix')
+ r = g.serialize(format="trix")
g3 = ConjunctiveGraph()
- g3.parse(BytesIO(r), format='trix')
+ g3.parse(BytesIO(r), format="trix")
for q in g3.quads((None, None, None)):
# TODO: Fix once getGraph/getContext is in conjunctive graph
@@ -87,12 +86,10 @@ class TestTrixSerialize(unittest.TestCase):
graph = ConjunctiveGraph()
graph.bind(None, "http://defaultnamespace")
- sg = graph.serialize(format='trix').decode('UTF-8')
- self.assertTrue(
- 'xmlns="http://defaultnamespace"' not in sg, sg)
- self.assertTrue(
- 'xmlns="http://www.w3.org/2004/03/trix/trix-1/' in sg, sg)
+ sg = graph.serialize(format="trix").decode("UTF-8")
+ self.assertTrue('xmlns="http://defaultnamespace"' not in sg, sg)
+ self.assertTrue('xmlns="http://www.w3.org/2004/03/trix/trix-1/' in sg, sg)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/test/test_tsvresults.py b/test/test_tsvresults.py
index d5222a34..5c4d12d0 100644
--- a/test/test_tsvresults.py
+++ b/test/test_tsvresults.py
@@ -1,10 +1,9 @@
import unittest
-from six import StringIO
+from io import StringIO
from rdflib.plugins.sparql.results.tsvresults import TSVResultParser
class TestTSVResults(unittest.TestCase):
-
def test_empty_tsvresults_bindings(self):
# check that optional bindings are ordered properly
source = """?s\t?p\t?o
diff --git a/test/test_turtle_serialize.py b/test/test_turtle_serialize.py
index 81f57847..155cdffd 100644
--- a/test/test_turtle_serialize.py
+++ b/test/test_turtle_serialize.py
@@ -1,7 +1,6 @@
from rdflib import Graph, URIRef, BNode, RDF, Literal, Namespace
from rdflib.collection import Collection
from rdflib.plugins.serializers.turtle import TurtleSerializer
-from six import b
def testTurtleFinalDot():
@@ -13,8 +12,8 @@ def testTurtleFinalDot():
u = URIRef("http://ex.org/bob.")
g.bind("ns", "http://ex.org/")
g.add((u, u, u))
- s = g.serialize(format='turtle')
- assert b("ns:bob.") not in s
+ s = g.serialize(format="turtle")
+ assert "ns:bob.".encode("latin-1") not in s
def testTurtleBoolList():
@@ -51,18 +50,23 @@ def testUnicodeEscaping():
assert len(triples) == 3
print(triples)
# Now check that was decoded into python values properly
- assert triples[0][2] == URIRef(u'http://example.com/aaa\xf3bbbb')
- assert triples[1][2] == URIRef(u'http://example.com/zzz\U00100000zzz')
- assert triples[2][2] == URIRef(u'http://example.com/aaa\xf3bbb')
+ assert triples[0][2] == URIRef(u"http://example.com/aaa\xf3bbbb")
+ assert triples[1][2] == URIRef(u"http://example.com/zzz\U00100000zzz")
+ assert triples[2][2] == URIRef(u"http://example.com/aaa\xf3bbb")
def test_turtle_valid_list():
- NS = Namespace('http://example.org/ns/')
+ NS = Namespace("http://example.org/ns/")
g = Graph()
- g.parse(data="""
+ g.parse(
+ data="""
@prefix : <{0}> .
:s :p (""), (0), (false) .
- """.format(NS), format='turtle')
+ """.format(
+ NS
+ ),
+ format="turtle",
+ )
turtle_serializer = TurtleSerializer(g)
@@ -71,24 +75,30 @@ def test_turtle_valid_list():
def test_turtle_namespace():
- graph = Graph()
- graph.bind('OBO', 'http://purl.obolibrary.org/obo/')
- graph.bind('GENO', 'http://purl.obolibrary.org/obo/GENO_')
- graph.bind('RO', 'http://purl.obolibrary.org/obo/RO_')
- graph.bind('RO_has_phenotype',
- 'http://purl.obolibrary.org/obo/RO_0002200')
- graph.add((URIRef('http://example.org'),
- URIRef('http://purl.obolibrary.org/obo/RO_0002200'),
- URIRef('http://purl.obolibrary.org/obo/GENO_0000385')))
- output = [val for val in
- graph.serialize(format='turtle').decode().splitlines()
- if not val.startswith('@prefix')]
- output = ' '.join(output)
- assert 'RO_has_phenotype:' in output
- assert 'GENO:0000385' in output
+ graph = Graph()
+ graph.bind("OBO", "http://purl.obolibrary.org/obo/")
+ graph.bind("GENO", "http://purl.obolibrary.org/obo/GENO_")
+ graph.bind("RO", "http://purl.obolibrary.org/obo/RO_")
+ graph.bind("RO_has_phenotype", "http://purl.obolibrary.org/obo/RO_0002200")
+ graph.add(
+ (
+ URIRef("http://example.org"),
+ URIRef("http://purl.obolibrary.org/obo/RO_0002200"),
+ URIRef("http://purl.obolibrary.org/obo/GENO_0000385"),
+ )
+ )
+ output = [
+ val
+ for val in graph.serialize(format="turtle").decode().splitlines()
+ if not val.startswith("@prefix")
+ ]
+ output = " ".join(output)
+ assert "RO_has_phenotype:" in output
+ assert "GENO:0000385" in output
if __name__ == "__main__":
import nose
import sys
+
nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_turtle_sort_issue613.py b/test/test_turtle_sort_issue613.py
index f81cba33..a26ede28 100644
--- a/test/test_turtle_sort_issue613.py
+++ b/test/test_turtle_sort_issue613.py
@@ -17,8 +17,8 @@ https://github.com/RDFLib/rdflib/issues/676
def test_sort_dates():
g = rdflib.Graph()
- y = '''@prefix ex: <http://ex.org> .
-ex:X ex:p "2016-01-01T00:00:00"^^<http://www.w3.org/2001/XMLSchema#dateTime>, "2016-01-01T00:00:00Z"^^<http://www.w3.org/2001/XMLSchema#dateTime> . '''
+ y = """@prefix ex: <http://ex.org> .
+ex:X ex:p "2016-01-01T00:00:00"^^<http://www.w3.org/2001/XMLSchema#dateTime>, "2016-01-01T00:00:00Z"^^<http://www.w3.org/2001/XMLSchema#dateTime> . """
p = g.parse(data=y, format="turtle")
p.serialize(format="turtle")
@@ -27,14 +27,14 @@ ex:X ex:p "2016-01-01T00:00:00"^^<http://www.w3.org/2001/XMLSchema#dateTime>, "2
def test_sort_docfrag():
g = rdflib.Graph()
- y = '''@prefix ex: <http://ex.org> .
+ y = """@prefix ex: <http://ex.org> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
-ex:X ex:p "<h1>hi</h1>"^^rdf:HTML, "<h1>ho</h1>"^^rdf:HTML . '''
+ex:X ex:p "<h1>hi</h1>"^^rdf:HTML, "<h1>ho</h1>"^^rdf:HTML . """
p = g.parse(data=y, format="turtle")
p.serialize(format="turtle")
-if __name__ == '__main__':
+if __name__ == "__main__":
test_sort_docfrag()
diff --git a/test/test_turtle_w3c.py b/test/test_turtle_w3c.py
index 469ed023..b89ce66b 100644
--- a/test/test_turtle_w3c.py
+++ b/test/test_turtle_w3c.py
@@ -15,15 +15,15 @@ def turtle(test):
g = Graph()
try:
- base = 'http://www.w3.org/2013/TurtleTests/' + split_uri(test.action)[1]
+ base = "http://www.w3.org/2013/TurtleTests/" + split_uri(test.action)[1]
- g.parse(test.action, publicID=base, format='turtle')
+ g.parse(test.action, publicID=base, format="turtle")
if not test.syntax:
raise AssertionError("Input shouldn't have parsed!")
if test.result: # eval test
res = Graph()
- res.parse(test.result, format='nt')
+ res.parse(test.result, format="nt")
if verbose:
both, first, second = graph_diff(g, res)
@@ -39,9 +39,9 @@ def turtle(test):
print("NT Only")
for t in second:
print(t)
- raise Exception('Graphs do not match!')
+ raise Exception("Graphs do not match!")
- assert isomorphic(g, res), 'graphs must be the same'
+ assert isomorphic(g, res), "graphs must be the same"
except:
if test.syntax:
@@ -52,13 +52,12 @@ testers = {
RDFT.TestTurtlePositiveSyntax: turtle,
RDFT.TestTurtleNegativeSyntax: turtle,
RDFT.TestTurtleEval: turtle,
- RDFT.TestTurtleNegativeEval: turtle
+ RDFT.TestTurtleNegativeEval: turtle,
}
def test_turtle(tests=None):
- for t in nose_tests(testers,
- 'test/w3c/turtle/manifest.ttl'):
+ for t in nose_tests(testers, "test/w3c/turtle/manifest.ttl"):
if tests:
for test in tests:
if test in t[1].uri:
@@ -69,8 +68,8 @@ def test_turtle(tests=None):
yield t
-if __name__ == '__main__':
+if __name__ == "__main__":
verbose = True
- nose_tst_earl_report(test_turtle, 'rdflib_turtle')
+ nose_tst_earl_report(test_turtle, "rdflib_turtle")
diff --git a/test/test_util.py b/test/test_util.py
index 4184b659..89890c8d 100644
--- a/test/test_util.py
+++ b/test/test_util.py
@@ -54,11 +54,13 @@ n3source = """\
class TestUtilMisc(unittest.TestCase):
def setUp(self):
- self.x = Literal("2008-12-01T18:02:00Z",
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ self.x = Literal(
+ "2008-12-01T18:02:00Z",
+ datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"),
+ )
def test_util_list2set(self):
- base = [Literal('foo'), self.x]
+ base = [Literal("foo"), self.x]
r = util.list2set(base + base)
self.assertTrue(r == base)
@@ -75,10 +77,11 @@ class TestUtilMisc(unittest.TestCase):
class TestUtilDateTime(unittest.TestCase):
-
def setUp(self):
- self.x = Literal("2008-12-01T18:02:00Z",
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ self.x = Literal(
+ "2008-12-01T18:02:00Z",
+ datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"),
+ )
def test_util_date_time_tisnoneandnotz(self):
t = None
@@ -115,8 +118,10 @@ class TestUtilDateTime(unittest.TestCase):
def ablocaltime(t):
from time import gmtime
+
res = gmtime(t)
return res
+
util.localtime = ablocaltime
res = util.date_time(t, local_time_zone=True)
self.assertTrue(res is not t)
@@ -124,8 +129,10 @@ class TestUtilDateTime(unittest.TestCase):
class TestUtilTermConvert(unittest.TestCase):
def setUp(self):
- self.x = Literal("2008-12-01T18:02:00Z",
- datatype=URIRef('http://www.w3.org/2001/XMLSchema#dateTime'))
+ self.x = Literal(
+ "2008-12-01T18:02:00Z",
+ datatype=URIRef("http://www.w3.org/2001/XMLSchema#dateTime"),
+ )
def test_util_to_term_sisNone(self):
s = None
@@ -145,12 +152,12 @@ class TestUtilTermConvert(unittest.TestCase):
self.assertEqual(str(res), s[1:-1])
def test_util_to_term_sisbnode(self):
- s = '_http%23%4F%4Fexample%33com'
+ s = "_http%23%4F%4Fexample%33com"
res = util.to_term(s)
self.assertTrue(isinstance(res, BNode))
def test_util_to_term_sisunknown(self):
- s = 'http://example.com'
+ s = "http://example.com"
self.assertRaises(Exception, util.to_term, s)
def test_util_to_term_sisnotstr(self):
@@ -185,7 +192,7 @@ class TestUtilTermConvert(unittest.TestCase):
self.assertTrue(isinstance(res, Literal))
def test_util_from_n3_expecturiref(self):
- s = '<http://example.org/schema>'
+ s = "<http://example.org/schema>"
res = util.from_n3(s, default=None, backend=None)
self.assertTrue(isinstance(res, URIRef))
@@ -198,89 +205,99 @@ class TestUtilTermConvert(unittest.TestCase):
s = '"michel"@fr^^xsd:fr'
res = util.from_n3(s, default=None, backend=None)
self.assertTrue(isinstance(res, Literal))
- self.assertEqual(res, Literal('michel',
- datatype=XSD['fr']))
+ self.assertEqual(res, Literal("michel", datatype=XSD["fr"]))
def test_util_from_n3_expectliteralanddtype(self):
s = '"true"^^xsd:boolean'
res = util.from_n3(s, default=None, backend=None)
- self.assertTrue(res.eq(Literal('true', datatype=XSD['boolean'])))
+ self.assertTrue(res.eq(Literal("true", datatype=XSD["boolean"])))
def test_util_from_n3_expectliteralwithdatatypefromint(self):
- s = '42'
+ s = "42"
res = util.from_n3(s)
self.assertEqual(res, Literal(42))
def test_util_from_n3_expectliteralwithdatatypefrombool(self):
- s = 'true'
+ s = "true"
res = util.from_n3(s)
self.assertEqual(res, Literal(True))
- s = 'false'
+ s = "false"
res = util.from_n3(s)
self.assertEqual(res, Literal(False))
def test_util_from_n3_expectliteralmultiline(self):
s = '"""multi\nline\nstring"""@en'
res = util.from_n3(s, default=None, backend=None)
- self.assertTrue(res, Literal('multi\nline\nstring', lang='en'))
+ self.assertTrue(res, Literal("multi\nline\nstring", lang="en"))
def test_util_from_n3_expectliteralwithescapedquote(self):
s = '"\\""'
res = util.from_n3(s, default=None, backend=None)
- self.assertTrue(res, Literal('\\"', lang='en'))
+ self.assertTrue(res, Literal('\\"', lang="en"))
def test_util_from_n3_expectliteralwithtrailingbackslash(self):
s = '"trailing\\\\"^^<http://www.w3.org/2001/XMLSchema#string>'
res = util.from_n3(s)
- self.assertTrue(res, Literal('trailing\\', datatype=XSD['string']))
+ self.assertTrue(res, Literal("trailing\\", datatype=XSD["string"]))
self.assertTrue(res.n3(), s)
def test_util_from_n3_expectpartialidempotencewithn3(self):
- for n3 in ('<http://ex.com/foo>',
- '"foo"@de',
- u'<http://ex.com/漢字>',
- u'<http://ex.com/a#あ>',
- # '"\\""', # exception as '\\"' --> '"' by orig parser as well
- '"""multi\n"line"\nstring"""@en'):
- self.assertEqual(util.from_n3(n3).n3(), n3,
- 'from_n3(%(n3e)r).n3() != %(n3e)r' % {'n3e': n3})
+ for n3 in (
+ "<http://ex.com/foo>",
+ '"foo"@de',
+ u"<http://ex.com/漢字>",
+ u"<http://ex.com/a#あ>",
+ # '"\\""', # exception as '\\"' --> '"' by orig parser as well
+ '"""multi\n"line"\nstring"""@en',
+ ):
+ self.assertEqual(
+ util.from_n3(n3).n3(),
+ n3,
+ "from_n3(%(n3e)r).n3() != %(n3e)r" % {"n3e": n3},
+ )
def test_util_from_n3_expectsameasn3parser(self):
def parse_n3(term_n3):
- ''' Disclaimer: Quick and dirty hack using the n3 parser. '''
- prepstr = ("@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\n"
- "<urn:no_use> <urn:no_use> %s.\n" % term_n3)
+ """ Disclaimer: Quick and dirty hack using the n3 parser. """
+ prepstr = (
+ "@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\n"
+ "<urn:no_use> <urn:no_use> %s.\n" % term_n3
+ )
g = ConjunctiveGraph()
- g.parse(data=prepstr, format='n3')
+ g.parse(data=prepstr, format="n3")
return [t for t in g.triples((None, None, None))][0][2]
for n3 in ( # "michel", # won't parse in original parser
# "_:michel", # BNodes won't be the same
'"michel"',
- '<http://example.org/schema>',
+ "<http://example.org/schema>",
'"michel"@fr',
# '"michel"@fr^^xsd:fr', # FIXME: invalid n3, orig parser will prefer datatype
# '"true"^^xsd:boolean', # FIXME: orig parser will expand xsd prefix
- '42',
- 'true',
- 'false',
+ "42",
+ "true",
+ "false",
'"""multi\nline\nstring"""@en',
- '<http://ex.com/foo>',
+ "<http://ex.com/foo>",
'"foo"@de',
'"\\""@en',
- '"""multi\n"line"\nstring"""@en'):
+ '"""multi\n"line"\nstring"""@en',
+ ):
res, exp = util.from_n3(n3), parse_n3(n3)
- self.assertEqual(res, exp,
- 'from_n3(%(n3e)r): %(res)r != parser.notation3: %(exp)r' % {
- 'res': res, 'exp': exp, 'n3e': n3})
+ self.assertEqual(
+ res,
+ exp,
+ "from_n3(%(n3e)r): %(res)r != parser.notation3: %(exp)r"
+ % {"res": res, "exp": exp, "n3e": n3},
+ )
def test_util_from_n3_expectquotedgraph(self):
- s = '{<http://example.com/schema>}'
+ s = "{<http://example.com/schema>}"
res = util.from_n3(s, default=None, backend="IOMemory")
self.assertTrue(isinstance(res, QuotedGraph))
def test_util_from_n3_expectgraph(self):
- s = '[<http://example.com/schema>]'
+ s = "[<http://example.com/schema>]"
res = util.from_n3(s, default=None, backend="IOMemory")
self.assertTrue(isinstance(res, Graph))
@@ -317,35 +334,17 @@ class TestUtilCheckers(unittest.TestCase):
def test_util_check_statement(self):
c = "http://example.com"
- self.assertRaises(
- SubjectTypeError,
- util.check_statement,
- (c, self.p, self.o))
- self.assertRaises(
- PredicateTypeError,
- util.check_statement,
- (self.s, c, self.o))
- self.assertRaises(
- ObjectTypeError,
- util.check_statement,
- (self.s, self.p, c))
+ self.assertRaises(SubjectTypeError, util.check_statement, (c, self.p, self.o))
+ self.assertRaises(PredicateTypeError, util.check_statement, (self.s, c, self.o))
+ self.assertRaises(ObjectTypeError, util.check_statement, (self.s, self.p, c))
res = util.check_statement((self.s, self.p, self.o))
self.assertTrue(res == None)
def test_util_check_pattern(self):
c = "http://example.com"
- self.assertRaises(
- SubjectTypeError,
- util.check_pattern,
- (c, self.p, self.o))
- self.assertRaises(
- PredicateTypeError,
- util.check_pattern,
- (self.s, c, self.o))
- self.assertRaises(
- ObjectTypeError,
- util.check_pattern,
- (self.s, self.p, c))
+ self.assertRaises(SubjectTypeError, util.check_pattern, (c, self.p, self.o))
+ self.assertRaises(PredicateTypeError, util.check_pattern, (self.s, c, self.o))
+ self.assertRaises(ObjectTypeError, util.check_pattern, (self.s, self.p, c))
res = util.check_pattern((self.s, self.p, self.o))
self.assertTrue(res == None)
diff --git a/test/test_wide_python.py b/test/test_wide_python.py
index feef4519..5463e798 100644
--- a/test/test_wide_python.py
+++ b/test/test_wide_python.py
@@ -1,14 +1,13 @@
-
def test_wide_python_build():
"""This test is meant to fail on narrow python builds (common on Mac OS X).
See https://github.com/RDFLib/rdflib/issues/456 for more information.
"""
- assert len(u'\U0010FFFF') == 1, (
- 'You are using a narrow Python build!\n'
- 'This means that your Python does not properly support chars > 16bit.\n'
+ assert len(u"\U0010FFFF") == 1, (
+ "You are using a narrow Python build!\n"
+ "This means that your Python does not properly support chars > 16bit.\n"
'On your system chars like c=u"\\U0010FFFF" will have a len(c)==2.\n'
- 'As this can cause hard to debug problems with string processing\n'
- '(slicing, regexp, ...) later on, we strongly advise to use a wide\n'
- 'Python build in production systems.'
+ "As this can cause hard to debug problems with string processing\n"
+ "(slicing, regexp, ...) later on, we strongly advise to use a wide\n"
+ "Python build in production systems."
)
diff --git a/test/test_xmlliterals.py b/test/test_xmlliterals.py
index b467e82a..fcc0ddf2 100644
--- a/test/test_xmlliterals.py
+++ b/test/test_xmlliterals.py
@@ -3,24 +3,24 @@ from rdflib import RDF, Graph, Literal
def testPythonRoundtrip():
- l1 = Literal('<msg>hello</msg>', datatype=RDF.XMLLiteral)
- assert l1.value is not None, 'xml must have been parsed'
- assert l1.datatype == RDF.XMLLiteral, 'literal must have right datatype'
+ l1 = Literal("<msg>hello</msg>", datatype=RDF.XMLLiteral)
+ assert l1.value is not None, "xml must have been parsed"
+ assert l1.datatype == RDF.XMLLiteral, "literal must have right datatype"
- l2 = Literal('<msg>good morning</msg>', datatype=RDF.XMLLiteral)
- assert l2.value is not None, 'xml must have been parsed'
- assert not l1.eq(l2), 'literals must NOT be equal'
+ l2 = Literal("<msg>good morning</msg>", datatype=RDF.XMLLiteral)
+ assert l2.value is not None, "xml must have been parsed"
+ assert not l1.eq(l2), "literals must NOT be equal"
l3 = Literal(l1.value)
- assert l1.eq(l3), 'roundtripped literals must be equal'
- assert l3.datatype == RDF.XMLLiteral, 'literal must have right datatype'
+ assert l1.eq(l3), "roundtripped literals must be equal"
+ assert l3.datatype == RDF.XMLLiteral, "literal must have right datatype"
- l4 = Literal('<msg >hello</msg>', datatype=RDF.XMLLiteral)
+ l4 = Literal("<msg >hello</msg>", datatype=RDF.XMLLiteral)
assert l1 == l4
assert l1.eq(l4)
rdflib.NORMALIZE_LITERALS = False
- l4 = Literal('<msg >hello</msg>', datatype=RDF.XMLLiteral)
+ l4 = Literal("<msg >hello</msg>", datatype=RDF.XMLLiteral)
assert l1 != l4
assert l1.eq(l4)
rdflib.NORMALIZE_LITERALS = True
@@ -49,9 +49,13 @@ def testRDFXMLParse():
def graph():
g = rdflib.Graph()
- g.add((rdflib.URIRef('http://example.org/a'),
- rdflib.URIRef('http://example.org/p'),
- rdflib.Literal('<msg>hei</hei>', datatype=RDF.XMLLiteral)))
+ g.add(
+ (
+ rdflib.URIRef("http://example.org/a"),
+ rdflib.URIRef("http://example.org/p"),
+ rdflib.Literal("<msg>hei</hei>", datatype=RDF.XMLLiteral),
+ )
+ )
return g
@@ -65,20 +69,20 @@ def roundtrip(fmt):
def testRoundtrip():
- roundtrip('xml')
- roundtrip('n3')
- roundtrip('nt')
+ roundtrip("xml")
+ roundtrip("n3")
+ roundtrip("nt")
def testHTML():
- l1 = Literal('<msg>hello</msg>', datatype=RDF.XMLLiteral)
- assert l1.value is not None, 'xml must have been parsed'
- assert l1.datatype == RDF.XMLLiteral, 'literal must have right datatype'
+ l1 = Literal("<msg>hello</msg>", datatype=RDF.XMLLiteral)
+ assert l1.value is not None, "xml must have been parsed"
+ assert l1.datatype == RDF.XMLLiteral, "literal must have right datatype"
- l2 = Literal('<msg>hello</msg>', datatype=RDF.HTML)
- assert l2.value is not None, 'xml must have been parsed'
- assert l2.datatype == RDF.HTML, 'literal must have right datatype'
+ l2 = Literal("<msg>hello</msg>", datatype=RDF.HTML)
+ assert l2.value is not None, "xml must have been parsed"
+ assert l2.datatype == RDF.HTML, "literal must have right datatype"
assert l1 != l2
assert not l1.eq(l2)
diff --git a/test/testutils.py b/test/testutils.py
index 20b060d3..03366cfb 100644
--- a/test/testutils.py
+++ b/test/testutils.py
@@ -65,6 +65,7 @@ def _parse_or_report(verbose, graph, *args, **kwargs):
def nose_tst_earl_report(generator, earl_report_name=None):
from optparse import OptionParser
+
p = OptionParser()
(options, args) = p.parse_args()
@@ -74,7 +75,7 @@ def nose_tst_earl_report(generator, earl_report_name=None):
for t in generator(args):
tests += 1
- print('Running ', t[1].uri)
+ print("Running ", t[1].uri)
try:
t[0](t[1])
add_test(t[1].uri, "passed")
@@ -93,11 +94,16 @@ def nose_tst_earl_report(generator, earl_report_name=None):
print_exc()
sys.stderr.write("%s\n" % t[1].uri)
- print("Ran %d tests, %d skipped, %d failed. "%(tests, skip, tests-skip-success))
+ print(
+ "Ran %d tests, %d skipped, %d failed. " % (tests, skip, tests - skip - success)
+ )
if earl_report_name:
now = isodate.datetime_isoformat(datetime.datetime.utcnow())
- earl_report = 'test_reports/%s-%s.ttl' % (earl_report_name, now.replace(":", ""))
+ earl_report = "test_reports/%s-%s.ttl" % (
+ earl_report_name,
+ now.replace(":", ""),
+ )
- report.serialize(earl_report, format='n3')
- report.serialize('test_reports/%s-latest.ttl'%earl_report_name, format='n3')
+ report.serialize(earl_report, format="n3")
+ report.serialize("test_reports/%s-latest.ttl" % earl_report_name, format="n3")
print("Wrote EARL-report to '%s'" % earl_report)
diff --git a/test/triple_store.py b/test/triple_store.py
index b9c5221a..f37bea33 100644
--- a/test/triple_store.py
+++ b/test/triple_store.py
@@ -6,8 +6,8 @@ from rdflib.graph import Graph
class GraphTest(unittest.TestCase):
- backend = 'default'
- path = 'store'
+ backend = "default"
+ path = "store"
def setUp(self):
self.store = Graph(store=self.backend)
diff --git a/test/type_check.py b/test/type_check.py
index 605f0916..19329a39 100644
--- a/test/type_check.py
+++ b/test/type_check.py
@@ -10,9 +10,11 @@ foo = URIRef("foo")
class TypeCheckCase(unittest.TestCase):
- unstable = True # TODO: until we decide if we want to add type checking back to rdflib
- backend = 'default'
- path = 'store'
+ unstable = (
+ True # TODO: until we decide if we want to add type checking back to rdflib
+ )
+ backend = "default"
+ path = "store"
def setUp(self):
self.store = Graph(backend=self.backend)
@@ -22,13 +24,10 @@ class TypeCheckCase(unittest.TestCase):
self.store.close()
def testSubjectTypeCheck(self):
- self.assertRaises(SubjectTypeError,
- self.store.add, (None, foo, foo))
+ self.assertRaises(SubjectTypeError, self.store.add, (None, foo, foo))
def testPredicateTypeCheck(self):
- self.assertRaises(PredicateTypeError,
- self.store.add, (foo, None, foo))
+ self.assertRaises(PredicateTypeError, self.store.add, (foo, None, foo))
def testObjectTypeCheck(self):
- self.assertRaises(ObjectTypeError,
- self.store.add, (foo, foo, None))
+ self.assertRaises(ObjectTypeError, self.store.add, (foo, foo, None))
diff --git a/tox.ini b/tox.ini
index ee712287..b5b588fb 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
[tox]
envlist =
- py27,py34,py35,py36
+ py35,py36,py37,py38
[testenv]
setenv =
@@ -20,7 +20,7 @@ deps =
[testenv:cover]
basepython =
- python2.7
+ python3.7
commands =
{envpython} run_tests.py --where=./ \
--with-coverage --cover-html --cover-html-dir=./coverage \