author     dvora-h <67596500+dvora-h@users.noreply.github.com>  2022-10-23 16:53:57 +0300
committer  GitHub <noreply@github.com>  2022-10-23 16:53:57 +0300
commit     aa0ae6897828009342338e435773a5d0b90a21ea (patch)
tree       cf951514512ab11aef3d2b36153d25962fd2fd83
parent     e867f9eec740fde0b749b1ca35b8b9772c5ada92 (diff)
download   redis-py-aa0ae6897828009342338e435773a5d0b90a21ea.tar.gz
Fix `TIMESERIES` (round floats) and `JSON` tests (#2421)
* Fix timeseries tests (round floats)
* fix json tests
-rw-r--r--  tests/test_asyncio/test_json.py         2
-rw-r--r--  tests/test_asyncio/test_timeseries.py   2
-rw-r--r--  tests/test_json.py                      2
-rw-r--r--  tests/test_search.py                   11
-rw-r--r--  tests/test_timeseries.py                4
5 files changed, 7 insertions, 14 deletions
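The timeseries fixes pin the expected TWA bucket value to 2.55 instead of the binary-representation artifact 2.5500000000000003. If exact float literals ever prove brittle again, one hedged alternative is to compare with a tolerance via pytest.approx; this is only an illustrative sketch (the helper name assert_samples_close is hypothetical), not what the commit itself does:

    import pytest

    def assert_samples_close(actual, expected):
        # Compare (timestamp, value) samples, tolerating float representation noise.
        assert len(actual) == len(expected)
        for (a_ts, a_val), (e_ts, e_val) in zip(actual, expected):
            assert a_ts == e_ts
            assert a_val == pytest.approx(e_val)

    # e.g. inside test_range_advanced:
    # assert_samples_close(
    #     client.ts().range(1, 0, 10, aggregation_type="twa", bucket_size_msec=10),
    #     [(0, 2.55), (10, 3.0)],
    # )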
diff --git a/tests/test_asyncio/test_json.py b/tests/test_asyncio/test_json.py
index 416a9f4..b8854d2 100644
--- a/tests/test_asyncio/test_json.py
+++ b/tests/test_asyncio/test_json.py
@@ -817,7 +817,7 @@ async def test_objlen_dollar(modclient: redis.Redis):
},
)
# Test multi
- assert await modclient.json().objlen("doc1", "$..a") == [2, None, 1]
+ assert await modclient.json().objlen("doc1", "$..a") == [None, 2, 1]
# Test single
assert await modclient.json().objlen("doc1", "$.nested1.a") == [2]
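The expected result order for the recursive JSONPath `$..a` changes from [2, None, 1] to [None, 2, 1]; the None entry presumably corresponds to a matched `a` whose value is not an object, for which JSON.OBJLEN reports no length. If a test only cares about which lengths come back and not their order, an order-insensitive assertion is one possible hedge (illustrative sketch only, not part of this commit):

    from collections import Counter

    # Inside the async test, compare as multisets instead of ordered lists.
    result = await modclient.json().objlen("doc1", "$..a")
    assert Counter(result) == Counter([None, 2, 1])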
diff --git a/tests/test_asyncio/test_timeseries.py b/tests/test_asyncio/test_timeseries.py
index ef58226..a710993 100644
--- a/tests/test_asyncio/test_timeseries.py
+++ b/tests/test_asyncio/test_timeseries.py
@@ -240,7 +240,7 @@ async def test_range_advanced(modclient: redis.Redis):
assert [(0, 5.0), (5, 6.0)] == await modclient.ts().range(
1, 0, 10, aggregation_type="count", bucket_size_msec=10, align=5
)
- assert [(0, 2.5500000000000003), (10, 3.0)] == await modclient.ts().range(
+ assert [(0, 2.55), (10, 3.0)] == await modclient.ts().range(
1, 0, 10, aggregation_type="twa", bucket_size_msec=10
)
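Another hedged way to keep such assertions stable, distinct from pinning the new literal, is to round the returned values before comparison, in the spirit of the commit title; an illustrative sketch only, the commit itself simply updates the expected literal:

    # Round each sample's value before comparing, e.g. to 10 decimal places.
    samples = await modclient.ts().range(
        1, 0, 10, aggregation_type="twa", bucket_size_msec=10
    )
    assert [(ts, round(val, 10)) for ts, val in samples] == [(0, 2.55), (10, 3.0)]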
diff --git a/tests/test_json.py b/tests/test_json.py
index 0965a93..676683d 100644
--- a/tests/test_json.py
+++ b/tests/test_json.py
@@ -824,7 +824,7 @@ def test_objlen_dollar(client):
},
)
# Test multi
- assert client.json().objlen("doc1", "$..a") == [2, None, 1]
+ assert client.json().objlen("doc1", "$..a") == [None, 2, 1]
# Test single
assert client.json().objlen("doc1", "$.nested1.a") == [2]
diff --git a/tests/test_search.py b/tests/test_search.py
index 5fe5ab1..abaa908 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -1448,9 +1448,9 @@ def test_json_with_jsonpath(client):
assert res.docs[0].id == "doc:1"
assert res.docs[0].json == '{"prod:name":"RediSearch"}'
- # query for an unsupported field fails
+ # query for an unsupported field
res = client.ft().search("@name_unsupported:RediSearch")
- assert res.total == 0
+ assert res.total == 1
# return of a supported field succeeds
res = client.ft().search(Query("@name:RediSearch").return_field("name"))
@@ -1458,13 +1458,6 @@ def test_json_with_jsonpath(client):
assert res.docs[0].id == "doc:1"
assert res.docs[0].name == "RediSearch"
- # return of an unsupported field fails
- res = client.ft().search(Query("@name:RediSearch").return_field("name_unsupported"))
- assert res.total == 1
- assert res.docs[0].id == "doc:1"
- with pytest.raises(Exception):
- res.docs[0].name_unsupported
-
@pytest.mark.redismod
@pytest.mark.onlynoncluster
diff --git a/tests/test_timeseries.py b/tests/test_timeseries.py
index cb27a75..6ced535 100644
--- a/tests/test_timeseries.py
+++ b/tests/test_timeseries.py
@@ -233,7 +233,7 @@ def test_range_advanced(client):
assert [(0, 5.0), (5, 6.0)] == client.ts().range(
1, 0, 10, aggregation_type="count", bucket_size_msec=10, align=5
)
- assert [(0, 2.5500000000000003), (10, 3.0)] == client.ts().range(
+ assert [(0, 2.55), (10, 3.0)] == client.ts().range(
1, 0, 10, aggregation_type="twa", bucket_size_msec=10
)
@@ -343,7 +343,7 @@ def test_rev_range(client):
assert [(1, 10.0), (0, 1.0)] == client.ts().revrange(
1, 0, 10, aggregation_type="count", bucket_size_msec=10, align=1
)
- assert [(10, 3.0), (0, 2.5500000000000003)] == client.ts().revrange(
+ assert [(10, 3.0), (0, 2.55)] == client.ts().revrange(
1, 0, 10, aggregation_type="twa", bucket_size_msec=10
)