Skip to content

Commit 9a6ab4a

Browse files
authored
Merge branch 'master' into keep-last-cluster-node-for-retry
2 parents 699f8f6 + a757bad commit 9a6ab4a

File tree

11 files changed

+1459
-14
lines changed

11 files changed

+1459
-14
lines changed

.github/workflows/integration.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,7 @@ jobs:
7474
max-parallel: 15
7575
fail-fast: false
7676
matrix:
77-
redis-version: ['8.2-M01-pre', '${{ needs.redis_version.outputs.CURRENT }}', '7.4.4', '7.2.9']
77+
redis-version: ['8.2-RC1-pre', '${{ needs.redis_version.outputs.CURRENT }}', '7.4.4', '7.2.9']
7878
python-version: ['3.9', '3.13']
7979
parser-backend: ['plain']
8080
event-loop: ['asyncio']

.github/workflows/spellcheck.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ jobs:
88
- name: Checkout
99
uses: actions/checkout@v4
1010
- name: Check Spelling
11-
uses: rojopolis/spellcheck-github-actions@0.49.0
11+
uses: rojopolis/spellcheck-github-actions@0.51.0
1212
with:
1313
config_path: .github/spellcheck-settings.yml
1414
task_name: Markdown

README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ Start a redis via docker (for Redis versions < 8.0):
4141

4242
``` bash
4343
docker run -p 6379:6379 -it redis/redis-stack:latest
44-
44+
```
4545
To install redis-py, simply:
4646

4747
``` bash
@@ -209,4 +209,4 @@ Special thanks to:
209209
system.
210210
- Paul Hubbard for initial packaging support.
211211

212-
[![Redis](./docs/_static/logo-redis.svg)](https://redis.io)
212+
[![Redis](./docs/_static/logo-redis.svg)](https://redis.io)

doctests/home_prob_dts.py

Lines changed: 232 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,232 @@
# EXAMPLE: home_prob_dts
"""
Probabilistic data type examples:
https://redis.io/docs/latest/develop/connect/clients/python/redis-py/prob
"""

# HIDE_START
import redis

# decode_responses=True makes replies come back as str instead of bytes.
r = redis.Redis(decode_responses=True)
# HIDE_END
# REMOVE_START
# Clean up all keys used below so the example is repeatable.
r.delete(
    "recorded_users", "other_users",
    "group:1", "group:2", "both_groups",
    "items_sold",
    "male_heights", "female_heights", "all_heights",
    "top_3_songs"
)
# REMOVE_END

# STEP_START bloom
res1 = r.bf().madd("recorded_users", "andy", "cameron", "david", "michelle")
print(res1)  # >>> [1, 1, 1, 1]

res2 = r.bf().exists("recorded_users", "cameron")
print(res2)  # >>> 1

res3 = r.bf().exists("recorded_users", "kaitlyn")
print(res3)  # >>> 0
# STEP_END
# REMOVE_START
assert res1 == [1, 1, 1, 1]
assert res2 == 1
assert res3 == 0
# REMOVE_END

# STEP_START cuckoo
res4 = r.cf().add("other_users", "paolo")
print(res4)  # >>> 1

res5 = r.cf().add("other_users", "kaitlyn")
print(res5)  # >>> 1

res6 = r.cf().add("other_users", "rachel")
print(res6)  # >>> 1

res7 = r.cf().mexists("other_users", "paolo", "rachel", "andy")
print(res7)  # >>> [1, 1, 0]

res8 = r.cf().delete("other_users", "paolo")
print(res8)  # >>> 1

res9 = r.cf().exists("other_users", "paolo")
print(res9)  # >>> 0
# STEP_END
# REMOVE_START
assert res4 == 1
assert res5 == 1
assert res6 == 1
assert res7 == [1, 1, 0]
assert res8 == 1
assert res9 == 0
# REMOVE_END

# STEP_START hyperloglog
res10 = r.pfadd("group:1", "andy", "cameron", "david")
print(res10)  # >>> 1

res11 = r.pfcount("group:1")
print(res11)  # >>> 3

res12 = r.pfadd("group:2", "kaitlyn", "michelle", "paolo", "rachel")
print(res12)  # >>> 1

res13 = r.pfcount("group:2")
print(res13)  # >>> 4

res14 = r.pfmerge("both_groups", "group:1", "group:2")
print(res14)  # >>> True

res15 = r.pfcount("both_groups")
print(res15)  # >>> 7
# STEP_END
# REMOVE_START
assert res10 == 1
assert res11 == 3
assert res12 == 1
assert res13 == 4
assert res14
assert res15 == 7
# REMOVE_END

# STEP_START cms
# Specify that you want to keep the counts within 0.01
# (1%) of the true value with a 0.005 (0.5%) chance
# of going outside this limit.
res16 = r.cms().initbyprob("items_sold", 0.01, 0.005)
print(res16)  # >>> True

# The parameters for `incrby()` are two lists. The count
# for each item in the first list is incremented by the
# value at the same index in the second list.
res17 = r.cms().incrby(
    "items_sold",
    ["bread", "tea", "coffee", "beer"],  # Items sold
    [300, 200, 200, 100]
)
print(res17)  # >>> [300, 200, 200, 100]

res18 = r.cms().incrby(
    "items_sold",
    ["bread", "coffee"],
    [100, 150]
)
print(res18)  # >>> [400, 350]

res19 = r.cms().query("items_sold", "bread", "tea", "coffee", "beer")
print(res19)  # >>> [400, 200, 350, 100]
# STEP_END
# REMOVE_START
assert res16
assert res17 == [300, 200, 200, 100]
assert res18 == [400, 350]
assert res19 == [400, 200, 350, 100]
# REMOVE_END

# STEP_START tdigest
res20 = r.tdigest().create("male_heights")
print(res20)  # >>> True

res21 = r.tdigest().add(
    "male_heights",
    [175.5, 181, 160.8, 152, 177, 196, 164]
)
print(res21)  # >>> OK

res22 = r.tdigest().min("male_heights")
print(res22)  # >>> 152.0

res23 = r.tdigest().max("male_heights")
print(res23)  # >>> 196.0

res24 = r.tdigest().quantile("male_heights", 0.75)
print(res24)  # >>> 181

# Note that the CDF value for 181 is not exactly
# 0.75. Both values are estimates.
res25 = r.tdigest().cdf("male_heights", 181)
print(res25)  # >>> [0.7857142857142857]

res26 = r.tdigest().create("female_heights")
print(res26)  # >>> True

res27 = r.tdigest().add(
    "female_heights",
    [155.5, 161, 168.5, 170, 157.5, 163, 171]
)
print(res27)  # >>> OK

res28 = r.tdigest().quantile("female_heights", 0.75)
print(res28)  # >>> [170]

res29 = r.tdigest().merge(
    "all_heights", 2, "male_heights", "female_heights"
)
print(res29)  # >>> OK

res30 = r.tdigest().quantile("all_heights", 0.75)
print(res30)  # >>> [175.5]
# STEP_END
# REMOVE_START
assert res20
assert res21 == "OK"
assert res22 == 152.0
assert res23 == 196.0
assert res24 == [181]
assert res25 == [0.7857142857142857]
assert res26
assert res27 == "OK"
assert res28 == [170]
assert res29 == "OK"
assert res30 == [175.5]
# REMOVE_END

# STEP_START topk
# The `reserve()` method creates the Top-K object with
# the given key. The parameters are the number of items
# in the ranking and values for `width`, `depth`, and
# `decay`, described in the Top-K reference page.
res31 = r.topk().reserve("top_3_songs", 3, 7, 8, 0.9)
print(res31)  # >>> True

# The parameters for `incrby()` are two lists. The count
# for each item in the first list is incremented by the
# value at the same index in the second list.
res32 = r.topk().incrby(
    "top_3_songs",
    [
        "Starfish Trooper",
        "Only one more time",
        "Rock me, Handel",
        "How will anyone know?",
        "Average lover",
        "Road to everywhere"
    ],
    [
        3000,
        1850,
        1325,
        3890,
        4098,
        770
    ]
)
print(res32)
# >>> [None, None, None, 'Rock me, Handel', 'Only one more time', None]

res33 = r.topk().list("top_3_songs")
print(res33)
# >>> ['Average lover', 'How will anyone know?', 'Starfish Trooper']

res34 = r.topk().query(
    "top_3_songs", "Starfish Trooper", "Road to everywhere"
)
print(res34)  # >>> [1, 0]
# STEP_END
# REMOVE_START
assert res31
assert res32 == [None, None, None, 'Rock me, Handel', 'Only one more time', None]
assert res33 == ['Average lover', 'How will anyone know?', 'Starfish Trooper']
assert res34 == [1, 0]
# REMOVE_END

redis/_parsers/helpers.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -676,7 +676,8 @@ def parse_client_info(value):
676676
"omem",
677677
"tot-mem",
678678
}:
679-
client_info[int_key] = int(client_info[int_key])
679+
if int_key in client_info:
680+
client_info[int_key] = int(client_info[int_key])
680681
return client_info
681682

682683

redis/commands/search/field.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -181,7 +181,7 @@ def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):
181181
182182
``name`` is the name of the field.
183183
184-
``algorithm`` can be "FLAT" or "HNSW".
184+
``algorithm`` can be "FLAT", "HNSW", or "SVS-VAMANA".
185185
186186
``attributes`` each algorithm can have specific attributes. Some of them
187187
are mandatory and some of them are optional. See
@@ -194,10 +194,10 @@ def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):
194194
if sort or noindex:
195195
raise DataError("Cannot set 'sortable' or 'no_index' in Vector fields.")
196196

197-
if algorithm.upper() not in ["FLAT", "HNSW"]:
197+
if algorithm.upper() not in ["FLAT", "HNSW", "SVS-VAMANA"]:
198198
raise DataError(
199-
"Realtime vector indexing supporting 2 Indexing Methods:"
200-
"'FLAT' and 'HNSW'."
199+
"Realtime vector indexing supporting 3 Indexing Methods:"
200+
"'FLAT', 'HNSW', and 'SVS-VAMANA'."
201201
)
202202

203203
attr_li = []

redis/utils.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,10 @@
11
import datetime
22
import logging
33
import textwrap
4+
from collections.abc import Callable
45
from contextlib import contextmanager
56
from functools import wraps
6-
from typing import Any, Dict, List, Mapping, Optional, Union
7+
from typing import Any, Dict, List, Mapping, Optional, TypeVar, Union
78

89
from redis.exceptions import DataError
910
from redis.typing import AbsExpiryT, EncodableT, ExpiryT
@@ -150,18 +151,21 @@ def warn_deprecated_arg_usage(
150151
warnings.warn(msg, category=DeprecationWarning, stacklevel=stacklevel)
151152

152153

154+
C = TypeVar("C", bound=Callable)
155+
156+
153157
def deprecated_args(
154158
args_to_warn: list = ["*"],
155159
allowed_args: list = [],
156160
reason: str = "",
157161
version: str = "",
158-
):
162+
) -> Callable[[C], C]:
159163
"""
160164
Decorator to mark specified args of a function as deprecated.
161165
If '*' is in args_to_warn, all arguments will be marked as deprecated.
162166
"""
163167

164-
def decorator(func):
168+
def decorator(func: C) -> C:
165169
@wraps(func)
166170
def wrapper(*args, **kwargs):
167171
# Get function argument names

0 commit comments

Comments
 (0)