Skip to content

Commit e3d5c28

Browse files
refactor: handle duplicates while using the enforcer directly (#98)
While loading policies from one datastore into another using the enforcer directly, we should perform operations on the enforcer with the latest policies. To do this, we reload the policy after each write operation to ensure we're using the latest policies from the datastore.
1 parent a3b5cdd commit e3d5c28

File tree

4 files changed

+429
-7
lines changed

4 files changed

+429
-7
lines changed

CHANGELOG.rst

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,14 @@ Unreleased
1616

1717
*
1818

19+
0.4.1 - 2025-10-16
20+
******************
21+
22+
Fixed
23+
=====
24+
25+
* Load policy before adding policies in the loading script to avoid duplicates.
26+
1927
0.4.0 - 2025-10-16
2028
******************
2129

openedx_authz/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,6 @@
44

55
import os
66

7-
__version__ = "0.4.0"
7+
__version__ = "0.4.1"
88

99
ROOT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))

openedx_authz/engine/utils.py

Lines changed: 32 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -27,24 +27,50 @@ def migrate_policy_between_enforcers(
2727
# Load latest policies from the source enforcer
2828
source_enforcer.load_policy()
2929
policies = source_enforcer.get_policy()
30+
logger.info(f"Loaded {len(policies)} policies from source enforcer.")
31+
32+
# Load target enforcer policies to check for duplicates
33+
target_enforcer.load_policy()
34+
logger.info(f"Target enforcer has {len(target_enforcer.get_policy())} existing policies before migration.")
35+
36+
# TODO: these operations use the enforcer directly, which may not be ideal
37+
# since we have to load the policy after each addition to avoid duplicates.
38+
# I think we should consider using an API which can validate whether
39+
# all policies exist before adding them or we have the
40+
# latest policies loaded in the enforcer.
41+
3042
for policy in policies:
31-
if not target_enforcer.has_policy(*policy):
32-
target_enforcer.add_policy(*policy)
43+
if target_enforcer.has_policy(*policy):
44+
logger.info(f"Policy {policy} already exists in target, skipping.")
45+
continue
46+
target_enforcer.add_policy(*policy)
47+
48+
# Ensure latest policies are loaded in the target enforcer after each addition
49+
# to avoid duplicates
50+
target_enforcer.load_policy()
3351

3452
for grouping_policy_ptype in GROUPING_POLICY_PTYPES:
3553
try:
3654
grouping_policies = source_enforcer.get_named_grouping_policy(
3755
grouping_policy_ptype
3856
)
3957
for grouping in grouping_policies:
40-
if not target_enforcer.has_named_grouping_policy(
58+
if target_enforcer.has_named_grouping_policy(
4159
grouping_policy_ptype, *grouping
4260
):
43-
target_enforcer.add_named_grouping_policy(
44-
grouping_policy_ptype, *grouping
61+
logger.info(
62+
f"Grouping policy {grouping_policy_ptype}, {grouping} already exists in target, skipping."
4563
)
64+
continue
65+
target_enforcer.add_named_grouping_policy(
66+
grouping_policy_ptype, *grouping
67+
)
68+
69+
# Ensure latest policies are loaded in the target enforcer after each addition
70+
# to avoid duplicates
71+
target_enforcer.load_policy()
4672
except KeyError as e:
47-
logger.debug(
73+
logger.info(
4874
f"Skipping {grouping_policy_ptype} policies: {e} not found in source enforcer."
4975
)
5076
logger.info(

0 commit comments

Comments
 (0)