From 06f8a523c7bc91fd2a1a4967cd2919ce3ba0d915 Mon Sep 17 00:00:00 2001
From: Guillermo
Date: Thu, 19 Dec 2024 18:03:50 -0600
Subject: [PATCH] fix(NLP): NLP features broken on non-Stripe instances
 TASK-1322 (#5337)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fix bug in the NLP feature when Stripe is disabled

### 📖 Description

This PR fixes a bug in the NLP feature that occurred when Stripe was disabled.
Some code was refactored and a new unit test was added to ensure that no issue
arises when Stripe is disabled.

### 💭 Notes

Ensure that all the necessary env variables are set up. While testing this on a
local instance, I found that the variable GS_BUCKET_NAME was not set.
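
A quick way to sanity-check a local instance before exercising the NLP flow is
the sketch below (run it in a Django shell; the `Organization` import path and
the `organization` lookup are assumptions, the other names come from the files
touched in this patch):

```python
from math import inf

from django.conf import settings

from kobo.apps.trackers.utils import get_organization_remaining_usage

# Assumed import path for the Organization model; adjust if it differs locally.
from kobo.apps.organizations.models import Organization

# The Google integration now logs a warning when this is unset, and uploads
# to Google Cloud Storage will fail without it.
assert settings.GS_BUCKET_NAME, 'Set GS_BUCKET_NAME before testing the NLP flow'

# Any existing organization will do for this check.
organization = Organization.objects.first()

# With STRIPE_ENABLED=False there is no plan or add-on to deduct from, so the
# remaining NLP usage should be reported as unlimited (this mirrors the new
# unit test in kobo/apps/trackers/tests/test_utils.py).
if not settings.STRIPE_ENABLED:
    assert get_organization_remaining_usage(organization, 'seconds') == inf
```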

Signed-off-by: Guillermo
---
 .../subsequences/integrations/google/base.py |  6 ++++++
 .../tests/test_submission_extras_api_post.py | 16 ++++++++++------
 kobo/apps/trackers/tests/test_utils.py       | 18 ++++++++++++++++++
 kobo/apps/trackers/utils.py                  | 18 +++++++++++-------
 kpi/utils/usage_calculator.py                |  7 -------
 5 files changed, 45 insertions(+), 20 deletions(-)

diff --git a/kobo/apps/subsequences/integrations/google/base.py b/kobo/apps/subsequences/integrations/google/base.py
index 36400617a8..8dcd4cf894 100644
--- a/kobo/apps/subsequences/integrations/google/base.py
+++ b/kobo/apps/subsequences/integrations/google/base.py
@@ -12,6 +12,7 @@ from googleapiclient import discovery
 
 from kobo.apps.trackers.utils import update_nlp_counter
+from kpi.utils.log import logging
 from ...constants import GOOGLE_CACHE_TIMEOUT, make_nlp_async_cache_key
 from ...exceptions import SubsequenceTimeoutError
 from ...models import SubmissionExtras
@@ -36,6 +37,11 @@ def __init__(self, submission: SubmissionExtras):
         self.user = submission.asset.owner
         self.credentials = google_credentials_from_constance_config()
         self.storage_client = storage.Client(credentials=self.credentials)
+        if settings.GS_BUCKET_NAME is None:
+            logging.warning(
+                'GS_BUCKET_NAME is None, NLP processing will fail '
+                'when storing files in google cloud.'
+            )
         self.bucket = self.storage_client.bucket(
             bucket_name=settings.GS_BUCKET_NAME
         )
diff --git a/kobo/apps/subsequences/tests/test_submission_extras_api_post.py b/kobo/apps/subsequences/tests/test_submission_extras_api_post.py
index 372fc6dcdc..0949334e66 100644
--- a/kobo/apps/subsequences/tests/test_submission_extras_api_post.py
+++ b/kobo/apps/subsequences/tests/test_submission_extras_api_post.py
@@ -425,10 +425,8 @@ def setUp(self):
         )
 
     @override_settings(
-        CACHES={
-            'default':
-                {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}
-        }
+        CACHES={'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}},
+        STRIPE_ENABLED=False,
     )
     @override_config(ASR_MT_INVITEE_USERNAMES='*')
     @patch('google.cloud.speech.SpeechClient')
@@ -460,7 +458,10 @@ def test_google_transcript_post(self, m1, m2):
         with self.assertNumQueries(FuzzyInt(25, 35)):
             self.client.post(url, data, format='json')
 
-    @override_settings(CACHES={'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}})
+    @override_settings(
+        CACHES={'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}},
+        STRIPE_ENABLED=False,
+    )
     def test_google_transcript_permissions(self):
         url = reverse('advanced-submission-post', args=[self.asset.uid])
         submission_id = 'abc123-def456'
@@ -488,7 +489,10 @@ def test_google_transcript_permissions(self):
         res = self.client.get(url + '?submission=' + submission_id, format='json')
         self.assertEqual(res.status_code, 404)
 
-    @override_settings(CACHES={'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}})
+    @override_settings(
+        CACHES={'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'}},
+        STRIPE_ENABLED=False,
+    )
     @override_config(ASR_MT_INVITEE_USERNAMES='*')
     @patch('kobo.apps.subsequences.integrations.google.google_translate.translate')
     @patch('kobo.apps.subsequences.integrations.google.base.storage')
diff --git a/kobo/apps/trackers/tests/test_utils.py b/kobo/apps/trackers/tests/test_utils.py
index 05c11d1f07..17d020a2af 100644
--- a/kobo/apps/trackers/tests/test_utils.py
+++ b/kobo/apps/trackers/tests/test_utils.py
@@ -1,4 +1,7 @@
+from math import inf
+
 from ddt import data, ddt
+from django.test import override_settings
 from django.utils import timezone
 from djstripe.models import Charge, PaymentIntent, Price, Product
 from model_bakery import baker
@@ -119,3 +122,18 @@ def test_organization_usage_utils(self, usage_type):
         )
         remaining = get_organization_remaining_usage(self.organization, usage_type)
         assert remaining == total_limit - 2500
+
+    @override_settings(
+        STRIPE_ENABLED=False,
+    )
+    @data('characters', 'seconds')
+    def test_org_usage_utils_without_stripe(self, usage_type):
+        remaining = get_organization_remaining_usage(self.organization, usage_type)
+        assert remaining == inf
+
+        update_nlp_counter(
+            USAGE_LIMIT_MAP[usage_type], 10000, self.someuser.id, self.asset.id
+        )
+
+        remaining = get_organization_remaining_usage(self.organization, usage_type)
+        assert remaining == inf
diff --git a/kobo/apps/trackers/utils.py b/kobo/apps/trackers/utils.py
index 1c70ac1f3f..8db33df26d 100644
--- a/kobo/apps/trackers/utils.py
+++ b/kobo/apps/trackers/utils.py
@@ -1,6 +1,7 @@
 from typing import Optional, Union
 
 from django.apps import apps
+from django.conf import settings
 from django.db.models import F
 from django.utils import timezone
 from django_request_cache import cache_for_request
@@ -49,14 +50,15 @@ def update_nlp_counter(
     counter_id = counter.pk
 
     # Update the total counters by the usage amount to keep them current
+    deduct = settings.STRIPE_ENABLED
     kwargs = {}
     if service.endswith('asr_seconds'):
         kwargs['total_asr_seconds'] = F('total_asr_seconds') + amount
-        if asset_id is not None and organization is not None:
+        if deduct and asset_id is not None:
             handle_usage_deduction(organization, 'seconds', amount)
     if service.endswith('mt_characters'):
         kwargs['total_mt_characters'] = F('total_mt_characters') + amount
-        if asset_id is not None and organization is not None:
+        if deduct and asset_id is not None:
             handle_usage_deduction(organization, 'characters', amount)
 
     NLPUsageCounter.objects.filter(pk=counter_id).update(
@@ -84,16 +86,18 @@ def get_organization_remaining_usage(
     """
     Get the organization remaining usage count for a given limit type
     """
-    PlanAddOn = apps.get_model('stripe', 'PlanAddOn')  # noqa
+    addon_remaining = 0
+    if settings.STRIPE_ENABLED:
+        PlanAddOn = apps.get_model('stripe', 'PlanAddOn')  # noqa
+        _, addon_remaining = PlanAddOn.get_organization_totals(
+            organization,
+            usage_type,
+        )
 
     plan_limit = get_organization_plan_limit(organization, usage_type)
     if plan_limit is None:
         plan_limit = 0
     usage = get_organization_usage(organization, usage_type)
-    addon_limit, addon_remaining = PlanAddOn.get_organization_totals(
-        organization,
-        usage_type,
-    )
     plan_remaining = max(0, plan_limit - usage)  # if negative, they have 0 remaining
 
     total_remaining = addon_remaining + plan_remaining
diff --git a/kpi/utils/usage_calculator.py b/kpi/utils/usage_calculator.py
index 3e80f72638..61816ef62b 100644
--- a/kpi/utils/usage_calculator.py
+++ b/kpi/utils/usage_calculator.py
@@ -38,13 +38,6 @@ def get_nlp_usage_by_type(self, usage_key: str) -> int:
         """Returns the usage for a given organization and usage key. The usage
         key should be the value from the USAGE_LIMIT_MAP found in the stripe kobo app.
         """
-        if self.organization is None:
-            return None
-
-        billing_details = self.organization.active_subscription_billing_details()
-        if not billing_details:
-            return None
-
         nlp_usage = self.get_nlp_usage_counters()
 
         cached_usage = {