Skip to content

Commit a33b1a0

Browse files
committed
feat: POC automation test for AI using python
JIRA: QA-23855 risk: nonprod
1 parent 40b351c commit a33b1a0

26 files changed

+1025
-1
lines changed

.gitignore

+2
Original file line numberDiff line numberDiff line change
@@ -16,3 +16,5 @@ docs/resources/_gen
1616
docs/tmp/
1717
docs/versioned_docs
1818
docs/.hugo_build.lock
19+
20+
integration_tests/**/__pycache__

gooddata-sdk/gooddata_sdk/compute/service.py

+17-1
Original file line numberDiff line numberDiff line change
@@ -105,5 +105,21 @@ def ai_chat_history_reset(self, workspace_id: str) -> None:
105105
Args:
106106
workspace_id: workspace identifier
107107
"""
108-
chat_history_request = ChatHistoryRequest(reset=True)
108+
chat_history_request = ChatHistoryRequest(
109+
reset=True,
110+
)
111+
self._actions_api.ai_chat_history(workspace_id, chat_history_request, _check_return_type=False)
112+
113+
def ai_chat_history_user_feedback(
    self, workspace_id: str, chat_history_interaction_id: int = 0, user_feedback: str = "POSITIVE"
) -> None:
    """
    Submit user feedback for a chat interaction with AI in a GoodData workspace.

    (Fixed docstring: the original said "Reset chat history with AI", copied
    from ai_chat_history_reset — this method posts feedback, it does not reset.)

    Args:
        workspace_id: workspace identifier
        chat_history_interaction_id: id of the chat interaction the feedback applies to
        user_feedback: feedback value, e.g. "POSITIVE" — presumably also "NEGATIVE";
            TODO(review): confirm the allowed values against the API spec
    """
    chat_history_request = ChatHistoryRequest(
        chat_history_interaction_id=chat_history_interaction_id, user_feedback=user_feedback
    )
    # _check_return_type=False matches the other ai_chat_history calls in this service.
    self._actions_api.ai_chat_history(workspace_id, chat_history_request, _check_return_type=False)

integration_tests/.env.template

+6
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
# (C) 2024 GoodData Corporation
2+
HOST=
3+
TOKEN=
4+
DATASOURCE_ID=
5+
WORKSPACE_ID=
6+
LLM_TOKEN=

integration_tests/__init__.py

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
# (C) 2021 GoodData Corporation

integration_tests/conftest.py

+34
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
# (C) 2024 GoodData Corporation
2+
# Shared pytest fixtures for the integration test suite (loads .env, validates env vars).
3+
import os
4+
5+
import pytest
6+
from dotenv import load_dotenv
7+
8+
# Load the .env file from the current directory
9+
load_dotenv()
10+
11+
12+
@pytest.fixture(scope="session", autouse=True)
def setup_env():
    """Validate the test environment once per session before any test runs.

    Re-exports each variable into os.environ (with defaults) so downstream
    code can rely on os.environ directly.  HOST and TOKEN are hard
    requirements; the rest only emit a warning when unset.

    Raises:
        OSError: if HOST or TOKEN is not set.
    """
    defaults = {
        "HOST": "https://checklist.staging.stg11.panther.intgdc.com",
        "TOKEN": "",
        "DATASOURCE_ID": "",
        "WORKSPACE_ID": "",
        "DATASOURCE_TYPE": "",
        "DATASOURCE_PASSWORD": "",
    }
    for name, default in defaults.items():
        os.environ[name] = os.getenv(name, default)

    # Hard requirements: fail fast so the whole session aborts with a clear message.
    for required in ("HOST", "TOKEN"):
        if not os.environ[required]:
            raise OSError(f"\n{required} environment variable is not set.")

    # Soft requirements: warn only.  (Bug fix: the warning previously said
    # "DATA_SOURCE_ID" while the actual variable is "DATASOURCE_ID".)
    for optional in ("DATASOURCE_ID", "WORKSPACE_ID", "DATASOURCE_TYPE", "DATASOURCE_PASSWORD"):
        if not os.environ[optional]:
            print(f"\nWarning: {optional} environment variable is not set.")
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
{
2+
"id": "total_returns_per_month",
3+
"title": "Total Returns per Month",
4+
"visualizationType": "COLUMN",
5+
"metrics": [
6+
{
7+
"id": "total_returns",
8+
"type": "metric",
9+
"title": "Total Returns"
10+
}
11+
],
12+
"dimensionality": [
13+
{
14+
"id": "return_date.month",
15+
"type": "attribute",
16+
"title": "Return date - Month/Year"
17+
}
18+
],
19+
"filters": [],
20+
"suggestions": [
21+
{
22+
"query": "Switch to a line chart to better visualize the trend of total returns over the months.",
23+
"label": "Line Chart for Trends"
24+
},
25+
{
26+
"query": "Filter the data to show total returns for this year only.",
27+
"label": "This Year's Returns"
28+
}
29+
]
30+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
{
2+
"id": "number_of_order_ids",
3+
"title": "Number of Order IDs",
4+
"visualizationType": "HEADLINE",
5+
"metrics": [
6+
{
7+
"id": "order_id",
8+
"type": "attribute",
9+
"title": "Number of Order IDs",
10+
"aggFunction": "COUNT"
11+
}
12+
],
13+
"dimensionality": [],
14+
"filters": [],
15+
"suggestions": [
16+
{
17+
"query": "Show the number of orders by year",
18+
"label": "Show by Year"
19+
}
20+
]
21+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
[
2+
{
3+
"question": "What is number of order id, show as HEADLINE chart?",
4+
"expected_objects_file": "headline_count_of_order.json"
5+
},
6+
{
7+
"question": "What is total returns per month? show as COLUMN chart",
8+
"expected_objects_file": "column_total_returns_by_month.json"
9+
}
10+
]
+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
# (C) 2021 GoodData Corporation

integration_tests/scenarios/aiChat.py

+56
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
# (C) 2024 GoodData Corporation
2+
import os
3+
import sys
4+
from pprint import pprint
5+
6+
import pytest
7+
from dotenv import load_dotenv
8+
from gooddata_sdk import GoodDataSdk
9+
10+
SCRIPTS_DIR = os.path.dirname(os.path.abspath(__file__))
11+
sys.path.append(SCRIPTS_DIR)
12+
13+
# Load environment variables from the .env file
14+
load_dotenv()
15+
16+
# Create the test_config dictionary with the loaded environment variables
17+
test_config = {"host": os.getenv("HOST"), "token": os.getenv("TOKEN")}
18+
workspace_id = os.getenv("WORKSPACE_ID")
19+
20+
questions = ["What is number of order line id ?"]
21+
sdk = GoodDataSdk.create(host_=test_config["host"], token_=test_config["token"])
22+
23+
24+
def test_reset_chat_history():
25+
sdk.compute.ai_chat_history_reset(workspace_id)
26+
27+
28+
@pytest.mark.parametrize("question", questions)
29+
def test_ask_ai(question):
30+
chat_ai_res = sdk.compute.ai_chat(workspace_id, question=question)
31+
pprint(chat_ai_res.to_dict())
32+
assert chat_ai_res["created_visualizations"] is not None, "Created visualizations should not be None"
33+
assert chat_ai_res["routing"] is not None, "Routing should not be None"
34+
35+
36+
def test_ai_chat_history():
37+
chat_ai_res = sdk.compute.ai_chat(workspace_id, question="show me a headline generating net sales and net order")
38+
chat_ai_res.to_dict()
39+
chat_history_interaction_id = chat_ai_res["chat_history_interaction_id"]
40+
pprint(chat_history_interaction_id)
41+
chat_history_res = sdk.compute.ai_chat_history(workspace_id, chat_history_interaction_id)
42+
sdk.compute.ai_chat_history_user_feedback(workspace_id, chat_history_interaction_id, "POSITIVE")
43+
pprint(chat_history_res.to_dict())
44+
45+
46+
def test_get_chat_history():
47+
chat_history_res = sdk.compute.ai_chat_history(workspace_id)
48+
pprint(chat_history_res.to_dict())
49+
assert chat_history_res["interactions"] is not None, "Interactions should not be None"
50+
assert (
51+
chat_history_res["interactions"][0]["question"] == "What is number of order line id ?"
52+
), "First interaction question should match"
53+
54+
55+
if __name__ == "__main__":
56+
pytest.main()
+97
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,97 @@
1+
# (C) 2024 GoodData Corporation
2+
3+
import os
4+
from pathlib import Path
5+
from pprint import pprint
6+
7+
import gooddata_api_client
8+
import pytest
9+
from dotenv import load_dotenv
10+
from gooddata_api_client.api import smart_functions_api
11+
from gooddata_api_client.model.chat_history_request import ChatHistoryRequest
12+
from gooddata_api_client.model.chat_request import ChatRequest
13+
14+
from integration_tests.scenarios.utils import compare_and_print_diff, load_json, normalize_metrics
15+
16+
_current_dir = Path(__file__).parent.absolute()
17+
parent_dir = _current_dir.parent
18+
expected_object_dir = parent_dir / "expected"
19+
questions_list_dir = parent_dir / "fixtures" / "ai_questions.json"
20+
21+
# Load environment variables from the .env file
22+
load_dotenv()
23+
24+
25+
@pytest.fixture(scope="module")
26+
def test_config():
27+
return {
28+
"host": os.getenv("HOST"),
29+
"token": os.getenv("TOKEN"),
30+
"workspace_id": os.getenv("WORKSPACE_ID"),
31+
"llm_token": os.getenv("LLM_TOKEN"),
32+
}
33+
34+
35+
@pytest.fixture(scope="module")
36+
def api_client(test_config):
37+
configuration = gooddata_api_client.Configuration(host=test_config["host"])
38+
api_client = gooddata_api_client.ApiClient(configuration)
39+
api_client.default_headers["Authorization"] = f"Bearer {test_config['token']}"
40+
return api_client
41+
42+
43+
def validate_response(actual_response, expected_response):
44+
actual_metrics = normalize_metrics(
45+
actual_response["created_visualizations"]["objects"][0]["metrics"], exclude_keys=["title"]
46+
)
47+
expected_metrics = normalize_metrics(expected_response["metrics"], exclude_keys=["title"])
48+
compare_and_print_diff(actual_metrics, expected_metrics, "Metrics")
49+
actual_visualization_type = actual_response["created_visualizations"]["objects"][0]["visualization_type"]
50+
expected_visualization_type = expected_response["visualizationType"]
51+
compare_and_print_diff(actual_visualization_type, expected_visualization_type, "Visualization type")
52+
actual_dimensionality = actual_response["created_visualizations"]["objects"][0]["dimensionality"]
53+
expected_dimensionality = expected_response["dimensionality"]
54+
compare_and_print_diff(actual_dimensionality, expected_dimensionality, "Dimensionality")
55+
actual_filters = actual_response["created_visualizations"]["objects"][0]["filters"]
56+
expected_filters = expected_response["filters"]
57+
compare_and_print_diff(actual_filters, expected_filters, "Filters")
58+
59+
60+
def test_ai_chat_history_reset(api_client, test_config):
61+
api_instance = smart_functions_api.SmartFunctionsApi(api_client)
62+
chat_history_request = ChatHistoryRequest(reset=True)
63+
try:
64+
api_response = api_instance.ai_chat_history(test_config["workspace_id"], chat_history_request)
65+
pprint(api_response)
66+
except gooddata_api_client.ApiException as e:
67+
pytest.fail(f"API exception: {e}")
68+
except Exception as e:
69+
pytest.fail(f"Unexpected error: {e}")
70+
71+
72+
questions_list = load_json(questions_list_dir)
73+
74+
75+
@pytest.mark.parametrize(
76+
"question, expected_file",
77+
[(item["question"], item["expected_objects_file"]) for item in questions_list],
78+
ids=[item["question"] for item in questions_list],
79+
)
80+
def test_ai_chat(api_client, test_config, question, expected_file):
81+
expected_objects = load_json(os.path.join(expected_object_dir, expected_file))
82+
api_instance = smart_functions_api.SmartFunctionsApi(api_client)
83+
try:
84+
api_response = api_instance.ai_chat(test_config["workspace_id"], ChatRequest(question=question))
85+
print("\napi_response", api_response.created_visualizations.objects[0])
86+
print("\nexpected_file", expected_objects)
87+
88+
validate_response(api_response.to_dict(), expected_objects)
89+
90+
except gooddata_api_client.ApiException as e:
91+
pytest.fail(f"API exception: {e}")
92+
except Exception as e:
93+
pytest.fail(f"Unexpected error: {e}")
94+
95+
96+
if __name__ == "__main__":
97+
pytest.main(["-s", __file__])

0 commit comments

Comments
 (0)