diff --git a/src/rpft/parsers/common/rowparser.py b/src/rpft/parsers/common/rowparser.py
index 7774b13..18d80bb 100644
--- a/src/rpft/parsers/common/rowparser.py
+++ b/src/rpft/parsers/common/rowparser.py
@@ -47,16 +47,15 @@ def get_list_child_model(model):
 
 
 def is_list_type(model):
-    # Determine whether model is a list type,
-    # such as list, List, List[str], ...
-    # issubclass only works for Python <=3.6
-    # model.__dict__.get('__origin__') returns different things in different Python
-    # version.
-    # This function tries to accommodate both 3.6 and 3.8 (at least)
+    """
+    Determine whether model is a list type, such as list, list[str], List, List[str].
+
+    typing.List is deprecated as of Python 3.9
+    """
     return (
         is_basic_list_type(model)
         or model is List
-        or model.__dict__.get("__origin__") in [list, List]
+        or getattr(model, "__origin__", None) is list
     )
 
 
@@ -205,7 +204,7 @@ def assign_value(self, field, key, value, model):
            value = list(value)
            if isinstance(value[0], str):
                assert len(value) == 2
-                field[key] = {value[0] : value[1]}
+                field[key] = {value[0]: value[1]}
            elif isinstance(value[0], list):
                for entry in value:
                    assert len(entry) == 2
@@ -327,7 +326,11 @@ def parse_entry(
         # The model of field[key] is model, and thus value should also be interpreted
         # as being of type model.
         if not value_is_parsed:
-            if is_list_type(model) or is_basic_dict_type(model) or is_parser_model_type(model):
+            if (
+                is_list_type(model)
+                or is_basic_dict_type(model)
+                or is_parser_model_type(model)
+            ):
                 # If the expected type of the value is list/object,
                 # parse the cell content as such.
                 # Otherwise leave it as a string
diff --git a/src/rpft/parsers/creation/contentindexparser.py b/src/rpft/parsers/creation/contentindexparser.py
index 928db22..11b03d7 100644
--- a/src/rpft/parsers/creation/contentindexparser.py
+++ b/src/rpft/parsers/creation/contentindexparser.py
@@ -1,8 +1,6 @@
 import importlib
 import logging
 from collections import OrderedDict
-from typing import Dict, List
-
 from rpft.logger.logger import logging_context
 from rpft.parsers.common.model_inference import model_from_headers
 from rpft.parsers.common.sheetparser import SheetParser
@@ -58,8 +56,8 @@ def __init__(
         self.tag_matcher = tag_matcher
         self.template_sheets = {}
         self.data_sheets = {}
-        self.flow_definition_rows: List[ContentIndexRowModel] = []
-        self.campaign_parsers: Dict[str, tuple[str, CampaignParser]] = {}
+        self.flow_definition_rows: list[ContentIndexRowModel] = []
+        self.campaign_parsers: dict[str, tuple[str, CampaignParser]] = {}
         self.surveys = {}
         self.trigger_parsers = OrderedDict()
         self.user_models_module = (
@@ -133,7 +131,7 @@ def _process_content_index_table(self, sheet: Sheet):
                name = campaign_parser.campaign.name
 
                if name in self.campaign_parsers:
-                    LOGGER.warning(
+                    LOGGER.debug(
                        f"Duplicate campaign definition sheet '{name}'. "
                        "Overwriting previous definition."
                    )
@@ -155,7 +153,7 @@ def _add_template(self, row, update_duplicates=False):
        sheet_name = row.sheet_name[0]
 
        if sheet_name in self.template_sheets and update_duplicates:
-            LOGGER.info(
+            LOGGER.debug(
                f"Duplicate template definition sheet '{sheet_name}'. "
                "Overwriting previous definition."
            )
@@ -193,7 +191,7 @@ def _get_sheet_or_die(self, sheet_name):
 
        if len(candidates) > 1:
            readers = [c.reader.name for c in candidates]
-            LOGGER.warning(
+            LOGGER.debug(
                "Duplicate sheets found, "
                + str(
                    {
@@ -248,7 +246,7 @@ def _process_data_sheet(self, row):
        new_name = row.new_name or sheet_names[0]
 
        if new_name in self.data_sheets:
-            LOGGER.warning(
+            LOGGER.debug(
                f"Duplicate data sheet {new_name}. Overwriting previous definition."
            )
 
diff --git a/src/rpft/parsers/creation/contentindexrowmodel.py b/src/rpft/parsers/creation/contentindexrowmodel.py
index 9e24a9a..ecf196e 100644
--- a/src/rpft/parsers/creation/contentindexrowmodel.py
+++ b/src/rpft/parsers/creation/contentindexrowmodel.py
@@ -1,5 +1,4 @@
 from enum import Enum
-from typing import List
 
 from rpft.parsers.common.rowparser import ParserModel
 from rpft.parsers.creation.models import SurveyConfig
@@ -24,10 +23,10 @@ class Operation(ParserModel):
 class ContentIndexRowModel(ParserModel):
     type: str = ""
     new_name: str = ""
-    sheet_name: List[str] = []
+    sheet_name: list[str] = []
     data_sheet: str = ""
     data_row_id: str = ""
-    template_argument_definitions: List[TemplateArgument] = []  # internal name
+    template_argument_definitions: list[TemplateArgument] = []  # internal name
     template_arguments: list = []
     options: dict = {}
     survey_config: SurveyConfig = SurveyConfig()
@@ -35,7 +34,7 @@ class ContentIndexRowModel(ParserModel):
     data_model: str = ""
     group: str = ""
     status: str = ""
-    tags: List[str] = []
+    tags: list[str] = []
 
     def field_name_to_header_name(field):
         if field == "template_argument_definitions":
diff --git a/src/rpft/parsers/creation/flowparser.py b/src/rpft/parsers/creation/flowparser.py
index 446b0cd..970e64a 100644
--- a/src/rpft/parsers/creation/flowparser.py
+++ b/src/rpft/parsers/creation/flowparser.py
@@ -513,9 +513,17 @@ def _get_row_action(self, row):
                scheme=row.urn_scheme or "tel",
            )
        elif row.type == "remove_from_group":
-            return RemoveContactGroupAction(
-                groups=[self._get_or_create_group(row.mainarg_groups[0], row.obj_id)]
-            )
+            if not row.mainarg_groups:
+                LOGGER.warning(f"Removing contact from ALL groups.")
+                return RemoveContactGroupAction(groups=[], all_groups=True)
+            elif row.mainarg_groups[0] == "ALL":
+                return RemoveContactGroupAction(groups=[], all_groups=True)
+            else:
+                return RemoveContactGroupAction(
+                    groups=[
+                        self._get_or_create_group(row.mainarg_groups[0], row.obj_id)
+                    ]
+                )
        elif row.type == "save_flow_result":
            return SetRunResultAction(
                row.save_name, row.mainarg_value, category=row.result_category
@@ -551,7 +559,7 @@ def _get_or_create_group(self, name, uuid=None):
    def _get_row_node(self, row):
        if (
            row.type in ["add_to_group", "remove_from_group", "split_by_group"]
-            and row.obj_id
+            and row.obj_id and row.mainarg_groups
        ):
            self.rapidpro_container.record_group_uuid(row.mainarg_groups[0], row.obj_id)
diff --git a/src/rpft/parsers/creation/flowrowmodel.py b/src/rpft/parsers/creation/flowrowmodel.py
index c55c6f3..38f66aa 100644
--- a/src/rpft/parsers/creation/flowrowmodel.py
+++ b/src/rpft/parsers/creation/flowrowmodel.py
@@ -1,5 +1,3 @@
-from typing import List
-
 from rpft.parsers.common.rowparser import ParserModel
 from rpft.parsers.creation.models import Condition
 
@@ -43,7 +41,7 @@ def dict_to_list_of_pairs(headers):
 class WhatsAppTemplating(ParserModel):
     name: str = ""
     uuid: str = ""
-    variables: List[str] = []
+    variables: list[str] = []
 
 
 class Edge(ParserModel):
@@ -69,15 +67,15 @@ def header_name_to_field_name_with_context(header, row):
 class FlowRowModel(ParserModel):
     row_id: str = ""
     type: str
-    edges: List[Edge]
-    loop_variable: List[str] = []
+    edges: list[Edge]
+    loop_variable: list[str] = []
     include_if: bool = True
     mainarg_message_text: str = ""
     mainarg_value: str = ""
-    mainarg_groups: List[str] = []
+    mainarg_groups: list[str] = []
     mainarg_none: str = ""
     mainarg_dict: list = []  # encoded as list of pairs
-    mainarg_destination_row_ids: List[str] = []
+    mainarg_destination_row_ids: list[str] = []
     mainarg_flow_name: str = ""
     mainarg_expression: str = ""
     mainarg_iterlist: list = []
@@ -86,13 +84,13 @@ class FlowRowModel(ParserModel):
     data_sheet: str = ""
     data_row_id: str = ""
     template_arguments: list = []
-    choices: List[str] = []
+    choices: list[str] = []
     save_name: str = ""
     result_category: str = ""
     image: str = ""
     audio: str = ""
     video: str = ""
-    attachments: List[str] = []
+    attachments: list[str] = []
     urn_scheme: str = ""
     obj_name: str = ""
     obj_id: str = ""
@@ -100,14 +98,8 @@ class FlowRowModel(ParserModel):
     node_uuid: str = ""
     no_response: str = ""
     ui_type: str = ""
-    ui_position: List[str] = []
-
-    # TODO: Extra validation here, e.g. from must not be empty
-    # type must come from row_type_to_main_arg.keys() (see below)
-    # image/audio/video only makes sense if type == send_message
-    # mainarg_none should be ''
-    # _ui_position should be '' or a list of two ints
-    # ...
+    ui_position: list[str] = []
+
     def field_name_to_header_name(field):
         field_map = {
             "node_uuid": "_nodeId",
diff --git a/src/rpft/parsers/creation/models.py b/src/rpft/parsers/creation/models.py
index b1f4604..a1c5f7c 100644
--- a/src/rpft/parsers/creation/models.py
+++ b/src/rpft/parsers/creation/models.py
@@ -1,5 +1,3 @@
-from typing import List
-
 from rpft.parsers.common.rowparser import ParserModel
 
 
@@ -31,7 +29,7 @@ class ConditionWithMessage(ParserModel):
 
 
 class ConditionsWithMessage(ParserModel):
-    conditions: List[ConditionWithMessage] = []
+    conditions: list[ConditionWithMessage] = []
     general_message: str = ""
 
 
@@ -61,7 +59,7 @@ class Message(ParserModel):
     image: str = ""
     audio: str = ""
     video: str = ""
-    attachments: List[str] = []
+    attachments: list[str] = []
 
 
 class TemplateSheet:
@@ -76,7 +74,7 @@ def __init__(
         self,
         flow_definitions,
         data_sheets,
-        templates: List[TemplateSheet],
+        templates: list[TemplateSheet],
         surveys,
     ):
         self.flow_definitions = flow_definitions
@@ -119,7 +117,7 @@ class MCQChoice(ParserModel):
 
 
 class PostProcessing(ParserModel):
-    assignments: List[Assignment] = []
+    assignments: list[Assignment] = []
     """
     Assignments to perform via save_value rows.
     """
@@ -157,7 +155,7 @@ class SurveyQuestionModel(ParserModel):
     Type of the question.
     """
 
-    messages: List[Message]
+    messages: list[Message]
     """
     Question text.
     """
@@ -175,7 +173,7 @@ class SurveyQuestionModel(ParserModel):
     the question ID as {variable}_complete.
     """
 
-    choices: List[MCQChoice] = []
+    choices: list[MCQChoice] = []
     """
     MCQ specific fields.
     """
@@ -186,7 +184,7 @@ class SurveyQuestionModel(ParserModel):
     configuration is used.
     """
 
-    relevant: List[Condition] = []
+    relevant: list[Condition] = []
     """
     Conditions required to present the question, otherwise skipped.
     """
@@ -223,7 +221,7 @@ class SurveyQuestionModel(ParserModel):
     that is triggered.
     """
 
-    tags: List[str] = []
+    tags: list[str] = []
     """
     Tags allowing to filter questions to appear in a survey.
""" diff --git a/src/rpft/parsers/creation/triggerrowmodel.py b/src/rpft/parsers/creation/triggerrowmodel.py index fb2d038..f8a2ed9 100644 --- a/src/rpft/parsers/creation/triggerrowmodel.py +++ b/src/rpft/parsers/creation/triggerrowmodel.py @@ -1,5 +1,3 @@ -from typing import List - from pydantic.v1 import validator from rpft.parsers.common.rowparser import ParserModel @@ -7,10 +5,10 @@ class TriggerRowModel(ParserModel): type: str - keywords: List[str] = "" + keywords: list[str] = "" flow: str = "" - groups: List[str] = [] - exclude_groups: List[str] = [] + groups: list[str] = [] + exclude_groups: list[str] = [] channel: str = "" match_type: str = "" diff --git a/src/rpft/parsers/sheets.py b/src/rpft/parsers/sheets.py index 9cda36d..efc4ebd 100644 --- a/src/rpft/parsers/sheets.py +++ b/src/rpft/parsers/sheets.py @@ -1,7 +1,7 @@ import json from abc import ABC +from collections.abc import Mapping from pathlib import Path -from typing import List, Mapping import tablib from googleapiclient.discovery import build @@ -28,7 +28,7 @@ def sheets(self) -> Mapping[str, Sheet]: def get_sheet(self, name) -> Sheet: return self.sheets.get(name) - def get_sheets_by_name(self, name) -> List[Sheet]: + def get_sheets_by_name(self, name) -> list[Sheet]: return [sheet] if (sheet := self.get_sheet(name)) else [] diff --git a/src/rpft/rapidpro/models/actions.py b/src/rpft/rapidpro/models/actions.py index c7e0647..237d7a4 100644 --- a/src/rpft/rapidpro/models/actions.py +++ b/src/rpft/rapidpro/models/actions.py @@ -373,17 +373,17 @@ def assign_global_uuids(self, uuid_dict): group.assign_uuid(uuid_dict) def main_value(self): - return self.groups[0].name + return self.groups[0].name if self.groups else "" def render(self): return NotImplementedError def get_row_model_fields(self): # abstract method + obj_id = [group.uuid for group in self.groups][0] if self.groups else "" return { "mainarg_groups": [group.name for group in self.groups], - "obj_id": [group.uuid for group in self.groups][0] - or "", # 0th element as obj_id is not yet a list. + "obj_id": obj_id or "", # 0th element as obj_id is not yet a list. } diff --git a/src/rpft/rapidpro/models/routers.py b/src/rpft/rapidpro/models/routers.py index a916577..1610262 100644 --- a/src/rpft/rapidpro/models/routers.py +++ b/src/rpft/rapidpro/models/routers.py @@ -20,7 +20,7 @@ def from_dict(data, exits): elif data["type"] == "switch": return SwitchRouter.from_dict(data, exits) else: - raise ValueError("Router data has invalid router type.") + raise RapidProRouterError("Router data has invalid router type.") def _get_result_name_and_categories_from_data(data, exits): categories = [ @@ -150,7 +150,9 @@ def from_dict(data, exits): if category.uuid == data["default_category_uuid"] ] if not default_categories: - raise ValueError("Default category uuid does not match any category.") + raise RapidProRouterError( + "Default category uuid does not match any category." + ) no_response_category = None no_response_category_id = None wait_timeout = None @@ -165,7 +167,7 @@ def from_dict(data, exits): if category.uuid == no_response_category_id ] if not no_response_categories: - raise ValueError( + raise RapidProRouterError( "No Response category uuid does not match any category." 
                    )
                no_response_category = no_response_categories[0]
@@ -200,8 +202,8 @@ def create_case(self, comparison_type, arguments, category):
 
    def generate_category_name(self, comparison_arguments):
        # Auto-generate a category name that is guaranteed to be unique
-        # TODO: Write tests for this
        category_name = "_".join([str(a).title() for a in comparison_arguments])
+        category_name = category_name or "Yes"
        while self._get_category_or_none(category_name):
            category_name += "_alt"
        return category_name
@@ -451,7 +453,7 @@ def from_dict(data, exits):
        """
        matching_exits = [exit for exit in exits if exit.uuid == data["exit_uuid"]]
        if not matching_exits:
-            raise ValueError("RouterCategory with no matching exit.")
+            raise RapidProRouterError("RouterCategory with no matching exit.")
        return RouterCategory(
            name=data["name"], uuid=data["uuid"], exit=matching_exits[0]
        )
@@ -485,36 +487,40 @@ class RouterCase:
    }
 
    TEST_VALIDATIONS = {
-        "all_words": lambda x: len(x) == 1,
-        "has_any_word": lambda x: len(x) == 1,
-        "has_beginning": lambda x: len(x) == 1,
-        "has_category": lambda x: len(x) >= 1,
+        "all_words": lambda x: len(x) == 1 and x[0],
+        "has_any_word": lambda x: len(x) == 1 and x[0],
+        "has_beginning": lambda x: len(x) == 1 and x[0],
+        "has_category": lambda x: len(x) >= 1 and all(x),
        "has_date": lambda x: len(x) == 0,
-        "has_date_eq": lambda x: len(x) == 1,
-        "has_date_gt": lambda x: len(x) == 1,
-        "has_date_lt": lambda x: len(x) == 1,
-        "has_district": lambda x: len(x) == 1,
+        "has_date_eq": lambda x: len(x) == 1 and x[0],
+        "has_date_gt": lambda x: len(x) == 1 and x[0],
+        "has_date_lt": lambda x: len(x) == 1 and x[0],
+        "has_district": lambda x: len(x) == 1 and x[0],
        "has_email": lambda x: len(x) == 0,
        "has_error": lambda x: len(x) == 0,
-        "has_group": lambda x: len(x) in {1, 2},  # uuid obligatory, name optional?
-        "has_intent": lambda x: len(x) == 2,
+        # For has_group: First is UUID and second is name.
+        # In imported flows, UUID is obligatory, but the toolkit
+        # uses blank as a placeholder. Instead, the toolkit requires
+        # a name, which usually is optional.
+ "has_group": lambda x: len(x) in {1, 2} and (x[0] or x[1]), + "has_intent": lambda x: len(x) == 2 and all(x), "has_number": lambda x: len(x) == 0, - "has_number_between": lambda x: len(x) == 2, - "has_number_eq": lambda x: len(x) == 1, - "has_number_gt": lambda x: len(x) == 1, - "has_number_gte": lambda x: len(x) == 1, - "has_number_lt": lambda x: len(x) == 1, - "has_number_lte": lambda x: len(x) == 1, - "has_only_phrase": lambda x: len(x) == 1, - "has_only_text": lambda x: len(x) == 1, - "has_pattern": lambda x: len(x) == 1, - "has_phone": lambda x: len(x) in {0, 1}, - "has_phrase": lambda x: len(x) == 1, + "has_number_between": lambda x: len(x) == 2 and all(x), + "has_number_eq": lambda x: len(x) == 1 and x[0], + "has_number_gt": lambda x: len(x) == 1 and x[0], + "has_number_gte": lambda x: len(x) == 1 and x[0], + "has_number_lt": lambda x: len(x) == 1 and x[0], + "has_number_lte": lambda x: len(x) == 1 and x[0], + "has_only_phrase": lambda x: len(x) == 1 and x[0], + "has_only_text": lambda x: len(x) == 1 and x[0], + "has_pattern": lambda x: len(x) == 1 and x[0], + "has_phone": lambda x: len(x) in {0, 1} and all(x), + "has_phrase": lambda x: len(x) == 1 and x[0], "has_state": lambda x: len(x) == 0, "has_text": lambda x: len(x) == 0, "has_time": lambda x: len(x) == 0, - "has_top_intent": lambda x: len(x) == 2, - "has_ward": lambda x: len(x) == 2, + "has_top_intent": lambda x: len(x) == 2 and all(x), + "has_ward": lambda x: len(x) == 2 and all(x), } def __init__(self, comparison_type, arguments, category_uuid, uuid=None): @@ -534,11 +540,11 @@ def from_dict(data): def validate(self): if self.type not in RouterCase.TEST_VALIDATIONS: - raise ValueError(f'Invalid router test type: "{self.type}"') + raise RapidProRouterError(f'Invalid router test type: "{self.type}"') if not RouterCase.TEST_VALIDATIONS[self.type](self.arguments): - print( - f"Warning: Invalid number of arguments {len(self.arguments)} for router" - f'test type "{self.type}"' + raise RapidProRouterError( + f"Invalid number of arguments {len(self.arguments)} or blank " + f'arguments for router test type "{self.type}"' ) def render(self): diff --git a/tests/input/no_switch_nodes.csv b/tests/input/no_switch_nodes.csv index 328a081..3ad1b48 100644 --- a/tests/input/no_switch_nodes.csv +++ b/tests/input/no_switch_nodes.csv @@ -10,3 +10,4 @@ 9,"set_contact_language",8,,,,,,"eng",,,,,,,,,,,,,,,,,,"711ea340-dc20-4471-a0e4-6af2c4a278b9",,"execute_actions","280;1020" 10,"set_contact_name",9,,,,,,"John Doe",,,,,,,,,,,,,,,,,,"711ea340-dc20-4471-a0e4-6af2c4a278b9",,"execute_actions","280;1020" 11,"add_contact_urn",10,,,,,,"@results.internation_phone_number",,,,,,,,,,,,,,,,,,"768afc12-93e8-4cf8-9cdc-c92cce36c365",,, +12,"remove_from_group",11,,,,,,,,,,,,,,,,,,,,,,,,"12340fc2-f28c-4ad0-8c02-4afd63ad31ab",,, diff --git a/tests/input/no_switch_nodes_without_row_ids.csv b/tests/input/no_switch_nodes_without_row_ids.csv index aa68b63..673b521 100644 --- a/tests/input/no_switch_nodes_without_row_ids.csv +++ b/tests/input/no_switch_nodes_without_row_ids.csv @@ -10,3 +10,4 @@ ,"set_contact_language",,,,,,,"eng",,,,,,,,,,,,,,,,,"711ea340-dc20-4471-a0e4-6af2c4a278b9",,"execute_actions","280;1020" ,"set_contact_name",,,,,,,"John Doe",,,,,,,,,,,,,,,,,"711ea340-dc20-4471-a0e4-6af2c4a278b9",,"execute_actions","280;1020" ,"add_contact_urn",,,,,,,"@results.internation_phone_number",,,,,,,,,,,,,,,,,"768afc12-93e8-4cf8-9cdc-c92cce36c365",,, +,"remove_from_group",,,,,,,,,,,,,,,,,,,,,,,,"12340fc2-f28c-4ad0-8c02-4afd63ad31ab",,, diff --git 
index 6faca4b..e725beb 100644
--- a/tests/output/all_test_flows.json
+++ b/tests/output/all_test_flows.json
@@ -172,6 +172,23 @@
                    "exits": [
                        {
                            "uuid": "51548997-3f36-487d-9498-b48e33907b49",
+                            "destination_uuid": "12340fc2-f28c-4ad0-8c02-4afd63ad31ab"
+                        }
+                    ]
+                },
+                {
+                    "uuid": "12340fc2-f28c-4ad0-8c02-4afd63ad31ab",
+                    "actions": [
+                        {
+                            "type": "remove_contact_groups",
+                            "groups": [],
+                            "all_groups": true,
+                            "uuid": "1234d6d0-02a2-4edf-9025-8e00959e4d05"
+                        }
+                    ],
+                    "exits": [
+                        {
+                            "uuid": "d9482afc-9802-4c49-87e0-6b6512b19632",
                            "destination_uuid": null
                        }
                    ]
diff --git a/tests/test_flowparser.py b/tests/test_flowparser.py
index d0d6908..7238a46 100644
--- a/tests/test_flowparser.py
+++ b/tests/test_flowparser.py
@@ -155,7 +155,7 @@ def test_no_switch_node_rows(self):
        nodes = output["nodes"]
        all_node_uuids = [row.node_uuid for row in self.rows]
        # Rows 0,1,2,3 and rows -3,-2 are actions joined into a single node.
-        expected_node_uuids = all_node_uuids[3:-2] + all_node_uuids[-1:]
+        expected_node_uuids = all_node_uuids[3:-3] + all_node_uuids[-2:]
        self.assertEqual(
            expected_node_uuids,
            [node["uuid"] for node in nodes],
@@ -165,7 +165,7 @@
 
        self.assertEqual(output["type"], "messaging")
        self.assertEqual(output["language"], "eng")
-        self.assertEqual(len(nodes), 7)
+        self.assertEqual(len(nodes), 8)
 
        node_0 = nodes[0]
        self.assertEqual(len(node_0["actions"]), 4)
@@ -239,7 +239,13 @@
 
        node_6 = nodes[6]
        self.assertEqual(node_5["exits"][0]["destination_uuid"], node_6["uuid"])
-        self.assertIsNone(node_6["exits"][0]["destination_uuid"])
+
+        node_7 = nodes[7]
+        self.assertEqual(len(node_7["actions"]), 1)
+        self.assertEqual("remove_contact_groups", node_7["actions"][0]["type"])
+        self.assertEqual(len(node_7["actions"][0]["groups"]), 0)
+        self.assertTrue(node_7["actions"][0]["all_groups"])
+        self.assertIsNone(node_7["exits"][0]["destination_uuid"])
 
        # Check UI positions/types of the first two nodes
        render_ui = output["_ui"]["nodes"]
diff --git a/tests/test_routers.py b/tests/test_routers.py
index e278836..24bfe9a 100644
--- a/tests/test_routers.py
+++ b/tests/test_routers.py
@@ -1,5 +1,6 @@
 import unittest
 
+from rpft.rapidpro.models.exceptions import RapidProRouterError
 from rpft.rapidpro.models.routers import SwitchRouter, RandomRouter
 
 
@@ -178,7 +179,7 @@ def test_no_args_tests(self):
 
    def test_invalid_test(self):
        switch_router = SwitchRouter(operand="@fields.field")
-        with self.assertRaises(ValueError):
+        with self.assertRaises(RapidProRouterError):
            switch_router.add_choice(
                "@fields.field",
                "has_junk",
diff --git a/tests/utils.py b/tests/utils.py
index db36a35..069b5a6 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -234,7 +234,9 @@ def find_destination_uuid(current_node, context):
    "enter_flow": (lambda x: x["flow"]["name"]),
    "open_ticket": (lambda x: x["subject"]),
    "play_audio": (lambda x: x["audio_url"]),
-    "remove_contact_groups": (lambda x: x["groups"][0]["name"]),
+    "remove_contact_groups": (
+        lambda x: x["groups"][0]["name"] if x["groups"] else "ALL"
+    ),
    "say_msg": (lambda x: x["text"]),
    "send_broadcast": (lambda x: x["text"]),
    "send_email": (lambda x: x["subject"]),
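
Reviewer note (not part of the patch): the new remove_from_group branch in FlowParser._get_row_action means a row with an empty group list, or the literal group name ALL, now renders a remove_contact_groups action with all_groups set and no group references, which is what the added no_switch_nodes rows and the all_test_flows.json expectations exercise. Below is a minimal stand-alone sketch of that mapping, using plain dicts as hypothetical stand-ins for the toolkit's RemoveContactGroupAction and group lookup; it is not the toolkit's own API.

# Hypothetical stand-alone sketch (not toolkit code): mirrors the branching
# added to FlowParser._get_row_action for rows of type remove_from_group.
def remove_from_group_action(mainarg_groups, get_or_create_group):
    if not mainarg_groups or mainarg_groups[0] == "ALL":
        # No group named, or the literal "ALL": remove the contact from all
        # groups; the rendered action carries no group references.
        return {"type": "remove_contact_groups", "groups": [], "all_groups": True}
    # Otherwise remove the contact from the single named group, as before.
    return {
        "type": "remove_contact_groups",
        "groups": [get_or_create_group(mainarg_groups[0])],
        "all_groups": False,
    }


if __name__ == "__main__":
    def lookup(name):
        # Stand-in for the parser's group lookup; UUID here is illustrative only.
        return {"name": name, "uuid": "hypothetical-uuid"}

    print(remove_from_group_action([], lookup))            # all_groups=True
    print(remove_from_group_action(["ALL"], lookup))       # all_groups=True
    print(remove_from_group_action(["Enrolled"], lookup))  # one named group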