diff --git a/examples/configs/nemoguard-local/config.yml b/examples/configs/nemoguard-local/config.yml
new file mode 100644
index 000000000..8f5df7c12
--- /dev/null
+++ b/examples/configs/nemoguard-local/config.yml
@@ -0,0 +1,26 @@
+models:
+  - type: main
+    engine: nim
+    model: meta/llama-3.1-70b-instruct
+
+  - type: content_safety
+    engine: nim
+    parameters:
+      base_url: http://10.63.135.193:8000/v1
+      model_name: "llama-3.1-nemoguard-8b-content-safety"
+  - type: topic_control
+    engine: nim
+    parameters:
+      base_url: http://10.34.0.68:8000/v1
+      model_name: "llama-3.1-nemoguard-8b-topic-control"
+
+rails:
+  input:
+    flows:
+      - topic safety check input $model=topic_control
+  output:
+    flows:
+      - content safety check output $model=content_safety
+  dialog:
+    single_call:
+      enabled: False
diff --git a/examples/configs/nemoguard-local/prompts.yml b/examples/configs/nemoguard-local/prompts.yml
new file mode 100644
index 000000000..5654f8542
--- /dev/null
+++ b/examples/configs/nemoguard-local/prompts.yml
@@ -0,0 +1,103 @@
+prompts:
+  - task: content_safety_check_input $model=content_safety
+    content: |
+      Task: Check if there is unsafe content in the user or agent messages in the below conversation in accordance with our safety policy with the below categories. Provide a 'safe' or 'unsafe' rating for both the user message and the agent response. If there is unsafe content, please also provide a list of violated categories according to our safety policy below.
+
+      <BEGIN UNSAFE CONTENT CATEGORIES>
+      S1: Violence.
+      S2: Sexual.
+      S3: Criminal Planning/Confessions.
+      S4: Guns and Illegal Weapons.
+      S5: Controlled/Regulated Substances.
+      S6: Suicide and Self Harm.
+      S7: Sexual (minor).
+      S8: Hate/Identity Hate.
+      S9: PII/Privacy.
+      S10: Harassment.
+      S11: Threat.
+      S12: Profanity.
+      S13: Needs Caution.
+      S14: Other.
+      S15: Manipulation.
+      S16: Fraud/Deception.
+      S17: Malware.
+      S18: High Risk Gov Decision Making.
+      S19: Political/Misinformation/Conspiracy.
+      S20: Copyright/Trademark/Plagiarism.
+      S21: Unauthorized Advice.
+      S22: Illegal Activity.
+      S23: Immoral/Unethical.
+      <END UNSAFE CONTENT CATEGORIES>
+
+      <BEGIN CONVERSATION>
+
+      user: {{ user_input }}
+
+      <END CONVERSATION>
+
+      Provide your safety assessment for the above conversation in the below output JSON format:
+      {"User Safety": safe or unsafe rating for the user message, "Response Safety": safe or unsafe rating for the agent response. Omit if no agent response present. "Safety Categories": a comma-separated list of applicable safety categories from the provided taxonomy. Omit if all safe.}
+
+      Do not include anything other than the output JSON in your response.
+      Output JSON:
+    output_parser: nemoguard_parse_prompt_safety
+    max_tokens: 50
+
+  - task: content_safety_check_output $model=content_safety
+    content: |
+      Task: Check if there is unsafe content in the user or agent messages in the below conversation in accordance with our safety policy with the below categories. Provide a 'safe' or 'unsafe' rating for both the user message and the agent response. If there is unsafe content, please also provide a list of violated categories according to our safety policy below.
+
+      <BEGIN UNSAFE CONTENT CATEGORIES>
+      S1: Violence.
+      S2: Sexual.
+      S3: Criminal Planning/Confessions.
+      S4: Guns and Illegal Weapons.
+      S5: Controlled/Regulated Substances.
+      S6: Suicide and Self Harm.
+      S7: Sexual (minor).
+      S8: Hate/Identity Hate.
+      S9: PII/Privacy.
+      S10: Harassment.
+      S11: Threat.
+      S12: Profanity.
+      S13: Needs Caution.
+      S14: Other.
+      S15: Manipulation.
+      S16: Fraud/Deception.
+      S17: Malware.
+      S18: High Risk Gov Decision Making.
+      S19: Political/Misinformation/Conspiracy.
+      S20: Copyright/Trademark/Plagiarism.
+      S21: Unauthorized Advice.
+      S22: Illegal Activity.
+      S23: Immoral/Unethical.
+      <END UNSAFE CONTENT CATEGORIES>
+
+      <BEGIN CONVERSATION>
+
+      user: {{ user_input }}
+
+      response: agent: {{ bot_response }}
+
+      <END CONVERSATION>
+
+      Provide your safety assessment for the above conversation in the below output JSON format:
+      {"User Safety": safe or unsafe rating for the user message, "Response Safety": safe or unsafe rating for the agent response. Omit if no agent response present. "Safety Categories": a comma-separated list of applicable safety categories from the provided taxonomy. Omit if all safe.}
+
+      Do not include anything other than the output JSON in your response.
+      Output JSON:
+    output_parser: nemoguard_parse_response_safety
+    max_tokens: 50
+
+  - task: topic_safety_check_input $model=topic_control
+    content: |
+      You are to act as a customer service agent, providing users with factual information in accordance with the knowledge base. Your role is to ensure that you respond only to relevant queries and adhere to the following guidelines:
+
+      Guidelines for the user messages:
+      - Do not answer questions related to personal opinions or advice on user's order, future recommendations
+      - Do not provide any information on non-company products or services.
+      - Do not answer enquiries unrelated to the company policies.
+      - Do not answer questions asking for personal details about the agent or its creators.
+      - Do not answer questions about sensitive topics related to politics, religion, or other sensitive subjects.
+      - If a user asks topics irrelevant to the company's customer service relations, politely redirect the conversation or end the interaction.
+      - Your responses should be professional, accurate, and compliant with customer relations guidelines, focusing solely on providing transparent, up-to-date information about the company that is already publicly available.
diff --git a/nemoguardrails/rails/llm/llmrails.py b/nemoguardrails/rails/llm/llmrails.py
index 21e90725c..aab0c2854 100644
--- a/nemoguardrails/rails/llm/llmrails.py
+++ b/nemoguardrails/rails/llm/llmrails.py
@@ -1277,6 +1277,7 @@ def _update_explain_info():
         )
 
         async for chunk_list, chunk_str_rep in buffer_strategy(streaming_handler):
+            print(f"chunk_list: {chunk_list}")
             chunk_str = " ".join(chunk_list)
 
             if stream_first:
@@ -1299,6 +1300,7 @@ def _update_explain_info():
                 )
 
                 # Execute the action. (Your execute_action returns only the result.)
+                print(f"applying action: {action_name}")
                 result = await self.runtime.action_dispatcher.execute_action(
                     action_name, params
                 )
@@ -1336,8 +1338,7 @@ def _get_action_details_from_flow_id(
         supported_prefixes = [
             "content safety check output",
-            "content safety check",
-            "topic safety check",
+            "topic safety check output",
         ]
 
         if prefixes:
             supported_prefixes.extend(prefixes)
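
For reviewers who want to exercise the new example end to end, below is a minimal sketch (not part of this change) of how the config could be loaded and streamed through the code path touched in llmrails.py. It assumes the nemoguardrails package is installed, the two NemoGuard NIM endpoints in config.yml are reachable, and credentials for the main NIM model are configured; the config path and sample user message are illustrative only.

# sketch.py -- illustrative only; paths, message, and environment are assumptions
import asyncio

from nemoguardrails import LLMRails, RailsConfig


async def main() -> None:
    # Load config.yml and prompts.yml from the new example directory.
    config = RailsConfig.from_path("examples/configs/nemoguard-local")
    rails = LLMRails(config)

    # Stream a response; output rails (content safety check output) run over
    # buffered chunks in the streaming loop modified in this diff.
    async for chunk in rails.stream_async(
        messages=[{"role": "user", "content": "What is your return policy?"}]
    ):
        print(chunk, end="")


if __name__ == "__main__":
    asyncio.run(main())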