@@ -42,15 +42,29 @@ func main() {
4242 // Define the flags
4343 helpFlag := flag .Bool ("h" , false , "Show this message" )
4444 convFlag := flag .Bool ("c" , false , "Start a conversation with Moki" )
45- aiFlag := flag .String ("llm" , aiutil .DefaultProvider , "Selct the LLM provider, either OpenAI, Replicate , or Anyscale " )
45+ aiFlag := flag .String ("llm" , aiutil .DefaultProvider , "Select the LLM provider, either OpenAI, Anyscale , or Replicate " )
4646 modelFlag := flag .String ("m" , "" , "Set the model to use for the LLM response" )
4747 temperatureFlag := flag .Float64 ("t" , aiutil .DefaultTemp , "Set the temperature for the LLM response" )
4848 maxTokensFlag := flag .Int ("max-tokens" , aiutil .DefaultMaxTokens , "Set the maximum number of tokens to generate per response" )
4949 ragFlag := flag .Bool ("r" , true , "Enable RAG functionality" )
50+ flagFlag := flag .Bool ("flags" , false , "Log the flags used for this request" )
5051
5152 // Parse the flags
5253 flag .Parse ()
5354
55+ // Log the flags for this request
56+ if * flagFlag {
57+ logger .WithFields (logrus.Fields {
58+ "helpFlag" : * helpFlag ,
59+ "convFlag" : * convFlag ,
60+ "aiFlag" : * aiFlag ,
61+ "modelFlag" : * modelFlag ,
62+ "temperatureFlag" : * temperatureFlag ,
63+ "maxTokensFlag" : * maxTokensFlag ,
64+ "ragFlag" : * ragFlag ,
65+ }).Infoln ("Flags" )
66+ }
67+
5468 // Show the help message
5569 if * helpFlag {
5670 fmt .Println (tools .HelpMessage )
0 commit comments