@@ -42,15 +42,29 @@ func main() {
42
42
// Define the flags
43
43
helpFlag := flag .Bool ("h" , false , "Show this message" )
44
44
convFlag := flag .Bool ("c" , false , "Start a conversation with Moki" )
45
- aiFlag := flag .String ("llm" , aiutil .DefaultProvider , "Selct the LLM provider, either OpenAI, Replicate , or Anyscale " )
45
+ aiFlag := flag .String ("llm" , aiutil .DefaultProvider , "Select the LLM provider, either OpenAI, Anyscale , or Replicate " )
46
46
modelFlag := flag .String ("m" , "" , "Set the model to use for the LLM response" )
47
47
temperatureFlag := flag .Float64 ("t" , aiutil .DefaultTemp , "Set the temperature for the LLM response" )
48
48
maxTokensFlag := flag .Int ("max-tokens" , aiutil .DefaultMaxTokens , "Set the maximum number of tokens to generate per response" )
49
49
ragFlag := flag .Bool ("r" , true , "Enable RAG functionality" )
50
+ flagFlag := flag .Bool ("flags" , false , "Log the flags used for this request" )
50
51
51
52
// Parse the flags
52
53
flag .Parse ()
53
54
55
+ // Log the flags for this request
56
+ if * flagFlag {
57
+ logger .WithFields (logrus.Fields {
58
+ "helpFlag" : * helpFlag ,
59
+ "convFlag" : * convFlag ,
60
+ "aiFlag" : * aiFlag ,
61
+ "modelFlag" : * modelFlag ,
62
+ "temperatureFlag" : * temperatureFlag ,
63
+ "maxTokensFlag" : * maxTokensFlag ,
64
+ "ragFlag" : * ragFlag ,
65
+ }).Infoln ("Flags" )
66
+ }
67
+
54
68
// Show the help message
55
69
if * helpFlag {
56
70
fmt .Println (tools .HelpMessage )
0 commit comments