@@ -279,6 +279,25 @@ def cancel_fine_tuning_job(
     """
     try:
         optional_params = GenericLiteLLMParams(**kwargs)
+        ### TIMEOUT LOGIC ###
+        timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600
+        # set timeout for 10 minutes by default
+
+        if (
+            timeout is not None
+            and isinstance(timeout, httpx.Timeout)
+            and supports_httpx_timeout(custom_llm_provider) == False
+        ):
+            read_timeout = timeout.read or 600
+            timeout = read_timeout  # default 10 min timeout
+        elif timeout is not None and not isinstance(timeout, httpx.Timeout):
+            timeout = float(timeout)  # type: ignore
+        elif timeout is None:
+            timeout = 600.0
+
+        _is_async = kwargs.pop("acancel_fine_tuning_job", False) is True
+
+        # OpenAI
         if custom_llm_provider == "openai":

             # for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there
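The hoisted block above normalizes whatever timeout the caller supplied before any provider branch runs: an httpx.Timeout object is collapsed to its read timeout when the provider cannot accept the richer type, bare numbers are cast to float, and a missing value falls back to 600 seconds. A minimal standalone sketch of that coercion, with a boolean stand-in for the supports_httpx_timeout(custom_llm_provider) check (illustrative only, not the library API):

import httpx
from typing import Optional, Union


def normalize_timeout(
    timeout: Optional[Union[int, float, httpx.Timeout]],
    provider_supports_httpx_timeout: bool,
    default: float = 600.0,
) -> Union[float, httpx.Timeout]:
    # Re-statement of the coercion added in this diff, for illustration.
    if timeout is None:
        return default  # no value supplied -> 10 minute default
    if isinstance(timeout, httpx.Timeout):
        if provider_supports_httpx_timeout:
            return timeout  # provider can take the httpx.Timeout object as-is
        return timeout.read or default  # otherwise keep only the read timeout
    return float(timeout)  # plain ints/floats become floats


# e.g. normalize_timeout(httpx.Timeout(5.0, read=30.0), provider_supports_httpx_timeout=False) -> 30.0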
@@ -301,25 +320,6 @@ def cancel_fine_tuning_job(
                 or litellm.openai_key
                 or os.getenv("OPENAI_API_KEY")
             )
-            ### TIMEOUT LOGIC ###
-            timeout = (
-                optional_params.timeout or kwargs.get("request_timeout", 600) or 600
-            )
-            # set timeout for 10 minutes by default
-
-            if (
-                timeout is not None
-                and isinstance(timeout, httpx.Timeout)
-                and supports_httpx_timeout(custom_llm_provider) == False
-            ):
-                read_timeout = timeout.read or 600
-                timeout = read_timeout  # default 10 min timeout
-            elif timeout is not None and not isinstance(timeout, httpx.Timeout):
-                timeout = float(timeout)  # type: ignore
-            elif timeout is None:
-                timeout = 600.0
-
-            _is_async = kwargs.pop("acancel_fine_tuning_job", False) is True

             response = openai_fine_tuning_apis_instance.cancel_fine_tuning_job(
                 api_base=api_base,
@@ -330,6 +330,40 @@ def cancel_fine_tuning_job(
                 max_retries=optional_params.max_retries,
                 _is_async=_is_async,
             )
+        # Azure OpenAI
+        elif custom_llm_provider == "azure":
+            api_base = optional_params.api_base or litellm.api_base or get_secret("AZURE_API_BASE")  # type: ignore
+
+            api_version = (
+                optional_params.api_version
+                or litellm.api_version
+                or get_secret("AZURE_API_VERSION")
+            )  # type: ignore
+
+            api_key = (
+                optional_params.api_key
+                or litellm.api_key
+                or litellm.azure_key
+                or get_secret("AZURE_OPENAI_API_KEY")
+                or get_secret("AZURE_API_KEY")
+            )  # type: ignore
+
+            extra_body = optional_params.get("extra_body", {})
+            azure_ad_token: Optional[str] = None
+            if extra_body is not None:
+                azure_ad_token = extra_body.pop("azure_ad_token", None)
+            else:
+                azure_ad_token = get_secret("AZURE_AD_TOKEN")  # type: ignore
+
+            response = azure_fine_tuning_apis_instance.cancel_fine_tuning_job(
+                api_base=api_base,
+                api_key=api_key,
+                api_version=api_version,
+                fine_tuning_job_id=fine_tuning_job_id,
+                timeout=timeout,
+                max_retries=optional_params.max_retries,
+                _is_async=_is_async,
+            )
         else:
             raise litellm.exceptions.BadRequestError(
                 message="LiteLLM doesn't support {} for 'create_batch'. Only 'openai' is supported.".format(
@@ -405,6 +439,25 @@ def list_fine_tuning_jobs(
     """
     try:
         optional_params = GenericLiteLLMParams(**kwargs)
+        ### TIMEOUT LOGIC ###
+        timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600
+        # set timeout for 10 minutes by default
+
+        if (
+            timeout is not None
+            and isinstance(timeout, httpx.Timeout)
+            and supports_httpx_timeout(custom_llm_provider) == False
+        ):
+            read_timeout = timeout.read or 600
+            timeout = read_timeout  # default 10 min timeout
+        elif timeout is not None and not isinstance(timeout, httpx.Timeout):
+            timeout = float(timeout)  # type: ignore
+        elif timeout is None:
+            timeout = 600.0
+
+        _is_async = kwargs.pop("alist_fine_tuning_jobs", False) is True
+
+        # OpenAI
         if custom_llm_provider == "openai":

             # for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there
@@ -427,25 +480,6 @@ def list_fine_tuning_jobs(
                 or litellm.openai_key
                 or os.getenv("OPENAI_API_KEY")
             )
-            ### TIMEOUT LOGIC ###
-            timeout = (
-                optional_params.timeout or kwargs.get("request_timeout", 600) or 600
-            )
-            # set timeout for 10 minutes by default
-
-            if (
-                timeout is not None
-                and isinstance(timeout, httpx.Timeout)
-                and supports_httpx_timeout(custom_llm_provider) == False
-            ):
-                read_timeout = timeout.read or 600
-                timeout = read_timeout  # default 10 min timeout
-            elif timeout is not None and not isinstance(timeout, httpx.Timeout):
-                timeout = float(timeout)  # type: ignore
-            elif timeout is None:
-                timeout = 600.0
-
-            _is_async = kwargs.pop("alist_fine_tuning_jobs", False) is True

             response = openai_fine_tuning_apis_instance.list_fine_tuning_jobs(
                 api_base=api_base,
@@ -457,6 +491,41 @@ def list_fine_tuning_jobs(
                 max_retries=optional_params.max_retries,
                 _is_async=_is_async,
             )
+        # Azure OpenAI
+        elif custom_llm_provider == "azure":
+            api_base = optional_params.api_base or litellm.api_base or get_secret("AZURE_API_BASE")  # type: ignore
+
+            api_version = (
+                optional_params.api_version
+                or litellm.api_version
+                or get_secret("AZURE_API_VERSION")
+            )  # type: ignore
+
+            api_key = (
+                optional_params.api_key
+                or litellm.api_key
+                or litellm.azure_key
+                or get_secret("AZURE_OPENAI_API_KEY")
+                or get_secret("AZURE_API_KEY")
+            )  # type: ignore
+
+            extra_body = optional_params.get("extra_body", {})
+            azure_ad_token: Optional[str] = None
+            if extra_body is not None:
+                azure_ad_token = extra_body.pop("azure_ad_token", None)
+            else:
+                azure_ad_token = get_secret("AZURE_AD_TOKEN")  # type: ignore
+
+            response = azure_fine_tuning_apis_instance.list_fine_tuning_jobs(
+                api_base=api_base,
+                api_key=api_key,
+                api_version=api_version,
+                after=after,
+                limit=limit,
+                timeout=timeout,
+                max_retries=optional_params.max_retries,
+                _is_async=_is_async,
+            )
         else:
             raise litellm.exceptions.BadRequestError(
                 message="LiteLLM doesn't support {} for 'create_batch'. Only 'openai' is supported.".format(