chore: initial public snapshot for github upload

This commit is contained in:
Your Name
2026-03-26 20:06:14 +08:00
commit 0e5ecd930e
3497 changed files with 1586236 additions and 0 deletions

View File

@@ -0,0 +1,33 @@
from litellm._logging import verbose_logger
from litellm.llms.base_llm.image_generation.transformation import (
BaseImageGenerationConfig,
)
from .dall_e_2_transformation import AzureFoundryDallE2ImageGenerationConfig
from .dall_e_3_transformation import AzureFoundryDallE3ImageGenerationConfig
from .flux_transformation import AzureFoundryFluxImageGenerationConfig
from .gpt_transformation import AzureFoundryGPTImageGenerationConfig
# Public API of this module: the four Azure Foundry image generation configs.
# (The factory below, get_azure_ai_image_generation_config, is intentionally
# not star-exported.)
__all__ = [
    "AzureFoundryFluxImageGenerationConfig",
    "AzureFoundryGPTImageGenerationConfig",
    "AzureFoundryDallE2ImageGenerationConfig",
    "AzureFoundryDallE3ImageGenerationConfig",
]
def get_azure_ai_image_generation_config(model: str) -> BaseImageGenerationConfig:
    """Return the image generation config for an Azure AI (Foundry) model name.

    Args:
        model: Model name, e.g. "dall-e-2", "dall-e-3", "flux.1-pro",
            "gpt-image-1". Matching is case-insensitive and ignores
            "-" and "_" separators.

    Returns:
        The config for the matched model family. An empty model name
        defaults to dall-e-2; unrecognized names fall back to the
        gpt-image-1 (GPT) config.
    """
    # Normalize so "dall-e-2", "DALL_E_2" and "dalle2" all compare equal.
    normalized = model.lower().replace("-", "").replace("_", "")
    if normalized == "" or "dalle2" in normalized:  # empty model is dall-e-2
        return AzureFoundryDallE2ImageGenerationConfig()
    elif "dalle3" in normalized:
        return AzureFoundryDallE3ImageGenerationConfig()
    elif "flux" in normalized:
        return AzureFoundryFluxImageGenerationConfig()
    else:
        # Fixed: the log previously named a nonexistent class
        # "AzureGPTImageGenerationConfig".
        verbose_logger.debug(
            f"Using AzureFoundryGPTImageGenerationConfig for model: {normalized}. This follows the gpt-image-1 model format."
        )
        return AzureFoundryGPTImageGenerationConfig()

View File

@@ -0,0 +1,27 @@
from typing import Any
import litellm
from litellm.types.utils import ImageResponse
def cost_calculator(
    model: str,
    image_response: Any,
) -> float:
    """Azure AI image generation cost calculator.

    Cost is a flat per-image price looked up from litellm's model-info map
    for the ``azure_ai`` provider, multiplied by the number of images in
    the response.

    Args:
        model: Model name used for the image generation call.
        image_response: Must be a ``litellm.types.utils.ImageResponse``.

    Returns:
        ``output_cost_per_image * num_images``; 0.0 when pricing info or
        image data is missing.

    Raises:
        ValueError: If ``image_response`` is not an ``ImageResponse``.
    """
    # Fail fast on an unexpected response type before any pricing lookup.
    if not isinstance(image_response, ImageResponse):
        raise ValueError(
            f"image_response must be of type ImageResponse got type={type(image_response)}"
        )
    _model_info = litellm.get_model_info(
        model=model,
        custom_llm_provider=litellm.LlmProviders.AZURE_AI.value,
    )
    # `or 0.0` also covers an explicit None in the model-info map.
    output_cost_per_image: float = _model_info.get("output_cost_per_image") or 0.0
    num_images: int = len(image_response.data) if image_response.data else 0
    return output_cost_per_image * num_images

View File

@@ -0,0 +1,9 @@
from litellm.llms.openai.image_generation import DallE2ImageGenerationConfig
class AzureFoundryDallE2ImageGenerationConfig(DallE2ImageGenerationConfig):
    """Image generation config for dall-e-2 deployments on Azure Foundry.

    Inherits everything from the OpenAI DallE2ImageGenerationConfig; no
    Azure-specific overrides are defined here.
    """

View File

@@ -0,0 +1,9 @@
from litellm.llms.openai.image_generation import DallE3ImageGenerationConfig
class AzureFoundryDallE3ImageGenerationConfig(DallE3ImageGenerationConfig):
    """Image generation config for dall-e-3 deployments on Azure Foundry.

    Inherits everything from the OpenAI DallE3ImageGenerationConfig; no
    Azure-specific overrides are defined here.
    """

View File

@@ -0,0 +1,68 @@
from typing import Optional
from litellm.llms.openai.image_generation import GPTImageGenerationConfig
class AzureFoundryFluxImageGenerationConfig(GPTImageGenerationConfig):
    """Azure Foundry FLUX image generation config.

    Manual testing showed Azure Foundry FLUX deployments accept the same
    parameters as gpt-image-1 (Azure Foundry publishes no supported-params
    docs at the time of writing), so this class reuses
    GPTImageGenerationConfig and only adds FLUX 2 URL/model helpers.
    """

    @staticmethod
    def get_flux2_image_generation_url(
        api_base: Optional[str],
        model: str,
        api_version: Optional[str],
    ) -> str:
        """Build the complete Azure AI FLUX 2 image generation URL.

        FLUX 2 on Azure AI uses a provider-scoped path rather than the
        standard Azure OpenAI deployment path:
        - Standard: /openai/deployments/{model}/images/generations
        - FLUX 2:   /providers/blackforestlabs/v1/flux-2-pro

        Args:
            api_base: Base URL (e.g., https://litellm-ci-cd-prod.services.ai.azure.com)
            model: Model name (e.g., flux.2-pro)
            api_version: API version (e.g., preview)

        Returns:
            Complete URL for the FLUX 2 image generation endpoint.

        Raises:
            ValueError: If api_base is None.
        """
        if api_base is None:
            raise ValueError(
                "api_base is required for Azure AI FLUX 2 image generation"
            )
        base = api_base.rstrip("/")
        version = api_version or "preview"
        # An api_base that already contains /providers/ is a complete path;
        # only make sure an api-version query parameter is present.
        if "/providers/" in base:
            return base if "?" in base else f"{base}?api-version={version}"
        # Model name flux.2-pro maps to the fixed endpoint flux-2-pro.
        return f"{base}/providers/blackforestlabs/v1/flux-2-pro?api-version={version}"

    @staticmethod
    def is_flux2_model(model: str) -> bool:
        """Return True when ``model`` names an Azure AI FLUX 2 model.

        Args:
            model: Model name (e.g., flux.2-pro, azure_ai/flux.2-pro)
        """
        # Normalize separators so "flux.2", "flux_2" and "flux-2" all match.
        normalized = model.lower().replace(".", "-").replace("_", "-")
        return any(tag in normalized for tag in ("flux-2", "flux2"))

View File

@@ -0,0 +1,9 @@
from litellm.llms.openai.image_generation import GPTImageGenerationConfig
class AzureFoundryGPTImageGenerationConfig(GPTImageGenerationConfig):
    """Image generation config for gpt-image-1 deployments on Azure Foundry.

    Inherits everything from the OpenAI GPTImageGenerationConfig; no
    Azure-specific overrides are defined here.
    """