from __future__ import annotations

from aiohttp import ClientSession
import time
import asyncio

from ...typing import AsyncResult, Messages
from ...providers.response import ImageResponse, Reasoning, JsonConversation
from ..helper import format_image_prompt, get_random_string
from .Janus_Pro_7B import Janus_Pro_7B, get_zerogpu_token
from .BlackForestLabsFlux1Dev import BlackForestLabsFlux1Dev
from .raise_for_status import raise_for_status
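

# Thin subclass that repoints BlackForestLabsFlux1Dev at the roxky FLUX.1-dev
# mirror space; "flux"/"flux-dev" requests below are delegated to it.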
class FluxDev(BlackForestLabsFlux1Dev):
    url = "https://roxky-flux-1-dev.hf.space"
    space = "roxky/FLUX.1-dev"
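

# Provider for the roxky g4f space on Hugging Face: chat and Janus image
# models come from the Janus_Pro_7B base class, while "flux" models are
# routed either to FluxDev or to the space's own Gradio endpoint (url_flux).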
class G4F(Janus_Pro_7B):
    label = "G4F framework"
    space = "roxky/Janus-Pro-7B"
    url = "https://huggingface.co/spaces/roxky/g4f-space"
    api_url = "https://roxky-janus-pro-7b.hf.space"
    url_flux = "https://roxky-g4f-flux.hf.space/run/predict"
    referer = f"{api_url}?__theme=light"

    default_model = "flux"
    model_aliases = {"flux-schnell": default_model}
    image_models = [Janus_Pro_7B.default_image_model, default_model, "flux-dev", *model_aliases.keys()]
    models = [Janus_Pro_7B.default_model, *image_models]
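
    # Flow: "flux"/"flux-dev" delegate to FluxDev; names without "flux" fall
    # back to the Janus-Pro-7B generator; the remaining flux variants (e.g.
    # the "flux-schnell" alias) are posted to the space's Gradio queue with a
    # ZeroGPU token while status updates stream back.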
    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        prompt: str = None,
        width: int = 1024,
        height: int = 1024,
        seed: int = None,
        cookies: dict = None,
        zerogpu_token: str = None,
        zerogpu_uuid: str = "[object Object]",
        **kwargs
    ) -> AsyncResult:
        if model in ("flux", "flux-dev"):
            # Delegate FLUX requests to the dedicated FluxDev provider above.
            async for chunk in FluxDev.create_async_generator(
                model, messages,
                proxy=proxy,
                prompt=prompt,
                width=width,
                height=height,
                seed=seed,
                cookies=cookies,
                zerogpu_token=zerogpu_token,
                zerogpu_uuid=zerogpu_uuid,
                **kwargs
            ):
                yield chunk
            return
        if cls.default_model not in model:
            # Substring check: model names that do not contain "flux" (e.g.
            # the Janus chat and image models) are handled by the base class.
            async for chunk in super().create_async_generator(
                model, messages,
                proxy=proxy,
                prompt=prompt,
                seed=seed,
                cookies=cookies,
                zerogpu_token=zerogpu_token,
                zerogpu_uuid=zerogpu_uuid,
                **kwargs
            ):
                yield chunk
            return
        model = cls.get_model(model)
        # Round dimensions down to multiples of 8, with a floor of 32.
        width = max(32, width - (width % 8))
        height = max(32, height - (height % 8))
        if prompt is None:
            prompt = format_image_prompt(messages)
        if seed is None:
            seed = int(time.time())
        # Positional arguments for the space's Gradio endpoint (fn_index 3).
        payload = {
            "data": [
                prompt,
                seed,
                width,
                height,
                True,
                1
            ],
            "event_data": None,
            "fn_index": 3,
            "session_hash": get_random_string(),
            "trigger_id": 10
        }
        async with ClientSession() as session:
            if zerogpu_token is None:
                yield Reasoning(status="Acquiring GPU Token")
                zerogpu_uuid, zerogpu_token = await get_zerogpu_token(cls.space, session, JsonConversation(), cookies)
            headers = {
                "x-zerogpu-token": zerogpu_token,
                "x-zerogpu-uuid": zerogpu_uuid,
            }
            # Drop unset values so no "None" header is sent.
            headers = {k: v for k, v in headers.items() if v is not None}
            async def generate():
                async with session.post(cls.url_flux, json=payload, proxy=proxy, headers=headers) as response:
                    await raise_for_status(response)
                    response_data = await response.json()
                    image_url = response_data["data"][0]["url"]
                    return ImageResponse(image_url, alt=prompt)
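
            # Run the request in a background task and keep yielding Reasoning
            # status updates while it is in flight, so callers see elapsed
            # time instead of a silent wait.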
            background_tasks = set()
            started = time.time()
            task = asyncio.create_task(generate())
            background_tasks.add(task)
            task.add_done_callback(background_tasks.discard)
            while background_tasks:
                yield Reasoning(status=f"Generating {time.time() - started:.2f}s")
                await asyncio.sleep(0.2)
            yield await task
            yield Reasoning(status=f"Finished {time.time() - started:.2f}s")