# G4F.py
  1. from __future__ import annotations
  2. from aiohttp import ClientSession
  3. import time
  4. import asyncio
  5. from ...typing import AsyncResult, Messages
  6. from ...providers.response import ImageResponse, Reasoning, JsonConversation
  7. from ..helper import format_image_prompt, get_random_string
  8. from .Janus_Pro_7B import Janus_Pro_7B, get_zerogpu_token
  9. from .BlackForestLabsFlux1Dev import BlackForestLabsFlux1Dev
  10. from .raise_for_status import raise_for_status
class FluxDev(BlackForestLabsFlux1Dev):
    # Mirror of the FLUX.1-dev Space hosted under the "roxky" account.
    # All generation logic is inherited from BlackForestLabsFlux1Dev;
    # only the Space location is overridden here.
    url = "https://roxky-flux-1-dev.hf.space"
    space = "roxky/FLUX.1-dev"
  14. class G4F(Janus_Pro_7B):
  15. label = "G4F framework"
  16. space = "roxky/Janus-Pro-7B"
  17. url = f"https://huggingface.co/spaces/roxky/g4f-space"
  18. api_url = "https://roxky-janus-pro-7b.hf.space"
  19. url_flux = "https://roxky-g4f-flux.hf.space/run/predict"
  20. referer = f"{api_url}?__theme=light"
  21. default_model = "flux"
  22. model_aliases = {"flux-schnell": default_model}
  23. image_models = [Janus_Pro_7B.default_image_model, default_model, "flux-dev", *model_aliases.keys()]
  24. models = [Janus_Pro_7B.default_model, *image_models]
  25. @classmethod
  26. async def create_async_generator(
  27. cls,
  28. model: str,
  29. messages: Messages,
  30. proxy: str = None,
  31. prompt: str = None,
  32. width: int = 1024,
  33. height: int = 1024,
  34. seed: int = None,
  35. cookies: dict = None,
  36. zerogpu_token: str = None,
  37. zerogpu_uuid: str = "[object Object]",
  38. **kwargs
  39. ) -> AsyncResult:
  40. if model in ("flux", "flux-dev"):
  41. async for chunk in FluxDev.create_async_generator(
  42. model, messages,
  43. proxy=proxy,
  44. prompt=prompt,
  45. width=width,
  46. height=height,
  47. seed=seed,
  48. cookies=cookies,
  49. zerogpu_token=zerogpu_token,
  50. zerogpu_uuid=zerogpu_uuid,
  51. **kwargs
  52. ):
  53. yield chunk
  54. return
  55. if cls.default_model not in model:
  56. async for chunk in super().create_async_generator(
  57. model, messages,
  58. proxy=proxy,
  59. prompt=prompt,
  60. seed=seed,
  61. cookies=cookies,
  62. zerogpu_token=zerogpu_token,
  63. zerogpu_uuid=zerogpu_uuid,
  64. **kwargs
  65. ):
  66. yield chunk
  67. return
  68. model = cls.get_model(model)
  69. width = max(32, width - (width % 8))
  70. height = max(32, height - (height % 8))
  71. if prompt is None:
  72. prompt = format_image_prompt(messages)
  73. if seed is None:
  74. seed = int(time.time())
  75. payload = {
  76. "data": [
  77. prompt,
  78. seed,
  79. width,
  80. height,
  81. True,
  82. 1
  83. ],
  84. "event_data": None,
  85. "fn_index": 3,
  86. "session_hash": get_random_string(),
  87. "trigger_id": 10
  88. }
  89. async with ClientSession() as session:
  90. if zerogpu_token is None:
  91. yield Reasoning(status="Acquiring GPU Token")
  92. zerogpu_uuid, zerogpu_token = await get_zerogpu_token(cls.space, session, JsonConversation(), cookies)
  93. headers = {
  94. "x-zerogpu-token": zerogpu_token,
  95. "x-zerogpu-uuid": zerogpu_uuid,
  96. }
  97. headers = {k: v for k, v in headers.items() if v is not None}
  98. async def generate():
  99. async with session.post(cls.url_flux, json=payload, proxy=proxy, headers=headers) as response:
  100. await raise_for_status(response)
  101. response_data = await response.json()
  102. image_url = response_data["data"][0]['url']
  103. return ImageResponse(image_url, alt=prompt)
  104. background_tasks = set()
  105. started = time.time()
  106. task = asyncio.create_task(generate())
  107. background_tasks.add(task)
  108. task.add_done_callback(background_tasks.discard)
  109. while background_tasks:
  110. yield Reasoning(status=f"Generating {time.time() - started:.2f}s")
  111. await asyncio.sleep(0.2)
  112. yield await task
  113. yield Reasoning(status=f"Finished {time.time() - started:.2f}s")