AiAsk.py

from __future__ import annotations

from aiohttp import ClientSession

from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider


class AiAsk(AsyncGeneratorProvider):
    # Provider for the e.aiask.me chat endpoint; flagged as not working upstream.
    url = "https://e.aiask.me"
    supports_message_history = True
    supports_gpt_35_turbo = True
    working = False

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str | None = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "accept": "application/json, text/plain, */*",
            "origin": cls.url,
            "referer": f"{cls.url}/chat",
        }
        async with ClientSession(headers=headers) as session:
            data = {
                "continuous": True,
                "id": "fRMSQtuHl91A4De9cCvKD",
                "list": messages,
                "models": "0",
                "prompt": "",
                "temperature": kwargs.get("temperature", 0.5),
                "title": "",
            }
            buffer = ""
            # Rate-limit message returned by the server, in Chinese:
            # "Your free quota is not enough to use this model.
            #  Please click the top-right corner to log in to continue!"
            rate_limit = "您的免费额度不够使用这个模型啦,请点击右上角登录继续使用!"
            async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    buffer += chunk.decode()
                    # Hold back output while the buffer is still a prefix of the
                    # rate-limit message; otherwise flush the buffered text.
                    if not rate_limit.startswith(buffer):
                        yield buffer
                        buffer = ""
                    elif buffer == rate_limit:
                        raise RuntimeError("Rate limit reached")
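

# Usage sketch: one way a caller might stream this provider's output. This is
# an illustrative assumption, not part of the original module; because of the
# relative imports above, it must be run inside its package (e.g. via
# `python -m`), not as a standalone script. The model string and message
# payload below are hypothetical.
if __name__ == "__main__":
    import asyncio

    async def main() -> None:
        messages = [{"role": "user", "content": "Hello!"}]
        # create_async_generator yields response text chunk by chunk.
        async for chunk in AiAsk.create_async_generator("gpt-3.5-turbo", messages):
            print(chunk, end="", flush=True)

    asyncio.run(main())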