ChatAnywhere.py

from __future__ import annotations

from aiohttp import ClientSession, ClientTimeout

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider


class ChatAnywhere(AsyncGeneratorProvider):
    url = "https://chatanywhere.cn"
    supports_gpt_35_turbo = True
    supports_message_history = True
    working = False  # provider is currently flagged as non-working

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str | None = None,
        timeout: int = 120,
        temperature: float = 0.5,
        **kwargs
    ) -> AsyncResult:
        # Browser-like headers so the request matches the site's own frontend.
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Content-Type": "application/json",
            "Referer": f"{cls.url}/",
            "Origin": cls.url,
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Authorization": "",
            "Connection": "keep-alive",
            "TE": "trailers"
        }
        async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
            data = {
                "list": messages,                  # full message history
                "id": "s1_qYuOLXjI3rEpc7WHfQ",     # fixed conversation id sent with every request
                "title": messages[-1]["content"],  # last message doubles as the conversation title
                "prompt": "",
                "temperature": temperature,
                "models": "61490748",              # site-internal model identifier
                "continuous": True
            }
            async with session.post(f"{cls.url}/v1/chat/gpt/", json=data, proxy=proxy) as response:
                response.raise_for_status()
                # Stream the body and yield decoded chunks as they arrive.
                async for chunk in response.content.iter_any():
                    if chunk:
                        yield chunk.decode()
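
Usage sketch (not part of the original file): a minimal way to consume the provider's async generator. The import path, model name, and message content below are assumptions for illustration; the provider itself never reads the model argument.

import asyncio

from g4f.Provider import ChatAnywhere  # assumed import path in the upstream package

async def main() -> None:
    # Hypothetical message list in the OpenAI-style format the provider expects.
    messages = [{"role": "user", "content": "Hello"}]
    async for chunk in ChatAnywhere.create_async_generator(
        model="gpt-3.5-turbo",  # accepted but unused by this provider
        messages=messages,
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())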