# Chatxyz.py
  1. from __future__ import annotations
  2. import json
  3. from aiohttp import ClientSession
  4. from ..typing import AsyncResult, Messages
  5. from .base_provider import AsyncGeneratorProvider
  6. class Chatxyz(AsyncGeneratorProvider):
  7. url = "https://chat.3211000.xyz"
  8. working = False
  9. supports_gpt_35_turbo = True
  10. supports_message_history = True
  11. @classmethod
  12. async def create_async_generator(
  13. cls,
  14. model: str,
  15. messages: Messages,
  16. proxy: str = None,
  17. **kwargs
  18. ) -> AsyncResult:
  19. headers = {
  20. 'Accept': 'text/event-stream',
  21. 'Accept-Encoding': 'gzip, deflate, br',
  22. 'Accept-Language': 'en-US,en;q=0.5',
  23. 'Alt-Used': 'chat.3211000.xyz',
  24. 'Content-Type': 'application/json',
  25. 'Host': 'chat.3211000.xyz',
  26. 'Origin': 'https://chat.3211000.xyz',
  27. 'Referer': 'https://chat.3211000.xyz/',
  28. 'Sec-Fetch-Dest': 'empty',
  29. 'Sec-Fetch-Mode': 'cors',
  30. 'Sec-Fetch-Site': 'same-origin',
  31. 'TE': 'trailers',
  32. 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:121.0) Gecko/20100101 Firefox/121.0',
  33. 'x-requested-with': 'XMLHttpRequest'
  34. }
  35. async with ClientSession(headers=headers) as session:
  36. data = {
  37. "messages": messages,
  38. "stream": True,
  39. "model": "gpt-3.5-turbo",
  40. "temperature": 0.5,
  41. "presence_penalty": 0,
  42. "frequency_penalty": 0,
  43. "top_p": 1,
  44. **kwargs
  45. }
  46. async with session.post(f'{cls.url}/api/openai/v1/chat/completions', json=data, proxy=proxy) as response:
  47. response.raise_for_status()
  48. async for chunk in response.content:
  49. line = chunk.decode()
  50. if line.startswith("data: [DONE]"):
  51. break
  52. elif line.startswith("data: "):
  53. line = json.loads(line[6:])
  54. chunk = line["choices"][0]["delta"].get("content")
  55. if(chunk):
  56. yield chunk