GptGo.py

from __future__ import annotations

from aiohttp import ClientSession
import json
import base64

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt


class GptGo(AsyncGeneratorProvider):
    url                   = "https://gptgo.ai"
    supports_gpt_35_turbo = True
    working               = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        # Browser-like headers so the requests pass gptgo.ai's origin checks.
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept": "*/*",
            "Accept-language": "en-US",
            "Origin": cls.url,
            "Referer": f"{cls.url}/",
            "sec-ch-ua": '"Google Chrome";v="116", "Chromium";v="116", "Not?A_Brand";v="24"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
        }
        async with ClientSession(headers=headers) as session:
            # Step 1: exchange the formatted prompt for a request token.
            # The response wraps a base64 payload; strip the surrounding
            # characters (first 10 and last 20) before decoding.
            async with session.post(
                "https://gptgo.ai/get_token.php",
                data={"ask": format_prompt(messages)},
                proxy=proxy
            ) as response:
                response.raise_for_status()
                token = await response.text()
                token = base64.b64decode(token[10:-20]).decode()

            # Step 2: stream the completion as server-sent events and yield
            # each content delta until the "[DONE]" marker arrives.
            async with session.get(
                "https://api.gptgo.ai/web.php",
                params={"array_chat": token},
                proxy=proxy
            ) as response:
                response.raise_for_status()
                async for line in response.content:
                    if line.startswith(b"data: [DONE]"):
                        break
                    if line.startswith(b"data: "):
                        line = json.loads(line[6:])
                        content = line["choices"][0]["delta"].get("content")
                        if content and content != "\n#GPTGO ":
                            yield content
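

# --- Usage sketch (not part of the upstream file) ---
# A minimal example of how this provider could be driven end to end. It assumes
# the module lives inside the g4f package so the relative imports above resolve
# (e.g. run as `python -m g4f.Provider.GptGo`; the exact module path is an
# assumption), and that the upstream service is still reachable.
if __name__ == "__main__":
    import asyncio

    async def demo() -> None:
        # Plain OpenAI-style message list; format_prompt() flattens it above.
        messages = [{"role": "user", "content": "Hello, who are you?"}]
        # create_async_generator returns an async generator of content deltas.
        async for chunk in GptGo.create_async_generator("gpt-3.5-turbo", messages):
            print(chunk, end="", flush=True)
        print()

    asyncio.run(demo())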