Berlin.py

from __future__ import annotations

import uuid
import json

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt


class Berlin(AsyncGeneratorProvider):
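    """Async generator provider for the ai.berlin4h.top chat backend."""
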
    url = "https://ai.berlin4h.top"
    working = False
    supports_gpt_35_turbo = True
    _token = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str | None = None,
        **kwargs
    ) -> AsyncResult:
        if not model:
            model = "gpt-3.5-turbo"
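        # Browser-like headers: the backend appears to expect a realistic
        # User-Agent with a matching Origin and Referer, so mimic a Firefox
        # request.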
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Referer": f"{cls.url}/",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Alt-Used": "ai.berlin4h.top",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
            "TE": "trailers",
        }
        async with ClientSession(headers=headers) as session:
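            # Log in once and cache the token on the class so later calls
            # skip the login round trip.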
            if not cls._token:
                data = {
                    # The account name is Chinese for "free GPT-3.5 model use".
                    "account": '免费使用GPT3.5模型@163.com',
                    "password": '659e945c2d004686bad1a75b708c962f'
                }
                async with session.post(f"{cls.url}/api/login", json=data, proxy=proxy) as response:
                    response.raise_for_status()
                    cls._token = (await response.json())["data"]["token"]
            headers = {
                "token": cls._token
            }
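            # Flatten the message history into a single prompt string and
            # build the chat payload; extra **kwargs are merged into the
            # options object (e.g. to override temperature).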
            prompt = format_prompt(messages)
            data = {
                "prompt": prompt,
                "parentMessageId": str(uuid.uuid4()),
                "options": {
                    "model": model,
                    "temperature": 0,
                    "presence_penalty": 0,
                    "frequency_penalty": 0,
                    "max_tokens": 1888,
                    **kwargs
                },
            }
            async with session.post(f"{cls.url}/api/chat/completions", json=data, proxy=proxy, headers=headers) as response:
                response.raise_for_status()
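                # The body appears to stream newline-delimited JSON, each
                # non-empty line carrying a "content" delta.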
                async for chunk in response.content:
                    if chunk.strip():
                        try:
                            yield json.loads(chunk)["content"]
                        except (json.JSONDecodeError, KeyError) as e:
                            # A bare except here would also swallow GeneratorExit
                            # raised through the yield; only treat malformed
                            # chunks as errors.
                            raise RuntimeError(f"Response: {chunk.decode()}") from e
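

# Usage sketch (assumes this module lives inside a provider package such as
# g4f so the relative imports above resolve; it is not runnable standalone):
#
#     import asyncio
#
#     async def main():
#         messages = [{"role": "user", "content": "Hello!"}]
#         async for token in Berlin.create_async_generator(
#             model="gpt-3.5-turbo", messages=messages
#         ):
#             print(token, end="", flush=True)
#
#     asyncio.run(main())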