# Liaobots.py
  1. from __future__ import annotations
  2. import uuid
  3. from aiohttp import ClientSession, BaseConnector
  4. from ..typing import AsyncResult, Messages
  5. from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
  6. from .helper import get_connector
  7. from ..errors import RateLimitError
# Registry of models exposed by the liaobots API.
# Each entry is sent verbatim as the "model" field of the chat request:
#   id         - model identifier the API expects
#   name       - human-readable display name
#   maxLength  - maximum request length in characters
#   tokenLimit - context window in tokens
#   context    - optional human-readable context-size label
models = {
    "gpt-4": {
        "id": "gpt-4",
        "name": "GPT-4",
        "maxLength": 24000,
        "tokenLimit": 8000,
    },
    "gpt-4-0613": {
        "id": "gpt-4-0613",
        "name": "GPT-4",
        "maxLength": 32000,
        "tokenLimit": 8000,
    },
    "gpt-3.5-turbo": {
        "id": "gpt-3.5-turbo",
        "name": "GPT-3.5-Turbo",
        "maxLength": 48000,
        "tokenLimit": 14000,
        "context": "16K",
    },
    "gpt-3.5-turbo-16k": {
        "id": "gpt-3.5-turbo-16k",
        "name": "GPT-3.5-16k",
        "maxLength": 48000,
        "tokenLimit": 16000,
    },
    "gpt-4-1106-preview": {
        "id": "gpt-4-1106-preview",
        "name": "GPT-4-Turbo",
        "maxLength": 260000,
        "tokenLimit": 126000,
        "context": "128K",
    },
    "gpt-4-plus": {
        "id": "gpt-4-plus",
        "name": "GPT-4-Plus",
        "maxLength": 130000,
        "tokenLimit": 31000,
        "context": "32K",
    },
    "gemini-pro": {
        "id": "gemini-pro",
        "name": "Gemini-Pro",
        "maxLength": 120000,
        "tokenLimit": 30000,
        "context": "32K",
    },
    "claude-2": {
        "id": "claude-2",
        "name": "Claude-2-200k",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-instant-1": {
        "id": "claude-instant-1",
        "name": "Claude-instant-1",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "100K",
    }
}
  70. class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
  71. url = "https://liaobots.site"
  72. working = True
  73. supports_message_history = True
  74. supports_gpt_35_turbo = True
  75. supports_gpt_4 = True
  76. default_model = "gpt-3.5-turbo"
  77. models = list(models)
  78. model_aliases = {
  79. "claude-v2": "claude-2"
  80. }
  81. _auth_code = None
  82. _cookie_jar = None
  83. @classmethod
  84. async def create_async_generator(
  85. cls,
  86. model: str,
  87. messages: Messages,
  88. auth: str = None,
  89. proxy: str = None,
  90. connector: BaseConnector = None,
  91. **kwargs
  92. ) -> AsyncResult:
  93. headers = {
  94. "authority": "liaobots.com",
  95. "content-type": "application/json",
  96. "origin": cls.url,
  97. "referer": f"{cls.url}/",
  98. "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
  99. }
  100. async with ClientSession(
  101. headers=headers,
  102. cookie_jar=cls._cookie_jar,
  103. connector=get_connector(connector, proxy, True)
  104. ) as session:
  105. cls._auth_code = auth if isinstance(auth, str) else cls._auth_code
  106. if not cls._auth_code:
  107. async with session.post(
  108. "https://liaobots.work/recaptcha/api/login",
  109. proxy=proxy,
  110. data={"token": "abcdefghijklmnopqrst"},
  111. verify_ssl=False
  112. ) as response:
  113. response.raise_for_status()
  114. async with session.post(
  115. "https://liaobots.work/api/user",
  116. proxy=proxy,
  117. json={"authcode": ""},
  118. verify_ssl=False
  119. ) as response:
  120. if response.status == 401:
  121. raise RateLimitError("Rate limit reached. Use a other provider or ip address")
  122. response.raise_for_status()
  123. cls._auth_code = (await response.json(content_type=None))["authCode"]
  124. cls._cookie_jar = session.cookie_jar
  125. data = {
  126. "conversationId": str(uuid.uuid4()),
  127. "model": models[cls.get_model(model)],
  128. "messages": messages,
  129. "key": "",
  130. "prompt": kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully."),
  131. }
  132. async with session.post(
  133. "https://liaobots.work/api/chat",
  134. proxy=proxy,
  135. json=data,
  136. headers={"x-auth-code": cls._auth_code},
  137. verify_ssl=False
  138. ) as response:
  139. response.raise_for_status()
  140. async for chunk in response.content.iter_any():
  141. if b"<html coupert-item=" in chunk:
  142. raise RuntimeError("Invalid session")
  143. if chunk:
  144. yield chunk.decode()