# Liaobots.py
  1. from __future__ import annotations
  2. import uuid
  3. from aiohttp import ClientSession, BaseConnector
  4. from ..typing import AsyncResult, Messages
  5. from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
  6. from .helper import get_connector
  7. from ..requests import raise_for_status
# Catalog of remote models exposed by liaobots, keyed by the upstream model id.
# Each entry mirrors the metadata the liaobots API expects in the "model"
# field of a chat request:
#   id         - upstream identifier (same as the key)
#   name       - human-readable display name
#   model      - model family label (e.g. "ChatGPT", "Claude", "Gemini")
#   provider   - originating vendor
#   maxLength  - advertised maximum prompt length (presumably characters — TODO confirm)
#   tokenLimit - advertised maximum prompt size in tokens
#   context    - advertised context-window size as a display string
models = {
    "gpt-4o-mini-free": {
        "id": "gpt-4o-mini-free",
        "name": "GPT-4o-Mini-Free",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 31200,
        "tokenLimit": 7800,
        "context": "8K",
    },
    "gpt-4o-2024-11-20": {
        "id": "gpt-4o-2024-11-20",
        "name": "GPT-4o",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 260000,
        "tokenLimit": 126000,
        "context": "128K",
    },
    "gpt-4o-mini-2024-07-18": {
        "id": "gpt-4o-mini-2024-07-18",
        "name": "GPT-4o-Mini",
        "model": "ChatGPT",
        "provider": "OpenAI",
        "maxLength": 260000,
        "tokenLimit": 126000,
        "context": "128K",
    },
    "o1-preview-2024-09-12": {
        "id": "o1-preview-2024-09-12",
        "name": "o1-preview",
        "model": "o1",
        "provider": "OpenAI",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "128K",
    },
    "o1-mini-2024-09-12": {
        "id": "o1-mini-2024-09-12",
        "name": "o1-mini",
        "model": "o1",
        "provider": "OpenAI",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "128K",
    },
    "DeepSeek-R1-Distill-Llama-70b": {
        "id": "DeepSeek-R1-Distill-Llama-70b",
        "name": "DeepSeek-R1-70B",
        "model": "DeepSeek-R1-70B",
        "provider": "DeepSeek",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "128K",
    },
    "DeepSeek-R1": {
        "id": "DeepSeek-R1",
        "name": "DeepSeek-R1",
        "model": "DeepSeek-R1",
        "provider": "DeepSeek",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "128K",
    },
    "DeepSeek-V3": {
        "id": "DeepSeek-V3",
        "name": "DeepSeek-V3",
        "model": "DeepSeek-V3",
        "provider": "DeepSeek",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "128K",
    },
    "grok-2": {
        "id": "grok-2",
        "name": "Grok-2",
        "model": "Grok",
        "provider": "x.ai",
        "maxLength": 400000,
        "tokenLimit": 100000,
        "context": "100K",
    },
    "claude-3-opus-20240229": {
        "id": "claude-3-opus-20240229",
        "name": "Claude-3-Opus",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-5-sonnet-20240620": {
        "id": "claude-3-5-sonnet-20240620",
        "name": "Claude-3.5-Sonnet",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-5-sonnet-20241022": {
        "id": "claude-3-5-sonnet-20241022",
        "name": "Claude-3.5-Sonnet-V2",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-sonnet-20240229": {
        "id": "claude-3-sonnet-20240229",
        "name": "Claude-3-Sonnet",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-opus-20240229-t": {
        "id": "claude-3-opus-20240229-t",
        "name": "Claude-3-Opus-T",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "claude-3-5-sonnet-20241022-t": {
        "id": "claude-3-5-sonnet-20241022-t",
        "name": "Claude-3.5-Sonnet-V2-T",
        "model": "Claude",
        "provider": "Anthropic",
        "maxLength": 800000,
        "tokenLimit": 200000,
        "context": "200K",
    },
    "gemini-2.0-flash-exp": {
        "id": "gemini-2.0-flash-exp",
        "name": "Gemini-2.0-Flash-Exp",
        "model": "Gemini",
        "provider": "Google",
        "maxLength": 4000000,
        "tokenLimit": 1000000,
        "context": "1024K",
    },
    "gemini-2.0-flash-thinking-exp": {
        "id": "gemini-2.0-flash-thinking-exp",
        "name": "Gemini-2.0-Flash-Thinking-Exp",
        "model": "Gemini",
        "provider": "Google",
        "maxLength": 4000000,
        "tokenLimit": 1000000,
        "context": "1024K",
    },
    "gemini-1.5-flash-002": {
        "id": "gemini-1.5-flash-002",
        "name": "Gemini-1.5-Flash-1M",
        "model": "Gemini",
        "provider": "Google",
        "maxLength": 4000000,
        "tokenLimit": 1000000,
        "context": "1024K",
    },
    "gemini-1.5-pro-002": {
        "id": "gemini-1.5-pro-002",
        "name": "Gemini-1.5-Pro-1M",
        "model": "Gemini",
        "provider": "Google",
        "maxLength": 4000000,
        "tokenLimit": 1000000,
        "context": "1024K",
    },
}
  181. class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
  182. url = "https://liaobots.site"
  183. working = True
  184. supports_message_history = True
  185. supports_system_message = True
  186. default_model = "gpt-4o-2024-11-20"
  187. models = list(models.keys())
  188. model_aliases = {
  189. "gpt-4o-mini": "gpt-4o-mini-free",
  190. "gpt-4o": default_model,
  191. "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
  192. "gpt-4": default_model,
  193. "o1-preview": "o1-preview-2024-09-12",
  194. "o1-mini": "o1-mini-2024-09-12",
  195. "deepseek-r1": "DeepSeek-R1-Distill-Llama-70b",
  196. "deepseek-r1": "DeepSeek-R1",
  197. "deepseek-v3": "DeepSeek-V3",
  198. "claude-3-opus": "claude-3-opus-20240229",
  199. "claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
  200. "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
  201. "claude-3-sonnet": "claude-3-sonnet-20240229",
  202. "claude-3-opus": "claude-3-opus-20240229-t",
  203. "claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t",
  204. "gemini-2.0-flash": "gemini-2.0-flash-exp",
  205. "gemini-2.0-flash-thinking": "gemini-2.0-flash-thinking-exp",
  206. "gemini-1.5-flash": "gemini-1.5-flash-002",
  207. "gemini-1.5-pro": "gemini-1.5-pro-002"
  208. }
  209. _auth_code = ""
  210. _cookie_jar = None
  211. @classmethod
  212. def get_model(cls, model: str) -> str:
  213. """
  214. Retrieve the internal model identifier based on the provided model name or alias.
  215. """
  216. if model in cls.model_aliases:
  217. model = cls.model_aliases[model]
  218. if model not in models:
  219. raise ValueError(f"Model '{model}' is not supported.")
  220. return model
  221. @classmethod
  222. def is_supported(cls, model: str) -> bool:
  223. """
  224. Check if the given model is supported.
  225. """
  226. return model in models or model in cls.model_aliases
  227. @classmethod
  228. async def create_async_generator(
  229. cls,
  230. model: str,
  231. messages: Messages,
  232. proxy: str = None,
  233. connector: BaseConnector = None,
  234. **kwargs
  235. ) -> AsyncResult:
  236. model = cls.get_model(model)
  237. headers = {
  238. "authority": "liaobots.com",
  239. "content-type": "application/json",
  240. "origin": cls.url,
  241. "referer": f"{cls.url}/",
  242. "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
  243. }
  244. async with ClientSession(
  245. headers=headers,
  246. cookie_jar=cls._cookie_jar,
  247. connector=get_connector(connector, proxy, True)
  248. ) as session:
  249. data = {
  250. "conversationId": str(uuid.uuid4()),
  251. "model": models[model],
  252. "messages": messages,
  253. "key": "",
  254. "prompt": kwargs.get("system_message", "You are a helpful assistant."),
  255. }
  256. if not cls._auth_code:
  257. async with session.post(
  258. "https://liaobots.work/recaptcha/api/login",
  259. data={"token": "abcdefghijklmnopqrst"},
  260. verify_ssl=False
  261. ) as response:
  262. await raise_for_status(response)
  263. try:
  264. async with session.post(
  265. "https://liaobots.work/api/user",
  266. json={"authcode": cls._auth_code},
  267. verify_ssl=False
  268. ) as response:
  269. await raise_for_status(response)
  270. cls._auth_code = (await response.json(content_type=None))["authCode"]
  271. if not cls._auth_code:
  272. raise RuntimeError("Empty auth code")
  273. cls._cookie_jar = session.cookie_jar
  274. async with session.post(
  275. "https://liaobots.work/api/chat",
  276. json=data,
  277. headers={"x-auth-code": cls._auth_code},
  278. verify_ssl=False
  279. ) as response:
  280. await raise_for_status(response)
  281. async for chunk in response.content.iter_any():
  282. if b"<html coupert-item=" in chunk:
  283. raise RuntimeError("Invalid session")
  284. if chunk:
  285. yield chunk.decode(errors="ignore")
  286. except:
  287. async with session.post(
  288. "https://liaobots.work/api/user",
  289. json={"authcode": "pTIQr4FTnVRfr"},
  290. verify_ssl=False
  291. ) as response:
  292. await raise_for_status(response)
  293. cls._auth_code = (await response.json(content_type=None))["authCode"]
  294. if not cls._auth_code:
  295. raise RuntimeError("Empty auth code")
  296. cls._cookie_jar = session.cookie_jar
  297. async with session.post(
  298. "https://liaobots.work/api/chat",
  299. json=data,
  300. headers={"x-auth-code": cls._auth_code},
  301. verify_ssl=False
  302. ) as response:
  303. await raise_for_status(response)
  304. async for chunk in response.content.iter_any():
  305. if b"<html coupert-item=" in chunk:
  306. raise RuntimeError("Invalid session")
  307. if chunk:
  308. yield chunk.decode(errors="ignore")
  309. @classmethod
  310. async def initialize_auth_code(cls, session: ClientSession) -> None:
  311. """
  312. Initialize the auth code by making the necessary login requests.
  313. """
  314. async with session.post(
  315. "https://liaobots.work/api/user",
  316. json={"authcode": "pTIQr4FTnVRfr"},
  317. verify_ssl=False
  318. ) as response:
  319. await raise_for_status(response)
  320. cls._auth_code = (await response.json(content_type=None))["authCode"]
  321. if not cls._auth_code:
  322. raise RuntimeError("Empty auth code")
  323. cls._cookie_jar = session.cookie_jar
  324. @classmethod
  325. async def ensure_auth_code(cls, session: ClientSession) -> None:
  326. """
  327. Ensure the auth code is initialized, and if not, perform the initialization.
  328. """
  329. if not cls._auth_code:
  330. await cls.initialize_auth_code(session)