PerplexityLabs.py

from __future__ import annotations

import random
import json

from ..typing import AsyncResult, Messages
from ..requests import StreamSession, raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

API_URL = "https://www.perplexity.ai/socket.io/"
WS_URL = "wss://www.perplexity.ai/socket.io/"

class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://labs.perplexity.ai"
    working = True
    default_model = "llama-3.1-70b-instruct"
    models = [
        "llama-3.1-sonar-large-128k-online",
        "llama-3.1-sonar-small-128k-online",
        "llama-3.1-sonar-large-128k-chat",
        "llama-3.1-sonar-small-128k-chat",
        "llama-3.1-8b-instruct",
        "llama-3.1-70b-instruct",
        "/models/LiquidCloud",
    ]
    model_aliases = {
        "sonar-online": "llama-3.1-sonar-large-128k-online",
        "sonar-chat": "llama-3.1-sonar-large-128k-chat",
        "llama-3.1-8b": "llama-3.1-8b-instruct",
        "llama-3.1-70b": "llama-3.1-70b-instruct",
        "lfm-40b": "/models/LiquidCloud",
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:121.0) Gecko/20100101 Firefox/121.0",
            "Accept": "*/*",
            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Origin": cls.url,
            "Connection": "keep-alive",
            "Referer": f"{cls.url}/",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-site",
            "TE": "trailers",
        }
        async with StreamSession(headers=headers, proxies={"all": proxy}) as session:
            # Engine.IO handshake: open a polling transport to obtain a session id.
            t = format(random.getrandbits(32), "08x")
            async with session.get(
                f"{API_URL}?EIO=4&transport=polling&t={t}"
            ) as response:
                await raise_for_status(response)
                text = await response.text()
            # The handshake packet is a "0" prefix followed by a JSON payload.
            assert text.startswith("0")
            sid = json.loads(text[1:])["sid"]
            # Attach to the Socket.IO namespace as an anonymous user.
            post_data = '40{"jwt":"anonymous-ask-user"}'
            async with session.post(
                f"{API_URL}?EIO=4&transport=polling&t={t}&sid={sid}",
                data=post_data
            ) as response:
                await raise_for_status(response)
                assert await response.text() == "OK"
            # Upgrade the transport to a websocket (probe, upgrade, confirm).
            async with session.ws_connect(f"{WS_URL}?EIO=4&transport=websocket&sid={sid}", autoping=False) as ws:
                await ws.send_str("2probe")
                assert await ws.receive_str() == "3probe"
                await ws.send_str("5")
                assert await ws.receive_str()
                assert await ws.receive_str() == "6"
                message_data = {
                    "version": "2.5",
                    "source": "default",
                    "model": cls.get_model(model),
                    "messages": messages
                }
                # "42" prefixes a Socket.IO event message carrying a JSON array payload.
                await ws.send_str("42" + json.dumps(["perplexity_labs", message_data]))
                last_message = 0
                while True:
                    message = await ws.receive_str()
                    # "2" is an Engine.IO ping; answer with a "3" pong to keep the connection alive.
                    if message == "2":
                        if last_message == 0:
                            raise RuntimeError("Unknown error")
                        await ws.send_str("3")
                        continue
                    try:
                        data = json.loads(message[2:])[1]
                        # Yield only the text appended since the previous event.
                        yield data["output"][last_message:]
                        last_message = len(data["output"])
                        if data["final"]:
                            break
                    except Exception as e:
                        raise RuntimeError(f"Message: {message}") from e
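
A minimal sketch of driving the provider directly, assuming the module runs inside the g4f package so the relative imports above resolve; the message content and the main() wrapper are illustrative, not part of this file:

import asyncio

async def main() -> None:
    # Hypothetical example: stream a completion from the default model.
    messages = [{"role": "user", "content": "Hello!"}]
    async for chunk in PerplexityLabs.create_async_generator(
        model=PerplexityLabs.default_model,
        messages=messages,
    ):
        print(chunk, end="", flush=True)

if __name__ == "__main__":
    asyncio.run(main())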