Ylokh.py

from __future__ import annotations

import json

from ...requests import StreamSession
from ..base_provider import AsyncGeneratorProvider
from ...typing import AsyncResult, Messages


class Ylokh(AsyncGeneratorProvider):
    url = "https://chat.ylokh.xyz"
    working = False
    supports_message_history = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        proxy: str = None,
        timeout: int = 120,
        **kwargs
    ) -> AsyncResult:
        model = model if model else "gpt-3.5-turbo"
        headers = {"Origin": cls.url, "Referer": f"{cls.url}/"}
        # OpenAI-style chat completion payload; any extra kwargs are passed through.
        data = {
            "messages": messages,
            "model": model,
            "temperature": 1,
            "presence_penalty": 0,
            "top_p": 1,
            "frequency_penalty": 0,
            "allow_fallback": True,
            "stream": stream,
            **kwargs
        }
        async with StreamSession(
            headers=headers,
            proxies={"https": proxy},
            timeout=timeout
        ) as session:
            async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response:
                response.raise_for_status()
                if stream:
                    # Server-sent events: each line is "data: {json}" until "data: [DONE]".
                    async for line in response.iter_lines():
                        line = line.decode()
                        if line.startswith("data: "):
                            if line.startswith("data: [DONE]"):
                                break
                            line = json.loads(line[6:])
                            content = line["choices"][0]["delta"].get("content")
                            if content:
                                yield content
                else:
                    # Non-streaming: yield the complete message content once.
                    chat = await response.json()
                    yield chat["choices"][0]["message"].get("content")
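

# Usage sketch (hypothetical; assumes this module is imported as part of its
# provider package so the relative imports resolve, and that the upstream API
# is reachable). Messages follow the OpenAI chat schema used in the payload above.
#
#     import asyncio
#
#     async def demo():
#         messages = [{"role": "user", "content": "Hello"}]
#         async for chunk in Ylokh.create_async_generator("gpt-3.5-turbo", messages):
#             print(chunk, end="", flush=True)
#
#     asyncio.run(demo())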