# Vitalentum.py
  1. from __future__ import annotations
  2. import json
  3. from aiohttp import ClientSession
  4. from ..base_provider import AsyncGeneratorProvider
  5. from ...typing import AsyncResult, Messages
  6. class Vitalentum(AsyncGeneratorProvider):
  7. url = "https://app.vitalentum.io"
  8. supports_gpt_35_turbo = True
  9. @classmethod
  10. async def create_async_generator(
  11. cls,
  12. model: str,
  13. messages: Messages,
  14. proxy: str = None,
  15. **kwargs
  16. ) -> AsyncResult:
  17. headers = {
  18. "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
  19. "Accept": "text/event-stream",
  20. "Accept-language": "de,en-US;q=0.7,en;q=0.3",
  21. "Origin": cls.url,
  22. "Referer": f"{cls.url}/",
  23. "Sec-Fetch-Dest": "empty",
  24. "Sec-Fetch-Mode": "cors",
  25. "Sec-Fetch-Site": "same-origin",
  26. }
  27. conversation = json.dumps({"history": [{
  28. "speaker": "human" if message["role"] == "user" else "bot",
  29. "text": message["content"],
  30. } for message in messages]})
  31. data = {
  32. "conversation": conversation,
  33. "temperature": 0.7,
  34. **kwargs
  35. }
  36. async with ClientSession(
  37. headers=headers
  38. ) as session:
  39. async with session.post(f"{cls.url}/api/converse-edge", json=data, proxy=proxy) as response:
  40. response.raise_for_status()
  41. async for line in response.content:
  42. line = line.decode()
  43. if line.startswith("data: "):
  44. if line.startswith("data: [DONE]"):
  45. break
  46. line = json.loads(line[6:-1])
  47. content = line["choices"][0]["delta"].get("content")
  48. if content:
  49. yield content