Raycast.py

from __future__ import annotations

import json

import requests

from ...typing import CreateResult, Messages
from ..base_provider import AbstractProvider


class Raycast(AbstractProvider):
    url = "https://raycast.com"
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    supports_stream = True
    needs_auth = True
    working = True

    models = [
        "gpt-3.5-turbo",
        "gpt-4",
    ]
    @staticmethod
    def create_completion(
        model: str,
        messages: Messages,
        stream: bool,
        proxy: str = None,
        **kwargs,
    ) -> CreateResult:
        # Raycast requires an authenticated session; the bearer token must be
        # supplied via the `auth` keyword argument.
        auth = kwargs.get('auth')
        if not auth:
            raise ValueError("Raycast needs an auth token, pass it with the `auth` parameter")

        headers = {
            'Accept': 'application/json',
            'Accept-Language': 'en-US,en;q=0.9',
            'Authorization': f'Bearer {auth}',
            'Content-Type': 'application/json',
            'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0',
        }
        # Convert the OpenAI-style message list into Raycast's expected shape.
        parsed_messages = [
            {'author': message['role'], 'content': {'text': message['content']}}
            for message in messages
        ]
        data = {
            "debug": False,
            "locale": "en-CN",
            "messages": parsed_messages,
            "model": model,
            "provider": "openai",
            "source": "ai_chat",
            "system_instruction": "markdown",
            "temperature": 0.5,
        }
        # The endpoint streams server-sent events; forward only the chunks
        # that actually carry text.
        response = requests.post(
            "https://backend.raycast.com/api/v1/ai/chat_completions",
            headers=headers,
            json=data,
            stream=True,
            proxies={"https": proxy} if proxy else None,
        )
        for line in response.iter_lines():
            if b'data: ' not in line:
                continue
            completion_chunk = json.loads(line.decode().replace('data: ', ''))
            text = completion_chunk.get('text')
            if text is not None:
                yield text
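

# Usage sketch (illustrative only, not part of this module): calling
# create_completion from code that imports the Raycast class. The token value
# below is a placeholder, not a real credential.
#
#     messages = [{"role": "user", "content": "Hello"}]
#     for chunk in Raycast.create_completion(
#         model="gpt-3.5-turbo",
#         messages=messages,
#         stream=True,
#         auth="YOUR_RAYCAST_BEARER_TOKEN",  # placeholder auth token
#     ):
#         print(chunk, end="", flush=True)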