Vercel.py

from __future__ import annotations

import json, base64, requests, random, os

# PyExecJS is an optional dependency; its absence is only reported when the
# provider is actually used.
try:
    import execjs
    has_requirements = True
except ImportError:
    has_requirements = False

from ..typing import Messages, TypedDict, CreateResult, Any
from .base_provider import AbstractProvider
from ..errors import MissingRequirementsError
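
# Provider for https://chat.vercel.ai: create_completion streams the raw chat
# response, and get_anti_bot_token (below) solves the JavaScript challenge
# served at /openai.jpeg to produce the required 'custom-encoding' header.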
class Vercel(AbstractProvider):
    url = 'https://chat.vercel.ai'
    working = True
    supports_message_history = True
    supports_gpt_35_turbo = True
    supports_stream = True
    supports_gpt_4 = False

    @staticmethod
    def create_completion(
        model: str,
        messages: Messages,
        stream: bool,
        proxy: str = None,
        **kwargs
    ) -> CreateResult:
        if not has_requirements:
            raise MissingRequirementsError('Install "PyExecJS" package')

        headers = {
            'authority': 'chat.vercel.ai',
            'accept': '*/*',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control': 'no-cache',
            'content-type': 'application/json',
            'custom-encoding': get_anti_bot_token(),
            'origin': 'https://chat.vercel.ai',
            'pragma': 'no-cache',
            'referer': 'https://chat.vercel.ai/',
            'sec-ch-ua': '"Chromium";v="122", "Not(A:Brand";v="24", "Google Chrome";v="122"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36',
        }

        # 'id' is a random 7-character conversation identifier (6 hex chars + 'a').
        json_data = {
            'messages': messages,
            'id': f'{os.urandom(3).hex()}a',
        }

        # Retry up to max_retries times on an HTTP error status, then stream the
        # response body chunk by chunk.
        max_retries = kwargs.get('max_retries', 6)
        for _ in range(max_retries):
            response = requests.post('https://chat.vercel.ai/api/chat',
                headers=headers, json=json_data, stream=True, proxies={"https": proxy})
            try:
                response.raise_for_status()
            except requests.HTTPError:
                continue
            for token in response.iter_content(chunk_size=None):
                yield token.decode()
            break
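
# The 'custom-encoding' header carries an anti-bot token: chat.vercel.ai serves
# a base64-encoded challenge at /openai.jpeg whose 'c' field is a JavaScript
# function and 'a' its argument. The function is evaluated with PyExecJS and the
# result is re-encoded as base64.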
def get_anti_bot_token() -> str:
    headers = {
        'authority': 'sdk.vercel.ai',
        'accept': '*/*',
        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'cache-control': 'no-cache',
        'pragma': 'no-cache',
        'referer': 'https://sdk.vercel.ai/',
        'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
    }

    # The challenge is delivered as base64-encoded JSON.
    response = requests.get('https://chat.vercel.ai/openai.jpeg',
        headers=headers).text
    raw_data = json.loads(base64.b64decode(response,
        validate=True))

    # Evaluate the challenge function ('c') with its argument ('a') in a minimal
    # browser-like environment.
    js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
        return (%s)(%s)''' % (raw_data['c'], raw_data['a'])
    sec_list = [execjs.compile(js_script).call('')[0], [], "sentinel"]

    # Re-encode the solved challenge together with the original token 't'.
    raw_token = json.dumps({'r': sec_list, 't': raw_data['t']},
        separators=(",", ":"))
    return base64.b64encode(raw_token.encode('utf-8')).decode()
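
# Usage sketch (assumes this module is imported as part of its parent provider
# package, e.g. in g4f, and that the model name is accepted by the backend):
#
#     for chunk in Vercel.create_completion(
#             model='gpt-3.5-turbo',
#             messages=[{'role': 'user', 'content': 'Hello'}],
#             stream=True):
#         print(chunk, end='')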