# AmigoChat.py
  1. from __future__ import annotations
  2. import json
  3. import uuid
  4. from ..typing import AsyncResult, Messages
  5. from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
  6. from ..image import ImageResponse
  7. from ..requests import StreamSession, raise_for_status
  8. from ..errors import ResponseStatusError
  9. class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
  10. url = "https://amigochat.io/chat/"
  11. chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions"
  12. image_api_endpoint = "https://api.amigochat.io/v1/images/generations"
  13. working = True
  14. supports_stream = True
  15. supports_system_message = True
  16. supports_message_history = True
  17. default_model = 'gpt-4o-mini'
  18. chat_models = [
  19. 'gpt-4o',
  20. default_model,
  21. 'o1-preview',
  22. 'o1-mini',
  23. 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo',
  24. 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
  25. 'claude-3-sonnet-20240229',
  26. 'gemini-1.5-pro',
  27. ]
  28. image_models = [
  29. 'flux-pro/v1.1',
  30. 'flux-realism',
  31. 'flux-pro',
  32. 'dalle-e-3',
  33. ]
  34. models = [*chat_models, *image_models]
  35. model_aliases = {
  36. "o1": "o1-preview",
  37. "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
  38. "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
  39. "claude-3.5-sonnet": "claude-3-sonnet-20240229",
  40. "gemini-pro": "gemini-1.5-pro",
  41. "flux-pro": "flux-pro/v1.1",
  42. "dalle-3": "dalle-e-3",
  43. }
  44. persona_ids = {
  45. 'gpt-4o': "gpt",
  46. 'gpt-4o-mini': "amigo",
  47. 'o1-preview': "openai-o-one",
  48. 'o1-mini': "openai-o-one-mini",
  49. 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo': "llama-three-point-one",
  50. 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo': "llama-3-2",
  51. 'claude-3-sonnet-20240229': "claude",
  52. 'gemini-1.5-pro': "gemini-1-5-pro",
  53. 'flux-pro/v1.1': "flux-1-1-pro",
  54. 'flux-realism': "flux-realism",
  55. 'flux-pro': "flux-pro",
  56. 'dalle-e-3': "dalle-three",
  57. }
  58. @classmethod
  59. def get_personaId(cls, model: str) -> str:
  60. return cls.persona_ids[model]
  61. @classmethod
  62. async def create_async_generator(
  63. cls,
  64. model: str,
  65. messages: Messages,
  66. proxy: str = None,
  67. stream: bool = False,
  68. timeout: int = 300,
  69. frequency_penalty: float = 0,
  70. max_tokens: int = 4000,
  71. presence_penalty: float = 0,
  72. temperature: float = 0.5,
  73. top_p: float = 0.95,
  74. **kwargs
  75. ) -> AsyncResult:
  76. model = cls.get_model(model)
  77. device_uuid = str(uuid.uuid4())
  78. max_retries = 3
  79. retry_count = 0
  80. while retry_count < max_retries:
  81. try:
  82. headers = {
  83. "accept": "*/*",
  84. "accept-language": "en-US,en;q=0.9",
  85. "authorization": "Bearer",
  86. "cache-control": "no-cache",
  87. "content-type": "application/json",
  88. "origin": cls.url,
  89. "pragma": "no-cache",
  90. "priority": "u=1, i",
  91. "referer": f"{cls.url}/",
  92. "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
  93. "sec-ch-ua-mobile": "?0",
  94. "sec-ch-ua-platform": '"Linux"',
  95. "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
  96. "x-device-language": "en-US",
  97. "x-device-platform": "web",
  98. "x-device-uuid": device_uuid,
  99. "x-device-version": "1.0.41"
  100. }
  101. async with StreamSession(headers=headers, proxy=proxy) as session:
  102. if model not in cls.image_models:
  103. data = {
  104. "messages": messages,
  105. "model": model,
  106. "personaId": cls.get_personaId(model),
  107. "frequency_penalty": frequency_penalty,
  108. "max_tokens": max_tokens,
  109. "presence_penalty": presence_penalty,
  110. "stream": stream,
  111. "temperature": temperature,
  112. "top_p": top_p
  113. }
  114. async with session.post(cls.chat_api_endpoint, json=data, timeout=timeout) as response:
  115. await raise_for_status(response)
  116. async for line in response.iter_lines():
  117. line = line.decode('utf-8').strip()
  118. if line.startswith('data: '):
  119. if line == 'data: [DONE]':
  120. break
  121. try:
  122. chunk = json.loads(line[6:]) # Remove 'data: ' prefix
  123. if 'choices' in chunk and len(chunk['choices']) > 0:
  124. choice = chunk['choices'][0]
  125. if 'delta' in choice:
  126. content = choice['delta'].get('content')
  127. elif 'text' in choice:
  128. content = choice['text']
  129. else:
  130. content = None
  131. if content:
  132. yield content
  133. except json.JSONDecodeError:
  134. pass
  135. else:
  136. # Image generation
  137. prompt = messages[-1]['content']
  138. data = {
  139. "prompt": prompt,
  140. "model": model,
  141. "personaId": cls.get_personaId(model)
  142. }
  143. async with session.post(cls.image_api_endpoint, json=data) as response:
  144. await raise_for_status(response)
  145. response_data = await response.json()
  146. if "data" in response_data:
  147. image_urls = []
  148. for item in response_data["data"]:
  149. if "url" in item:
  150. image_url = item["url"]
  151. image_urls.append(image_url)
  152. if image_urls:
  153. yield ImageResponse(image_urls, prompt)
  154. else:
  155. yield None
  156. break
  157. except (ResponseStatusError, Exception) as e:
  158. retry_count += 1
  159. if retry_count >= max_retries:
  160. raise e
  161. device_uuid = str(uuid.uuid4())