# DDG.py — DuckDuckGo AI Chat provider (g4f).
from __future__ import annotations

import json

import aiohttp
from aiohttp import ClientSession, BaseConnector

from .. import debug
from ..typing import AsyncResult, Messages
from ..requests.aiohttp import get_connector
from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation
from .helper import format_prompt
# Static catalog of the chat models exposed by DuckDuckGo AI Chat.
# Each entry mirrors the JSON the duckduckgo.com front end ships:
# "model" is the identifier sent to the API; "isAvailable" flags whether
# the site currently lists the model.
# NOTE(review): "inputCharLimit": 16e3 is a float (16000.0), presumably
# copied verbatim from JavaScript — confirm consumers tolerate a float.
MODELS = [
    {"model":"gpt-4o","modelName":"GPT-4o","modelVariant":None,"modelStyleId":"gpt-4o-mini","createdBy":"OpenAI","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"4"},
    {"model":"gpt-4o-mini","modelName":"GPT-4o","modelVariant":"mini","modelStyleId":"gpt-4o-mini","createdBy":"OpenAI","moderationLevel":"HIGH","isAvailable":0,"inputCharLimit":16e3,"settingId":"3"},
    {"model":"claude-3-5-sonnet-20240620","modelName":"Claude 3.5","modelVariant":"Sonnet","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"7"},
    {"model":"claude-3-opus-20240229","modelName":"Claude 3","modelVariant":"Opus","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"2"},
    {"model":"claude-3-haiku-20240307","modelName":"Claude 3","modelVariant":"Haiku","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":0,"inputCharLimit":16e3,"settingId":"1"},
    {"model":"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo","modelName":"Llama 3.1","modelVariant":"70B","modelStyleId":"llama-3","createdBy":"Meta","moderationLevel":"MEDIUM","isAvailable":0,"isOpenSource":0,"inputCharLimit":16e3,"settingId":"5"},
    {"model":"mistralai/Mixtral-8x7B-Instruct-v0.1","modelName":"Mixtral","modelVariant":"8x7B","modelStyleId":"mixtral","createdBy":"Mistral AI","moderationLevel":"LOW","isAvailable":0,"isOpenSource":0,"inputCharLimit":16e3,"settingId":"6"}
]
  20. class Conversation(BaseConversation):
  21. vqd: str = None
  22. message_history: Messages = []
  23. def __init__(self, model: str):
  24. self.model = model
  25. class DDG(AsyncGeneratorProvider, ProviderModelMixin):
  26. url = "https://duckduckgo.com"
  27. api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
  28. working = True
  29. supports_stream = True
  30. supports_system_message = True
  31. supports_message_history = True
  32. default_model = "gpt-4o-mini"
  33. models = [model.get("model") for model in MODELS]
  34. model_aliases = {
  35. "claude-3-haiku": "claude-3-haiku-20240307",
  36. "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
  37. "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
  38. "gpt-4": "gpt-4o-mini"
  39. }
  40. @classmethod
  41. async def get_vqd(cls, proxy: str, connector: BaseConnector = None):
  42. status_url = "https://duckduckgo.com/duckchat/v1/status"
  43. headers = {
  44. 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
  45. 'Accept': 'text/event-stream',
  46. 'x-vqd-accept': '1'
  47. }
  48. async with aiohttp.ClientSession(connector=get_connector(connector, proxy)) as session:
  49. async with session.get(status_url, headers=headers) as response:
  50. await raise_for_status(response)
  51. return response.headers.get("x-vqd-4")
  52. @classmethod
  53. async def create_async_generator(
  54. cls,
  55. model: str,
  56. messages: Messages,
  57. conversation: Conversation = None,
  58. return_conversation: bool = False,
  59. proxy: str = None,
  60. connector: BaseConnector = None,
  61. **kwargs
  62. ) -> AsyncResult:
  63. model = cls.get_model(model)
  64. is_new_conversation = False
  65. if conversation is None:
  66. conversation = Conversation(model)
  67. is_new_conversation = True
  68. debug.last_model = model
  69. if conversation.vqd is None:
  70. conversation.vqd = await cls.get_vqd(proxy, connector)
  71. if not conversation.vqd:
  72. raise Exception("Failed to obtain VQD token")
  73. headers = {
  74. 'accept': 'text/event-stream',
  75. 'content-type': 'application/json',
  76. 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
  77. 'x-vqd-4': conversation.vqd,
  78. }
  79. async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
  80. if is_new_conversation:
  81. conversation.message_history = [{"role": "user", "content": format_prompt(messages)}]
  82. else:
  83. conversation.message_history = [
  84. *conversation.message_history,
  85. messages[-2],
  86. messages[-1]
  87. ]
  88. if return_conversation:
  89. yield conversation
  90. data = {
  91. "model": conversation.model,
  92. "messages": conversation.message_history
  93. }
  94. async with session.post(cls.api_endpoint, json=data) as response:
  95. conversation.vqd = response.headers.get("x-vqd-4")
  96. await raise_for_status(response)
  97. async for line in response.content:
  98. if line:
  99. decoded_line = line.decode('utf-8')
  100. if decoded_line.startswith('data: '):
  101. json_str = decoded_line[6:]
  102. if json_str == '[DONE]':
  103. break
  104. try:
  105. json_data = json.loads(json_str)
  106. if 'message' in json_data:
  107. yield json_data['message']
  108. except json.JSONDecodeError:
  109. pass