BlackboxAPI.py

from __future__ import annotations

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..requests.raise_for_status import raise_for_status
from ..providers.response import Reasoning


class BlackboxAPI(AsyncGeneratorProvider, ProviderModelMixin):
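    """Async provider for the Blackbox AI chat API.

    Yields plain-text response chunks; for models listed in
    ``reasoning_models``, content between <think> and </think> tags is
    emitted as Reasoning objects instead of plain text.
    """
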
    label = "Blackbox AI API"
    url = "https://api.blackbox.ai"
    api_endpoint = "https://api.blackbox.ai/api/chat"

    working = True
    needs_auth = False
    supports_stream = False
    supports_system_message = True
    supports_message_history = True

    default_model = 'deepseek-ai/DeepSeek-V3'
    reasoning_models = ['deepseek-ai/DeepSeek-R1']
    models = [
        default_model,
        'mistralai/Mistral-Small-24B-Instruct-2501',
        'deepseek-ai/deepseek-llm-67b-chat',
        'databricks/dbrx-instruct',
        'Qwen/QwQ-32B-Preview',
        'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO'
    ] + reasoning_models
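
    # Short names accepted from callers, resolved to the full model IDs
    # above (assumption: the mapping is applied by ProviderModelMixin.get_model).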
    model_aliases = {
        "deepseek-v3": "deepseek-ai/DeepSeek-V3",
        "deepseek-r1": "deepseek-ai/DeepSeek-R1",
        "deepseek-chat": "deepseek-ai/deepseek-llm-67b-chat",
        "mistral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
        "dbrx-instruct": "databricks/dbrx-instruct",
        "qwq-32b": "Qwen/QwQ-32B-Preview",
        "hermes-2-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
    }
    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        max_tokens: int = None,
        **kwargs
    ) -> AsyncResult:
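        """Stream completion chunks from the Blackbox chat endpoint.

        Args:
            model: Model name or alias, resolved via cls.get_model().
            messages: Chat history as OpenAI-style message dicts.
            proxy: Optional proxy URL, passed through to aiohttp.
            max_tokens: Optional completion-length cap forwarded to the API.
            **kwargs: Ignored; accepted for interface compatibility.
        """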
        model = cls.get_model(model)

        headers = {
            "Content-Type": "application/json",
        }

        async with ClientSession(headers=headers) as session:
            data = {
                "messages": messages,
                "model": model,
                "max_tokens": max_tokens
            }
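
            # max_tokens is forwarded as-is; None presumably lets the API
            # apply its own default limit.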
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                await raise_for_status(response)

                is_reasoning = False
                current_reasoning = ""
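
                # Reasoning models interleave chain-of-thought between
                # <think> and </think> tags; buffer that span and yield it
                # as a single Reasoning object rather than as plain text.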
                async for chunk in response.content:
                    if not chunk:
                        continue

                    text = chunk.decode(errors='ignore')

                    if model in cls.reasoning_models:
                        if "<think>" in text:
                            text = text.replace("<think>", "")
                            is_reasoning = True
                            current_reasoning = text
                            continue

                        if "</think>" in text:
                            text = text.replace("</think>", "")
                            is_reasoning = False
                            current_reasoning += text
                            yield Reasoning(status=current_reasoning.strip())
                            current_reasoning = ""
                            continue

                        if is_reasoning:
                            current_reasoning += text
                            continue

                    if text:
                        yield text
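
                # If the stream ended while still inside an unclosed
                # <think> block, flush the buffered reasoning.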
                if is_reasoning and current_reasoning:
                    yield Reasoning(status=current_reasoning.strip())
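
# Minimal usage sketch (assumption: this module ships inside the g4f
# package and is consumed through g4f's public client rather than run
# directly; the Client API shown below is an assumption about that package):
#
#     from g4f.client import Client
#     from g4f.Provider import BlackboxAPI
#
#     client = Client(provider=BlackboxAPI)
#     response = client.chat.completions.create(
#         model="deepseek-r1",
#         messages=[{"role": "user", "content": "Why is the sky blue?"}],
#     )
#     print(response.choices[0].message.content)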