# client.py
from __future__ import annotations

import re
from typing import Iterator

from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
from .typing import Union, Generator, Messages, ImageType
from .base_provider import BaseProvider, ProviderType
from .image import ImageResponse as ImageProviderResponse
from .Provider import BingCreateImages, Gemini, OpenaiChat
from .errors import NoImageResponseError
from . import get_model_and_provider
  10. ImageProvider = Union[BaseProvider, object]
  11. Proxies = Union[dict, str]
  12. def read_json(text: str) -> dict:
  13. """
  14. Parses JSON code block from a string.
  15. Args:
  16. text (str): A string containing a JSON code block.
  17. Returns:
  18. dict: A dictionary parsed from the JSON code block.
  19. """
  20. match = re.search(r"```(json|)\n(?P<code>[\S\s]+?)\n```", text)
  21. if match:
  22. return match.group("code")
  23. return text
  24. def iter_response(
  25. response: iter,
  26. stream: bool,
  27. response_format: dict = None,
  28. max_tokens: int = None,
  29. stop: list = None
  30. ) -> Generator:
  31. content = ""
  32. finish_reason = None
  33. last_chunk = None
  34. for idx, chunk in enumerate(response):
  35. if last_chunk is not None:
  36. yield ChatCompletionChunk(last_chunk, finish_reason)
  37. content += str(chunk)
  38. if max_tokens is not None and idx + 1 >= max_tokens:
  39. finish_reason = "max_tokens"
  40. first = -1
  41. word = None
  42. if stop is not None:
  43. for word in list(stop):
  44. first = content.find(word)
  45. if first != -1:
  46. content = content[:first]
  47. break
  48. if stream and first != -1:
  49. first = chunk.find(word)
  50. if first != -1:
  51. chunk = chunk[:first]
  52. else:
  53. first = 0
  54. if first != -1:
  55. finish_reason = "stop"
  56. if stream:
  57. last_chunk = chunk
  58. if finish_reason is not None:
  59. break
  60. if last_chunk is not None:
  61. yield ChatCompletionChunk(last_chunk, finish_reason)
  62. if not stream:
  63. if response_format is not None and "type" in response_format:
  64. if response_format["type"] == "json_object":
  65. response = read_json(response)
  66. yield ChatCompletion(content, finish_reason)
  67. class Client():
  68. proxies: Proxies = None
  69. chat: Chat
  70. images: Images
  71. def __init__(
  72. self,
  73. provider: ProviderType = None,
  74. image_provider: ImageProvider = None,
  75. proxies: Proxies = None,
  76. **kwargs
  77. ) -> None:
  78. self.chat = Chat(self, provider)
  79. self.images = Images(self, image_provider)
  80. self.proxies: Proxies = proxies
  81. def get_proxy(self) -> Union[str, None]:
  82. if isinstance(self.proxies, str) or self.proxies is None:
  83. return self.proxies
  84. elif "all" in self.proxies:
  85. return self.proxies["all"]
  86. elif "https" in self.proxies:
  87. return self.proxies["https"]
  88. return None
  89. class Completions():
  90. def __init__(self, client: Client, provider: ProviderType = None):
  91. self.client: Client = client
  92. self.provider: ProviderType = provider
  93. def create(
  94. self,
  95. messages: Messages,
  96. model: str,
  97. provider: ProviderType = None,
  98. stream: bool = False,
  99. response_format: dict = None,
  100. max_tokens: int = None,
  101. stop: Union[list. str] = None,
  102. **kwargs
  103. ) -> Union[ChatCompletion, Generator[ChatCompletionChunk]]:
  104. if max_tokens is not None:
  105. kwargs["max_tokens"] = max_tokens
  106. if stop:
  107. kwargs["stop"] = stop
  108. model, provider = get_model_and_provider(
  109. model,
  110. self.provider if provider is None else provider,
  111. stream,
  112. **kwargs
  113. )
  114. response = provider.create_completion(model, messages, stream=stream, **kwargs)
  115. stop = [stop] if isinstance(stop, str) else stop
  116. response = iter_response(response, stream, response_format, max_tokens, stop)
  117. return response if stream else next(response)
  118. class Chat():
  119. completions: Completions
  120. def __init__(self, client: Client, provider: ProviderType = None):
  121. self.completions = Completions(client, provider)
  122. class ImageModels():
  123. gemini = Gemini
  124. openai = OpenaiChat
  125. def __init__(self, client: Client) -> None:
  126. self.client = client
  127. self.default = BingCreateImages(proxy=self.client.get_proxy())
  128. def get(self, name: str, default: ImageProvider = None) -> ImageProvider:
  129. return getattr(self, name) if hasattr(self, name) else default or self.default
  130. class Images():
  131. def __init__(self, client: Client, provider: ImageProvider = None):
  132. self.client: Client = client
  133. self.provider: ImageProvider = provider
  134. self.models: ImageModels = ImageModels(client)
  135. def generate(self, prompt, model: str = None, **kwargs):
  136. provider = self.models.get(model, self.provider)
  137. if isinstance(provider, BaseProvider) or isinstance(provider, type) and issubclass(provider, BaseProvider):
  138. prompt = f"create a image: {prompt}"
  139. response = provider.create_completion(
  140. "",
  141. [{"role": "user", "content": prompt}],
  142. True,
  143. proxy=self.client.get_proxy(),
  144. **kwargs
  145. )
  146. else:
  147. response = provider.create(prompt)
  148. for chunk in response:
  149. if isinstance(chunk, ImageProviderResponse):
  150. images = [chunk.images] if isinstance(chunk.images, str) else chunk.images
  151. return ImagesResponse([Image(image) for image in images])
  152. raise NoImageResponseError()
  153. def create_variation(self, image: ImageType, model: str = None, **kwargs):
  154. provider = self.models.get(model, self.provider)
  155. result = None
  156. if isinstance(provider, type) and issubclass(provider, BaseProvider):
  157. response = provider.create_completion(
  158. "",
  159. [{"role": "user", "content": "create a image like this"}],
  160. True,
  161. image=image,
  162. proxy=self.client.get_proxy(),
  163. **kwargs
  164. )
  165. for chunk in response:
  166. if isinstance(chunk, ImageProviderResponse):
  167. result = ([chunk.images] if isinstance(chunk.images, str) else chunk.images)
  168. result = ImagesResponse([Image(image)for image in result])
  169. if result is None:
  170. raise NoImageResponseError()
  171. return result