# client.py
from __future__ import annotations
import re
import os
import time
import random
import string
from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
from .typing import Union, Iterator, Messages, ImageType
from .providers.types import BaseProvider, ProviderType
from .image import ImageResponse as ImageProviderResponse
from .Provider.BingCreateImages import BingCreateImages
from .Provider.needs_auth import Gemini, OpenaiChat
from .errors import NoImageResponseError
from . import get_model_and_provider, get_last_provider

# A provider usable for image generation: either a chat BaseProvider
# (driven with an image prompt) or a dedicated image-provider object.
ImageProvider = Union[BaseProvider, object]
# Proxy configuration: a single URL for all traffic, or a scheme -> URL map.
Proxies = Union[dict, str]
# Streaming yields chunks; non-streaming yields a single completion.
IterResponse = Iterator[Union[ChatCompletion, ChatCompletionChunk]]
  18. def read_json(text: str) -> dict:
  19. """
  20. Parses JSON code block from a string.
  21. Args:
  22. text (str): A string containing a JSON code block.
  23. Returns:
  24. dict: A dictionary parsed from the JSON code block.
  25. """
  26. match = re.search(r"```(json|)\n(?P<code>[\S\s]+?)\n```", text)
  27. if match:
  28. return match.group("code")
  29. return text
  30. def iter_response(
  31. response: iter[str],
  32. stream: bool,
  33. response_format: dict = None,
  34. max_tokens: int = None,
  35. stop: list = None
  36. ) -> IterResponse:
  37. content = ""
  38. finish_reason = None
  39. completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
  40. for idx, chunk in enumerate(response):
  41. content += str(chunk)
  42. if max_tokens is not None and idx + 1 >= max_tokens:
  43. finish_reason = "length"
  44. first = -1
  45. word = None
  46. if stop is not None:
  47. for word in list(stop):
  48. first = content.find(word)
  49. if first != -1:
  50. content = content[:first]
  51. break
  52. if stream and first != -1:
  53. first = chunk.find(word)
  54. if first != -1:
  55. chunk = chunk[:first]
  56. else:
  57. first = 0
  58. if first != -1:
  59. finish_reason = "stop"
  60. if stream:
  61. yield ChatCompletionChunk(chunk, None, completion_id, int(time.time()))
  62. if finish_reason is not None:
  63. break
  64. finish_reason = "stop" if finish_reason is None else finish_reason
  65. if stream:
  66. yield ChatCompletionChunk(None, finish_reason, completion_id, int(time.time()))
  67. else:
  68. if response_format is not None and "type" in response_format:
  69. if response_format["type"] == "json_object":
  70. content = read_json(content)
  71. yield ChatCompletion(content, finish_reason, completion_id, int(time.time()))
  72. def iter_append_model_and_provider(response: IterResponse) -> IterResponse:
  73. last_provider = None
  74. for chunk in response:
  75. last_provider = get_last_provider(True) if last_provider is None else last_provider
  76. chunk.model = last_provider.get("model")
  77. chunk.provider = last_provider.get("name")
  78. yield chunk
  79. class Client():
  80. def __init__(
  81. self,
  82. api_key: str = None,
  83. proxies: Proxies = None,
  84. provider: ProviderType = None,
  85. image_provider: ImageProvider = None,
  86. **kwargs
  87. ) -> None:
  88. self.api_key: str = api_key
  89. self.proxies: Proxies = proxies
  90. self.chat: Chat = Chat(self, provider)
  91. self.images: Images = Images(self, image_provider)
  92. def get_proxy(self) -> Union[str, None]:
  93. if isinstance(self.proxies, str):
  94. return self.proxies
  95. elif self.proxies is None:
  96. return os.environ.get("G4F_PROXY")
  97. elif "all" in self.proxies:
  98. return self.proxies["all"]
  99. elif "https" in self.proxies:
  100. return self.proxies["https"]
  101. def filter_none(**kwargs):
  102. for key in list(kwargs.keys()):
  103. if kwargs[key] is None:
  104. del kwargs[key]
  105. return kwargs
  106. class Completions():
  107. def __init__(self, client: Client, provider: ProviderType = None):
  108. self.client: Client = client
  109. self.provider: ProviderType = provider
  110. def create(
  111. self,
  112. messages: Messages,
  113. model: str,
  114. provider: ProviderType = None,
  115. stream: bool = False,
  116. response_format: dict = None,
  117. max_tokens: int = None,
  118. stop: Union[list[str], str] = None,
  119. api_key: str = None,
  120. **kwargs
  121. ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
  122. model, provider = get_model_and_provider(
  123. model,
  124. self.provider if provider is None else provider,
  125. stream,
  126. **kwargs
  127. )
  128. stop = [stop] if isinstance(stop, str) else stop
  129. response = provider.create_completion(
  130. model, messages, stream,
  131. **filter_none(
  132. proxy=self.client.get_proxy(),
  133. max_tokens=max_tokens,
  134. stop=stop,
  135. api_key=self.client.api_key if api_key is None else api_key
  136. ),
  137. **kwargs
  138. )
  139. response = iter_response(response, stream, response_format, max_tokens, stop)
  140. response = iter_append_model_and_provider(response)
  141. return response if stream else next(response)
class Chat():
    """Namespace exposing the completions API (client.chat.completions.create)."""
    # Populated in __init__ with the client's default provider.
    completions: Completions

    def __init__(self, client: Client, provider: ProviderType = None):
        self.completions = Completions(client, provider)
  146. class ImageModels():
  147. gemini = Gemini
  148. openai = OpenaiChat
  149. def __init__(self, client: Client) -> None:
  150. self.client = client
  151. self.default = BingCreateImages(proxy=self.client.get_proxy())
  152. def get(self, name: str, default: ImageProvider = None) -> ImageProvider:
  153. return getattr(self, name) if hasattr(self, name) else default or self.default
  154. class Images():
  155. def __init__(self, client: Client, provider: ImageProvider = None):
  156. self.client: Client = client
  157. self.provider: ImageProvider = provider
  158. self.models: ImageModels = ImageModels(client)
  159. def generate(self, prompt, model: str = None, **kwargs):
  160. provider = self.models.get(model, self.provider)
  161. if isinstance(provider, BaseProvider) or isinstance(provider, type) and issubclass(provider, BaseProvider):
  162. prompt = f"create a image: {prompt}"
  163. response = provider.create_completion(
  164. "",
  165. [{"role": "user", "content": prompt}],
  166. True,
  167. proxy=self.client.get_proxy(),
  168. **kwargs
  169. )
  170. else:
  171. response = provider.create(prompt)
  172. for chunk in response:
  173. if isinstance(chunk, ImageProviderResponse):
  174. images = [chunk.images] if isinstance(chunk.images, str) else chunk.images
  175. return ImagesResponse([Image(image) for image in images])
  176. raise NoImageResponseError()
  177. def create_variation(self, image: ImageType, model: str = None, **kwargs):
  178. provider = self.models.get(model, self.provider)
  179. result = None
  180. if isinstance(provider, type) and issubclass(provider, BaseProvider):
  181. response = provider.create_completion(
  182. "",
  183. [{"role": "user", "content": "create a image like this"}],
  184. True,
  185. image=image,
  186. proxy=self.client.get_proxy(),
  187. **kwargs
  188. )
  189. for chunk in response:
  190. if isinstance(chunk, ImageProviderResponse):
  191. result = ([chunk.images] if isinstance(chunk.images, str) else chunk.images)
  192. result = ImagesResponse([Image(image)for image in result])
  193. if result is None:
  194. raise NoImageResponseError()
  195. return result