# Bing.py
from __future__ import annotations

import json
import os
import random
import time
import uuid
from urllib import parse

from aiohttp import ClientSession, ClientTimeout

from ..typing import AsyncResult, Messages
from ..webdriver import get_browser, get_driver_cookies
from .base_provider import AsyncGeneratorProvider
from .bing.upload_image import upload_image
from .bing.create_images import create_images, format_images_markdown, wait_for_login
from .bing.conversation import Conversation, create_conversation, delete_conversation
  15. class Tones():
  16. creative = "Creative"
  17. balanced = "Balanced"
  18. precise = "Precise"
  19. class Bing(AsyncGeneratorProvider):
  20. url = "https://bing.com/chat"
  21. working = True
  22. supports_message_history = True
  23. supports_gpt_4 = True
  24. @staticmethod
  25. def create_async_generator(
  26. model: str,
  27. messages: Messages,
  28. proxy: str = None,
  29. cookies: dict = None,
  30. tone: str = Tones.creative,
  31. image: str = None,
  32. web_search: bool = False,
  33. **kwargs
  34. ) -> AsyncResult:
  35. if len(messages) < 2:
  36. prompt = messages[0]["content"]
  37. context = None
  38. else:
  39. prompt = messages[-1]["content"]
  40. context = create_context(messages[:-1])
  41. if not cookies:
  42. cookies = Defaults.cookies
  43. else:
  44. for key, value in Defaults.cookies.items():
  45. if key not in cookies:
  46. cookies[key] = value
  47. gpt4_turbo = True if model.startswith("gpt-4-turbo") else False
  48. return stream_generate(prompt, tone, image, context, proxy, cookies, web_search, gpt4_turbo)
  49. def create_context(messages: Messages):
  50. return "".join(
  51. f"[{message['role']}]" + ("(#message)" if message['role']!="system" else "(#additional_instructions)") + f"\n{message['content']}\n\n"
  52. for message in messages
  53. )
  54. class Defaults:
  55. delimiter = "\x1e"
  56. ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
  57. allowedMessageTypes = [
  58. "ActionRequest",
  59. "Chat",
  60. "Context",
  61. # "Disengaged", unwanted
  62. "Progress",
  63. # "AdsQuery", unwanted
  64. "SemanticSerp",
  65. "GenerateContentQuery",
  66. "SearchQuery",
  67. # The following message types should not be added so that it does not flood with
  68. # useless messages (such as "Analyzing images" or "Searching the web") while it's retrieving the AI response
  69. # "InternalSearchQuery",
  70. # "InternalSearchResult",
  71. "RenderCardRequest",
  72. # "RenderContentRequest"
  73. ]
  74. sliceIds = [
  75. 'abv2',
  76. 'srdicton',
  77. 'convcssclick',
  78. 'stylewv2',
  79. 'contctxp2tf',
  80. '802fluxv1pc_a',
  81. '806log2sphs0',
  82. '727savemem',
  83. '277teditgnds0',
  84. '207hlthgrds0',
  85. ]
  86. location = {
  87. "locale": "en-US",
  88. "market": "en-US",
  89. "region": "US",
  90. "locationHints": [
  91. {
  92. "country": "United States",
  93. "state": "California",
  94. "city": "Los Angeles",
  95. "timezoneoffset": 8,
  96. "countryConfidence": 8,
  97. "Center": {"Latitude": 34.0536909, "Longitude": -118.242766},
  98. "RegionType": 2,
  99. "SourceType": 1,
  100. }
  101. ],
  102. }
  103. headers = {
  104. 'accept': '*/*',
  105. 'accept-language': 'en-US,en;q=0.9',
  106. 'cache-control': 'max-age=0',
  107. 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
  108. 'sec-ch-ua-arch': '"x86"',
  109. 'sec-ch-ua-bitness': '"64"',
  110. 'sec-ch-ua-full-version': '"110.0.1587.69"',
  111. 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
  112. 'sec-ch-ua-mobile': '?0',
  113. 'sec-ch-ua-model': '""',
  114. 'sec-ch-ua-platform': '"Windows"',
  115. 'sec-ch-ua-platform-version': '"15.0.0"',
  116. 'sec-fetch-dest': 'document',
  117. 'sec-fetch-mode': 'navigate',
  118. 'sec-fetch-site': 'none',
  119. 'sec-fetch-user': '?1',
  120. 'upgrade-insecure-requests': '1',
  121. 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
  122. 'x-edge-shopping-flag': '1',
  123. 'x-forwarded-for': ip_address,
  124. }
  125. optionsSets = [
  126. 'nlu_direct_response_filter',
  127. 'deepleo',
  128. 'disable_emoji_spoken_text',
  129. 'responsible_ai_policy_235',
  130. 'enablemm',
  131. 'iyxapbing',
  132. 'iycapbing',
  133. 'gencontentv3',
  134. 'fluxsrtrunc',
  135. 'fluxtrunc',
  136. 'fluxv1',
  137. 'rai278',
  138. 'replaceurl',
  139. 'eredirecturl',
  140. 'nojbfedge'
  141. ]
  142. cookies = {
  143. 'SRCHD' : 'AF=NOFORM',
  144. 'PPLState' : '1',
  145. 'KievRPSSecAuth': '',
  146. 'SUID' : '',
  147. 'SRCHUSR' : '',
  148. 'SRCHHPGUSR' : f'HV={int(time.time())}',
  149. }
  150. def format_message(msg: dict) -> str:
  151. return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
  152. def create_message(
  153. conversation: Conversation,
  154. prompt: str,
  155. tone: str,
  156. context: str = None,
  157. image_info: dict = None,
  158. web_search: bool = False,
  159. gpt4_turbo: bool = False
  160. ) -> str:
  161. options_sets = Defaults.optionsSets
  162. if tone == Tones.creative:
  163. options_sets.append("h3imaginative")
  164. elif tone == Tones.precise:
  165. options_sets.append("h3precise")
  166. elif tone == Tones.balanced:
  167. options_sets.append("galileo")
  168. else:
  169. options_sets.append("harmonyv3")
  170. if not web_search:
  171. options_sets.append("nosearchall")
  172. if gpt4_turbo:
  173. options_sets.append("dlgpt4t")
  174. request_id = str(uuid.uuid4())
  175. struct = {
  176. 'arguments': [
  177. {
  178. 'source': 'cib',
  179. 'optionsSets': options_sets,
  180. 'allowedMessageTypes': Defaults.allowedMessageTypes,
  181. 'sliceIds': Defaults.sliceIds,
  182. 'traceId': os.urandom(16).hex(),
  183. 'isStartOfSession': True,
  184. 'requestId': request_id,
  185. 'message': {**Defaults.location, **{
  186. 'author': 'user',
  187. 'inputMethod': 'Keyboard',
  188. 'text': prompt,
  189. 'messageType': 'Chat',
  190. 'requestId': request_id,
  191. 'messageId': request_id,
  192. }},
  193. "scenario": "SERP",
  194. 'tone': tone,
  195. 'spokenTextMode': 'None',
  196. 'conversationId': conversation.conversationId,
  197. 'participant': {
  198. 'id': conversation.clientId
  199. },
  200. }
  201. ],
  202. 'invocationId': '1',
  203. 'target': 'chat',
  204. 'type': 4
  205. }
  206. if image_info and "imageUrl" in image_info and "originalImageUrl" in image_info:
  207. struct['arguments'][0]['message']['originalImageUrl'] = image_info['originalImageUrl']
  208. struct['arguments'][0]['message']['imageUrl'] = image_info['imageUrl']
  209. struct['arguments'][0]['experienceType'] = None
  210. struct['arguments'][0]['attachedFileInfo'] = {"fileName": None, "fileType": None}
  211. if context:
  212. struct['arguments'][0]['previousMessages'] = [{
  213. "author": "user",
  214. "description": context,
  215. "contextType": "WebPage",
  216. "messageType": "Context",
  217. "messageId": "discover-web--page-ping-mriduna-----"
  218. }]
  219. return format_message(struct)
  220. async def stream_generate(
  221. prompt: str,
  222. tone: str,
  223. image: str = None,
  224. context: str = None,
  225. proxy: str = None,
  226. cookies: dict = None,
  227. web_search: bool = False,
  228. gpt4_turbo: bool = False
  229. ):
  230. headers = Defaults.headers
  231. if cookies:
  232. headers["Cookie"] = "; ".join(f"{k}={v}" for k, v in cookies.items())
  233. async with ClientSession(
  234. timeout=ClientTimeout(total=900),
  235. headers=headers
  236. ) as session:
  237. conversation = await create_conversation(session, proxy)
  238. image_info = None
  239. if image:
  240. image_info = await upload_image(session, image, tone, proxy)
  241. try:
  242. async with session.ws_connect('wss://sydney.bing.com/sydney/ChatHub', autoping=False, params={'sec_access_token': conversation.conversationSignature}, proxy=proxy) as wss:
  243. await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
  244. await wss.receive(timeout=900)
  245. await wss.send_str(create_message(conversation, prompt, tone, context, image_info, web_search, gpt4_turbo))
  246. response_txt = ''
  247. returned_text = ''
  248. final = False
  249. while not final:
  250. msg = await wss.receive(timeout=900)
  251. objects = msg.data.split(Defaults.delimiter)
  252. for obj in objects:
  253. if obj is None or not obj:
  254. continue
  255. response = json.loads(obj)
  256. if response.get('type') == 1 and response['arguments'][0].get('messages'):
  257. message = response['arguments'][0]['messages'][0]
  258. if (message['contentOrigin'] != 'Apology'):
  259. if 'adaptiveCards' in message:
  260. card = message['adaptiveCards'][0]['body'][0]
  261. if "text" in card:
  262. response_txt = card.get('text')
  263. if message.get('messageType'):
  264. inline_txt = card['inlines'][0].get('text')
  265. response_txt += inline_txt + '\n'
  266. elif message.get('contentType') == "IMAGE":
  267. prompt = message.get('text')
  268. try:
  269. response_txt += format_images_markdown(await create_images(session, prompt, proxy), prompt)
  270. except:
  271. response_txt += f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}"
  272. final = True
  273. if response_txt.startswith(returned_text):
  274. new = response_txt[len(returned_text):]
  275. if new != "\n":
  276. yield new
  277. returned_text = response_txt
  278. elif response.get('type') == 2:
  279. result = response['item']['result']
  280. if result.get('error'):
  281. if result["value"] == "CaptchaChallenge":
  282. driver = get_browser(proxy=proxy)
  283. try:
  284. for chunk in wait_for_login(driver):
  285. yield chunk
  286. cookies = get_driver_cookies(driver)
  287. finally:
  288. driver.quit()
  289. async for chunk in stream_generate(prompt, tone, image, context, proxy, cookies, web_search, gpt4_turbo):
  290. yield chunk
  291. else:
  292. raise Exception(f"{result['value']}: {result['message']}")
  293. return
  294. finally:
  295. await delete_conversation(session, conversation, proxy)