# readme_table.py
import re
from urllib.parse import urlparse
import asyncio
from g4f import models, ChatCompletion
from g4f.base_provider import BaseProvider, BaseRetryProvider, ProviderType
from etc.testing._providers import get_providers
from g4f import debug

# Surface provider errors while probing so failures appear in the output.
debug.logging = True
  9. async def test_async(provider: ProviderType):
  10. if not provider.working:
  11. return False
  12. messages = [{"role": "user", "content": "Hello Assistant!"}]
  13. try:
  14. response = await asyncio.wait_for(ChatCompletion.create_async(
  15. model=models.default,
  16. messages=messages,
  17. provider=provider
  18. ), 30)
  19. return bool(response)
  20. except Exception as e:
  21. if debug.logging:
  22. print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
  23. return False
  24. def test_async_list(providers: list[ProviderType]):
  25. responses: list = [
  26. asyncio.run(test_async(_provider))
  27. for _provider in providers
  28. ]
  29. return responses
  30. def print_providers():
  31. providers = get_providers()
  32. responses = test_async_list(providers)
  33. for type in ("GPT-4", "GPT-3.5", "Other"):
  34. lines = [
  35. "",
  36. f"### {type}",
  37. "",
  38. "| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |",
  39. "| ------ | ------- | ------- | ----- | ------ | ------ | ---- |",
  40. ]
  41. for is_working in (True, False):
  42. for idx, _provider in enumerate(providers):
  43. if is_working != _provider.working:
  44. continue
  45. do_continue = False
  46. if type == "GPT-4" and _provider.supports_gpt_4:
  47. do_continue = True
  48. elif type == "GPT-3.5" and not _provider.supports_gpt_4 and _provider.supports_gpt_35_turbo:
  49. do_continue = True
  50. elif type == "Other" and not _provider.supports_gpt_4 and not _provider.supports_gpt_35_turbo:
  51. do_continue = True
  52. if not do_continue:
  53. continue
  54. netloc = urlparse(_provider.url).netloc.replace("www.", "")
  55. website = f"[{netloc}]({_provider.url})"
  56. provider_name = f"`g4f.Provider.{_provider.__name__}`"
  57. has_gpt_35 = "✔️" if _provider.supports_gpt_35_turbo else "❌"
  58. has_gpt_4 = "✔️" if _provider.supports_gpt_4 else "❌"
  59. stream = "✔️" if _provider.supports_stream else "❌"
  60. if _provider.working:
  61. status = '![Active](https://img.shields.io/badge/Active-brightgreen)'
  62. if responses[idx]:
  63. status = '![Active](https://img.shields.io/badge/Active-brightgreen)'
  64. else:
  65. status = '![Unknown](https://img.shields.io/badge/Unknown-grey)'
  66. else:
  67. status = '![Inactive](https://img.shields.io/badge/Inactive-red)'
  68. auth = "✔️" if _provider.needs_auth else "❌"
  69. lines.append(
  70. f"| {website} | {provider_name} | {has_gpt_35} | {has_gpt_4} | {stream} | {status} | {auth} |"
  71. )
  72. print("\n".join(lines))
  73. def print_models():
  74. base_provider_names = {
  75. "google": "Google",
  76. "openai": "OpenAI",
  77. "huggingface": "Huggingface",
  78. "anthropic": "Anthropic",
  79. "inflection": "Inflection"
  80. }
  81. provider_urls = {
  82. "google": "https://gemini.google.com/",
  83. "openai": "https://openai.com/",
  84. "huggingface": "https://huggingface.co/",
  85. "anthropic": "https://www.anthropic.com/",
  86. "inflection": "https://inflection.ai/",
  87. }
  88. lines = [
  89. "| Model | Base Provider | Provider | Website |",
  90. "| ----- | ------------- | -------- | ------- |",
  91. ]
  92. for name, model in models.ModelUtils.convert.items():
  93. if name.startswith("gpt-3.5") or name.startswith("gpt-4"):
  94. if name not in ("gpt-3.5-turbo", "gpt-4", "gpt-4-turbo"):
  95. continue
  96. name = re.split(r":|/", model.name)[-1]
  97. base_provider = base_provider_names[model.base_provider]
  98. if not isinstance(model.best_provider, BaseRetryProvider):
  99. provider_name = f"g4f.Provider.{model.best_provider.__name__}"
  100. else:
  101. provider_name = f"{len(model.best_provider.providers)}+ Providers"
  102. provider_url = provider_urls[model.base_provider]
  103. netloc = urlparse(provider_url).netloc.replace("www.", "")
  104. website = f"[{netloc}]({provider_url})"
  105. lines.append(f"| {name} | {base_provider} | {provider_name} | {website} |")
  106. print("\n".join(lines))
if __name__ == "__main__":
    # Provider probing is slow and noisy; uncomment the next two lines to
    # regenerate the provider table alongside the model table.
    #print_providers()
    #print("\n", "-" * 50, "\n")
    print_models()