
open_ai_session.py

from bot.session_manager import Session
from common.log import logger


class OpenAISession(Session):
    def __init__(self, session_id, system_prompt=None, model="text-davinci-003"):
        super().__init__(session_id, system_prompt)
        self.conversation = []
        self.model = model
        self.reset()

    def reset(self):
        pass

    def add_query(self, query):
        question = {'type': 'question', 'content': query}
        self.conversation.append(question)

    def add_reply(self, reply):
        answer = {'type': 'answer', 'content': reply}
        self.conversation.append(answer)

    def __str__(self):
        '''
        Render the conversation as a completion-style prompt, e.g.
            Q: xxx
            A: xxx
            Q: xxx
        '''
        prompt = self.system_prompt
        if prompt:
            prompt += "<|endoftext|>\n\n\n"
        for item in self.conversation:
            if item['type'] == 'question':
                prompt += "Q: " + item['content'] + "\n"
            elif item['type'] == 'answer':
                prompt += "\n\nA: " + item['content'] + "<|endoftext|>\n"
        # If the latest turn is an unanswered question, cue the model to answer it.
        if len(self.conversation) > 0 and self.conversation[-1]['type'] == 'question':
            prompt += "A: "
        return prompt

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        """Drop the oldest turns until the rendered prompt fits within max_tokens."""
        precise = True
        try:
            cur_tokens = num_tokens_from_string(str(self), self.model)
        except Exception as e:
            # Fall back to a rough length-based estimate if precise counting fails.
            precise = False
            if cur_tokens is None:
                raise e
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
        while cur_tokens > max_tokens:
            if len(self.conversation) > 1:
                # Drop the oldest turn and re-count on the next loop iteration.
                self.conversation.pop(0)
            elif len(self.conversation) == 1 and self.conversation[0]["type"] == "answer":
                self.conversation.pop(0)
                if precise:
                    cur_tokens = num_tokens_from_string(str(self), self.model)
                else:
                    cur_tokens = len(str(self))
                break
            elif len(self.conversation) == 1 and self.conversation[0]["type"] == "question":
                logger.warn("user question exceed max_tokens. total_tokens={}".format(cur_tokens))
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(conversation)={}".format(max_tokens, cur_tokens, len(self.conversation)))
                break
            if precise:
                cur_tokens = num_tokens_from_string(str(self), self.model)
            else:
                cur_tokens = len(str(self))
        return cur_tokens


# refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_string(string: str, model: str) -> int:
    """Returns the number of tokens in a text string."""
    import tiktoken

    encoding = tiktoken.encoding_for_model(model)
    num_tokens = len(encoding.encode(string, disallowed_special=()))
    return num_tokens
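For reference, a minimal usage sketch (not part of open_ai_session.py; the session id, system prompt, and 4096-token budget below are illustrative assumptions): queries and replies are appended to the session, str(session) renders the Q:/A: completion prompt, and discard_exceeding() drops the oldest turns when the prompt exceeds the token budget.

    # Hypothetical usage; values are assumed, not taken from the repository.
    session = OpenAISession("user_123", system_prompt="You are a helpful assistant.")
    session.add_query("What is the capital of France?")
    prompt = str(session)  # ends with "A: " so the completion model answers the pending question
    session.add_reply("The capital of France is Paris.")
    session.add_query("And its population?")
    # Keep the rendered prompt within an assumed 4096-token budget before the next API call.
    cur_tokens = session.discard_exceeding(max_tokens=4096)
    print(cur_tokens, num_tokens_from_string(str(session), "text-davinci-003"))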