# coder.py — chat-loop coding assistant demo (OpenHermes-2.5-Mistral-7B)
  1. #!/usr/bin/env python3
  2. import os, sys, traceback
  3. sys.path.append(os.getcwd())
  4. from io import StringIO
  5. from contextlib import redirect_stdout
  6. from tinygrad import Tensor, nn, Device, dtypes
  7. from tinygrad.helpers import Timing, colored, getenv, fetch
  8. from extra.models.llama import Transformer, convert_from_huggingface, fix_bf16
  9. from sentencepiece import SentencePieceProcessor
  10. def create_fixed_tokenizer(output_file):
  11. print("creating fixed tokenizer")
  12. import extra.junk.sentencepiece_model_pb2 as spb2
  13. mp = spb2.ModelProto()
  14. mp.ParseFromString(fetch("https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B/resolve/main/tokenizer.model?download=true").read_bytes())
  15. mp.pieces.append(spb2.ModelProto.SentencePiece(piece="<|im_end|>", score=0))
  16. mp.pieces.append(spb2.ModelProto.SentencePiece(piece="<|im_start|>", score=0))
  17. with open(output_file, "wb") as f:
  18. f.write(mp.SerializeToString())
  19. # example:
  20. # echo -en "write 2+2\nwrite hello world\ny\n" | TEMP=0 python3 examples/coder.py
  21. if __name__ == "__main__":
  22. Tensor.no_grad = True
  23. # https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B/blob/main/config.json
  24. with Timing("create model: "):
  25. model = Transformer(4096, 14336, n_heads=32, n_layers=32, norm_eps=1e-5, vocab_size=32002, n_kv_heads=8, max_context=4096, jit=getenv("JIT", 1))
  26. with Timing("download weights: "):
  27. part1 = nn.state.torch_load(fetch("https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B/resolve/main/pytorch_model-00001-of-00002.bin?download=true"))
  28. part2 = nn.state.torch_load(fetch("https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B/resolve/main/pytorch_model-00002-of-00002.bin?download=true"))
  29. with Timing("weights -> model: "):
  30. nn.state.load_state_dict(model, fix_bf16(convert_from_huggingface(part1, model, 32, 8)), strict=False)
  31. nn.state.load_state_dict(model, fix_bf16(convert_from_huggingface(part2, model, 32, 8)), strict=False)
  32. if not os.path.isfile("/tmp/tokenizer.model"): create_fixed_tokenizer("/tmp/tokenizer.model")
  33. spp = SentencePieceProcessor(model_file="/tmp/tokenizer.model")
  34. # https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B/blob/main/tokenizer_config.json
  35. # "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
  36. IM_END = 32000
  37. IM_START = 32001
  38. def encode_prompt(k, v): return [IM_START]+spp.encode(f"{k}\n{v}")+[IM_END]+spp.encode("\n")
  39. def start_prompt(k): return [IM_START]+spp.encode(f"{k}\n")
  40. def output(outputted, toks, color):
  41. cur = spp.decode(toks)[len(outputted):]
  42. sys.stdout.write(colored(cur, color))
  43. sys.stdout.flush()
  44. outputted += cur
  45. return outputted
  46. # *** app below this line ***
  47. toks = [spp.bos_id()] + encode_prompt("system", "You are Quentin. Quentin is a useful assistant who writes Python code to answer questions. He keeps the code as short as possible and doesn't read from user input")
  48. PROMPT = getenv("PROMPT", 1)
  49. temperature = getenv("TEMP", 0.7)
  50. start_pos = 0
  51. outputted = output("", toks, "green")
  52. turn = True
  53. while 1:
  54. if PROMPT:
  55. toks += encode_prompt("user", input("Q: ")) + start_prompt("assistant")
  56. else:
  57. toks += start_prompt("user" if turn else "assistant")
  58. turn = not turn
  59. old_output_len = len(outputted)
  60. while 1:
  61. tok = model(Tensor([toks[start_pos:]]), start_pos, temperature).item()
  62. start_pos = len(toks)
  63. toks.append(tok)
  64. outputted = output(outputted, toks, "blue" if not turn else "cyan")
  65. if tok == IM_END: break
  66. if tok == spp.eos_id(): break
  67. new_output = outputted[old_output_len:]
  68. if new_output.endswith("```") and '```python\n' in new_output:
  69. python_code = new_output.split('```python\n')[1].split("```")[0]
  70. # AI safety. Warning to user. Do not press y if the AI is trying to do unsafe things.
  71. if input(colored(f" <-- PYTHON DETECTED, RUN IT? ", "red")).lower() == 'y':
  72. my_stdout = StringIO()
  73. try:
  74. with redirect_stdout(my_stdout): exec(python_code)
  75. result = my_stdout.getvalue()
  76. except Exception as e:
  77. result = ''.join(traceback.format_exception_only(e))
  78. toks += spp.encode(f"\nOutput:\n```\n{result}```")
  79. outputted = output(outputted, toks, "yellow")
  80. old_output_len = len(outputted)
  81. print("")