Update app.py

app.py CHANGED
@@ -2,6 +2,8 @@ import gradio as gr
 from model import DecoderTransformer, Tokenizer
 from huggingface_hub import hf_hub_download
 import torch
+import chess
+import chess.svg
 
 
 vocab_size=33
@@ -25,7 +27,10 @@ tokenizer = Tokenizer.from_pretrained(tokenizer_path)
 
 def generate(prompt):
     model_input = torch.tensor(tokenizer.encode(prompt), dtype=torch.long, device=device).view((1, len(prompt)))
-
+    pgn = tokenizer.decode(model.generate(model_input, max_new_tokens=4, context_size=context_size)[0].tolist())
+    game = chess.pgn.read_game(pgn)
+    img = chess.svg.board(game.board())
+    return pgn, img
 
 
 with gr.Blocks() as demo:
@@ -42,14 +47,14 @@ with gr.Blocks() as demo:
 
     submit = gr.Button("Submit")
     submit.click(generate, [prompt], [output])
-
+    img = gr.Image()
     gr.Examples(
         [
             ["1. e4", ],
             ["1. e4 g6 2."],
         ],
         inputs=[prompt],
-        outputs=[output],
+        outputs=[output, img],
         fn=generate
     )
 demo.launch()
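For context, a minimal sketch of how the updated generate function could fit together outside the diff view. It assumes model, tokenizer, device and context_size are defined earlier in app.py as in the original file. Two details here are sketched assumptions rather than what the diff does: chess.pgn.read_game expects a text stream rather than a raw string, so the decoded PGN is wrapped in io.StringIO, and the parsed moves are replayed onto the board before rendering, since game.board() on the root game node gives only the starting position. chess.pgn is also imported explicitly, because importing chess.svg alone does not make it available.

import io

import chess.pgn
import chess.svg
import torch

def generate(prompt):
    # model, tokenizer, device and context_size come from earlier in app.py.
    model_input = torch.tensor(
        tokenizer.encode(prompt), dtype=torch.long, device=device
    ).view((1, len(prompt)))
    pgn = tokenizer.decode(
        model.generate(model_input, max_new_tokens=4, context_size=context_size)[0].tolist()
    )

    # read_game takes a file-like object, so wrap the decoded PGN string.
    game = chess.pgn.read_game(io.StringIO(pgn))

    # Replay the mainline moves to reach the last position; game.board()
    # by itself is the starting position of the game.
    board = game.board()
    for move in game.mainline_moves():
        board.push(move)

    # chess.svg.board returns the rendered board as an SVG string.
    img = chess.svg.board(board)
    return pgn, img

One caveat on the output wiring: gr.Image generally expects a NumPy array, PIL image, or file path, so the SVG string may need to be written to a file (or shown through an HTML component) before it displays; that part is left as in the diff.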