"""chatgpt-alphabet.ipynb |
|
|
|
Automatically generated by Colaboratory. |
|
|
|
Original file is located at |
|
https://colab.research.google.com/drive/1BFy2tYkYPr7mev8Gg9AMT8ZD_Nj0hH3U |
|
""" |
|
|
|
|
|
|
|

import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# Map each character to an integer index and back again.
char_to_int = dict((c, i) for i, c in enumerate(alphabet))
int_to_char = dict((i, c) for i, c in enumerate(alphabet))

# Build the training pairs: each input is a single character and the target
# is the character that follows it (A -> B, B -> C, ..., Y -> Z).
seq_length = 1
dataX = []
dataY = []
for i in range(0, len(alphabet) - seq_length, 1):
    seq_in = alphabet[i:i + seq_length]
    seq_out = alphabet[i + seq_length]
    dataX.append([char_to_int[char] for char in seq_in])
    dataY.append(char_to_int[seq_out])
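
# Optional sanity check (added here; not part of the generated notebook):
# print the first few encoded pairs to confirm the mapping, e.g. ['A'] -> 'B'.
for seq, target in list(zip(dataX, dataY))[:3]:
    print([int_to_char[v] for v in seq], "->", int_to_char[target])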

# Reshape the inputs to (samples, time steps, features) and one-hot encode
# the targets.
X = np.reshape(dataX, (len(dataX), seq_length, 1))
y = tf.keras.utils.to_categorical(dataY)

# A single LSTM layer followed by a softmax over the 26 possible next letters.
model = Sequential()
model.add(LSTM(32, input_shape=(X.shape[1], X.shape[2])))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')

model.fit(X, y, epochs=500, batch_size=1, verbose=2)

# Demonstrate the fit: predict the successor of every training pattern.
for pattern in dataX:
    x = np.reshape(pattern, (1, len(pattern), 1))
    prediction = model.predict(x, verbose=0)
    index = np.argmax(prediction)
    result = int_to_char[index]
    seq_in = [int_to_char[value] for value in pattern]
    print(seq_in, "->", result)

# Save the trained model and download it from the Colab runtime.
model.save('alphabet_model.h5')
from google.colab import files
files.download('alphabet_model.h5')
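
# Reloading sketch (added; assumes the file keeps the name 'alphabet_model.h5'):
# the saved model can be restored with tf.keras.models.load_model and queried
# exactly like the in-memory model above.
reloaded = tf.keras.models.load_model('alphabet_model.h5')
sample = np.reshape([char_to_int['A']], (1, seq_length, 1))
print('A', "->", int_to_char[np.argmax(reloaded.predict(sample, verbose=0))])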