#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
from __future__ import print_function
from keras.callbacks import LambdaCallback
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import numpy as np
import random
import sys
import io
import os
import warnings

argvs = sys.argv
argc = len(argvs)
if argc != 2:
    print('usage: {} <seed text>'.format(argvs[0]), file=sys.stderr)
    sys.exit(1)

# download the corpus and build the character vocabulary
path = get_file('ginga.txt', origin='https://www.terra.jp.net/dysd/keras/ginga.txt')
with io.open(path, encoding='utf-8') as f:
    text = f.read().lower()

chars = sorted(list(set(text)))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))

# length of the character window the model was trained on
maxlen = 40

# load the trained model
model = load_model('ginga_300.h5')


def sample(preds, temperature=1.0):
    # helper function to sample an index from a probability array
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)


def on_epoch_end(epoch):
    # Generates and prints text from the seed given on the command line.
    for diversity in [0.5]:
        generated = ''
        # lower-case the seed to match the preprocessing applied to the corpus;
        # every character must occur in ginga.txt or char_indices raises KeyError
        sentence = argvs[1].lower()
        generated += sentence
        # left-pad with newlines, then keep the last maxlen characters as the input window
        sentence = '\n' * maxlen + sentence
        sentence = sentence[-maxlen:]
        for i in range(800):
            # one-hot encode the current window
            x_pred = np.zeros((1, maxlen, len(chars)))
            for t, char in enumerate(sentence):
                x_pred[0, t, char_indices[char]] = 1.
            preds = model.predict(x_pred, verbose=0)[0]
            next_index = sample(preds, diversity)
            next_char = indices_char[next_index]
            generated += next_char
            # print(sentence, ':', next_char)
            # slide the window: drop the oldest character and append the new one
            if len(sentence) >= maxlen:
                sentence = sentence[1:]
            sentence += next_char
        print(generated)


# run generation once from the command-line seed
on_epoch_end(0)
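
# Example invocation (a sketch: the file name generate_ginga.py and the seed
# string below are assumptions, and every character of the seed must occur in
# ginga.txt so it can be one-hot encoded):
#
#   $ python3.6 generate_ginga.py "銀河鉄道"
#
# The script downloads ginga.txt to rebuild the character vocabulary, loads the
# pre-trained weights from ginga_300.h5 in the current directory, and prints the
# seed followed by 800 characters generated at diversity 0.5.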