To illustrate the principle of the LSTM, here is a simple code example that predicts the last letter of a word from its first three letters.

# LSTM example: predict the last letter of a word
import tensorflow as tf
import numpy as np

tf.reset_default_graph()

# Words whose last letter we want to predict
words = ['make', 'need', 'coal', 'word', 'love', 'hate', 'live', 'home', 'hash', 'star']

# Character dictionary and index lookups
chars = [c for c in 'abcdefghijklmnopqrstuvwxyz']
word2idx = {v: k for k, v in enumerate(chars)}  # character -> index
idx2word = {k: v for k, v in enumerate(chars)}  # index -> character
V = len(chars)    # vocabulary size

step = 3          # time steps (the first three letters)
hidden = 50       # number of hidden units
dim = 32          # embedding dimension

def make_batch(words):
    input_batch, target_batch = [], []
    for word in words:
        input = [word2idx[c] for c in word[:-1]]   # indices of the first three letters
        target = word2idx[word[-1]]                # index of the last letter (the label)
        input_batch.append(input)
        # one-hot label, used later by softmax_cross_entropy_with_logits_v2
        target_batch.append(np.eye(V)[target])
    return input_batch, target_batch

embedding = tf.get_variable(name='embedding', shape=[V, dim],
                            initializer=tf.random_normal_initializer)
X = tf.placeholder(tf.int32, [None, step])
XX = tf.nn.embedding_lookup(embedding, X)
Y = tf.placeholder(tf.float32, [None, V])

# LSTM cell
cell = tf.nn.rnn_cell.BasicLSTMCell(hidden)
# outputs: [batch_size, step, hidden]
# states:  (c=[batch_size, hidden], h=[batch_size, hidden])
outputs, states = tf.nn.dynamic_rnn(cell, XX, dtype=tf.float32)

W = tf.Variable(tf.random_normal([hidden, V]))
b = tf.Variable(tf.random_normal([V]))
feature = tf.matmul(states[0], W) + b   # project the final LSTM state to [batch_size, V]

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=feature, labels=Y))
optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)

# prediction
prediction = tf.argmax(feature, 1)

# initialize
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

# produce inputs and labels
input_batch, target_batch = make_batch(words)

# train the model
for epoch in range(1000):
    _, loss = sess.run([optimizer, cost], feed_dict={X: input_batch, Y: target_batch})
    if (epoch + 1) % 100 == 0:
        print('epoch:', '%04d' % (epoch + 1), 'cost =', '%.6f' % loss)

# predicted results
predict = sess.run([prediction], feed_dict={X: input_batch})
print([words[i][:-1] + ' ' + idx2word[c] for i, c in enumerate(predict[0])])
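To make the data layout concrete, here is a minimal sketch (not in the original example) of what make_batch produces for a single word; the index values follow from the a–z dictionary defined above (a = 0, b = 1, ...).

# Encoding of the word 'make' under the a-z dictionary above
example_input = [word2idx[c] for c in 'make'[:-1]]    # 'm', 'a', 'k' -> [12, 0, 10]
example_target = np.eye(V)[word2idx['make'[-1]]]      # 'e' -> one-hot vector with a 1 at index 4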

The printed results:

epoch: 0100 cost = 0.003784
epoch: 0200 cost = 0.001891
epoch: 0300 cost = 0.001122
epoch: 0400 cost = 0.0039
epoch: 0500 cost = 0.000522
epoch: 0600 cost = 0.000388
epoch: 0700 cost = 0.000300
epoch: 0800 cost = 0.000238
epoch: 0900 cost = 0.000193
epoch: 1000 cost = 0.000160
['mak e', 'nee d', 'coa l', 'wor d', 'lov e', 'hat e', 'liv e', 'hom e', 'has h', 'sta r']
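Since the session is still open after training, the same graph can also be queried for a word outside the training set. The following is a minimal sketch, not part of the original example; 'cold' is a hypothetical test word, and the predicted letter depends entirely on what the model learned from the ten training words.

test_word = 'cold'                                       # hypothetical word, not in the training set
test_input = [[word2idx[c] for c in test_word[:-1]]]     # shape [1, 3]: indices of 'c', 'o', 'l'
pred = sess.run(prediction, feed_dict={X: test_input})   # argmax over the 26 letters
print(test_word[:-1] + ' ' + idx2word[pred[0]])          # model's guess for the last letter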