Preface

1. Encapsulating basic operations

1. Convolution operation

def __conv2d(self, input, weights, stride, padding='SAME'):
    layer = tf.nn.conv2d(input=input,     # the input tensor
                         filter=weights,  # the convolution kernel tensor: (filter_height, filter_width, in_channels, out_channels)
                         strides=[1, stride, stride, 1],
                         padding=padding)
    return layer
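A quick standalone check of what this wrapper computes. The 80x80, 4-channel input and the 8x8 / stride-4 kernel are example values borrowed from the network in section 3; the initializer is an assumption:

import tensorflow as tf

# example values only: an 80x80, 4-channel input batch and 32 kernels of size 8x8
x = tf.placeholder(tf.float32, [None, 80, 80, 4])
w = tf.Variable(tf.truncated_normal([8, 8, 4, 32], stddev=0.01))
y = tf.nn.conv2d(input=x, filter=w, strides=[1, 4, 4, 1], padding='SAME')
print(y.get_shape())  # (?, 20, 20, 32): with 'SAME' padding, output size = ceil(80 / 4) = 20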

2. Maximum pooling operation

def __maxpool2d(self, input, stride=2, padding='SAME'):
    layer = tf.nn.max_pool(value=input,                     # a 4-D float32 tensor
                           ksize=[1, stride, stride, 1],    # the size of the window in each dimension
                           strides=[1, stride, stride, 1],  # the stride of the sliding window in each dimension
                           padding=padding)                 # 'VALID' or 'SAME'
    return layer
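A shape check for the pooling wrapper, using an assumed (?, 20, 20, 32) feature map like the one layer_conv1 produces in section 3:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 20, 20, 32])  # assumed example feature map
pool = tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
print(pool.get_shape())  # (?, 10, 10, 32): spatial size is halved, the channel count is unchanged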

3. Flattening operation

def __flatten_layer(self, layer):
    layer_shape = layer.get_shape()                   # e.g. (?, 8, 8, 64)
    num_features = layer_shape[1:4].num_elements()    # [1:4]: (8, 8, 64), num_features: 4096
    re_layer = tf.reshape(layer, [-1, num_features])  # (?, 4096)
    return re_layer
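The same logic outside the class, using the (?, 8, 8, 64) shape from the comments above as an assumed input:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 8, 8, 64])   # assumed example feature map
num_features = x.get_shape()[1:4].num_elements()   # 8 * 8 * 64 = 4096
flat = tf.reshape(x, [-1, num_features])
print(flat.get_shape())  # (?, 4096)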

2. Encapsulating the basic neural layers

1. Encapsulating the fully connected neural layer

def _define_fc_layer(self, inputs,
                     num_inputs,
                     num_outputs,
                     activation_function,        # activation function
                     layer_name,                 # layer name
                     c_names=None,
                     regularizer__function=None,
                     is_historgram=True):
    """Encapsulate a fully connected neural layer."""
    with tf.variable_scope(layer_name, reuse=tf.AUTO_REUSE):
        weights = self.__define_weights(shape=[num_inputs, num_outputs],
                                        c_names=c_names,
                                        regularizer__function=regularizer__function)
        biases = self.__define_biases(size=num_outputs, c_names=c_names)
        with tf.variable_scope('wx_plus_b'):
            wx_plus_b = tf.matmul(inputs, weights) + biases  # the pre-activation value of the neurons
        if activation_function is None:
            outputs = wx_plus_b
        else:
            outputs = activation_function(wx_plus_b)
        if is_historgram:
            tf.summary.histogram(layer_name + '/outputs', outputs)  # record the outputs of this layer
    return outputs  # return the outputs of the neural layer
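The helpers __define_weights and __define_biases are not shown in this post. A minimal sketch of what they might look like, assuming truncated-normal weight initialization, constant bias initialization, and a 'losses' collection for the regularization term (all of these are assumptions, not the author's code):

import tensorflow as tf

def __define_weights(self, shape, c_names=None, regularizer__function=None):
    # hypothetical sketch: trainable weights with truncated-normal initialization
    weights = tf.get_variable('weights', shape,
                              initializer=tf.truncated_normal_initializer(stddev=0.01),
                              collections=c_names)
    if regularizer__function is not None:
        # assumed convention: collect the regularization term into a 'losses' collection
        tf.add_to_collection('losses', regularizer__function(weights))
    return weights

def __define_biases(self, size, c_names=None):
    # hypothetical sketch: biases initialized to a small constant
    return tf.get_variable('biases', [size],
                           initializer=tf.constant_initializer(0.01),
                           collections=c_names)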

2. Encapsulating the convolutional neural layer

def _define_conv2d_layer(self, inputs,
                         conv_filter_size,      # convolution kernel size
                         num_input_channels,    # number of input channels
                         num_filters,           # number of convolution kernels
                         stride,                # convolution stride
                         activation_function,   # activation function
                         layer_name,            # layer name
                         c_names=None,
                         regularizer__function=None,
                         is_historgram=True):
    """Encapsulate a convolutional neural layer."""
    with tf.variable_scope(layer_name, reuse=tf.AUTO_REUSE):
        weights = self.__define_weights(shape=[conv_filter_size, conv_filter_size,
                                               num_input_channels, num_filters],
                                        c_names=c_names,
                                        regularizer__function=regularizer__function)
        biases = self.__define_biases(size=num_filters, c_names=c_names)
        with tf.variable_scope('conv_plus_b'):
            conv_plus_b = self.__conv2d(inputs, weights, stride) + biases  # the pre-activation value of the neurons
        if activation_function is None:
            outputs = conv_plus_b
        else:
            outputs = activation_function(conv_plus_b)
        if is_historgram:
            tf.summary.histogram(layer_name + '/outputs', outputs)  # record the outputs of this layer
    return outputs  # return the outputs of the neural layer
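One consequence of this parameterization is that a layer's parameter count follows directly from the arguments; for example, for layer_conv1 in the next section (plain arithmetic, no TensorFlow needed):

# weights shape: (conv_filter_size, conv_filter_size, num_input_channels, num_filters)
conv_filter_size, num_input_channels, num_filters = 8, 4, 32
weight_params = conv_filter_size ** 2 * num_input_channels * num_filters  # 8 * 8 * 4 * 32 = 8192
bias_params = num_filters                                                 # one bias per kernel
print(weight_params + bias_params)  # 8224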

3. Encapsulating the basic neural network

# define convolutional layer l1
layer_conv1 = self._define_conv2d_layer(inputs=input,
                                        conv_filter_size=8,
                                        num_input_channels=4,
                                        num_filters=32,
                                        stride=4,
                                        activation_function=tf.nn.relu,
                                        c_names=c_names,
                                        layer_name='layer_conv1')
layer_conv_pool1 = self.__maxpool2d(layer_conv1)
# define convolutional layer l2
layer_conv2 = self._define_conv2d_layer(inputs=layer_conv_pool1,
                                        conv_filter_size=4,
                                        num_input_channels=32,
                                        num_filters=64,
                                        stride=2,
                                        activation_function=tf.nn.relu,
                                        c_names=c_names,
                                        layer_name='layer_conv2')
# define convolutional layer l3
layer_conv3 = self._define_conv2d_layer(inputs=layer_conv2,
                                        conv_filter_size=3,
                                        num_input_channels=64,
                                        num_filters=64,
                                        stride=1,
                                        activation_function=tf.nn.relu,
                                        c_names=c_names,
                                        layer_name='layer_conv3')
# flatten
layer_conv3_flat = self.__flatten_layer(layer_conv3)  # (?, 1600)
# define fully connected layer l4
layer_fnn4 = self._define_fc_layer(inputs=layer_conv3_flat,
                                   num_inputs=1600,
                                   num_outputs=512,
                                   activation_function=tf.nn.relu,
                                   c_names=c_names,
                                   layer_name='layer_fnn4')
# define output layer l5
output = self._define_fc_layer(inputs=layer_fnn4,
                               num_inputs=512,
                               num_outputs=self.n_actions,
                               activation_function=None,
                               c_names=c_names,
                               layer_name='layer_fnn5')
return output
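The num_inputs=1600 of layer_fnn4 implies an 80x80 input image with 4 channels (an assumption; the input shape is not shown in this excerpt). A quick trace using the 'SAME'-padding rule output_size = ceil(input_size / stride):

import math

size = 80                   # assumed input height/width
size = math.ceil(size / 4)  # layer_conv1, stride 4 -> 20
size = math.ceil(size / 2)  # __maxpool2d, stride 2 -> 10
size = math.ceil(size / 2)  # layer_conv2, stride 2 -> 5
size = math.ceil(size / 1)  # layer_conv3, stride 1 -> 5
print(size * size * 64)     # 5 * 5 * 64 = 1600 = num_inputs of layer_fnn4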
