首页 > 编程知识 正文

Python 二维卷积层 (Conv2D) 的实现 —— im2col 方式的前向与反向传播

时间:2023-05-03 16:31:51 阅读:24427 作者:3416

class Conv2D(Layer):
    """A 2D Convolution Layer.

    Parameters:
    -----------
    n_filters: int
        The number of filters that will convolve over the input matrix. This is
        also the number of channels of the output.
    filter_shape: tuple
        A tuple (filter_height, filter_width).
    input_shape: tuple
        The shape of the expected input of the layer as (channels, height, width).
        Note the batch size is NOT part of this tuple -- `initialize` reads
        `input_shape[0]` as the channel count and `output_shape` unpacks it as
        (channels, height, width). Only needs to be specified for the first
        layer in the network.
    padding: string
        Either 'same' or 'valid'. 'same' results in padding being added so that
        the output height and width match the input height and width. For
        'valid' no padding is added.
    stride: int
        The stride length of the filters during the convolution over the input.
    """
    def __init__(self, n_filters, filter_shape, input_shape=None, padding='same', stride=1):
        self.n_filters = n_filters
        self.filter_shape = filter_shape
        self.padding = padding
        self.stride = stride
        self.input_shape = input_shape
        self.trainable = True

    def initialize(self, optimizer):
        """Initialize weights and per-parameter optimizer copies."""
        # Uniform init scaled by the filter size (a simple fan-in heuristic).
        filter_height, filter_width = self.filter_shape
        channels = self.input_shape[0]
        limit = 1 / math.sqrt(np.prod(self.filter_shape))
        # W: (n_filters, channels, filter_height, filter_width); w0: per-filter bias.
        self.W = np.random.uniform(-limit, limit,
                                   size=(self.n_filters, channels, filter_height, filter_width))
        self.w0 = np.zeros((self.n_filters, 1))
        # Independent optimizer copies so W and w0 each keep their own state
        # (e.g. momentum buffers).
        self.W_opt = copy.copy(optimizer)
        self.w0_opt = copy.copy(optimizer)

    def parameters(self):
        """Return the total number of trainable scalars (weights + biases)."""
        return np.prod(self.W.shape) + np.prod(self.w0.shape)

    def forward_pass(self, X, training=True):
        """Convolve the filters over X via im2col and a single matrix product.

        X is expected to be (batch_size, channels, height, width); the return
        value has shape (batch_size, n_filters, out_height, out_width).
        """
        batch_size, channels, height, width = X.shape
        # Cached for the backward pass (column_to_image needs the input shape).
        self.layer_input = X
        # Turn image shape into column shape
        # (enables dot product between input and weights).
        self.X_col = image_to_column(X, self.filter_shape,
                                     stride=self.stride,
                                     output_shape=self.padding)
        # Turn weights into column shape: (n_filters, channels*fh*fw).
        self.W_col = self.W.reshape((self.n_filters, -1))
        # Calculate output
        output = self.W_col.dot(self.X_col) + self.w0
        # Reshape into (n_filters, out_height, out_width, batch_size)
        output = output.reshape(self.output_shape() + (batch_size, ))
        # Redistribute axes so that batch size comes first
        return output.transpose(3, 0, 1, 2)

    def backward_pass(self, accum_grad):
        """Backpropagate accum_grad through the layer, updating W and w0.

        Returns the gradient with respect to the layer input, in image shape.
        """
        # Reshape accumulated gradient into column shape
        accum_grad = accum_grad.transpose(1, 2, 3, 0).reshape(self.n_filters, -1)

        if self.trainable:
            # Gradient w.r.t. the weights: dot product of the column-shaped
            # accumulated gradient and the column-shaped layer input.
            grad_w = accum_grad.dot(self.X_col.T).reshape(self.W.shape)
            # The gradient with respect to bias terms is the sum, similarly to
            # the Dense layer.
            grad_w0 = np.sum(accum_grad, axis=1, keepdims=True)

            # Update the layer's weights
            self.W = self.W_opt.update(self.W, grad_w)
            self.w0 = self.w0_opt.update(self.w0, grad_w0)

        # Recalculate the gradient which will be propagated back to the
        # previous layer. NOTE: this intentionally uses the pre-update W_col.
        accum_grad = self.W_col.T.dot(accum_grad)
        # Reshape from column shape to image shape
        accum_grad = column_to_image(accum_grad,
                                     self.layer_input.shape,
                                     self.filter_shape,
                                     stride=self.stride,
                                     output_shape=self.padding)
        return accum_grad

    def output_shape(self):
        """Return (n_filters, out_height, out_width) for the configured input."""
        channels, height, width = self.input_shape
        pad_h, pad_w = determine_padding(self.filter_shape, output_shape=self.padding)
        # Standard convolution arithmetic: (H + pad_total - filter) / stride + 1.
        output_height = (height + np.sum(pad_h) - self.filter_shape[0]) / self.stride + 1
        output_width = (width + np.sum(pad_w) - self.filter_shape[1]) / self.stride + 1
        return self.n_filters, int(output_height), int(output_width)

版权声明:该文观点仅代表作者本人。处理文章:请发送邮件至 三1五14八八95#扣扣.com 举报,一经查实,本站将立刻删除。