
TensorFlow 2 Tutorial: TensorFlow Classifiers


A 1D CNN performs one-dimensional convolution: the kernel slides along a single axis of the input.
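To make the shapes concrete, here is a minimal sketch (the batch of 4 samples with 256 steps and 1 channel is a hypothetical example, not data from this article) showing that a Conv1D kernel only moves along one axis:

import numpy as np
from tensorflow.keras import layers

x = np.random.rand(4, 256, 1).astype("float32")                 # 4 samples, 256 steps, 1 channel
y = layers.Conv1D(filters=50, kernel_size=7, activation='relu')(x)
print(y.shape)                                                   # (4, 250, 50): 50 filters, kernel slid along the 256-step axis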

The 2D CNN used here stacks two convolutional layers followed by a pooling layer.
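A similar sketch for the 2D case, assuming 16x16 single-channel inputs like the ones used later in this article, shows how the two convolutions and the pooling layer change the shape:

import numpy as np
from tensorflow.keras import layers

x = np.random.rand(4, 16, 16, 1).astype("float32")                    # 4 samples of 16x16, 1 channel
y = layers.Conv2D(64, (3, 3), padding='same', activation='relu')(x)   # -> (4, 16, 16, 64)
y = layers.Conv2D(32, (3, 3), padding='same', activation='relu')(y)   # -> (4, 16, 16, 32)
y = layers.MaxPooling2D(pool_size=(2, 2), padding='same')(y)          # -> (4, 8, 8, 32)
print(y.shape)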

LeNet-5 consists of two stages of convolution + pooling layers, with three fully connected layers added at the end.
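For reference, here is a minimal sketch of the textbook LeNet-5 layout (the classic_lenet5 name and the 32x32 input are illustrative and not from this article; the LeNet5_model further below keeps the same two conv + pool / three dense pattern, but on 16x16 inputs with different filter counts):

from tensorflow import keras
from tensorflow.keras import layers

def classic_lenet5(num_classes=10):
    # Two convolution + pooling stages, then three fully connected layers.
    return keras.models.Sequential([
        layers.Conv2D(6, (5, 5), activation='tanh', input_shape=(32, 32, 1)),  # C1: 32x32 -> 28x28x6
        layers.AveragePooling2D((2, 2)),                                       # S2: -> 14x14x6
        layers.Conv2D(16, (5, 5), activation='tanh'),                          # C3: -> 10x10x16
        layers.AveragePooling2D((2, 2)),                                       # S4: -> 5x5x16
        layers.Flatten(),                                                      # 5*5*16 = 400
        layers.Dense(120, activation='tanh'),
        layers.Dense(84, activation='tanh'),
        layers.Dense(num_classes, activation='softmax')
    ])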

VGGNet16 is divided into 8 sections: five convolution + pooling blocks followed by three fully connected layers. The complete code for all four models is given below.
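These 8 sections correspond to the 5 convolution/pooling blocks plus the 3 fully connected layers of the VGGNet16_model defined in the code below. As a quick sanity check (a sketch that assumes the code below has already been run), the layer counts can be tallied like this:

from tensorflow.keras import layers

model = VGGNet16_model()
n_conv = sum(isinstance(l, layers.Conv2D) for l in model.layers)
n_pool = sum(isinstance(l, layers.MaxPooling2D) for l in model.layers)
n_dense = sum(isinstance(l, layers.Dense) for l in model.layers)
print(n_conv, n_pool, n_dense)   # 13 Conv2D, 5 MaxPooling2D, 3 Dense -> 16 weight layers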

import time
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# train_x, train_y, test_x, test_y are assumed to be defined globally:
# 256 features per sample and integer labels for 12 classes.

def LeNet5_model():
    model = keras.models.Sequential([
        layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same', input_shape=(16, 16, 1), activation='relu'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        # layers.Dropout(0.25),
        # (5, 5, 16) > 400
        layers.Flatten(),
        layers.Dense(256, activation='relu'),
        # layers.Dropout(0.5),
        # layers.Dense(84, activation='relu'),
        layers.Dense(128, activation='relu'),
        # layers.Dropout(0.5),
        layers.Dense(12, activation='softmax')
    ])
    # Compile model
    model.compile(loss="sparse_categorical_crossentropy", optimizer='adam', metrics=['accuracy'])
    return model

def LeNet5():
    t1 = time.time()
    model = LeNet5_model()
    X_train = tf.reshape(train_x, [-1, 16, 16, 1])
    X_test = tf.reshape(test_x, [-1, 16, 16, 1])
    model.summary()
    history = model.fit(X_train, train_y, validation_data=(X_test, test_y),
                        epochs=25, batch_size=128, verbose=0)
    scores = model.evaluate(X_test, test_y, verbose=0)
    t2 = time.time()
    pred_y = model.predict(X_test)
    print(scores)
    print("Baseline Error: %.2f%%" % (100 - scores[1] * 100), t2 - t1)
    print(history.history)
    return scores, pred_y

# simple 1D CNN
def oneD_CNNmodel():
    model = keras.models.Sequential([
        layers.Conv1D(50, 7, input_shape=(256, 1), activation='relu'),
        layers.MaxPooling1D(3),
        layers.Conv1D(50, 7, activation='relu'),
        layers.GlobalAveragePooling1D(),
        # layers.Dropout(0.5),
        layers.Dense(12, activation='softmax')
    ])
    model.compile(loss="sparse_categorical_crossentropy", optimizer='adam', metrics=['accuracy'])
    return model

def oneD_CNN():
    t1 = time.time()
    model = oneD_CNNmodel()
    X_train = tf.reshape(train_x, [-1, 256, 1])
    X_test = tf.reshape(test_x, [-1, 256, 1])
    model.summary()
    history = model.fit(X_train, train_y, validation_data=(X_test, test_y),
                        epochs=25, batch_size=128)
    scores = model.evaluate(X_test, test_y, verbose=0)
    t2 = time.time()
    pred_y = model.predict(X_test)
    print(scores)
    print("Baseline Error: %.2f%%" % (100 - scores[1] * 100), t2 - t1)
    print(history.history)
    return scores, pred_y

def twoD_CNNmodel():
    model = keras.models.Sequential([
        layers.Conv2D(64, kernel_size=(3, 3), padding='same', input_shape=(16, 16, 1), activation='relu'),
        layers.Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        layers.Flatten(),
        layers.Dense(128, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(12, activation='softmax')
    ])
    model.compile(loss="sparse_categorical_crossentropy", optimizer='adam', metrics=['accuracy'])
    return model

def twoD_CNN():
    t1 = time.time()
    model = twoD_CNNmodel()
    X_train = tf.reshape(train_x, [-1, 16, 16, 1])
    X_test = tf.reshape(test_x, [-1, 16, 16, 1])
    model.summary()
    history = model.fit(X_train, train_y, validation_data=(X_test, test_y),
                        epochs=25, batch_size=128, verbose=2)
    scores = model.evaluate(X_test, test_y, verbose=0)
    t2 = time.time()
    pred_y = model.predict(X_test)
    print(scores)
    print("Baseline Error: %.2f%%" % (100 - scores[1] * 100), t2 - t1)
    print(history.history)
    return scores, pred_y

def VGGNet16_model():
    model = keras.models.Sequential([
        # block 1
        layers.Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(16, 16, 1)),
        layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        # block 2
        layers.Conv2D(128, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(128, (3, 3), activation='relu', padding='same'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        # block 3
        layers.Conv2D(256, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(256, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(256, (3, 3), activation='relu', padding='same'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        # block 4
        layers.Conv2D(512, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(512, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(512, (3, 3), activation='relu', padding='same'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        # block 5
        layers.Conv2D(512, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(512, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(512, (3, 3), activation='relu', padding='same'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        # layers.Dropout(0.25),
        # (5, 5, 16) > 400
        layers.Flatten(),
        layers.Dense(256, activation='relu'),
        # layers.Dropout(0.5),
        # layers.Dense(84, activation='relu'),
        layers.Dense(128, activation='relu'),
        # layers.Dropout(0.5),
        layers.Dense(12, activation='softmax')
    ])
    # Compile model
    model.compile(loss="sparse_categorical_crossentropy", optimizer='adam', metrics=['accuracy'])
    return model

def VGGNet16():
    t1 = time.time()
    model = VGGNet16_model()
    X_train = tf.reshape(train_x, [-1, 16, 16, 1])
    X_test = tf.reshape(test_x, [-1, 16, 16, 1])
    model.summary()
    history = model.fit(X_train, train_y, validation_data=(X_test, test_y),
                        epochs=25, batch_size=128, verbose=2)
    scores = model.evaluate(X_test, test_y, verbose=0)
    t2 = time.time()
    pred_y = model.predict(X_test)
    print(scores)
    print("Baseline Error: %.2f%%" % (100 - scores[1] * 100), t2 - t1)
    print(history.history)
    return scores, pred_y
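The functions above rely on global arrays train_x, train_y, test_x, test_y that are never defined in the article. A minimal usage sketch with hypothetical random stand-in data (256 features per sample, 12 classes), assuming the code above has already been run, could look like this:

import numpy as np

# Hypothetical stand-in data: 1000 training / 200 test samples, 256 features, 12 classes.
# The runner functions reshape these globals themselves (to (-1, 256, 1) or (-1, 16, 16, 1)).
train_x = np.random.rand(1000, 256).astype("float32")
train_y = np.random.randint(0, 12, size=(1000,))
test_x = np.random.rand(200, 256).astype("float32")
test_y = np.random.randint(0, 12, size=(200,))

scores, pred_y = twoD_CNN()        # or LeNet5(), oneD_CNN(), VGGNet16()
print("test accuracy:", scores[1])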
