Keras: Multi-Class Semantic Segmentation with U-Net

1 Introduction

U-Net was originally developed for semantic segmentation of medical images, and it has since been applied in other domains as well. Most of these applications, however, are binary: the input image is split into two gray levels or color classes in order to pick out the region of interest.

This article uses the U-Net architecture to implement multi-class semantic segmentation and shows some of the test results. I hope you find it useful!

2 Source Code

(1) Training the model

from __future__ import print_function
import os
import datetime
import numpy as np
import cv2
from keras.models import Model
from keras.layers import Input, concatenate, Conv2D, AveragePooling2D, Dropout, \
    BatchNormalization, UpSampling2D
from keras.optimizers import Adam

PIXEL = 512     # set your image size
BATCH_SIZE = 5
lr = 0.001
EPOCH = 100
X_CHANNEL = 3   # training image channels
Y_CHANNEL = 3   # label image channels (the color-coded masks are read as 3-channel BGR)
X_NUM = 422     # number of training images

pathX = 'I:\\Pascal VOC Dataset\\train1\\images\\' # change to your file path
pathY = 'I:\\Pascal VOC Dataset\\train1\\SegmentationObject\\' # change to your file path

# data loading generator: yields random batches of (image, label) pairs
def generator(pathX, pathY, BATCH_SIZE):
    while 1:
        # sort both listings so image and label files pair up by name
        X_train_files = sorted(os.listdir(pathX))
        Y_train_files = sorted(os.listdir(pathY))
        a = np.arange(len(X_train_files))
        X = []
        Y = []
        for i in range(BATCH_SIZE):
            index = np.random.choice(a)
            img = cv2.imread(pathX + X_train_files[index], 1)
            img = np.array(img).reshape(PIXEL, PIXEL, X_CHANNEL)
            X.append(img)
            img1 = cv2.imread(pathY + Y_train_files[index], 1)
            img1 = np.array(img1).reshape(PIXEL, PIXEL, Y_CHANNEL)
            Y.append(img1)

        X = np.array(X)
        Y = np.array(Y)
        yield X, Y
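
# optional sanity check: pull one batch from the generator and confirm shapes
# X_batch, Y_batch = next(generator(pathX, pathY, BATCH_SIZE))
# print(X_batch.shape, Y_batch.shape)  # expect (BATCH_SIZE, PIXEL, PIXEL, X_CHANNEL) and (BATCH_SIZE, PIXEL, PIXEL, Y_CHANNEL)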

# create the U-Net encoder
inputs = Input((PIXEL, PIXEL, 3))
conv1 = Conv2D(8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
pool1 = AveragePooling2D(pool_size=(2, 2))(conv1)  # 256

conv2 = BatchNormalization(momentum=0.99)(pool1)
conv2 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
conv2 = BatchNormalization(momentum=0.99)(conv2)
conv2 = Conv2D(64, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
conv2 = Dropout(0.02)(conv2)
pool2 = AveragePooling2D(pool_size=(2, 2))(conv2)  # 128

conv3 = BatchNormalization(momentum=0.99)(pool2)
conv3 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
conv3 = BatchNormalization(momentum=0.99)(conv3)
conv3 = Conv2D(128, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
conv3 = Dropout(0.02)(conv3)
pool3 = AveragePooling2D(pool_size=(2, 2))(conv3)  # 64

conv4 = BatchNormalization(momentum=0.99)(pool3)
conv4 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
conv4 = BatchNormalization(momentum=0.99)(conv4)
conv4 = Conv2D(256, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)
conv4 = Dropout(0.02)(conv4)
pool4 = AveragePooling2D(pool_size=(2, 2))(conv4)  # 32

conv5 = BatchNormalization(momentum=0.99)(pool4)
conv5 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
conv5 = BatchNormalization(momentum=0.99)(conv5)
conv5 = Conv2D(512, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
conv5 = Dropout(0.02)(conv5)
pool5 = AveragePooling2D(pool_size=(2, 2))(conv5)  # 16

# decoder with skip connections back to the encoder feature maps
conv6 = BatchNormalization(momentum=0.99)(pool5)
conv6 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)

conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)
up7 = UpSampling2D(size=(2, 2))(conv7)  # 32
conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up7)
merge7 = concatenate([pool4, conv7], axis=3)

conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)
up8 = UpSampling2D(size=(2, 2))(conv8)  # 64
conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up8)
merge8 = concatenate([pool3, conv8], axis=3)

conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge8)
up9 = UpSampling2D(size=(2, 2))(conv9)  # 128
conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up9)
merge9 = concatenate([pool2, conv9], axis=3)

conv10 = Conv2D(32, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)
up10 = UpSampling2D(size=(2, 2))(conv10)  # 256
conv10 = Conv2D(32, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up10)

conv11 = Conv2D(16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv10)
up11 = UpSampling2D(size=(2, 2))(conv11)  # 512
conv11 = Conv2D(8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up11)

# 3-channel output: the network regresses the color-coded label image
conv12 = Conv2D(3, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv11)

model = Model(inputs=inputs, outputs=conv12)
print(model.summary())
model.compile(optimizer=Adam(lr=lr), loss='mse', metrics=['accuracy'])

history = model.fit_generator(generator(pathX, pathY, BATCH_SIZE),
                              steps_per_epoch=600, epochs=EPOCH)
end_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

# save the trained model
model.save(r'V1_828.h5')

# save the loss history
mse = np.array(history.history['loss'])
np.save(r'V1_828.npy', mse)
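
Note that the network above treats segmentation as color regression: it outputs three channels and is trained with MSE against the color-coded label image. The more conventional multi-class setup is a softmax output with one channel per class, trained with categorical crossentropy on one-hot masks. Below is a minimal sketch of that alternative head, assuming the encoder/decoder above is unchanged; N_CLASSES is a hypothetical constant, and the one-hot masks would have to be built from your color palette (e.g. with keras.utils.to_categorical on integer class maps):

N_CLASSES = 21  # hypothetical example: 20 Pascal VOC classes plus background

# per-pixel classifier head instead of the 3-channel color regression
conv12 = Conv2D(N_CLASSES, 1, activation='softmax', padding='same',
                kernel_initializer='he_normal')(conv11)

model = Model(inputs=inputs, outputs=conv12)
# labels must then be one-hot masks of shape (PIXEL, PIXEL, N_CLASSES)
model.compile(optimizer=Adam(lr=lr), loss='categorical_crossentropy',
              metrics=['accuracy'])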

(2) Testing the model

from keras.models import load_model
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2

model = load_model('V1_828.h5')
test_images_path = 'I:\\Pascal VOC Dataset\\test\\test_images\\'
test_gt_path = 'I:\\Pascal VOC Dataset\\test\\SegmentationObject\\'
pre_path = 'I:\\Pascal VOC Dataset\\test\\pre\\'

X = []
for info in os.listdir(test_images_path):
    A = cv2.imread(test_images_path + info)
    X.append(A)
X = np.array(X)
print(X.shape)
Y = model.predict(X)

groundtruth = []
for info in os.listdir(test_gt_path):
    A = cv2.imread(test_gt_path + info)
    groundtruth.append(A)
groundtruth = np.array(groundtruth)

# write every prediction to disk, clipped and cast so OpenCV saves valid 8-bit images
i = 0
for info in os.listdir(test_images_path):
    cv2.imwrite(pre_path + info, np.clip(Y[i], 0, 255).astype(np.uint8))
    i += 1

n = np.random.randint(10)  # pick one of the first 10 test images to visualize
cv2.imwrite('prediction.png', np.clip(Y[n], 0, 255).astype(np.uint8))
cv2.imwrite('groundtruth.png', groundtruth[n])
fig, axs = plt.subplots(1, 3)
axs[0].imshow(X[n][..., ::-1])  # OpenCV loads BGR; reverse channels to RGB for matplotlib
axs[0].axis('off')
axs[1].imshow(np.clip(Y[n], 0, 255).astype(np.uint8)[..., ::-1])
axs[1].axis('off')
axs[2].imshow(groundtruth[n][..., ::-1])
axs[2].axis('off')
fig.savefig("imagestest.png")
plt.close()
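
Beyond visual inspection, segmentation quality is usually quantified with Intersection over Union (IoU). A minimal sketch, assuming the color-coded prediction and ground-truth masks have first been converted to integer class maps; to_class_map below is a hypothetical helper you would write for your color palette:

def iou_per_class(pred, gt, label):
    # pred, gt: integer class maps of identical shape
    inter = np.logical_and(pred == label, gt == label).sum()
    union = np.logical_or(pred == label, gt == label).sum()
    return inter / union if union > 0 else float('nan')

# usage: mean IoU over the classes present in the ground truth
# pred_map = to_class_map(Y[n])           # hypothetical color -> class-index helper
# gt_map = to_class_map(groundtruth[n])
# ious = [iou_per_class(pred_map, gt_map, c) for c in np.unique(gt_map)]
# print(np.nanmean(ious))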

3 Results

Note: from left to right are the input image, the predicted segmentation, and the ground-truth annotation. As the results show, the segmentation of some samples still needs improvement; the main reason is that the dataset is relatively complex, making it hard for the model to learn the underlying patterns.

That concludes this article on multi-class semantic segmentation with U-Net in Keras. I hope it gives you a useful reference.
