Documentation service: http://47.92.0.57:3000/  Weekly report index: http://47.92.0.57:3000/s/NruNXRYmV

Commit 77ddba8d by 王肇一

ready to process on real dataset

parent 9ef386bf
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from multiprocessing import Pool
from functools import partial
from tqdm import tqdm
import argparse
from cvBasedMethod.kmeans import kmeans, kmeans_back
from cvBasedMethod.threshold import threshold
import os


def method_kmeans(imglist, core = 5):
    edges = [kmeans(x, core) for x in imglist]
    # pair each image with its edge mask; iterate over the argument rather than the module-level 'path'
    for pair, edge in tqdm(zip(imglist, edges)):
        kmeans_back(pair, edge)


def method_threshold(imglist, process = 8):
    pool = Pool(process)
    pool.map(threshold, imglist)
    pool.close()
    pool.join()


def method_newThreshold(imglist, process = 8):
    pool = Pool(process)
    # a partial is used instead of a lambda so the callable can be pickled by multiprocessing
    pool.map(partial(threshold, filter = 'fft'), imglist)
    pool.close()
    pool.join()


def get_args():
    parser = argparse.ArgumentParser(description = 'Identify targets from background by KMeans or Threshold',
                                     formatter_class = argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-m', '--method', metavar = 'M', type = int, default = 0,
                        help = '0 for KMeans; 1 for Threshold; 2 for newThreshold; 3 for Unet; 4 for further process',
                        dest = 'method')
    parser.add_argument('-c', '--core', metavar = 'C', type = int, default = 5, help = 'Num of cluster', dest = 'core')
    parser.add_argument('-p', '--process', metavar = 'P', type = int, default = 8, help = 'Num of process',
                        dest = 'process')
    # Unet parameters
    parser.add_argument('--load', '-L', default = 'data/module/MODEL.pth', metavar = 'FILE',
                        help = "Specify the file in which the model is stored")
    parser.add_argument('--mask-threshold', '-t', type = float,
                        help = "Minimum probability value to consider a mask pixel white", default = 0.5)
    parser.add_argument('--scale', '-s', type = float, help = "Scale factor for the input images", default = 0.5)
    return parser.parse_args()


if __name__ == '__main__':
    args = get_args()
    # collect (dir, filename) pairs under data/imgs, skipping .DS_Store and any *dc.tif variants
    path = [(y, x) for y in filter(lambda x: x != '.DS_Store', os.listdir('data/imgs')) for x in filter(
        lambda x: x.endswith('.tif') and not x.endswith('dc.tif') and not x.endswith('DC.tif') and not x.endswith(
            'dc .tif'), os.listdir('data/imgs/' + y))]
    if args.method == 0:
        method_kmeans(path, args.core)
    elif args.method == 1:
        method_threshold(path, args.process)
    elif args.method == 2:
        method_newThreshold(path, args.process)
\ No newline at end of file
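For reference, a sketch of how the CLI defined above might be invoked; the script name used here is an assumption, not part of the commit.

# Hypothetical invocations (script name assumed; adjust to the actual entry point):
#   python run.py -m 0 -c 5      # KMeans segmentation with 5 clusters
#   python run.py -m 1 -p 8      # threshold segmentation across 8 worker processes
#   python run.py -m 2 -p 8      # threshold segmentation using the FFT-based filter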
@@ -17,7 +17,7 @@ import re
from unet import UNet
from utils.predict import predict_img
from resCalc import save_img, get_subarea_info
from resCalc import save_img, get_subarea_info, save_img_mask
def divide_list(list, step):
@@ -33,7 +33,8 @@ def step_1(net, args, device, list, position):
device = device)
result = (mask * 255).astype(np.uint8)
save_img({'ori': img, 'mask': result}, fn[0], fn[1])
#save_img({'ori': img, 'mask': result}, fn[0], fn[1])
save_img_mask(img, result, fn[0], fn[1])
try:
os.makedirs('data/masks/' + fn[0])
except:
@@ -43,24 +44,31 @@ def step_1(net, args, device, list, position):
def step_2(list, position):
for num, dir in enumerate(list):
df = pd.DataFrame(columns = ('ug', 'iter', 'id', 'size', 'area_mean', 'back_mean'))
#df = pd.DataFrame(columns = ('ug', 'iter', 'id', 'size', 'area_mean', 'back_mean', 'Intensity (a. u.)'))
values = []
names = [x for x in filter(lambda x: x != '.DS_Store', os.listdir('data/imgs/' + dir))]
for name in tqdm(names, desc = f'Period{num+1}/{len(list)}', position = position):
match_group = re.match('.*\s([dD]2[oO]|[lL][bB]|.*ug).*\s(.+)\.tif', name)
img = cv.imread('data/imgs/' + dir + '/' + name, 0)
mask = cv.imread('data/masks/' + dir + '/' + name, 0)
tmp_df = pd.DataFrame(get_subarea_info(img, mask))
tmp_df['ug'] = str.lower(match_group.group(1))[:-2] if str.lower(match_group.group(1)).endswith(
'ug') else str.lower(match_group.group(1))
tmp_df['iter'] = str.lower(match_group.group(2))
df = df.append(tmp_df, ignore_index = True, sort = True)
df['Intensity (a. u.)'] = df['area_mean'] - df['back_mean']
df.sort_values('ug',inplace = True)
baseline = df[df['ug']=='d2o']['Intensity (a. u.)'].mean()*0.62
value = get_subarea_info(img, mask)
ug = 0.0
if str.lower(match_group.group(1)).endswith('ug'):
ug = float(str.lower(match_group.group(1))[:-2])
elif str.lower(match_group.group(1))== 'd2o':
ug = 0
elif str.lower(match_group.group(1)) == 'd2o':
ug = -1
iter = str.lower(match_group.group(2))
values.append({'Intensity (a. u.)':value,'ug':ug,'iter':iter})
df = pd.DataFrame(values)
df.sort_values('ug', inplace = True)
baseline = df[df['ug'] == 0]['Intensity (a. u.)'].mean()*0.62
sns.set_style("darkgrid")
sns.catplot(x = 'ug', y = 'Intensity (a. u.)', kind = 'bar', palette = 'vlag', data = df)
sns.swarmplot(x = "ug", y = "Intensity (a. u.)", data = df, size = 2, color = ".3", linewidth = 0)
#sns.swarmplot(x = "ug", y = "Intensity (a. u.)", data = df, size = 2, color = ".3", linewidth = 0)
plt.axhline(y=baseline)
plt.savefig('data/output/'+dir+'.png')
@@ -115,5 +123,6 @@ def cli_main():
pool.close()
pool.join()
if __name__ == '__main__':
cli_main()
\ No newline at end of file
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from .ceunet_model import CEUnet
\ No newline at end of file
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
from .ceunet_parts import DACblock, SPPblock, DecoderBlock, nonlinearity
class CEUnet(nn.Module):
def __init__(self, n_channels, n_classes):
super(CEUnet, self).__init__()
filters = [64, 128, 256, 512]
resnet = models.resnet34(pretrained = True)
weight = resnet.conv1.weight
self.n_channels = n_channels
self.n_classes = n_classes
self.inc = nn.Conv2d(in_channels = n_channels, out_channels = 64, kernel_size = 7, stride = 2, padding = 1)
self.inc.weight = nn.Parameter(weight[:, :1, :, :])
self.bn1 = resnet.bn1
self.relu = resnet.relu
self.maxpool1 = resnet.maxpool
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
self.dblock = DACblock(512)
self.spp = SPPblock(512)
self.decoder4 = DecoderBlock(516, filters[2])
self.decoder3 = DecoderBlock(filters[2], filters[1])
self.decoder2 = DecoderBlock(filters[1], filters[0])
self.decoder1 = DecoderBlock(filters[0], filters[0])
self.finaldeconv1 = nn.ConvTranspose2d(filters[0], 32, 4, 2, 1)
self.finalrelu1 = nonlinearity
self.finalconv2 = nn.Conv2d(32, 32, 3, padding = 1)
self.finalrelu2 = nonlinearity
self.finalconv3 = nn.Conv2d(32, n_classes, 3, padding = 1)
def forward(self,x):
x = self.inc(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool1(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
db = self.dblock(e4)
spp = self.spp(db)
d4 = self.decoder4(spp)+e3
d3 = self.decoder3(d4)+e2
d2 = self.decoder2(d3)+e1
d1 = self.decoder1(d2)
out = self.finaldeconv1(d1)
out = self.finalrelu1(out)
out = self.finalconv2(out)
out = self.finalrelu2(out)
out = self.finalconv3(out)
return F.sigmoid(out)
\ No newline at end of file
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
nonlinearity = partial(F.relu, inplace = True)
class DACblock(nn.Module):
def __init__(self, channel):
super(DACblock, self).__init__()
self.dilate1 = nn.Conv2d(channel, channel, kernel_size = 3, dilation = 1, padding = 1)
self.dilate2 = nn.Conv2d(channel, channel, kernel_size = 3, dilation = 3, padding = 3)
self.dilate3 = nn.Conv2d(channel, channel, kernel_size = 3, dilation = 5, padding = 5)
self.conv1x1 = nn.Conv2d(channel, channel, kernel_size = 1, dilation = 1, padding = 0)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
dilate1_out = nonlinearity(self.dilate1(x))
dilate2_out = nonlinearity(self.conv1x1(self.dilate2(x)))
dilate3_out = nonlinearity(self.conv1x1(self.dilate2(self.dilate1(x))))
dilate4_out = nonlinearity(self.conv1x1(self.dilate3(self.dilate2(self.dilate1(x)))))
out = x + dilate1_out + dilate2_out + dilate3_out + dilate4_out
return out
class SPPblock(nn.Module):
def __init__(self, in_channels):
super(SPPblock, self).__init__()
self.pool1 = nn.MaxPool2d(kernel_size = [2, 2], stride = 2)
self.pool2 = nn.MaxPool2d(kernel_size = [3, 3], stride = 3)
self.pool3 = nn.MaxPool2d(kernel_size = [5, 5], stride = 5)
self.pool4 = nn.MaxPool2d(kernel_size = [6, 6], stride = 6)
self.conv = nn.Conv2d(in_channels = in_channels, out_channels = 1, kernel_size = 1, padding = 0)
def forward(self, x):
self.in_channels, h, w = x.size(1), x.size(2), x.size(3)
self.layer1 = F.upsample(self.conv(self.pool1(x)), size = (h, w), mode = 'bilinear')
self.layer2 = F.upsample(self.conv(self.pool2(x)), size = (h, w), mode = 'bilinear')
self.layer3 = F.upsample(self.conv(self.pool3(x)), size = (h, w), mode = 'bilinear')
self.layer4 = F.upsample(self.conv(self.pool4(x)), size = (h, w), mode = 'bilinear')
out = torch.cat([self.layer1, self.layer2, self.layer3, self.layer4, x], 1)
return out
class DecoderBlock(nn.Module):
def __init__(self, in_channels, n_filters):
super(DecoderBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)
self.norm1 = nn.BatchNorm2d(in_channels // 4)
self.relu1 = nonlinearity
self.deconv2 = nn.ConvTranspose2d(in_channels // 4, in_channels // 4, 3, stride = 2, padding = 1,
output_padding = 1)
self.norm2 = nn.BatchNorm2d(in_channels // 4)
self.relu2 = nonlinearity
self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)
self.norm3 = nn.BatchNorm2d(n_filters)
self.relu3 = nonlinearity
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.deconv2(x)
x = self.norm2(x)
x = self.relu2(x)
x = self.conv3(x)
x = self.norm3(x)
x = self.relu3(x)
return x
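A minimal shape check for the blocks above; it assumes they can be imported as a flat ceunet_parts module (the real package path may differ).

import torch
from ceunet_parts import DACblock, SPPblock, DecoderBlock  # assumed import path

x = torch.randn(1, 512, 16, 16)                             # an encoder4-like feature map
assert DACblock(512)(x).shape == (1, 512, 16, 16)           # dilated cascade preserves the shape
assert SPPblock(512)(x).shape == (1, 516, 16, 16)           # four pooled 1-channel maps concatenated: 512 + 4
assert DecoderBlock(516, 256)(SPPblock(512)(x)).shape == (1, 256, 32, 32)  # decoder doubles the resolution

The 516-channel output of SPPblock is why decoder4 in CEUnet is constructed as DecoderBlock(516, filters[2]).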
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import cv2 as cv\n",
"import os\n",
"import numpy as np\n",
"from PIL import Image\n",
"import matplotlib.pyplot as plt\n",
"%matplotlib inline"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"inputdir = '../trans/'\n",
"outputdir = '../trans_out/'\n",
"for name in filter(lambda x: x!='.DS_Store',os.listdir(inputdir)):\n",
" img = cv.imread(inputdir+name,flags = cv.IMREAD_ANYDEPTH)\n",
" norm = cv.normalize(img, None, 0, 255, cv.NORM_MINMAX, cv.CV_8U)\n",
" cv.imwrite(outputdir+name,norm)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"inputdir = '../trans/'\n",
"outputdir = '../trans_out/'\n",
"for path in filter(lambda x: x!='.DS_Store',os.listdir(inputdir)):\n",
" os.makedirs(outputdir+path)\n",
" for name in filter(lambda x: x!='.DS_Store',os.listdir(inputdir+path+'/')):\n",
" img = cv.imread(inputdir+path+'/'+name,flags = cv.IMREAD_ANYDEPTH)\n",
" norm = cv.normalize(img, None, 0, 255, cv.NORM_MINMAX, cv.CV_8U)\n",
" cv.imwrite(outputdir+path+'/'+name,norm)"
]
},
{
"cell_type": "code",
"execution_count": 139,
"metadata": {},
"outputs": [],
"source": [
"img = cv.imread(\n",
" '../trans/Step size0Dwell time50 E.coil ATCC25922 PBb Ampicillin 1ug 852nm 30mw 300mw tune 43.02 001.tif',\n",
" flags = cv.IMREAD_ANYDEPTH\n",
")\n",
"rgb = cv.cvtColor(img,cv.COLOR_GRAY2RGBA)"
]
},
{
"cell_type": "code",
"execution_count": 141,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[26.925753, 30.179176, 29.493279, ..., 35.128834, 33.911213,\n",
" 37.070652],\n",
" [29.153067, 32.967182, 31.098747, ..., 33.459312, 36.05935 ,\n",
" 37.754295],\n",
" [30.153427, 34.2952 , 34.5086 , ..., 35.76774 , 34.47738 ,\n",
" 35.299744],\n",
" ...,\n",
" [38.53965 , 34.990433, 33.560703, ..., 44.66187 , 39.009254,\n",
" 40.129025],\n",
" [35.40049 , 34.312904, 35.812477, ..., 38.997345, 37.611065,\n",
" 34.732616],\n",
" [31.351091, 35.448124, 33.840405, ..., 37.300144, 36.81348 ,\n",
" 35.960213]], dtype=float32)"
]
},
"execution_count": 141,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"img"
]
},
{
"cell_type": "code",
"execution_count": 142,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[ 6, 35, 29, ..., 79, 69, 97],\n",
" [ 26, 60, 44, ..., 65, 88, 103],\n",
" [ 35, 72, 74, ..., 85, 74, 81],\n",
" ...,\n",
" [110, 78, 65, ..., 164, 114, 124],\n",
" [ 82, 72, 86, ..., 114, 102, 76],\n",
" [ 46, 82, 68, ..., 99, 94, 87]], dtype=uint8)"
]
},
"execution_count": 142,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"cv.normalize(img, None, 0, 255, cv.NORM_MINMAX, cv.CV_8U)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import numpy as np
def butterworth(img, d, n):
    # Butterworth-style high-pass filter applied in the frequency domain
    f = np.fft.fft2(img)
    fshift = np.fft.fftshift(f)
    mask = np.zeros(img.shape)
    center = tuple(map(lambda x: (x - 1) / 2, img.shape))

    def cal_distance(pa, pb):
        from math import sqrt
        return sqrt((pa[0] - pb[0]) ** 2 + (pa[1] - pb[1]) ** 2)

    for i in range(mask.shape[0]):
        for j in range(mask.shape[1]):
            dis = cal_distance(center, (i, j))
            if dis < 1:
                # keep the centre of the spectrum and avoid a division by zero
                mask[i, j] = 1
            else:
                mask[i, j] = 1 - 1 / ((1 + (d / dis)) ** n)
    # mask[100,:] = 0
    new_img = np.abs(np.fft.ifft2(np.fft.ifftshift(fshift * mask)))
    return new_img.astype(np.uint8)
def fft_mask(img, d):
    # ideal low-pass filter: keep only frequencies within radius d of the spectrum centre
    f = np.fft.fft2(img)
    fshift = np.fft.fftshift(f)
    mask = np.zeros(img.shape)
    center = tuple(map(lambda x: (x - 1) / 2, img.shape))

    def cal_distance(pa, pb):
        from math import sqrt
        return sqrt((pa[0] - pb[0]) ** 2 + (pa[1] - pb[1]) ** 2)

    for i in range(mask.shape[0]):
        for j in range(mask.shape[1]):
            dis = cal_distance(center, (i, j))
            mask[i, j] = 0 if dis > d else 1
    new_img = np.abs(np.fft.ifft2(np.fft.ifftshift(fshift * mask))).astype(np.uint8)
    return new_img
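As a quick illustration of how the two filters above are meant to be called (random data stands in for a real 200x200 frame):

import numpy as np

frame = (np.random.rand(200, 200) * 255).astype(np.uint8)  # stand-in for a microscope frame
highpassed = butterworth(frame, 10, 2)  # attenuate low-frequency background, keep fine structure
lowpassed = fft_mask(frame, 10)         # ideal low-pass: keep frequencies within radius 10 of the centre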
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from sklearn.cluster import KMeans
from cvBasedMethod.util import *
import matplotlib.pyplot as plt
from resCalc import save_img,calcRes
def kmeans(pair, cluster_num = 5,filter='butter'):
img_list = pre(pair,'clahe',filter)
denoise = img_list['denoise']
km = KMeans(n_clusters = cluster_num)
feature = [[denoise[k, j]] for k in range(200) for j in range(200)]
ll = km.fit_predict(feature)
mask = [np.array([x for x in map(lambda x: 255 if x == i else 0, ll)]).reshape([200, 200]) for i in
range(cluster_num)]
edge = select_mask(img_list['ori'], mask).astype(np.uint8)
return edge
def kmeans_back(pair, edge, filter = 'butter'):
    # take the filter name as an argument; previously the built-in filter function was passed to pre()
    img_list = pre(pair, 'clahe', filter)
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
    close = cv.morphologyEx(edge, cv.MORPH_CLOSE, kernel, iterations = 5)
    img_list['close'] = close
    save_img(img_list, pair[0], pair[1][:-4])
    calcRes(img_list['ori'], img_list['close'], pair[0], pair[1][:-4])
def select_mask(ori_img, mask_list):
num = len(mask_list) + 1
plt.figure(figsize = (100, 20))
plt.title('ori_img')
plt.subplot(1, num, 1)
plt.imshow(ori_img, 'gray')
for i, mask in enumerate(mask_list):
plt.subplot(1, num, i + 2)
plt.title('mask ' + str(i))
plt.imshow(mask, 'gray')
plt.show()
i = input('Which mask fits the signal best (int): ')
plt.close()
return mask_list[int(i)]
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from cvBasedMethod.util import *
from resCalc import save_img, calcRes
def threshold(pair, filter = 'butter'):
    # pass the filter name as a keyword so it is not swallowed by pre()'s 'even' parameter
    img_list = pre(pair, filter = filter)
    edge = cv.Canny(img_list['blr'], 40, 100)
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
    close = cv.morphologyEx(edge, cv.MORPH_CLOSE, kernel, iterations = 5)
    opened = cv.morphologyEx(close, cv.MORPH_OPEN, kernel, iterations = 3)  # avoid shadowing the built-in open
    img_list['edge'] = edge
    img_list['close'] = close
    img_list['open'] = opened
    save_img(img_list, pair[0], pair[1][:-4])
    calcRes(img_list['ori'], img_list['open'], pair[0], pair[1][:-4])
\ No newline at end of file
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import numpy as np
import cv2 as cv
from cvBasedMethod.filters import fft_mask,butterworth
def remove_scratch(img):
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
fshift[100, 0:90] = 0
fshift[100, 110:] = 0
transed = np.abs(np.fft.ifft2(np.fft.ifftshift(fshift))).astype(np.uint8)
return transed
def unevenLightCompensate(img, blockSize, GaussianBlur_kernelsize = 7):
gray = img
average = np.mean(gray)
rows_new = int(np.ceil(gray.shape[0] / blockSize))
cols_new = int(np.ceil(gray.shape[1] / blockSize))
blockImage = np.zeros((rows_new, cols_new), dtype = np.float32)
for r in range(rows_new):
for c in range(cols_new):
rowmin = r * blockSize
rowmax = (r + 1) * blockSize
if (rowmax > gray.shape[0]):
rowmax = gray.shape[0]
colmin = c * blockSize
colmax = (c + 1) * blockSize
if (colmax > gray.shape[1]):
colmax = gray.shape[1]
imageROI = gray[rowmin:rowmax, colmin:colmax]
temaver = np.mean(imageROI)
blockImage[r, c] = temaver
blockImage = blockImage - average
blockImage2 = cv.resize(blockImage, (gray.shape[1], gray.shape[0]), interpolation = cv.INTER_CUBIC)
gray2 = gray.astype(np.float32)
dst = gray2 - blockImage2
dst = dst.astype(np.uint8)
# dst = cv.GaussianBlur(dst, (GaussianBlur_kernelsize, GaussianBlur_kernelsize), 0)
return dst
def pre(pair, even = 'clahe', filter = 'butter'):
    lab = pair[0]
    pic = pair[1]
    img = cv.imread('img/' + lab + '/' + pic, 0)
    even = unevenLightCompensate(img, 40) if even == 'light' \
        else cv.createCLAHE(clipLimit = 2.0, tileGridSize = (8, 8)).apply(img)
    filtered = fft_mask(even, 10) if filter == 'fft' else butterworth(even, 10, 2)
    denoise = cv.fastNlMeansDenoising(even)
    blr = cv.GaussianBlur(denoise, (7, 7), 0)
    return {'ori': img, 'even': even, 'filtered': filtered, 'denoise': denoise, 'blr': blr}
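A small illustration of unevenLightCompensate on a synthetic brightness ramp (values are purely illustrative):

import numpy as np

ramp = np.tile(np.linspace(0, 60, 200), (200, 1)).astype(np.uint8)  # left-to-right illumination gradient
flattened = unevenLightCompensate(ramp, 40)  # subtract the block-wise deviation from the global mean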
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import torch
from torch.nn import Module, Sequential, Conv2d, ReLU, AdaptiveMaxPool2d, AdaptiveAvgPool2d, NLLLoss, BCELoss, \
CrossEntropyLoss, AvgPool2d, MaxPool2d, Parameter, Linear, Sigmoid, Softmax, Dropout, Embedding
from torch.nn import functional as F
from torch import nn
__all__ = ['PAM_Module', 'CAM_Module', 'semanticModule']
class _EncoderBlock(Module):
def __init__(self, in_channels, out_channels, dropout = False):
super(_EncoderBlock, self).__init__()
layers = [nn.Conv2d(in_channels, out_channels, kernel_size = 3, padding = 1), nn.BatchNorm2d(out_channels),
nn.ReLU(inplace = True), nn.Conv2d(out_channels, out_channels, kernel_size = 3, padding = 1),
nn.BatchNorm2d(out_channels), nn.ReLU(inplace = True), ]
if dropout:
layers.append(nn.Dropout())
layers.append(nn.MaxPool2d(kernel_size = 2, stride = 2))
self.encode = nn.Sequential(*layers)
def forward(self, x):
return self.encode(x)
class _DecoderBlock(Module):
def __init__(self, in_channels, middle_channels, out_channels):
super(_DecoderBlock, self).__init__()
self.decode = nn.Sequential(nn.Conv2d(in_channels, middle_channels, kernel_size = 3, padding = 1),
nn.BatchNorm2d(middle_channels), nn.ReLU(inplace = True),
nn.Conv2d(middle_channels, middle_channels, kernel_size = 3, padding = 1), nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace = True), nn.ConvTranspose2d(middle_channels, out_channels, kernel_size = 2, stride = 2), )
def forward(self, x):
return self.decode(x)
class semanticModule(Module):
""" Semantic attention module"""
def __init__(self, in_dim):
super(semanticModule, self).__init__()
self.chanel_in = in_dim
self.enc1 = _EncoderBlock(in_dim, in_dim * 2)
self.enc2 = _EncoderBlock(in_dim * 2, in_dim * 4)
self.dec2 = _DecoderBlock(in_dim * 4, in_dim * 2, in_dim * 2)
self.dec1 = _DecoderBlock(in_dim * 2, in_dim, in_dim)
def forward(self, x):
enc1 = self.enc1(x)
enc2 = self.enc2(enc1)
dec2 = self.dec2(enc2)
dec1 = self.dec1(F.upsample(dec2, enc1.size()[2:], mode = 'bilinear'))
return enc2.view(-1), dec1
class PAM_Module(Module):
""" Position attention module"""
# Ref from SAGAN
def __init__(self, in_dim):
super(PAM_Module, self).__init__()
self.chanel_in = in_dim
self.query_conv = Conv2d(in_channels = in_dim, out_channels = in_dim // 8, kernel_size = 1)
self.key_conv = Conv2d(in_channels = in_dim, out_channels = in_dim // 8, kernel_size = 1)
self.value_conv = Conv2d(in_channels = in_dim, out_channels = in_dim, kernel_size = 1)
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim = -1)
def forward(self, x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X (HxW) X (HxW)
"""
m_batchsize, C, height, width = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width * height).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width * height)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(m_batchsize, -1, width * height)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, height, width)
out = self.gamma * out + x
return out
class CAM_Module(Module):
""" Channel attention module"""
def __init__(self, in_dim):
super(CAM_Module, self).__init__()
self.chanel_in = in_dim
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim = -1)
def forward(self, x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X C X C
"""
m_batchsize, C, height, width = x.size()
proj_query = x.view(m_batchsize, C, -1)
proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(energy, -1, keepdim = True)[0].expand_as(energy) - energy
attention = self.softmax(energy_new)
proj_value = x.view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
out = self.gamma * out + x
return out
\ No newline at end of file
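A shape check for the two attention modules above (illustrative; assumes they are available in the current scope):

import torch

x = torch.randn(2, 64, 25, 25)
assert PAM_Module(64)(x).shape == x.shape  # position attention returns a tensor of the input shape
assert CAM_Module(64)(x).shape == x.shape  # channel attention likewise preserves the shape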
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import torch.nn.functional as F
from torch import nn
class Net(nn.Module):
    """Placeholder network skeleton; the forward pass is not implemented yet."""

    def __init__(self, n_channels, n_classes, bilinear = True):
        super(Net, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear

    def forward(self, x):
        # 'logits' was never defined here; make the unfinished state explicit
        raise NotImplementedError('Net.forward is not implemented')
\ No newline at end of file
@@ -8,6 +8,22 @@ import logging
import os
import re
def save_img_mask(img,mask,dir,name):
plt.figure(dpi = 300)
plt.suptitle(name)
plt.imshow(img,'gray')
#print(img.shape)
mask = cv.cvtColor(mask, cv.COLOR_GRAY2RGB)
mask[:,:,2] = 0
mask[:,:,0] = 0
plt.imshow(mask, alpha = 0.25, cmap = 'rainbow')
try:
os.makedirs('data/output/' + dir)
except:
logging.info('Existing dir: data/output/' + dir)
plt.savefig('data/output/' + dir + '/' + name[:-4] + '.png')
plt.close()
def save_img(img_list, dir, name):
num = len(img_list)
@@ -34,22 +50,36 @@ def get_subarea_info(img, mask):
group = np.where(labels == i)
area_size = len(group[0])
if area_size > 10: # discard regions that are too small
area_value = img[group]
area_mean = np.mean(area_value)
#if area_size > 10: # discard regions that are too small
area_value = img[group]
area_mean = np.mean(area_value)
# Background Value
pos = [(group[0][k], group[1][k]) for k in range(len(group[0]))]
area_points = np.array([mask[x, y] if (x, y) in pos else 0 for x in range(200) for y in range(200)],dtype = np.uint8).reshape([200,200])
kernel = np.ones((15, 15), np.uint8)
bg_area_mask = cv.erode(area_points, kernel)
surround_bg_mask = cv.bitwise_xor(bg_area_mask, 255 - area_points)
real_bg_mask = cv.bitwise_and(surround_bg_mask, 255 - mask)
back_value = img[np.where(real_bg_mask != 0)]
back_mean = np.mean(back_value)
info.append({'id': i, 'size': area_size, 'area_mean': area_mean, 'back_mean': back_mean})
# endif
df = pd.DataFrame(info)
df['Intensity (a. u.)'] = df['area_mean'] - df['back_mean']
# Background Value
pos = [(group[0][k], group[1][k]) for k in range(len(group[0]))]
median = np.median(df['Intensity (a. u.)'])
b = 1.4826
mad = b * np.median(np.abs(df['Intensity (a. u.)'] - median))
lower_limit = median - (3 * mad)
upper_limit = median + (3 * mad)
area_points = np.array([mask[x, y] if (x, y) in pos else 0 for x in range(200) for y in range(200)],dtype = np.uint8).reshape([200,200])
#cv.imwrite('mask.png',area_points)
kernel = np.ones((15, 15), np.uint8)
bg_area_mask = cv.erode(area_points, kernel)
surround_bg_mask = cv.bitwise_xor(bg_area_mask, 255 - area_points)
real_bg_mask = cv.bitwise_and(surround_bg_mask, 255 - mask)
#df = df[df['Intensity (a. u.)'] > lower_limit]
#df = df[df['Intensity (a. u.)'] < upper_limit]
back_value = img[np.where(real_bg_mask != 0)]
back_mean = np.mean(back_value)
value = df['Intensity (a. u.)'].mean()
info.append({'id': i, 'size': area_size, 'area_mean': area_mean, 'back_mean': back_mean})
return info
return value
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import argparse
import logging
import os
import sys
import torch
from ceunet import CEUnet
from train import get_args,train_net
dir_img = 'data/train_imgs/'
dir_mask = 'data/train_masks/'
dir_checkpoint = 'ce_checkpoints/'
if __name__ == '__main__':
logging.basicConfig(level = logging.INFO, format = '%(levelname)s: %(message)s')
args = get_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logging.info(f'Using device {device}')
net = CEUnet(n_channels = 1, n_classes = 1)
logging.info(f'Network:\n'
f'\t{net.n_channels} input channels\n'
f'\t{net.n_classes} output channels (classes)\n')
if args.load:
net.load_state_dict(torch.load(args.load, map_location = device))
logging.info(f'Model loaded from {args.load}')
net.to(device = device)
try:
train_net(net = net, device=device, epochs = args.epochs, batch_size = args.batchsize, lr = args.lr,
img_scale = args.scale, val_percent = args.val / 100)
except KeyboardInterrupt:
torch.save(net.state_dict(), 'INTERRUPTED.pth')
logging.info('Saved interrupt')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
\ No newline at end of file