Documentation service: http://47.92.0.57:3000/  Weekly report index: http://47.92.0.57:3000/s/NruNXRYmV

Commit 515970c7 by 王肇一

ignite workflow

parent 21be8b98
@@ -18,7 +18,7 @@ import re
from unet import UNet
from mrnet import MultiUnet
from utils.predict import predict_img,predict
-from resCalc import save_img, get_subarea_info, save_img_mask
+from resCalc import save_img, get_subarea_info, save_img_mask, get_subarea_info_avgBG
def divide_list(list, step):
@@ -54,6 +54,7 @@ def step_2(list, position=1):
mask = cv.imread('data/masks/' + dir + '/' + name, 0)
value, count = get_subarea_info(img, mask)
+# value = get_subarea_info_avgBG(img, mask)
ug = 0.0
if str.lower(match_group.group(1)).endswith('ug'):
ug = float(str.lower(match_group.group(1))[:-2])
@@ -61,16 +62,16 @@ def step_2(list, position=1):
ug = 0
elif str.lower(match_group.group(1)) == 'lb':
ug = -1
-values.append({'Intensity (a. u.)': value, 'ug': ug, 'count': count})
+values.append({'Intensity(a.u.)': value, 'ug': ug})
df = pd.DataFrame(values)
df.sort_values('ug', inplace = True)
df.replace(-1, 'lb', inplace = True)
df.replace(0, 'd2o', inplace = True)
-baseline = df[df['ug'] == 'd2o']['Intensity (a. u.)'].mean()*0.62
+baseline = df[df['ug'] == 'd2o']['Intensity(a.u.)'].mean()*0.62
sns.set_style("darkgrid")
-sns.catplot(x = 'ug', y = 'Intensity (a. u.)', kind = 'bar', palette = 'vlag', data = df)
-#sns.swarmplot(x = "ug", y = "Intensity (a. u.)", data = df, size = 2, color = ".3", linewidth = 0)
+sns.catplot(x = 'ug', y = 'Intensity(a.u.)', kind = 'bar', palette = 'vlag', data = df)
+# sns.swarmplot(x = "ug", y = "Intensity (a. u.)", data = df, size = 2, color = ".3", linewidth = 0)
plt.axhline(y=baseline)
plt.savefig('data/output/'+dir+'.png')
......
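Note on the baseline above: it is 62% of the mean intensity of the d2o control group, after the -1/0 codes are relabeled. A minimal, self-contained sketch of that pandas logic (the intensity values here are invented for illustration):

import pandas as pd

values = [{'Intensity(a.u.)': 120.0, 'ug': 0},    # d2o control
          {'Intensity(a.u.)': 95.0, 'ug': -1},    # lb control
          {'Intensity(a.u.)': 60.0, 'ug': 2.0}]   # 2 ug dose
df = pd.DataFrame(values)
df.sort_values('ug', inplace = True)
df.replace(-1, 'lb', inplace = True)
df.replace(0, 'd2o', inplace = True)
baseline = df[df['ug'] == 'd2o']['Intensity(a.u.)'].mean() * 0.62
print(baseline)  # 74.4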
@@ -5,19 +5,9 @@ import os
import cv2 as cv
import argparse
-def get_args():
-    parser = argparse.ArgumentParser(description = 'Identify targets from background by KMeans or Threshold',
-                                     formatter_class = argparse.ArgumentDefaultsHelpFormatter)
-    parser.add_argument('-i', '--input', metavar = 'I', type = str, default = './',
-                        help = 'input_dir', dest = 'input')
-    parser.add_argument('-o', '--output', metavar = 'O', type = str, default = './out/', help = 'output dir', dest = 'output')
-    return parser.parse_args()
-
-
-args = get_args()
-os.mkdir(args.output)
-for name in os.listdir(args.input):
-    img = cv.imread(args.input + name, flags = cv.IMREAD_GRAYSCALE)
-    cv.imwrite(args.output + name, img)
\ No newline at end of file
+inputdir = '../data/imgs/3 E.coil with Ceftazidame/'
+outputdir = '../data/imgs/E.coil/'
+for name in filter(lambda x: x != '.DS_Store', os.listdir(inputdir)):
+    img = cv.imread(inputdir + name, flags = cv.IMREAD_ANYDEPTH)
+    norm = cv.normalize(img, None, 0, 255, cv.NORM_MINMAX, cv.CV_8U)
+    cv.imwrite(outputdir + name, norm)
\ No newline at end of file
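Note: the rewritten script reads higher-bit-depth images (IMREAD_ANYDEPTH) and min-max normalizes them into 8-bit. A quick sanity check of what cv.normalize does here, on a synthetic 16-bit array:

import numpy as np
import cv2 as cv

img = np.array([[100, 200], [300, 400]], dtype = np.uint16)
norm = cv.normalize(img, None, 0, 255, cv.NORM_MINMAX, cv.CV_8U)
# Equivalent to (img - img.min()) / (img.max() - img.min()) * 255, cast to uint8.
print(norm)  # [[  0  85]
             #  [170 255]]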
@@ -12,7 +12,7 @@ from torchvision import transforms
from torch.utils.data import DataLoader, random_split
from utils.dataset import BasicDataset, VOCSegmentation
-from utils.eval import eval_net, eval_multi, eval_jac
+from utils.eval import eval_net, eval_jac
dir_checkpoint = 'checkpoint/'
......
@@ -6,4 +6,5 @@ numpy
opencv-python
seaborn
torch
-torchvision
\ No newline at end of file
+torchvision
+tensorboard
\ No newline at end of file
@@ -57,7 +57,7 @@ def get_subarea_info(img, mask):
# Background Value
pos = [(group[0][k], group[1][k]) for k in range(len(group[0]))]
-area_points = np.array([mask[x, y] if (x, y) in pos else 0 for x in range(200) for y in range(200)],dtype = np.uint8).reshape([200,200])
+area_points = np.array([mask[x, y] if (x, y) in pos else 0 for x in range(200) for y in range(200)], dtype = np.uint8).reshape([200,200])
kernel = np.ones((15, 15), np.uint8)
bg_area_mask = cv.erode(area_points, kernel)
surround_bg_mask = cv.bitwise_xor(bg_area_mask, 255 - area_points)
@@ -79,4 +79,20 @@ def get_subarea_info(img, mask):
df = df[df['mean'] <= upper_limit]
df['value'] = df['mean']-df['back']
-return (df['value'] * df['size']).sum() / df['size'].sum(),df.shape[0]
+return (df['value'] * df['size']).sum() / df['size'].sum(), df.shape[0]
+def get_subarea_info_avgBG(img, mask):
+    area_num, labels, stats, centroids = cv.connectedComponentsWithStats(mask, connectivity = 8)
+    value = 0
+    size = 0
+    bg = np.mean(img[np.where(labels == 0)])
+    for i in filter(lambda x: x != 0, range(area_num)):
+        group = np.where(labels == i)
+        area_size = len(group[0])
+        area_value = img[group]
+        area_mean = np.mean(area_value)
+        size += area_size
+        value += (area_mean - bg) * area_size
+    return value / size
\ No newline at end of file
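Note: get_subarea_info_avgBG subtracts one global background mean (the pixels with label 0) from every connected component, weighting each component by its size. A toy example (values invented):

import numpy as np
import cv2 as cv

img = np.array([[10, 10, 10],
                [10, 50, 60],
                [10, 10, 10]], dtype = np.float32)
mask = np.array([[0, 0, 0],
                 [0, 255, 255],
                 [0, 0, 0]], dtype = np.uint8)
area_num, labels, stats, centroids = cv.connectedComponentsWithStats(mask, connectivity = 8)
bg = np.mean(img[np.where(labels == 0)])  # 10.0, mean over the 7 background pixels
fg = np.mean(img[np.where(labels == 1)])  # 55.0, mean over the single 2-pixel component
print(fg - bg)                            # 45.0, the size-weighted value for one component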
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from torch import nn
from torch.nn import functional as F
import torch
from torchvision import models
import torchvision


def conv3x3(in_, out):
    return nn.Conv2d(in_, out, 3, padding = 1)


class ConvRelu(nn.Module):
    def __init__(self, in_, out):
        super().__init__()
        self.conv = conv3x3(in_, out)
        self.activation = nn.ReLU(inplace = True)

    def forward(self, x):
        x = self.conv(x)
        x = self.activation(x)
        return x


class DecoderBlock(nn.Module):
    def __init__(self, in_channels, middle_channels, out_channels):
        super().__init__()
        self.block = nn.Sequential(ConvRelu(in_channels, middle_channels),
                                   nn.ConvTranspose2d(middle_channels, out_channels, kernel_size = 3, stride = 2,
                                                      padding = 1, output_padding = 1),
                                   nn.ReLU(inplace = True))

    def forward(self, x):
        return self.block(x)


class Interpolate(nn.Module):
    def __init__(self, size = None, scale_factor = None, mode = 'nearest', align_corners = False):
        super(Interpolate, self).__init__()
        self.interp = nn.functional.interpolate
        self.size = size
        self.mode = mode
        self.scale_factor = scale_factor
        self.align_corners = align_corners

    def forward(self, x):
        x = self.interp(x, size = self.size, scale_factor = self.scale_factor, mode = self.mode,
                        align_corners = self.align_corners)
        return x


class DecoderBlockV2(nn.Module):
    def __init__(self, in_channels, middle_channels, out_channels, is_deconv = True):
        super(DecoderBlockV2, self).__init__()
        self.in_channels = in_channels
        if is_deconv:
            # Parameters for the deconvolution were chosen to avoid checkerboard
            # artifacts, following https://distill.pub/2016/deconv-checkerboard/
            self.block = nn.Sequential(ConvRelu(in_channels, middle_channels),
                                       nn.ConvTranspose2d(middle_channels, out_channels, kernel_size = 4,
                                                          stride = 2, padding = 1),
                                       nn.ReLU(inplace = True))
        else:
            self.block = nn.Sequential(Interpolate(scale_factor = 2, mode = 'bilinear'),
                                       ConvRelu(in_channels, middle_channels),
                                       ConvRelu(middle_channels, out_channels))

    def forward(self, x):
        return self.block(x)
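Note: with kernel_size = 4, stride = 2, padding = 1 the transposed convolution doubles the spatial size exactly ((H - 1) * 2 - 2 * 1 + 4 = 2H), and a kernel size divisible by the stride is what the linked Distill article recommends to avoid checkerboard artifacts. A quick shape check:

import torch
from torch import nn

deconv = nn.ConvTranspose2d(64, 32, kernel_size = 4, stride = 2, padding = 1)
x = torch.randn(1, 64, 16, 16)
print(deconv(x).shape)  # torch.Size([1, 32, 32, 32])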
class UNet11(nn.Module):
    def __init__(self, num_filters = 32, pretrained = False):
        """
        :param num_filters:
        :param pretrained:
            False - no pre-trained network is used
            True - encoder is pre-trained with VGG11
        """
        super().__init__()
        self.pool = nn.MaxPool2d(2, 2)
        self.encoder = models.vgg11(pretrained = pretrained).features
        self.relu = self.encoder[1]
        self.conv1 = self.encoder[0]
        self.conv2 = self.encoder[3]
        self.conv3s = self.encoder[6]
        self.conv3 = self.encoder[8]
        self.conv4s = self.encoder[11]
        self.conv4 = self.encoder[13]
        self.conv5s = self.encoder[16]
        self.conv5 = self.encoder[18]
        self.center = DecoderBlock(num_filters * 8 * 2, num_filters * 8 * 2, num_filters * 8)
        self.dec5 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 8)
        self.dec4 = DecoderBlock(num_filters * (16 + 8), num_filters * 8 * 2, num_filters * 4)
        self.dec3 = DecoderBlock(num_filters * (8 + 4), num_filters * 4 * 2, num_filters * 2)
        self.dec2 = DecoderBlock(num_filters * (4 + 2), num_filters * 2 * 2, num_filters)
        self.dec1 = ConvRelu(num_filters * (2 + 1), num_filters)
        self.final = nn.Conv2d(num_filters, 1, kernel_size = 1)

    def forward(self, x):
        conv1 = self.relu(self.conv1(x))
        conv2 = self.relu(self.conv2(self.pool(conv1)))
        conv3s = self.relu(self.conv3s(self.pool(conv2)))
        conv3 = self.relu(self.conv3(conv3s))
        conv4s = self.relu(self.conv4s(self.pool(conv3)))
        conv4 = self.relu(self.conv4(conv4s))
        conv5s = self.relu(self.conv5s(self.pool(conv4)))
        conv5 = self.relu(self.conv5(conv5s))
        center = self.center(self.pool(conv5))
        dec5 = self.dec5(torch.cat([center, conv5], 1))
        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        dec1 = self.dec1(torch.cat([dec2, conv1], 1))
        return self.final(dec1)


class UNet16(nn.Module):
    def __init__(self, num_classes = 1, num_filters = 32, pretrained = False, is_deconv = False):
        """
        :param num_classes:
        :param num_filters:
        :param pretrained:
            False - no pre-trained network used
            True - encoder pre-trained with VGG16
        :param is_deconv:
            False - bilinear interpolation is used in decoder
            True - deconvolution is used in decoder
        """
        super().__init__()
        self.n_classes = num_classes
        self.pool = nn.MaxPool2d(2, 2)
        self.encoder = torchvision.models.vgg16(pretrained = pretrained).features
        self.relu = nn.ReLU(inplace = True)
        self.conv1 = nn.Sequential(self.encoder[0], self.relu, self.encoder[2], self.relu)
        self.conv2 = nn.Sequential(self.encoder[5], self.relu, self.encoder[7], self.relu)
        self.conv3 = nn.Sequential(self.encoder[10], self.relu, self.encoder[12], self.relu,
                                   self.encoder[14], self.relu)
        self.conv4 = nn.Sequential(self.encoder[17], self.relu, self.encoder[19], self.relu,
                                   self.encoder[21], self.relu)
        self.conv5 = nn.Sequential(self.encoder[24], self.relu, self.encoder[26], self.relu,
                                   self.encoder[28], self.relu)
        self.center = DecoderBlock(512, num_filters * 8 * 2, num_filters * 8)
        self.dec5 = DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8)
        self.dec4 = DecoderBlock(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8)
        self.dec3 = DecoderBlock(256 + num_filters * 8, num_filters * 4 * 2, num_filters * 2)
        self.dec2 = DecoderBlock(128 + num_filters * 2, num_filters * 2 * 2, num_filters)
        self.dec1 = ConvRelu(64 + num_filters, num_filters)
        self.final = nn.Conv2d(num_filters, num_classes, kernel_size = 1)

    def forward(self, x):
        conv1 = self.conv1(x)
        conv2 = self.conv2(self.pool(conv1))
        conv3 = self.conv3(self.pool(conv2))
        conv4 = self.conv4(self.pool(conv3))
        conv5 = self.conv5(self.pool(conv4))
        center = self.center(self.pool(conv5))
        dec5 = self.dec5(torch.cat([center, conv5], 1))
        dec4 = self.dec4(torch.cat([dec5, conv4], 1))
        dec3 = self.dec3(torch.cat([dec4, conv3], 1))
        dec2 = self.dec2(torch.cat([dec3, conv2], 1))
        dec1 = self.dec1(torch.cat([dec2, conv1], 1))
        if self.n_classes > 1:
            x_out = F.log_softmax(self.final(dec1), dim = 1)
        else:
            x_out = self.final(dec1)
        return x_out
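Note: a hypothetical smoke test for UNet16 (not part of the commit). The encoder is VGG16, so the input must have 3 channels, and the five 2x2 poolings mean the side length should be a multiple of 32 for the decoder to restore full resolution:

import torch

net = UNet16(num_classes = 1, pretrained = False)
x = torch.randn(1, 3, 256, 256)
with torch.no_grad():
    y = net(x)
print(y.shape)  # torch.Size([1, 1, 256, 256])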
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import torch
from torch import optim
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
from ignite.contrib.handlers.param_scheduler import LRScheduler
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss, DiceCoefficient, ConfusionMatrix, RunningAverage
from ignite.contrib.handlers import ProgressBar
from argparse import ArgumentParser

import mrnet
from mrnet import MultiUnet
from utils.dataset import VOCSegmentation


def get_data_loaders(train_batch_size, val_batch_size):
    trans = transforms.Compose([transforms.Resize(256), transforms.ToTensor()])
    trainset = VOCSegmentation('data', 'train', trans, trans)
    evalset = VOCSegmentation('data', 'traineval', trans, trans)
    train_loader = DataLoader(trainset, batch_size = train_batch_size, shuffle = True, num_workers = 8,
                              pin_memory = True)
    val_loader = DataLoader(evalset, batch_size = val_batch_size, shuffle = False, num_workers = 8,
                            pin_memory = True)
    return train_loader, val_loader


def run(train_batch_size, val_batch_size, epochs, lr):
    model = MultiUnet(n_channels = 1, n_classes = 1)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device = device)
    train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
    optimizer = optim.Adam(model.parameters(), lr = lr)
    cm = ConfusionMatrix(num_classes = 1)
    dice = DiceCoefficient(cm)
    loss = torch.nn.BCELoss()  # torch.nn.NLLLoss()
    scheduler = LRScheduler(lr_scheduler.ReduceLROnPlateau(optimizer))
    trainer = create_supervised_trainer(model, optimizer, loss, device = device)
    evaluator = create_supervised_evaluator(model,
                                            metrics = {'accuracy': Accuracy(), 'dice': dice, 'nll': Loss(loss)},
                                            device = device)
    RunningAverage(output_transform = lambda x: x).attach(trainer, 'loss')
    trainer.add_event_handler(Events.EPOCH_COMPLETED, scheduler)
    pbar = ProgressBar(persist = True)
    pbar.attach(trainer, metric_names = 'all')

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        evaluator.run(train_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_dice = metrics['dice']
        avg_nll = metrics['nll']
        pbar.log_message(
            "Training Results - Epoch: {} Avg accuracy: {:.2f} Avg dice: {:.2f} Avg loss: {:.2f}"
            .format(engine.state.epoch, avg_accuracy, avg_dice, avg_nll))

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_nll = metrics['nll']
        avg_dice = metrics['dice']
        pbar.log_message(
            "Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg dice: {:.2f} Avg loss: {:.2f}"
            .format(engine.state.epoch, avg_accuracy, avg_dice, avg_nll))
        pbar.n = pbar.last_print_n = 0

    trainer.run(train_loader, max_epochs = epochs)


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--batch_size', type = int, default = 5, help = 'input batch size for training (default: 5)')
    parser.add_argument('--val_batch_size', type = int, default = 5,
                        help = 'input batch size for validation (default: 5)')
    parser.add_argument('--epochs', type = int, default = 10, help = 'number of epochs to train (default: 10)')
    parser.add_argument('--lr', type = float, default = 0.01, help = 'learning rate (default: 0.01)')
    args = parser.parse_args()
    run(args.batch_size, args.val_batch_size, args.epochs, args.lr)
\ No newline at end of file
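Note: the diff does not show this file's name; assuming it is saved as, say, train_ignite.py (hypothetical), the defaults above correspond to the invocation:

python train_ignite.py --batch_size 5 --val_batch_size 5 --epochs 10 --lr 0.01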
@@ -4,7 +4,6 @@ from tqdm import tqdm
from sklearn.metrics import jaccard_score
from utils.dice_loss import dice_coeff, dice_coef
-from .metrics import eval_metrics
def eval_net(net, loader, device, n_val):
@@ -31,29 +30,6 @@ def eval_net(net, loader, device, n_val):
    return tot / n_val
-def eval_multi(net, loader, device, n_val):
-    net.eval()
-    overall_acc = 0
-    avg_per_class_acc = 0
-    avg_jacc = 0
-    avg_dice = 0
-    with tqdm(total = n_val, desc = 'Validation round', unit = 'img', leave = False) as pbar:
-        for imgs, true_masks in loader:
-            imgs = imgs.to(device = device, dtype = torch.float32)
-            mask_type = torch.float32 if net.n_classes == 1 else torch.long
-            true_masks = true_masks.to(device = device, dtype = mask_type)
-            pred_mask = net(imgs)
-            oac, apca, aj, ad = eval_metrics(true_masks, pred_mask, 1)
-            overall_acc += oac
-            avg_per_class_acc += apca
-            avg_jacc += aj
-            avg_dice += ad
-            pbar.update(imgs.shape[0])
-    return
def eval_jac(net, loader, device, n_val):
    net.eval()
    jac = 0
......
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Common image segmentation metrics."""
import torch

EPS = 1e-10


def nanmean(x):
    """Computes the arithmetic mean ignoring any NaNs."""
    return torch.mean(x[x == x])


def _fast_hist(true, pred, num_classes):
    mask = (true >= 0) & (true < num_classes)
    hist = torch.bincount(num_classes * true[mask] + pred[mask],
                          minlength = num_classes ** 2).reshape(num_classes, num_classes).float()
    return hist
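Note: _fast_hist builds the confusion matrix with a bincount trick: the index num_classes * true + pred maps each (true, pred) pair to one bin, row-major. A worked example for num_classes = 2:

import torch

true = torch.tensor([0, 0, 1, 1])
pred = torch.tensor([0, 1, 1, 1])
hist = torch.bincount(2 * true + pred, minlength = 4).reshape(2, 2).float()
print(hist)  # tensor([[1., 1.],
             #         [0., 2.]])  rows: true class, columns: predicted class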
def overall_pixel_accuracy(hist):
    """Computes the total pixel accuracy.
    The overall pixel accuracy provides an intuitive
    approximation for the qualitative perception of the
    label when it is viewed in its overall shape but not
    its details.
    Args:
        hist: confusion matrix.
    Returns:
        overall_acc: the overall pixel accuracy.
    """
    correct = torch.diag(hist).sum()
    total = hist.sum()
    overall_acc = correct / (total + EPS)
    return overall_acc


def per_class_pixel_accuracy(hist):
    """Computes the average per-class pixel accuracy.
    The per-class pixel accuracy is a more fine-grained
    version of the overall pixel accuracy. A model could
    score a relatively high overall pixel accuracy by
    correctly predicting the dominant labels or areas
    in the image whilst incorrectly predicting the
    possibly more important/rare labels. Such a model
    will score a low per-class pixel accuracy.
    Args:
        hist: confusion matrix.
    Returns:
        avg_per_class_acc: the average per-class pixel accuracy.
    """
    correct_per_class = torch.diag(hist)
    total_per_class = hist.sum(dim = 1)
    per_class_acc = correct_per_class / (total_per_class + EPS)
    avg_per_class_acc = nanmean(per_class_acc)
    return avg_per_class_acc


def jaccard_index(hist):
    """Computes the Jaccard index, a.k.a the Intersection over Union (IoU).
    Args:
        hist: confusion matrix.
    Returns:
        avg_jacc: the average per-class jaccard index.
    """
    A_inter_B = torch.diag(hist)
    A = hist.sum(dim = 1)
    B = hist.sum(dim = 0)
    jaccard = A_inter_B / (A + B - A_inter_B + EPS)
    avg_jacc = nanmean(jaccard)
    return avg_jacc


def dice_coefficient(hist):
    """Computes the Sørensen–Dice coefficient, a.k.a the F1 score.
    Args:
        hist: confusion matrix.
    Returns:
        avg_dice: the average per-class dice coefficient.
    """
    A_inter_B = torch.diag(hist)
    A = hist.sum(dim = 1)
    B = hist.sum(dim = 0)
    dice = (2 * A_inter_B) / (A + B + EPS)
    avg_dice = nanmean(dice)
    return avg_dice


def eval_metrics(true, pred, num_classes):
    """Computes various segmentation metrics on 2D feature maps.
    Args:
        true: a tensor of shape [B, H, W] or [B, 1, H, W].
        pred: a tensor of shape [B, H, W] or [B, 1, H, W].
        num_classes: the number of classes to segment. This number
            should be less than the ID of the ignored class.
    Returns:
        overall_acc: the overall pixel accuracy.
        avg_per_class_acc: the average per-class pixel accuracy.
        avg_jacc: the jaccard index.
        avg_dice: the dice coefficient.
    """
    hist = torch.zeros((num_classes, num_classes))
    for t, p in zip(true, pred):
        hist += _fast_hist(t.flatten(), p.flatten(), num_classes)
    overall_acc = overall_pixel_accuracy(hist)
    avg_per_class_acc = per_class_pixel_accuracy(hist)
    avg_jacc = jaccard_index(hist)
    avg_dice = dice_coefficient(hist)
    return overall_acc, avg_per_class_acc, avg_jacc, avg_dice
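Note: toy usage of eval_metrics in the same module, reusing the tensors from the worked example above with a batch dimension added:

import torch

true = torch.tensor([[[0, 0], [1, 1]]])  # [B, H, W]
pred = torch.tensor([[[0, 1], [1, 1]]])
overall_acc, avg_per_class_acc, avg_jacc, avg_dice = eval_metrics(true, pred, 2)
print(float(overall_acc))  # ~0.75: 3 of the 4 pixels are correct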
class AverageMeter(object):
    """Tracks a running, count-weighted average of a scalar (e.g. a per-batch loss)."""

    def __init__(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n = 1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
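Note: typical AverageMeter usage (hypothetical numbers):

meter = AverageMeter()
meter.update(0.5, n = 4)  # batch of 4 with loss 0.5
meter.update(1.0, n = 2)  # batch of 2 with loss 1.0
print(meter.avg)          # (0.5 * 4 + 1.0 * 2) / 6 = 0.666...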