Documentation service: http://47.92.0.57:3000/    Weekly report index: http://47.92.0.57:3000/s/NruNXRYmV

Commit 8679ba65 by 王肇一

Two-step CLI-based interaction

parent c6cabaa8
#!/usr/bin/env python
# -*- coding:utf-8 -*-
-from multiprocessing import Pool
+import numpy as np
+import pandas as pd
+import torch
+from PIL import Image
+import cv2 as cv
+from unet import UNet
from tqdm import tqdm
import argparse
+import logging
import os
-from utils.predict import predict
-from resCalc import draw_bar
+import re
+from utils.predict import predict_img
+from resCalc import draw_bar, save_img, calcRes, get_subarea_info

def get_args():
@@ -31,7 +40,36 @@ if __name__ == '__main__':
                         'dc .tif'), os.listdir('data/imgs/' + y))]
    if args.step == 1:
-        predict(path, args.load, args.scale, args.mask_threshold)
+        net = UNet(n_channels = 1, n_classes = 1)
+        logging.info("Loading model {}".format(args.module))
+        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+        logging.info(f'Using device {device}')
+        net.to(device = device)
+        net.load_state_dict(torch.load(args.module, map_location = device))
+        logging.info("Model loaded !")
+
+        for i, fn in enumerate(tqdm(path)):
+            logging.info("\nPredicting image {} ...".format(fn[0] + '/' + fn[1]))
+            img = Image.open('data/imgs/' + fn[0] + '/' + fn[1])
+            mask = predict_img(net = net, full_img = img, scale_factor = args.scale,
+                               out_threshold = args.mask_threshold, device = device)
+            result = (mask * 255).astype(np.uint8)
+            save_img({'ori': img, 'mask': result}, fn[0], fn[1])
+            try:
+                os.makedirs('data/masks/' + fn[0])
+            except:
+                logging.info("path already exist")
+            cv.imwrite('data/masks/' + fn[0] + '/' + fn[1], result)
+            # calcRes(cv.cvtColor(np.asarray(img), cv.COLOR_RGB2BGR), result, fn[0], fn[1][:-4])
+    elif args.step == 2:
+        df = pd.DataFrame(columns = ('ug', 'iter', 'id', 'size', 'area_mean', 'back_mean'))
+        for i, fn in enumerate(tqdm(path)):
+            img = cv.imread('data/imgs/' + fn[0] + '/' + fn[1], 0)
+            mask = cv.imread('data/masks/' + fn[0] + '/' + fn[1], 0)
+            calcRes(img, mask, fn[0], fn[1][:-4])
-    for exName in filter(lambda x: x != '.DS_Store', os.listdir('data/output')):
-        draw_bar(exName, os.listdir('data/output/' + exName + '/csv'))
+    #for exName in filter(lambda x: x != '.DS_Store', os.listdir('data/output')):
+    #    draw_bar(exName, os.listdir('data/output/' + exName + '/csv'))
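The step-1 branch now does the model loading and mask prediction that used to live in utils/predict.predict, while step 2 re-reads the saved masks and hands them to calcRes. The argparse setup sits in the part of get_args() that this hunk elides, so the exact flag spellings are not visible; the body only shows that the parsed namespace must expose step, module (the old call read args.load), scale and mask_threshold. A purely hypothetical get_args() consistent with those attribute names, for orientation only:

```python
import argparse

def get_args():
    # Hypothetical reconstruction -- the real flag names and defaults live in the elided hunk.
    parser = argparse.ArgumentParser(description = 'Two-step segmentation / measurement CLI')
    parser.add_argument('-s', '--step', type = int, default = 1,
                        help = '1: predict masks with the UNet, 2: measure regions from saved masks')
    parser.add_argument('-m', '--module', type = str, default = 'MODEL.pth',
                        help = 'path to the trained model weights')
    parser.add_argument('--scale', type = float, default = 1.0,
                        help = 'downscaling factor applied to images before prediction')
    parser.add_argument('--mask-threshold', type = float, default = 0.5,
                        help = 'probability threshold for binarising the predicted mask')
    return parser.parse_args()
```

With a parser along these lines, the workflow becomes two invocations of the script: one with step 1, which writes the predicted masks under data/masks/, and one with step 2, which turns those masks into per-region CSV statistics under data/output/.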
@@ -9,6 +9,7 @@ import logging
import os
import re

def save_img(img_list, dir, name):
    num = len(img_list)
    plt.figure(figsize = (100, 20))
@@ -28,7 +29,7 @@ def save_img(img_list, dir, name):
def calcRes(img, mask, dir = 'default_output', name = 'output'):
-    dic = get_subarea_infor(img, mask)
+    dic = get_subarea_info(img, mask)
    df = pd.DataFrame(dic)
    try:
        os.makedirs('data/output/' + dir + '/csv')
@@ -38,6 +39,17 @@ def calcRes(img, mask, dir = 'default_output', name = 'output'):
    df.to_csv('data/output/' + dir + '/csv/' + name + '.csv', index = False)

+def merge_df(dir):
+    df = pd.DataFrame(columns = ('ug', 'iter', 'id', 'size', 'area_mean', 'back_mean'))
+    for name in os.listdir(dir):
+        match_group = re.match('.*\s([dD]2[oO]|[lL][bB]|.*ug).*\s(.+)\.tif', name)
+        tmp = pd.read_csv('data/output/' + dir + '/csv/' + name)
+        tmp['ug'] = str.lower(match_group.group(1))[:-2] if str.lower(match_group.group(1)).endswith(
+            'ug') else str.lower(match_group.group(1))
+        tmp['iter'] = str.lower(match_group.group(2))
+        df = df.append(tmp, ignore_index = True, sort = True)
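merge_df collapses the per-image CSV files into a single table, pulling the culture condition and the replicate label out of each file name with a regular expression: group(1) matches a condition token (D2O, LB, or a "...ug" concentration whose "ug" suffix is stripped) and group(2) matches the label just before the .tif extension. A small demonstration of that pattern on invented file names (only the pattern and the normalisation come from the commit; the names are placeholders):

```python
import re

# Pattern used by merge_df.
PATTERN = r'.*\s([dD]2[oO]|[lL][bB]|.*ug).*\s(.+)\.tif'

# Hypothetical file names, for illustration only.
for name in ['strainA 4ug dc 1.tif', 'strainA D2O dc 2.tif', 'strainA LB dc 3.tif']:
    m = re.match(PATTERN, name)
    if m is None:
        print(name, '-> no match')
        continue
    cond = m.group(1).lower()
    cond = cond[:-2] if cond.endswith('ug') else cond  # same normalisation as merge_df
    print(name, '->', {'ug': cond, 'iter': m.group(2).lower()})
    # e.g. 'strainA 4ug dc 1.tif' -> {'ug': '4', 'iter': '1'}
```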
def draw_bar(exName, names):
    df = pd.DataFrame(columns = ('class', 'perc', 'Label', 'Area', 'Mean', 'Std', 'BackMean', 'BackStd'))
    for name in names:
@@ -58,35 +70,58 @@ def draw_bar(exName, names):
    plt.show()

-def get_subarea_infor(img, mask):
-    area_num, labels, stats, centroids = cv.connectedComponentsWithStats(mask)
-    info = []
-    for i in filter(lambda x: x != 0, range(area_num)):
-        group = np.where(labels == i)
-        img_value = img[group]
-        area_tmp = len(group[0])
-        mean_tmp = np.mean(img_value)
-        std_tmp = np.std(img_value)
-        pos = [(group[0][i], group[1][i]) for i in range(len(group[0]))]
-        res = np.zeros([200, 200], np.uint8)
-        for x in range(200):
-            for y in range(200):
-                if (x, y) in pos:
-                    res[x, y] = mask[x, y]
-                else:
-                    res[x, y] = 0
-        kernel = np.ones((17, 17), np.uint8)
-        mask_background = cv.erode(255 - res, kernel)
-        minimask = cv.bitwise_xor(mask_background, 255 - res)
-        realminimask = cv.bitwise_and(minimask, 255 - mask)
-        img_background = img[np.where(realminimask != 0)]
-        mean_value = np.mean(img_background)
-        std_value = np.std(img_background)
-        info.append({'Label': i, 'Area': area_tmp, 'Mean': mean_tmp, 'Std': std_tmp, 'BackMean': mean_value,
-                     'BackStd': std_value})
-    return info
+# def get_subarea_infor(img, mask):
+#     area_num, labels, stats, centroids = cv.connectedComponentsWithStats(mask)
+#     info = []
+#
+#     for i in filter(lambda x: x != 0, range(area_num)):
+#         group = np.where(labels == i)
+#         img_value = img[group]
+#         area_tmp = len(group[0])
+#         mean_tmp = np.mean(img_value)
+#         std_tmp = np.std(img_value)
+#
+#         pos = [(group[0][i], group[1][i]) for i in range(len(group[0]))]
+#         res = np.zeros([200, 200], np.uint8)
+#         for x in range(200):
+#             for y in range(200):
+#                 if (x, y) in pos:
+#                     res[x, y] = mask[x, y]
+#                 else:
+#                     res[x, y] = 0
+#
+#         kernel = np.ones((17, 17), np.uint8)
+#         mask_background = cv.erode(255 - res, kernel)
+#         minimask = cv.bitwise_xor(mask_background, 255 - res)
+#         realminimask = cv.bitwise_and(minimask, 255 - mask)
+#
+#         img_background = img[np.where(realminimask != 0)]
+#         mean_value = np.mean(img_background)
+#         std_value = np.std(img_background)
+#
+#         info.append({'Label': i, 'Area': area_tmp, 'Mean': mean_tmp, 'Std': std_tmp, 'BackMean': mean_value,
+#                      'BackStd': std_value})
+#     return info

+def get_subarea_info(img, mask):
+    area_num, labels, stats, centroids = cv.connectedComponentsWithStats(mask, connectivity = 8)
+    info = []
+    for i in filter(lambda x: x != 0, range(area_num)):
+        group = np.where(labels == i)
+        area_value = img[group]
+        area_size = len(area_value)
+        area_mean = np.mean(area_value)
+
+        # Background Value
+        pos = [(group[0][i], group[1][i]) for i in range(len(group[0]))]
+        area_points = np.array([mask[x, y] if (x, y) in pos else 0 for x in range(200) for y in range(200)])
+        kernel = np.ones((15, 15), np.uint8)
+        bg_area_mask = cv.erode(area_points, kernel)
+        surround_bg_mask = cv.bitwise_xor(bg_area_mask, 255 - area_points)
+        real_bg_mask = cv.bitwise_and(surround_bg_mask, 255 - mask)
+
+        back_value = img[np.where(real_bg_mask != 0)]
+        back_mean = np.mean(back_value)
+
+        info.append({'id': i, 'size': area_size, 'area_mean': area_mean, 'back_mean': back_mean})
+    return info
\ No newline at end of file
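get_subarea_info labels the connected components of the binary mask, then records each component's pixel count and mean intensity together with the mean intensity of a background neighbourhood isolated with a morphological kernel and bitwise operations, so that a segmented cell can be compared against its local background. The sketch below illustrates the same measurement idea with plain NumPy masks; it is only an illustration under stated assumptions (dilation to form a ring around the component, grayscale img and a 0/255 mask), not the committed implementation:

```python
import numpy as np
import cv2 as cv

def region_stats(img, mask, labels, i, ring = 15):
    """Mean intensity of component i and of a background ring just outside it (sketch)."""
    region = (labels == i)
    area_mean = float(img[region].mean())

    comp = np.where(region, np.uint8(255), np.uint8(0))   # 255 on this component only
    kernel = np.ones((ring, ring), np.uint8)
    # Dilate the component and keep only pixels that are neither this component
    # nor any other foreground object: a ring of true background around the cell.
    ring_mask = cv.dilate(comp, kernel) & ~comp & ~mask
    back_mean = float(img[ring_mask != 0].mean()) if ring_mask.any() else float('nan')
    return area_mean, back_mean

# Usage sketch: img and mask are grayscale (cv.imread(..., 0)); label the mask, then
# measure every non-background component, much as the step-2 loop does via calcRes.
# n, labels, stats, centroids = cv.connectedComponentsWithStats(mask, connectivity = 8)
# info = [region_stats(img, mask, labels, i) for i in range(1, n)]
```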
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from flask import Flask, render_template, url_for

app = Flask("Im")


@app.route('/')
def layout():
    return render_template('layout.html')


@app.route('/compute/')
def filetree():
    return 'filetree'


if __name__ == '__main__':
    app.run()
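In this commit the /compute/ route is only a stub that returns the literal string 'filetree'. One possible next step, sketched here purely as an assumption (none of these helpers exist in the repository), is to have it serve the contents of data/output as JSON so the treeview markup in tree.html could be generated from real experiment folders:

```python
import os
from flask import Flask, jsonify

app = Flask("Im")

def list_dir(path):
    """Describe a directory recursively as nested dicts (assumed helper, not in the commit)."""
    entries = []
    for name in sorted(os.listdir(path)):
        full = os.path.join(path, name)
        if os.path.isdir(full):
            entries.append({'name': name, 'type': 'folder', 'children': list_dir(full)})
        else:
            entries.append({'name': name, 'type': 'file'})
    return entries

@app.route('/compute/')
def filetree():
    # Serve the experiment output tree so the front end can render it as a file tree.
    return jsonify(list_dir('data/output'))
```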
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <link rel="stylesheet" href="https://unpkg.com/purecss@1.0.0/build/pure-min.css"
          integrity="sha384-CCTZv2q9I9m3UOxRLaJneXrrqKwUNOzZ6NGEUMwHtShDJ+nCoiXJCAgi05KfkLGY" crossorigin="anonymous">
    <script src="https://apps.bdimg.com/libs/jquery/2.1.4/jquery.min.js"></script>
    <script src="../static/echarts.js"></script>
    <title>main</title>
</head>
<body>
<div class="pure-g">
    <div class="pure-u-1-3">
        {% include 'tree.html' %}
    </div>
    <div class="pure-u-1-3">
    </div>
    <div class="pure-u-1-3">
    </div>
</div>
<script type="text/javascript">
</script>
</body>
</html>
\ No newline at end of file
<ul id="browser" class="filetree treeview-famfamfam">
<li><span class="folder">Folder 1</span>
<ul>
<li><span class="folder">Item 1.1</span>
<ul>
<li><span class="file">Item 1.1.1</span></li>
</ul>
</li>
<li><span class="folder">Folder 2</span>
<ul>
<li><span class="folder">Subfolder 2.1</span>
<ul id="folder21">
<li><span class="file">File 2.1.1</span></li>
<li><span class="file">File 2.1.2</span></li>
</ul>
</li>
<li><span class="folder">Subfolder 2.2</span>
<ul>
<li><span class="file">File 2.2.1</span></li>
<li><span class="file">File 2.2.2</span></li>
</ul>
</li>
</ul>
</li>
<li class="closed"><span class="folder">Folder 3 (closed at start)</span>
<ul>
<li><span class="file">File 3.1</span></li>
</ul>
</li>
<li><span class="file">File 4</span></li>
</ul>
</li>
</ul>
\ No newline at end of file
-import logging
-import numpy as np
import torch
import torch.nn.functional as F
-from PIL import Image
-import cv2 as cv
-from tqdm import tqdm
from torchvision import transforms
-from unet import UNet
from utils.dataset import BasicDataset
-from cvBasedMethod.util import save_img,calcRes

def predict_img(net, full_img, device, scale_factor = 1, out_threshold = 0.5):
    net.eval()
@@ -30,27 +22,4 @@ def predict_img(net, full_img, device, scale_factor = 1, out_threshold = 0.5):
    probs = tf(probs.cpu())
    full_mask = probs.squeeze().cpu().numpy()
    return full_mask > out_threshold
\ No newline at end of file
-def predict(file_names, model, scale, mask_threshold):
-    net = UNet(n_channels = 1, n_classes = 1)
-    logging.info("Loading model {}".format(model))
-    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-    logging.info(f'Using device {device}')
-    net.to(device = device)
-    net.load_state_dict(torch.load(model, map_location = device))
-    logging.info("Model loaded !")
-    for i, fn in enumerate(tqdm(file_names)):
-        logging.info("\nPredicting image {} ...".format(fn[0] + '/' + fn[1]))
-        img = Image.open('data/imgs/' + fn[0] + '/' + fn[1])
-        mask = predict_img(net = net, full_img = img, scale_factor = scale, out_threshold = mask_threshold,
-                           device = device)
-        result = (mask * 255).astype(np.uint8)  # result.save(out_files[i])  # logging.info("Mask saved to {}".format(out_files[i]))
-        save_img({'ori': img, 'mask': result}, fn[0], fn[1])
-        calcRes(cv.cvtColor(np.asarray(img), cv.COLOR_RGB2BGR), result, fn[0], fn[1][:-4])
\ No newline at end of file
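After this change, predict_img is the only entry point left in utils/predict.py; the batch loop removed above moved into the CLI's step-1 branch. For completeness, a minimal direct call, with placeholder paths; the rest (UNet with one input channel and one class, the keyword arguments, the uint8 conversion) is taken from the commit:

```python
import numpy as np
import torch
import cv2 as cv
from PIL import Image

from unet import UNet
from utils.predict import predict_img

MODEL_PATH = 'checkpoints/unet.pth'                 # placeholder path
IMAGE_PATH = 'data/imgs/example/example dc .tif'    # placeholder path

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = UNet(n_channels = 1, n_classes = 1)
net.load_state_dict(torch.load(MODEL_PATH, map_location = device))
net.to(device = device)

# predict_img returns a boolean mask (pixel probability > out_threshold).
mask = predict_img(net = net, full_img = Image.open(IMAGE_PATH),
                   scale_factor = 1, out_threshold = 0.5, device = device)
cv.imwrite('mask.png', (mask * 255).astype(np.uint8))
```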