import glob
import zipfile
import pickle

import numpy as np
import joblib
import streamlit as st
from PIL import Image
from tqdm import tqdm
from skimage.feature import hog
from skimage.color import rgb2gray
from skimage import io
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def extract_hog_features(image_path):
    """
    Extract HOG features from an image file.

    :param image_path: path to the image file
    :return: NumPy array of HOG features
    """
    # Load the image
    img = io.imread(image_path)
    # Drop the alpha channel if present, then convert to grayscale
    if img.ndim == 3:
        img = img[:, :, :3]
        gray_img = rgb2gray(img)
    else:
        gray_img = img
    # Extract HOG features (skimage defaults, except the block normalization)
    features = hog(gray_img, block_norm='L2-Hys')
    return features
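# Note: the hog() call above relies on skimage's default parameters
# (orientations, pixels_per_cell, cells_per_block). As an illustration only
# (not part of the original app), coarser cells would shrink the feature vector:
#   features = hog(gray_img, orientations=9, pixels_per_cell=(16, 16),
#                  cells_per_block=(2, 2), block_norm='L2-Hys')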
def prepare_features(image_paths):
    """
    Extract HOG features from multiple images and build a feature matrix.

    All images must share the same dimensions; otherwise the HOG vectors
    have different lengths and cannot be stacked into one array.

    :param image_paths: list of image file paths
    :return: NumPy array of features, one row per image
    """
    features = []
    for path in tqdm(image_paths):
        features.append(extract_hog_features(path))
    return np.array(features)
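# Standalone usage sketch (hypothetical paths, not part of the app flow):
#   paths = glob.glob("data/normal/*.JPG")      # e.g. a folder of normal images
#   X = prepare_features(paths)                 # shape: (n_images, n_hog_features)
#   x_single = extract_hog_features(paths[0])   # a single feature vector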
with st.sidebar:
    st.image("logo.png")
    file_uploaded = st.file_uploader("Upload", type=["zip"])
    if file_uploaded is not None:
        if file_uploaded.type == "application/zip":
            # Unpack the uploaded training images
            with zipfile.ZipFile(file_uploaded, "r") as z:
                z.extractall("./data/")
    test_img_path = st.file_uploader("Test image", type=["png", "JPG"])
    if test_img_path is not None:
        # Save a resized copy that the inference step reads back
        test_img = Image.open(test_img_path)
        test_img.resize((320, 240)).save("input.png")
st.write("サイドバーより学習データをZipファイルとしてアップロードしボタンをクリック.") | |
if st.button("訓練開始"): | |
with st.spinner("1分ほどお待ちください..."): | |
image_paths = glob.glob("data/*/*.JPG") | |
col1, col2, col3 = st.columns(3) # 2列のコンテナを用意する | |
with col1: | |
st.image(image_paths[0]) | |
with col2: | |
st.image(image_paths[1]) | |
with col3: | |
st.image(image_paths[2]) | |
features = prepare_features(image_paths) | |
print(features.shape) | |
scaler = StandardScaler() | |
features_scaled = scaler.fit_transform(features) | |
joblib.dump(scaler,"scaler.save") | |
print(features_scaled) | |
pca = PCA(n_components=4) | |
z_train = pca.fit_transform(features_scaled) | |
joblib.dump(pca,"pca.save") | |
print(z_train) | |
clf = svm.OneClassSVM(nu=0.2, kernel="rbf", gamma=0.001) | |
clf.fit(z_train) | |
with open('model.pickle', mode='wb') as fp: | |
pickle.dump(clf, fp) | |
st.info("学習が完了しました。テスト画像を入力してください。") | |
st.write("サイドバーよりテストデータを画像ファイルとしてアップロードしボタンをクリック.") | |
if st.button("推論開始"): | |
with open('model.pickle', mode='rb') as fp: | |
clf = pickle.load(fp) | |
features_test = prepare_features(["input.png"]) | |
scaler = joblib.load("scaler.save") | |
features_scaled_test = scaler.transform(features_test) | |
pca = joblib.load("pca.save") | |
z_test = pca.transform(features_scaled_test) | |
pred = clf.predict(z_test) | |
print(pred) | |
st.image(test_img) | |
if pred[0] == 1: | |
st.info("入力画像は「正常」です。") | |
else: | |
st.info("入力画像は「異常」である可能性があります。") | |