diff --git a/.gitignore b/.gitignore
index b479755..de03f38 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,2 @@
-"これ見えてない?"
 .idea
+.env
diff --git a/mepi_test.java b/mepi_test.java
deleted file mode 100644
index bb70802..0000000
--- a/mepi_test.java
+++ /dev/null
@@ -1,5 +0,0 @@
-class Hello {
-    public static void main(String[] args) {
-        System.out.println("Hello, World!");
-    }
-}
\ No newline at end of file
diff --git a/mepi_test.py b/mepi_test.py
new file mode 100644
index 0000000..cd8c781
--- /dev/null
+++ b/mepi_test.py
@@ -0,0 +1,49 @@
+import cv2
+import numpy as np
+
+# Load the image
+image = cv2.imread("test2.png")
+image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+
+# Reduce noise
+image_blur = cv2.GaussianBlur(image_gray, (5, 5), 0)
+
+# Canny edge detection
+edges = cv2.Canny(image_blur, 30, 100)
+
+# Thicken the edges
+kernel = np.ones((3, 3), np.uint8)
+edges = cv2.dilate(edges, kernel, iterations=2)
+
+# Find contours
+contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+
+image_result = image.copy()
+
+# Number of detected squares
+square_count = 0
+
+# White background image (for drawing)
+image_blank = np.ones_like(image) * 255
+
+for cnt in contours:
+    area = cv2.contourArea(cnt)
+    if area < 5000 or area > 20000:  # skip contours that are too small or too large
+        cv2.drawContours(image_result, [cnt], -1, (255, 0, 0), 2)
+        continue
+
+    peri = cv2.arcLength(cnt, True)
+    approx = cv2.approxPolyDP(cnt, 0.04 * peri, True)
+
+    if len(approx) == 4:
+        square_count += 1
+        cv2.drawContours(image_result, [approx], -1, (0, 255, 0), 2)
+    else:
+        cv2.drawContours(image_result, [approx], -1, (0, 0, 255), 2)
+
+
+# Save the result
+cv2.imwrite('detected_squares.png', image_result)
+
+# Print the result
+print(f"Number of detected squares: {square_count}")
\ No newline at end of file
diff --git a/mepi_test_1.py b/mepi_test_1.py
new file mode 100644
index 0000000..5d164cf
--- /dev/null
+++ b/mepi_test_1.py
@@ -0,0 +1,48 @@
+from sklearn import svm
+import cv2
+import numpy as np
+import glob
+from joblib import dump
+
+# HOG descriptor
+hog = cv2.HOGDescriptor(
+    _winSize=(64, 64),
+    _blockSize=(16, 16),
+    _blockStride=(8, 8),
+    _cellSize=(8, 8),
+    _nbins=9
+)
+
+x = []
+y = []
+
+# Load the dataset
+for label in ['fu_up', 'fu_down',
+              'tokinn_up', 'tokinn_down',
+              'ou_up', 'ou_down',
+              'gyoku_up', 'gyoku_down',
+              'keima_up', 'keima_down',
+              'None']:
+    for file in glob.glob(f'data/{label}/*.png'):
+        img = cv2.imread(file, cv2.IMREAD_GRAYSCALE)
+        img = cv2.resize(img, (64, 64))
+        descriptor = hog.compute(img)
+        x.append(descriptor.flatten())
+        y.append(label)
+
+x = np.array(x)
+y = np.array(y)
+
+# Train the SVM
+clf = svm.SVC(kernel='linear')
+clf.fit(x, y)
+dump(clf, 'shogi_model.joblib')
+
+
+# Quick sanity check on a single image
+img = cv2.imread('test_keima.png', cv2.IMREAD_GRAYSCALE)
+img = cv2.resize(img, (64, 64))
+descriptor = hog.compute(img)
+predicted = clf.predict([descriptor.flatten()])
+
+print(f"Inference result: {predicted[0]}")
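
Note on mepi_test.py: the script filters contours by area and counts any contour whose approxPolyDP simplification has exactly four vertices as a square. Below is a minimal sketch of that same pipeline wrapped in a reusable function; the name count_squares and its default thresholds are illustrative only and are not part of this commit.

import cv2
import numpy as np

def count_squares(path, min_area=5000, max_area=20000):
    # Preprocess: grayscale, blur, Canny edges, dilate (mirrors mepi_test.py).
    image = cv2.imread(path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    edges = cv2.Canny(blur, 30, 100)
    edges = cv2.dilate(edges, np.ones((3, 3), np.uint8), iterations=2)

    contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    count = 0
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area < min_area or area > max_area:
            continue  # skip contours outside the expected size range
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, True)
        if len(approx) == 4:  # four vertices after simplification -> count as a square
            count += 1
    return count

if __name__ == "__main__":
    print(count_squares("test2.png"))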
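
Note on mepi_test_1.py: the trained SVM is persisted with joblib, so inference can run without retraining. A minimal sketch of reloading shogi_model.joblib and classifying one image is shown below, with the HOG preprocessing factored into a helper; hog_features is an assumed name and not part of the diff.

import cv2
from joblib import load

# Must match the training-time HOG configuration from mepi_test_1.py.
hog = cv2.HOGDescriptor(_winSize=(64, 64), _blockSize=(16, 16),
                        _blockStride=(8, 8), _cellSize=(8, 8), _nbins=9)

def hog_features(path):
    # Same preprocessing as training: grayscale, resize to 64x64, compute HOG.
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img, (64, 64))
    return hog.compute(img).flatten()

clf = load('shogi_model.joblib')
print(clf.predict([hog_features('test_keima.png')])[0])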