Using Deep Learning to Figure Out Which Pokémon Satomi Ishihara Looks Like


"Yo, deep learning!" — basically I just wanted to be able to say that, and that's the whole point of this post.

What I Want to Do

I'll train a model on Pokémon images and write a program that shows which Pokémon Satomi Ishihara most resembles, i.e. the one with the highest similarity score.

Libraries

The deep learning part is implemented with TensorFlow and Keras.
Image files are loaded with PIL (Pillow). I'll write up how to install TensorFlow and Keras in a separate post.
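Assuming TensorFlow, Keras and Pillow are already installed (for example via pip), a quick import check like the sketch below is enough to confirm the environment before moving on; the version prints are just for reference.

# Minimal sanity check: confirm the libraries import and print their versions.
import tensorflow as tf
import keras
from PIL import Image

print("TensorFlow:", tf.__version__)
print("Keras:", keras.__version__)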

Training the Model

The Pokémon used for training are the ones from the song "Pokémon Ierukana?" (Can You Name All the Pokémon?): Pikachu, Dragonite, Slowbro, ..., through Tentacool, with five images collected for each. Honestly, gathering the images was such a pain that I couldn't bring myself to complete the whole Pokédex...
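The loader below assigns class labels with a long if/elif chain over the directory names. Just as a sketch, an equivalent lookup-table version (using the same romaji directory names) would be:

# Equivalent to the if/elif chain in the loader below: directory name -> class label.
POKEMON_DIRS = ["pikatyu", "kairyu", "yadoran", "pijyon", "kodack",
                "koratta", "zubat", "galop", "sanders", "menokurage"]

def label_for(dir_name):
    # Unknown directory names fall back to 0, mirroring the original code.
    return POKEMON_DIRS.index(dir_name) if dir_name in POKEMON_DIRS else 0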

from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout
from keras.utils.np_utils import to_categorical
from keras.optimizers import Adagrad
from keras.optimizers import Adam
import numpy as np
from PIL import Image
import os
image_list = []
label_list = []
# Load the images from the directories under data_pokemon/train.
for dir in os.listdir("data_pokemon/train"):
    if dir == ".DS_Store":
        continue

    dir1 = "data_pokemon/train/" + dir 
    label = 0
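    # Map the directory name to a class label 0-9 (same mapping as the lookup-table sketch above).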

    if dir == "pikatyu":    
        label = 0
    elif dir == "kairyu": 
        label = 1
    elif dir == "yadoran": 
        label = 2
    elif dir == "pijyon": 
        label = 3
    elif dir == "kodack": 
        label = 4        
    elif dir == "koratta": 
        label = 5
    elif dir == "zubat": 
        label = 6
    elif dir == "galop": 
        label = 7        
    elif dir == "sanders": 
        label = 8
    elif dir == "menokurage": 
        label = 9
        
    for file in os.listdir(dir1):
        if file != ".DS_Store":
            # Append the ground-truth label to label_list.
            label_list.append(label)
            filepath = dir1 + "/" + file
            # Re-encode the image as PNG in place (the file keeps its original name and extension).
            img = Image.open(filepath, 'r')
            img.save(filepath, 'PNG', quality=100, optimize=True)
            # Resize the image to 25x25 pixels and load it as a 25x25 2-D array whose elements are [R,G,B] triplets.
            # R, G and B are each values in the range 0-255.
            image = np.array(Image.open(filepath).resize((25, 25)))
            print(filepath,image.shape)
            # Transpose the array into the form [[all Red values], [all Green values], [all Blue values]].
            image = image.transpose(2, 0, 1)
            # Then flatten it into a 1-D array: the first third holds the Red values, the next third Green, the last third Blue.
            image = image.reshape(1, image.shape[0] * image.shape[1] * image.shape[2]).astype("float32")[0]
            # Append the resulting array (scaled to 0-1) to image_list.
            image_list.append(image / 255.)
data_pokemon/train/menokurage/menokurage1.jpeg (25, 25, 3)
data_pokemon/train/menokurage/menokurage5.jpg_small (25, 25, 3)
data_pokemon/train/menokurage/menokurage4.jpeg (25, 25, 3)
data_pokemon/train/menokurage/menokurage2.png (25, 25, 3)
data_pokemon/train/menokurage/menokurage3.jpeg (25, 25, 3)
data_pokemon/train/zubat/zubat4.jpeg (25, 25, 3)
data_pokemon/train/zubat/zubat5.jpg (25, 25, 3)
data_pokemon/train/zubat/zubat1.png (25, 25, 3)
data_pokemon/train/zubat/zubat3.jpg (25, 25, 3)
data_pokemon/train/zubat/zubat2.jpg (25, 25, 3)
data_pokemon/train/kodack/kodack2.jpeg (25, 25, 3)
data_pokemon/train/kodack/kodack4.jpg (25, 25, 3)
data_pokemon/train/kodack/kodack5.jpg (25, 25, 3)
data_pokemon/train/kodack/kodack3.jpg (25, 25, 3)
data_pokemon/train/kodack/kodack1.png (25, 25, 3)
data_pokemon/train/koratta/koratta1.jpg (25, 25, 3)
data_pokemon/train/koratta/koratta3.jpg (25, 25, 3)
data_pokemon/train/koratta/koratta2.jpeg (25, 25, 3)
data_pokemon/train/koratta/koratta5.png (25, 25, 3)
data_pokemon/train/koratta/koratta4.jpeg (25, 25, 3)
data_pokemon/train/galop/galop2.jpeg (25, 25, 3)
data_pokemon/train/galop/galop4.jpeg (25, 25, 3)
data_pokemon/train/galop/galop3.jpg (25, 25, 3)
data_pokemon/train/galop/galop1.jpg (25, 25, 3)
data_pokemon/train/galop/galop5.jpg (25, 25, 3)
data_pokemon/train/yadoran/yadoran4.png (25, 25, 3)
data_pokemon/train/yadoran/yadoran5.png (25, 25, 3)
data_pokemon/train/yadoran/yadoran2.png (25, 25, 3)
data_pokemon/train/yadoran/yadoran3.png (25, 25, 3)
data_pokemon/train/yadoran/yodoran1.png (25, 25, 3)
data_pokemon/train/pijyon/pijyon1.jpeg (25, 25, 3)
data_pokemon/train/pijyon/pijyon2.png (25, 25, 3)
data_pokemon/train/pijyon/pijyon3.jpeg (25, 25, 3)
data_pokemon/train/pijyon/pijyon5.png (25, 25, 3)
data_pokemon/train/pijyon/pijyon4.png (25, 25, 3)
data_pokemon/train/pikatyu/pikatyu3.png (25, 25, 3)
data_pokemon/train/pikatyu/pikatyu2.png (25, 25, 3)
data_pokemon/train/pikatyu/pikatyu1.png (25, 25, 3)
data_pokemon/train/pikatyu/pikatyu5.png (25, 25, 3)
data_pokemon/train/pikatyu/pikatyu4.png (25, 25, 3)
data_pokemon/train/kairyu/kairyu1.png (25, 25, 3)
data_pokemon/train/kairyu/kairyu2.png (25, 25, 3)
data_pokemon/train/kairyu/kairyu3.png (25, 25, 3)
data_pokemon/train/kairyu/kairyu4.png (25, 25, 3)
data_pokemon/train/kairyu/kairyu5.png (25, 25, 3)
data_pokemon/train/sanders/sanders4.jpeg (25, 25, 3)
data_pokemon/train/sanders/sanders5.jpeg (25, 25, 3)
data_pokemon/train/sanders/sanders2.jpeg (25, 25, 3)
data_pokemon/train/sanders/sanders3.jpeg (25, 25, 3)
data_pokemon/train/sanders/sanders1.jpeg (25, 25, 3)
# Convert to a NumPy array so it can be passed to Keras.
image_list = np.array(image_list)
# Convert the label list into one-hot label vectors of 0s and 1s,
# e.g. 0 -> [1, 0, ...], 1 -> [0, 1, ...].
Y = to_categorical(label_list)
# Create the model and build the neural network.
model = Sequential()
model.add(Dense(200, input_dim=1875))  # 1875 = 25 * 25 * 3 flattened pixel values
model.add(Activation("relu"))
model.add(Dropout(0.2))

model.add(Dense(100))
model.add(Activation("relu"))
model.add(Dropout(0.2))

model.add(Dense(10))
model.add(Activation("softmax"))
# Use Adam as the optimizer.
opt = Adam(lr=0.001)
# Compile the model.
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# Train the model; 20% of the data is held out for validation.
model.fit(image_list, Y, epochs=100, batch_size=50, validation_split=0.2)


Train on 40 samples, validate on 10 samples
Epoch 1/100
40/40 [==============================] - 0s - loss: 3.0424 - acc: 0.1500 - val_loss: 3.2672 - val_acc: 0.0000e+00
Epoch 2/100
40/40 [==============================] - 0s - loss: 2.6548 - acc: 0.0750 - val_loss: 3.1662 - val_acc: 0.0000e+00
Epoch 3/100
40/40 [==============================] - 0s - loss: 2.2802 - acc: 0.2000 - val_loss: 3.6208 - val_acc: 0.0000e+00
Epoch 4/100
40/40 [==============================] - 0s - loss: 2.4635 - acc: 0.1500 - val_loss: 4.0944 - val_acc: 0.0000e+00
Epoch 5/100
40/40 [==============================] - 0s - loss: 2.2835 - acc: 0.2500 - val_loss: 4.4182 - val_acc: 0.0000e+00
Epoch 6/100
40/40 [==============================] - 0s - loss: 2.3999 - acc: 0.1250 - val_loss: 4.5961 - val_acc: 0.0000e+00
Epoch 7/100
40/40 [==============================] - 0s - loss: 2.4041 - acc: 0.1750 - val_loss: 4.5769 - val_acc: 0.0000e+00
Epoch 8/100
40/40 [==============================] - 0s - loss: 2.4829 - acc: 0.1250 - val_loss: 4.3898 - val_acc: 0.0000e+00
Epoch 9/100
40/40 [==============================] - 0s - loss: 2.2392 - acc: 0.2250 - val_loss: 4.1738 - val_acc: 0.0000e+00
Epoch 10/100
40/40 [==============================] - 0s - loss: 2.2029 - acc: 0.2250 - val_loss: 4.1227 - val_acc: 0.0000e+00
Epoch 11/100
40/40 [==============================] - 0s - loss: 2.1927 - acc: 0.1250 - val_loss: 4.1275 - val_acc: 0.0000e+00
Epoch 12/100
40/40 [==============================] - 0s - loss: 2.1432 - acc: 0.1500 - val_loss: 4.1073 - val_acc: 0.0000e+00
Epoch 13/100
40/40 [==============================] - 0s - loss: 2.1381 - acc: 0.2000 - val_loss: 4.1057 - val_acc: 0.0000e+00
Epoch 14/100
40/40 [==============================] - 0s - loss: 2.1284 - acc: 0.2750 - val_loss: 4.0589 - val_acc: 0.0000e+00
Epoch 15/100
40/40 [==============================] - 0s - loss: 2.0522 - acc: 0.3250 - val_loss: 4.1399 - val_acc: 0.0000e+00
Epoch 16/100
40/40 [==============================] - 0s - loss: 1.9022 - acc: 0.3500 - val_loss: 4.2224 - val_acc: 0.0000e+00
Epoch 17/100
40/40 [==============================] - 0s - loss: 1.7430 - acc: 0.3750 - val_loss: 4.2493 - val_acc: 0.0000e+00
Epoch 18/100
40/40 [==============================] - 0s - loss: 1.9563 - acc: 0.2750 - val_loss: 4.2644 - val_acc: 0.0000e+00
Epoch 19/100
40/40 [==============================] - 0s - loss: 1.7386 - acc: 0.3250 - val_loss: 4.4126 - val_acc: 0.0000e+00
Epoch 20/100
40/40 [==============================] - 0s - loss: 1.9165 - acc: 0.2250 - val_loss: 4.6452 - val_acc: 0.0000e+00
Epoch 21/100
40/40 [==============================] - 0s - loss: 1.8197 - acc: 0.2500 - val_loss: 4.7228 - val_acc: 0.0000e+00
Epoch 22/100
40/40 [==============================] - 0s - loss: 1.8053 - acc: 0.3750 - val_loss: 4.7153 - val_acc: 0.0000e+00
Epoch 23/100
40/40 [==============================] - 0s - loss: 1.7812 - acc: 0.3000 - val_loss: 4.6776 - val_acc: 0.0000e+00
Epoch 24/100
40/40 [==============================] - 0s - loss: 1.6713 - acc: 0.4000 - val_loss: 4.5989 - val_acc: 0.0000e+00
Epoch 25/100
40/40 [==============================] - 0s - loss: 1.7330 - acc: 0.3000 - val_loss: 4.6292 - val_acc: 0.0000e+00
Epoch 26/100
40/40 [==============================] - 0s - loss: 1.6678 - acc: 0.3750 - val_loss: 4.6244 - val_acc: 0.0000e+00
Epoch 27/100
40/40 [==============================] - 0s - loss: 1.6372 - acc: 0.3750 - val_loss: 4.5683 - val_acc: 0.0000e+00
Epoch 28/100
40/40 [==============================] - 0s - loss: 1.7395 - acc: 0.3500 - val_loss: 4.4970 - val_acc: 0.0000e+00
Epoch 29/100
40/40 [==============================] - 0s - loss: 1.4677 - acc: 0.4500 - val_loss: 4.5299 - val_acc: 0.0000e+00
Epoch 30/100
40/40 [==============================] - 0s - loss: 1.5230 - acc: 0.4250 - val_loss: 4.7302 - val_acc: 0.0000e+00
Epoch 31/100
40/40 [==============================] - 0s - loss: 1.4134 - acc: 0.5500 - val_loss: 4.7844 - val_acc: 0.0000e+00
Epoch 32/100
40/40 [==============================] - 0s - loss: 1.5426 - acc: 0.4250 - val_loss: 4.6144 - val_acc: 0.0000e+00
Epoch 33/100
40/40 [==============================] - 0s - loss: 1.5099 - acc: 0.4750 - val_loss: 4.4600 - val_acc: 0.0000e+00
Epoch 34/100
40/40 [==============================] - 0s - loss: 1.2820 - acc: 0.6250 - val_loss: 4.4429 - val_acc: 0.0000e+00
Epoch 35/100
40/40 [==============================] - 0s - loss: 1.2806 - acc: 0.7250 - val_loss: 4.5038 - val_acc: 0.0000e+00
Epoch 36/100
40/40 [==============================] - 0s - loss: 1.2761 - acc: 0.5500 - val_loss: 4.7097 - val_acc: 0.0000e+00
Epoch 37/100
40/40 [==============================] - 0s - loss: 1.3835 - acc: 0.5750 - val_loss: 4.8846 - val_acc: 0.0000e+00
Epoch 38/100
40/40 [==============================] - 0s - loss: 1.2725 - acc: 0.5000 - val_loss: 4.9952 - val_acc: 0.0000e+00
Epoch 39/100
40/40 [==============================] - 0s - loss: 1.3248 - acc: 0.5750 - val_loss: 5.0767 - val_acc: 0.0000e+00
Epoch 40/100
40/40 [==============================] - 0s - loss: 1.3104 - acc: 0.5750 - val_loss: 5.1657 - val_acc: 0.0000e+00
Epoch 41/100
40/40 [==============================] - 0s - loss: 1.1607 - acc: 0.7000 - val_loss: 5.2389 - val_acc: 0.0000e+00
Epoch 42/100
40/40 [==============================] - 0s - loss: 1.1298 - acc: 0.7500 - val_loss: 5.2929 - val_acc: 0.0000e+00
Epoch 43/100
40/40 [==============================] - 0s - loss: 1.2123 - acc: 0.5750 - val_loss: 5.1895 - val_acc: 0.0000e+00
Epoch 44/100
40/40 [==============================] - 0s - loss: 1.1448 - acc: 0.6750 - val_loss: 5.1515 - val_acc: 0.0000e+00
Epoch 45/100
40/40 [==============================] - 0s - loss: 1.1284 - acc: 0.6500 - val_loss: 5.1852 - val_acc: 0.0000e+00
Epoch 46/100
40/40 [==============================] - 0s - loss: 0.9661 - acc: 0.7000 - val_loss: 5.3255 - val_acc: 0.0000e+00
Epoch 47/100
40/40 [==============================] - 0s - loss: 0.9683 - acc: 0.7000 - val_loss: 5.4291 - val_acc: 0.0000e+00
Epoch 48/100
40/40 [==============================] - 0s - loss: 1.0151 - acc: 0.6750 - val_loss: 5.4709 - val_acc: 0.0000e+00
Epoch 49/100
40/40 [==============================] - 0s - loss: 0.9411 - acc: 0.7250 - val_loss: 5.4944 - val_acc: 0.0000e+00
Epoch 50/100
40/40 [==============================] - 0s - loss: 0.9321 - acc: 0.6750 - val_loss: 5.5001 - val_acc: 0.0000e+00
Epoch 51/100
40/40 [==============================] - 0s - loss: 0.7512 - acc: 0.8500 - val_loss: 5.6478 - val_acc: 0.0000e+00
Epoch 52/100
40/40 [==============================] - 0s - loss: 0.9296 - acc: 0.7000 - val_loss: 5.7814 - val_acc: 0.0000e+00
Epoch 53/100
40/40 [==============================] - 0s - loss: 0.8078 - acc: 0.8000 - val_loss: 5.9215 - val_acc: 0.0000e+00
Epoch 54/100
40/40 [==============================] - 0s - loss: 0.8056 - acc: 0.7500 - val_loss: 5.9988 - val_acc: 0.0000e+00
Epoch 55/100
40/40 [==============================] - 0s - loss: 0.7605 - acc: 0.7750 - val_loss: 6.0570 - val_acc: 0.0000e+00
Epoch 56/100
40/40 [==============================] - 0s - loss: 0.6924 - acc: 0.8250 - val_loss: 5.9082 - val_acc: 0.0000e+00
Epoch 57/100
40/40 [==============================] - 0s - loss: 0.7135 - acc: 0.8500 - val_loss: 5.8423 - val_acc: 0.0000e+00
Epoch 58/100
40/40 [==============================] - 0s - loss: 0.7896 - acc: 0.7250 - val_loss: 5.7381 - val_acc: 0.0000e+00
Epoch 59/100
40/40 [==============================] - 0s - loss: 0.7294 - acc: 0.7750 - val_loss: 5.7542 - val_acc: 0.0000e+00
Epoch 60/100
40/40 [==============================] - 0s - loss: 0.6455 - acc: 0.9000 - val_loss: 5.9547 - val_acc: 0.0000e+00
Epoch 61/100
40/40 [==============================] - 0s - loss: 0.6985 - acc: 0.8000 - val_loss: 6.3333 - val_acc: 0.0000e+00
Epoch 62/100
40/40 [==============================] - 0s - loss: 0.6620 - acc: 0.8500 - val_loss: 6.5852 - val_acc: 0.0000e+00
Epoch 63/100
40/40 [==============================] - 0s - loss: 0.6108 - acc: 0.9000 - val_loss: 6.7101 - val_acc: 0.0000e+00
Epoch 64/100
40/40 [==============================] - 0s - loss: 0.6904 - acc: 0.8000 - val_loss: 6.6728 - val_acc: 0.0000e+00
Epoch 65/100
40/40 [==============================] - 0s - loss: 0.6353 - acc: 0.8250 - val_loss: 6.6706 - val_acc: 0.0000e+00
Epoch 66/100
40/40 [==============================] - 0s - loss: 0.5136 - acc: 0.9500 - val_loss: 6.5850 - val_acc: 0.0000e+00
Epoch 67/100
40/40 [==============================] - 0s - loss: 0.6079 - acc: 0.8000 - val_loss: 6.5406 - val_acc: 0.0000e+00
Epoch 68/100
40/40 [==============================] - 0s - loss: 0.5682 - acc: 0.8250 - val_loss: 6.5649 - val_acc: 0.0000e+00
Epoch 69/100
40/40 [==============================] - 0s - loss: 0.5347 - acc: 0.9250 - val_loss: 6.7340 - val_acc: 0.0000e+00
Epoch 70/100
40/40 [==============================] - 0s - loss: 0.5548 - acc: 0.8500 - val_loss: 7.0775 - val_acc: 0.0000e+00
Epoch 71/100
40/40 [==============================] - 0s - loss: 0.6102 - acc: 0.8000 - val_loss: 7.2517 - val_acc: 0.0000e+00
Epoch 72/100
40/40 [==============================] - 0s - loss: 0.4424 - acc: 0.9000 - val_loss: 7.2342 - val_acc: 0.0000e+00
Epoch 73/100
40/40 [==============================] - 0s - loss: 0.4114 - acc: 0.9500 - val_loss: 7.0969 - val_acc: 0.0000e+00
Epoch 74/100
40/40 [==============================] - 0s - loss: 0.4754 - acc: 0.9250 - val_loss: 6.9667 - val_acc: 0.0000e+00
Epoch 75/100
40/40 [==============================] - 0s - loss: 0.4041 - acc: 0.9250 - val_loss: 6.9153 - val_acc: 0.0000e+00
Epoch 76/100
40/40 [==============================] - 0s - loss: 0.4426 - acc: 0.8750 - val_loss: 6.9116 - val_acc: 0.0000e+00
Epoch 77/100
40/40 [==============================] - 0s - loss: 0.3969 - acc: 0.9250 - val_loss: 7.0404 - val_acc: 0.0000e+00
Epoch 78/100
40/40 [==============================] - 0s - loss: 0.4992 - acc: 0.9000 - val_loss: 7.1926 - val_acc: 0.0000e+00
Epoch 79/100
40/40 [==============================] - 0s - loss: 0.3628 - acc: 0.9250 - val_loss: 7.3990 - val_acc: 0.0000e+00
Epoch 80/100
40/40 [==============================] - 0s - loss: 0.4559 - acc: 0.8250 - val_loss: 7.4637 - val_acc: 0.0000e+00
Epoch 81/100
40/40 [==============================] - 0s - loss: 0.4295 - acc: 0.8750 - val_loss: 7.4680 - val_acc: 0.0000e+00
Epoch 82/100
40/40 [==============================] - 0s - loss: 0.3216 - acc: 0.9500 - val_loss: 7.4212 - val_acc: 0.0000e+00
Epoch 83/100
40/40 [==============================] - 0s - loss: 0.3157 - acc: 0.9750 - val_loss: 7.4207 - val_acc: 0.0000e+00
Epoch 84/100
40/40 [==============================] - 0s - loss: 0.2902 - acc: 0.9250 - val_loss: 7.3935 - val_acc: 0.0000e+00
Epoch 85/100
40/40 [==============================] - 0s - loss: 0.4123 - acc: 0.9000 - val_loss: 7.4168 - val_acc: 0.0000e+00
Epoch 86/100
40/40 [==============================] - 0s - loss: 0.2211 - acc: 0.9500 - val_loss: 7.5890 - val_acc: 0.0000e+00
Epoch 87/100
40/40 [==============================] - 0s - loss: 0.2806 - acc: 0.9250 - val_loss: 7.8276 - val_acc: 0.0000e+00
Epoch 88/100
40/40 [==============================] - 0s - loss: 0.2512 - acc: 0.9750 - val_loss: 7.7102 - val_acc: 0.0000e+00
Epoch 89/100
40/40 [==============================] - 0s - loss: 0.2267 - acc: 0.9500 - val_loss: 7.5137 - val_acc: 0.0000e+00
Epoch 90/100
40/40 [==============================] - 0s - loss: 0.2813 - acc: 0.9500 - val_loss: 7.4542 - val_acc: 0.0000e+00
Epoch 91/100
40/40 [==============================] - 0s - loss: 0.1851 - acc: 0.9750 - val_loss: 7.5837 - val_acc: 0.0000e+00
Epoch 92/100
40/40 [==============================] - 0s - loss: 0.2280 - acc: 0.9750 - val_loss: 7.7942 - val_acc: 0.0000e+00
Epoch 93/100
40/40 [==============================] - 0s - loss: 0.2342 - acc: 0.9500 - val_loss: 7.8429 - val_acc: 0.0000e+00
Epoch 94/100
40/40 [==============================] - 0s - loss: 0.2487 - acc: 0.9750 - val_loss: 7.8892 - val_acc: 0.0000e+00
Epoch 95/100
40/40 [==============================] - 0s - loss: 0.1420 - acc: 0.9750 - val_loss: 8.0694 - val_acc: 0.0000e+00
Epoch 96/100
40/40 [==============================] - 0s - loss: 0.1911 - acc: 0.9750 - val_loss: 8.3231 - val_acc: 0.0000e+00
Epoch 97/100
40/40 [==============================] - 0s - loss: 0.2417 - acc: 0.9250 - val_loss: 8.3801 - val_acc: 0.0000e+00
Epoch 98/100
40/40 [==============================] - 0s - loss: 0.1576 - acc: 0.9500 - val_loss: 8.3906 - val_acc: 0.0000e+00
Epoch 99/100
40/40 [==============================] - 0s - loss: 0.1539 - acc: 0.9750 - val_loss: 8.3971 - val_acc: 0.0000e+00
Epoch 100/100
40/40 [==============================] - 0s - loss: 0.1500 - acc: 1.0000 - val_loss: 8.4615 - val_acc: 0.0000e+00





<keras.callbacks.History at 0x12f3825c0>
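Training accuracy climbs to 100% while validation accuracy stays at 0, which, with only five images per class, points to heavy overfitting. One common mitigation, sketched below purely as an illustration (it is not part of the original experiment), is to augment the images with Keras' ImageDataGenerator before they are flattened; here `images` is assumed to be an (N, 25, 25, 3) array of the training pictures scaled to 0-1 and `Y` the one-hot labels.

# Sketch: augment the 25x25 RGB training images before flattening them.
# Assumes `images` has shape (N, 25, 25, 3) with values in 0-1 and `Y` is one-hot.
from keras.preprocessing.image import ImageDataGenerator
import numpy as np

datagen = ImageDataGenerator(
    rotation_range=15,        # small random rotations
    width_shift_range=0.1,    # random horizontal shifts
    height_shift_range=0.1,   # random vertical shifts
    horizontal_flip=True)     # random left-right flips

def augment(images, Y, copies=10):
    aug_x, aug_y = [], []
    for batch_x, batch_y in datagen.flow(images, Y, batch_size=len(images), shuffle=False):
        aug_x.append(batch_x)
        aug_y.append(batch_y)
        if len(aug_x) >= copies:
            break
    # Flatten each augmented image back into the 1875-length vectors the model expects.
    X = np.concatenate(aug_x).transpose(0, 3, 1, 2).reshape(-1, 25 * 25 * 3)
    return X, np.concatenate(aug_y)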
# Check against the images in the test directory (data_pokemon/test/) and print the accuracy.
# Only pikatyu, kairyu and yadoran have test images here.
total = 0.
ok_count = 0.

# The directory names under train/ are reused to build the test file paths and labels.
for dir in os.listdir("data_pokemon/train"):
    if dir == ".DS_Store":
        continue

    dir1 = "data_pokemon/test/" + dir 
    label = 0

    if dir == "pikatyu":    
        label = 0
    elif dir == "kairyu": 
        label = 1
    elif dir == "yadoran": 
        label = 2

    for file in os.listdir(dir1):
        if file != ".DS_Store":
            label_list.append(label)
            filepath = dir1 + "/" + file
            img = Image.open(filepath, 'r')
            img.save(filepath + '.png', 'PNG', quality=100, optimize=True)
            image = np.array(Image.open(filepath).resize((25, 25)))
            print(filepath)
            image = image.transpose(2, 0, 1)
            image = image.reshape(1, image.shape[0] * image.shape[1] * image.shape[2]).astype("float32")[0]
            result = model.predict_classes(np.array([image / 255.]))
            print("label:", label, "result:", result[0], "score:", model.predict_proba(np.array([image / 255.])))

            total += 1.

            if label == result[0]:
                ok_count += 1.

print("seikai: ", ok_count / total * 100, "%")
data_pokemon/test/yadoran/yodoran1.png
1/1 [==============================] - 0s
1/1 [==============================] - 0s
label: 2 result: 2 score: [[ 0.00267951  0.02860479  0.96871567]]
data_pokemon/test/pikatyu/pikatyu1.png
1/1 [==============================] - 0s
1/1 [==============================] - 0s
label: 0 result: 0 score: [[  9.91316438e-01   8.17965996e-03   5.03923628e-04]]
data_pokemon/test/kairyu/kairyu1.png
1/1 [==============================] - 0s
1/1 [==============================] - 0s
label: 1 result: 1 score: [[ 0.23075765  0.71538484  0.05385749]]
seikai:  100.0 %
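# Save the model architecture (JSON) and the learned weights so they can be reloaded later.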
model_json_str = model.to_json()
open('model.json', 'w').write(model_json_str)
model.save_weights('weights.h5');

The Verdict

Here is the Satomi Ishihara photo used this time.

[Image: the Satomi Ishihara photo used for the prediction]
Yep, she's cute. So, when I ran the photo through the model trained above, the answer was... Yadoran (Slowbro), of all things...

[Image: the prediction result]

(Note: the sample code below shows the result for a different image.)

This result is partly because the number of Pokémon was small this time; if you have some spare time, try adding more Pokémon to the training data and running it yourself.

from keras.models import Sequential
from keras.models import model_from_json
from keras.layers import Activation, Dense, Dropout
from keras.utils.np_utils import to_categorical
from keras.optimizers import Adagrad
from keras.optimizers import Adam
import numpy as np
from PIL import Image
import os
image_list = []
label_list = []
# Rebuild the model from the saved architecture (model.json) and weights (weights.h5).
model = model_from_json(open('model.json').read())
model.load_weights('weights.h5')
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
filepath = "images/12565523_1018344161557771_1287741129840044727_n.png"
image = np.array(Image.open(filepath).resize((25, 25)))
image = image.transpose(2, 0, 1)
image = image.reshape(1, image.shape[0] * image.shape[1] * image.shape[2]).astype("float32")[0]
result = model.predict_classes(np.array([image / 255.]))
print(result[0],model.predict_proba(np.array([image / 255.])))
#score = model.evaluate(X_test, Y_test, verbose=0)
1/1 [==============================] - 0s
1/1 [==============================] - 0s
3 [[ 0.12168646  0.00455755  0.00925004  0.62193692  0.12116482  0.05011627
   0.01367795  0.03526022  0.00642292  0.01592684]]
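The printed result is just a class index plus the probability vector. To turn it back into a Pokémon name, a small helper like the one below (added here for illustration, using the same label order as the training loop) does the trick:

# Map the predicted class index back to a Pokemon name, using the same label
# order as in the training loop (0 = pikatyu, 1 = kairyu, ..., 9 = menokurage).
class_names = ["pikatyu", "kairyu", "yadoran", "pijyon", "kodack",
               "koratta", "zubat", "galop", "sanders", "menokurage"]

probs = model.predict_proba(np.array([image / 255.]))[0]
best = int(np.argmax(probs))
print("Most similar Pokemon:", class_names[best], "(probability %.3f)" % probs[best])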

Reference

Extremely simple image classification with Keras (apples and oranges) - Qiita