quilt code

[AI] cifar10 본문

daily/AI

[AI] cifar10

김뱅쇼 2023. 4. 20. 21:34

 

cifar10 : MNIST와 같이 머신러닝 연구에 벤치마크로 사용되는 이미지 dataset (참고: 아래 예제 코드는 제목과 달리 실제로는 Fashion MNIST 데이터셋을 사용한다)

 

 

day17)


1. mycifar10_1



1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
import tensorflow as tf
import numpy as np
 
# 1. Import the Fashion MNIST dataset (60k train / 10k test, 28x28 grayscale).
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
 
 
# 2. Preprocess: scale pixel values from [0, 255] down to [0, 1].
train_images, test_images = train_images / 255.0, test_images / 255.0
 
 
# 3. Build the model.
model = tf.keras.models.Sequential([
    # BUG FIX: the original wrote input_shape=(2828), which is the single
    # integer 2828, not the image shape. It must be the tuple (28, 28).
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax'),  # 10 = number of classes
])
 
 
# 4. Compile the model.
# sparse_categorical_crossentropy: labels are integer class ids, not one-hot.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
 
 
# 5. Train the model.
model.fit(train_images, train_labels, epochs=5)
 
 
# 6. Evaluate accuracy on the held-out test set.
loss, accuracy = model.evaluate(test_images, test_labels)
print(loss, accuracy)
 
 
# 7. Predict: each row of `predictions` is a 10-way probability vector.
predictions = model.predict(test_images)
print(predictions[0])
print(np.argmax(predictions[0]))  # index of the most likely class
cs





2-1. mycifar10_2_hw



1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import tensorflow as tf
import numpy as np
import cv2
 
# Load Fashion MNIST: 60k train / 10k test images of shape (28, 28).
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
 
# Scale pixel values from [0, 255] to [0, 1].
train_images, test_images = train_images / 255.0, test_images / 255.0
 
model = tf.keras.models.Sequential([
    # BUG FIX: (2828) was the integer 2828; the image shape must be the
    # tuple (28, 28).
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax),
])
 
# Higher accuracy means the model generalizes better.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
 
model.fit(train_images, train_labels, epochs=5)
 
print("test_images", test_images.shape)  # sanity-check the input shape
 
# Predict on the test set: how well does the trained model do on unseen data?
predict = model.predict(test_images)
 
cnt_o = 0  # number of correct predictions
cnt_x = 0  # number of wrong predictions
for i in range(len(test_images)):
    pred = np.argmax(predict[i])  # np.argmax: index of the max probability
    # BUG FIX: the predictions are on the TEST set, so they must be compared
    # against test_labels — the original compared against train_labels,
    # which makes the accuracy count meaningless.
    goog = test_labels[i]
    if pred == goog:
        cnt_o += 1
    else:
        cnt_x += 1
        print("i", i, "pred", pred, "goog", goog)
        # Save only the misclassified images as fault/<idx>_<pred>_<label>.jpg.
        print("{}_{}_{}".format(i, pred, goog))
        # BUG FIX: the original line was missing the closing parenthesis.
        # Multiply by 255 because the images were scaled to [0, 1];
        # without it, the saved JPEGs would be essentially all black.
        cv2.imwrite('fault/{}_{}_{}.jpg'.format(i, pred, goog),
                    test_images[i] * 255.0)
 
print("cnt_o", cnt_o)
print("cnt_x", cnt_x)
 
cs

cnt_o = 0  # number of correct predictions
cnt_x = 0  # number of wrong predictions
for i in range(10000):
    pred = np.argmax(predict[i])  # np.argmax: index of the max probability
    # BUG FIX: predictions are on the test set, so compare against
    # test_labels, not train_labels as the original did.
    goog = test_labels[i]
    if pred == goog:
        cnt_o += 1
    else:
        cnt_x += 1
        print("i", i, "pred", pred, "goog", goog)

        # Save only the misclassified images: fault/<idx>_<pred>_<label>.jpg
        print("{}_{}_{}".format(i, pred, goog))
        # Multiply by 255: the images were scaled to [0, 1], so without it
        # the saved JPEGs would be essentially all black.
        cv2.imwrite('fault/{}_{}_{}.jpg'.format(i, pred, goog),
                    test_images[i] * 255.0)




2-2. mycifar10_2 (강사님 코드)




1
2
3
4
5
6
7
8
9
10
11
12
13
14
import tensorflow as tf
import numpy as np
import cv2
import os
 
# Dump every Fashion MNIST image to disk as train/<label>/<idx>.jpg and
# test/<label>/<idx>.jpg, grouped by class label.
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
 
for idx, img in enumerate(train_images):
    label = train_labels[idx]
    # cv2.imwrite silently returns False when the target directory does not
    # exist, so make sure the per-label directory is there first.
    os.makedirs('train/{}'.format(label), exist_ok=True)
    cv2.imwrite('train/{}/{}.jpg'.format(label, idx), img)
 
for idx, img in enumerate(test_images):
    label = test_labels[idx]
    os.makedirs('test/{}'.format(label), exist_ok=True)
    cv2.imwrite('test/{}/{}.jpg'.format(label, idx), img)
cs
 

 


3.mycifar10_3_predict



1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import tensorflow as tf
import numpy as np
import cv2
 
# Fashion MNIST class labels:
# 0 : T-shirt/top
# 1 : Trouser
# 2 : Pullover
# 3 : Dress
# 4 : Coat
# 5 : Sandal
# 6 : Shirt
# 7 : Sneaker
# 8 : Bag
# 9 : Ankle boot
 
# 1. Import the Fashion MNIST dataset.
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images_256, test_labels) = fashion_mnist.load_data()
 
# Keep the raw [0, 255] test images (test_images_256) for saving to disk;
# use the [0, 1]-scaled copies for the network.
train_images, test_images = train_images / 255.0, test_images_256 / 255.0
 
model = tf.keras.models.Sequential([
    # BUG FIX: (2828) was the integer 2828; the image shape must be the
    # tuple (28, 28).
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax),
])
 
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
 
model.fit(train_images, train_labels, epochs=5)
 
 
predictions = model.predict(test_images)
 
for idx, prob in enumerate(predictions):
    idx_goog = test_labels[idx]        # ground-truth label
    idx_pred = np.argmax(prob)         # predicted label
    print(idx, idx_pred, idx_goog)
 
    if idx_goog != idx_pred:
        # BUG FIX: the original formatted the filename with `i` (the whole
        # probability vector) instead of the index, producing garbage names.
        # Save the raw [0, 255] image so the JPEG is visible.
        cv2.imwrite('fault/{}_{}_{}.jpg'.format(idx, idx_pred, idx_goog),
                    test_images_256[idx])
 
 
 
 
 
 
cs



predictions = model.predict(test_images)

for idx, prob in enumerate(predictions):
    idx_goog = test_labels[idx]   # ground-truth label
    idx_pred = np.argmax(prob)    # predicted label
    print(idx, idx_pred, idx_goog)

    if idx_goog != idx_pred:
        # Misclassified images go into the fault folder.
        # BUG FIX: the original formatted the filename with the probability
        # vector instead of the index, and had a stray trailing annotation
        # (": 틀린 이미지는 ...") that made the line a syntax error.
        cv2.imwrite('fault/{}_{}_{}.jpg'.format(idx, idx_pred, idx_goog),
                    test_images_256[idx])

 


4. mycifar10_4_save



1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import tensorflow as tf
import numpy as np
import cv2
 
# (duplicate `import tensorflow` / `import numpy` lines removed)
 
 
# 1. Import the Fashion MNIST dataset.
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images_256, test_labels) = fashion_mnist.load_data()
 
 
# 2. Preprocess: scale pixel values from [0, 255] to [0, 1].
train_images, test_images = train_images / 255.0, test_images_256 / 255.0
 
 
# 3. Build the model.
model = tf.keras.models.Sequential([
    # BUG FIX: (2828) was the integer 2828; the 28x28 image shape must be a
    # tuple. Flatten turns each image into a single 784-element vector.
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation='relu'),
    # BUG FIX: the original line was missing the closing parenthesis
    # (the trailing comment swallowed it). 10 = number of classes.
    tf.keras.layers.Dense(10, activation='softmax'),
])
 
 
# 4. Compile the model.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
 
 
# 5. Train, then persist the trained model so later scripts can reload it.
model.fit(train_images, train_labels, epochs=10)
 
model.save('fashion.h5')
 
 
cs


model.save('fashion.h5')

 


5. mycifar10_5_load



1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
import tensorflow as tf
import numpy as np
import cv2
import keras
# (duplicate tensorflow/numpy imports and the unused
#  `from keras.models import load_model` removed — the script calls
#  keras.models.load_model directly.)
 
 
# 1. Import the Fashion MNIST dataset.
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images_256, test_labels) = fashion_mnist.load_data()
 
 
# 2. Preprocess: scale pixel values from [0, 255] to [0, 1], matching the
# preprocessing used when the saved model was trained.
train_images, test_images = train_images / 255.0, test_images_256 / 255.0
 
 
# Reload the model saved by mycifar10_4_save — no retraining needed.
model = keras.models.load_model("fashion.h5")
 
# 6. Evaluate accuracy on the test set.
loss, accuracy = model.evaluate(test_images, test_labels)
print(loss, accuracy)
 
# 7. Predict and show the first test sample.
predictions = model.predict(test_images)
print("idx_goog :", test_labels[0])          # ground-truth label
print("predictions[0]: ", predictions[0])    # 10-way probability vector
print("idx_pred:", np.argmax(predictions[0]))  # predicted label
 
 
cs



model = keras.models.load_model("fashion.h5")

 

 

 


6. mycifar10_6_Internet



1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import tensorflow as tf
import numpy as np
import cv2
import keras
# (duplicate tensorflow/numpy imports and the unused
#  `from keras.models import load_model` removed.)
 
# Fashion MNIST class names, indexed by label id.
# BUG FIX: "Ankel boot" -> "Ankle boot" (the official class-name spelling).
classes = [
    "T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
    "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot",
]
 
# Classify an arbitrary image from disk with the saved Fashion MNIST model.
img = cv2.imread('37.jpg')  # numpy array, BGR channel order
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# BUG FIX: (2828) was the integer 2828; cv2.resize needs the
# (width, height) tuple (28, 28). Scale to [0, 1] to match training.
resize_img = cv2.resize(img_gray, (28, 28)) / 255
 
 
# resize_img = 1 - resize_img  # invert if the image background is reversed
# Add a leading batch dimension: the model expects (batch, 28, 28).
resize_img = np.reshape(resize_img, (1, 28, 28))
 
 
model = keras.models.load_model("fashion.h5")
 
 
# Predict: take the most probable class for the single input image.
predictions = model.predict(resize_img)
idx_pred = np.argmax(predictions[0])
 
 
print(idx_pred, classes[idx_pred])
 
 
 
 
 
 
cs


 

 

 

 

 

'daily > AI' 카테고리의 다른 글

[AI] OpenCV  (0) 2023.04.22
[AI] tensorflow (3)  (0) 2023.04.22
[AI] tensorflow (2)  (0) 2023.04.19
[AI] tensorflow (1)  (0) 2023.04.19
[AI] numpy  (0) 2023.04.17