Today is Day 23. We can build a zebrafish drug-effect analysis system based on a convolutional neural network (CNN); the code is below.
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import os
# Set image dimensions and batch size
img_width, img_height = 150, 150
batch_size = 32
epochs = 20
# Data paths
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
# Configure data augmentation
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True
)
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary'
)
validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary'
)
# Build the model
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(img_width, img_height, 3)),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(128, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(512, activation='relu'),
    Dropout(0.5),
    Dense(1, activation='sigmoid')
])
# Compile the model
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy']
)
# Set up callbacks
checkpoint = ModelCheckpoint('best_model.h5', monitor='val_accuracy', save_best_only=True, mode='max')
early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
# Train the model
history = model.fit(
    train_generator,
    steps_per_epoch=train_generator.samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // batch_size,
    callbacks=[checkpoint, early_stopping]
)
# Save the final model
model.save('final_model.h5')
# Evaluate the model
loss, accuracy = model.evaluate(validation_generator)
print(f'Validation Loss: {loss}')
print(f'Validation Accuracy: {accuracy}')
# Use the model for prediction
from tensorflow.keras.preprocessing import image
import numpy as np
def predict_class(img_path, model):
    img = image.load_img(img_path, target_size=(img_width, img_height))
    img_array = image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)
    img_array /= 255.0  # apply the same rescaling used during training
    prediction = model.predict(img_array)
    class_idx = int(prediction[0][0] > 0.5)
    return 'Drug Effect' if class_idx == 1 else 'No Drug Effect'
# Test prediction
test_img_path = 'data/test/test_image.jpg'
prediction = predict_class(test_img_path, model)
print(f'Prediction: {prediction}')
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import os
This imports TensorFlow and the Keras modules we need to build and train the model. ImageDataGenerator handles data augmentation, while ModelCheckpoint and EarlyStopping monitor training and stop it early once it stops improving.
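If you want to confirm the environment is ready before training, a minimal check like this (my own addition, not part of the original script) prints the TensorFlow version and whether a GPU is visible:
print(tf.__version__)                           # e.g. 2.x
print(tf.config.list_physical_devices('GPU'))   # an empty list means training will run on CPU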
img_width, img_height = 150, 150
batch_size = 32
epochs = 20
Here we set the image width and height to 150x150 pixels, the batch size to 32, and the number of training epochs to 20.
train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
Specify the folder paths for the training and validation data. You need to organize your image data inside these folders.
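flow_from_directory infers the two classes from the sub-folder names, so each of these directories needs exactly two class sub-folders (for example drug_effect and no_drug_effect; those names are placeholders of mine, not from the article). A quick sanity check under that assumption:
import os

for split in ('train', 'validation'):
    split_dir = os.path.join('data', split)
    # every sub-folder listed here becomes one class label for flow_from_directory
    print(split_dir, sorted(os.listdir(split_dir)))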
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True
)
validation_datagen = ImageDataGenerator(rescale=1./255)
Here we use ImageDataGenerator for data augmentation:
- rescale=1./255: scales pixel values into the [0, 1] range.
- shear_range=0.2: applies a random shear to the images.
- zoom_range=0.2: applies a random zoom.
- horizontal_flip=True: randomly flips images horizontally.
The validation data is only rescaled, not augmented, so that evaluation stays consistent. The sketch below previews what these random transforms look like.
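As a rough sketch (assuming matplotlib is installed and using a hypothetical sample image path), you can preview the augmentation on a single training image with ImageDataGenerator.random_transform:
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing import image

sample_path = 'data/train/drug_effect/sample.jpg'  # hypothetical file; point this at one of your images
img = image.img_to_array(image.load_img(sample_path, target_size=(img_width, img_height)))

fig, axes = plt.subplots(1, 4, figsize=(12, 3))
for ax in axes:
    augmented = train_datagen.random_transform(img)  # one random shear/zoom/flip combination
    ax.imshow(augmented.astype('uint8'))
    ax.axis('off')
plt.show()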
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary'
)
validation_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary'
)
The flow_from_directory method loads images from the specified folders and applies the augmentation. target_size sets the image size, batch_size sets the number of samples per batch, and class_mode='binary' indicates a binary classification task. A quick check of the resulting label mapping is shown below.
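Before training, it is worth confirming how the folder names were mapped to the 0/1 labels, since the 'Drug Effect' / 'No Drug Effect' strings used later depend on that mapping. A minimal check (the folder names in the comment are assumptions, yours may differ):
print(train_generator.class_indices)   # e.g. {'drug_effect': 0, 'no_drug_effect': 1}
images, labels = next(train_generator)
print(images.shape, labels.shape)      # expected: (32, 150, 150, 3) (32,)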
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(img_width, img_height, 3)),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Conv2D(128, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Flatten(),
    Dense(512, activation='relu'),
    Dropout(0.5),
    Dense(1, activation='sigmoid')
])
This builds a convolutional neural network (CNN): three Conv2D + MaxPooling2D blocks with 32, 64, and 128 filters extract increasingly abstract features, Flatten turns the feature maps into a vector, Dense(512) with ReLU learns the classification features, Dropout(0.5) reduces overfitting, and the final Dense(1) with a sigmoid outputs the probability for the binary decision.
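To see the resulting layer shapes and parameter counts, model.summary() is enough; the shapes in the comments below follow from the 150x150 input and the default 'valid' convolutions:
model.summary()
# Conv2D(32):  (148, 148, 32) -> MaxPooling2D: (74, 74, 32)
# Conv2D(64):  (72, 72, 64)   -> MaxPooling2D: (36, 36, 64)
# Conv2D(128): (34, 34, 128)  -> MaxPooling2D: (17, 17, 128)
# Flatten:     17 * 17 * 128 = 36,992 features feeding the Dense(512) layer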
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy']
)
The model is compiled with the Adam optimizer and the binary cross-entropy loss, with accuracy as the evaluation metric.
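If you want to tune the learning rate instead of relying on the 'adam' string default, you can pass an optimizer instance; the 1e-4 below is an illustrative assumption, not a value from the article:
from tensorflow.keras.optimizers import Adam

model.compile(
    optimizer=Adam(learning_rate=1e-4),  # assumed example value; the article uses the default 'adam'
    loss='binary_crossentropy',
    metrics=['accuracy']
)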
checkpoint = ModelCheckpoint('best_model.h5', monitor='val_accuracy', save_best_only=True, mode='max')
early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
ModelCheckpoint with save_best_only=True keeps only the model with the best validation accuracy, and EarlyStopping with restore_best_weights=True rolls the model back to its best weights when training is stopped early.
history = model.fit(
    train_generator,
    steps_per_epoch=train_generator.samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // batch_size,
    callbacks=[checkpoint, early_stopping]
)
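The history object returned by fit records the per-epoch metrics, so a short sketch like this (assuming matplotlib is installed) lets you inspect the training and validation curves:
import matplotlib.pyplot as plt

plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()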
model.save('final_model.h5')
Save the final trained model.
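Either saved file can be loaded back later with load_model; best_model.h5 from the checkpoint is usually the one you want for inference:
from tensorflow.keras.models import load_model

restored_model = load_model('best_model.h5')  # or 'final_model.h5' for the last-epoch weights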
loss, accuracy = model.evaluate(validation_generator)
print(f'Validation Loss: {loss}')
print(f'Validation Accuracy: {accuracy}')
Evaluate the model on the validation data and print the loss and accuracy.
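Accuracy alone can hide class imbalance, so here is a sketch of a per-class breakdown. It assumes scikit-learn is installed (an extra dependency not used in the article) and rebuilds the validation generator with shuffle=False so the predictions line up with the stored labels:
from sklearn.metrics import confusion_matrix, classification_report

eval_generator = validation_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary',
    shuffle=False  # keep the order fixed so predictions match eval_generator.classes
)
probs = model.predict(eval_generator)
preds = (probs.ravel() > 0.5).astype(int)
print(confusion_matrix(eval_generator.classes, preds))
print(classification_report(eval_generator.classes, preds))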
from tensorflow.keras.preprocessing import image
import numpy as np
def predict_class(img_path, model):
    img = image.load_img(img_path, target_size=(img_width, img_height))
    img_array = image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)
    img_array /= 255.0  # apply the same rescaling used during training
    prediction = model.predict(img_array)
    class_idx = int(prediction[0][0] > 0.5)
    return 'Drug Effect' if class_idx == 1 else 'No Drug Effect'
# Test prediction
test_img_path = 'data/test/test_image.jpg'
prediction = predict_class(test_img_path, model)
print(f'Prediction: {prediction}')
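The same helper can be looped over a whole folder of test images; the data/test directory below is an assumption about how your test files are laid out:
import os

test_dir = 'data/test'
for fname in sorted(os.listdir(test_dir)):
    if fname.lower().endswith(('.jpg', '.jpeg', '.png')):
        print(fname, '->', predict_class(os.path.join(test_dir, fname), model))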
The goal of this program is to learn the drug's effect from image data and to make predictions with the trained model.