Today is day 30 of the challenge. First of all, I want to thank iT邦幫忙 for giving me the motivation to keep writing code for 30 days straight. With AI as popular as it is right now, I combined deep learning with zebrafish behavior analysis. After writing code for this many days, my skills have grown and my efficiency has improved. Below is the most efficient version of the code after my revisions.
import os
import numpy as np
import matplotlib.pyplot as plt
from ultralytics import YOLO
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.utils import to_categorical

# Set up the YOLOv8 model and file paths
yolo_model = YOLO('yolov8n.pt')  # use the small YOLOv8 model
video_path = 'path_to_your_video.mp4'  # path to the input video
output_dir = 'output_results'
os.makedirs(output_dir, exist_ok=True)

# Detect zebrafish positions and save the results
results = yolo_model.predict(source=video_path, save=True, project=output_dir)
detections = results  # predict() returns one Results object per frame

# Extract zebrafish features (position, velocity, direction)
def extract_features(detections):
    features = []
    for i, detection in enumerate(detections):
        frame_features = []
        for xyxy in detection.boxes.xyxy:
            center_x = float((xyxy[0] + xyxy[2]) / 2)
            center_y = float((xyxy[1] + xyxy[3]) / 2)
            # velocity and direction start at 0 and are filled in for frames after the first
            frame_features.append([center_x, center_y, 0.0, 0.0])
        # compute velocity and direction relative to the previous frame
        if i > 0:
            prev_frame_features = features[-1]
            for j in range(min(len(frame_features), len(prev_frame_features))):
                prev_center_x, prev_center_y = prev_frame_features[j][:2]
                curr_center_x, curr_center_y = frame_features[j][:2]
                velocity = np.sqrt((curr_center_x - prev_center_x)**2 + (curr_center_y - prev_center_y)**2)
                direction = np.arctan2(curr_center_y - prev_center_y, curr_center_x - prev_center_x)
                frame_features[j][2] = velocity
                frame_features[j][3] = direction
        features.append(frame_features)
    return np.array(features)

features = extract_features(detections)

# Arrange the features for the LSTM model (assuming one zebrafish per frame)
lookback = 10      # length of the LSTM input sequence
num_features = 4   # features per fish (x, y, velocity, direction)
X, y = [], []
for i in range(len(features) - lookback):
    X.append(features[i:i + lookback].reshape(lookback, num_features))
    y.append(np.random.randint(0, 3))  # placeholder: 3 behavior classes with random labels (real annotation needed)
X = np.array(X)
y = to_categorical(y, num_classes=3)

# Build the LSTM model for behavior prediction
lstm_model = Sequential()
lstm_model.add(LSTM(64, input_shape=(lookback, num_features), return_sequences=True))
lstm_model.add(LSTM(64, return_sequences=False))
lstm_model.add(Dense(3, activation='softmax'))  # 3 behavior classes
lstm_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model
lstm_model.fit(X, y, epochs=50, batch_size=32, validation_split=0.2)

# Predict behavior on the data
predictions = lstm_model.predict(X)
predicted_behavior = np.argmax(predictions, axis=1)

# Behavior analysis
def analyze_behavior(predicted_behavior):
    unique, counts = np.unique(predicted_behavior, return_counts=True)
    behavior_distribution = dict(zip(unique, counts))
    return behavior_distribution

behavior_distribution = analyze_behavior(predicted_behavior)

# Visualize the zebrafish trajectories
def visualize_trajectory(features):
    plt.figure()
    # features has shape (frames, fish, 4): plot one trajectory per fish
    for i in range(features.shape[1]):
        plt.plot(features[:, i, 0], features[:, i, 1], 'o-', label=f'Fish {i+1}')
    plt.title('Zebrafish Trajectory')
    plt.xlabel('X Position')
    plt.ylabel('Y Position')
    plt.legend()
    plt.show()

visualize_trajectory(features)

# Visualize the behavior distribution
def plot_behavior_distribution(behavior_distribution):
    plt.figure()
    plt.bar([str(k) for k in behavior_distribution.keys()], list(behavior_distribution.values()), color='blue')
    plt.title('Behavior Distribution')
    plt.xlabel('Behavior')
    plt.ylabel('Count')
    plt.show()

plot_behavior_distribution(behavior_distribution)

# Save the model and results
lstm_model.save(os.path.join(output_dir, 'zebrafish_behavior_lstm.h5'))

# Generate a report
report_path = os.path.join(output_dir, 'behavior_analysis_report.txt')
with open(report_path, 'w') as f:
    f.write(f'Behavior Distribution: {behavior_distribution}\n')
    f.write(f'Total Predictions: {len(predicted_behavior)}\n')

print(f"Analysis complete. Results saved to {output_dir}")
from ultralytics import YOLO

# Set up the YOLOv8 model and file paths
yolo_model = YOLO('yolov8n.pt')  # use the small YOLOv8 model
video_path = 'path_to_your_video.mp4'  # path to the input video
output_dir = 'output_results'
os.makedirs(output_dir, exist_ok=True)

# Detect zebrafish positions and save the results
results = yolo_model.predict(source=video_path, save=True, project=output_dir)
detections = results  # predict() returns one Results object per frame
YOLO('yolov8n.pt') loads a pretrained YOLOv8 model (here the small yolov8n.pt variant), which keeps detection fast. video_path specifies the path of the video to process, and output_dir is the folder where results are saved; if it does not exist, os.makedirs creates it. yolo_model.predict runs frame-by-frame object detection on the video and saves each frame's results, and detections holds the bounding-box information for the zebrafish in every frame.
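As a quick sanity check on this step, the per-frame boxes can be inspected directly. This is a minimal sketch assuming detections is the list of ultralytics Results objects returned above:

# Minimal sketch: inspect the first few frames of detections
for frame_idx, det in enumerate(detections[:3]):
    boxes = det.boxes.xyxy  # tensor of shape (num_fish, 4): x1, y1, x2, y2
    print(f"Frame {frame_idx}: {len(boxes)} fish detected")
    for x1, y1, x2, y2 in boxes.tolist():
        print(f"  center = ({(x1 + x2) / 2:.1f}, {(y1 + y2) / 2:.1f})")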
import numpy as np

def extract_features(detections):
    features = []
    for i, detection in enumerate(detections):
        frame_features = []
        for xyxy in detection.boxes.xyxy:
            center_x = float((xyxy[0] + xyxy[2]) / 2)
            center_y = float((xyxy[1] + xyxy[3]) / 2)
            # velocity and direction start at 0 and are filled in for frames after the first
            frame_features.append([center_x, center_y, 0.0, 0.0])
        # compute velocity and direction relative to the previous frame
        if i > 0:
            prev_frame_features = features[-1]
            for j in range(min(len(frame_features), len(prev_frame_features))):
                prev_center_x, prev_center_y = prev_frame_features[j][:2]
                curr_center_x, curr_center_y = frame_features[j][:2]
                velocity = np.sqrt((curr_center_x - prev_center_x)**2 + (curr_center_y - prev_center_y)**2)
                direction = np.arctan2(curr_center_y - prev_center_y, curr_center_x - prev_center_x)
                frame_features[j][2] = velocity
                frame_features[j][3] = direction
        features.append(frame_features)
    return np.array(features)

features = extract_features(detections)
The extract_features function extracts the zebrafish features for each frame from detections: the center position (center_x, center_y), the speed (velocity), and the direction of motion (direction). center_x and center_y are computed from the top-left and bottom-right corners of the zebrafish's bounding box; the velocity is how far the center moved since the previous frame, and the direction is the angle of that displacement.
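For intuition, here is a tiny worked example of the velocity and direction computation with made-up center coordinates:

import numpy as np

# Hypothetical centers of one fish in two consecutive frames
prev_x, prev_y = 100.0, 200.0
curr_x, curr_y = 103.0, 204.0

velocity = np.sqrt((curr_x - prev_x)**2 + (curr_y - prev_y)**2)  # 5.0 pixels/frame
direction = np.arctan2(curr_y - prev_y, curr_x - prev_x)         # ~0.927 rad (about 53 degrees)
print(velocity, direction)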
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.utils import to_categorical
# 整理特徵資料以餵給LSTM模型(假設每幀只檢測到一條斑馬魚)
lookback = 10 # LSTM的時間序列長度
num_features = 4 # 每個斑馬魚的特徵數量 (x, y, velocity, direction)
X, y = [], []
for i in range(len(features) - lookback):
X.append(features[i:i + lookback].reshape(lookback, num_features))
y.append(np.random.randint(0, 3)) # 假設有3種行為分類,隨機標籤(需實際標註)
X = np.array(X)
y = to_categorical(y, num_classes=3)
# 建立LSTM模型進行行為預測
model = Sequential()
model.add(LSTM(64, input_shape=(lookback, num_features), return_sequences=True))
model.add(LSTM(64, return_sequences=False))
model.add(Dense(3, activation='softmax')) # 假設3種行為分類
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# 訓練模型
model.fit(X, y, epochs=50, batch_size=32, validation_split=0.2)
The lookback parameter defines how many frames of history the LSTM considers for each prediction; here the model predicts a behavior from the features of the previous 10 frames. The labels (y) are randomly generated to stand in for different behavior classes; in a real application they need to be annotated by experts. The first LSTM layer returns the full sequence (return_sequences=True), while the second outputs only the last time step. The final fully connected layer (Dense) performs the behavior classification.
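To swap the random placeholder labels for real annotations, one simple option is a per-frame label file. A minimal sketch, assuming a hypothetical labels.csv (not part of the original post) with one integer class per frame:

import numpy as np
from tensorflow.keras.utils import to_categorical

# Hypothetical annotation file: one behavior class (0, 1, or 2) per video frame
frame_labels = np.loadtxt('labels.csv', dtype=int)

# Label each window with the behavior at its last frame,
# mirroring the X windows built above
y = [frame_labels[i + lookback - 1] for i in range(len(features) - lookback)]
y = to_categorical(y, num_classes=3)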
# Predict behavior on the data
predictions = lstm_model.predict(X)
predicted_behavior = np.argmax(predictions, axis=1)

# Behavior analysis
def analyze_behavior(predicted_behavior):
    unique, counts = np.unique(predicted_behavior, return_counts=True)
    behavior_distribution = dict(zip(unique, counts))
    return behavior_distribution

behavior_distribution = analyze_behavior(predicted_behavior)
lstm_model.predict(X) produces a prediction for each input window, and predicted_behavior holds the predicted behavior class for each one. The analyze_behavior function counts how often each behavior appears in the predictions and returns the result as a dictionary, behavior_distribution, mapping each behavior to its count.
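Because the classes are plain integers here, mapping them to readable names makes the distribution easier to report. A small sketch with made-up behavior names (the post never names the three classes):

# Hypothetical behavior names; the real names depend on how the data was annotated
behavior_names = {0: 'swimming', 1: 'resting', 2: 'darting'}
named_distribution = {behavior_names[k]: int(v) for k, v in behavior_distribution.items()}
print(named_distribution)  # e.g. {'swimming': 42, 'resting': 17, 'darting': 9}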
import matplotlib.pyplot as plt

# Visualize the zebrafish trajectories
def visualize_trajectory(features):
    plt.figure()
    # features has shape (frames, fish, 4): plot one trajectory per fish
    for i in range(features.shape[1]):
        plt.plot(features[:, i, 0], features[:, i, 1], 'o-', label=f'Fish {i+1}')
    plt.title('Zebrafish Trajectory')
    plt.xlabel('X Position')
    plt.ylabel('Y Position')
    plt.legend()
    plt.show()

visualize_trajectory(features)

# Visualize the behavior distribution
def plot_behavior_distribution(behavior_distribution):
    plt.figure()
    plt.bar([str(k) for k in behavior_distribution.keys()], list(behavior_distribution.values()), color='blue')
    plt.title('Behavior Distribution')
    plt.xlabel('Behavior')
    plt.ylabel('Count')
    plt.show()

plot_behavior_distribution(behavior_distribution)
The visualize_trajectory function plots the zebrafish's movement trajectory, showing the path the fish follows across frames. The plot_behavior_distribution function draws the distribution of predicted behaviors as a bar chart, which makes it easy to see which behaviors occur most often.
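On a headless machine or in a batch job, plt.show() will not display anything; one common alternative, sketched here, is to save the figures into output_dir instead:

# Sketch: write the trajectory plot to a file instead of displaying it
plt.figure()
for i in range(features.shape[1]):
    plt.plot(features[:, i, 0], features[:, i, 1], 'o-', label=f'Fish {i+1}')
plt.title('Zebrafish Trajectory')
plt.legend()
plt.savefig(os.path.join(output_dir, 'trajectory.png'), dpi=150)
plt.close()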
import os

# Save the model and results
lstm_model.save(os.path.join(output_dir, 'zebrafish_behavior_lstm.h5'))

# Generate a report
report_path = os.path.join(output_dir, 'behavior_analysis_report.txt')
with open(report_path, 'w') as f:
    f.write(f'Behavior Distribution: {behavior_distribution}\n')
    f.write(f'Total Predictions: {len(predicted_behavior)}\n')

print(f"Analysis complete. Results saved to {output_dir}")
The trained model is saved as an .h5 file so the same model can be loaded and reused for prediction later. The behavior distribution and the total number of predictions are written to behavior_analysis_report.txt, making the analysis results easy to review.
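To reuse the saved model later without retraining, it can be loaded back with Keras. A minimal sketch:

from tensorflow.keras.models import load_model
import numpy as np

# Load the saved LSTM and classify a feature window
lstm_model = load_model('output_results/zebrafish_behavior_lstm.h5')
window = X[:1]  # the first training window as a stand-in for new data
pred = np.argmax(lstm_model.predict(window), axis=1)
print(pred)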