Depression_Detection_APP/pages/1_analisis_wajah.py
#1_analisis_wajah.py
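"""Streamlit page for real-time facial emotion analysis.

Webcam frames are sampled every FRAME_INTERVAL seconds, classified with a
TFLite EfficientNet model, and summarized as positive vs. negative emotion
scores once the session ends.
"""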
import streamlit as st
import cv2
import numpy as np
import time
import matplotlib.pyplot as plt
from collections import Counter, deque
import tensorflow as tf
from tensorflow.keras.applications.efficientnet import preprocess_input
from contextlib import contextmanager
import os
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
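# Emotion classes; the order is assumed to match the TFLite model's output
# indices, since argmax results are mapped straight back into this list.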
CATEGORIES = ['Angry', 'Sad', 'Happy', 'Fearful', 'Disgust', 'Neutral', 'Surprised']
POSITIVE_EMOTIONS = {'Happy', 'Surprised', 'Neutral'}
NEGATIVE_EMOTIONS = {'Angry', 'Sad', 'Fearful', 'Disgust'}
FRAME_INTERVAL = 0.5
MODEL_PATH = "model/Model_EfficientNet.tflite"
CAMERA_WIDTH = 320
CAMERA_HEIGHT = 180
MEDIA_WIDTH = 320
MEDIA_HEIGHT = 180
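# The "{}" placeholder is filled with 1 or 0 to toggle (muted) autoplay once
# the analysis has been running for a few seconds.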
YOUTUBE_URL = "https://www.youtube.com/embed/3XA0bB79oGc?autoplay={}&mute=1"
MAX_PREDICTIONS_BUFFER = 1000
NEUTRAL_INDEX = CATEGORIES.index('Neutral')
def load_custom_css():
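    """Inject the page's dark-theme CSS into the Streamlit app."""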
    css = """
<style>
.main {
background-color: #0b0f2e;
color: white;
}
.stApp {
background-color: #0b0f2e;
}
.metric-container {
background-color: #1a2352;
padding: 15px;
border-radius: 8px;
text-align: center;
box-shadow: 0 2px 4px rgba(0,0,0,0.2);
}
.metric-label {
font-size: 14px;
opacity: 0.8;
margin-bottom: 5px;
}
.metric-value {
font-size: 20px;
font-weight: bold;
color: white;
}
.results-container {
background-color: #111c4e;
border-radius: 10px;
padding: 20px;
margin-top: 20px;
}
.emotion-score {
font-size: 24px;
font-weight: bold;
text-align: center;
margin: 10px 0;
}
.negative-score { color: #ff6b6b; }
.positive-score { color: #69db7c; }
.stButton > button {
background-color: #007bff;
color: white;
border-radius: 8px;
padding: 12px 24px;
width: 100%;
transition: background-color 0.3s ease;
font-size: 16px;
border: none;
cursor: pointer;
}
.stButton > button:hover {
background-color: #0056b3;
}
.status-indicator {
padding: 8px 16px;
border-radius: 20px;
font-size: 14px;
font-weight: 600;
display: inline-block;
margin: 15px 0;
}
.status-analyzing { background-color: #28a745; color: white; }
.status-stopped { background-color: #dc3545; color: white; }
.status-ready { background-color: #ffc107; color: black; }
.youtube-container {
background-color: #1a2352;
border-radius: 8px;
padding: 15px;
box-shadow: 0 2px 4px rgba(0,0,0,0.2);
margin-bottom: 20px;
}
.camera-container {
background-color: #1a2352;
border-radius: 8px;
padding: 15px;
box-shadow: 0 2px 4px rgba(0,0,0,0.2);
margin-bottom: 20px;
}
.control-panel {
background-color: #1a2352;
border-radius: 8px;
padding: 20px;
box-shadow: 0 2px 4px rgba(0,0,0,0.2);
}
.panel-title {
font-size: 18px;
font-weight: bold;
margin-bottom: 15px;
color: white;
}
.main-title {
font-size: 32px;
font-weight: bold;
margin-bottom: 15px;
color: white;
}
.hidden {
display: none;
}
.stImage > img {
border-radius: 8px;
}
</style>
"""
    st.markdown(css, unsafe_allow_html=True)

class EmotionAnalyzer:
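    """Wraps the TFLite emotion classifier: cached loading, preprocessing, inference."""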
    def __init__(self):
        self.interpreter = None
        self.input_details = None
        self.output_details = None
        self.is_loaded = False

    # The leading underscore on _self keeps st.cache_resource from trying to
    # hash the analyzer instance itself.
    @st.cache_resource
    def load_model(_self):
        if not os.path.exists(MODEL_PATH):
            raise FileNotFoundError(f"Model file {MODEL_PATH} tidak ditemukan")
        try:
            with st.spinner('Memuat model TFLite...'):
                start_time = time.time()
                interpreter = tf.lite.Interpreter(model_path=MODEL_PATH)
                interpreter.allocate_tensors()
                input_details = interpreter.get_input_details()
                output_details = interpreter.get_output_details()
                load_time = time.time() - start_time
                logger.info(f"Model loaded successfully in {load_time:.2f} seconds")
                return interpreter, input_details, output_details
        except Exception as e:
            logger.error(f"Error loading model: {e}")
            raise

    def initialize(self):
        if not self.is_loaded:
            self.interpreter, self.input_details, self.output_details = self.load_model()
            self.is_loaded = True

    def preprocess_image(self, image):
        try:
            image_resized = cv2.resize(image, (224, 224))
            image_array = np.expand_dims(image_resized, axis=0).astype(np.float32)
            image_array = preprocess_input(image_array)
            return image_array
        except Exception as e:
            logger.error(f"Error in preprocessing: {e}")
            return None

    def predict(self, image):
        if not self.is_loaded:
            raise RuntimeError("Model belum dimuat")
        try:
            processed_image = self.preprocess_image(image)
            if processed_image is None:
                # Fall back to Neutral so the UI keeps updating on bad frames.
                return NEUTRAL_INDEX, 1.0
            self.interpreter.set_tensor(self.input_details[0]['index'], processed_image)
            self.interpreter.invoke()
            predictions = self.interpreter.get_tensor(self.output_details[0]['index'])[0]
            pred_idx = np.argmax(predictions)
            confidence = predictions[pred_idx]
            return pred_idx, confidence
        except Exception as e:
            logger.error(f"Error in prediction: {e}")
            return NEUTRAL_INDEX, 1.0

class EmotionDataManager:
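    """Accumulates per-frame predictions and derives the summary statistics.

    Bounded deques (MAX_PREDICTIONS_BUFFER entries) keep memory flat during
    long sessions.
    """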
    def __init__(self):
        self.predictions = deque(maxlen=MAX_PREDICTIONS_BUFFER)
        self.timestamps = deque(maxlen=MAX_PREDICTIONS_BUFFER)
        self.start_time = None

    def reset(self):
        self.predictions.clear()
        self.timestamps.clear()
        self.start_time = None

    def add_prediction(self, pred_idx, timestamp=None):
        if self.start_time is None:
            self.start_time = time.time()
        if timestamp is None:
            timestamp = time.time() - self.start_time
        self.predictions.append(pred_idx)
        self.timestamps.append(timestamp)

    def get_emotion_scores(self):
        if not self.predictions:
            return 0, 0
        emotion_names = [CATEGORIES[idx] for idx in self.predictions]
        total = len(emotion_names)
        negative_count = sum(1 for emotion in emotion_names if emotion in NEGATIVE_EMOTIONS)
        positive_count = sum(1 for emotion in emotion_names if emotion in POSITIVE_EMOTIONS)
        negative_score = (negative_count / total) * 100
        positive_score = (positive_count / total) * 100
        return negative_score, positive_score

    def get_dominant_emotions(self, top_n=3):
        if not self.predictions:
            return []
        emotion_counts = Counter(self.predictions)
        most_common = emotion_counts.most_common(top_n)
        total = sum(emotion_counts.values())
        return [(CATEGORIES[emotion], count, (count/total)*100)
                for emotion, count in most_common]

    def get_emotion_changes(self, top_n=3):
        if len(self.predictions) < 2:
            return []
        changes = []
        for i in range(1, len(self.predictions)):
            if self.predictions[i] != self.predictions[i-1]:
                from_emotion = CATEGORIES[self.predictions[i-1]]
                to_emotion = CATEGORIES[self.predictions[i]]
                change_time = self.timestamps[i]
                changes.append((from_emotion, to_emotion, change_time))
        return sorted(changes, key=lambda x: x[2])[:top_n]

@contextmanager
def camera_context():
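    """Open webcam 0 at the configured resolution and always release it."""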
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_HEIGHT)
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
    try:
        yield cap
    finally:
        cap.release()

def create_metric_display(label, value, container_class="metric-container"):
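    """Return a small labelled metric card as an HTML snippet."""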
    return f"""
<div class="{container_class}">
<div class="metric-label">{label}</div>
<div class="metric-value">{value}</div>
</div>
"""
def create_status_indicator(status):
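    """Map an analysis status key to a colored status badge (HTML)."""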
    status_map = {
        'analyzing': ('Sedang Menganalisis', 'status-analyzing'),
        'stopped': ('Analisis Dihentikan', 'status-stopped'),
        'ready': ('Siap Memulai', 'status-ready')
    }
    text, css_class = status_map.get(status, ('Unknown', 'status-ready'))
    return f'<span class="status-indicator {css_class}">{text}</span>'

def initialize_session_state():
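    """Make sure every session_state key used on this page exists."""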
    defaults = {
        'is_analyzing': False,
        'results_ready': False,
        'current_expression': "-",
        'current_accuracy': "-",
        'last_capture_time': 0,
        'analyzer': None,
        'data_manager': None,
        'video_started': False,
        'analysis_start_time': None
    }
    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value
    if st.session_state.analyzer is None:
        st.session_state.analyzer = EmotionAnalyzer()
    if st.session_state.data_manager is None:
        st.session_state.data_manager = EmotionDataManager()

def render_results():
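    """Render aggregate scores, the expression timeline, and dominant emotions."""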
    data_manager = st.session_state.data_manager
    if not data_manager.predictions:
        st.warning("Tidak ada data yang dikumpulkan selama analisis.")
        return
    st.success("✅ Analisis emosi wajah selesai!")
    negative_score, positive_score = data_manager.get_emotion_scores()
    col1, col2 = st.columns(2)
    with col1:
        st.markdown(create_metric_display(
            "Emosi Negatif",
            f"{negative_score:.1f}%",
            "metric-container negative-score"
        ), unsafe_allow_html=True)
    with col2:
        st.markdown(create_metric_display(
            "Emosi Positif",
            f"{positive_score:.1f}%",
            "metric-container positive-score"
        ), unsafe_allow_html=True)
    st.markdown("<div class='results-container'>", unsafe_allow_html=True)
    if len(data_manager.predictions) > 1:
        fig, ax = plt.subplots(figsize=(12, 6))
        fig.patch.set_facecolor('#f4f4f7')
        ax.set_facecolor('#f4f4f7')
        timestamps = list(data_manager.timestamps)
        expressions = [CATEGORIES[idx] for idx in data_manager.predictions]
        ax.scatter(timestamps, expressions, c='#3a7aff', alpha=0.7, s=30)
        ax.plot(timestamps, expressions, color='#3a7aff', alpha=0.5, linewidth=1)
        ax.set_xlabel("Waktu (detik)", color='#111c4e', fontsize=12)
        ax.set_ylabel("Ekspresi", color='#111c4e', fontsize=12)
        ax.set_title("Timeline Perubahan Ekspresi", color='#111c4e', fontsize=14, fontweight='bold')
        ax.tick_params(axis='both', colors='#111c4e', labelsize=10)
        for spine in ax.spines.values():
            spine.set_color('#111c4e')
        ax.grid(True, alpha=0.3)
        plt.tight_layout()
        st.pyplot(fig)
        plt.close(fig)
    col1, col2 = st.columns(2)
    with col1:
        st.subheader("Emosi Dominan")
        dominant_emotions = data_manager.get_dominant_emotions()
        for emotion, count, percentage in dominant_emotions:
            st.write(f"**{emotion}**: {percentage:.1f}% ({count} deteksi)")
    with col2:
        st.subheader("Perubahan Tercepat")
        emotion_changes = data_manager.get_emotion_changes()
        if emotion_changes:
            for from_emotion, to_emotion, change_time in emotion_changes:
                st.write(f"**{from_emotion}** → **{to_emotion}** ({change_time:.1f}s)")
        else:
            st.info("Tidak ada perubahan emosi terdeteksi")
    st.markdown("</div>", unsafe_allow_html=True)

def main():
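    """Page entry point: layout, start/stop control, capture loop, and results."""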
    st.set_page_config(
        page_title="Analisis Emosi Wajah",
        layout="wide"
    )
    initialize_session_state()
    load_custom_css()
    st.markdown("<h1 class='main-title'>Analisis Emosi Wajah Real-time</h1>", unsafe_allow_html=True)
    status = 'analyzing' if st.session_state.is_analyzing else ('ready' if not st.session_state.results_ready else 'stopped')
    st.markdown(create_status_indicator(status), unsafe_allow_html=True)
    col1, col2 = st.columns([2, 1])
    with col1:
        youtube_placeholder = st.empty()
        youtube_placeholder.markdown(
            f'<iframe width="100%" height="350" src="{YOUTUBE_URL.format(1 if st.session_state.video_started else 0)}" '
            f'frameborder="0" allowfullscreen style="border-radius: 10px;"></iframe>',
            unsafe_allow_html=True
        )
    with col2:
        video_placeholder = st.empty()
        st.subheader("📊 Metrik Real-time")
        expression_placeholder = st.empty()
        accuracy_placeholder = st.empty()
        st.subheader("Kontrol")
        button_text = 'Akhiri Analisis' if st.session_state.is_analyzing else 'Mulai Analisis'
        start_stop_button = st.button(button_text, key="main_control")
    expression_placeholder.markdown(
        create_metric_display("Ekspresi Terdeteksi", st.session_state.current_expression),
        unsafe_allow_html=True
    )
    accuracy_placeholder.markdown(
        create_metric_display("Tingkat Keyakinan", st.session_state.current_accuracy),
        unsafe_allow_html=True
    )
    if start_stop_button:
        if not st.session_state.is_analyzing:
            try:
                st.session_state.analyzer.initialize()
                st.session_state.data_manager.reset()
                st.session_state.is_analyzing = True
                st.session_state.results_ready = False
                st.session_state.last_capture_time = 0
                st.session_state.video_started = False
                st.session_state.analysis_start_time = time.time()
                youtube_placeholder.markdown(
                    f'<iframe width="100%" height="350" src="{YOUTUBE_URL.format(0)}" '
                    f'frameborder="0" allowfullscreen style="border-radius: 10px;"></iframe>',
                    unsafe_allow_html=True
                )
                st.rerun()
            except Exception as e:
                st.error(f"Error memulai analisis: {e}")
                st.session_state.is_analyzing = False
        else:
            st.session_state.is_analyzing = False
            st.session_state.results_ready = True
            st.session_state.current_expression = "-"
            st.session_state.current_accuracy = "-"
            st.session_state.video_started = False
            youtube_placeholder.markdown(
                f'<iframe width="100%" height="350" src="{YOUTUBE_URL.format(0)}" '
                f'frameborder="0" allowfullscreen style="border-radius: 10px;"></iframe>',
                unsafe_allow_html=True
            )
            video_placeholder.image(np.zeros((MEDIA_HEIGHT, MEDIA_WIDTH, 3), dtype=np.uint8), channels="RGB", width=MEDIA_WIDTH)
            st.rerun()
    if st.session_state.is_analyzing:
        try:
            with camera_context() as cap:
                if not cap.isOpened():
                    st.error("❌ Tidak dapat mengakses kamera")
                    st.session_state.is_analyzing = False
                    youtube_placeholder.markdown(
                        f'<iframe width="100%" height="350" src="{YOUTUBE_URL.format(0)}" '
                        f'frameborder="0" allowfullscreen style="border-radius: 10px;"></iframe>',
                        unsafe_allow_html=True
                    )
                    st.rerun()
                # The loop ends when the stop button triggers a Streamlit rerun
                # or the camera stops delivering frames.
                while st.session_state.is_analyzing:
                    ret, frame = cap.read()
                    if not ret:
                        st.error("❌ Error membaca frame dari kamera")
                        break
                    current_time = time.time()
                    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    video_placeholder.image(frame_rgb, channels="RGB", width=MEDIA_WIDTH)
                    # Start the (muted) YouTube video about 10 seconds into the analysis.
                    if not st.session_state.video_started and (current_time - st.session_state.analysis_start_time) >= 10:
                        st.session_state.video_started = True
                        youtube_placeholder.markdown(
                            f'<iframe width="100%" height="350" src="{YOUTUBE_URL.format(1)}" '
                            f'frameborder="0" allowfullscreen style="border-radius: 10px;"></iframe>',
                            unsafe_allow_html=True
                        )
                    # Classify at most one frame per FRAME_INTERVAL seconds.
                    if current_time - st.session_state.last_capture_time >= FRAME_INTERVAL:
                        pred_idx, confidence = st.session_state.analyzer.predict(frame_rgb)
                        if pred_idx is not None:
                            st.session_state.data_manager.add_prediction(pred_idx)
                            current_expression = CATEGORIES[pred_idx]
                            current_accuracy = f"{confidence*100:.1f}%"
                            st.session_state.current_expression = current_expression
                            st.session_state.current_accuracy = current_accuracy
                            expression_placeholder.markdown(
                                create_metric_display("Ekspresi Terdeteksi", current_expression),
                                unsafe_allow_html=True
                            )
                            accuracy_placeholder.markdown(
                                create_metric_display("Tingkat Keyakinan", current_accuracy),
                                unsafe_allow_html=True
                            )
                        st.session_state.last_capture_time = current_time
                    time.sleep(0.03)
        except Exception as e:
            st.error(f"Error selama analisis: {e}")
            st.session_state.is_analyzing = False
            youtube_placeholder.markdown(
                f'<iframe width="100%" height="350" src="{YOUTUBE_URL.format(0)}" '
                f'frameborder="0" allowfullscreen style="border-radius: 10px;"></iframe>',
                unsafe_allow_html=True
            )
            video_placeholder.image(np.zeros((MEDIA_HEIGHT, MEDIA_WIDTH, 3), dtype=np.uint8), channels="RGB", width=MEDIA_WIDTH)
    if not st.session_state.is_analyzing:
        expression_placeholder.markdown(
            create_metric_display("Ekspresi Terdeteksi", "-"),
            unsafe_allow_html=True
        )
        accuracy_placeholder.markdown(
            create_metric_display("Tingkat Keyakinan", "-"),
            unsafe_allow_html=True
        )
    if st.session_state.results_ready and not st.session_state.is_analyzing:
        render_results()
        col1, col2 = st.columns(2)
        with col1:
            if st.button("🔄 Reset Analisis", key="reset"):
                st.session_state.data_manager.reset()
                st.session_state.is_analyzing = False
                st.session_state.results_ready = False
                st.session_state.current_expression = "-"
                st.session_state.current_accuracy = "-"
                st.session_state.video_started = False
                youtube_placeholder.markdown(
                    f'<iframe width="100%" height="350" src="{YOUTUBE_URL.format(0)}" '
                    f'frameborder="0" allowfullscreen style="border-radius: 10px;"></iframe>',
                    unsafe_allow_html=True
                )
                video_placeholder.image(np.zeros((MEDIA_HEIGHT, MEDIA_WIDTH, 3), dtype=np.uint8), channels="RGB", width=MEDIA_WIDTH)
                st.rerun()
        with col2:
            if st.button("📝 Lanjut ke Jurnal", key="journal"):
                try:
                    st.switch_page("pages/2_jurnaling.py")
                except Exception as e:
                    st.error(f"Error: Halaman journaling tidak tersedia - {e}")

if __name__ == "__main__":
    main()