first commit
14
README.md
Normal file
@@ -0,0 +1,14 @@
### Setup Environment
python -m venv depression_detection_app

### PowerShell Administrator
Set-ExecutionPolicy -Scope CurrentUser -ExecutionPolicy RemoteSigned

### Environment Activation
.\depression_detection_app\Scripts\activate

### Install Requirements
pip install -r requirements.txt

### Start Program
streamlit run main.py
71
main.py
Normal file
@@ -0,0 +1,71 @@
import streamlit as st


def set_custom_css():
    st.markdown(
        """
        <style>
        body {
            background-color: #f4f4f4;
            font-family: Arial, sans-serif;
            margin: 0; /* Remove the default body margin */
            padding: 0; /* Remove the default body padding */
        }

        .title {
            color: #f4f4f7;
            text-align: center;
            font-size: 2rem;
            font-weight: bold;
            margin-bottom: 10px;
        }

        .subtitle {
            text-align: center;
            font-size: 1.2rem;
            color: #f4f4f7;
            margin-bottom: 20px;
        }

        .stButton > button {
            background-color: #007bff;
            color: white;
            padding: 10px 24px;
            border-radius: 8px;
            font-size: 1rem;
            border: none;
            cursor: pointer;
            transition: 0.3s;
            display: block;
            margin: 0 auto;
            width: 50%;
        }

        .stButton > button:hover {
            background-color: #0056b3;
            box-shadow: 0 4px 8px rgba(0,0,0,0.1);
        }
        .center-container {
            display: flex;
            flex-direction: column;
            justify-content: center;
            align-items: center;
        }
        </style>
        """,
        unsafe_allow_html=True,
    )


def main():
    st.set_page_config(page_title="Aplikasi Pendeteksi Depresi", layout="wide")
    set_custom_css()

    st.markdown('<div class="title">Selamat Datang di Aplikasi Pendeteksi Indikasi Depresi</div>', unsafe_allow_html=True)
    st.markdown('<div class="subtitle">Aplikasi ini menganalisis ekspresi wajah dan teks untuk mendeteksi potensi indikasi depresi.</div>', unsafe_allow_html=True)

    st.markdown('<div class="center-container">', unsafe_allow_html=True)
    if st.button("Mulai Analisis"):
        st.switch_page("pages/1_analisis_wajah.py")
    st.markdown('</div>', unsafe_allow_html=True)


if __name__ == "__main__":
    main()
BIN
model/Model_EfficientNet.tflite
Normal file
Binary file not shown.
BIN
model/Pemodelan_GRU_Valid.keras
Normal file
Binary file not shown.
BIN
model/tokenizer_Valid.pkl
Normal file
Binary file not shown.
584
pages/1_analisis_wajah.py
Normal file
@@ -0,0 +1,584 @@
|
||||
#1_analisis_wajah.py
|
||||
import streamlit as st
|
||||
import cv2
|
||||
import numpy as np
|
||||
import time
|
||||
import matplotlib.pyplot as plt
|
||||
from collections import Counter, deque
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras.applications.efficientnet import preprocess_input
|
||||
from contextlib import contextmanager
|
||||
import os
|
||||
import logging
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CATEGORIES = ['Angry', 'Sad', 'Happy', 'Fearful', 'Disgust', 'Neutral', 'Surprised']
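# NOTE: this label order is assumed to match the index order of the TFLite model's output layer.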
|
||||
POSITIVE_EMOTIONS = {'Happy', 'Surprised', 'Neutral'}
|
||||
NEGATIVE_EMOTIONS = {'Angry', 'Sad', 'Fearful', 'Disgust'}
|
||||
FRAME_INTERVAL = 0.5
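# A frame is sent to the model roughly every 0.5 s; the camera preview itself refreshes on every loop iteration.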
|
||||
MODEL_PATH = "model/Model_EfficientNet.tflite"
|
||||
CAMERA_WIDTH = 320
|
||||
CAMERA_HEIGHT = 180
|
||||
MEDIA_WIDTH = 320
|
||||
MEDIA_HEIGHT = 180
|
||||
YOUTUBE_URL = "https://www.youtube.com/embed/3XA0bB79oGc?autoplay={}&mute=1"
|
||||
MAX_PREDICTIONS_BUFFER = 1000
|
||||
NEUTRAL_INDEX = CATEGORIES.index('Neutral')
|
||||
|
||||
def load_custom_css():
|
||||
css = """
|
||||
<style>
|
||||
.main {
|
||||
background-color: #0b0f2e;
|
||||
color: white;
|
||||
}
|
||||
.stApp {
|
||||
background-color: #0b0f2e;
|
||||
}
|
||||
.metric-container {
|
||||
background-color: #1a2352;
|
||||
padding: 15px;
|
||||
border-radius: 8px;
|
||||
text-align: center;
|
||||
box-shadow: 0 2px 4px rgba(0,0,0,0.2);
|
||||
}
|
||||
.metric-label {
|
||||
font-size: 14px;
|
||||
opacity: 0.8;
|
||||
margin-bottom: 5px;
|
||||
}
|
||||
.metric-value {
|
||||
font-size: 20px;
|
||||
font-weight: bold;
|
||||
color: white;
|
||||
}
|
||||
.results-container {
|
||||
background-color: #111c4e;
|
||||
border-radius: 10px;
|
||||
padding: 20px;
|
||||
margin-top: 20px;
|
||||
}
|
||||
.emotion-score {
|
||||
font-size: 24px;
|
||||
font-weight: bold;
|
||||
text-align: center;
|
||||
margin: 10px 0;
|
||||
}
|
||||
.negative-score { color: #ff6b6b; }
|
||||
.positive-score { color: #69db7c; }
|
||||
.stButton > button {
|
||||
background-color: #007bff;
|
||||
color: white;
|
||||
border-radius: 8px;
|
||||
padding: 12px 24px;
|
||||
width: 100%;
|
||||
transition: background-color 0.3s ease;
|
||||
font-size: 16px;
|
||||
border: none;
|
||||
cursor: pointer;
|
||||
}
|
||||
.stButton > button:hover {
|
||||
background-color: #0056b3;
|
||||
}
|
||||
.status-indicator {
|
||||
padding: 8px 16px;
|
||||
border-radius: 20px;
|
||||
font-size: 14px;
|
||||
font-weight: 600;
|
||||
display: inline-block;
|
||||
margin: 15px 0;
|
||||
}
|
||||
.status-analyzing { background-color: #28a745; color: white; }
|
||||
.status-stopped { background-color: #dc3545; color: white; }
|
||||
.status-ready { background-color: #ffc107; color: black; }
|
||||
.youtube-container {
|
||||
background-color: #1a2352;
|
||||
border-radius: 8px;
|
||||
padding: 15px;
|
||||
box-shadow: 0 2px 4px rgba(0,0,0,0.2);
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
.camera-container {
|
||||
background-color: #1a2352;
|
||||
border-radius: 8px;
|
||||
padding: 15px;
|
||||
box-shadow: 0 2px 4px rgba(0,0,0,0.2);
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
.control-panel {
|
||||
background-color: #1a2352;
|
||||
border-radius: 8px;
|
||||
padding: 20px;
|
||||
box-shadow: 0 2px 4px rgba(0,0,0,0.2);
|
||||
}
|
||||
.panel-title {
|
||||
font-size: 18px;
|
||||
font-weight: bold;
|
||||
margin-bottom: 15px;
|
||||
color: white;
|
||||
}
|
||||
.main-title {
|
||||
font-size: 32px;
|
||||
font-weight: bold;
|
||||
margin-bottom: 15px;
|
||||
color: white;
|
||||
}
|
||||
.hidden {
|
||||
display: none;
|
||||
}
|
||||
.stImage > img {
|
||||
border-radius: 8px;
|
||||
}
|
||||
</style>
|
||||
"""
|
||||
st.markdown(css, unsafe_allow_html=True)
|
||||
|
||||
class EmotionAnalyzer:
|
||||
def __init__(self):
|
||||
self.interpreter = None
|
||||
self.input_details = None
|
||||
self.output_details = None
|
||||
self.is_loaded = False
|
||||
|
||||
@st.cache_resource
|
||||
def load_model(_self):
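# The leading underscore in "_self" tells st.cache_resource not to hash the instance when caching.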
|
||||
if not os.path.exists(MODEL_PATH):
|
||||
raise FileNotFoundError(f"Model file {MODEL_PATH} tidak ditemukan")
|
||||
|
||||
try:
|
||||
with st.spinner('Memuat model TFLite...'):
|
||||
start_time = time.time()
|
||||
interpreter = tf.lite.Interpreter(model_path=MODEL_PATH)
|
||||
interpreter.allocate_tensors()
|
||||
|
||||
input_details = interpreter.get_input_details()
|
||||
output_details = interpreter.get_output_details()
|
||||
|
||||
load_time = time.time() - start_time
|
||||
logger.info(f"Model loaded successfully in {load_time:.2f} seconds")
|
||||
|
||||
return interpreter, input_details, output_details
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error loading model: {e}")
|
||||
raise
|
||||
|
||||
def initialize(self):
|
||||
if not self.is_loaded:
|
||||
self.interpreter, self.input_details, self.output_details = self.load_model()
|
||||
self.is_loaded = True
|
||||
|
||||
def preprocess_image(self, image):
|
||||
try:
|
||||
image_resized = cv2.resize(image, (224, 224))
|
||||
image_array = np.expand_dims(image_resized, axis=0).astype(np.float32)
|
||||
image_array = preprocess_input(image_array)
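# preprocess_input comes from Keras' EfficientNet module; the input is assumed to be prepared the same way the model saw during training.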
|
||||
return image_array
|
||||
except Exception as e:
|
||||
logger.error(f"Error in preprocessing: {e}")
|
||||
return None
|
||||
|
||||
def predict(self, image):
|
||||
if not self.is_loaded:
|
||||
raise RuntimeError("Model belum dimuat")
|
||||
|
||||
try:
|
||||
processed_image = self.preprocess_image(image)
|
||||
if processed_image is None:
|
||||
return NEUTRAL_INDEX, 1.0
|
||||
|
||||
self.interpreter.set_tensor(self.input_details[0]['index'], processed_image)
|
||||
|
||||
self.interpreter.invoke()
|
||||
|
||||
predictions = self.interpreter.get_tensor(self.output_details[0]['index'])[0]
|
||||
|
||||
pred_idx = np.argmax(predictions)
|
||||
confidence = predictions[pred_idx]
|
||||
|
||||
return pred_idx, confidence
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in prediction: {e}")
|
||||
return NEUTRAL_INDEX, 1.0
|
||||
|
||||
class EmotionDataManager:
|
||||
def __init__(self):
|
||||
self.predictions = deque(maxlen=MAX_PREDICTIONS_BUFFER)
|
||||
self.timestamps = deque(maxlen=MAX_PREDICTIONS_BUFFER)
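# Bounded deques: once MAX_PREDICTIONS_BUFFER entries are reached, the oldest predictions and timestamps are dropped automatically.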
|
||||
self.start_time = None
|
||||
|
||||
def reset(self):
|
||||
self.predictions.clear()
|
||||
self.timestamps.clear()
|
||||
self.start_time = None
|
||||
|
||||
def add_prediction(self, pred_idx, timestamp=None):
|
||||
if self.start_time is None:
|
||||
self.start_time = time.time()
|
||||
|
||||
if timestamp is None:
|
||||
timestamp = time.time() - self.start_time
|
||||
|
||||
self.predictions.append(pred_idx)
|
||||
self.timestamps.append(timestamp)
|
||||
|
||||
def get_emotion_scores(self):
|
||||
if not self.predictions:
|
||||
return 0, 0
|
||||
|
||||
emotion_names = [CATEGORIES[idx] for idx in self.predictions]
|
||||
total = len(emotion_names)
|
||||
|
||||
negative_count = sum(1 for emotion in emotion_names if emotion in NEGATIVE_EMOTIONS)
|
||||
positive_count = sum(1 for emotion in emotion_names if emotion in POSITIVE_EMOTIONS)
|
||||
|
||||
negative_score = (negative_count / total) * 100
|
||||
positive_score = (positive_count / total) * 100
|
||||
|
||||
return negative_score, positive_score
|
||||
|
||||
def get_dominant_emotions(self, top_n=3):
|
||||
if not self.predictions:
|
||||
return []
|
||||
|
||||
emotion_counts = Counter(self.predictions)
|
||||
most_common = emotion_counts.most_common(top_n)
|
||||
total = sum(emotion_counts.values())
|
||||
|
||||
return [(CATEGORIES[emotion], count, (count/total)*100)
|
||||
for emotion, count in most_common]
|
||||
|
||||
def get_emotion_changes(self, top_n=3):
|
||||
if len(self.predictions) < 2:
|
||||
return []
|
||||
|
||||
changes = []
|
||||
for i in range(1, len(self.predictions)):
|
||||
if self.predictions[i] != self.predictions[i-1]:
|
||||
from_emotion = CATEGORIES[self.predictions[i-1]]
|
||||
to_emotion = CATEGORIES[self.predictions[i]]
|
||||
change_time = self.timestamps[i]
|
||||
changes.append((from_emotion, to_emotion, change_time))
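# The returned list is ordered by when each change occurred (earliest first) and truncated to top_n.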
|
||||
|
||||
return sorted(changes, key=lambda x: x[2])[:top_n]
|
||||
|
||||
@contextmanager
|
||||
def camera_context():
|
||||
cap = cv2.VideoCapture(0)
|
||||
cap.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH)
|
||||
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_HEIGHT)
|
||||
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
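# A 1-frame capture buffer keeps reads close to real time, though not every OpenCV backend honours this property.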
|
||||
|
||||
try:
|
||||
yield cap
|
||||
finally:
|
||||
cap.release()
|
||||
|
||||
def create_metric_display(label, value, container_class="metric-container"):
|
||||
return f"""
|
||||
<div class="{container_class}">
|
||||
<div class="metric-label">{label}</div>
|
||||
<div class="metric-value">{value}</div>
|
||||
</div>
|
||||
"""
|
||||
|
||||
def create_status_indicator(status):
|
||||
status_map = {
|
||||
'analyzing': ('Sedang Menganalisis', 'status-analyzing'),
|
||||
'stopped': ('Analisis Dihentikan', 'status-stopped'),
|
||||
'ready': ('Siap Memulai', 'status-ready')
|
||||
}
|
||||
|
||||
text, css_class = status_map.get(status, ('Unknown', 'status-ready'))
|
||||
return f'<span class="status-indicator {css_class}">{text}</span>'
|
||||
|
||||
def initialize_session_state():
|
||||
defaults = {
|
||||
'is_analyzing': False,
|
||||
'results_ready': False,
|
||||
'current_expression': "-",
|
||||
'current_accuracy': "-",
|
||||
'last_capture_time': 0,
|
||||
'analyzer': None,
|
||||
'data_manager': None,
|
||||
'video_started': False,
|
||||
'analysis_start_time': None
|
||||
}
|
||||
|
||||
for key, value in defaults.items():
|
||||
if key not in st.session_state:
|
||||
st.session_state[key] = value
|
||||
|
||||
if st.session_state.analyzer is None:
|
||||
st.session_state.analyzer = EmotionAnalyzer()
|
||||
|
||||
if st.session_state.data_manager is None:
|
||||
st.session_state.data_manager = EmotionDataManager()
|
||||
|
||||
def render_results():
|
||||
data_manager = st.session_state.data_manager
|
||||
|
||||
if not data_manager.predictions:
|
||||
st.warning("Tidak ada data yang dikumpulkan selama analisis.")
|
||||
return
|
||||
|
||||
st.success("✅ Analisis emosi wajah selesai!")
|
||||
|
||||
negative_score, positive_score = data_manager.get_emotion_scores()
|
||||
|
||||
col1, col2 = st.columns(2)
|
||||
|
||||
with col1:
|
||||
st.markdown(create_metric_display(
|
||||
"Emosi Negatif",
|
||||
f"{negative_score:.1f}%",
|
||||
"metric-container negative-score"
|
||||
), unsafe_allow_html=True)
|
||||
|
||||
with col2:
|
||||
st.markdown(create_metric_display(
|
||||
"Emosi Positif",
|
||||
f"{positive_score:.1f}%",
|
||||
"metric-container positive-score"
|
||||
), unsafe_allow_html=True)
|
||||
|
||||
st.markdown("<div class='results-container'>", unsafe_allow_html=True)
|
||||
if len(data_manager.predictions) > 1:
|
||||
fig, ax = plt.subplots(figsize=(12, 6))
|
||||
fig.patch.set_facecolor('#f4f4f7')
|
||||
ax.set_facecolor('#f4f4f7')
|
||||
|
||||
timestamps = list(data_manager.timestamps)
|
||||
expressions = [CATEGORIES[idx] for idx in data_manager.predictions]
|
||||
|
||||
ax.scatter(timestamps, expressions, c='#3a7aff', alpha=0.7, s=30)
|
||||
ax.plot(timestamps, expressions, color='#3a7aff', alpha=0.5, linewidth=1)
|
||||
|
||||
ax.set_xlabel("Waktu (detik)", color='#111c4e', fontsize=12)
|
||||
ax.set_ylabel("Ekspresi", color='#111c4e', fontsize=12)
|
||||
ax.set_title("Timeline Perubahan Ekspresi", color='#111c4e', fontsize=14, fontweight='bold')
|
||||
|
||||
ax.tick_params(axis='both', colors='#111c4e', labelsize=10)
|
||||
for spine in ax.spines.values():
|
||||
spine.set_color('#111c4e')
|
||||
|
||||
ax.grid(True, alpha=0.3)
|
||||
plt.tight_layout()
|
||||
st.pyplot(fig)
|
||||
plt.close(fig)
|
||||
|
||||
col1, col2 = st.columns(2)
|
||||
|
||||
with col1:
|
||||
st.subheader("Emosi Dominan")
|
||||
dominant_emotions = data_manager.get_dominant_emotions()
|
||||
for emotion, count, percentage in dominant_emotions:
|
||||
st.write(f"**{emotion}**: {percentage:.1f}% ({count} deteksi)")
|
||||
|
||||
with col2:
|
||||
st.subheader("Perubahan Tercepat")
|
||||
emotion_changes = data_manager.get_emotion_changes()
|
||||
if emotion_changes:
|
||||
for from_emotion, to_emotion, change_time in emotion_changes:
|
||||
st.write(f"**{from_emotion}** → **{to_emotion}** ({change_time:.1f}s)")
|
||||
else:
|
||||
st.info("Tidak ada perubahan emosi terdeteksi")
|
||||
|
||||
st.markdown("</div>", unsafe_allow_html=True)
|
||||
|
||||
def main():
|
||||
st.set_page_config(
|
||||
page_title="Analisis Emosi Wajah",
|
||||
layout="wide"
|
||||
)
|
||||
|
||||
initialize_session_state()
|
||||
load_custom_css()
|
||||
|
||||
st.markdown("<h1 class='main-title'>Analisis Emosi Wajah Real-time</h1>", unsafe_allow_html=True)
|
||||
|
||||
status = 'analyzing' if st.session_state.is_analyzing else ('ready' if not st.session_state.results_ready else 'stopped')
|
||||
st.markdown(create_status_indicator(status), unsafe_allow_html=True)
|
||||
|
||||
col1, col2 = st.columns([2, 1])
|
||||
|
||||
with col1:
|
||||
youtube_placeholder = st.empty()
|
||||
youtube_placeholder.markdown(
|
||||
f'<iframe width="100%" height="350" src="{YOUTUBE_URL.format(1 if st.session_state.video_started else 0)}" '
|
||||
f'frameborder="0" allowfullscreen style="border-radius: 10px;"></iframe>',
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
st.markdown('</div>', unsafe_allow_html=True)
|
||||
|
||||
with col2:
|
||||
video_placeholder = st.empty()
|
||||
st.markdown('</div>', unsafe_allow_html=True)
|
||||
|
||||
st.subheader("📊 Metrik Real-time")
|
||||
|
||||
expression_placeholder = st.empty()
|
||||
accuracy_placeholder = st.empty()
|
||||
|
||||
st.subheader("Kontrol")
|
||||
button_text = 'Akhiri Analisis' if st.session_state.is_analyzing else 'Mulai Analisis'
|
||||
start_stop_button = st.button(button_text, key="main_control")
|
||||
|
||||
st.markdown('</div>', unsafe_allow_html=True)
|
||||
|
||||
expression_placeholder.markdown(
|
||||
create_metric_display("Ekspresi Terdeteksi", st.session_state.current_expression),
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
accuracy_placeholder.markdown(
|
||||
create_metric_display("Tingkat Keyakinan", st.session_state.current_accuracy),
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
|
||||
if start_stop_button:
|
||||
if not st.session_state.is_analyzing:
|
||||
try:
|
||||
st.session_state.analyzer.initialize()
|
||||
|
||||
st.session_state.data_manager.reset()
|
||||
st.session_state.is_analyzing = True
|
||||
st.session_state.results_ready = False
|
||||
st.session_state.last_capture_time = 0
|
||||
st.session_state.video_started = False
|
||||
st.session_state.analysis_start_time = time.time()
|
||||
|
||||
youtube_placeholder.markdown(
|
||||
f'<iframe width="100%" height="350" src="{YOUTUBE_URL.format(0)}" '
|
||||
f'frameborder="0" allowfullscreen style="border-radius: 10px;"></iframe>',
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
|
||||
st.rerun()
|
||||
|
||||
except Exception as e:
|
||||
st.error(f"Error memulai analisis: {e}")
|
||||
st.session_state.is_analyzing = False
|
||||
else:
|
||||
st.session_state.is_analyzing = False
|
||||
st.session_state.results_ready = True
|
||||
st.session_state.current_expression = "-"
|
||||
st.session_state.current_accuracy = "-"
|
||||
st.session_state.video_started = False
|
||||
youtube_placeholder.markdown(
|
||||
f'<iframe width="100%" height="350" src="{YOUTUBE_URL.format(0)}" '
|
||||
f'frameborder="0" allowfullscreen style="border-radius: 10px;"></iframe>',
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
video_placeholder.image(np.zeros((MEDIA_HEIGHT, MEDIA_WIDTH, 3), dtype=np.uint8), channels="RGB", width=MEDIA_WIDTH)
|
||||
st.rerun()
|
||||
|
||||
if st.session_state.is_analyzing:
|
||||
try:
|
||||
with camera_context() as cap:
|
||||
if not cap.isOpened():
|
||||
st.error("❌ Tidak dapat mengakses kamera")
|
||||
st.session_state.is_analyzing = False
|
||||
youtube_placeholder.markdown(
|
||||
f'<iframe width="100%" height="350" src="{YOUTUBE_URL.format(0)}" '
|
||||
f'frameborder="0" allowfullscreen style="border-radius: 10px;"></iframe>',
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
st.rerun()
|
||||
|
||||
while st.session_state.is_analyzing:
|
||||
ret, frame = cap.read()
|
||||
if not ret:
|
||||
st.error("❌ Error membaca frame dari kamera")
|
||||
break
|
||||
|
||||
current_time = time.time()
|
||||
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
|
||||
|
||||
video_placeholder.image(frame_rgb, channels="RGB", width=MEDIA_WIDTH)
|
||||
|
||||
if not st.session_state.video_started and (current_time - st.session_state.analysis_start_time) >= 10:
|
||||
st.session_state.video_started = True
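# The stimulus video starts autoplaying (autoplay=1 in the embed URL) 10 seconds after analysis begins.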
|
||||
youtube_placeholder.markdown(
|
||||
f'<iframe width="100%" height="350" src="{YOUTUBE_URL.format(1)}" '
|
||||
f'frameborder="0" allowfullscreen style="border-radius: 10px;"></iframe>',
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
|
||||
if current_time - st.session_state.last_capture_time >= FRAME_INTERVAL:
|
||||
pred_idx, confidence = st.session_state.analyzer.predict(frame_rgb)
|
||||
|
||||
if pred_idx is not None:
|
||||
st.session_state.data_manager.add_prediction(pred_idx)
|
||||
|
||||
current_expression = CATEGORIES[pred_idx]
|
||||
current_accuracy = f"{confidence*100:.1f}%"
|
||||
|
||||
st.session_state.current_expression = current_expression
|
||||
st.session_state.current_accuracy = current_accuracy
|
||||
|
||||
expression_placeholder.markdown(
|
||||
create_metric_display("Ekspresi Terdeteksi", current_expression),
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
accuracy_placeholder.markdown(
|
||||
create_metric_display("Tingkat Keyakinan", current_accuracy),
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
|
||||
st.session_state.last_capture_time = current_time
|
||||
|
||||
time.sleep(0.03)
|
||||
|
||||
except Exception as e:
|
||||
st.error(f"Error selama analisis: {e}")
|
||||
st.session_state.is_analyzing = False
|
||||
youtube_placeholder.markdown(
|
||||
f'<iframe width="100%" height="350" src="{YOUTUBE_URL.format(0)}" '
|
||||
f'frameborder="0" allowfullscreen style="border-radius: 10px;"></iframe>',
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
video_placeholder.image(np.zeros((MEDIA_HEIGHT, MEDIA_WIDTH, 3), dtype=np.uint8), channels="RGB", width=MEDIA_WIDTH)
|
||||
|
||||
if not st.session_state.is_analyzing:
|
||||
expression_placeholder.markdown(
|
||||
create_metric_display("Ekspresi Terdeteksi", "-"),
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
accuracy_placeholder.markdown(
|
||||
create_metric_display("Tingkat Keyakinan", "-"),
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
|
||||
if st.session_state.results_ready and not st.session_state.is_analyzing:
|
||||
render_results()
|
||||
|
||||
col1, col2 = st.columns(2)
|
||||
|
||||
with col1:
|
||||
if st.button("🔄 Reset Analisis", key="reset"):
|
||||
st.session_state.data_manager.reset()
|
||||
st.session_state.is_analyzing = False
|
||||
st.session_state.results_ready = False
|
||||
st.session_state.current_expression = "-"
|
||||
st.session_state.current_accuracy = "-"
|
||||
st.session_state.video_started = False
|
||||
youtube_placeholder.markdown(
|
||||
f'<iframe width="100%" height="350" src="{YOUTUBE_URL.format(0)}" '
|
||||
f'frameborder="0" allowfullscreen style="border-radius: 10px;"></iframe>',
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
video_placeholder.image(np.zeros((MEDIA_HEIGHT, MEDIA_WIDTH, 3), dtype=np.uint8), channels="RGB", width=MEDIA_WIDTH)
|
||||
st.rerun()
|
||||
|
||||
with col2:
|
||||
if st.button("📝 Lanjut ke Jurnal", key="journal"):
|
||||
try:
|
||||
st.switch_page("pages/2_jurnaling.py")
|
||||
except Exception as e:
|
||||
st.error(f"Error: Halaman journaling tidak tersedia - {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
334
pages/2_jurnaling.py
Normal file
@@ -0,0 +1,334 @@
|
||||
#2_jurnaling.py
|
||||
import streamlit as st
|
||||
import numpy as np
|
||||
import pickle
|
||||
import tensorflow as tf
|
||||
import matplotlib.pyplot as plt
|
||||
import pandas as pd
|
||||
import gdown
|
||||
import os
|
||||
import re
|
||||
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
|
||||
from nltk.corpus import stopwords
|
||||
|
||||
import nltk
|
||||
nltk.download('stopwords')
|
||||
|
||||
st.set_page_config(
|
||||
page_title="Analisis Journaling",
|
||||
layout="wide",
|
||||
initial_sidebar_state="collapsed"
|
||||
)
|
||||
|
||||
MODEL_URL = "https://drive.google.com/uc?id=1ArBDPUBcPMdsUzH_dKwdjVEK-DyXG9qz"
|
||||
TOKENIZER_URL = "https://drive.google.com/uc?id=1YVdwW-58y1Jie01MOjkd-4nY3bWFmt0E"
|
||||
MODEL_PATH = "model/Pemodelan_GRU_Valid.keras"
|
||||
TOKENIZER_PATH = "model/tokenizer_Valid.pkl"
|
||||
|
||||
os.makedirs("model", exist_ok=True)
|
||||
|
||||
@st.cache_resource
|
||||
def load_model():
|
||||
try:
|
||||
if not os.path.exists(MODEL_PATH):
|
||||
st.info("Mengunduh model dari Google Drive...")
|
||||
gdown.download(MODEL_URL, MODEL_PATH, quiet=False)
|
||||
return tf.keras.models.load_model(MODEL_PATH)
|
||||
except Exception as e:
|
||||
st.error(f"Gagal memuat model: {str(e)}")
|
||||
st.warning("Model tidak ditemukan. Menggunakan data demo.")
|
||||
return None
|
||||
|
||||
@st.cache_resource
|
||||
def load_tokenizer():
|
||||
try:
|
||||
if not os.path.exists(TOKENIZER_PATH):
|
||||
st.info("Mengunduh tokenizer dari Google Drive...")
|
||||
gdown.download(TOKENIZER_URL, TOKENIZER_PATH, quiet=False)
|
||||
with open(TOKENIZER_PATH, "rb") as handle:
|
||||
return pickle.load(handle)
|
||||
except Exception as e:
|
||||
st.error(f"Gagal memuat tokenizer: {str(e)}")
|
||||
st.warning("Tokenizer tidak ditemukan. Menggunakan data demo.")
|
||||
return None
|
||||
|
||||
model = load_model()
|
||||
tokenizer = load_tokenizer()
|
||||
|
||||
MAXLEN = 14
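# MAXLEN is assumed to be the sequence length the GRU model was trained with; inputs are padded to this length below.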
|
||||
|
||||
stop_words = set(stopwords.words('indonesian'))
|
||||
stemmer = StemmerFactory().create_stemmer()
|
||||
|
||||
st.markdown("""
|
||||
<style>
|
||||
.main-header {
|
||||
color: white;
|
||||
background-color: #1E1E5A;
|
||||
padding: 1.5rem;
|
||||
text-align: center;
|
||||
border-radius: 10px 10px 0 0;
|
||||
margin-bottom: 0;
|
||||
font-size: 2rem;
|
||||
font-weight: bold;
|
||||
}
|
||||
.sub-header {
|
||||
color: white;
|
||||
background-color: #1E1E5A;
|
||||
font-size: 1rem;
|
||||
padding: 0.7rem;
|
||||
text-align: center;
|
||||
border-radius: 0 0 10px 10px;
|
||||
margin-bottom: 2rem;
|
||||
letter-spacing: 1px;
|
||||
}
|
||||
.text-input-container {
|
||||
background-color: #f7f7f7;
|
||||
padding: 1rem;
|
||||
border-radius: 10px;
|
||||
margin-bottom: 1.5rem;
|
||||
box-shadow: 0 2px 5px rgba(0,0,0,0.1);
|
||||
}
|
||||
.stButton > button {
|
||||
background-color: #007bff;
|
||||
color: white;
|
||||
padding: 10px 24px;
|
||||
border-radius: 8px;
|
||||
font-size: 1rem;
|
||||
border: none;
|
||||
cursor: pointer;
|
||||
transition: 0.3s;
|
||||
display: block;
|
||||
margin: 0 auto;
|
||||
width: 100%;
|
||||
}
|
||||
.stButton > button:hover {
|
||||
background-color: #0056b3;
|
||||
box-shadow: 0 4px 8px rgba(0,0,0,0.1);
|
||||
}
|
||||
.result-container {
|
||||
background-color: white;
|
||||
border-radius: 10px;
|
||||
padding: 1.5rem;
|
||||
box-shadow: 0 3px 10px rgba(0,0,0,0.1);
|
||||
margin-top: 2rem;
|
||||
margin-bottom: 2rem;
|
||||
animation: fadeIn 0.5s;
|
||||
}
|
||||
.custom-button-container {
|
||||
margin-top: 20px;
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
@keyframes fadeIn {
|
||||
0% { opacity: 0; }
|
||||
100% { opacity: 1; }
|
||||
}
|
||||
.emotion-label {
|
||||
font-weight: bold;
|
||||
font-size: 1.1rem;
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
.emotion-score {
|
||||
margin-left: 10px;
|
||||
color: #555;
|
||||
}
|
||||
.sentiment-label {
|
||||
font-weight: bold;
|
||||
font-size: 1.1rem;
|
||||
margin-top: 1rem;
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
.chart-title {
|
||||
text-align: center;
|
||||
font-weight: bold;
|
||||
margin-bottom: 1rem;
|
||||
color: #333;
|
||||
}
|
||||
.divider {
|
||||
margin-top: 1.5rem;
|
||||
margin-bottom: 1.5rem;
|
||||
border-top: 1px solid #eee;
|
||||
}
|
||||
</style>
|
||||
""", unsafe_allow_html=True)
|
||||
|
||||
st.markdown('<div class="main-header">Analisis Jurnaling</div>', unsafe_allow_html=True)
|
||||
st.markdown('<div class="sub-header">TULISKAN EKSPRESIMU DENGAN KATA-KATA</div>', unsafe_allow_html=True)
|
||||
|
||||
text_input = st.text_area("", height=200, placeholder="Tuliskan isi jurnal anda di sini...")
|
||||
|
||||
col1, col2, col3 = st.columns([1, 2, 1])
|
||||
with col2:
|
||||
st.markdown('<div class="custom-button-container">', unsafe_allow_html=True)
|
||||
analyze_button = st.button("Analisis Teks", key="analyze", use_container_width=True)
|
||||
st.markdown('</div>', unsafe_allow_html=True)
|
||||
|
||||
def clean_text(text):
|
||||
text = text.lower()
|
||||
text = re.sub(r'http\S+', '', text)
|
||||
text = re.sub(r'[^a-zA-Z\s]', '', text)
|
||||
text = re.sub(r'\s+', ' ', text).strip()
|
||||
|
||||
words = text.split()
|
||||
words = [stemmer.stem(word) for word in words if word not in stop_words]
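# Stopword removal (NLTK Indonesian list) and stemming (Sastrawi) are assumed to mirror the preprocessing used when the model was trained.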
|
||||
return ' '.join(words)
|
||||
|
||||
def analyze_text(text):
|
||||
if model is None or tokenizer is None:
|
||||
return {
|
||||
"emotions": {
|
||||
"marah": 0.01,
|
||||
"sedih": 0.02,
|
||||
"jijik": 0.048,
|
||||
"takut": 0.01,
|
||||
"bahagia": 0.01,
|
||||
"netral": 0.945,
|
||||
"terkejut": 0.005
|
||||
},
|
||||
"dominant_emotion": "netral",
|
||||
"text": text
|
||||
}
|
||||
|
||||
clean_text_input = clean_text(text)
|
||||
|
||||
text_seq = tokenizer.texts_to_sequences([clean_text_input])
|
||||
|
||||
if not text_seq[0]:
|
||||
st.warning("Teks tidak mengandung kata yang dikenali oleh model. Coba gunakan kata-kata yang lebih umum.")
|
||||
return {
|
||||
"emotions": {label: 0.0 for label in ["marah", "sedih", "bahagia", "takut", "jijik", "netral", "terkejut"]},
|
||||
"dominant_emotion": "tidak_dikenali",
|
||||
"text": text
|
||||
}
|
||||
|
||||
text_padded = tf.keras.preprocessing.sequence.pad_sequences(text_seq, maxlen=MAXLEN, padding='post')
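# Pad the token sequence (post-padding) to MAXLEN entries so it matches the model's fixed input length.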
|
||||
|
||||
prediction = model.predict(text_padded, verbose=0)
|
||||
predicted_class = np.argmax(prediction, axis=1)[0]
|
||||
|
||||
label_mapping = {0: "marah", 1: "sedih", 2: "bahagia", 3: "takut", 4: "jijik", 5: "netral", 6: "terkejut"}
|
||||
emotion_label = label_mapping[predicted_class]
|
||||
|
||||
emotions = {}
|
||||
for i, label in label_mapping.items():
|
||||
emotions[label] = float(prediction[0][i])
|
||||
|
||||
return {
|
||||
"emotions": emotions,
|
||||
"dominant_emotion": emotion_label,
|
||||
"text": text
|
||||
}
|
||||
|
||||
if analyze_button:
|
||||
if text_input:
|
||||
result = analyze_text(text_input)
|
||||
st.session_state.text_analysis_result = result
|
||||
st.rerun()
|
||||
else:
|
||||
st.warning("Silakan masukkan teks terlebih dahulu.")
|
||||
|
||||
if 'text_analysis_result' in st.session_state:
|
||||
result = st.session_state.text_analysis_result
|
||||
|
||||
st.markdown("### Hasil:")
|
||||
st.markdown("#### Emosi Yang Terdeteksi:")
|
||||
|
||||
emotion_colors = {
|
||||
"marah": "#E53935",
|
||||
"sedih": "#7986CB",
|
||||
"jijik": "#8BC34A",
|
||||
"takut": "#FFB74D",
|
||||
"bahagia": "#4CAF50",
|
||||
"netral": "#9E9E9E",
|
||||
"terkejut": "#1E88E5"
|
||||
}
|
||||
|
||||
top_emotions = sorted(result["emotions"].items(), key=lambda x: x[1], reverse=True)[:3]
|
||||
|
||||
for emotion, score in top_emotions:
|
||||
emotion_name = emotion.capitalize()
|
||||
score_percent = score * 100
|
||||
color = emotion_colors.get(emotion, "#FFFFFF")
|
||||
|
||||
st.markdown(
|
||||
f'<div class="emotion-label" style="color:{color};">{emotion_name} <span class="emotion-score">{score_percent:.1f}%</span></div>',
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
|
||||
st.markdown('<div class="chart-title">Top 3 Emosi</div>', unsafe_allow_html=True)
|
||||
|
||||
emotions = [e[0].capitalize() for e in top_emotions]
|
||||
scores = [e[1]*100 for e in top_emotions]
|
||||
|
||||
fig, ax = plt.subplots(figsize=(10, 4))
|
||||
|
||||
colors = [emotion_colors.get(emotion.lower(), "#1E88E5") for emotion in emotions]
|
||||
|
||||
bars = ax.barh(emotions, scores, color=colors, height=0.5)
|
||||
|
||||
for bar in bars:
|
||||
width = bar.get_width()
|
||||
ax.text(width + 1, bar.get_y() + bar.get_height()/2, f'{width:.1f}%',
|
||||
va='center', fontweight='bold')
|
||||
|
||||
ax.set_xlim(0, 100)
|
||||
ax.set_xlabel('Confidence (%)')
|
||||
ax.spines['top'].set_visible(False)
|
||||
ax.spines['right'].set_visible(False)
|
||||
ax.spines['bottom'].set_color('#DDDDDD')
|
||||
ax.spines['left'].set_color('#DDDDDD')
|
||||
ax.tick_params(bottom=False, left=False)
|
||||
ax.set_axisbelow(True)
|
||||
ax.grid(axis='x', linestyle='-', alpha=0.2)
|
||||
|
||||
st.pyplot(fig)
|
||||
|
||||
positive_emotions = ["bahagia", "netral", "terkejut"]
|
||||
negative_emotions = ["marah", "sedih", "jijik", "takut"]
|
||||
|
||||
positive_score = sum(result["emotions"][e] for e in positive_emotions) * 100
|
||||
negative_score = sum(result["emotions"][e] for e in negative_emotions) * 100
|
||||
|
||||
st.markdown('<div class="divider"></div>', unsafe_allow_html=True)
|
||||
|
||||
st.markdown(
|
||||
f'<div class="sentiment-label" style="color:#4CAF50;">Positive Sentiment <span class="emotion-score">{positive_score:.1f}%</span></div>',
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
|
||||
st.markdown(
|
||||
f'<div class="sentiment-label" style="color:#E53935;">Negative Sentiment <span class="emotion-score">{negative_score:.1f}%</span></div>',
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
|
||||
fig2, ax2 = plt.subplots(figsize=(10, 2))
|
||||
|
||||
sentiments = ["Positive", "Negative"]
|
||||
sentiment_scores = [positive_score, negative_score]
|
||||
sentiment_colors = ["#4CAF50", "#E53935"]
|
||||
|
||||
bars2 = ax2.barh(sentiments, sentiment_scores, color=sentiment_colors, height=0.5)
|
||||
|
||||
for bar in bars2:
|
||||
width = bar.get_width()
|
||||
ax2.text(width + 1, bar.get_y() + bar.get_height()/2, f'{width:.1f}%',
|
||||
va='center', fontweight='bold')
|
||||
|
||||
ax2.set_xlim(0, 100)
|
||||
ax2.set_xlabel('Sentiment Score (%)')
|
||||
ax2.spines['top'].set_visible(False)
|
||||
ax2.spines['right'].set_visible(False)
|
||||
ax2.spines['bottom'].set_color('#DDDDDD')
|
||||
ax2.spines['left'].set_color('#DDDDDD')
|
||||
ax2.tick_params(bottom=False, left=False)
|
||||
ax2.set_axisbelow(True)
|
||||
ax2.grid(axis='x', linestyle='-', alpha=0.2)
|
||||
|
||||
st.pyplot(fig2)
|
||||
|
||||
st.markdown('<div class="custom-button-container">', unsafe_allow_html=True)
|
||||
multimodal_button = st.button("Lihat Hasil Multimodal", key="multimodal", use_container_width=True)
|
||||
st.markdown('</div>', unsafe_allow_html=True)
|
||||
|
||||
if multimodal_button:
|
||||
st.switch_page("pages/3_hasil.py")
|
340
pages/3_hasil.py
Normal file
@@ -0,0 +1,340 @@
|
||||
#3_hasil.py
|
||||
import streamlit as st
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
from collections import Counter
|
||||
|
||||
CATEGORIES = ['Angry', 'Sad', 'Happy', 'Fearful', 'Disgust', 'Neutral', 'Surprised']
|
||||
|
||||
st.set_page_config(page_title="Analisis Multimodal", layout="wide")
|
||||
|
||||
st.markdown("""
|
||||
<style>
|
||||
.main-header {
|
||||
color: white;
|
||||
background-color: #1E1E5A;
|
||||
padding: 1.5rem;
|
||||
text-align: center;
|
||||
border-radius: 10px 10px 0 0;
|
||||
margin-bottom: 2rem;
|
||||
font-size: 2rem;
|
||||
font-weight: bold;
|
||||
}
|
||||
.section-header {
|
||||
color: #FFFFFF;
|
||||
font-weight: bold;
|
||||
margin-bottom: 20px;
|
||||
border-bottom: 2px solid #3498db;
|
||||
padding-bottom: 10px;
|
||||
}
|
||||
.positive-score {
|
||||
color: #2ecc71;
|
||||
font-weight: bold;
|
||||
}
|
||||
.negative-score {
|
||||
color: #e74c3c;
|
||||
font-weight: bold;
|
||||
}
|
||||
.stButton > button {
|
||||
background-color: #007bff;
|
||||
color: white;
|
||||
padding: 10px 24px;
|
||||
border-radius: 8px;
|
||||
font-size: 1rem;
|
||||
border: none;
|
||||
cursor: pointer;
|
||||
transition: 0.3s;
|
||||
display: block;
|
||||
margin: 0 auto;
|
||||
width: 100%;
|
||||
}
|
||||
.stButton > button:hover {
|
||||
background-color: #0056b3;
|
||||
box-shadow: 0 4px 8px rgba(0,0,0,0.1);
|
||||
}
|
||||
.custom-button-container {
|
||||
margin-top: 20px;
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
.divider {
|
||||
margin-top: 1.5rem;
|
||||
margin-bottom: 1.5rem;
|
||||
border-top: 1px solid #eee;
|
||||
}
|
||||
</style>
|
||||
""", unsafe_allow_html=True)
|
||||
|
||||
st.markdown('<div class="main-header">📊 Laporan Analisis Multimodal</div>', unsafe_allow_html=True)
|
||||
|
||||
emotion_mapping = {
|
||||
'Happy': 'bahagia',
|
||||
'Surprised': 'terkejut',
|
||||
'Neutral': 'netral',
|
||||
'Angry': 'marah',
|
||||
'Sad': 'sedih',
|
||||
'Fearful': 'takut',
|
||||
'Disgust': 'jijik',
|
||||
'bahagia': 'bahagia',
|
||||
'terkejut': 'terkejut',
|
||||
'netral': 'netral',
|
||||
'marah': 'marah',
|
||||
'sedih': 'sedih',
|
||||
'takut': 'takut',
|
||||
'jijik': 'jijik'
|
||||
}
|
||||
|
||||
positive_emotions = ['bahagia', 'terkejut', 'netral']
|
||||
negative_emotions = ['marah', 'sedih', 'takut', 'jijik']
|
||||
|
||||
if 'data_manager' not in st.session_state or st.session_state.data_manager is None:
|
||||
st.error("Data emosi wajah tidak tersedia. Silakan lakukan analisis wajah terlebih dahulu.")
|
||||
st.markdown('<div class="custom-button-container">', unsafe_allow_html=True)
|
||||
if st.button("Lakukan Analisis Wajah"):
|
||||
try:
|
||||
st.switch_page("pages/1_analisis_wajah.py")
|
||||
except FileNotFoundError:
|
||||
st.error("Halaman analisis wajah tidak ditemukan.")
|
||||
st.markdown('</div>', unsafe_allow_html=True)
|
||||
st.stop()
|
||||
|
||||
if 'text_analysis_result' not in st.session_state or not st.session_state.text_analysis_result:
|
||||
st.error("Data analisis teks tidak tersedia. Silakan lakukan analisis teks terlebih dahulu.")
|
||||
st.markdown('<div class="custom-button-container">', unsafe_allow_html=True)
|
||||
if st.button("Lakukan Analisis Teks"):
|
||||
try:
|
||||
st.switch_page("pages/2_analisis_teks.py")
|
||||
except FileNotFoundError:
|
||||
st.error("Halaman analisis teks tidak ditemukan.")
|
||||
st.markdown('</div>', unsafe_allow_html=True)
|
||||
st.stop()
|
||||
|
||||
data_manager = st.session_state.data_manager
|
||||
if not data_manager.predictions or not data_manager.timestamps:
|
||||
st.error("Tidak ada data emosi wajah yang valid untuk analisis.")
|
||||
st.stop()
|
||||
|
||||
face_emotion_data = [(timestamp, CATEGORIES[pred_idx]) for pred_idx, timestamp in zip(data_manager.predictions, data_manager.timestamps)]
|
||||
for timestamp, emotion in face_emotion_data:
|
||||
if not isinstance(timestamp, (int, float)) or not isinstance(emotion, str):
|
||||
st.error("Format data emosi wajah tidak valid.")
|
||||
st.stop()
|
||||
if emotion not in emotion_mapping:
|
||||
st.warning(f"Emosi tidak dikenal: {emotion}. Melewati emosi ini.")
|
||||
face_emotion_data = [(t, e) for t, e in face_emotion_data if e in emotion_mapping]
|
||||
if not face_emotion_data:
|
||||
st.error("Tidak ada data emosi wajah yang valid.")
|
||||
st.stop()
|
||||
|
||||
text_result = st.session_state.text_analysis_result
|
||||
if not isinstance(text_result, dict) or 'text' not in text_result:
|
||||
st.error("Format data analisis teks tidak valid.")
|
||||
st.stop()
|
||||
|
||||
# Process face emotions
|
||||
dominant_face_emotions = {}
|
||||
for _, emotion in face_emotion_data:
|
||||
standardized_emotion = emotion_mapping.get(emotion)
|
||||
if standardized_emotion:
|
||||
dominant_face_emotions[standardized_emotion] = dominant_face_emotions.get(standardized_emotion, 0) + 1
|
||||
|
||||
total_face_frames = len(face_emotion_data)
|
||||
if total_face_frames == 0:
|
||||
st.error("Tidak ada data emosi wajah yang valid untuk analisis.")
|
||||
st.stop()
|
||||
|
||||
face_emotion_percentages = {emotion: (count / total_face_frames) * 100
|
||||
for emotion, count in dominant_face_emotions.items()}
|
||||
top_face_emotions = sorted(face_emotion_percentages.items(), key=lambda x: x[1], reverse=True)[:3]
|
||||
|
||||
text_emotions = {}
|
||||
if "emotions" in text_result:
|
||||
text_emotions = {emotion_mapping.get(emotion, emotion): score * 100
|
||||
for emotion, score in text_result["emotions"].items()
|
||||
if emotion_mapping.get(emotion, emotion) in emotion_mapping.values()}
|
||||
elif "top_emotions" in text_result:
|
||||
text_emotions = {emotion_mapping.get(emotion, emotion): score * 100
|
||||
for emotion, score in text_result["top_emotions"]
|
||||
if emotion_mapping.get(emotion, emotion) in emotion_mapping.values()}
|
||||
|
||||
top_text_emotions = sorted(text_emotions.items(), key=lambda x: x[1], reverse=True)[:3]
|
||||
|
||||
combined_emotions = {}
|
||||
for emotion, percentage in face_emotion_percentages.items():
|
||||
combined_emotions[emotion] = combined_emotions.get(emotion, 0) + percentage
|
||||
|
||||
standardized_text_emotions = {emotion_mapping.get(e, e): score for e, score in text_emotions.items()}
|
||||
for emotion, percentage in standardized_text_emotions.items():
|
||||
if emotion in emotion_mapping.values():
|
||||
combined_emotions[emotion] = combined_emotions.get(emotion, 0) + percentage
|
||||
|
||||
for emotion in combined_emotions:
|
||||
appeared_in_face = emotion in face_emotion_percentages
|
||||
appeared_in_text = emotion in standardized_text_emotions
|
||||
divisor = 1 + (1 if appeared_in_face and appeared_in_text else 0)
|
||||
combined_emotions[emotion] /= divisor
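# If an emotion was detected by both the face and text modalities, its two percentages are averaged; single-modality emotions keep their original value.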
|
||||
|
||||
total_combined = sum(combined_emotions.values())
|
||||
if total_combined > 0:
|
||||
combined_emotions = {emotion: (score / total_combined) * 100
|
||||
for emotion, score in combined_emotions.items()}
|
||||
top_combined_emotions = sorted(combined_emotions.items(), key=lambda x: x[1], reverse=True)[:3]
|
||||
|
||||
face_positive_score = sum(face_emotion_percentages.get(emotion, 0) for emotion in positive_emotions)
|
||||
face_negative_score = sum(face_emotion_percentages.get(emotion, 0) for emotion in negative_emotions)
|
||||
total_face_score = face_positive_score + face_negative_score
|
||||
if total_face_score > 0:
|
||||
face_positive_score = (face_positive_score / total_face_score) * 100
|
||||
face_negative_score = (face_negative_score / total_face_score) * 100
|
||||
else:
|
||||
face_positive_score = 0
|
||||
face_negative_score = 0
|
||||
st.warning("Tidak ada data emosi wajah yang valid untuk menghitung skor positif/negatif.")
|
||||
|
||||
if "positive_score" in text_result and "negative_score" in text_result:
|
||||
text_positive_score = text_result['positive_score'] * 100
|
||||
text_negative_score = text_result['negative_score'] * 100
|
||||
else:
|
||||
text_positive_score = sum(text_emotions.get(emotion, 0) for emotion in positive_emotions)
|
||||
text_negative_score = sum(text_emotions.get(emotion, 0) for emotion in negative_emotions)
|
||||
total_text_score = text_positive_score + text_negative_score
|
||||
if total_text_score > 0:
|
||||
text_positive_score = (text_positive_score / total_text_score) * 100
|
||||
text_negative_score = (text_negative_score / total_text_score) * 100
|
||||
else:
|
||||
text_positive_score = 0
|
||||
text_negative_score = 0
|
||||
st.warning("Tidak ada data emosi teks yang valid untuk menghitung skor positif/negatif.")
|
||||
|
||||
avg_positive_score = (face_positive_score + text_positive_score) / 2
|
||||
avg_negative_score = (face_negative_score + text_negative_score) / 2
|
||||
|
||||
emotion_changes = {}
|
||||
for i in range(1, len(face_emotion_data)):
|
||||
if face_emotion_data[i][1] != face_emotion_data[i-1][1]:
|
||||
key = f"{face_emotion_data[i-1][1]} → {face_emotion_data[i][1]}"
|
||||
time_diff = face_emotion_data[i][0] - face_emotion_data[i-1][0]
|
||||
emotion_changes[key] = time_diff
|
||||
|
||||
st.markdown('<h2 class="section-header">📝 Ringkasan Umum</h2>', unsafe_allow_html=True)
|
||||
st.write(f"**Durasi Analisis:** {face_emotion_data[-1][0]:.2f} detik")
|
||||
st.write(f"**Jumlah Perubahan Emosi:** {len(emotion_changes)}")
|
||||
st.markdown("### Skor Rata-rata Emosi (Wajah & Teks)")
|
||||
st.write(f"🟢 Rata-rata Emosi Positif: <span class='positive-score'>{avg_positive_score:.1f}%</span>", unsafe_allow_html=True)
|
||||
st.write(f"🔴 Rata-rata Emosi Negatif: <span class='negative-score'>{avg_negative_score:.1f}%</span>", unsafe_allow_html=True)
|
||||
|
||||
if avg_positive_score > 0 or avg_negative_score > 0:
|
||||
fig, ax = plt.subplots(figsize=(10, 3))
|
||||
labels = ['Positif', 'Negatif']
|
||||
values = [avg_positive_score, avg_negative_score]
|
||||
colors = ['#2ecc71', '#e74c3c']
|
||||
|
||||
bars = ax.barh(labels, values, color=colors, height=0.5)
|
||||
|
||||
for bar in bars:
|
||||
width = bar.get_width()
|
||||
ax.text(width + 1, bar.get_y() + bar.get_height()/2, f'{width:.1f}%',
|
||||
va='center', fontweight='bold')
|
||||
|
||||
ax.set_xlim(0, 100)
|
||||
ax.set_xlabel('Persentase (%)')
|
||||
ax.spines['top'].set_visible(False)
|
||||
ax.spines['right'].set_visible(False)
|
||||
ax.spines['bottom'].set_color('#DDDDDD')
|
||||
ax.spines['left'].set_color('#DDDDDD')
|
||||
ax.tick_params(bottom=False, left=False)
|
||||
ax.set_axisbelow(True)
|
||||
ax.grid(axis='x', linestyle='-', alpha=0.2)
|
||||
|
||||
st.pyplot(fig)
|
||||
else:
|
||||
st.warning("Tidak cukup data untuk menampilkan grafik skor positif/negatif.")
|
||||
|
||||
st.write("### Top 3 Emosi Gabungan (Wajah & Teks)")
|
||||
for emotion, score in top_combined_emotions:
|
||||
st.write(f"- {emotion.capitalize()}: {score:.1f}%")
|
||||
|
||||
st.markdown('</div>', unsafe_allow_html=True)
|
||||
st.markdown('<h2 class="section-header">😀 Analisis Ekspresi Wajah</h2>', unsafe_allow_html=True)
|
||||
st.write("### Top 3 Emosi Wajah")
|
||||
for emotion, percentage in top_face_emotions:
|
||||
count = dominant_face_emotions[emotion]
|
||||
st.write(f"- {emotion.capitalize()}: {percentage:.1f}% ({count} kali)")
|
||||
|
||||
st.write(f"### Skor Emosi Wajah")
|
||||
st.write(f"🟢 Skor Positif: <span class='positive-score'>{face_positive_score:.1f}%</span>", unsafe_allow_html=True)
|
||||
st.write(f"🔴 Skor Negatif: <span class='negative-score'>{face_negative_score:.1f}%</span>", unsafe_allow_html=True)
|
||||
|
||||
if len(face_emotion_data) > 1:
|
||||
fig, ax = plt.subplots(figsize=(12, 6))
|
||||
fig.patch.set_facecolor('#f4f4f7')
|
||||
ax.set_facecolor('#f4f4f7')
|
||||
|
||||
timestamps = [data[0] for data in face_emotion_data]
|
||||
emotions = [data[1] for data in face_emotion_data]
|
||||
|
||||
ax.scatter(timestamps, emotions, c='#3a7aff', alpha=0.7, s=30)
|
||||
ax.plot(timestamps, emotions, color='#3a7aff', alpha=0.5, linewidth=1)
|
||||
|
||||
ax.set_xlabel("Waktu (detik)", color='#111c4e', fontsize=12)
|
||||
ax.set_ylabel("Ekspresi", color='#111c4e', fontsize=12)
|
||||
ax.set_title("Timeline Perubahan Ekspresi", color='#111c4e', fontsize=14, fontweight='bold')
|
||||
|
||||
ax.tick_params(axis='both', colors='#111c4e', labelsize=10)
|
||||
for spine in ax.spines.values():
|
||||
spine.set_color('#111c4e')
|
||||
|
||||
ax.grid(True, alpha=0.3)
|
||||
|
||||
plt.tight_layout()
|
||||
|
||||
st.pyplot(fig)
|
||||
plt.close(fig)
|
||||
|
||||
if emotion_changes:
|
||||
st.write("### Perubahan Emosi Tercepat")
|
||||
for change, time in sorted(emotion_changes.items(), key=lambda x: x[1])[:3]:
|
||||
st.write(f"- {change} dalam {time:.2f} detik")
|
||||
|
||||
st.markdown('</div>', unsafe_allow_html=True)
|
||||
st.markdown('<h2 class="section-header">📝 Analisis Teks Jurnaling</h2>', unsafe_allow_html=True)
|
||||
st.write("### Top 3 Emosi Terdeteksi")
|
||||
for emotion, percentage in top_text_emotions:
|
||||
st.write(f"- {emotion.capitalize()}: {percentage:.1f}%")
|
||||
|
||||
st.write(f"### Skor Emosi")
|
||||
st.write(f"🟢 Skor Positif: <span class='positive-score'>{text_positive_score:.1f}%</span>", unsafe_allow_html=True)
|
||||
st.write(f"🔴 Skor Negatif: <span class='negative-score'>{text_negative_score:.1f}%</span>", unsafe_allow_html=True)
|
||||
st.write("### Teks Jurnal")
|
||||
st.write(text_result['text'])
|
||||
|
||||
st.markdown('</div>', unsafe_allow_html=True)
|
||||
st.markdown('<h2 class="section-header">🔍 Kesimpulan Akhir</h2>', unsafe_allow_html=True)
|
||||
st.markdown("### Fusion dengan Majority Voting")
|
||||
st.write("Metode ini menggabungkan keputusan dari setiap modalitas dengan menerapkan prinsip voting mayoritas.")
|
||||
|
||||
face_classification = 1 if face_negative_score >= face_positive_score else 0
|
||||
text_classification = 1 if text_negative_score >= text_positive_score else 0
|
||||
fused_score = np.mean([face_classification, text_classification])
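# Simple majority vote over the two binary modality decisions (0.5 means they disagree); note the verdict below is currently driven by avg_negative_score rather than fused_score.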
|
||||
|
||||
if avg_negative_score >= 50:
|
||||
st.error("⚠️ Analisis menunjukkan potensi indikasi depresi.")
|
||||
st.write("Disarankan untuk berbicara dengan konselor atau psikolog.")
|
||||
elif avg_negative_score < 49:
|
||||
st.success("✅ Analisis menunjukkan kondisi emosi stabil.")
|
||||
st.write("Tetap jaga kesehatan mental dan lanjutkan kegiatan positif.")
|
||||
else:
|
||||
st.warning("⚖️ Analisis menunjukkan kondisi emosi netral (skor negatif 49%).")
|
||||
st.write("Pertimbangkan konsultasi jika merasa perlu.")
|
||||
|
||||
st.write(f"- Klasifikasi dari analisis wajah: {'Indikasi Negatif' if face_classification == 1 else 'Indikasi Positif'}")
|
||||
st.write(f"- Klasifikasi dari analisis teks: {'Indikasi Negatif' if text_classification == 1 else 'Indikasi Positif'}")
|
||||
|
||||
st.markdown('<div class="custom-button-container">', unsafe_allow_html=True)
|
||||
if st.button("Ulangi Analisis"):
|
||||
st.session_state.pop('data_manager', None)
|
||||
st.session_state.pop('text_analysis_result', None)
|
||||
try:
|
||||
st.switch_page("pages/1_analisis_wajah.py")
|
||||
except FileNotFoundError:
|
||||
st.error("Halaman analisis wajah tidak ditemukan.")
|
||||
st.markdown('</div>', unsafe_allow_html=True)
|
10
requirements.txt
Normal file
@@ -0,0 +1,10 @@
opencv-contrib-python==4.9.0.80
opencv-python==4.8.1.78
pandas==2.1.4
matplotlib==3.9.2
numpy==1.26.2
tensorflow==2.18.0
streamlit==1.39.0
gdown==5.2.0
Sastrawi==1.0.1
nltk==3.9.1