simonraj commited on
Commit
cf815ef
1 Parent(s): 4139be0

Upload 17 files

Browse files
.env.example ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ OPENAI_API_KEY=your_openai_api_key
2
+ DATABASE_URL=your_postgres_database_url
.gitignore ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Virtual environments
2
+ .venv/
3
+ venv/
4
+
5
+ .myenv/
6
+ myenv/
7
+
8
+ # Python cache files
9
+ __pycache__/
10
+ *.pyc
11
+
12
+ # Environment variables
13
+ .env
Install-Instructions.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ Setup Database
2
+ ---------------
3
+ After creating the DB, get the connection string and update:
4
+ a. Database\DatabaseSetup.py
5
+ line 6: url = "post..."
6
+ b. then run the file via "visual studio code", so the tables are created.
7
+ c. set DATABASE_URL in your .env file (see .env.example);
+ 'database_functions.py' loads it via load_dotenv() / os.getenv("DATABASE_URL")
app.py ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+ import gradio as gr
3
+ import asyncio
4
+ import os
5
+ import thinkingframes
6
+ import soundfile as sf
7
+ import numpy as np
8
+ import logging
9
+ from openai import OpenAI
10
+ from dotenv import load_dotenv
11
+ from policy import user_acceptance_policy
12
+ from styles import theme
13
+ from thinkingframes import generate_prompt, strategy_options
14
+ from utils import get_image_html, collect_student_info
15
+ from database_functions import add_submission
16
+ from tab_teachers_dashboard import create_teachers_dashboard_tab
17
+ from config import CLASS_OPTIONS
18
+ from concurrent.futures import ThreadPoolExecutor
19
+
20
+ # Load CSS from external file
21
+ with open('styles.css', 'r') as file:
22
+ css = file.read()
23
+
24
+ # For maintaining user session (to keep track of userID)
25
+ user_state = gr.State(value="")
26
+
27
+ load_dotenv()
28
+
29
+ client = OpenAI()
30
+ image_path = "picturePerformance.jpg"
31
+ img_html = get_image_html(image_path)
32
+
33
+ # Create a thread pool executor
34
+ executor = ThreadPoolExecutor()
35
+
36
def transcribe_audio(audio_path):
    """Send the recorded WAV file to OpenAI Whisper and return the transcript text."""
    with open(audio_path, "rb") as audio_file:
        result = client.audio.transcriptions.create(
            file=audio_file,
            model="whisper-1",
            language="en",
        )
    return result.text
40
+
41
async def generate_feedback(user_id, question_choice, strategy_choice, message, feedback_level):
    """Generate GPT feedback for a student's transcribed oral response.

    Builds a system prompt from the chosen question, thinking-frame strategy and
    feedback level, consumes the streamed completion into one string, yields the
    chat history once, then persists the feedback via add_submission.

    Args:
        user_id: numeric id of the student (stored with the submission).
        question_choice: one of thinkingframes.questions.
        strategy_choice: key into thinkingframes.strategy_options.
        message: the student's transcribed response.
        feedback_level: verbosity level passed to thinkingframes.generate_prompt.

    Yields:
        A list of (speaker, text) tuples suitable for a gr.Chatbot.
    """
    current_question_index = thinkingframes.questions.index(question_choice)
    strategy, explanation = thinkingframes.strategy_options[strategy_choice]

    conversation = [{
        "role": "system",
        "content": f"You are an expert Primary 6 English Language Teacher in a Singapore Primary school, "
                   f"directly guiding a Primary 6 student in Singapore in their oral responses. "
                   f"Format the feedback in Markdown so that it can be easily read. "
                   f"Address the student directly in the second person in your feedback. "
                   f"The student is answering the question: '{thinkingframes.questions[current_question_index]}'. "
                   f"For Question 1, consider the picture description: '{thinkingframes.description}'. "
                   f"For Questions 2 and 3, the picture is not relevant, so the student should not refer to it in their response. "
                   f"Analyze the student's response using the following step-by-step approach: "
                   f"1. Evaluate the response against the {strategy} thinking frame. "
                   f"2. Assess how well the student's response addresses each component of the {strategy} thinking frame: "
                   f" - Assign emoticon scores based on how well the student comprehensively covered each component: "
                   f" - 😊😊😊 (three smiling faces) for a good coverage "
                   f" - 😊😊 (two smiling faces) for an average coverage "
                   f" - 😊 (one smiling face) for a poor coverage "
                   f" - Provide a clear, direct, and concise explanation of how well the answer addresses each component. "
                   f" - Identify specific areas for improvement in students responses, and provide targeted suggestions for improvement. "
                   f"3. Identify overall strengths and areas for improvement in the student's response using the {strategy} to format and provide targeted areas for improvement. "
                   f"4. Provide specific feedback on grammar, vocabulary, and sentence structure. "
                   f" Suggest age-appropriate enhancements that are one level higher than the student's current response. "
                   f"5. Conclude with follow-up questions for reflection. "
                   f"If the student's response deviates from the question, provide clear and concise feedback to help them refocus and try again. "
                   f"Ensure that the vocabulary and sentence structure recommendations are achievable for Primary 6 students in Singapore. "
                   f"Example Feedback Structure for Each Component: "
                   f"Component: [Component Name] "
                   f"Score: [Smiling emoticons] "
                   f"Explanation: [Clear, direct, and concise explanation of how well the answer addresses the component. Identify specific areas for improvement, and provide targeted suggestions for improvement.] "
                   f"{thinkingframes.generate_prompt(feedback_level)}"
    }, {
        "role": "user",
        "content": message
    }]

    response = client.chat.completions.create(
        model='gpt-4o-2024-05-13',
        messages=conversation,
        temperature=0.6,
        max_tokens=1000,
        stream=True
    )

    chat_history = []  # Initialize chat history outside the loop
    full_feedback = ""  # Accumulate the entire feedback message
    try:
        for chunk in response:
            if chunk.choices[0].delta and chunk.choices[0].delta.content:
                feedback_chunk = chunk.choices[0].delta.content
                full_feedback += feedback_chunk  # Accumulate the feedback
                # sleep(0) only yields control to the event loop between chunks;
                # NOTE(review): no partial update is yielded here, so the UI does
                # not see incremental streaming — confirm this is intended.
                await asyncio.sleep(0)

        # Append the complete feedback to the chat history
        chat_history.append(("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", full_feedback))
        yield chat_history  # Yield the chat history only once

    except Exception as e:
        logging.error(f"An error occurred during feedback generation: {str(e)}")

    questionNo = current_question_index + 1
    # Save complete feedback after streaming.  NOTE(review): this also runs when
    # the stream failed part-way, persisting partial feedback with score 0 and
    # empty teacher feedback — confirm that is the desired behavior.
    add_submission(user_id, message, full_feedback, int(0), "", questionNo)
106
+
107
async def generate_audio_feedback(feedback_buffer):
    """Convert the feedback text to speech with OpenAI TTS.

    Returns:
        (sample_rate, numpy int16 array) for a gr.Audio(type="numpy") component,
        or None when speech generation fails.
    """
    try:
        response = client.audio.speech.create(
            model="tts-1",
            voice="alloy",
            input=feedback_buffer,
            response_format="wav"
        )

        # NOTE(review): response.read() returns the full WAV payload; frombuffer
        # reinterprets ALL of it (including the RIFF header bytes) as int16
        # samples — this likely adds a short click at the start. Confirm and
        # consider parsing the WAV container instead.
        audio_data = np.frombuffer(response.read(), dtype=np.int16)
        # assumes OpenAI's WAV output is 24 kHz mono int16 — TODO confirm
        sample_rate = 24000  # Default sample rate for OpenAI's WAV output

        return (sample_rate, audio_data)

    except Exception as e:
        logging.error(f"An error occurred during speech generation: {str(e)}")
        return None  # Return None in case of an error
124
+
125
async def predict(question_choice, strategy_choice, feedback_level, audio):
    """End-to-end pipeline for one oral response: record → transcribe → moderate
    → GPT feedback → TTS playback.

    Async generator wired to the Submit button; yields
    (chat_history, audio_output) pairs so the UI updates progressively.

    Args:
        question_choice / strategy_choice / feedback_level: UI selections.
        audio: (sample_rate, numpy array) from gr.Audio(type="numpy"), or None.
    """
    current_audio_output = None  # Initialize current_audio_output to None
    final_feedback = ""  # Store only the assistant's feedback

    # Guard: microphone component returned nothing at all.
    if audio is None:
        yield [("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "No audio data received. Please try again.")], current_audio_output
        return

    sample_rate, audio_data = audio

    # Guard: empty recording.
    if audio_data is None or len(audio_data) == 0:
        yield [("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "No audio data received. Please try again.")], current_audio_output
        return

    # NOTE(review): a single shared file name — concurrent users would overwrite
    # each other's recordings; confirm whether the app serves multiple sessions.
    audio_path = "audio.wav"
    if not isinstance(audio_data, np.ndarray):
        raise ValueError("audio_data must be a numpy array")
    sf.write(audio_path, audio_data, sample_rate)

    chat_history = [("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "Transcribing your audio, please listen to your oral response while waiting ...")]
    yield chat_history, current_audio_output

    try:
        # Run the blocking Whisper call on the thread pool so the event loop stays free.
        transcription_future = executor.submit(transcribe_audio, audio_path)
        student_response = await asyncio.wrap_future(transcription_future)

        if not student_response.strip():
            yield [("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "Transcription failed. Please try again or seek assistance.")], current_audio_output
            return

        chat_history.append(("Student", student_response))  # Add student's transcript
        yield chat_history, current_audio_output

        chat_history.append(("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "Transcription complete. Generating feedback. Please continue listening to your oral response while waiting ..."))
        yield chat_history, current_audio_output

        # Moderation gate: flagged responses are logged to the DB and not sent to GPT.
        moderation_response = client.moderations.create(input=student_response)
        flagged = any(result.flagged for result in moderation_response.results)
        if flagged:
            moderated_message = "The message has been flagged. Please see your teacher to clarify."
            questionNo = thinkingframes.questions.index(question_choice) + 1
            # NOTE(review): user_state is a module-level gr.State — its .value is
            # shared across sessions, not per-user; confirm session isolation.
            add_submission(int(user_state.value), moderated_message, "", int(0), "", questionNo)
            yield chat_history, current_audio_output
            return

        async for chat_update in generate_feedback(int(user_state.value), question_choice, strategy_choice, student_response, feedback_level):
            # Append the assistant's feedback to the existing chat_history
            chat_history.extend(chat_update)
            final_feedback = chat_history[-1][1]  # Update final_feedback with the latest chunk
            yield chat_history, current_audio_output  # Yield audio output

        feedback_buffer = final_feedback  # Use final_feedback for TTS
        audio_task = asyncio.create_task(generate_audio_feedback(feedback_buffer))
        current_audio_output = await audio_task  # Store audio output
        yield chat_history, current_audio_output  # Yield audio output

    except Exception as e:
        logging.error(f"An error occurred: {str(e)}", exc_info=True)
        yield [("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", "An error occurred. Please try again or seek assistance.")], current_audio_output
184
+
185
# Top-level UI: student info + policy gate, then the Oral Coach workflow,
# plus the Teacher's Dashboard tab.  Launched at module import.
with gr.Blocks(title="Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", theme=theme, css=css) as app:
    with gr.Tab("Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡"):
        gr.Markdown("## Student Information")
        class_name = gr.Dropdown(label="Class", choices=CLASS_OPTIONS)
        # Index numbers "01".."45" to match the zero-padded format stored in the DB.
        index_no = gr.Dropdown(label="Index No", choices=[f"{i:02}" for i in range(1, 46)])

        policy_text = gr.Markdown(user_acceptance_policy)
        policy_checkbox = gr.Checkbox(label="I have read and agree to the Things to Note When using the Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡", value=False)

        submit_info_btn = gr.Button("Submit Info")
        info_output = gr.Text()

        # The coaching workflow stays hidden until the policy is accepted and
        # the student info validates (see toggle_oral_coach_visibility below).
        with gr.Column(visible=False) as oral_coach_content:
            gr.Markdown("## English Language Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡")
            gr.Markdown(img_html)  # Display the image
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Step 1: Choose a Question")
                    question_choice = gr.Radio(thinkingframes.questions, label="Questions", value=thinkingframes.questions[0])
                    gr.Markdown("### Step 2: Choose a Thinking Frame")
                    strategy_choice = gr.Dropdown(list(strategy_options.keys()), label="Thinking Frame", value=list(strategy_options.keys())[0])
                    gr.Markdown("### Step 3: Choose Feedback Level")
                    feedback_level = gr.Radio(["Brief Feedback", "Moderate Feedback", "Comprehensive Feedback"], label="Feedback Level")
                    feedback_level.value = "Brief Feedback"

                with gr.Column(scale=1):
                    gr.Markdown("### Step 4: Record Your Answer")
                    audio_input = gr.Audio(type="numpy", sources=["microphone"], label="Record")
                    submit_answer_btn = gr.Button("Submit Oral Response")

            gr.Markdown("### Step 5: Review your personalised feedback")
            feedback_output = gr.Chatbot(
                label="Feedback",
                scale=4,
                height=700,
                show_label=True
            )
            #audio
            #submit_answer_here
            # NOTE(review): autoplay="True" passes a string where gr.Audio
            # documents a bool; it is truthy so it may work, but confirm.
            audio_output = gr.Audio(type="numpy", label="Audio Playback", format="wav", autoplay="True")

            submit_answer_btn.click(
                predict,
                inputs=[question_choice, strategy_choice, feedback_level, audio_input],
                outputs=[feedback_output, audio_output],
                api_name="predict"
            )

    def toggle_oral_coach_visibility(class_name, index_no, policy_checked):
        """Validate policy acceptance + student info; reveal the coach on success.

        Returns (status message, gr.update for oral_coach_content visibility).
        """
        if not policy_checked:
            return "Please agree to the Things to Note When using the Oral Coach ⚡ ϞϞ(๑⚈ ․̫ ⚈๑)∩ ⚡ before submitting.", gr.update(visible=False)
        validation_passed, message, userid = collect_student_info(class_name, index_no)
        if not validation_passed:
            return message, gr.update(visible=False)
        # NOTE(review): mutating a module-level gr.State's .value is shared
        # process-wide, not per-session — confirm this is acceptable.
        user_state.value = userid
        return message, gr.update(visible=True)

    submit_info_btn.click(
        toggle_oral_coach_visibility,
        inputs=[class_name, index_no, policy_checkbox],
        outputs=[info_output, oral_coach_content]
    )

    # Define other tabs like Teacher's Dashboard
    create_teachers_dashboard_tab()

# Queue enables generator-based (progressive) outputs; PORT can override 10000.
app.queue(max_size=20).launch(
    debug=True,
    server_port=int(os.environ.get("PORT", 10000)),
    favicon_path="favicon.ico"
)
config.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # In constants.py or config.py
2
+ CLASS_OPTIONS = ["Demo","Testing Only", "Teacher"]
database_functions.py ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # database_functions.py
2
+ import psycopg2
3
+ import random
4
+ import string
5
+ from urllib.parse import urlparse
6
+ from datetime import datetime
7
+ import json
8
+ from dotenv import load_dotenv
9
+ import os
10
+
11
+ # Load environment variables from .env file
12
+ load_dotenv()
13
+
14
+ # Get the database URL from the environment variables
15
+ url = os.getenv("DATABASE_URL")
16
+ if not url:
17
+ raise ValueError("DATABASE_URL is not set in the environment variables")
18
+
19
+ parsed_url = urlparse(url)
20
+
21
+ # Extract connection parameters
22
+ db_config = {
23
+ 'user': parsed_url.username,
24
+ 'password': parsed_url.password,
25
+ 'host': parsed_url.hostname,
26
+ 'port': parsed_url.port,
27
+ 'database': parsed_url.path.lstrip('/')
28
+ }
29
+
30
# Since we define password in schema, we will just generate password
def generate_password(length=8):
    """Return a random password of *length* characters (default 8, as before).

    Fix: uses the ``secrets`` module instead of ``random`` — ``random`` is a
    predictable PRNG and unsuitable for anything credential-like.
    """
    import secrets  # stdlib; local import keeps the module header untouched

    characters = string.ascii_letters + string.digits + string.punctuation
    return ''.join(secrets.choice(characters) for _ in range(length))
35
+
36
# add student method (privacy with only class & indexNo)
def add_user_privacy(class_name, index_no):
    """Look up or create a student identified only by (class, index_no).

    Returns:
        (user_id, message) on success, where message is "User already exists"
        or "User Created".
        On a psycopg2 error, a single error string (NOT a tuple) — kept for
        backward compatibility with existing callers; see note below.
    """
    connection = psycopg2.connect(**db_config)
    cursor = connection.cursor()
    password = generate_password()

    try:
        # One row per (class, index_no) pair: reuse the existing student if present.
        cursor.execute("SELECT id FROM oc_students WHERE class = %s and index_no = %s", (class_name, index_no))
        existing_user = cursor.fetchone()
        if existing_user:
            user_id = existing_user[0]
            dbMsg = "User already exists"
        else:
            # If user doesn't exist, insert a new user
            cursor.execute("INSERT INTO oc_students (index_no, class, hashPassword) VALUES (%s, %s, %s) RETURNING id",
                           (index_no, class_name, password))
            user_id = cursor.fetchone()[0]  # Fetch the ID of the newly inserted user
            connection.commit()  # without this, data is not persisted to the db
            dbMsg = "User Created"
        return user_id, dbMsg

    except psycopg2.Error as e:
        # NOTE(review): the error path returns a plain string while the success
        # path returns a tuple — kept as-is so existing callers don't break,
        # but callers unpacking the result will fail on errors; confirm.
        return "Error adding user:" + str(e)

    finally:
        # Fix: the original leaked the connection and cursor on every call.
        cursor.close()
        connection.close()
61
+
62
def add_submission(userid, transcribed_text, ai_responses, scores, feedback, questionNo):
    """Insert one submission row into oc_submissions, timestamped now.

    Errors are printed and swallowed (best-effort persistence); the connection
    is always closed in the ``finally`` block.
    """
    connection = psycopg2.connect(**db_config)
    cursor = connection.cursor()
    dbMsg = ""  # NOTE(review): assigned but never returned or read — dead store

    try:
        current_datetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        cursor.execute("INSERT INTO oc_submissions (userid, datetime, Transcribed_text, AI_conversation_responses, Scores, Feedback, questionNo) "
                       "VALUES (%s, %s, %s, %s, %s, %s, %s)",
                       (userid, current_datetime, transcribed_text, ai_responses, scores, feedback, questionNo))
        connection.commit()
        dbMsg = "Submission added"

    except psycopg2.Error as e:
        print("Error adding submission:", e)

    finally:
        if connection:
            cursor.close()
            connection.close()
            print("PostgreSQL connection is closed")
84
+
85
def get_submissions_by_date_and_class(from_date, to_date, class_name, display_ai_feedback):
    """Fetch submissions for a class within [from_date, to_date] and build a JSON report.

    Args:
        from_date / to_date: 'YYYY-MM-DD' strings; swapped if given in reverse order.
        class_name: exact class value in oc_students.
        display_ai_feedback: when falsy, AI responses are NULLed out in SQL.

    Returns:
        The JSON string from generate_report_as_json on success, otherwise a
        one-element list of placeholder dicts (note: a different type — callers
        must handle both shapes).
    """
    # Connect to the database
    conn = psycopg2.connect(**db_config)
    cursor = conn.cursor()

    try:
        print(f"From Date: {from_date}")
        print(f"To Date: {to_date}")
        print(f"Class Name: {class_name}")

        # Swap from_date and to_date if from_date is later than to_date
        if from_date > to_date:
            from_date, to_date = to_date, from_date

        query = """
            SELECT s.index_no, s.class, sub.datetime, sub.questionNo, sub.transcribed_text,
            CASE WHEN %s THEN sub.ai_conversation_responses ELSE NULL END AS ai_conversation_responses,
            sub.userid
            FROM oc_students AS s
            JOIN oc_submissions AS sub ON s.id = sub.userid
            WHERE TO_DATE(sub.datetime::text, 'YYYY-MM-DD') BETWEEN TO_DATE(%s, 'YYYY-MM-DD') AND TO_DATE(%s, 'YYYY-MM-DD')
            AND s.class = %s
            ORDER BY sub.userid, sub.questionNo, sub.datetime DESC
        """
        cursor.execute(query, (display_ai_feedback, from_date, to_date, class_name))

        results = cursor.fetchall()

        if results:
            return generate_report_as_json(results, display_ai_feedback)
        else:
            return [{"Email": "No data found for the selected date range and class", "Name": "", "Class": "", "Datetime": "", "Transcribed Text": "", "AI Conversation Responses": ""}]
    except Exception as e:
        print(f"An error occurred: {e}")
        return [{"Email": "Error occurred while fetching data", "Name": "", "Class": "", "Datetime": "", "Transcribed Text": "", "AI Conversation Responses": ""}]
    finally:
        cursor.close()
        conn.close()
123
+
124
def generate_report_as_json(results, display_ai_feedback):
    """Build a per-student JSON report from raw submission rows.

    Each row is (index_no, class, datetime, questionNo, transcribed_text,
    ai_responses, userid).  Every student entry lists all of questions 1-3,
    with "NA" placeholders for questions never attempted, sorted by question
    number.  Returns a JSON string (indent=4).
    """
    roster = {}              # (user_id, index_no, class) -> None; keeps first-seen order
    submissions_by_user = {} # user_id -> list of per-question detail dicts

    for row in results or []:
        uid = row[6]
        roster[(uid, row[0], row[1])] = None
        submissions_by_user.setdefault(uid, []).append({
            "Datetime": row[2].strftime("%Y-%m-%d %H:%M:%S") if row[2] else "",
            "Question": row[3],
            "Student Response": row[4],
            "AI Feedback": row[5] if display_ai_feedback else "Not displayed",
        })

    report = []
    for uid, index_no, class_name in roster:
        entry = {
            "Index No": index_no,
            "Class": class_name,
            "Questions": [],
        }
        pending = [1, 2, 3]  # question numbers not yet seen for this student

        for detail in submissions_by_user.get(uid, []):
            entry["Questions"].append({
                "Question": detail["Question"],
                "Datetime": detail["Datetime"],
                "Student Response": detail["Student Response"],
                "AI Feedback": detail["AI Feedback"],
            })
            if detail["Question"] in pending:
                pending.remove(detail["Question"])

        # Placeholder rows for questions the student never attempted.
        for missing in pending:
            entry["Questions"].append({
                "Question": missing,
                "Datetime": "NA",
                "Student Response": "NA",
                "AI Feedback": "NA" if display_ai_feedback else "Not displayed",
            })

        entry["Questions"] = sorted(entry["Questions"], key=lambda q: q["Question"])
        report.append(entry)

    return json.dumps(report, indent=4)
195
+
196
def getUniqueSubmitDate():
    """Return up to 14 most recent distinct submission dates as 'YYYY-MM-DD' strings.

    On error, returns a one-element list containing an error dict (a different
    element type — callers populating dropdowns should be aware).
    """
    # Connect to the database
    conn = psycopg2.connect(**db_config)
    cursor = conn.cursor()

    try:
        # Distinct calendar dates, newest first, capped at 14.
        cursor.execute("""
            SELECT DISTINCT DATE(datetime) AS unique_date
            FROM public.oc_submissions
            ORDER BY unique_date desc
            LIMIT 14;
        """)
        dates = [str(row[0]) for row in cursor.fetchall()]
        return dates
    except Exception as e:
        print(f"An error occurred: {e}")
        return [{"Error": "Error occurred while fetching data"}]
    finally:
        cursor.close()
        conn.close()
217
+
218
def getUniqueClass():
    """Return the sorted distinct classes that have at least one submission.

    On error, returns a one-element list containing an error dict (a different
    element type — callers populating dropdowns should be aware).
    """
    # Connect to the database
    conn = psycopg2.connect(**db_config)
    cursor = conn.cursor()

    try:
        # Only classes joined to submissions appear — classes with no activity are omitted.
        cursor.execute("""
            SELECT DISTINCT s.class
            FROM oc_students AS s
            JOIN oc_submissions AS sub ON s.id = sub.userid
            ORDER BY s.class
        """)
        listClass = [str(row[0]) for row in cursor.fetchall()]
        return listClass
    except Exception as e:
        print(f"An error occurred: {e}")
        return [{"Error": "Error occurred while fetching data"}]
    finally:
        cursor.close()
        conn.close()
favicon.ico ADDED
picturePerformance.jpg ADDED
policy.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # policy.py
2
+
3
+ user_acceptance_policy = """**Oral Coach App🎤✨**
4
+
5
+ This application uses a Large Language Model to generate responses for educational purposes.
6
+
7
+ **Things to Note When using the Oral Coach**
8
+
9
+ Before practising with this AI-powered Oral Coach, please review these guidelines:
10
+
11
+ 1. **Practising with an AI Coach:**
12
+ - You'll be practising speaking with an AI Oral Coach, which is a computer program that helps you learn. It is not a real person.
13
+
14
+ 2. **Feedback Might Not Be Perfect:**
15
+ - The feedback you get from the AI Oral Coach is usually helpful, but it might not always be right. If you are not sure about something, ask your teacher or parents.
16
+
17
+ 3. **Use Your Own Ideas Too:**
18
+ - If the AI coach gives you ideas, remember to add your own too. It is great to be creative and use your own thoughts!
19
+
20
+ 4. **Keep Personal Info Private:**
21
+ - Don't share your personal details like your NRIC, full name, address, or any passwords with the AI Oral Coach. This keeps you safe!
22
+
23
+ 5. **Be Kind and Positive:**
24
+ - When using the AI Oral Coach, remember to use nice words and be positive. No inappropriate language.
25
+
26
+ 6. **For Learning:**
27
+ - The Oral Coach is designed for improving speaking skills and enhancing learning. It should not enable illegal or unethical activities.
28
+
29
+ 7. **When in Doubt, Ask:**
30
+ - If something seems confusing or you're not sure what to do, always ask your teacher for help.
31
+
32
+ *Large Language Models (LLMs) are a type of AI trained on large datasets to understand language and generate text.
33
+ """
requirements.txt ADDED
Binary file (2.62 kB). View file
 
styles.css ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* style.css */
2
+ /* Overwrite the background color */
3
+ .gradio-container {
4
+ display: flex;
5
+ flex-direction: column;
6
+ flex-grow: 1;
7
+ background: hsl(39deg 57.28% 92.99%);
8
+ }
9
+
10
+ /* Define your light mode default text color */
11
+ :root {
12
+ --custom-text-color-light: #0b0f19; /* Dark color for text in light mode */
13
+ }
14
+
15
+ /* Apply the light mode text color */
16
+ .gradio-container .string.svelte-1kspdo {
17
+ color: var(--custom-text-color-light) !important;
18
+ }
19
+
20
+ /* Dark mode specific styles */
21
+ .dark .gradio-container .string.svelte-1kspdo {
22
+ color: #f9fafb !important; /* Light color for text in dark mode */
23
+ }
24
+
25
+ /* Hide the footer */
26
+ footer {
27
+ display: none !important;
28
+ }
styles.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
import gradio as gr

# Set the theme to 'xiaobaiyuan/theme_brief'
# Theme identifier consumed by app.py via `from styles import theme`.
theme = 'xiaobaiyuan/theme_brief'

# NOTE(review): this Blocks context builds an empty `demo` at import time and
# nothing in the visible code uses it — looks like leftover scaffolding;
# confirm before removing (removal changes import-time side effects).
with gr.Blocks(theme=theme) as demo:
    # Your existing components and layout
    ...
tab_how_oc_works.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # tab_how_oc_works.py
2
+ import gradio as gr
3
+
4
def overview_video_component():
    """Return a gr.HTML component embedding the Loom overview video (responsive 16:9 iframe)."""
    return gr.HTML("""
    <div style="position: relative; padding-bottom: 56.25%; height: 0;">
        <iframe src="https://www.loom.com/embed/0b1ae013725f41f6ba1c89eec9c65567?sid=eac1f57a-e314-4fab-a399-f82e29de927b"
        frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen
        style="position: absolute; top: 0; left: 0; width: 100%; height: 100%;"></iframe>
    </div>
    """)
12
+
13
def how_oral_coach_works_tab():
    """Build the "How Oral Coach Works" tab: intro text plus the overview video embed."""
    with gr.Tab("How Oral Coach Works"):
        gr.Markdown("## How Oral Coach Works")
        gr.Markdown("""
        This tool is designed to enhance students' oral language skills. Here's how it works:
        **Watch the Overview Video Below**:
        """)
        overview_video_component()
tab_teachers_dashboard.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from database_functions import getUniqueSubmitDate, getUniqueClass
3
+ from teachers_dashboard import show_dashboard, updateReportByDateAndClass, chat_with_json_output
4
+
5
def create_teachers_dashboard_tab():
    """Build the password-gated Teacher's Dashboard tab.

    Layout: a login column (password + submit), and a hidden column revealed by
    show_dashboard on success, containing date/class filters, a submissions
    report viewer, and a GPT analysis chat over the report JSON.
    Returns the gr.Tab so the caller can compose it into the app.
    """
    with gr.Tab("Teacher's Dashboard") as teacher_dash_tab:
        with gr.Column() as login_dash:
            password_input = gr.Textbox(label="Enter Password", type="password")
            btn_login_dash = gr.Button("Submit")
            dashboard_content = gr.HTML()

        with gr.Column(visible=False) as teacher_dash:
            gr.Markdown("## Teacher Dash (unlocked)")
            with gr.Tab("Select Date Range and Class"):
                # NOTE(review): these DB queries run at UI build time (import),
                # not on tab open — confirm that is acceptable.
                date_choices = getUniqueSubmitDate()
                ddl_start_date = gr.Dropdown(choices=date_choices, label="Start Date")
                ddl_end_date = gr.Dropdown(choices=date_choices, label="End Date")
                class_choices = getUniqueClass()
                ddl_class = gr.Dropdown(choices=class_choices, label="Select a class")
                display_ai_feedback = gr.Checkbox(label="Display AI Feedback", value=True)
                btn_show_report_date_range_class = gr.Button("Display Submissions")
                submission_report = gr.JSON(label="Submissions for Selected Date Range and Class")

                gr.Markdown("You can use the following example queries to analyze the student responses:")
                query_input = gr.Textbox(label="Teacher's Query")
                additional_inputs_accordion = gr.Accordion(label="Example Queries", open=True)
                with additional_inputs_accordion:
                    gr.Examples(examples=[
                        ["General Analysis: Summarize overall performance and identify patterns"],
                        ["Specific Analysis: Identify common misconceptions and suggest interventions"],
                        ["Specific Analysis: Analyze the effectiveness of strategies used"],
                        ["Specific Analysis: Compare performance of different student groups"],
                        ["Specific Analysis: Track individual student progress over time"],
                        ["Completion Rate Analysis: Breakdown of questions attempted and insights"]
                    ], inputs=[query_input])

                chat_interface = gr.Chatbot(label="Overall Analysis on Students Responses")
                chat_button = gr.Button("Chat")

                # GPT analysis over the currently displayed report JSON.
                chat_button.click(
                    chat_with_json_output,
                    inputs=[query_input, submission_report, chat_interface],
                    outputs=chat_interface
                )

        # Login reveals teacher_dash, hides login_dash, and refreshes the dropdowns.
        btn_login_dash.click(show_dashboard, inputs=[password_input], outputs=[dashboard_content, teacher_dash, login_dash, ddl_start_date, ddl_class])
        btn_show_report_date_range_class.click(updateReportByDateAndClass, inputs=[ddl_start_date, ddl_end_date, ddl_class, display_ai_feedback], outputs=[submission_report, chat_interface])

    return teacher_dash_tab
teachers_dashboard.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #teachers_dashboard.py
2
+ import gradio as gr
3
+ import thinkingframes
4
+ from dotenv import load_dotenv
5
+ from openai import OpenAI
6
+ from database_functions import get_submissions_by_date_and_class, getUniqueSubmitDate, getUniqueClass
7
+
8
+ load_dotenv()
9
+ client = OpenAI()
10
+
11
def validate_password(password):
    """Check the teacher-dashboard password; returns True on a match.

    Fixes: the expected value can now be overridden via the DASHBOARD_PASSWORD
    environment variable (falling back to the original hard-coded value, so
    existing deployments keep working), and the comparison uses
    hmac.compare_digest to avoid timing side channels.  Longer term the
    secret should live only in the environment, not in source control.
    """
    import hmac
    import os

    correct_password = os.environ.get("DASHBOARD_PASSWORD", "Happyteacher2024")
    return hmac.compare_digest(password, correct_password)
14
+
15
def show_dashboard(password):
    """Gate the teacher dashboard behind the password.

    Returns a 5-tuple for the wired outputs: (HTML status, teacher_dash
    visibility update, login_dash visibility update, start-date Dropdown,
    class Dropdown).  On success the dropdowns are rebuilt from fresh DB data.
    """
    if validate_password(password):
        date_choices = getUniqueSubmitDate()
        class_choices = getUniqueClass()
        return "<p>Dashboard content goes here</p>", gr.update(visible=True), gr.update(visible=False), gr.Dropdown(choices=date_choices, label="Select a date"), gr.Dropdown(choices=class_choices, label="Select a class")
    # NOTE(review): choices='' passes a string where a list is expected —
    # gradio may iterate it as characters; confirm intended (use [] instead?).
    return "<p>Incorrect password</p>", gr.update(visible=False), gr.update(visible=False), gr.Dropdown(choices='', label="Select a date"), gr.Dropdown(choices='', label="Select a class")
21
+
22
def updateReportByDateAndClass(start_date, end_date, class_name, display_ai_feedback):
    """Fetch the submissions report for the selected filters and clear the analysis chat.

    Returns (report JSON/placeholder from get_submissions_by_date_and_class,
    empty chat history).
    """
    report = get_submissions_by_date_and_class(
        start_date, end_date, class_name, display_ai_feedback
    )
    return report, []
26
+
27
def chat_with_json_output(query, json_output, chat_history):
    """Stream a GPT analysis of the submissions report for the teacher chat.

    Generator wired to the dashboard Chat button: yields the chat history with
    the assistant's partial message appended as chunks arrive.

    Args:
        query: the teacher's analysis prompt.
        json_output: the report JSON currently shown in the dashboard.
        chat_history: existing (user, assistant) tuples; mutated at the end.
    """
    questions = thinkingframes.questions
    strategies = [strategy[0] for strategy in thinkingframes.strategy_options.values()]
    picture_description = thinkingframes.description

    history_openai_format = [
        {"role": "system", "content": f"Here is the JSON output of the student responses and AI interactions:\n{json_output}"},
        {"role": "user", "content": f"Selected Analysis Prompt: {query}"}
    ]
    for human, assistant in chat_history:
        history_openai_format.append({"role": "user", "content": human})
        history_openai_format.append({"role": "assistant", "content": assistant})

    system_prompt = f"""
    You are an English Language Teacher analyzing student responses to oral questions. The questions and strategies used are:

    Questions:
    1. {questions[0]}
    2. {questions[1]}
    3. {questions[2]}

    Strategies:
    1. {strategies[0]}
    2. {strategies[1]}
    3. {strategies[2]}

    Picture Description (relevant only for Question 1):
    {picture_description}

    Based on the provided JSON output and the selected analysis prompt, please perform the following:

    General Analysis:
    - If the selected prompt is "General Analysis: Summarize overall performance and identify patterns":
    - Summarize the overall performance of students for each question, considering the relevant strategies and picture description.
    - Identify notable patterns and trends in student responses and AI feedback.
    - Highlight exemplary responses or feedback that demonstrate effective use of strategies or insightful interpretations.

    Specific Analysis:
    - If the selected prompt is "Specific Analysis: Identify common misconceptions and suggest interventions":
    - Identify common misconceptions or errors in student responses.
    - Suggest targeted interventions to address these misconceptions and improve student understanding.

    - If the selected prompt is "Specific Analysis: Analyze the effectiveness of strategies used":
    - Analyze the effectiveness of each strategy used by students.
    - Provide recommendations for improving the use of strategies and enhancing student performance.

    - If the selected prompt is "Specific Analysis: Compare performance of different student groups":
    - Compare the performance of different student groups (e.g., high performers vs. struggling students).
    - Offer insights and recommendations based on the identified differences and patterns.

    - If the selected prompt is "Specific Analysis: Track individual student progress over time":
    - Track the progress of individual students over time, if data is available.
    - Highlight areas where students have shown improvement or require additional support.

    Completion Rate Analysis:
    - If the selected prompt is "Completion Rate Analysis: Breakdown of questions attempted and insights":
    - Identify the students who have attempted all three questions, two questions, only Question 1, or no questions at all.
    - Calculate the percentage of students in each category.
    - Provide insights on the potential reasons for the completion rates (e.g., difficulty level, student engagement, etc.).
    - Offer recommendations for improving completion rates, such as providing additional support or incentives.

    Please provide the analysis in a clear and organized format, using bullet points, tables, or paragraphs as appropriate. Include specific examples and data-driven insights to support your recommendations. Focus on actionable feedback that can directly impact student learning and engagement.
    """

    # NOTE(review): the detailed instructions are appended with role "user"
    # (not "system"), and the query is sent twice (here and at the top) —
    # confirm both are intentional.
    history_openai_format.append({"role": "user", "content": system_prompt})
    history_openai_format.append({"role": "user", "content": query})

    response = client.chat.completions.create(
        model='gpt-4o-2024-05-13',
        messages=history_openai_format,
        temperature=0.2,
        max_tokens=1000,
        stream=True
    )

    partial_message = ""
    for chunk in response:
        if chunk.choices[0].delta.content is not None:
            partial_message += chunk.choices[0].delta.content
            yield chat_history + [("Assistant", partial_message)]

    chat_history.append(("Assistant", partial_message))
    # A generator's return value is normally discarded by iterating callers.
    return chat_history
thinkingframes.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # thinkingframes.py
2
+
3
# Canonical written description of the stimulus photograph shown to students.
# It is interpolated into the '{description}' placeholder that appears in some
# strategy_options instruction templates below — presumably via str.format in
# the caller (app.py); TODO confirm against the caller.
description = (
    "In this photograph, we see a group of young girls dressed in shiny, glittery costumes being helped by adults. "
    "It looks like they are preparing for a performance or a dance recital. Each girl has her hair neatly done, and some "
    "are getting decorative hairpieces attached. They are all standing indoors, as we can see windows with light coming "
    "through in the background, which creates a calm and focused atmosphere.\n\n"
    "The adults, possibly parents or teachers, are attentively making sure that the girls look their best. The girls seem "
    "to be patient and maybe a bit nervous as they get ready. One girl in the front is closing her eyes and has a serious "
    "expression on her face, which might mean she is concentrating or thinking about the upcoming event.\n\n"
    "The way everyone is dressed and the care being taken with their appearance suggests that this is an important and "
    "special occasion. It's a busy scene with everyone doing something to prepare, and it captures a moment of quiet "
    "intensity before a performance where everyone wants to do well."
)
15
+
16
# Catalogue of thinking frames: maps a frame key (as selected in the UI) to a
# (dropdown_label, feedback_instruction) pair.
#
# NOTE(review): "PEEL" and "SEP" instruction texts embed a literal
# '{description}' placeholder — presumably filled with the module-level
# `description` via str.format by the caller; verify in app.py.
# NOTE(review): the "OREOS" instruction still says "OREO thinking frame", and
# the "SEP" text contains a stray "S." — these strings are sent to the model
# at runtime, so they are deliberately left byte-for-byte unchanged here.
strategy_options = {
    "PEEL": ("PEEL strategy (Point, Evidence, Experience(s), Link back to the question)",
             "Structure your feedback using the PEEL strategy. Begin with a Point, provide Evidence to support it, specifically referencing possible details from the photograph: '{description}'. Share an Experience or example, and finally Link back to the question."),

    "5W1H": ("5W1H thinking frame (Who, What, Where, When, Why, How)",
             "Structure your feedback using the 5W1H thinking frame. Address Who the user is talking about, What they are discussing, Where it's happening, When it's occurring, Why they feel that way, and How they express it."),

    "OREO": ("OREO thinking frame (Opening Statement, Reasons, Elaborate, Opinion)",
             "Structure your feedback using the OREO thinking frame. Begin with an Opening Statement about the user's response, provide Reasons for your feedback, Elaborate on those reasons, and conclude with an Opinion on their overall response."),

    "SEP": ("SEP strategy - State, Elaborate, Personal experiences",
            "Structure your feedback using the SEP strategy. Begin with a State (S), where you state your point and answer to the question posed. Next, Elaborate (E) on your statement, justifying and explaining the reasons for your choice of answer specifically referencing possible details from the photograph: '{description}'. S. Use examples from various areas of your life such as Self, Home, School, Community, and Nation to enrich your elaboration. Lastly, share Personal experiences (P) from your own life or others' experiences to support your answer."),

    "OREOS": ("OREOS thinking frame (Opinion, Reasons, Elaborate, Own Experiences, Suggestion)",
              "Structure your feedback using the OREO thinking frame. Begin with an Opening Statement about the user's response, provide Reasons for your feedback, Elaborate on those reasons, and conclude with an Opinion on their overall response, and make a Suggestion."),

    "TREES": ("TREES strategy (Thought, Reasons, Elaboration, Experiences, Summary or Suggestions)",
              "Begin your feedback with a Thought that directly addresses the question. "
              "Follow with Reasons that are relevant and logical to support this thought. "
              "Elaborate on your reasons using the 5Ws1H method: Who, What, Where, When, Why, and How, to provide a comprehensive explanation. "
              "Incorporate personal Experiences that are pertinent and provide insight into the topic to make your response more engaging. "
              "Conclude with a Summary that encapsulates your main points succinctly, or offer Suggestions that provide depth and forward-thinking perspectives."),

    "TREES+": ("TREES+ strategy - Thought, Reasons, Elaboration, Experiences, Summary or Suggestions",
               "Begin your feedback with a Thought that directly addresses the question. "
               "Follow with Reasons to support this thought, making sure they are relevant and logical. "
               "Then, Elaborate on your reasons with examples or explanations that further clarify your point. "
               "Incorporate personal Experiences that are pertinent to the topic to make your response more engaging. "
               "Conclude with a Summary that encapsulates your main points succinctly, or provide Suggestions that offer insight or recommendations. "
               "Ask yourself: Does my Thought align with the question? Are my Reasons strong and supportive of my Thought? "
               "How can I Elaborate on my reasons more effectively? What Experiences can I share that will resonate with my main points? "
               "How can I tie all my points together in a Summary, or what Suggestions can I make for future consideration?")
}
49
+
50
# Oral-exam stimulus questions, in presentation order. Indexed by
# current_question_index in generate_system_message(); the leading numbers
# are part of the displayed text, not list indices.
questions = [
    "1. How do you think the people in the photograph might be feeling? Why?",
    "2. Would you like to perform on stage? Why or why not?",
    "3. What are some ways to build confidence in children?",
]
55
+
56
+
57
# Function to generate the feedback prompt based on feedback level.
# Fixed instruction text for each supported feedback level. The texts are
# byte-identical to the original branch returns (which used f-strings that
# contained no placeholders, so the `f` prefixes were redundant).
_FEEDBACK_PROMPTS = {
    "Brief Feedback": (
        "Provide concise feedback Strictly following a 40 word limit. "
        "Conclude with one relevant Socratic question for reflect on their response and label them as Follow Up Question:."
    ),
    "Moderate Feedback": (
        "Provide clear, constructive, concise feedback strictly in a 70 word limit. "
        "Include two relevant Socratic questions for the student to reflect on their response and label them as Follow Up Questions:"
    ),
    # No additional prompting for comprehensive feedback beyond this text.
    "Comprehensive Feedback": (
        "Strictly provide feedback of around 300 words limit, including a full "
        "suggested response based on the student's response structured using the relevant thinking frame."
    ),
}


def generate_prompt(feedback_level):
    """Return the feedback-structure instruction for *feedback_level*.

    Args:
        feedback_level: one of "Brief Feedback", "Moderate Feedback" or
            "Comprehensive Feedback".

    Returns:
        The instruction text to splice into the coaching system message.

    Raises:
        ValueError: if *feedback_level* is not a supported level.
    """
    try:
        return _FEEDBACK_PROMPTS[feedback_level]
    except KeyError:
        raise ValueError(f"Invalid feedback level: {feedback_level}") from None
70
+
71
+
72
+ # Update the generate_system_message function to include feedback_level and strategy_choice
73
def generate_system_message(current_question_index, feedback_level):
    """Compose the English Oral Coach system prompt.

    Combines the question at *current_question_index* (from the module-level
    ``questions`` list) with the feedback-structure instruction produced by
    :func:`generate_prompt` for *feedback_level*.

    Raises:
        ValueError: propagated from generate_prompt() for an unknown level.
        IndexError: if *current_question_index* is out of range.
    """
    structure = generate_prompt(feedback_level)
    question = questions[current_question_index]

    return f"""
    As your English Oral Coach, I'll guide you through answering the question: '{question}'.

    {structure}

    I'll help you reflect on your response, suggesting areas to elaborate and clarify. Remember, our goal is to enhance your critical thinking and independence.

    Please ensure your response is in English and language and tone is age appropriate for Singapore students in Primary 5.
    """
utils.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#utils.py
import base64
import logging

from database_functions import add_user_privacy  # Make sure this is correctly implemented
4
+
5
def get_image_html(image_path, alt="SBC6", width=450):
    """Return an ``<img>`` tag embedding the file at *image_path* as a base64 data URI.

    Args:
        image_path: path to the image file; read in binary mode.
        alt: alt text for the tag (default keeps the previously hard-coded "SBC6").
        width: pixel width attribute (default keeps the previously hard-coded 450).

    Returns:
        An HTML string with the image inlined, centered via an inline style.

    Raises:
        OSError: if the file cannot be opened or read.
    """
    with open(image_path, "rb") as img_file:
        encoded = base64.b64encode(img_file.read()).decode("utf-8")
    # "image/jpeg" is the registered MIME type; the original emitted the
    # non-standard "image/jpg".
    return (
        f'<img src="data:image/jpeg;base64,{encoded}" alt="{alt}" '
        f'width="{width}" style="display: block; margin: auto;"/>'
    )
9
+
10
def collect_student_info(class_name, index_no):
    """Validate a student's class/index selection and register them in the database.

    Args:
        class_name: selected class; falsy values (None, "") are rejected.
        index_no: selected index number; falsy values are rejected.

    Returns:
        Tuple ``(ok, message, userid)``: ``ok`` is False with an explanatory
        message and ``userid`` None on validation failure or database error;
        on success, message and userid come from add_user_privacy().
    """
    # Guard clause: both fields are required before touching the database.
    if not class_name or not index_no:
        return False, "Please select your class and index number.", None

    try:
        userid, dbMsg = add_user_privacy(class_name, index_no)
        return True, dbMsg, userid
    except Exception:
        # Log with traceback instead of print() so failures surface in the
        # app's logs; the returned message is unchanged for callers/UI.
        logging.getLogger(__name__).exception("Failed to add user to the database")
        return False, "An error occurred while adding user to the database.", None
23
+