Terry Zhuo
committed on
Commit
·
956ab2c
1
Parent(s):
ae06b8f
update
Browse files- app.py +71 -13
- azure_count_ip_data.py +103 -0
app.py
CHANGED
@@ -3,9 +3,22 @@ import gradio as gr
|
|
3 |
import pandas as pd
|
4 |
from datetime import datetime
|
5 |
import time
|
6 |
-
|
7 |
-
|
8 |
import threading
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
|
10 |
# Define the path for storing the data
|
11 |
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
|
@@ -29,18 +42,61 @@ def load_stats():
|
|
29 |
last_update = f.read().strip()
|
30 |
return df, last_update
|
31 |
except (FileNotFoundError, pd.errors.EmptyDataError):
|
32 |
-
return pd.DataFrame(columns=['Annotator', '
|
33 |
|
34 |
def update_stats():
|
35 |
-
"""Get the latest battle statistics"""
|
36 |
try:
|
37 |
# Initialize RemoteLogReader
|
38 |
reader = RemoteLogReader()
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
44 |
|
45 |
# Get current time
|
46 |
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
@@ -51,7 +107,7 @@ def update_stats():
|
|
51 |
return df, current_time
|
52 |
except Exception as e:
|
53 |
print(f"Error updating stats: {e}")
|
54 |
-
return pd.DataFrame(columns=['Annotator', '
|
55 |
|
56 |
def auto_update(state):
|
57 |
"""Background task to update stats every hour"""
|
@@ -86,7 +142,7 @@ def create_ui():
|
|
86 |
|
87 |
with gr.Blocks(title="Battle Count Statistics") as app:
|
88 |
gr.Markdown("# Battle Count Statistics")
|
89 |
-
gr.Markdown("Displays the count of
|
90 |
|
91 |
with gr.Row():
|
92 |
last_update = gr.Textbox(
|
@@ -100,10 +156,12 @@ def create_ui():
|
|
100 |
value=get_current_stats,
|
101 |
interactive=False,
|
102 |
wrap=True,
|
|
|
|
|
103 |
)
|
104 |
|
105 |
-
|
106 |
-
|
107 |
|
108 |
return app
|
109 |
|
|
|
3 |
import pandas as pd
|
4 |
from datetime import datetime
|
5 |
import time
|
6 |
+
import sys
|
7 |
+
import importlib.util
|
8 |
import threading
|
9 |
+
from log_reader import RemoteLogReader
|
10 |
+
|
11 |
+
# Define the path for the azure_count_ip_data module
|
12 |
+
azure_count_ip_data_path = os.path.join(os.path.dirname(__file__), 'azure_count_ip_data.py')
|
13 |
+
|
14 |
+
# Import the module dynamically
|
15 |
+
spec = importlib.util.spec_from_file_location("azure_count_ip_data", azure_count_ip_data_path)
|
16 |
+
azure_count_ip_data = importlib.util.module_from_spec(spec)
|
17 |
+
spec.loader.exec_module(azure_count_ip_data)
|
18 |
+
|
19 |
+
# Get the functions we need
|
20 |
+
count_files_per_annotator = azure_count_ip_data.count_files_per_annotator
|
21 |
+
count_deduplicated_files_per_annotator = azure_count_ip_data.count_deduplicated_files_per_annotator
|
22 |
|
23 |
# Define the path for storing the data
|
24 |
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
|
|
|
42 |
last_update = f.read().strip()
|
43 |
return df, last_update
|
44 |
except (FileNotFoundError, pd.errors.EmptyDataError):
|
45 |
+
return pd.DataFrame(columns=['Annotator', 'Total Count', 'Unique Count', 'Unique %']), ""
|
46 |
|
47 |
def update_stats():
|
48 |
+
"""Get the latest battle statistics with both total and deduplicated counts"""
|
49 |
try:
|
50 |
# Initialize RemoteLogReader
|
51 |
reader = RemoteLogReader()
|
52 |
+
|
53 |
+
# Get total annotator counts
|
54 |
+
total_counts = count_files_per_annotator(reader)
|
55 |
+
|
56 |
+
# Get deduplicated annotator counts
|
57 |
+
unique_counts = count_deduplicated_files_per_annotator(reader)
|
58 |
+
|
59 |
+
# Combine the data into a single DataFrame
|
60 |
+
data = []
|
61 |
+
all_annotators = set(total_counts.keys()) | set(unique_counts.keys())
|
62 |
+
|
63 |
+
# Track totals for summary row
|
64 |
+
total_sum = 0
|
65 |
+
unique_sum = 0
|
66 |
+
|
67 |
+
for annotator in all_annotators:
|
68 |
+
total = total_counts.get(annotator, 0)
|
69 |
+
unique = unique_counts.get(annotator, 0)
|
70 |
+
# Calculate percentage of unique prompts
|
71 |
+
percentage = round((unique / total * 100) if total > 0 else 0, 1)
|
72 |
+
|
73 |
+
# Add to totals
|
74 |
+
total_sum += total
|
75 |
+
unique_sum += unique
|
76 |
+
|
77 |
+
data.append({
|
78 |
+
'Annotator': annotator,
|
79 |
+
'Total Count': total,
|
80 |
+
'Unique Count': unique,
|
81 |
+
'Unique %': f"{percentage}%"
|
82 |
+
})
|
83 |
+
|
84 |
+
# Add summary row
|
85 |
+
overall_percentage = round((unique_sum / total_sum * 100) if total_sum > 0 else 0, 1)
|
86 |
+
data.append({
|
87 |
+
'Annotator': 'TOTAL',
|
88 |
+
'Total Count': total_sum,
|
89 |
+
'Unique Count': unique_sum,
|
90 |
+
'Unique %': f"{overall_percentage}%"
|
91 |
+
})
|
92 |
+
|
93 |
+
# Convert to DataFrame and sort by total count, keeping TOTAL at the bottom
|
94 |
+
df = pd.DataFrame(data)
|
95 |
+
# Move TOTAL row to the end
|
96 |
+
df = pd.concat([
|
97 |
+
df[df['Annotator'] != 'TOTAL'].sort_values('Total Count', ascending=False),
|
98 |
+
df[df['Annotator'] == 'TOTAL']
|
99 |
+
]).reset_index(drop=True)
|
100 |
|
101 |
# Get current time
|
102 |
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
|
|
107 |
return df, current_time
|
108 |
except Exception as e:
|
109 |
print(f"Error updating stats: {e}")
|
110 |
+
return pd.DataFrame(columns=['Annotator', 'Total Count', 'Unique Count', 'Unique %']), ""
|
111 |
|
112 |
def auto_update(state):
|
113 |
"""Background task to update stats every hour"""
|
|
|
142 |
|
143 |
with gr.Blocks(title="Battle Count Statistics") as app:
|
144 |
gr.Markdown("# Battle Count Statistics")
|
145 |
+
gr.Markdown("Displays the count of battles per annotator. 'Total Count' shows all valid battles, while 'Unique Count' shows deduplicated battles based on the first user prompt. Only conversations that pass the vote conditions are counted.")
|
146 |
|
147 |
with gr.Row():
|
148 |
last_update = gr.Textbox(
|
|
|
156 |
value=get_current_stats,
|
157 |
interactive=False,
|
158 |
wrap=True,
|
159 |
+
column_widths=["40%", "20%", "20%", "20%"],
|
160 |
+
height=600
|
161 |
)
|
162 |
|
163 |
+
refresh_btn = gr.Button("Refresh Now")
|
164 |
+
refresh_btn.click(fn=manual_refresh, outputs=[output, last_update])
|
165 |
|
166 |
return app
|
167 |
|
azure_count_ip_data.py
CHANGED
@@ -204,6 +204,109 @@ def get_annotator_name(ip: Optional[str], username: Optional[str]) -> Optional[s
|
|
204 |
|
205 |
return None
|
206 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
207 |
def count_files_per_annotator(reader: 'RemoteLogReader', start_date_str: str = "2025_02_18") -> Dict[str, int]:
|
208 |
"""Count files per annotator name from the given start date, considering both IP and username"""
|
209 |
# Convert start date string to datetime
|
|
|
204 |
|
205 |
return None
|
206 |
|
207 |
+
def _prompt_from_message(message) -> Optional[str]:
    """Return the text of a single conversation-message entry, or None.

    Two message shapes are supported:
      * pair lists:  ["<|im_start|>user", "hello"]  -> "hello"
      * role dicts:  {"role": "user", "content": "hello"} -> "hello"
    Returns None for any other shape (or a dict whose 'content' is null),
    so callers can fall through to the next candidate format.
    """
    if isinstance(message, list) and len(message) > 1:
        return message[1]
    if isinstance(message, dict) and 'content' in message:
        return message.get('content')
    return None


def get_first_user_prompt(content: str) -> Optional[str]:
    """Extract the first user prompt from the conversation content.

    `content` is expected to be newline-separated JSON records; only the
    first non-blank line is parsed. Several historical log layouts are
    probed in order:
      1/2. `state.messages[0]` as a pair list or a role/content dict
      3.   a root-level `messages[0]` in either of the same two shapes
      4.   a bare 'user' / 'human' / 'Human' key at the root

    Returns the prompt string, or None when nothing matches or parsing
    fails (failures are logged, never raised).
    """
    try:
        lines = [line.strip() for line in content.split('\n') if line.strip()]
        if not lines:
            return None

        # Parse the first line to get the messages.
        first_line_data = json.loads(lines[0])

        # Formats 1 & 2: state.messages array.
        messages = first_line_data.get('state', {}).get('messages', [])
        if messages:
            prompt = _prompt_from_message(messages[0])
            if prompt is not None:
                return prompt

        # Format 3: messages array directly at the root.
        messages = first_line_data.get('messages', [])
        if messages:
            prompt = _prompt_from_message(messages[0])
            if prompt is not None:
                return prompt

        # Format 4: look for a specific user role key.
        for key in ['user', 'human', 'Human']:
            if key in first_line_data:
                return first_line_data[key]

        log.warning(f"Could not extract first user prompt from content: {content[:200]}...")
        return None
    except Exception as e:
        log.error(f"Error extracting first user prompt: {e}")
        return None
|
252 |
+
|
253 |
+
def count_deduplicated_files_per_annotator(reader: 'RemoteLogReader', start_date_str: str = "2025_02_18") -> Dict[str, int]:
    """Count deduplicated files per annotator name from the given start date, considering both IP and username

    Deduplication is based on the first user prompt for each annotator.
    Only counts conversations that pass the vote conditions.
    """
    # Each annotator maps to the set of distinct first prompts seen so far;
    # the set size at the end is that annotator's deduplicated count.
    prompts_by_annotator = defaultdict(set)

    try:
        # Walk every calendar day from the start date up to (and including) today.
        day = datetime.strptime(start_date_str, "%Y_%m_%d")
        end = datetime.now()

        while day <= end:
            date_str = day.strftime("%Y_%m_%d")
            try:
                # Only battle_anony-mode conversations are considered.
                battle_logs = reader.get_conv_logs(date_str).get('battle_anony', {})

                for conv_id, messages in battle_logs.items():
                    if not messages:
                        continue

                    # Re-serialize the messages into the newline-delimited JSON
                    # format that the file-based helpers expect.
                    content = '\n'.join(json.dumps(msg) for msg in messages)

                    # Skip conversations that fail the vote conditions.
                    ip, username, vote_conditions_met = get_file_data(content)
                    if not vote_conditions_met:
                        continue

                    # Resolve the annotator from either IP or username.
                    annotator_name = get_annotator_name(ip, username)
                    if not annotator_name:
                        continue

                    first_prompt = get_first_user_prompt(content)
                    if first_prompt:
                        prompts_by_annotator[annotator_name].add(first_prompt.strip())
            except Exception as e:
                log.error(f"Error processing logs for date {date_str}: {e}")

            day += timedelta(days=1)

    except Exception as e:
        log.error(f"Error accessing logs: {e}")

    # Collapse each prompt set to its cardinality.
    return {name: len(prompts) for name, prompts in prompts_by_annotator.items()}
|
309 |
+
|
310 |
def count_files_per_annotator(reader: 'RemoteLogReader', start_date_str: str = "2025_02_18") -> Dict[str, int]:
|
311 |
"""Count files per annotator name from the given start date, considering both IP and username"""
|
312 |
# Convert start date string to datetime
|