# NOTE(review): this file arrived with its formatting destroyed (collapsed onto
# a few physical lines) and with the HTML tags inside its string literals
# stripped.  The Python logic below is taken verbatim from the garbled source;
# the HTML markup is a reconstruction inferred from the surviving text content
# and from variables that were otherwise unused (win_rate_class, the
# "Proprietary" badge, the model_links lookup) -- confirm the exact tags and
# CSS class names against the rendered page / stylesheet.

from .config import *
from .db import *
from .models import *

import pandas as pd


def get_leaderboard(reveal_prelim=False, hide_battle_votes=False,
                    sort_by_elo=False, hide_proprietary=False):
    """Build the HTML leaderboard table of models ranked by vote results.

    Args:
        reveal_prelim: if True, include models with 500 or fewer total votes.
        hide_battle_votes: if True, exclude votes cast by usernames ending in
            the literal suffix "_battle".
        sort_by_elo: if True, rank by the secondary ELO-style "Arena Score"
            instead of raw win rate, and show the extra score column.
        hide_proprietary: if True, drop closed-source models from the table.

    Returns:
        str: an HTML ``<table>`` fragment ready for embedding.
    """
    conn = get_db()
    cursor = conn.cursor()

    if hide_battle_votes:
        # Count only votes whose username does NOT end in "_battle".
        # FIX: a bare '_' in LIKE matches any single character, so the
        # original '%_battle' also excluded e.g. "xbattle"; the ESCAPE
        # clause makes the underscore literal.  (Raw string keeps the
        # backslashes intact for SQL.)
        sql = r'''
            SELECT m.name,
                   SUM(CASE WHEN v.username NOT LIKE '%\_battle' ESCAPE '\' AND v.vote = 1 THEN 1 ELSE 0 END) as upvote,
                   SUM(CASE WHEN v.username NOT LIKE '%\_battle' ESCAPE '\' AND v.vote = -1 THEN 1 ELSE 0 END) as downvote
            FROM model m
            LEFT JOIN vote v ON m.name = v.model
            GROUP BY m.name
        '''
    else:
        sql = '''
            SELECT name,
                   SUM(CASE WHEN vote = 1 THEN 1 ELSE 0 END) as upvote,
                   SUM(CASE WHEN vote = -1 THEN 1 ELSE 0 END) as downvote
            FROM model
            LEFT JOIN vote ON model.name = vote.model
            GROUP BY name
        '''
    cursor.execute(sql)
    data = cursor.fetchall()

    df = pd.DataFrame(data, columns=['name', 'upvote', 'downvote'])
    # Map internal ids to display names; the anonymous entry is re-labelled
    # with its revealed identity.
    df['name'] = df['name'].replace(model_names).replace(
        'Anonymous Sparkle', 'Fish Speech v1.5')

    # Total votes and win rate (upvotes as a percentage of all votes).
    df['votes'] = df['upvote'] + df['downvote']
    df['win_rate'] = (df['upvote'] / df['votes'] * 100).round(1)

    # Remove models with no votes at all.
    df = df[df['votes'] > 0]

    # Hide models with insufficient votes unless preliminary results are shown.
    if not reveal_prelim:
        df = df[df['votes'] > 500]

    # Secondary "Arena Score": one ELO-style pass over every ordered pair of
    # models, using each model's overall upvote share as the "actual" outcome.
    # NOTE(review): this is not true ELO (no per-battle history is used), and
    # each unordered pair is visited twice because both (i, j) and (j, i) are
    # processed -- preserved as-is from the original.
    df['elo'] = 1200
    elo_col = df.columns.get_loc('elo')  # hoisted out of the O(n^2) loop
    for i in range(len(df)):
        for j in range(len(df)):
            if i == j:
                continue
            try:
                expected_a = 1 / (1 + 10 ** ((df['elo'].iloc[j] - df['elo'].iloc[i]) / 400))
                expected_b = 1 / (1 + 10 ** ((df['elo'].iloc[i] - df['elo'].iloc[j]) / 400))
                actual_a = df['upvote'].iloc[i] / df['votes'].iloc[i] if df['votes'].iloc[i] > 0 else 0.5
                actual_b = df['upvote'].iloc[j] / df['votes'].iloc[j] if df['votes'].iloc[j] > 0 else 0.5
                df.iloc[i, elo_col] += 32 * (actual_a - expected_a)
                df.iloc[j, elo_col] += 32 * (actual_b - expected_b)
            except Exception as e:
                print(f"Error in ELO calculation for rows {i} and {j}: {str(e)}")
                continue
    df['elo'] = round(df['elo'])

    # Sort based on user preference, then assign display ranks.
    sort_column = 'elo' if sort_by_elo else 'win_rate'
    df = df.sort_values(by=sort_column, ascending=False)
    df['order'] = ['#' + str(i + 1) for i in range(len(df))]

    # Select and order columns for display.
    df = df[['order', 'name', 'win_rate', 'votes', 'elo']]

    # Remove proprietary models if the filter is enabled.
    if hide_proprietary:
        df = df[~df['name'].isin(closed_source)]

    # ---- Render as an HTML table (markup reconstructed -- see NOTE at top).
    markdown_table = """
<table class="leaderboard-table">
  <thead>
    <tr>
      <th>Rank</th>
      <th>Model</th>
      <th>Win Rate</th>
      <th>Votes</th>""" + ("""
      <th>Arena Score</th>""" if sort_by_elo else "") + """
    </tr>
  </thead>
  <tbody>
"""

    def get_win_rate_class(win_rate):
        # CSS class buckets for colour-coding the win-rate cell.
        if win_rate >= 60:
            return "win-rate-excellent"
        elif win_rate >= 55:
            return "win-rate-good"
        elif win_rate >= 45:
            return "win-rate-average"
        else:
            return "win-rate-below"

    for _, row in df.iterrows():
        win_rate_class = get_win_rate_class(row['win_rate'])
        win_rate_html = f'<span class="{win_rate_class}">{row["win_rate"]}%</span>'

        # Link the model name when a URL is known, and append a badge for
        # closed-source models.
        model_name = row['name']
        original_model_name = model_name
        if model_name in model_links:
            model_name = f'<a href="{model_links[model_name]}" target="_blank">{model_name}</a>'
        if original_model_name in closed_source:
            model_name += ' <span class="proprietary-badge">Proprietary</span>'

        markdown_table += f'''    <tr>
      <td>{row['order']}</td>
      <td>{model_name}</td>
      <td>{win_rate_html}</td>
      <td>{row['votes']:,}</td>''' + (
            f'''
      <td>{int(row['elo'])}</td>''' if sort_by_elo else ""
        ) + "\n    </tr>\n"

    markdown_table += "  </tbody>\n</table>"
    return markdown_table