From b00e4c526bc943579ba6b9731ca9354e070697ee Mon Sep 17 00:00:00 2001 From: federetyk Date: Thu, 15 Jan 2026 11:26:39 +0100 Subject: [PATCH 01/17] refactor: generalize dataset indexing from language-based to dataset_id-based --- CONTRIBUTING.md | 10 +- README.md | 2 +- examples/custom_task_example.py | 6 +- src/workrb/config.py | 10 +- src/workrb/results.py | 23 ++- src/workrb/run.py | 42 ++--- src/workrb/tasks/abstract/base.py | 102 +++++++--- .../tasks/abstract/classification_base.py | 92 ++++++--- src/workrb/tasks/abstract/ranking_base.py | 60 ++++-- src/workrb/tasks/classification/job2skill.py | 40 ++-- src/workrb/tasks/ranking/job2skill.py | 30 +-- src/workrb/tasks/ranking/job_similarity.py | 17 +- src/workrb/tasks/ranking/jobnorm.py | 16 +- src/workrb/tasks/ranking/skill2job.py | 27 ++- src/workrb/tasks/ranking/skill_extraction.py | 16 +- src/workrb/tasks/ranking/skill_similarity.py | 17 +- src/workrb/tasks/ranking/skillnorm.py | 18 +- tests/test_contextmatch_model.py | 2 +- tests/test_curriculum_encoder_model.py | 2 +- tests/test_model_task_compatibility.py | 46 ++--- tests/test_multi_dataset_task.py | 177 ++++++++++++++++++ tests/test_task_registry.py | 6 +- tests/test_utils.py | 20 +- 23 files changed, 570 insertions(+), 211 deletions(-) create mode 100644 tests/test_multi_dataset_task.py diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 94b2360..278e500 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -180,10 +180,10 @@ class MyCustomRankingTask(RankingTask): """Override default metrics if needed""" return ["map", "mrr", "recall@5", "recall@10"] - def load_monolingual_data(self, split: DatasetSplit, language: Language) -> RankingDataset: + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> RankingDataset: """ - Load dataset for a specific language and split. - + Load dataset for a specific dataset ID and split. + Returns: RankingDataset with query_texts, target_indices, and target_space """ @@ -195,12 +195,12 @@ class MyCustomRankingTask(RankingTask): [0, 2], # Software Engineer -> Python, SQL [0, 1], # Data Scientist -> Python, Machine Learning ] - + return RankingDataset( query_texts=query_texts, target_indices=target_indices, target_space=target_space, - language=language, + dataset_id=dataset_id, ) ``` diff --git a/README.md b/README.md index 7587625..5c6465a 100644 --- a/README.md +++ b/README.md @@ -112,7 +112,7 @@ Feel free to make a PR to add your models & tasks to the official package! See [ ### Checkpointing & Resuming -WorkRB automatically saves result checkpoints after each task completion in a specific language. +WorkRB automatically saves result checkpoints after each dataset evaluation within a task. **Automatic Resuming** - Simply rerun with the same `output_folder`: diff --git a/examples/custom_task_example.py b/examples/custom_task_example.py index cf44d8f..5b2eaa3 100644 --- a/examples/custom_task_example.py +++ b/examples/custom_task_example.py @@ -78,14 +78,14 @@ def supported_target_languages(self) -> list[Language]: """Supported target languages are English.""" return [Language.EN] - def load_monolingual_data(self, language: Language, split: DatasetSplit) -> RankingDataset: + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> RankingDataset: """ Load data for evaluation. This method must return a RankingDataset. 
Args: - language: Language code (e.g., "en", "de", "fr") + dataset_id: Dataset identifier (e.g., "en", "de", "fr" for language-based tasks) split: Data split ("test", "validation", "train") Returns @@ -121,7 +121,7 @@ def load_monolingual_data(self, language: Language, split: DatasetSplit) -> Rank query_texts=queries, target_indices=labels, target_space=targets, - language=language, + dataset_id=dataset_id, ) # Note: The evaluate() method is inherited from RankingTask and doesn't need diff --git a/src/workrb/config.py b/src/workrb/config.py index b3294fe..0d5c2b3 100644 --- a/src/workrb/config.py +++ b/src/workrb/config.py @@ -208,21 +208,21 @@ def get_pending_work( ) -> list[tuple]: """Determine what work still needs to be done. - Work is defined as a (task, language) combination that is not completed. + Work is defined as a (task, dataset_id) combination that is not completed. """ pending_work = [] for task in tasks: - for language in task.languages: - # Successful completed (task, language) combination + for dataset_id in task.dataset_ids: + # Successful completed (task, dataset_id) combination if ( results is not None and task.name in results.task_results - and language in results.task_results[task.name].language_results + and dataset_id in results.task_results[task.name].language_results ): continue # Add to pending work - pending_work.append((task, language)) + pending_work.append((task, dataset_id)) return pending_work diff --git a/src/workrb/results.py b/src/workrb/results.py index 707039f..5537bcf 100644 --- a/src/workrb/results.py +++ b/src/workrb/results.py @@ -22,7 +22,7 @@ class TaskResultMetadata(BaseModel): class MetricsResult(BaseModel): """Metric results for a single evaluation run. - In the becnhmark, this is a single evaluation run for a single language. + In the benchmark, this is a single evaluation run for a single dataset. """ evaluation_time: float = Field(ge=0) @@ -34,8 +34,8 @@ class TaskResults(BaseModel): """Results for a task.""" metadata: TaskResultMetadata - language_results: dict[str, MetricsResult] # language -> results - """ Dictionary of language codes to their computed results. """ + language_results: dict[str, MetricsResult] # dataset_id -> results + """Dictionary of dataset IDs to their computed results.""" class BenchmarkMetadata(BaseModel): @@ -292,24 +292,27 @@ def _aggregate_per_language( ) -> dict[ResultTagString, float]: """Aggregate results per language. - Collects language-specific results over all tasks, and aggregates all availble results. + Collects language-specific results over all tasks, and aggregates all available results. Results may be imbalanced if tasks support different languages. 
""" # Collect metric values per task raw_results = defaultdict(list) for task_result in self.task_results.values(): - for language, metrics_result in task_result.language_results.items(): + for dataset_id, metrics_result in task_result.language_results.items(): for metric_name, metric_value in metrics_result.metrics_dict.items(): - raw_results[(language, metric_name)].append(metric_value) + raw_results[(dataset_id, metric_name)].append(metric_value) # Compute stats results = {} - for (language, metric_name), values in raw_results.items(): + for (dataset_id, metric_name), values in raw_results.items(): stats = self._compute_stats(values) for agg in aggregations: assert agg in stats, f"Aggregation {agg} not found in stats: {stats.keys()}" tag = ResultTagString( - name=tag_name, metric_name=metric_name, aggregation=agg, grouping_name=language + name=tag_name, + metric_name=metric_name, + aggregation=agg, + grouping_name=dataset_id, ) results[tag] = stats[agg] return results @@ -340,7 +343,7 @@ def _get_flat_dataframe(self) -> pd.DataFrame: """Get flat dataframe of the benchmark results with each metric value as a separate row.""" data = [] for task_name, task_result in self.task_results.items(): - for language, metrics_result in task_result.language_results.items(): + for dataset_id, metrics_result in task_result.language_results.items(): for metric_name, metric_value in metrics_result.metrics_dict.items(): data.append( { @@ -349,7 +352,7 @@ def _get_flat_dataframe(self) -> pd.DataFrame: "task_type": str(task_result.metadata.task_type), # "task_label_type": str(task_result.metadata.label_type), # "task_split": str(task_result.metadata.split), - "task_language": str(language), + "dataset_id": str(dataset_id), "metric_name": str(metric_name), "metric_value": float(metric_value), } diff --git a/src/workrb/run.py b/src/workrb/run.py index 1770512..7fd6e3a 100644 --- a/src/workrb/run.py +++ b/src/workrb/run.py @@ -22,7 +22,7 @@ TaskResultMetadata, TaskResults, ) -from workrb.tasks.abstract.base import Language, Task +from workrb.tasks.abstract.base import Task logger = logging.getLogger(__name__) setup_logger(__name__, verbose=False) @@ -80,10 +80,10 @@ def evaluate( # Group pending work by task for better organization work_by_task = {} - for task, language in pending_work: + for task, dataset_id in pending_work: if task.name not in work_by_task: - work_by_task[task.name] = {"task": task, "languages": []} - work_by_task[task.name]["languages"].append(language) + work_by_task[task.name] = {"task": task, "dataset_ids": []} + work_by_task[task.name]["dataset_ids"].append(dataset_id) # Run pending work start_time_benchmark = time.time() @@ -101,7 +101,7 @@ def evaluate( # Update metadata results.metadata.total_evaluation_time = time.time() - start_time_benchmark results.metadata.resumed_from_checkpoint = len(pending_work) < sum( - len(task.languages) for task in tasks + len(task.dataset_ids) for task in tasks ) # Save config and results @@ -206,11 +206,11 @@ def get_tasks_overview(tasks: Sequence[Task]) -> str: lines.append(f"{task_name:<40} {group:<20} {task_languages:<20}") - # Add size one-liner for each language - for lang in task.languages: - size_info = task.get_size_oneliner(lang) + # Add size one-liner for each dataset + for dataset_id in task.dataset_ids: + size_info = task.get_size_oneliner(dataset_id) if size_info: - lines.append(f" └─ {lang}: {size_info}") + lines.append(f" └─ {dataset_id}: {size_info}") lines.append("-" * 80) @@ -227,7 +227,7 @@ def _get_all_languages(tasks: 
Sequence[Task]) -> list[str]: def _get_total_evaluations(tasks: Sequence[Task]) -> int: """Get the total number of evaluations.""" - return sum(len(task.languages) for task in tasks) + return sum(len(task.dataset_ids) for task in tasks) def _init_checkpointing( @@ -307,12 +307,12 @@ def _run_pending_work( run_idx = results.get_num_evaluation_results() # Already completed evaluations for work_info in work_by_task.values(): task: Task = work_info["task"] - pending_languages: list[str] = work_info["languages"] + pending_dataset_ids: list[str] = work_info["dataset_ids"] logger.info(f"{'=' * 60}") logger.info(f"Evaluating task: {task.name}") logger.info(f"Completed {run_idx} / {_get_total_evaluations(tasks)} evaluations. ") - logger.info(f"Pending languages for this task: {len(pending_languages)}") + logger.info(f"Pending datasets for this task: {len(pending_dataset_ids)}") # Initialize task results if not exists if task.name not in results.task_results: @@ -327,11 +327,9 @@ def _run_pending_work( language_results={}, ) - # Evaluate pending languages - for language in pending_languages: - logger.info( - f"* Running language: {language} ({task.get_size_oneliner(Language(language))})" - ) + # Evaluate pending datasets + for dataset_id in pending_dataset_ids: + logger.info(f"* Running dataset: {dataset_id} ({task.get_size_oneliner(dataset_id)})") # Get metrics for this task task_metrics = None @@ -340,15 +338,15 @@ def _run_pending_work( try: start_time_eval = time.time() - lang_results: dict[str, float] = task.evaluate( - model=model, metrics=task_metrics, language=Language(language) + dataset_results: dict[str, float] = task.evaluate( + model=model, metrics=task_metrics, dataset_id=dataset_id ) evaluation_time = time.time() - start_time_eval # Store results - results.task_results[task.name].language_results[language] = MetricsResult( + results.task_results[task.name].language_results[dataset_id] = MetricsResult( evaluation_time=evaluation_time, - metrics_dict=lang_results, + metrics_dict=dataset_results, ) # Save incremental results to checkpoint @@ -357,7 +355,7 @@ def _run_pending_work( # Show key metrics key_metric = task.default_metrics[0] - logger.info(f"\t{key_metric}: {lang_results[key_metric]:.3f}") + logger.info(f"\t{key_metric}: {dataset_results[key_metric]:.3f}") run_idx += 1 except Exception as e: logger.error(f"Error: {e}") diff --git a/src/workrb/tasks/abstract/base.py b/src/workrb/tasks/abstract/base.py index 0ea733a..83b7f9b 100644 --- a/src/workrb/tasks/abstract/base.py +++ b/src/workrb/tasks/abstract/base.py @@ -55,10 +55,11 @@ def __init__( f"Invalid split: '{split}'. Supported splits: {list(DatasetSplit)}" ) from e - # Load datasets for all languages - self.lang_datasets = self._load_multilingual_data( - languages=self.languages, split=self.split - ) + # Select dataset identifiers that match the requested languages + self.dataset_ids = self.languages_to_dataset_ids(self.languages) + + # Load datasets for the selected dataset identifiers + self.datasets = self._load_datasets(dataset_ids=self.dataset_ids, split=self.split) def _parse_languages( self, languages: list[str], unsupported_lang_mode: Literal["error", "skip"] @@ -82,6 +83,41 @@ def _parse_languages( parsed_languages.append(lang) return parsed_languages + def languages_to_dataset_ids(self, languages: list[Language]) -> list[str]: + """Convert languages to dataset IDs. + + Default implementation returns language codes as dataset IDs (1:1 mapping). 
+ This provides automatic backward compatibility for tasks that are a union of + monolingual datasets. + + Other tasks with multiple datasets per language can override this method to + return all datasets that use only languages from the provided set. + + Args: + languages: List of Language enums requested for evaluation + + Returns + ------- + List of dataset identifier strings + """ + return [lang.value for lang in languages] + + def _load_datasets(self, dataset_ids: list[str], split: DatasetSplit) -> dict[str, Any]: + """Load datasets for specified IDs. + + Args: + dataset_ids: List of dataset identifiers to load + split: Dataset split to load + + Returns + ------- + Dictionary mapping dataset_id to dataset object + """ + datasets = {} + for dataset_id in dataset_ids: + datasets[dataset_id] = self.load_dataset(dataset_id=dataset_id, split=split) + return datasets + def get_task_config(self) -> dict[str, Any]: """Get task configuration.""" return { @@ -154,8 +190,16 @@ def split_test_fraction(self) -> float: """Default fraction of data to use for test split.""" return 0.8 - def get_size_oneliner(self, language: Language) -> str: - """Get dataset size summary to display status.""" + def get_size_oneliner(self, dataset_id: str) -> str: + """Get dataset size summary to display status. + + Args: + dataset_id: Dataset identifier + + Returns + ------- + Human-readable size string + """ return "" @final @@ -165,27 +209,35 @@ def split_val_fraction(self) -> float: assert 0 <= self.split_test_fraction <= 1, "Split test fraction must be between 0 and 1" return 1 - self.split_test_fraction - def _load_multilingual_data( - self, languages: list[Language], split: DatasetSplit - ) -> dict[Language, Any]: - """Load datasets for all languages.""" - lang_datasets: dict[Language, Any] = {} + @abstractmethod + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> Any: + """Load dataset for specific ID and split. - # Check if languages are supported - non_supported_languages = set(languages) - set(self.supported_languages) - if non_supported_languages: - raise ValueError( - f"The following languages are defined for '{self.name}' but are not supported: {non_supported_languages}. Supported languages: {self.supported_languages}" - ) + For tasks that are a union of monolingual datasets: dataset_id equals + language code (e.g., "en", "de"). - for lang in languages: - lang_datasets[lang] = self.load_monolingual_data(split=split, language=lang) - return lang_datasets + For other tasks: dataset_id can encode additional information like + country and languages (e.g., "deu_q_de_c_de"). - @abstractmethod - def load_monolingual_data(self, language: Language, split: DatasetSplit) -> Any: - pass + Args: + dataset_id: Unique identifier for the dataset + split: Dataset split to load + + Returns + ------- + Dataset object (RankingDataset or ClassificationDataset) + """ @abstractmethod - def evaluate(self, model, metrics=None, language: Language = Language.EN) -> dict[str, float]: - pass + def evaluate(self, model, metrics=None, dataset_id: str = "en") -> dict[str, float]: + """Evaluate model on specific dataset. + + Args: + model: Model to evaluate + metrics: List of metric names. 
If None, uses default_metrics + dataset_id: Dataset identifier to evaluate on + + Returns + ------- + Dictionary of metric names to values + """ diff --git a/src/workrb/tasks/abstract/classification_base.py b/src/workrb/tasks/abstract/classification_base.py index 78b97b7..999bea9 100644 --- a/src/workrb/tasks/abstract/classification_base.py +++ b/src/workrb/tasks/abstract/classification_base.py @@ -13,7 +13,6 @@ BaseTaskGroup, DatasetSplit, LabelType, - Language, Task, TaskType, ) @@ -43,7 +42,7 @@ def __init__( texts: list[str], labels: list[list[int]], label_space: list[str], - language: Language, + dataset_id: str, ): """Initialize classification dataset with validation. @@ -52,12 +51,12 @@ def __init__( labels: List with list of class indices corresponding to each text. Contains just 1 item per list for single-label classification. label_space: List of class names/labels (e.g., ["skill1", "skill2", "skill3"]) - language: Language enum + dataset_id: Unique identifier for this dataset """ self.texts = self._postprocess_texts(texts) self.labels = self._postprocess_labels(labels) self.label_space = self._postprocess_texts(label_space) - self.language = language + self.dataset_id = dataset_id self.validate_dataset() def validate_dataset( @@ -141,11 +140,10 @@ def get_labels_as_indicator_matrix(self) -> list[list[int]]: class ClassificationTask(Task): - """ - Abstract base class for classification tasks. + """Abstract base class for classification tasks. Supports both binary and multi-class classification. - Tasks should implement load_monolingual_data() to return ClassificationDataset. + Tasks should implement load_dataset() to return ClassificationDataset. """ @property @@ -177,8 +175,16 @@ def threshold(self) -> float | None: """Threshold to use for classification.""" @abstractmethod - def get_output_space_size(self, language: Language) -> int: - """Number of output classes for this classification task.""" + def get_output_space_size(self, dataset_id: str) -> int: + """Number of output classes for this classification task. + + Args: + dataset_id: Dataset identifier + + Returns + ------- + Number of classes in the output space + """ @property @abstractmethod @@ -186,24 +192,43 @@ def input_type(self) -> ModelInputType: """Input type for texts in the classification task.""" @abstractmethod - def load_monolingual_data( - self, split: DatasetSplit, language: Language - ) -> ClassificationDataset: - """Load dataset for a specific language.""" - - def get_size_oneliner(self, language: Language) -> str: - """Get dataset summary to display for progress.""" - dataset: ClassificationDataset = self.lang_datasets[language] + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> ClassificationDataset: + """Load dataset for specific ID and split. + + For tasks that are a union of monolingual datasets: dataset_id equals + language code. + + For other tasks: dataset_id can encode arbitrary information. + + Args: + dataset_id: Unique identifier for the dataset + split: Dataset split to load + + Returns + ------- + ClassificationDataset object + """ + + def get_size_oneliner(self, dataset_id: str) -> str: + """Get dataset summary to display for progress. 
+ + Args: + dataset_id: Dataset identifier + + Returns + ------- + Human-readable size string + """ + dataset: ClassificationDataset = self.datasets[dataset_id] return f"{len(dataset.texts)} samples, {len(dataset.label_space)} classes" def evaluate( self, model: ModelInterface, metrics: list[str] | None = None, - language: Language = Language.EN, + dataset_id: str = "en", ) -> dict[str, float]: - """ - Evaluate the model with threshold optimization. + """Evaluate the model with threshold optimization. For binary classification, this method: 1. Optimizes threshold on validation data @@ -213,7 +238,7 @@ def evaluate( Args: model: Model implementing classification interface metrics: List of metrics to compute - language: Language code for evaluation + dataset_id: Dataset identifier to evaluate on Returns ------- @@ -223,22 +248,22 @@ def evaluate( metrics = self.default_metrics # Get evaluation dataset - eval_dataset: ClassificationDataset = self.lang_datasets[language] + eval_dataset: ClassificationDataset = self.datasets[dataset_id] # Validate model output if it has a fixed classification label space model_label_space = model.classification_label_space if model_label_space is not None: # Model has fixed label space (e.g., classification head) - if len(model_label_space) != self.get_output_space_size(language): + if len(model_label_space) != self.get_output_space_size(dataset_id): raise ValueError( f"Model output size mismatch: model has {len(model_label_space)} outputs, " - f"but task requires {self.get_output_space_size(language)} outputs." + f"but task requires {self.get_output_space_size(dataset_id)} outputs." ) # Validate label order matches (critical for correct evaluation) self._validate_model_label_space(model_label_space, eval_dataset) best_threshold = ( - self.get_threshold_on_val_data(model, language) + self.get_threshold_on_val_data(model, dataset_id) if self.best_threshold_on_val_data else self.threshold ) @@ -307,12 +332,21 @@ def _validate_model_label_space( "The model must use the exact same label ordering as the task." ) - def get_threshold_on_val_data(self, model: ModelInterface, language: Language) -> float: - """Get the best threshold on validation data.""" + def get_threshold_on_val_data(self, model: ModelInterface, dataset_id: str) -> float: + """Get the best threshold on validation data. 
+ + Args: + model: Model to evaluate + dataset_id: Dataset identifier + + Returns + ------- + Optimized threshold value + """ # Step 1: Optimize threshold on validation data # Load validation data (even if we're evaluating on test) - logger.info(f"Optimizing threshold on validation data for {language}...") - val_dataset = self.load_monolingual_data(DatasetSplit.VAL, language) + logger.info(f"Optimizing threshold on validation data for {dataset_id}...") + val_dataset = self.load_dataset(dataset_id, DatasetSplit.VAL) val_predictions = model.compute_classification( texts=val_dataset.texts, targets=val_dataset.label_space, diff --git a/src/workrb/tasks/abstract/ranking_base.py b/src/workrb/tasks/abstract/ranking_base.py index e980168..20265d0 100644 --- a/src/workrb/tasks/abstract/ranking_base.py +++ b/src/workrb/tasks/abstract/ranking_base.py @@ -10,7 +10,7 @@ import torch from workrb.metrics.ranking import calculate_ranking_metrics -from workrb.tasks.abstract.base import BaseTaskGroup, DatasetSplit, Language, Task, TaskType +from workrb.tasks.abstract.base import BaseTaskGroup, DatasetSplit, Task, TaskType from workrb.types import ModelInputType if TYPE_CHECKING: @@ -30,26 +30,27 @@ class RankingTaskGroup(BaseTaskGroup, str, Enum): class RankingDataset: - """Structure for monolingualranking datasets.""" + """Structure for ranking datasets.""" def __init__( self, query_texts: list[str], target_indices: list[list[int]], target_space: list[str], - language: Language, + dataset_id: str, ): """Initialize ranking dataset with validation. Args: - query: List of query strings - target_label: List of lists containing indices into the target vocabulary - target: List of target vocabulary strings + query_texts: List of query strings + target_indices: List of lists containing indices into the target vocabulary + target_space: List of target vocabulary strings + dataset_id: Unique identifier for this dataset """ self.query_texts = self._postprocess_texts(query_texts) self.target_indices = self._postprocess_indices(target_indices) self.target_space = self._postprocess_texts(target_space) - self.language = language + self.dataset_id = dataset_id self.validate_dataset() def validate_dataset( @@ -135,26 +136,48 @@ def target_input_type(self) -> ModelInputType: """Input type for target texts in the ranking task.""" @abstractmethod - def load_monolingual_data(self, split: DatasetSplit, language: Language) -> RankingDataset: - """Load dataset for a specific language.""" + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> RankingDataset: + """Load dataset for specific ID and split. - def get_size_oneliner(self, language: Language) -> str: - """Get dataset summary to display for progress.""" - return f"{len(self.lang_datasets[language].query_texts)} queries x {len(self.lang_datasets[language].target_space)} targets" + For tasks that are a union of monolingual datasets: dataset_id equals + language code. + + For other tasks: dataset_id can encode arbitrary information. + + Args: + dataset_id: Unique identifier for the dataset + split: Dataset split to load + + Returns + ------- + RankingDataset object + """ + + def get_size_oneliner(self, dataset_id: str) -> str: + """Get dataset summary to display for progress. 
+ + Args: + dataset_id: Dataset identifier + + Returns + ------- + Human-readable size string + """ + dataset = self.datasets[dataset_id] + return f"{len(dataset.query_texts)} queries x {len(dataset.target_space)} targets" def evaluate( self, model: ModelInterface, metrics: list[str] | None = None, - language: Language = Language.EN, + dataset_id: str = "en", ) -> dict[str, float]: - """ - Evaluate the model on this ranking task. + """Evaluate the model on this ranking task. Args: model: Model implementing ModelInterface (must have compute_rankings method) metrics: List of metrics to compute. If None, uses default_metrics - language: Language code for evaluation + dataset_id: Dataset identifier to evaluate on Returns ------- @@ -163,8 +186,8 @@ def evaluate( if metrics is None: metrics = self.default_metrics - # Use new dataset if available - dataset = self.lang_datasets[language] + # Retrieve dataset by ID + dataset = self.datasets[dataset_id] queries = dataset.query_texts targets = dataset.target_space labels = dataset.target_indices @@ -181,6 +204,7 @@ def evaluate( if isinstance(prediction_matrix, torch.Tensor): prediction_matrix = prediction_matrix.cpu().numpy() + # Calculate metrics metric_results = calculate_ranking_metrics( prediction_matrix=prediction_matrix, pos_label_idxs=labels, metrics=metrics ) diff --git a/src/workrb/tasks/classification/job2skill.py b/src/workrb/tasks/classification/job2skill.py index 7043907..431c0f5 100644 --- a/src/workrb/tasks/classification/job2skill.py +++ b/src/workrb/tasks/classification/job2skill.py @@ -68,32 +68,39 @@ def input_type(self) -> ModelInputType: """Input is job titles.""" return ModelInputType.JOB_TITLE - def get_output_space_size(self, language: Language) -> int: - """Number of output classes (skills) for this classification task.""" - ds: ClassificationDataset = self.lang_datasets[language] - return len(ds.label_space) + def get_output_space_size(self, dataset_id: str) -> int: + """Number of output classes (skills) for this classification task. + + Args: + dataset_id: Dataset identifier - def load_monolingual_data( - self, split: DatasetSplit, language: Language - ) -> ClassificationDataset: + Returns + ------- + Number of classes in the output space """ - Load job-skill classification data for specified language and split. + ds: ClassificationDataset = self.datasets[dataset_id] + return len(ds.label_space) + + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> ClassificationDataset: + """Load job-skill classification data for specified dataset and split. Args: + dataset_id: Dataset identifier (language code for this task) split: Data split (VAL or TEST) - language: Language code Returns ------- ClassificationDataset with job titles and multi-label skill assignments """ + language = Language(dataset_id) + if split == DatasetSplit.VAL: - return self._load_val(language) + return self._load_val(language=language, dataset_id=dataset_id) if split == DatasetSplit.TEST: - return self._load_test(language) + return self._load_test(language=language, dataset_id=dataset_id) raise ValueError(f"Split '{split}' not supported. 
Use VAL or TEST") - def _load_test(self, language: Language) -> ClassificationDataset: + def _load_test(self, language: Language, dataset_id: str) -> ClassificationDataset: """Load test data from ESCO occupation-skill relations.""" target_esco = ESCO(version=self.esco_version, language=language) skill_vocab = target_esco.get_skills_vocabulary() @@ -122,12 +129,11 @@ def _load_test(self, language: Language) -> ClassificationDataset: texts=texts, labels=labels, label_space=skill_vocab, - language=language, + dataset_id=dataset_id, ) - def _load_val(self, language: Language) -> ClassificationDataset: - """ - Load validation set based on vacancies with job titles. + def _load_val(self, language: Language, dataset_id: str) -> ClassificationDataset: + """Load validation set based on vacancies with job titles. Static validation set only available in English. """ @@ -187,5 +193,5 @@ def _load_val(self, language: Language) -> ClassificationDataset: texts=texts, labels=labels, label_space=skill_vocab, - language=language, + dataset_id=dataset_id, ) diff --git a/src/workrb/tasks/ranking/job2skill.py b/src/workrb/tasks/ranking/job2skill.py index 2632c96..f51f5bb 100644 --- a/src/workrb/tasks/ranking/job2skill.py +++ b/src/workrb/tasks/ranking/job2skill.py @@ -61,22 +61,31 @@ def target_input_type(self) -> ModelInputType: """Target input type for skills.""" return ModelInputType.SKILL_NAME - def load_monolingual_data(self, split: DatasetSplit, language: Language) -> RankingDataset: - """ - Load job-to-skills data for a specific split and language. + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> RankingDataset: + """Load job-to-skills data for a specific split and dataset. Static validation set only available in English. Test set is generated from ESCO relations for the selected version and language. + + Args: + dataset_id: Dataset identifier (language code for this task) + split: Dataset split to load + + Returns + ------- + RankingDataset object """ + language = Language(dataset_id) + if split == DatasetSplit.TEST: - return self._load_test(language=language) + return self._load_test(language=language, dataset_id=dataset_id) if split == DatasetSplit.VAL: - return self._load_val(language=language) + return self._load_val(language=language, dataset_id=dataset_id) raise ValueError(f"Invalid split: {split}") - def _load_test(self, language: Language) -> RankingDataset: + def _load_test(self, language: Language, dataset_id: str) -> RankingDataset: """Load test data for a specific version and language.""" target_esco = ESCO(version=self.esco_version, language=language) skill_vocab = target_esco.get_skills_vocabulary() @@ -105,12 +114,11 @@ def _load_test(self, language: Language) -> RankingDataset: query_texts=query_texts, target_indices=target_indices, target_space=skill_vocab, - language=language, + dataset_id=dataset_id, ) - def _load_val(self, language: Language) -> RankingDataset: - """ - Validation set based on vacancies with job titles, where description is used to extract ESCO skills. + def _load_val(self, language: Language, dataset_id: str) -> RankingDataset: + """Validation set based on vacancies with job titles, where description is used to extract ESCO skills. Static validation set only available in English. 
""" @@ -162,7 +170,7 @@ def _load_val(self, language: Language) -> RankingDataset: query_texts=query_texts, target_indices=target_indices, target_space=skill_vocab, - language=language, + dataset_id=dataset_id, ) @property diff --git a/src/workrb/tasks/ranking/job_similarity.py b/src/workrb/tasks/ranking/job_similarity.py index 4edc043..d18a534 100644 --- a/src/workrb/tasks/ranking/job_similarity.py +++ b/src/workrb/tasks/ranking/job_similarity.py @@ -104,8 +104,19 @@ def target_input_type(self) -> ModelInputType: """Target input type for job titles.""" return ModelInputType.JOB_TITLE - def load_monolingual_data(self, split: DatasetSplit, language: Language) -> RankingDataset: - """Load Job Title Similarity data from the HuggingFace dataset.""" + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> RankingDataset: + """Load Job Title Similarity data from the HuggingFace dataset. + + Args: + dataset_id: Dataset identifier (language code for this task) + split: Dataset split to load + + Returns + ------- + RankingDataset object + """ + language = Language(dataset_id) + if split != DatasetSplit.TEST: raise ValueError(f"Split '{split}' not supported. Use TEST") @@ -118,7 +129,7 @@ def load_monolingual_data(self, split: DatasetSplit, language: Language) -> Rank relevancy_labels = list(ds["queries"]["labels"]) corpus = list(ds["corpus"]["text"]) - return RankingDataset(queries, relevancy_labels, corpus, language=language) + return RankingDataset(queries, relevancy_labels, corpus, dataset_id=dataset_id) @property def citation(self) -> str: diff --git a/src/workrb/tasks/ranking/jobnorm.py b/src/workrb/tasks/ranking/jobnorm.py index 95151df..23a4bd3 100644 --- a/src/workrb/tasks/ranking/jobnorm.py +++ b/src/workrb/tasks/ranking/jobnorm.py @@ -61,8 +61,18 @@ def target_input_type(self) -> ModelInputType: """Target input type for ESCO occupations.""" return ModelInputType.JOB_TITLE - def load_monolingual_data(self, split: DatasetSplit, language: Language) -> RankingDataset: - """Load job normalization data.""" + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> RankingDataset: + """Load job normalization data for a specific split and dataset. + + Args: + dataset_id: Dataset identifier (language code for this task) + split: Dataset split to load + + Returns + ------- + RankingDataset object + """ + language = Language(dataset_id) # Login using e.g. `huggingface-cli login` to access this dataset ds = load_dataset("TechWolf/JobBERT-evaluation-dataset") assert isinstance(ds, DatasetDict) @@ -115,7 +125,7 @@ def load_monolingual_data(self, split: DatasetSplit, language: Language) -> Rank query_texts=query_texts, target_indices=label_indices, target_space=job_vocab, - language=language, + dataset_id=dataset_id, ) @property diff --git a/src/workrb/tasks/ranking/skill2job.py b/src/workrb/tasks/ranking/skill2job.py index 13071d7..d96093b 100644 --- a/src/workrb/tasks/ranking/skill2job.py +++ b/src/workrb/tasks/ranking/skill2job.py @@ -60,22 +60,31 @@ def target_input_type(self) -> ModelInputType: """Target input type for jobs.""" return ModelInputType.JOB_TITLE - def load_monolingual_data(self, split: DatasetSplit, language: Language) -> RankingDataset: - """ - Load skill-to-job data for a specific split and language. + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> RankingDataset: + """Load skill-to-job data for a specific split and dataset. Validation set is static and only available in English. 
Test set is generated from ESCO relations for the selected version and language. + + Args: + dataset_id: Dataset identifier (language code for this task) + split: Dataset split to load + + Returns + ------- + RankingDataset object """ + language = Language(dataset_id) + if split == DatasetSplit.TEST: - return self._load_test(language=language) + return self._load_test(language=language, dataset_id=dataset_id) if split == DatasetSplit.VAL: - return self._load_val(language=language) + return self._load_val(language=language, dataset_id=dataset_id) raise ValueError(f"Invalid split: {split}") - def _load_test(self, language: Language) -> RankingDataset: + def _load_test(self, language: Language, dataset_id: str) -> RankingDataset: """Load test data for a specific version and language.""" target_esco = ESCO(version=self.esco_version, language=language) @@ -109,10 +118,10 @@ def _load_test(self, language: Language) -> RankingDataset: query_texts=query_texts, target_indices=target_indices, target_space=job_vocab, - language=language, + dataset_id=dataset_id, ) - def _load_val(self, language: Language) -> RankingDataset: + def _load_val(self, language: Language, dataset_id: str) -> RankingDataset: """ Use vacancies with job titles where descriptions yield ESCO skills. @@ -178,7 +187,7 @@ def _load_val(self, language: Language) -> RankingDataset: query_texts=query_texts, target_indices=target_indices, target_space=job_vocab, - language=language, + dataset_id=dataset_id, ) @property diff --git a/src/workrb/tasks/ranking/skill_extraction.py b/src/workrb/tasks/ranking/skill_extraction.py index 543219a..9950e17 100644 --- a/src/workrb/tasks/ranking/skill_extraction.py +++ b/src/workrb/tasks/ranking/skill_extraction.py @@ -68,8 +68,18 @@ def target_input_type(self) -> ModelInputType: """Target input type for ESCO skills.""" return ModelInputType.SKILL_NAME - def load_monolingual_data(self, split: DatasetSplit, language: Language) -> RankingDataset: - """Load skill extraction house data.""" + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> RankingDataset: + """Load skill extraction data for a specific split and dataset. + + Args: + dataset_id: Dataset identifier (language code for this task) + split: Dataset split to load + + Returns + ------- + RankingDataset object + """ + language = Language(dataset_id) # Load data split_names = {DatasetSplit.TEST: "test", DatasetSplit.VAL: "validation"} dataset = load_dataset(self.hf_name, split=split_names[split]) @@ -119,7 +129,7 @@ def load_monolingual_data(self, split: DatasetSplit, language: Language) -> Rank query_texts=filtered_queries, target_indices=filtered_labels, target_space=skill_vocab, - language=language, + dataset_id=dataset_id, ) diff --git a/src/workrb/tasks/ranking/skill_similarity.py b/src/workrb/tasks/ranking/skill_similarity.py index fdeeaf6..f321969 100644 --- a/src/workrb/tasks/ranking/skill_similarity.py +++ b/src/workrb/tasks/ranking/skill_similarity.py @@ -63,13 +63,22 @@ def target_input_type(self) -> ModelInputType: """Target input type for skills.""" return ModelInputType.SKILL_NAME - def load_monolingual_data(self, split: DatasetSplit, language: Language) -> RankingDataset: - """ - Load skill similarity data from SkillMatch dataset. + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> RankingDataset: + """Load skill similarity data from SkillMatch dataset. Uses only the 1k related pairs from the SkillMatch dataset, but uses all skills from the SkillMatch dataset for the vocabulary. 
+ + Args: + dataset_id: Dataset identifier (language code for this task) + split: Dataset split to load + + Returns + ------- + RankingDataset object """ + language = Language(dataset_id) + if language != Language.EN: raise ValueError("The validation set of this task is only available in English.") @@ -105,7 +114,7 @@ def load_monolingual_data(self, split: DatasetSplit, language: Language) -> Rank selected_queries = query_test selected_labels = label_test - return RankingDataset(selected_queries, selected_labels, skill_vocab, language=language) + return RankingDataset(selected_queries, selected_labels, skill_vocab, dataset_id=dataset_id) @property def citation(self) -> str: diff --git a/src/workrb/tasks/ranking/skillnorm.py b/src/workrb/tasks/ranking/skillnorm.py index 01bb1e3..41bc2b3 100644 --- a/src/workrb/tasks/ranking/skillnorm.py +++ b/src/workrb/tasks/ranking/skillnorm.py @@ -101,9 +101,19 @@ def target_input_type(self) -> ModelInputType: """Target input type for canonical skill names.""" return ModelInputType.SKILL_NAME - def load_monolingual_data(self, split: DatasetSplit, language: Language) -> RankingDataset: - """Load skill normalization data from ESCO.""" - target_esco = ESCO(version=self.esco_version, language=Language(language)) + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> RankingDataset: + """Load skill normalization data from ESCO. + + Args: + dataset_id: Dataset identifier (language code for this task) + split: Dataset split to load + + Returns + ------- + RankingDataset object + """ + language = Language(dataset_id) + target_esco = ESCO(version=self.esco_version, language=language) # Full vocab, even those without alternatives skill_vocab = target_esco.get_skills_vocabulary() @@ -121,7 +131,7 @@ def load_monolingual_data(self, split: DatasetSplit, language: Language) -> Rank alt2skills, skill2label, split ) - return RankingDataset(selected_queries, selected_labels, skill_vocab, language=language) + return RankingDataset(selected_queries, selected_labels, skill_vocab, dataset_id=dataset_id) def _rnd_split( self, alt2skills: dict[str, list[str]], skill2label: dict[str, int], split: DatasetSplit diff --git a/tests/test_contextmatch_model.py b/tests/test_contextmatch_model.py index 34f9696..6f92dc5 100644 --- a/tests/test_contextmatch_model.py +++ b/tests/test_contextmatch_model.py @@ -130,7 +130,7 @@ def test_tech_skill_extraction_benchmark_metrics(self): # Evaluate model on the task with the metrics from the paper metrics = ["mrr", "rp@1", "rp@5", "rp@10"] - results = task.evaluate(model=model, metrics=metrics, language=Language.EN) + results = task.evaluate(model=model, metrics=metrics, dataset_id=Language.EN.value) # Paper-reported values (RP metrics are percentages, convert to decimals) expected_mrr = 0.632 diff --git a/tests/test_curriculum_encoder_model.py b/tests/test_curriculum_encoder_model.py index 04e50c6..0585217 100644 --- a/tests/test_curriculum_encoder_model.py +++ b/tests/test_curriculum_encoder_model.py @@ -75,7 +75,7 @@ def test_skill_extraction_benchmark_metrics(self): # Evaluate model on the task with the metrics from the paper metrics = ["mrr", "rp@1", "rp@5", "rp@10"] - results = task.evaluate(model=model, metrics=metrics, language=Language.EN) + results = task.evaluate(model=model, metrics=metrics, dataset_id=Language.EN.value) # Paper-reported values (RP metrics are percentages, convert to decimals) [TECH] # expected_mrr = 0.5726 diff --git a/tests/test_model_task_compatibility.py b/tests/test_model_task_compatibility.py index 
59f6ca5..f9053d2 100644 --- a/tests/test_model_task_compatibility.py +++ b/tests/test_model_task_compatibility.py @@ -41,7 +41,7 @@ def test_classification_task_with_biencoder_works(self): model = BiEncoderModel("all-MiniLM-L6-v2") # Should work - BiEncoder computes similarity between texts and label space - results = task.evaluate(model, language=Language.EN) + results = task.evaluate(model, dataset_id=Language.EN.value) # Validate results assert "f1_macro" in results @@ -56,7 +56,7 @@ def test_classification_task_output_shape(self): model = BiEncoderModel("all-MiniLM-L6-v2") # Get dataset - dataset: ClassificationDataset = task.lang_datasets[Language.EN] + dataset: ClassificationDataset = task.datasets[Language.EN.value] # Compute predictions predictions = model.compute_classification( @@ -80,7 +80,7 @@ def test_classification_task_with_classification_model_works(self): task = ToyJobSkill(split="val", languages=["en"]) # Get the label space from the task - dataset = task.lang_datasets[Language.EN] + dataset = task.datasets[Language.EN.value] label_space = dataset.label_space # Create classification model with matching label space @@ -90,7 +90,7 @@ def test_classification_task_with_classification_model_works(self): ) # Should work - model has classification head with matching label space - results = task.evaluate(model, language=Language.EN) + results = task.evaluate(model, dataset_id=Language.EN.value) # Validate results assert "f1_macro" in results @@ -112,7 +112,7 @@ def test_classification_task_label_space_size_mismatch_fails(self): # Should fail with clear error about size mismatch with pytest.raises(ValueError, match="Model output size mismatch"): - task.evaluate(model, language=Language.EN) + task.evaluate(model, dataset_id=Language.EN.value) def test_classification_task_label_space_order_mismatch_fails(self): """Classification model with wrong label order should fail.""" @@ -120,7 +120,7 @@ def test_classification_task_label_space_order_mismatch_fails(self): task = ToyJobSkill(split="val", languages=["en"]) # Get the label space and shuffle it - dataset = task.lang_datasets[Language.EN] + dataset = task.datasets[Language.EN.value] wrong_order_labels = list(reversed(dataset.label_space)) model = RndESCOClassificationModel( @@ -130,7 +130,7 @@ def test_classification_task_label_space_order_mismatch_fails(self): # Should fail with clear error about order mismatch with pytest.raises(ValueError, match="label order doesn't match"): - task.evaluate(model, language=Language.EN) + task.evaluate(model, dataset_id=Language.EN.value) class TestRankingTaskWithBiEncoder: @@ -146,7 +146,7 @@ def test_ranking_task_with_biencoder_works(self): model = BiEncoderModel("all-MiniLM-L6-v2") # Should work - standard ranking behavior - results = task.evaluate(model, language=Language.EN) + results = task.evaluate(model, dataset_id=Language.EN.value) # Validate results assert "map" in results @@ -161,7 +161,7 @@ def test_ranking_task_output_shape(self): model = BiEncoderModel("all-MiniLM-L6-v2") # Get dataset - dataset = task.lang_datasets[Language.EN] + dataset = task.datasets[Language.EN.value] # Compute predictions predictions = model.compute_rankings( @@ -186,7 +186,7 @@ def test_ranking_task_with_classification_model_matching_label_space_works(self) task = ToySkillSim(split="val", languages=["en"]) # Get the target space from the task - dataset = task.lang_datasets[Language.EN] + dataset = task.datasets[Language.EN.value] target_space = dataset.target_space # Create classification model with matching 
label space @@ -196,7 +196,7 @@ def test_ranking_task_with_classification_model_matching_label_space_works(self) ) # Should work - model's label space matches ranking target space - results = task.evaluate(model, language=Language.EN) + results = task.evaluate(model, dataset_id=Language.EN.value) # Validate results assert "map" in results @@ -218,7 +218,7 @@ def test_ranking_task_with_classification_model_size_mismatch_fails(self): # Should fail with clear error about size mismatch with pytest.raises(ValueError, match="target space size mismatch"): - task.evaluate(model, language=Language.EN) + task.evaluate(model, dataset_id=Language.EN.value) def test_ranking_task_with_classification_model_label_mismatch_fails(self): """Classification model with wrong labels should fail.""" @@ -226,7 +226,7 @@ def test_ranking_task_with_classification_model_label_mismatch_fails(self): task = ToySkillSim(split="val", languages=["en"]) # Get target space and create different labels with same size - dataset = task.lang_datasets[Language.EN] + dataset = task.datasets[Language.EN.value] wrong_labels = [f"WrongLabel_{i}" for i in range(len(dataset.target_space))] model = RndESCOClassificationModel( @@ -236,7 +236,7 @@ def test_ranking_task_with_classification_model_label_mismatch_fails(self): # Should fail with clear error about label mismatch with pytest.raises(ValueError, match="target labels don't match"): - task.evaluate(model, language=Language.EN) + task.evaluate(model, dataset_id=Language.EN.value) def test_ranking_task_with_classification_model_order_mismatch_fails(self): """Classification model with wrong label order should fail.""" @@ -244,7 +244,7 @@ def test_ranking_task_with_classification_model_order_mismatch_fails(self): task = ToySkillSim(split="val", languages=["en"]) # Get target space and reverse order - dataset = task.lang_datasets[Language.EN] + dataset = task.datasets[Language.EN.value] wrong_order_labels = list(reversed(dataset.target_space)) model = RndESCOClassificationModel( @@ -254,7 +254,7 @@ def test_ranking_task_with_classification_model_order_mismatch_fails(self): # Should fail with clear error about order mismatch with pytest.raises(ValueError, match="target label order doesn't match"): - task.evaluate(model, language=Language.EN) + task.evaluate(model, dataset_id=Language.EN.value) class TestModelTaskCompatibilitySummary: @@ -273,8 +273,8 @@ def test_all_model_task_combinations(self): biencoder_model = BiEncoderModel("all-MiniLM-L6-v2") # Get label spaces - class_dataset = classification_task.lang_datasets[Language.EN] - rank_dataset = ranking_task.lang_datasets[Language.EN] + class_dataset = classification_task.datasets[Language.EN.value] + rank_dataset = ranking_task.datasets[Language.EN.value] classification_model_for_class = RndESCOClassificationModel( base_model_name="all-MiniLM-L6-v2", @@ -290,23 +290,25 @@ def test_all_model_task_combinations(self): # 1. Classification Task + BiEncoder (NEW) results["class_biencoder"] = classification_task.evaluate( - biencoder_model, language=Language.EN + biencoder_model, dataset_id=Language.EN.value ) assert "f1_macro" in results["class_biencoder"] # 2. Classification Task + Classification Model (EXISTING) results["class_classification"] = classification_task.evaluate( - classification_model_for_class, language=Language.EN + classification_model_for_class, dataset_id=Language.EN.value ) assert "f1_macro" in results["class_classification"] # 3. 
Ranking Task + BiEncoder (EXISTING) - results["rank_biencoder"] = ranking_task.evaluate(biencoder_model, language=Language.EN) + results["rank_biencoder"] = ranking_task.evaluate( + biencoder_model, dataset_id=Language.EN.value + ) assert "map" in results["rank_biencoder"] # 4. Ranking Task + Classification Model (CONDITIONAL) results["rank_classification"] = ranking_task.evaluate( - classification_model_for_rank, language=Language.EN + classification_model_for_rank, dataset_id=Language.EN.value ) assert "map" in results["rank_classification"] diff --git a/tests/test_multi_dataset_task.py b/tests/test_multi_dataset_task.py new file mode 100644 index 0000000..0303246 --- /dev/null +++ b/tests/test_multi_dataset_task.py @@ -0,0 +1,177 @@ +""" +Test multi-dataset tasks that return multiple dataset IDs per language. + +This test suite validates that tasks can override languages_to_dataset_ids() +to return multiple dataset identifiers for each language, supporting use cases +like MELO benchmark where datasets encode additional metadata beyond language. +""" + +import pytest + +from workrb.models import BiEncoderModel +from workrb.tasks import ESCOJob2SkillRanking, RankingDataset +from workrb.types import Language + + +class TestMultiDatasetTask: + """Test tasks that return multiple dataset IDs per language.""" + + def test_languages_to_dataset_ids_multiple_per_language(self): + """Test task that returns multiple dataset IDs per language.""" + + # Create a custom task class that overrides languages_to_dataset_ids + class MultiDatasetTask(ESCOJob2SkillRanking): + def languages_to_dataset_ids(self, languages: list[Language]) -> list[str]: + """Map languages to multiple dataset IDs with custom logic.""" + dataset_ids = [] + lang_set = set(languages) + + # English -> 4 datasets + if Language.EN in lang_set: + dataset_ids.extend(["en1", "en2", "en3_sea", "en3_land"]) + + # French -> 2 datasets + if Language.FR in lang_set: + dataset_ids.extend(["fr1", "fr2"]) + + # German -> 1 dataset + if Language.DE in lang_set: + dataset_ids.append("de") + + # Spanish -> 3 datasets + if Language.ES in lang_set: + dataset_ids.extend(["es1", "es2", "es3_air"]) + + # Cross-language datasets when both French and German are present + if Language.FR in lang_set and Language.DE in lang_set: + dataset_ids.extend(["fr_de_land", "fr_de_sea"]) + + return dataset_ids + + def load_dataset(self, dataset_id: str, split): + """Mock load_dataset to avoid loading real data.""" + # For testing, we just need to verify the dataset_ids are correct + # Return a minimal mock dataset structure + return RankingDataset( + query_texts=["mock query"], + target_indices=[[0]], + target_space=["mock target"], + dataset_id=dataset_id, + ) + + # Test 1: English only + task_en = MultiDatasetTask(split="val", languages=["en"]) + assert task_en.dataset_ids == ["en1", "en2", "en3_sea", "en3_land"] + assert len(task_en.datasets) == 4 + assert all(dataset_id in task_en.datasets for dataset_id in task_en.dataset_ids) + + # Test 2: French only + task_fr = MultiDatasetTask(split="val", languages=["fr"]) + assert task_fr.dataset_ids == ["fr1", "fr2"] + assert len(task_fr.datasets) == 2 + + # Test 3: German only + task_de = MultiDatasetTask(split="val", languages=["de"]) + assert task_de.dataset_ids == ["de"] + assert len(task_de.datasets) == 1 + + # Test 4: Spanish only + task_es = MultiDatasetTask(split="val", languages=["es"]) + assert task_es.dataset_ids == ["es1", "es2", "es3_air"] + assert len(task_es.datasets) == 3 + + # Test 5: French + German 
(includes cross-language datasets) + task_fr_de = MultiDatasetTask(split="val", languages=["fr", "de"]) + assert set(task_fr_de.dataset_ids) == { + "fr1", + "fr2", + "de", + "fr_de_land", + "fr_de_sea", + } + assert len(task_fr_de.datasets) == 5 + + # Test 6: Multiple languages + task_multi = MultiDatasetTask(split="val", languages=["en", "fr", "es"]) + expected = ["en1", "en2", "en3_sea", "en3_land", "fr1", "fr2", "es1", "es2", "es3_air"] + assert task_multi.dataset_ids == expected + assert len(task_multi.datasets) == 9 + + def test_multi_dataset_task_with_biencoder(self): + """Test that multi-dataset tasks work with actual model evaluation.""" + + class ToyMultiDatasetTask(ESCOJob2SkillRanking): + def languages_to_dataset_ids(self, languages: list[Language]) -> list[str]: + """Return multiple dataset IDs per language.""" + dataset_ids = [] + if Language.EN in languages: + dataset_ids.extend(["en1", "en2"]) + return dataset_ids + + def load_dataset(self, dataset_id: str, split): + """Load minimal toy dataset.""" + from workrb.tasks.abstract.ranking_base import RankingDataset + + # Create tiny datasets for testing + return RankingDataset( + query_texts=["Software Engineer", "Data Scientist"], + target_indices=[[0, 1], [1, 2]], + target_space=["Python", "Machine Learning", "SQL"], + dataset_id=dataset_id, + ) + + # Create task with multiple datasets + task = ToyMultiDatasetTask(split="val", languages=["en"]) + assert task.dataset_ids == ["en1", "en2"] + + # Verify we can evaluate on each dataset + model = BiEncoderModel("all-MiniLM-L6-v2") + + # Evaluate on first dataset + results_en1 = task.evaluate(model, dataset_id="en1") + assert "map" in results_en1 + assert 0 <= results_en1["map"] <= 1 + + # Evaluate on second dataset + results_en2 = task.evaluate(model, dataset_id="en2") + assert "map" in results_en2 + assert 0 <= results_en2["map"] <= 1 + + def test_multi_dataset_task_evaluation_all_datasets(self): + """Test that evaluation pipeline processes all datasets.""" + + class ToyMultiDatasetTask(ESCOJob2SkillRanking): + def languages_to_dataset_ids(self, languages: list[Language]) -> list[Language]: + """Return 2 datasets for English.""" + dataset_ids = [] + if Language.EN in languages: + dataset_ids.extend(["en_region_a", "en_region_b"]) + return dataset_ids + + def load_dataset(self, dataset_id: str, split): + """Load minimal toy dataset.""" + from workrb.tasks.abstract.ranking_base import RankingDataset + + return RankingDataset( + query_texts=["test query"], + target_indices=[[0]], + target_space=["test target"], + dataset_id=dataset_id, + ) + + task = ToyMultiDatasetTask(split="val", languages=["en"]) + + # Verify dataset_ids are correct + assert task.dataset_ids == ["en_region_a", "en_region_b"] + + # Verify both datasets are loaded + assert "en_region_a" in task.datasets + assert "en_region_b" in task.datasets + + # Verify dataset objects have correct dataset_id + assert task.datasets["en_region_a"].dataset_id == "en_region_a" + assert task.datasets["en_region_b"].dataset_id == "en_region_b" + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/test_task_registry.py b/tests/test_task_registry.py index 0121ddb..b91e30b 100644 --- a/tests/test_task_registry.py +++ b/tests/test_task_registry.py @@ -53,10 +53,10 @@ def supported_target_languages(self): def default_metrics(self): return ["accuracy"] - def load_monolingual_data(self, language, split): - return {"test": "data", "language": str(language), "split": str(split)} + def load_dataset(self, dataset_id, 
split): + return {"test": "data", "dataset_id": dataset_id, "split": str(split)} - def evaluate(self, model, metrics=None, language="en"): + def evaluate(self, model, metrics=None, dataset_id="en"): return {"accuracy": 0.95, "test_metric": 1.0} diff --git a/tests/test_utils.py b/tests/test_utils.py index b8b4a85..7dcead9 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -42,10 +42,10 @@ def label_type(self) -> LabelType: def default_metrics(self) -> list[str]: return ["map"] - def load_monolingual_data(self, language: Language, split: DatasetSplit) -> Any: + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> Any: return {} - def evaluate(self, model, metrics=None, language: Language = Language.EN) -> dict[str, float]: + def evaluate(self, model, metrics=None, dataset_id: str = "en") -> dict[str, float]: return {} @@ -122,7 +122,7 @@ def _limit_dataset(self, dataset: RankingDataset) -> RankingDataset: query_texts=filtered_queries, target_indices=remapped_indices, target_space=limited_target_space, - language=dataset.language, + dataset_id=dataset.dataset_id, ) @@ -161,7 +161,7 @@ def _limit_classification_dataset( texts=limited_texts, labels=limited_labels, label_space=dataset.label_space, # Keep full label space - language=dataset.language, + dataset_id=dataset.dataset_id, ) @@ -184,10 +184,8 @@ def create_toy_task_class( class ToyRankingTask(ToyTaskMixin, base_task_class): """Dynamically created toy ranking task.""" - def load_monolingual_data( - self, split: DatasetSplit, language: Language - ) -> RankingDataset: - full_dataset = super().load_monolingual_data(split=split, language=language) + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> RankingDataset: + full_dataset = super().load_dataset(dataset_id=dataset_id, split=split) return self._limit_dataset(full_dataset) return_cls = ToyRankingTask @@ -197,10 +195,8 @@ def load_monolingual_data( class ToyClassificationTask(ToyClassificationTaskMixin, base_task_class): """Dynamically created toy classification task.""" - def load_monolingual_data( - self, split: DatasetSplit, language: Language - ) -> ClassificationDataset: - full_dataset = super().load_monolingual_data(split=split, language=language) + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> ClassificationDataset: + full_dataset = super().load_dataset(dataset_id=dataset_id, split=split) return self._limit_classification_dataset(full_dataset) return_cls = ToyClassificationTask From 17b189753fbc1810af45c33c4cb0b73e03116fc0 Mon Sep 17 00:00:00 2001 From: federetyk Date: Thu, 15 Jan 2026 14:43:48 +0100 Subject: [PATCH 02/17] fix: solve issues in example files --- examples/custom_model_example.py | 3 +++ examples/custom_task_example.py | 1 + examples/run_multiple_models.py | 2 ++ src/workrb/config.py | 2 +- 4 files changed, 7 insertions(+), 1 deletion(-) diff --git a/examples/custom_model_example.py b/examples/custom_model_example.py index 2a2d8f7..c2a5d4e 100644 --- a/examples/custom_model_example.py +++ b/examples/custom_model_example.py @@ -9,6 +9,7 @@ import torch from sentence_transformers import SentenceTransformer +import workrb from workrb.models.base import ModelInterface from workrb.registry import register_model from workrb.types import ModelInputType @@ -47,10 +48,12 @@ def __init__( self.encoder.to(device) self.encoder.eval() + @property def name(self) -> str: """Return the unique name of this model.""" return f"MyCustomModel-{self.base_model_name.split('/')[-1]}" + @property def description(self) -> str: """Return the 
description of this model.""" return "A custom model that demonstrates WorkRB extensibility" diff --git a/examples/custom_task_example.py b/examples/custom_task_example.py index 5b2eaa3..99edb3e 100644 --- a/examples/custom_task_example.py +++ b/examples/custom_task_example.py @@ -6,6 +6,7 @@ and implement the required abstract methods. """ +import workrb from workrb.registry import register_task from workrb.tasks.abstract.base import DatasetSplit, LabelType, Language from workrb.tasks.abstract.ranking_base import RankingDataset, RankingTaskGroup diff --git a/examples/run_multiple_models.py b/examples/run_multiple_models.py index ed36ad5..fb3fbd7 100644 --- a/examples/run_multiple_models.py +++ b/examples/run_multiple_models.py @@ -2,6 +2,8 @@ Reproduce benchmark results. """ +import workrb + if __name__ == "__main__": # 1. Setup model and tasks models = [ diff --git a/src/workrb/config.py b/src/workrb/config.py index 0d5c2b3..8a0597c 100644 --- a/src/workrb/config.py +++ b/src/workrb/config.py @@ -205,7 +205,7 @@ def get_pending_work( self, results: BenchmarkResults | None, tasks: Sequence[Task], - ) -> list[tuple]: + ) -> list[tuple[Task, str]]: """Determine what work still needs to be done. Work is defined as a (task, dataset_id) combination that is not completed. From e16f8ddf7c6baefb61c15366a00428f6f7d581c9 Mon Sep 17 00:00:00 2001 From: federetyk Date: Thu, 15 Jan 2026 18:43:09 +0100 Subject: [PATCH 03/17] fix: add language field to MetricsResult for proper per-language aggregation --- src/workrb/results.py | 21 ++++++++++++------ src/workrb/run.py | 2 ++ src/workrb/tasks/abstract/base.py | 36 +++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+), 6 deletions(-) diff --git a/src/workrb/results.py b/src/workrb/results.py index 5537bcf..6976113 100644 --- a/src/workrb/results.py +++ b/src/workrb/results.py @@ -28,6 +28,10 @@ class MetricsResult(BaseModel): evaluation_time: float = Field(ge=0) metrics_dict: dict[str, Any] = Field(default_factory=dict) """ Dictionary of metric names to their computed values. """ + language: str | None = Field( + default=None, + description="Language code if this is a monolingual dataset, None for cross-language datasets.", + ) class TaskResults(BaseModel): @@ -292,19 +296,24 @@ def _aggregate_per_language( ) -> dict[ResultTagString, float]: """Aggregate results per language. - Collects language-specific results over all tasks, and aggregates all available results. + Collects results for monolingual datasets and aggregates by language across all tasks. + Cross-language datasets (where language is None) are excluded from this aggregation. Results may be imbalanced if tasks support different languages. 
""" - # Collect metric values per task + # Collect metric values per language raw_results = defaultdict(list) for task_result in self.task_results.values(): - for dataset_id, metrics_result in task_result.language_results.items(): + for metrics_result in task_result.language_results.values(): + # Skip cross-language datasets + if metrics_result.language is None: + continue + for metric_name, metric_value in metrics_result.metrics_dict.items(): - raw_results[(dataset_id, metric_name)].append(metric_value) + raw_results[(metrics_result.language, metric_name)].append(metric_value) # Compute stats results = {} - for (dataset_id, metric_name), values in raw_results.items(): + for (language, metric_name), values in raw_results.items(): stats = self._compute_stats(values) for agg in aggregations: assert agg in stats, f"Aggregation {agg} not found in stats: {stats.keys()}" @@ -312,7 +321,7 @@ def _aggregate_per_language( name=tag_name, metric_name=metric_name, aggregation=agg, - grouping_name=dataset_id, + grouping_name=language, ) results[tag] = stats[agg] return results diff --git a/src/workrb/run.py b/src/workrb/run.py index 7fd6e3a..b4646f2 100644 --- a/src/workrb/run.py +++ b/src/workrb/run.py @@ -344,9 +344,11 @@ def _run_pending_work( evaluation_time = time.time() - start_time_eval # Store results + dataset_language = task.get_dataset_language(dataset_id) results.task_results[task.name].language_results[dataset_id] = MetricsResult( evaluation_time=evaluation_time, metrics_dict=dataset_results, + language=dataset_language.value if dataset_language else None, ) # Save incremental results to checkpoint diff --git a/src/workrb/tasks/abstract/base.py b/src/workrb/tasks/abstract/base.py index 83b7f9b..4affe76 100644 --- a/src/workrb/tasks/abstract/base.py +++ b/src/workrb/tasks/abstract/base.py @@ -202,6 +202,42 @@ def get_size_oneliner(self, dataset_id: str) -> str: """ return "" + def get_dataset_language(self, dataset_id: str) -> Language | None: + """Return the language of a dataset if it's monolingual, None otherwise. + + Default implementation assumes dataset_id equals language code (1:1 mapping). + This works for standard monolingual tasks where each dataset corresponds to + exactly one language. + + Tasks with arbitrary dataset IDs (not 1:1 with languages) must override this + method to return the appropriate language for each dataset, or None for + cross-language datasets. + + Args: + dataset_id: Dataset identifier + + Returns + ------- + Language enum if this is a monolingual dataset, None for cross-language datasets + + Raises + ------ + NotImplementedError: If dataset_id is not a valid language code and the + subclass has not overridden this method + """ + try: + lang = Language(dataset_id) + if lang in self.languages: + return lang + return None + except ValueError as e: + raise NotImplementedError( + f"Dataset ID '{dataset_id}' is not a valid language code. " + f"Task '{self.__class__.__name__}' uses arbitrary dataset IDs (not 1:1 with languages) " + f"and must override the 'get_dataset_language' method to map each dataset ID " + f"to its corresponding language, or return None for cross-language datasets." 
+ ) from e + @final @property def split_val_fraction(self) -> float: From e254bc2fd0d8a9efc6e722487e7aa0137e11da67 Mon Sep 17 00:00:00 2001 From: federetyk Date: Fri, 16 Jan 2026 10:01:03 +0100 Subject: [PATCH 04/17] style: update docstrings to comply with NumPy style --- src/workrb/tasks/abstract/base.py | 71 ++++++++++------ .../tasks/abstract/classification_base.py | 80 ++++++++++++------- src/workrb/tasks/abstract/ranking_base.py | 59 +++++++++----- 3 files changed, 138 insertions(+), 72 deletions(-) diff --git a/src/workrb/tasks/abstract/base.py b/src/workrb/tasks/abstract/base.py index 4affe76..8ab9516 100644 --- a/src/workrb/tasks/abstract/base.py +++ b/src/workrb/tasks/abstract/base.py @@ -93,25 +93,32 @@ def languages_to_dataset_ids(self, languages: list[Language]) -> list[str]: Other tasks with multiple datasets per language can override this method to return all datasets that use only languages from the provided set. - Args: - languages: List of Language enums requested for evaluation + Parameters + ---------- + languages : list[Language] + List of Language enums requested for evaluation. Returns ------- - List of dataset identifier strings + list[str] + List of dataset identifier strings. """ return [lang.value for lang in languages] def _load_datasets(self, dataset_ids: list[str], split: DatasetSplit) -> dict[str, Any]: """Load datasets for specified IDs. - Args: - dataset_ids: List of dataset identifiers to load - split: Dataset split to load + Parameters + ---------- + dataset_ids : list[str] + List of dataset identifiers to load. + split : DatasetSplit + Dataset split to load. Returns ------- - Dictionary mapping dataset_id to dataset object + dict[str, Any] + Dictionary mapping dataset_id to dataset object. """ datasets = {} for dataset_id in dataset_ids: @@ -193,12 +200,15 @@ def split_test_fraction(self) -> float: def get_size_oneliner(self, dataset_id: str) -> str: """Get dataset size summary to display status. - Args: - dataset_id: Dataset identifier + Parameters + ---------- + dataset_id : str + Dataset identifier. Returns ------- - Human-readable size string + str + Human-readable size string. """ return "" @@ -213,17 +223,21 @@ def get_dataset_language(self, dataset_id: str) -> Language | None: method to return the appropriate language for each dataset, or None for cross-language datasets. - Args: - dataset_id: Dataset identifier + Parameters + ---------- + dataset_id : str + Dataset identifier. Returns ------- - Language enum if this is a monolingual dataset, None for cross-language datasets + Language or None + Language enum if this is a monolingual dataset, None for cross-language datasets. Raises ------ - NotImplementedError: If dataset_id is not a valid language code and the - subclass has not overridden this method + NotImplementedError + If dataset_id is not a valid language code and the subclass has not + overridden this method. """ try: lang = Language(dataset_id) @@ -255,25 +269,34 @@ def load_dataset(self, dataset_id: str, split: DatasetSplit) -> Any: For other tasks: dataset_id can encode additional information like country and languages (e.g., "deu_q_de_c_de"). - Args: - dataset_id: Unique identifier for the dataset - split: Dataset split to load + Parameters + ---------- + dataset_id : str + Unique identifier for the dataset. + split : DatasetSplit + Dataset split to load. Returns ------- - Dataset object (RankingDataset or ClassificationDataset) + Any + Dataset object (RankingDataset or ClassificationDataset). 
""" @abstractmethod def evaluate(self, model, metrics=None, dataset_id: str = "en") -> dict[str, float]: """Evaluate model on specific dataset. - Args: - model: Model to evaluate - metrics: List of metric names. If None, uses default_metrics - dataset_id: Dataset identifier to evaluate on + Parameters + ---------- + model : ModelInterface + Model to evaluate. + metrics : list[str] or None, optional + List of metric names. If None, uses default_metrics. + dataset_id : str, optional + Dataset identifier to evaluate on. Default is "en". Returns ------- - Dictionary of metric names to values + dict[str, float] + Dictionary of metric names to values. """ diff --git a/src/workrb/tasks/abstract/classification_base.py b/src/workrb/tasks/abstract/classification_base.py index 999bea9..850bf4e 100644 --- a/src/workrb/tasks/abstract/classification_base.py +++ b/src/workrb/tasks/abstract/classification_base.py @@ -46,12 +46,17 @@ def __init__( ): """Initialize classification dataset with validation. - Args: - texts: List of input text strings - labels: List with list of class indices corresponding to each text. - Contains just 1 item per list for single-label classification. - label_space: List of class names/labels (e.g., ["skill1", "skill2", "skill3"]) - dataset_id: Unique identifier for this dataset + Parameters + ---------- + texts : list[str] + List of input text strings. + labels : list[list[int]] + List with list of class indices corresponding to each text. + Contains just 1 item per list for single-label classification. + label_space : list[str] + List of class names/labels (e.g., ["skill1", "skill2", "skill3"]). + dataset_id : str + Unique identifier for this dataset. """ self.texts = self._postprocess_texts(texts) self.labels = self._postprocess_labels(labels) @@ -178,12 +183,15 @@ def threshold(self) -> float | None: def get_output_space_size(self, dataset_id: str) -> int: """Number of output classes for this classification task. - Args: - dataset_id: Dataset identifier + Parameters + ---------- + dataset_id : str + Dataset identifier. Returns ------- - Number of classes in the output space + int + Number of classes in the output space. """ @property @@ -200,24 +208,31 @@ def load_dataset(self, dataset_id: str, split: DatasetSplit) -> ClassificationDa For other tasks: dataset_id can encode arbitrary information. - Args: - dataset_id: Unique identifier for the dataset - split: Dataset split to load + Parameters + ---------- + dataset_id : str + Unique identifier for the dataset. + split : DatasetSplit + Dataset split to load. Returns ------- - ClassificationDataset object + ClassificationDataset + ClassificationDataset object. """ def get_size_oneliner(self, dataset_id: str) -> str: """Get dataset summary to display for progress. - Args: - dataset_id: Dataset identifier + Parameters + ---------- + dataset_id : str + Dataset identifier. Returns ------- - Human-readable size string + str + Human-readable size string. """ dataset: ClassificationDataset = self.datasets[dataset_id] return f"{len(dataset.texts)} samples, {len(dataset.label_space)} classes" @@ -235,14 +250,19 @@ def evaluate( 2. Applies optimized threshold to test predictions 3. Calculates metrics on test data - Args: - model: Model implementing classification interface - metrics: List of metrics to compute - dataset_id: Dataset identifier to evaluate on + Parameters + ---------- + model : ModelInterface + Model implementing classification interface. + metrics : list[str] or None, optional + List of metrics to compute. 
+ dataset_id : str, optional + Dataset identifier to evaluate on. Default is "en". Returns ------- - Dictionary containing metric scores and evaluation metadata + dict[str, float] + Dictionary containing metric scores and evaluation metadata. """ if metrics is None: metrics = self.default_metrics @@ -335,13 +355,17 @@ def _validate_model_label_space( def get_threshold_on_val_data(self, model: ModelInterface, dataset_id: str) -> float: """Get the best threshold on validation data. - Args: - model: Model to evaluate - dataset_id: Dataset identifier + Parameters + ---------- + model : ModelInterface + Model to evaluate. + dataset_id : str + Dataset identifier. Returns ------- - Optimized threshold value + float + Optimized threshold value. """ # Step 1: Optimize threshold on validation data # Load validation data (even if we're evaluating on test) @@ -431,8 +455,10 @@ def __init__( ): """Initialize classification task. - Args: - **kwargs: Arguments passed to parent Task class (languages, split, etc.) + Parameters + ---------- + **kwargs + Arguments passed to parent Task class (languages, split, etc.). """ super().__init__(**kwargs) diff --git a/src/workrb/tasks/abstract/ranking_base.py b/src/workrb/tasks/abstract/ranking_base.py index 20265d0..635ae78 100644 --- a/src/workrb/tasks/abstract/ranking_base.py +++ b/src/workrb/tasks/abstract/ranking_base.py @@ -41,11 +41,16 @@ def __init__( ): """Initialize ranking dataset with validation. - Args: - query_texts: List of query strings - target_indices: List of lists containing indices into the target vocabulary - target_space: List of target vocabulary strings - dataset_id: Unique identifier for this dataset + Parameters + ---------- + query_texts : list[str] + List of query strings. + target_indices : list[list[int]] + List of lists containing indices into the target vocabulary. + target_space : list[str] + List of target vocabulary strings. + dataset_id : str + Unique identifier for this dataset. """ self.query_texts = self._postprocess_texts(query_texts) self.target_indices = self._postprocess_indices(target_indices) @@ -118,10 +123,10 @@ def __init__( ): """Initialize ranking task. - Args: - mode: Evaluation mode ("test" or "val") - language: Language code - **kwargs: Additional arguments for legacy compatibility + Parameters + ---------- + **kwargs + Additional arguments passed to parent Task class. """ super().__init__(**kwargs) @@ -144,24 +149,31 @@ def load_dataset(self, dataset_id: str, split: DatasetSplit) -> RankingDataset: For other tasks: dataset_id can encode arbitrary information. - Args: - dataset_id: Unique identifier for the dataset - split: Dataset split to load + Parameters + ---------- + dataset_id : str + Unique identifier for the dataset. + split : DatasetSplit + Dataset split to load. Returns ------- - RankingDataset object + RankingDataset + RankingDataset object. """ def get_size_oneliner(self, dataset_id: str) -> str: """Get dataset summary to display for progress. - Args: - dataset_id: Dataset identifier + Parameters + ---------- + dataset_id : str + Dataset identifier. Returns ------- - Human-readable size string + str + Human-readable size string. """ dataset = self.datasets[dataset_id] return f"{len(dataset.query_texts)} queries x {len(dataset.target_space)} targets" @@ -174,14 +186,19 @@ def evaluate( ) -> dict[str, float]: """Evaluate the model on this ranking task. - Args: - model: Model implementing ModelInterface (must have compute_rankings method) - metrics: List of metrics to compute. 
If None, uses default_metrics - dataset_id: Dataset identifier to evaluate on + Parameters + ---------- + model : ModelInterface + Model implementing ModelInterface (must have compute_rankings method). + metrics : list[str] or None, optional + List of metrics to compute. If None, uses default_metrics. + dataset_id : str, optional + Dataset identifier to evaluate on. Default is "en". Returns ------- - Dictionary containing metric scores and evaluation metadata + dict[str, float] + Dictionary containing metric scores and evaluation metadata. """ if metrics is None: metrics = self.default_metrics From 71d6d97d8b88cc7bd5055f3ed7758d280e5437b7 Mon Sep 17 00:00:00 2001 From: federetyk Date: Thu, 19 Feb 2026 20:11:41 +0100 Subject: [PATCH 05/17] refactor: rename language_results to datasetid_results for consistency with dataset_id abstraction --- src/workrb/config.py | 2 +- src/workrb/results.py | 10 +++++----- src/workrb/run.py | 4 ++-- tests/test_e2e_checkpointing.py | 6 +++--- tests/test_e2e_toy_benchmark.py | 4 ++-- tests/test_evaluate_multiple_models.py | 2 +- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/workrb/config.py b/src/workrb/config.py index 8a0597c..14daf6a 100644 --- a/src/workrb/config.py +++ b/src/workrb/config.py @@ -217,7 +217,7 @@ def get_pending_work( if ( results is not None and task.name in results.task_results - and dataset_id in results.task_results[task.name].language_results + and dataset_id in results.task_results[task.name].datasetid_results ): continue diff --git a/src/workrb/results.py b/src/workrb/results.py index 6976113..d3f5875 100644 --- a/src/workrb/results.py +++ b/src/workrb/results.py @@ -38,7 +38,7 @@ class TaskResults(BaseModel): """Results for a task.""" metadata: TaskResultMetadata - language_results: dict[str, MetricsResult] # dataset_id -> results + datasetid_results: dict[str, MetricsResult] # dataset_id -> results """Dictionary of dataset IDs to their computed results.""" @@ -99,7 +99,7 @@ def __str__(self) -> str: def get_num_evaluation_results(self) -> int: """Get the total number of evaluation results.""" - return sum(len(task.language_results) for task in self.task_results.values()) + return sum(len(task.datasetid_results) for task in self.task_results.values()) def get_summary_metrics(self, aggregations: tuple = ("mean", "ci_margin")) -> dict[str, float]: """ @@ -139,7 +139,7 @@ def _aggregate_per_task( # Collect metric values per task raw_results = defaultdict(list) for task_name, task_result in self.task_results.items(): - for lang_metrics_result in task_result.language_results.values(): + for lang_metrics_result in task_result.datasetid_results.values(): for metric_name, metric_value in lang_metrics_result.metrics_dict.items(): raw_results[(task_name, metric_name)].append(metric_value) @@ -303,7 +303,7 @@ def _aggregate_per_language( # Collect metric values per language raw_results = defaultdict(list) for task_result in self.task_results.values(): - for metrics_result in task_result.language_results.values(): + for metrics_result in task_result.datasetid_results.values(): # Skip cross-language datasets if metrics_result.language is None: continue @@ -352,7 +352,7 @@ def _get_flat_dataframe(self) -> pd.DataFrame: """Get flat dataframe of the benchmark results with each metric value as a separate row.""" data = [] for task_name, task_result in self.task_results.items(): - for dataset_id, metrics_result in task_result.language_results.items(): + for dataset_id, metrics_result in task_result.datasetid_results.items(): for 
metric_name, metric_value in metrics_result.metrics_dict.items(): data.append( { diff --git a/src/workrb/run.py b/src/workrb/run.py index b4646f2..7a0abdc 100644 --- a/src/workrb/run.py +++ b/src/workrb/run.py @@ -324,7 +324,7 @@ def _run_pending_work( description=task.description, split=task.split.value, ), - language_results={}, + datasetid_results={}, ) # Evaluate pending datasets @@ -345,7 +345,7 @@ def _run_pending_work( # Store results dataset_language = task.get_dataset_language(dataset_id) - results.task_results[task.name].language_results[dataset_id] = MetricsResult( + results.task_results[task.name].datasetid_results[dataset_id] = MetricsResult( evaluation_time=evaluation_time, metrics_dict=dataset_results, language=dataset_language.value if dataset_language else None, diff --git a/tests/test_e2e_checkpointing.py b/tests/test_e2e_checkpointing.py index e29ac51..cc9a130 100644 --- a/tests/test_e2e_checkpointing.py +++ b/tests/test_e2e_checkpointing.py @@ -47,7 +47,7 @@ def verify_checkpoint(checkpoint_path: Path, expected_completed: int, total_task # Count completed task-language combinations completed_count = 0 for task_result in task_results.values(): - completed_count += len(task_result.get("language_results", {})) + completed_count += len(task_result.get("datasetid_results", {})) print(f" ✓ Checkpoint has {completed_count}/{total_tasks} completed task(s)") assert completed_count == expected_completed, ( @@ -164,11 +164,11 @@ def name(self): f"Task '{task.name}' missing from final results" ) task_result = end_results.task_results[task.name] - assert Language.EN in task_result.language_results, ( + assert Language.EN in task_result.datasetid_results, ( f"Language 'en' missing for task '{task.name}'" ) - lang_result = task_result.language_results[Language.EN] + lang_result = task_result.datasetid_results[Language.EN] assert len(lang_result.metrics_dict) > 0, f"No metrics for task '{task.name}'" print(f" ✓ Task '{task.name}' has complete results") diff --git a/tests/test_e2e_toy_benchmark.py b/tests/test_e2e_toy_benchmark.py index 5964780..4824b86 100644 --- a/tests/test_e2e_toy_benchmark.py +++ b/tests/test_e2e_toy_benchmark.py @@ -168,11 +168,11 @@ def test_e2e_toy_benchmark(): task_result = results.task_results[task_name] # Check if language results exist - if Language.EN not in task_result.language_results: + if Language.EN not in task_result.datasetid_results: validation_errors.append(f"Missing language results for {task_name} (en)") continue - lang_result = task_result.language_results[Language.EN] + lang_result = task_result.datasetid_results[Language.EN] # Validate metrics based on task type assert len(lang_result.metrics_dict) > 0, f"No metrics found for {task_name}" diff --git a/tests/test_evaluate_multiple_models.py b/tests/test_evaluate_multiple_models.py index 93801f3..c6ccea2 100644 --- a/tests/test_evaluate_multiple_models.py +++ b/tests/test_evaluate_multiple_models.py @@ -85,7 +85,7 @@ def create_mock_results(model_name: str, task_name: str) -> BenchmarkResults: description="Find similar skills using the SkillMatch-1K dataset", split="val", ), - language_results={ + datasetid_results={ "en": MetricsResult( evaluation_time=1.0, metrics_dict={"map": 0.5}, From 647b0701b295162afcabbf3f2baedad11812f391 Mon Sep 17 00:00:00 2001 From: federetyk Date: Fri, 20 Feb 2026 14:15:26 +0100 Subject: [PATCH 06/17] docs: clarify get_dataset_language docstring on purpose and when to override --- src/workrb/tasks/abstract/base.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 
6 deletions(-) diff --git a/src/workrb/tasks/abstract/base.py b/src/workrb/tasks/abstract/base.py index 8ab9516..b3893b9 100644 --- a/src/workrb/tasks/abstract/base.py +++ b/src/workrb/tasks/abstract/base.py @@ -213,14 +213,16 @@ def get_size_oneliner(self, dataset_id: str) -> str: return "" def get_dataset_language(self, dataset_id: str) -> Language | None: - """Return the language of a dataset if it's monolingual, None otherwise. + """Map a dataset ID to its language for per-language metric aggregation. - Default implementation assumes dataset_id equals language code (1:1 mapping). - This works for standard monolingual tasks where each dataset corresponds to - exactly one language. + This method is used during metric aggregation to group results by language. + The default implementation assumes a 1:1 mapping between dataset IDs and + language codes, which works for standard monolingual tasks. - Tasks with arbitrary dataset IDs (not 1:1 with languages) must override this - method to return the appropriate language for each dataset, or None for + Override this method in tasks where dataset IDs do not correspond directly to + language codes (e.g., MELO tasks use compound dataset IDs like + ``"ita_q_it_c_it"`` for a monolingual Italian dataset). The override should + return the appropriate ``Language`` for monolingual datasets, or ``None`` for cross-language datasets. Parameters From 3a9514d6f9c23e08155f9d5ec3e65b25e6831cdd Mon Sep 17 00:00:00 2001 From: federetyk Date: Sat, 21 Feb 2026 23:05:13 +0100 Subject: [PATCH 07/17] refactor: migrate freelancer project matching tasks to load_dataset API --- .../ranking/freelancer_project_matching.py | 17 ++++++++++++----- tests/test_freelancer_project_matching.py | 4 ++-- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/src/workrb/tasks/ranking/freelancer_project_matching.py b/src/workrb/tasks/ranking/freelancer_project_matching.py index 0d6d69e..fe712ef 100644 --- a/src/workrb/tasks/ranking/freelancer_project_matching.py +++ b/src/workrb/tasks/ranking/freelancer_project_matching.py @@ -113,6 +113,7 @@ def _load_and_format_data( self, split: DatasetSplit, language: Language, + dataset_id: str, query_key: str, score_key: str, query_id_column: str, @@ -162,7 +163,7 @@ def _load_and_format_data( ] corpus = [self._candidate_profile_to_str(p) for p in candidate_df.to_dict(orient="records")] - return RankingDataset(queries, relevancy_labels, corpus, language=language) + return RankingDataset(queries, relevancy_labels, corpus, dataset_id=dataset_id) @property def citation(self) -> str: @@ -239,9 +240,12 @@ def query_input_type(self) -> ModelInputType: def _input_to_str(input_dict: dict[str, str]) -> str: return f"{input_dict['title']}\n\n{input_dict['description']}" - def load_monolingual_data(self, split: DatasetSplit, language: Language) -> RankingDataset: + def load_dataset(self, dataset_id: str, split: DatasetSplit) -> RankingDataset: """Load Job Title Similarity data from the HuggingFace dataset.""" - return self._load_and_format_data(split, language, "briefs", "brief_scores", "brief_id") + language = Language(dataset_id) + return self._load_and_format_data( + split, language, dataset_id, "briefs", "brief_scores", "brief_id" + ) @register_task() @@ -296,6 +300,9 @@ def query_input_type(self) -> ModelInputType: def _input_to_str(input_dict: dict[str, str]) -> str: return input_dict["value"] - def load_monolingual_data(self, split: DatasetSplit, language: Language) -> RankingDataset: + def load_dataset(self, dataset_id: str, split: 
DatasetSplit) -> RankingDataset: """Load Job Title Similarity data from the HuggingFace dataset.""" - return self._load_and_format_data(split, language, "queries", "query_scores", "query_id") + language = Language(dataset_id) + return self._load_and_format_data( + split, language, dataset_id, "queries", "query_scores", "query_id" + ) diff --git a/tests/test_freelancer_project_matching.py b/tests/test_freelancer_project_matching.py index 7ea7c10..8532c88 100644 --- a/tests/test_freelancer_project_matching.py +++ b/tests/test_freelancer_project_matching.py @@ -5,14 +5,14 @@ def test_freelancer_project_ranking_task_loads(): """Test that task loads without errors""" task = workrb.tasks.ProjectCandidateRanking(split="test", languages=[Language.EN.value]) - dataset = task.lang_datasets[Language.EN] + dataset = task.datasets["en"] assert len(dataset.query_texts) > 0 assert len(dataset.target_space) > 0 assert len(dataset.target_indices) == len(dataset.query_texts) task = workrb.tasks.SearchQueryCandidateRanking(split="test", languages=[Language.EN.value]) - dataset = task.lang_datasets[Language.EN] + dataset = task.datasets["en"] assert len(dataset.query_texts) > 0 assert len(dataset.target_space) > 0 From 879dece68632838e9e13f2a7bf470a59ca44196a Mon Sep 17 00:00:00 2001 From: federetyk Date: Sun, 22 Feb 2026 02:52:30 +0100 Subject: [PATCH 08/17] feat: add cross-lingual aggregation modes for per-language metrics Introduce DatasetLanguages type and LanguageAggregationMode enum with three modes: monolingual_only, crosslingual_group_input_languages, and crosslingual_group_output_languages. Rename get_dataset_language() to get_dataset_languages(), now returning input and output language sets. Replace MetricsResult.language with input_languages/output_languages. Datasets incompatible with the chosen aggregation mode are skipped rather than causing a failure. BREAKING CHANGE: MetricsResult.language replaced by input_languages/output_languages --- src/workrb/results.py | 86 +++++++++-- src/workrb/run.py | 9 +- src/workrb/tasks/abstract/base.py | 36 +++-- src/workrb/types.py | 52 +++++++ tests/test_evaluate_multiple_models.py | 2 + tests/test_multi_dataset_task.py | 206 ++++++++++++++++++++++++- 6 files changed, 362 insertions(+), 29 deletions(-) diff --git a/src/workrb/results.py b/src/workrb/results.py index d3f5875..40dce12 100644 --- a/src/workrb/results.py +++ b/src/workrb/results.py @@ -8,6 +8,8 @@ from pydantic import BaseModel, Field from scipy import stats +from workrb.types import LanguageAggregationMode + class TaskResultMetadata(BaseModel): """Metadata for a task result.""" @@ -28,9 +30,13 @@ class MetricsResult(BaseModel): evaluation_time: float = Field(ge=0) metrics_dict: dict[str, Any] = Field(default_factory=dict) """ Dictionary of metric names to their computed values. """ - language: str | None = Field( - default=None, - description="Language code if this is a monolingual dataset, None for cross-language datasets.", + input_languages: list[str] = Field( + default_factory=list, + description="Input language codes for this dataset (e.g. query languages).", + ) + output_languages: list[str] = Field( + default_factory=list, + description="Output language codes for this dataset (e.g. 
target languages).", ) @@ -289,27 +295,87 @@ def _aggregate_benchmark( metric_results[tag] = stats[agg] return metric_results + @staticmethod + def _get_language_grouping_key( + metrics_result: "MetricsResult", + mode: LanguageAggregationMode, + ) -> str | None: + """Determine the grouping language for a dataset result. + + Returns ``None`` when the dataset is incompatible with the requested + mode, so that the caller can skip it during aggregation. + + Parameters + ---------- + metrics_result : MetricsResult + The metrics result to extract a language key from. + mode : LanguageAggregationMode + The aggregation mode controlling how the language key is derived. + + Returns + ------- + str or None + Language code to group by, or ``None`` if the dataset is + incompatible with the mode. + """ + input_languages = metrics_result.input_languages + output_languages = metrics_result.output_languages + + if mode == LanguageAggregationMode.MONOLINGUAL_ONLY: + if ( + len(input_languages) != 1 + or len(output_languages) != 1 + or input_languages[0] != output_languages[0] + ): + return None + return input_languages[0] + + if mode == LanguageAggregationMode.CROSSLINGUAL_GROUP_INPUT_LANGUAGES: + if len(input_languages) != 1: + return None + return input_languages[0] + + if mode == LanguageAggregationMode.CROSSLINGUAL_GROUP_OUTPUT_LANGUAGES: + if len(output_languages) != 1: + return None + return output_languages[0] + + return None + def _aggregate_per_language( self, tag_name: str = "mean_per_language", aggregations: tuple = ("mean", "stderr", "ci_margin"), + aggregation_mode: LanguageAggregationMode = LanguageAggregationMode.MONOLINGUAL_ONLY, ) -> dict[ResultTagString, float]: """Aggregate results per language. - Collects results for monolingual datasets and aggregates by language across all tasks. - Cross-language datasets (where language is None) are excluded from this aggregation. - Results may be imbalanced if tasks support different languages. + Groups dataset results by language across all tasks and computes + aggregate statistics. The ``aggregation_mode`` parameter controls how + the grouping language is determined for each dataset. + + Parameters + ---------- + tag_name : str + Prefix for the result tag strings. + aggregations : tuple + Statistics to compute (e.g. ``"mean"``, ``"stderr"``). + aggregation_mode : LanguageAggregationMode + How to determine the grouping language for each dataset result. + Defaults to ``MONOLINGUAL_ONLY`` (backward compatible for benchmarks + with only monolingual datasets). + Datasets incompatible with the chosen mode are silently skipped. 
""" # Collect metric values per language raw_results = defaultdict(list) for task_result in self.task_results.values(): for metrics_result in task_result.datasetid_results.values(): - # Skip cross-language datasets - if metrics_result.language is None: + language_key = self._get_language_grouping_key(metrics_result, aggregation_mode) + if language_key is None: + # Skip if incompatible with the aggregation mode continue - for metric_name, metric_value in metrics_result.metrics_dict.items(): - raw_results[(metrics_result.language, metric_name)].append(metric_value) + raw_results[(language_key, metric_name)].append(metric_value) # Compute stats results = {} diff --git a/src/workrb/run.py b/src/workrb/run.py index 7a0abdc..b1d2cd6 100644 --- a/src/workrb/run.py +++ b/src/workrb/run.py @@ -344,11 +344,16 @@ def _run_pending_work( evaluation_time = time.time() - start_time_eval # Store results - dataset_language = task.get_dataset_language(dataset_id) + dataset_languages = task.get_dataset_languages(dataset_id) results.task_results[task.name].datasetid_results[dataset_id] = MetricsResult( evaluation_time=evaluation_time, metrics_dict=dataset_results, - language=dataset_language.value if dataset_language else None, + input_languages=sorted( + lang.value for lang in dataset_languages.input_languages + ), + output_languages=sorted( + lang.value for lang in dataset_languages.output_languages + ), ) # Save incremental results to checkpoint diff --git a/src/workrb/tasks/abstract/base.py b/src/workrb/tasks/abstract/base.py index b3893b9..8731981 100644 --- a/src/workrb/tasks/abstract/base.py +++ b/src/workrb/tasks/abstract/base.py @@ -5,7 +5,7 @@ from enum import Enum from typing import Any, Literal, final -from workrb.types import DatasetSplit, LabelType, Language +from workrb.types import DatasetLanguages, DatasetSplit, LabelType, Language logger = logging.getLogger(__name__) @@ -212,18 +212,19 @@ def get_size_oneliner(self, dataset_id: str) -> str: """ return "" - def get_dataset_language(self, dataset_id: str) -> Language | None: - """Map a dataset ID to its language for per-language metric aggregation. + def get_dataset_languages(self, dataset_id: str) -> DatasetLanguages: + """Map a dataset ID to its input/output languages for metric aggregation. This method is used during metric aggregation to group results by language. - The default implementation assumes a 1:1 mapping between dataset IDs and - language codes, which works for standard monolingual tasks. + The default implementation assumes that each dataset is monolingual and + that the dataset ID is the language code (e.g., ``"en"``, ``"de"``). It + returns that language as both input and output. Override this method in tasks where dataset IDs do not correspond directly to language codes (e.g., MELO tasks use compound dataset IDs like - ``"ita_q_it_c_it"`` for a monolingual Italian dataset). The override should - return the appropriate ``Language`` for monolingual datasets, or ``None`` for - cross-language datasets. + ``"ita_q_it_c_it"`` for a monolingual Italian dataset or ``"ita_q_it_c_en"`` + for a cross-lingual Italian-English dataset). The override should return a + ``DatasetLanguages`` with the appropriate input and output language sets. Parameters ---------- @@ -232,8 +233,8 @@ def get_dataset_language(self, dataset_id: str) -> Language | None: Returns ------- - Language or None - Language enum if this is a monolingual dataset, None for cross-language datasets. 
+ DatasetLanguages + Named tuple with ``input_languages`` and ``output_languages`` frozensets. Raises ------ @@ -243,16 +244,19 @@ def get_dataset_language(self, dataset_id: str) -> Language | None: """ try: lang = Language(dataset_id) - if lang in self.languages: - return lang - return None except ValueError as e: raise NotImplementedError( f"Dataset ID '{dataset_id}' is not a valid language code. " - f"Task '{self.__class__.__name__}' uses arbitrary dataset IDs (not 1:1 with languages) " - f"and must override the 'get_dataset_language' method to map each dataset ID " - f"to its corresponding language, or return None for cross-language datasets." + f"The default implementation assumes that each dataset is " + f"monolingual and that the dataset ID is the language code. " + f"Task '{self.__class__.__name__}' violates this assumption " + f"and must override 'get_dataset_languages' to map each " + f"dataset ID to its corresponding DatasetLanguages." ) from e + return DatasetLanguages( + input_languages=frozenset({lang}), + output_languages=frozenset({lang}), + ) @final @property diff --git a/src/workrb/types.py b/src/workrb/types.py index 2ac3068..3e37ff7 100644 --- a/src/workrb/types.py +++ b/src/workrb/types.py @@ -1,6 +1,7 @@ """Shared types and enums used across WorkRB.""" from enum import Enum +from typing import NamedTuple class Language(str, Enum): @@ -40,6 +41,57 @@ class Language(str, Enum): CROSS = "cross_lingual" +class DatasetLanguages(NamedTuple): + """Languages associated with a dataset for metric aggregation. + + Describes the input and output languages of a dataset. Used by + ``_aggregate_per_language`` to group results by language. + + Examples + -------- + Monolingual (e.g. English-only): + ``DatasetLanguages(input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.EN}))`` + + Cross-lingual (e.g. English queries, German targets): + ``DatasetLanguages(input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.DE}))`` + + Multilingual (e.g. queries in multiple languages, targets in one): + ``DatasetLanguages(input_languages=frozenset({Language.EN, Language.FR}), + output_languages=frozenset({Language.DE}))`` + """ + + input_languages: frozenset[Language] + output_languages: frozenset[Language] + + +class LanguageAggregationMode(str, Enum): + """Mode for grouping datasets by language during metric aggregation. + + Controls how ``_aggregate_per_language`` determines the grouping language + for each dataset result. + """ + + MONOLINGUAL_ONLY = "monolingual_only" + """Only aggregate monolingual datasets (singleton input == singleton output). + + Cross-lingual or multilingual datasets are skipped. + """ + + CROSSLINGUAL_GROUP_INPUT_LANGUAGES = "crosslingual_group_input_languages" + """Group by the input language (requires singleton input_languages). + + Datasets with multiple input languages are skipped. + """ + + CROSSLINGUAL_GROUP_OUTPUT_LANGUAGES = "crosslingual_group_output_languages" + """Group by the output language (requires singleton output_languages). + + Datasets with multiple output languages are skipped. 
+ """ + + class LabelType(str, Enum): """Label type enum.""" diff --git a/tests/test_evaluate_multiple_models.py b/tests/test_evaluate_multiple_models.py index c6ccea2..e5a470e 100644 --- a/tests/test_evaluate_multiple_models.py +++ b/tests/test_evaluate_multiple_models.py @@ -89,6 +89,8 @@ def create_mock_results(model_name: str, task_name: str) -> BenchmarkResults: "en": MetricsResult( evaluation_time=1.0, metrics_dict={"map": 0.5}, + input_languages=["en"], + output_languages=["en"], ) }, ) diff --git a/tests/test_multi_dataset_task.py b/tests/test_multi_dataset_task.py index 0303246..fc2e43a 100644 --- a/tests/test_multi_dataset_task.py +++ b/tests/test_multi_dataset_task.py @@ -6,11 +6,20 @@ like MELO benchmark where datasets encode additional metadata beyond language. """ +import time + import pytest from workrb.models import BiEncoderModel +from workrb.results import ( + BenchmarkMetadata, + BenchmarkResults, + MetricsResult, + TaskResultMetadata, + TaskResults, +) from workrb.tasks import ESCOJob2SkillRanking, RankingDataset -from workrb.types import Language +from workrb.types import DatasetLanguages, Language, LanguageAggregationMode class TestMultiDatasetTask: @@ -173,5 +182,200 @@ def load_dataset(self, dataset_id: str, split): assert task.datasets["en_region_b"].dataset_id == "en_region_b" +def _make_benchmark_results( + dataset_entries: list[tuple[str, str, dict[str, float], list[str], list[str]]], +) -> BenchmarkResults: + """Build a BenchmarkResults with controlled MetricsResult entries. + + Parameters + ---------- + dataset_entries : list of tuples + Each tuple is ``(task_name, dataset_id, metrics_dict, input_languages, output_languages)``. + """ + task_results: dict[str, TaskResults] = {} + for task_name, dataset_id, metrics_dict, inp_langs, out_langs in dataset_entries: + if task_name not in task_results: + task_results[task_name] = TaskResults( + metadata=TaskResultMetadata( + task_group="test_group", + task_type="ranking", + label_type="single_label", + description="test", + split="val", + ), + datasetid_results={}, + ) + task_results[task_name].datasetid_results[dataset_id] = MetricsResult( + evaluation_time=0.1, + metrics_dict=metrics_dict, + input_languages=inp_langs, + output_languages=out_langs, + ) + return BenchmarkResults( + task_results=task_results, + metadata=BenchmarkMetadata( + model_name="test_model", + total_evaluation_time=1.0, + timestamp=time.time(), + num_tasks=len(task_results), + languages=["en"], + ), + ) + + +class TestGetDatasetLanguages: + """Tests for Task.get_dataset_languages().""" + + def test_default_monolingual(self): + """Default implementation returns matching DatasetLanguages for standard language IDs.""" + + class MonoTask(ESCOJob2SkillRanking): + def load_dataset(self, dataset_id, split): + return RankingDataset( + query_texts=["q"], + target_indices=[[0]], + target_space=["t"], + dataset_id=dataset_id, + ) + + task = MonoTask(split="val", languages=["en"]) + result = task.get_dataset_languages("en") + assert result == DatasetLanguages( + input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.EN}), + ) + + def test_raises_for_non_standard_ids(self): + """Default implementation raises NotImplementedError for arbitrary dataset IDs.""" + + class ArbitraryIdTask(ESCOJob2SkillRanking): + def languages_to_dataset_ids(self, languages): + return ["custom_dataset_1"] + + def load_dataset(self, dataset_id, split): + return RankingDataset( + query_texts=["q"], + target_indices=[[0]], + target_space=["t"], + 
dataset_id=dataset_id, + ) + + task = ArbitraryIdTask(split="val", languages=["en"]) + with pytest.raises(NotImplementedError, match="not a valid language code"): + task.get_dataset_languages("custom_dataset_1") + + +class TestAggregationModes: + """Tests for _aggregate_per_language aggregation modes.""" + + def test_monolingual_only_with_monolingual_datasets(self): + """MONOLINGUAL_ONLY correctly groups monolingual datasets by language.""" + br = _make_benchmark_results( + [ + ("task1", "en", {"map": 0.8}, ["en"], ["en"]), + ("task1", "de", {"map": 0.6}, ["de"], ["de"]), + ("task2", "en", {"map": 0.9}, ["en"], ["en"]), + ] + ) + result = br._aggregate_per_language( + aggregation_mode=LanguageAggregationMode.MONOLINGUAL_ONLY, + ) + result_str = {str(k): v for k, v in result.items()} + assert "mean_per_language/en/map/mean" in result_str + assert "mean_per_language/de/map/mean" in result_str + # en: mean of 0.8 and 0.9 = 0.85 + assert result_str["mean_per_language/en/map/mean"] == pytest.approx(0.85) + # de: single value 0.6 + assert result_str["mean_per_language/de/map/mean"] == pytest.approx(0.6) + + def test_monolingual_only_skips_crosslingual_dataset(self): + """MONOLINGUAL_ONLY skips cross-lingual datasets.""" + br = _make_benchmark_results( + [ + ("task1", "en_de", {"map": 0.7}, ["en"], ["de"]), + ] + ) + result = br._aggregate_per_language( + aggregation_mode=LanguageAggregationMode.MONOLINGUAL_ONLY, + ) + assert result == {} + + def test_monolingual_only_skips_multilingual_dataset(self): + """MONOLINGUAL_ONLY skips multilingual datasets.""" + br = _make_benchmark_results( + [ + ("task1", "multi", {"map": 0.7}, ["en", "fr"], ["en", "fr"]), + ] + ) + result = br._aggregate_per_language( + aggregation_mode=LanguageAggregationMode.MONOLINGUAL_ONLY, + ) + assert result == {} + + def test_crosslingual_group_input_languages(self): + """CROSSLINGUAL_GROUP_INPUT_LANGUAGES groups by input language.""" + br = _make_benchmark_results( + [ + ("task1", "en_to_de", {"map": 0.7}, ["en"], ["de"]), + ("task1", "en_to_fr", {"map": 0.9}, ["en"], ["fr"]), + ("task1", "de_to_fr", {"map": 0.5}, ["de"], ["fr"]), + ] + ) + result = br._aggregate_per_language( + aggregation_mode=LanguageAggregationMode.CROSSLINGUAL_GROUP_INPUT_LANGUAGES, + ) + result_str = {str(k): v for k, v in result.items()} + assert "mean_per_language/en/map/mean" in result_str + assert "mean_per_language/de/map/mean" in result_str + # en: mean of 0.7 and 0.9 = 0.8 + assert result_str["mean_per_language/en/map/mean"] == pytest.approx(0.8) + # de: single value 0.5 + assert result_str["mean_per_language/de/map/mean"] == pytest.approx(0.5) + + def test_crosslingual_group_input_skips_multi_input(self): + """CROSSLINGUAL_GROUP_INPUT_LANGUAGES skips datasets with multiple input langs.""" + br = _make_benchmark_results( + [ + ("task1", "multi_in", {"map": 0.7}, ["en", "fr"], ["de"]), + ] + ) + result = br._aggregate_per_language( + aggregation_mode=LanguageAggregationMode.CROSSLINGUAL_GROUP_INPUT_LANGUAGES, + ) + assert result == {} + + def test_crosslingual_group_output_languages(self): + """CROSSLINGUAL_GROUP_OUTPUT_LANGUAGES groups by output language.""" + br = _make_benchmark_results( + [ + ("task1", "en_to_de", {"map": 0.7}, ["en"], ["de"]), + ("task1", "fr_to_de", {"map": 0.9}, ["fr"], ["de"]), + ("task1", "en_to_fr", {"map": 0.5}, ["en"], ["fr"]), + ] + ) + result = br._aggregate_per_language( + aggregation_mode=LanguageAggregationMode.CROSSLINGUAL_GROUP_OUTPUT_LANGUAGES, + ) + result_str = {str(k): v for k, v in result.items()} + assert 
"mean_per_language/de/map/mean" in result_str + assert "mean_per_language/fr/map/mean" in result_str + # de: mean of 0.7 and 0.9 = 0.8 + assert result_str["mean_per_language/de/map/mean"] == pytest.approx(0.8) + # fr: single value 0.5 + assert result_str["mean_per_language/fr/map/mean"] == pytest.approx(0.5) + + def test_crosslingual_group_output_skips_multi_output(self): + """CROSSLINGUAL_GROUP_OUTPUT_LANGUAGES skips datasets with multiple output langs.""" + br = _make_benchmark_results( + [ + ("task1", "multi_out", {"map": 0.7}, ["en"], ["de", "fr"]), + ] + ) + result = br._aggregate_per_language( + aggregation_mode=LanguageAggregationMode.CROSSLINGUAL_GROUP_OUTPUT_LANGUAGES, + ) + assert result == {} + + if __name__ == "__main__": pytest.main([__file__, "-v"]) From 72b8e40396749f2df489d83d94f24ad7dccdbd74 Mon Sep 17 00:00:00 2001 From: federetyk Date: Mon, 23 Feb 2026 19:44:37 +0100 Subject: [PATCH 09/17] test: make it explicit that the dataset key "en" comes from the Language enum --- tests/test_freelancer_project_matching.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/test_freelancer_project_matching.py b/tests/test_freelancer_project_matching.py index 8532c88..3deff5f 100644 --- a/tests/test_freelancer_project_matching.py +++ b/tests/test_freelancer_project_matching.py @@ -4,15 +4,17 @@ def test_freelancer_project_ranking_task_loads(): """Test that task loads without errors""" - task = workrb.tasks.ProjectCandidateRanking(split="test", languages=[Language.EN.value]) - dataset = task.datasets["en"] + dataset_name = Language.EN.value + + task = workrb.tasks.ProjectCandidateRanking(split="test", languages=[dataset_name]) + dataset = task.datasets[dataset_name] assert len(dataset.query_texts) > 0 assert len(dataset.target_space) > 0 assert len(dataset.target_indices) == len(dataset.query_texts) - task = workrb.tasks.SearchQueryCandidateRanking(split="test", languages=[Language.EN.value]) - dataset = task.datasets["en"] + task = workrb.tasks.SearchQueryCandidateRanking(split="test", languages=[dataset_name]) + dataset = task.datasets[dataset_name] assert len(dataset.query_texts) > 0 assert len(dataset.target_space) > 0 From 033db0fe16dbc2131323f34a24343599c74a8457 Mon Sep 17 00:00:00 2001 From: federetyk Date: Mon, 23 Feb 2026 19:56:21 +0100 Subject: [PATCH 10/17] test: fix lexical baselines regression test to use dataset_id parameter --- tests/test_lexical_baselines_regression.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/test_lexical_baselines_regression.py b/tests/test_lexical_baselines_regression.py index f45d2a5..cedf0d8 100644 --- a/tests/test_lexical_baselines_regression.py +++ b/tests/test_lexical_baselines_regression.py @@ -120,7 +120,7 @@ class TestLexicalBaselineRegression: def test_metrics_match_expected(self, job_similarity_task, model_name, model_factory): """Assert that evaluation metrics match pre-recorded expected values.""" model = model_factory() - results = job_similarity_task.evaluate(model, language=Language.EN) + results = job_similarity_task.evaluate(model, dataset_id=Language.EN.value) expected = EXPECTED_METRICS[model_name] print(f"\n[{model_name}] actual metrics: {results}") @@ -135,7 +135,7 @@ def test_metrics_match_expected(self, job_similarity_task, model_name, model_fac def test_all_default_metrics_present(self, job_similarity_task, model_name, model_factory): """Assert that all default ranking metrics appear in results.""" model = model_factory() - results = 
job_similarity_task.evaluate(model, language=Language.EN) + results = job_similarity_task.evaluate(model, dataset_id=Language.EN.value) for metric_name in job_similarity_task.default_metrics: assert metric_name in results, ( From 724b0e0d9e964c03b078cb3e3ea3eb7111322750 Mon Sep 17 00:00:00 2001 From: federetyk Date: Mon, 23 Feb 2026 21:37:32 +0100 Subject: [PATCH 11/17] feat: add lazy execution filtering and ExecutionMode enum Skip datasets incompatible with the chosen LanguageAggregationMode before evaluation to avoid unnecessary compute. Extract get_language_grouping_key() as a shared function in types.py so the same compatibility logic is reused by both the eval-time filter (_filter_pending_work) and the aggregation-time skip in results.py. - Add get_language_grouping_key() to types.py, refactor BenchmarkResults._get_language_grouping_key() to delegate to it - Add ExecutionMode enum (LAZY / ALL) - Add language_aggregation_mode and execution_mode params to evaluate() - Add language_aggregation_mode param to get_summary_metrics() - Export ExecutionMode and LanguageAggregationMode from workrb - Add tests for shared function and MELO-like filtering scenarios --- src/workrb/__init__.py | 3 + src/workrb/results.py | 67 +++++---- src/workrb/run.py | 57 +++++++- src/workrb/types.py | 63 +++++++++ tests/test_multi_dataset_task.py | 234 ++++++++++++++++++++++++++++++- 5 files changed, 391 insertions(+), 33 deletions(-) diff --git a/src/workrb/__init__.py b/src/workrb/__init__.py index 1e61d30..ed9b07c 100644 --- a/src/workrb/__init__.py +++ b/src/workrb/__init__.py @@ -6,8 +6,11 @@ from workrb.registry import list_available_tasks from workrb.results import load_results from workrb.run import evaluate, evaluate_multiple_models, get_tasks_overview +from workrb.types import ExecutionMode, LanguageAggregationMode __all__ = [ + "ExecutionMode", + "LanguageAggregationMode", "data", "evaluate", "evaluate_multiple_models", diff --git a/src/workrb/results.py b/src/workrb/results.py index 40dce12..a8359d8 100644 --- a/src/workrb/results.py +++ b/src/workrb/results.py @@ -1,4 +1,5 @@ import json +import logging import pprint from collections import defaultdict from typing import Any @@ -8,7 +9,9 @@ from pydantic import BaseModel, Field from scipy import stats -from workrb.types import LanguageAggregationMode +from workrb.types import LanguageAggregationMode, get_language_grouping_key + +logger = logging.getLogger(__name__) class TaskResultMetadata(BaseModel): @@ -107,9 +110,21 @@ def get_num_evaluation_results(self) -> int: """Get the total number of evaluation results.""" return sum(len(task.datasetid_results) for task in self.task_results.values()) - def get_summary_metrics(self, aggregations: tuple = ("mean", "ci_margin")) -> dict[str, float]: + def get_summary_metrics( + self, + aggregations: tuple = ("mean", "ci_margin"), + language_aggregation_mode: LanguageAggregationMode = LanguageAggregationMode.MONOLINGUAL_ONLY, + ) -> dict[str, float]: """ Get summary metrics for the benchmark results. + + Parameters + ---------- + aggregations : tuple + Statistics to compute (e.g. ``"mean"``, ``"ci_margin"``). + language_aggregation_mode : LanguageAggregationMode + How to determine the grouping language for per-language aggregation. + Defaults to ``MONOLINGUAL_ONLY``. 
""" mean_per_task = self._aggregate_per_task( aggregations=aggregations, @@ -125,6 +140,7 @@ def get_summary_metrics(self, aggregations: tuple = ("mean", "ci_margin")) -> di ) mean_per_language = self._aggregate_per_language( aggregations=aggregations, + aggregation_mode=language_aggregation_mode, ) combined = { @@ -302,6 +318,8 @@ def _get_language_grouping_key( ) -> str | None: """Determine the grouping language for a dataset result. + Delegates to :func:`workrb.types.get_language_grouping_key`. + Returns ``None`` when the dataset is incompatible with the requested mode, so that the caller can skip it during aggregation. @@ -318,29 +336,11 @@ def _get_language_grouping_key( Language code to group by, or ``None`` if the dataset is incompatible with the mode. """ - input_languages = metrics_result.input_languages - output_languages = metrics_result.output_languages - - if mode == LanguageAggregationMode.MONOLINGUAL_ONLY: - if ( - len(input_languages) != 1 - or len(output_languages) != 1 - or input_languages[0] != output_languages[0] - ): - return None - return input_languages[0] - - if mode == LanguageAggregationMode.CROSSLINGUAL_GROUP_INPUT_LANGUAGES: - if len(input_languages) != 1: - return None - return input_languages[0] - - if mode == LanguageAggregationMode.CROSSLINGUAL_GROUP_OUTPUT_LANGUAGES: - if len(output_languages) != 1: - return None - return output_languages[0] - - return None + return get_language_grouping_key( + metrics_result.input_languages, + metrics_result.output_languages, + mode, + ) def _aggregate_per_language( self, @@ -364,15 +364,24 @@ def _aggregate_per_language( How to determine the grouping language for each dataset result. Defaults to ``MONOLINGUAL_ONLY`` (backward compatible for benchmarks with only monolingual datasets). - Datasets incompatible with the chosen mode are silently skipped. + Datasets incompatible with the chosen mode are skipped with a warning. """ # Collect metric values per language raw_results = defaultdict(list) - for task_result in self.task_results.values(): - for metrics_result in task_result.datasetid_results.values(): + for task_name, task_result in self.task_results.items(): + for dataset_id, metrics_result in task_result.datasetid_results.items(): language_key = self._get_language_grouping_key(metrics_result, aggregation_mode) if language_key is None: - # Skip if incompatible with the aggregation mode + logger.warning( + "Skipping dataset '%s' of task '%s' in per-language aggregation: " + "incompatible with mode '%s' " + "(input_languages=%s, output_languages=%s).", + dataset_id, + task_name, + aggregation_mode.value, + metrics_result.input_languages, + metrics_result.output_languages, + ) continue for metric_name, metric_value in metrics_result.metrics_dict.items(): raw_results[(language_key, metric_name)].append(metric_value) diff --git a/src/workrb/run.py b/src/workrb/run.py index b1d2cd6..e2f97cc 100644 --- a/src/workrb/run.py +++ b/src/workrb/run.py @@ -23,6 +23,7 @@ TaskResults, ) from workrb.tasks.abstract.base import Task +from workrb.types import ExecutionMode, LanguageAggregationMode, get_language_grouping_key logger = logging.getLogger(__name__) setup_logger(__name__, verbose=False) @@ -35,6 +36,8 @@ def evaluate( metrics: dict[str, list[str]] | None = None, description: str = "", force_restart: bool = False, + language_aggregation_mode: LanguageAggregationMode | None = None, + execution_mode: ExecutionMode = ExecutionMode.LAZY, ) -> BenchmarkResults: """ Run benchmark evaluation for a single model. 
@@ -46,9 +49,15 @@ def evaluate( metrics: Optional dict mapping task names to custom metrics lists description: Description for the benchmark run force_restart: If True, ignore checkpoints and restart from beginning - unsupported_lang_mode: If the task does not support a language, - "error" will raise an error, stopping execution. - "skip" will skip the language in the evaluation, and final results. + language_aggregation_mode: How per-language results should be grouped + when calling ``get_summary_metrics()`` on the returned results. + When set and ``execution_mode`` is ``LAZY``, datasets that are + incompatible with the chosen mode are also skipped before + evaluation to avoid unnecessary compute. + Defaults to ``None`` (no filtering). + execution_mode: Controls whether incompatible datasets are skipped + (``LAZY``, default) or evaluated regardless (``ALL``). + Only effective when ``language_aggregation_mode`` is not ``None``. Returns ------- @@ -69,6 +78,9 @@ def evaluate( force_restart=force_restart, ) + if language_aggregation_mode is not None and execution_mode == ExecutionMode.LAZY: + pending_work = _filter_pending_work(pending_work, language_aggregation_mode) + if len(pending_work) == 0: logger.info("All work already completed!") return results @@ -230,6 +242,45 @@ def _get_total_evaluations(tasks: Sequence[Task]) -> int: return sum(len(task.dataset_ids) for task in tasks) +def _filter_pending_work( + pending_work: list[tuple[Task, str]], + language_aggregation_mode: LanguageAggregationMode, +) -> list[tuple[Task, str]]: + """Filter pending work items to only those compatible with the aggregation mode. + + Parameters + ---------- + pending_work : list of (Task, dataset_id) tuples + The pending evaluations to filter. + language_aggregation_mode : LanguageAggregationMode + The aggregation mode to check compatibility against. + + Returns + ------- + list of (Task, dataset_id) tuples + Filtered list containing only compatible work items. + """ + filtered = [] + for task, dataset_id in pending_work: + dataset_languages = task.get_dataset_languages(dataset_id) + input_langs = sorted(lang.value for lang in dataset_languages.input_languages) + output_langs = sorted(lang.value for lang in dataset_languages.output_languages) + key = get_language_grouping_key(input_langs, output_langs, language_aggregation_mode) + if key is None: + logger.warning( + "Skipping dataset '%s' of task '%s': incompatible with " + "language_aggregation_mode '%s' (input_languages=%s, output_languages=%s).", + dataset_id, + task.name, + language_aggregation_mode.value, + input_langs, + output_langs, + ) + else: + filtered.append((task, dataset_id)) + return filtered + + def _init_checkpointing( tasks: Sequence[Task], config: BenchmarkConfig, diff --git a/src/workrb/types.py b/src/workrb/types.py index 3e37ff7..5763d87 100644 --- a/src/workrb/types.py +++ b/src/workrb/types.py @@ -1,5 +1,6 @@ """Shared types and enums used across WorkRB.""" +from collections.abc import Sequence from enum import Enum from typing import NamedTuple @@ -92,6 +93,68 @@ class LanguageAggregationMode(str, Enum): """ +def get_language_grouping_key( + input_languages: Sequence[str], + output_languages: Sequence[str], + mode: LanguageAggregationMode, +) -> str | None: + """Determine the grouping language for a dataset given its languages. + + Returns ``None`` when the dataset is incompatible with the requested + mode, so that the caller can skip it. 
+ + Parameters + ---------- + input_languages : Sequence[str] + Input language codes for the dataset (e.g. query languages). + output_languages : Sequence[str] + Output language codes for the dataset (e.g. target languages). + mode : LanguageAggregationMode + The aggregation mode controlling how the language key is derived. + + Returns + ------- + str or None + Language code to group by, or ``None`` if the dataset is + incompatible with the mode. + """ + if mode == LanguageAggregationMode.MONOLINGUAL_ONLY: + if ( + len(input_languages) != 1 + or len(output_languages) != 1 + or input_languages[0] != output_languages[0] + ): + return None + return input_languages[0] + + if mode == LanguageAggregationMode.CROSSLINGUAL_GROUP_INPUT_LANGUAGES: + if len(input_languages) != 1: + return None + return input_languages[0] + + if mode == LanguageAggregationMode.CROSSLINGUAL_GROUP_OUTPUT_LANGUAGES: + if len(output_languages) != 1: + return None + return output_languages[0] + + return None + + +class ExecutionMode(str, Enum): + """Controls whether ``evaluate()`` skips datasets incompatible with the language aggregation. + + When a ``LanguageAggregationMode`` is specified, ``LAZY`` (the default) avoids + running datasets that would be discarded during aggregation, saving compute. + ``ALL`` evaluates every dataset regardless. + """ + + LAZY = "lazy" + """Skip datasets incompatible with the chosen aggregation mode (default).""" + + ALL = "all" + """Evaluate all datasets regardless of aggregation compatibility.""" + + class LabelType(str, Enum): """Label type enum.""" diff --git a/tests/test_multi_dataset_task.py b/tests/test_multi_dataset_task.py index fc2e43a..ef58d0b 100644 --- a/tests/test_multi_dataset_task.py +++ b/tests/test_multi_dataset_task.py @@ -18,8 +18,14 @@ TaskResultMetadata, TaskResults, ) +from workrb.run import _filter_pending_work from workrb.tasks import ESCOJob2SkillRanking, RankingDataset -from workrb.types import DatasetLanguages, Language, LanguageAggregationMode +from workrb.types import ( + DatasetLanguages, + Language, + LanguageAggregationMode, + get_language_grouping_key, +) class TestMultiDatasetTask: @@ -377,5 +383,231 @@ def test_crosslingual_group_output_skips_multi_output(self): assert result == {} +class TestGetLanguageGroupingKey: + """Tests for the standalone get_language_grouping_key() function.""" + + def test_monolingual_returns_language(self): + """Monolingual dataset returns the shared language.""" + assert ( + get_language_grouping_key(["en"], ["en"], LanguageAggregationMode.MONOLINGUAL_ONLY) + == "en" + ) + + def test_monolingual_skips_crosslingual(self): + """MONOLINGUAL_ONLY returns None for cross-lingual datasets.""" + assert ( + get_language_grouping_key(["en"], ["de"], LanguageAggregationMode.MONOLINGUAL_ONLY) + is None + ) + + def test_monolingual_skips_multilingual(self): + """MONOLINGUAL_ONLY returns None for multilingual datasets.""" + assert ( + get_language_grouping_key( + ["en", "fr"], ["en", "fr"], LanguageAggregationMode.MONOLINGUAL_ONLY + ) + is None + ) + + def test_group_input_returns_input_language(self): + """CROSSLINGUAL_GROUP_INPUT_LANGUAGES returns the singleton input language.""" + assert ( + get_language_grouping_key( + ["en"], ["de"], LanguageAggregationMode.CROSSLINGUAL_GROUP_INPUT_LANGUAGES + ) + == "en" + ) + + def test_group_input_skips_multi_input(self): + """CROSSLINGUAL_GROUP_INPUT_LANGUAGES returns None for multiple input languages.""" + assert ( + get_language_grouping_key( + ["en", "fr"], ["de"], 
LanguageAggregationMode.CROSSLINGUAL_GROUP_INPUT_LANGUAGES + ) + is None + ) + + def test_group_output_returns_output_language(self): + """CROSSLINGUAL_GROUP_OUTPUT_LANGUAGES returns the singleton output language.""" + assert ( + get_language_grouping_key( + ["en"], ["de"], LanguageAggregationMode.CROSSLINGUAL_GROUP_OUTPUT_LANGUAGES + ) + == "de" + ) + + def test_group_output_skips_multi_output(self): + """CROSSLINGUAL_GROUP_OUTPUT_LANGUAGES returns None for multiple output languages.""" + assert ( + get_language_grouping_key( + ["en"], ["de", "fr"], LanguageAggregationMode.CROSSLINGUAL_GROUP_OUTPUT_LANGUAGES + ) + is None + ) + + +class _MockTask: + """Minimal mock task for testing _filter_pending_work.""" + + def __init__(self, name: str, dataset_languages_map: dict[str, DatasetLanguages]): + self.name = name + self._dataset_languages_map = dataset_languages_map + + def get_dataset_languages(self, dataset_id: str) -> DatasetLanguages: + return self._dataset_languages_map[dataset_id] + + +class TestExecutionModeFiltering: + """Tests for _filter_pending_work().""" + + def test_lazy_monolingual_skips_crosslingual(self): + """LAZY + MONOLINGUAL_ONLY skips cross-lingual datasets.""" + task = _MockTask( + "task1", + { + "en": DatasetLanguages( + input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.EN}), + ), + "en_de": DatasetLanguages( + input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.DE}), + ), + }, + ) + pending = [(task, "en"), (task, "en_de")] + result = _filter_pending_work(pending, LanguageAggregationMode.MONOLINGUAL_ONLY) + assert len(result) == 1 + assert result[0][1] == "en" + + def test_lazy_group_input_keeps_crosslingual_singleton_input(self): + """CROSSLINGUAL_GROUP_INPUT_LANGUAGES keeps cross-lingual datasets with singleton input.""" + task = _MockTask( + "task1", + { + "en_de": DatasetLanguages( + input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.DE}), + ), + "multi_in": DatasetLanguages( + input_languages=frozenset({Language.EN, Language.FR}), + output_languages=frozenset({Language.DE}), + ), + }, + ) + pending = [(task, "en_de"), (task, "multi_in")] + result = _filter_pending_work( + pending, LanguageAggregationMode.CROSSLINGUAL_GROUP_INPUT_LANGUAGES + ) + assert len(result) == 1 + assert result[0][1] == "en_de" + + def test_monolingual_only_mixed_task_keeps_only_monolingual(self): + """MONOLINGUAL_ONLY keeps monolingual datasets and filters all cross-lingual ones. + + Simulates a MELO-like task with monolingual datasets (en, de) alongside + several cross-lingual datasets (en_de, fr_de) — only the monolingual + ones should survive filtering. 
+ """ + task = _MockTask( + "melo_task", + { + "en": DatasetLanguages( + input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.EN}), + ), + "de": DatasetLanguages( + input_languages=frozenset({Language.DE}), + output_languages=frozenset({Language.DE}), + ), + "en_de": DatasetLanguages( + input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.DE}), + ), + "fr_de": DatasetLanguages( + input_languages=frozenset({Language.FR}), + output_languages=frozenset({Language.DE}), + ), + "multilingual": DatasetLanguages( + input_languages=frozenset({Language.EN, Language.DE, Language.FR}), + output_languages=frozenset({Language.EN, Language.DE, Language.FR}), + ), + }, + ) + pending = [ + (task, "en"), + (task, "de"), + (task, "en_de"), + (task, "fr_de"), + (task, "multilingual"), + ] + result = _filter_pending_work(pending, LanguageAggregationMode.MONOLINGUAL_ONLY) + kept_ids = [dataset_id for _, dataset_id in result] + assert kept_ids == ["en", "de"] + + def test_group_input_mixed_task_keeps_singleton_input(self): + """CROSSLINGUAL_GROUP_INPUT_LANGUAGES keeps datasets with a single input language. + + Same MELO-like task: monolingual and single-input cross-lingual datasets + survive, but the multilingual one (multiple input languages) is filtered. + """ + task = _MockTask( + "melo_task", + { + "en": DatasetLanguages( + input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.EN}), + ), + "en_de": DatasetLanguages( + input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.DE}), + ), + "fr_de": DatasetLanguages( + input_languages=frozenset({Language.FR}), + output_languages=frozenset({Language.DE}), + ), + "multilingual": DatasetLanguages( + input_languages=frozenset({Language.EN, Language.DE, Language.FR}), + output_languages=frozenset({Language.EN, Language.DE, Language.FR}), + ), + }, + ) + pending = [ + (task, "en"), + (task, "en_de"), + (task, "fr_de"), + (task, "multilingual"), + ] + result = _filter_pending_work( + pending, LanguageAggregationMode.CROSSLINGUAL_GROUP_INPUT_LANGUAGES + ) + kept_ids = [dataset_id for _, dataset_id in result] + assert kept_ids == ["en", "en_de", "fr_de"] + + def test_empty_pending_work(self): + """Empty pending work returns empty list.""" + result = _filter_pending_work([], LanguageAggregationMode.MONOLINGUAL_ONLY) + assert result == [] + + def test_all_datasets_filtered(self): + """All datasets incompatible with the mode results in empty list.""" + task = _MockTask( + "task1", + { + "en_de": DatasetLanguages( + input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.DE}), + ), + "fr_es": DatasetLanguages( + input_languages=frozenset({Language.FR}), + output_languages=frozenset({Language.ES}), + ), + }, + ) + pending = [(task, "en_de"), (task, "fr_es")] + result = _filter_pending_work(pending, LanguageAggregationMode.MONOLINGUAL_ONLY) + assert result == [] + + if __name__ == "__main__": pytest.main([__file__, "-v"]) From f3c5e19a127a31da425849b3ad9372ad25177940 Mon Sep 17 00:00:00 2001 From: federetyk Date: Mon, 23 Feb 2026 22:50:39 +0100 Subject: [PATCH 12/17] test: fix tolerance for regression test to work well on diverse environments --- tests/test_lexical_baselines_regression.py | 74 +++++++++++----------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/tests/test_lexical_baselines_regression.py b/tests/test_lexical_baselines_regression.py index cedf0d8..897bff6 100644 --- a/tests/test_lexical_baselines_regression.py +++ 
b/tests/test_lexical_baselines_regression.py @@ -32,58 +32,58 @@ # --------------------------------------------------------------------------- EXPECTED_METRICS: dict[str, dict[str, float]] = { "BM25-lower": { - "map": 0.27571023549983364, - "rp@5": 0.5165079365079365, - "rp@10": 0.4534807256235827, - "mrr": 0.7105598099130719, + "map": 0.28, + "rp@5": 0.52, + "rp@10": 0.45, + "mrr": 0.71, }, "BM25-cased": { - "map": 0.27507750200419107, - "rp@5": 0.5146031746031746, - "rp@10": 0.45252834467120184, - "mrr": 0.7105598099130719, + "map": 0.28, + "rp@5": 0.51, + "rp@10": 0.45, + "mrr": 0.71, }, "TfIdf-word-lower": { - "map": 0.28485413562322776, - "rp@5": 0.5412698412698412, - "rp@10": 0.47307256235827666, - "mrr": 0.7111908432787992, + "map": 0.28, + "rp@5": 0.54, + "rp@10": 0.47, + "mrr": 0.71, }, "TfIdf-word-cased": { - "map": 0.28485413562322776, - "rp@5": 0.5412698412698412, - "rp@10": 0.47307256235827666, - "mrr": 0.7111908432787992, + "map": 0.28, + "rp@5": 0.54, + "rp@10": 0.47, + "mrr": 0.71, }, "TfIdf-char-lower": { - "map": 0.33555610544690023, - "rp@5": 0.584920634920635, - "rp@10": 0.5117195767195767, - "mrr": 0.7272701297287621, + "map": 0.34, + "rp@5": 0.58, + "rp@10": 0.51, + "mrr": 0.73, }, "TfIdf-char-cased": { - "map": 0.33555610544690023, - "rp@5": 0.584920634920635, - "rp@10": 0.5117195767195767, - "mrr": 0.7272701297287621, + "map": 0.34, + "rp@5": 0.58, + "rp@10": 0.51, + "mrr": 0.73, }, "EditDistance-lower": { - "map": 0.22953525249793413, - "rp@5": 0.42269841269841263, - "rp@10": 0.3778987150415721, - "mrr": 0.6176868635378816, + "map": 0.23, + "rp@5": 0.42, + "rp@10": 0.38, + "mrr": 0.62, }, "EditDistance-cased": { - "map": 0.24233603296868966, - "rp@5": 0.4503174603174603, - "rp@10": 0.4056538170823885, - "mrr": 0.6348209280137445, + "map": 0.24, + "rp@5": 0.45, + "rp@10": 0.41, + "mrr": 0.63, }, "RandomRanking": { - "map": 0.01167258539227986, - "rp@5": 0.009523809523809525, - "rp@10": 0.010714285714285714, - "mrr": 0.041147312519647754, + "map": 0.01, + "rp@5": 0.01, + "rp@10": 0.01, + "mrr": 0.04, }, } @@ -127,7 +127,7 @@ def test_metrics_match_expected(self, job_similarity_task, model_name, model_fac for metric_name, expected_value in expected.items(): actual_value = results[metric_name] - assert actual_value == pytest.approx(expected_value, abs=1e-3), ( + assert actual_value == pytest.approx(expected_value, abs=1e-2), ( f"{model_name} metric '{metric_name}': " f"expected {expected_value}, got {actual_value}" ) From 7d0b8b5daaeb32ff7fdc5b5bd3f95dabef2b6b06 Mon Sep 17 00:00:00 2001 From: federetyk Date: Tue, 24 Feb 2026 21:42:43 +0100 Subject: [PATCH 13/17] refactor: make language_aggregation_mode a non-optional parameter in evaluate() Address PR #34 review feedback from @Mattdl. The aggregation mode now flows consistently through the entire evaluation and aggregation pipeline instead of being an optional parameter. 
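
Sketch of the resulting call shape (``my_model`` and ``my_tasks`` are
placeholders; all other evaluate() arguments are unchanged by this patch):

    from workrb import evaluate, ExecutionMode, LanguageAggregationMode

    results = evaluate(
        model=my_model,
        tasks=my_tasks,
        language_aggregation_mode=LanguageAggregationMode.MONOLINGUAL_ONLY,
        execution_mode=ExecutionMode.LAZY,
    )
    summary = results.get_summary_metrics(
        language_aggregation_mode=LanguageAggregationMode.MONOLINGUAL_ONLY,
    )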
--- src/workrb/metrics/reporting.py | 63 +++++++++-- src/workrb/results.py | 92 +++++++++++++--- src/workrb/run.py | 178 +++++++++++++++++++++++-------- tests/test_multi_dataset_task.py | 71 +++++------- 4 files changed, 289 insertions(+), 115 deletions(-) diff --git a/src/workrb/metrics/reporting.py b/src/workrb/metrics/reporting.py index 8f15839..156eb56 100644 --- a/src/workrb/metrics/reporting.py +++ b/src/workrb/metrics/reporting.py @@ -7,6 +7,7 @@ from typing import Literal from workrb.results import BenchmarkResults +from workrb.types import LanguageAggregationMode logger = logging.getLogger(__name__) @@ -21,6 +22,7 @@ def format_results( show_error: bool = True, error_type: Literal["ci_margin", "stderr", "std"] = "ci_margin", show_only_key_metrics: bool = True, + language_aggregation_mode: LanguageAggregationMode | None = None, ) -> str: """ Display benchmark results using BenchmarkResults aggregation methods. @@ -36,11 +38,19 @@ def format_results( show_error: Whether to show error bars error_type: Type of error to show - "ci_margin", "stderr", or "std" show_only_key_metrics: If True, only show key metrics defined in task groups + language_aggregation_mode: How to determine the grouping language for + aggregation. When ``None``, reads the mode stored in + ``results.metadata.language_aggregation_mode``. Returns ------- String containing formatted results """ + if language_aggregation_mode is None: + language_aggregation_mode = LanguageAggregationMode( + results.metadata.language_aggregation_mode + ) + # Get aggregations - always include mean and error_type aggregations = ("mean", error_type) if show_error else ("mean",) @@ -50,30 +60,67 @@ def format_results( for metrics in results.key_metrics_by_task_group.values(): key_metrics.update(metrics) + # Compute all aggregation levels at once + all_results = results._get_summary_metrics( + aggregations=aggregations, + language_aggregation_mode=language_aggregation_mode, + ) + + # Partition results by tag name prefix for selective display + results_by_level: dict[str, dict] = { + "mean_per_task": {}, + "mean_per_task_group": {}, + "mean_per_language": {}, + "mean_benchmark": {}, + } + for tag, value in all_results.items(): + if tag.name in results_by_level: + results_by_level[tag.name][tag] = value + # Display each requested aggregation level metric_strs = [] if display_per_task: - agg_results = results._aggregate_per_task(aggregations=aggregations) metric_strs.append( - _display_aggregation(agg_results, key_metrics, value_format, show_error, error_type) + _display_aggregation( + results_by_level["mean_per_task"], + key_metrics, + value_format, + show_error, + error_type, + ) ) if display_per_task_group: - agg_results = results._aggregate_per_task_group(aggregations=aggregations) metric_strs.append( - _display_aggregation(agg_results, key_metrics, value_format, show_error, error_type) + _display_aggregation( + results_by_level["mean_per_task_group"], + key_metrics, + value_format, + show_error, + error_type, + ) ) if display_per_language: - agg_results = results._aggregate_per_language(aggregations=aggregations) metric_strs.append( - _display_aggregation(agg_results, key_metrics, value_format, show_error, error_type) + _display_aggregation( + results_by_level["mean_per_language"], + key_metrics, + value_format, + show_error, + error_type, + ) ) if display_overall: - agg_results = results._aggregate_benchmark(aggregations=aggregations) metric_strs.append( - _display_aggregation(agg_results, key_metrics, value_format, show_error, 
error_type) + _display_aggregation( + results_by_level["mean_benchmark"], + key_metrics, + value_format, + show_error, + error_type, + ) ) return "\n".join(metric_strs) diff --git a/src/workrb/results.py b/src/workrb/results.py index a8359d8..c0d70a8 100644 --- a/src/workrb/results.py +++ b/src/workrb/results.py @@ -60,6 +60,7 @@ class BenchmarkMetadata(BaseModel): num_tasks: int = Field(ge=1) languages: list[str] resumed_from_checkpoint: bool = False + language_aggregation_mode: str = LanguageAggregationMode.MONOLINGUAL_ONLY.value class ResultTagString(BaseModel): @@ -99,10 +100,11 @@ class BenchmarkResults(BaseModel): def __str__(self) -> str: """String representation of the benchmark results.""" + mode = LanguageAggregationMode(self.metadata.language_aggregation_mode) lines = [ "BenchmarkResults", "=" * 80, - pprint.pformat(self.get_summary_metrics()), + pprint.pformat(self.get_summary_metrics(language_aggregation_mode=mode)), ] return "\n".join(lines) @@ -126,43 +128,98 @@ def get_summary_metrics( How to determine the grouping language for per-language aggregation. Defaults to ``MONOLINGUAL_ONLY``. """ - mean_per_task = self._aggregate_per_task( + combined = self._get_summary_metrics( + aggregations=aggregations, + language_aggregation_mode=language_aggregation_mode, + ) + return {str(k): v for k, v in combined.items()} + + def _get_summary_metrics( + self, + aggregations: tuple = ("mean", "ci_margin"), + language_aggregation_mode: LanguageAggregationMode = LanguageAggregationMode.MONOLINGUAL_ONLY, + ) -> dict[ResultTagString, float]: + """Compute all aggregation levels and return combined results. + + Returns a single dict with ``ResultTagString`` keys covering: + ``mean_per_task``, ``mean_per_task_group``, ``mean_per_task_type``, + ``mean_per_language``, and ``mean_benchmark``. + + Parameters + ---------- + aggregations : tuple + Statistics to compute (e.g. ``"mean"``, ``"ci_margin"``). + language_aggregation_mode : LanguageAggregationMode + How to determine the grouping language for aggregation. 
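+
+        Notes
+        -----
+        Keys stringify as ``<name>/<grouping_name>/<metric_name>/<aggregation>``,
+        e.g. ``mean_per_task/task1/map/mean``.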
+ """ + mean_per_task = self._aggregate_datasetids_per_task( + language_aggregation_mode=language_aggregation_mode, aggregations=aggregations, ) mean_per_task_group = self._aggregate_per_task_group( - aggregations=aggregations, task_results=mean_per_task + language_aggregation_mode=language_aggregation_mode, + aggregations=aggregations, + task_results=mean_per_task, ) mean_per_task_type = self._aggregate_per_task_type( - aggregations=aggregations, task_group_results=mean_per_task_group + language_aggregation_mode=language_aggregation_mode, + aggregations=aggregations, + task_group_results=mean_per_task_group, ) mean_benchmark = self._aggregate_benchmark( - aggregations=aggregations, task_type_results=mean_per_task_type + language_aggregation_mode=language_aggregation_mode, + aggregations=aggregations, + task_type_results=mean_per_task_type, ) mean_per_language = self._aggregate_per_language( aggregations=aggregations, aggregation_mode=language_aggregation_mode, ) - combined = { + return { **mean_per_language, **mean_per_task, **mean_per_task_group, **mean_per_task_type, **mean_benchmark, } - return {str(k): v for k, v in combined.items()} - def _aggregate_per_task( + def _aggregate_datasetids_per_task( self, + language_aggregation_mode: LanguageAggregationMode, tag_name: str = "mean_per_task", aggregations: tuple = ("mean", "stderr", "ci_margin"), ) -> dict[ResultTagString, float]: - """Aggregate results per task, by aggregating over languages within tasks.""" - # Collect metric values per task + """Aggregate dataset results per task, filtering by language aggregation mode. + + For each task, only datasets compatible with the given + ``language_aggregation_mode`` are included in the per-task average. + Incompatible datasets are skipped with a warning, using the same + ``_get_language_grouping_key`` logic as ``_aggregate_per_language``. + + This is the root aggregation level: per-task results feed into + per-task-group, per-task-type, and benchmark-level aggregations, + so filtering here ensures consistency across the entire chain. + """ raw_results = defaultdict(list) for task_name, task_result in self.task_results.items(): - for lang_metrics_result in task_result.datasetid_results.values(): - for metric_name, metric_value in lang_metrics_result.metrics_dict.items(): + for dataset_id, metrics_result in task_result.datasetid_results.items(): + language_key = self._get_language_grouping_key( + metrics_result, language_aggregation_mode + ) + if language_key is None: + logger.warning( + "Skipping dataset '%s' of task '%s' in per-task aggregation: " + "incompatible with mode '%s' " + "(input_languages=%s, output_languages=%s).", + dataset_id, + task_name, + language_aggregation_mode.value, + metrics_result.input_languages, + metrics_result.output_languages, + ) + continue + for metric_name, metric_value in metrics_result.metrics_dict.items(): raw_results[(task_name, metric_name)].append(metric_value) # Compute stats @@ -179,6 +236,7 @@ def _aggregate_per_task( def _aggregate_per_task_group( self, + language_aggregation_mode: LanguageAggregationMode, tag_name: str = "mean_per_task_group", aggregations: tuple = ("mean", "stderr", "ci_margin"), task_results: dict[ResultTagString, float] | None = None, @@ -187,7 +245,9 @@ def _aggregate_per_task_group( First aggregates over languages within tasks, then over tasks within task groups. 
""" - task_results = task_results or self._aggregate_per_task(aggregations=("mean",)) + task_results = task_results or self._aggregate_datasetids_per_task( + language_aggregation_mode=language_aggregation_mode, aggregations=("mean",) + ) task_group_list_results = defaultdict(list) for task_result_tag, value in task_results.items(): @@ -221,6 +281,7 @@ def _aggregate_per_task_group( def _aggregate_per_task_type( self, + language_aggregation_mode: LanguageAggregationMode, tag_name: str = "mean_per_task_type", aggregations: tuple = ("mean", "stderr", "ci_margin"), task_group_results: dict[ResultTagString, float] | None = None, @@ -231,7 +292,7 @@ def _aggregate_per_task_type( then over task groups within task types. """ task_group_results = task_group_results or self._aggregate_per_task_group( - aggregations=("mean",) + language_aggregation_mode=language_aggregation_mode, aggregations=("mean",) ) # Mapping from task group name to task type name @@ -275,6 +336,7 @@ def _aggregate_per_task_type( def _aggregate_benchmark( self, + language_aggregation_mode: LanguageAggregationMode, tag_name: str = "mean_benchmark", aggregations: tuple = ("mean", "stderr", "ci_margin"), task_type_results: dict[ResultTagString, float] | None = None, @@ -288,7 +350,7 @@ def _aggregate_benchmark( 4. Aggregates over task types for final benchmark scores """ task_type_results = task_type_results or self._aggregate_per_task_type( - aggregations=("mean",) + language_aggregation_mode=language_aggregation_mode, aggregations=("mean",) ) metric_list_results = defaultdict(list) diff --git a/src/workrb/run.py b/src/workrb/run.py index e2f97cc..8a4da38 100644 --- a/src/workrb/run.py +++ b/src/workrb/run.py @@ -36,7 +36,7 @@ def evaluate( metrics: dict[str, list[str]] | None = None, description: str = "", force_restart: bool = False, - language_aggregation_mode: LanguageAggregationMode | None = None, + language_aggregation_mode: LanguageAggregationMode = LanguageAggregationMode.MONOLINGUAL_ONLY, execution_mode: ExecutionMode = ExecutionMode.LAZY, ) -> BenchmarkResults: """ @@ -51,13 +51,12 @@ def evaluate( force_restart: If True, ignore checkpoints and restart from beginning language_aggregation_mode: How per-language results should be grouped when calling ``get_summary_metrics()`` on the returned results. - When set and ``execution_mode`` is ``LAZY``, datasets that are + When ``execution_mode`` is ``LAZY``, datasets that are incompatible with the chosen mode are also skipped before evaluation to avoid unnecessary compute. - Defaults to ``None`` (no filtering). + Defaults to ``MONOLINGUAL_ONLY``. execution_mode: Controls whether incompatible datasets are skipped (``LAZY``, default) or evaluated regardless (``ALL``). - Only effective when ``language_aggregation_mode`` is not ``None``. 
Returns ------- @@ -76,19 +75,26 @@ def evaluate( config=config, model=model, force_restart=force_restart, + language_aggregation_mode=language_aggregation_mode, ) - if language_aggregation_mode is not None and execution_mode == ExecutionMode.LAZY: - pending_work = _filter_pending_work(pending_work, language_aggregation_mode) + # Determine which datasets are in scope for this run + if execution_mode == ExecutionMode.LAZY: + dataset_ids_to_evaluate = _get_dataset_ids_to_evaluate(tasks, language_aggregation_mode) + else: + dataset_ids_to_evaluate = {task.name: list(task.dataset_ids) for task in tasks} + + pending_work = _filter_pending_work(pending_work, dataset_ids_to_evaluate) + total_evaluations = sum(len(dids) for dids in dataset_ids_to_evaluate.values()) if len(pending_work) == 0: logger.info("All work already completed!") return results logger.info(f"Running WorkRB for model: {model.name}") - logger.info(get_tasks_overview(tasks)) + logger.info(get_tasks_overview(tasks, dataset_ids_to_evaluate=dataset_ids_to_evaluate)) logger.info(f"{'=' * 60}") - logger.info(f"Pending work: {len(pending_work)} / {_get_total_evaluations(tasks)} evaluations") + logger.info(f"Pending work: {len(pending_work)} / {total_evaluations} evaluations") # Group pending work by task for better organization work_by_task = {} @@ -100,21 +106,19 @@ def evaluate( # Run pending work start_time_benchmark = time.time() results = _run_pending_work( - tasks=tasks, config=config, work_by_task=work_by_task, results=results, model=model, metrics=metrics, + total_evaluations=total_evaluations, ) if results.metadata.resumed_from_checkpoint: logger.info("✓ Successfully resuming from checkpoint") # Update metadata results.metadata.total_evaluation_time = time.time() - start_time_benchmark - results.metadata.resumed_from_checkpoint = len(pending_work) < sum( - len(task.dataset_ids) for task in tasks - ) + results.metadata.resumed_from_checkpoint = len(pending_work) < total_evaluations # Save config and results config.save_final_result_artifacts(results) @@ -129,6 +133,7 @@ def evaluate( display_per_task_group=False, display_per_language=False, display_overall=True, + language_aggregation_mode=language_aggregation_mode, ) ) logger.info(f"{'=' * 60}") @@ -188,8 +193,25 @@ def evaluate_multiple_models( return all_results -def get_tasks_overview(tasks: Sequence[Task]) -> str: - """Get information about configured tasks as a formatted string summary.""" +def get_tasks_overview( + tasks: Sequence[Task], + dataset_ids_to_evaluate: dict[str, list[str]] | None = None, +) -> str: + """Get information about configured tasks as a formatted string summary. + + Parameters + ---------- + tasks : Sequence[Task] + All tasks configured for this benchmark run. + dataset_ids_to_evaluate : dict[str, list[str]] or None + When provided, only tasks present as keys with non-empty lists are + shown, and only the listed dataset IDs appear under each task. + When ``None``, all tasks and their full ``dataset_ids`` are shown. 
+ """ + # When filtering, only keep tasks that have at least one dataset to evaluate + if dataset_ids_to_evaluate is not None: + tasks = [t for t in tasks if dataset_ids_to_evaluate.get(t.name)] + # Calculate summary statistics num_tasks = len(tasks) task_groups = {task.task_group for task in tasks if task.task_group} @@ -219,7 +241,12 @@ def get_tasks_overview(tasks: Sequence[Task]) -> str: lines.append(f"{task_name:<40} {group:<20} {task_languages:<20}") # Add size one-liner for each dataset - for dataset_id in task.dataset_ids: + dataset_ids = ( + dataset_ids_to_evaluate[task.name] + if dataset_ids_to_evaluate is not None + else task.dataset_ids + ) + for dataset_id in dataset_ids: size_info = task.get_size_oneliner(dataset_id) if size_info: lines.append(f" └─ {dataset_id}: {size_info}") @@ -237,48 +264,77 @@ def _get_all_languages(tasks: Sequence[Task]) -> list[str]: return sorted([str(lang) for lang in languages]) -def _get_total_evaluations(tasks: Sequence[Task]) -> int: - """Get the total number of evaluations.""" - return sum(len(task.dataset_ids) for task in tasks) +def _get_dataset_ids_to_evaluate( + tasks: Sequence[Task], + language_aggregation_mode: LanguageAggregationMode, +) -> dict[str, list[str]]: + """Compute which dataset IDs per task are compatible with the aggregation mode. + + This is the single source of truth for the run's scope when + ``execution_mode`` is ``LAZY``. The returned dict drives the overview + display, total-evaluation count, and pending-work filtering. + + Parameters + ---------- + tasks : Sequence[Task] + All tasks configured for this benchmark run. + language_aggregation_mode : LanguageAggregationMode + The aggregation mode to check compatibility against. + + Returns + ------- + dict[str, list[str]] + Mapping of task name → list of dataset IDs to evaluate. + Tasks whose datasets are all incompatible still appear as keys + with an empty list. + """ + result: dict[str, list[str]] = {} + for task in tasks: + filtered = [] + for dataset_id in task.dataset_ids: + dataset_languages = task.get_dataset_languages(dataset_id) + input_langs = sorted(lang.value for lang in dataset_languages.input_languages) + output_langs = sorted(lang.value for lang in dataset_languages.output_languages) + key = get_language_grouping_key(input_langs, output_langs, language_aggregation_mode) + if key is None: + logger.warning( + "Skipping dataset '%s' of task '%s': incompatible with " + "language_aggregation_mode '%s' (input_languages=%s, output_languages=%s).", + dataset_id, + task.name, + language_aggregation_mode.value, + input_langs, + output_langs, + ) + else: + filtered.append(dataset_id) + result[task.name] = filtered + return result def _filter_pending_work( pending_work: list[tuple[Task, str]], - language_aggregation_mode: LanguageAggregationMode, + dataset_ids_to_evaluate: dict[str, list[str]], ) -> list[tuple[Task, str]]: - """Filter pending work items to only those compatible with the aggregation mode. + """Keep only pending work items whose dataset ID is in the evaluation scope. Parameters ---------- pending_work : list of (Task, dataset_id) tuples - The pending evaluations to filter. - language_aggregation_mode : LanguageAggregationMode - The aggregation mode to check compatibility against. + The pending evaluations (already filtered by checkpoint). + dataset_ids_to_evaluate : dict[str, list[str]] + Mapping of task name → dataset IDs that are in scope for this run, + as returned by :func:`_get_dataset_ids_to_evaluate`. 
Returns ------- list of (Task, dataset_id) tuples - Filtered list containing only compatible work items. + Filtered list containing only in-scope work items. """ - filtered = [] - for task, dataset_id in pending_work: - dataset_languages = task.get_dataset_languages(dataset_id) - input_langs = sorted(lang.value for lang in dataset_languages.input_languages) - output_langs = sorted(lang.value for lang in dataset_languages.output_languages) - key = get_language_grouping_key(input_langs, output_langs, language_aggregation_mode) - if key is None: - logger.warning( - "Skipping dataset '%s' of task '%s': incompatible with " - "language_aggregation_mode '%s' (input_languages=%s, output_languages=%s).", - dataset_id, - task.name, - language_aggregation_mode.value, - input_langs, - output_langs, - ) - else: - filtered.append((task, dataset_id)) - return filtered + scope = { + (task_name, did) for task_name, dids in dataset_ids_to_evaluate.items() for did in dids + } + return [(task, did) for task, did in pending_work if (task.name, did) in scope] def _init_checkpointing( @@ -286,9 +342,23 @@ def _init_checkpointing( config: BenchmarkConfig, model: ModelInterface, force_restart: bool, + language_aggregation_mode: LanguageAggregationMode = LanguageAggregationMode.MONOLINGUAL_ONLY, ) -> tuple[BenchmarkResults, list[tuple[Task, str]]]: """Initialize checkpointing. + Parameters + ---------- + tasks : Sequence[Task] + Tasks to evaluate. + config : BenchmarkConfig + Benchmark configuration. + model : ModelInterface + Model to evaluate. + force_restart : bool + If True, ignore checkpoints and restart from beginning. + language_aggregation_mode : LanguageAggregationMode + Language aggregation mode to store in the results metadata. + Returns ------- Tuple containing the results and the pending work. @@ -309,6 +379,17 @@ def _init_checkpointing( logger.info( f"Restored {len(existing_results.task_results)} completed tasks from checkpoint" ) + # Warn if the checkpoint was saved with a different aggregation mode + stored_mode = existing_results.metadata.language_aggregation_mode + if stored_mode != language_aggregation_mode.value: + logger.warning( + "Checkpoint was saved with language_aggregation_mode='%s', " + "but current call uses '%s'. Updating stored mode to '%s'.", + stored_mode, + language_aggregation_mode.value, + language_aggregation_mode.value, + ) + existing_results.metadata.language_aggregation_mode = language_aggregation_mode.value pending_work = config.get_pending_work( results=existing_results, @@ -332,6 +413,7 @@ def _init_checkpointing( num_tasks=len(tasks), languages=_get_all_languages(tasks), resumed_from_checkpoint=False, + language_aggregation_mode=language_aggregation_mode.value, ), key_metrics_by_task_group=key_metrics_by_task_group, ) @@ -339,20 +421,22 @@ def _init_checkpointing( def _run_pending_work( - tasks: Sequence[Task], config: BenchmarkConfig, work_by_task: dict[str, dict[str, Any]], results: BenchmarkResults, model: ModelInterface, metrics: dict[str, list[str]] | None, + total_evaluations: int, ) -> BenchmarkResults: """Run pending evaluations. Args: - work_by_task: Dictionary of task names to their pending languages. + config: Benchmark configuration for checkpointing. + work_by_task: Dictionary of task names to their pending datasets. results: BenchmarkResults object to store results. model: ModelInterface object to evaluate. metrics: Dictionary of task names to their custom metrics. + total_evaluations: Total number of compatible evaluations (for progress display). 
""" # Run pending evaluations run_idx = results.get_num_evaluation_results() # Already completed evaluations @@ -362,7 +446,7 @@ def _run_pending_work( logger.info(f"{'=' * 60}") logger.info(f"Evaluating task: {task.name}") - logger.info(f"Completed {run_idx} / {_get_total_evaluations(tasks)} evaluations. ") + logger.info(f"Completed {run_idx} / {total_evaluations} evaluations. ") logger.info(f"Pending datasets for this task: {len(pending_dataset_ids)}") # Initialize task results if not exists @@ -419,5 +503,5 @@ def _run_pending_work( logger.error(f"Error: {e}") raise e - logger.info(f"Completed {run_idx} / {_get_total_evaluations(tasks)} evaluations. ") + logger.info(f"Completed {run_idx} / {total_evaluations} evaluations. ") return results diff --git a/tests/test_multi_dataset_task.py b/tests/test_multi_dataset_task.py index ef58d0b..400fd8c 100644 --- a/tests/test_multi_dataset_task.py +++ b/tests/test_multi_dataset_task.py @@ -18,7 +18,7 @@ TaskResultMetadata, TaskResults, ) -from workrb.run import _filter_pending_work +from workrb.run import _get_dataset_ids_to_evaluate from workrb.tasks import ESCOJob2SkillRanking, RankingDataset from workrb.types import ( DatasetLanguages, @@ -447,21 +447,22 @@ def test_group_output_skips_multi_output(self): class _MockTask: - """Minimal mock task for testing _filter_pending_work.""" + """Minimal mock task for testing _get_dataset_ids_to_evaluate.""" def __init__(self, name: str, dataset_languages_map: dict[str, DatasetLanguages]): self.name = name self._dataset_languages_map = dataset_languages_map + self.dataset_ids = list(dataset_languages_map.keys()) def get_dataset_languages(self, dataset_id: str) -> DatasetLanguages: return self._dataset_languages_map[dataset_id] -class TestExecutionModeFiltering: - """Tests for _filter_pending_work().""" +class TestGetDatasetIdsToEvaluate: + """Tests for _get_dataset_ids_to_evaluate().""" - def test_lazy_monolingual_skips_crosslingual(self): - """LAZY + MONOLINGUAL_ONLY skips cross-lingual datasets.""" + def test_monolingual_only_skips_crosslingual(self): + """MONOLINGUAL_ONLY skips cross-lingual datasets.""" task = _MockTask( "task1", { @@ -475,12 +476,10 @@ def test_lazy_monolingual_skips_crosslingual(self): ), }, ) - pending = [(task, "en"), (task, "en_de")] - result = _filter_pending_work(pending, LanguageAggregationMode.MONOLINGUAL_ONLY) - assert len(result) == 1 - assert result[0][1] == "en" + result = _get_dataset_ids_to_evaluate([task], LanguageAggregationMode.MONOLINGUAL_ONLY) + assert result == {"task1": ["en"]} - def test_lazy_group_input_keeps_crosslingual_singleton_input(self): + def test_group_input_keeps_crosslingual_singleton_input(self): """CROSSLINGUAL_GROUP_INPUT_LANGUAGES keeps cross-lingual datasets with singleton input.""" task = _MockTask( "task1", @@ -495,12 +494,10 @@ def test_lazy_group_input_keeps_crosslingual_singleton_input(self): ), }, ) - pending = [(task, "en_de"), (task, "multi_in")] - result = _filter_pending_work( - pending, LanguageAggregationMode.CROSSLINGUAL_GROUP_INPUT_LANGUAGES + result = _get_dataset_ids_to_evaluate( + [task], LanguageAggregationMode.CROSSLINGUAL_GROUP_INPUT_LANGUAGES ) - assert len(result) == 1 - assert result[0][1] == "en_de" + assert result == {"task1": ["en_de"]} def test_monolingual_only_mixed_task_keeps_only_monolingual(self): """MONOLINGUAL_ONLY keeps monolingual datasets and filters all cross-lingual ones. 
@@ -534,16 +531,8 @@ def test_monolingual_only_mixed_task_keeps_only_monolingual(self): ), }, ) - pending = [ - (task, "en"), - (task, "de"), - (task, "en_de"), - (task, "fr_de"), - (task, "multilingual"), - ] - result = _filter_pending_work(pending, LanguageAggregationMode.MONOLINGUAL_ONLY) - kept_ids = [dataset_id for _, dataset_id in result] - assert kept_ids == ["en", "de"] + result = _get_dataset_ids_to_evaluate([task], LanguageAggregationMode.MONOLINGUAL_ONLY) + assert result == {"melo_task": ["en", "de"]} def test_group_input_mixed_task_keeps_singleton_input(self): """CROSSLINGUAL_GROUP_INPUT_LANGUAGES keeps datasets with a single input language. @@ -572,25 +561,18 @@ def test_group_input_mixed_task_keeps_singleton_input(self): ), }, ) - pending = [ - (task, "en"), - (task, "en_de"), - (task, "fr_de"), - (task, "multilingual"), - ] - result = _filter_pending_work( - pending, LanguageAggregationMode.CROSSLINGUAL_GROUP_INPUT_LANGUAGES + result = _get_dataset_ids_to_evaluate( + [task], LanguageAggregationMode.CROSSLINGUAL_GROUP_INPUT_LANGUAGES ) - kept_ids = [dataset_id for _, dataset_id in result] - assert kept_ids == ["en", "en_de", "fr_de"] + assert result == {"melo_task": ["en", "en_de", "fr_de"]} - def test_empty_pending_work(self): - """Empty pending work returns empty list.""" - result = _filter_pending_work([], LanguageAggregationMode.MONOLINGUAL_ONLY) - assert result == [] + def test_no_tasks(self): + """Empty task list returns empty dict.""" + result = _get_dataset_ids_to_evaluate([], LanguageAggregationMode.MONOLINGUAL_ONLY) + assert result == {} - def test_all_datasets_filtered(self): - """All datasets incompatible with the mode results in empty list.""" + def test_all_datasets_incompatible(self): + """All datasets incompatible with the mode results in empty list for the task.""" task = _MockTask( "task1", { @@ -604,9 +586,8 @@ def test_all_datasets_filtered(self): ), }, ) - pending = [(task, "en_de"), (task, "fr_es")] - result = _filter_pending_work(pending, LanguageAggregationMode.MONOLINGUAL_ONLY) - assert result == [] + result = _get_dataset_ids_to_evaluate([task], LanguageAggregationMode.MONOLINGUAL_ONLY) + assert result == {"task1": []} if __name__ == "__main__": From 48254868b7d7c6a4791c8cb17b25987353785b33 Mon Sep 17 00:00:00 2001 From: federetyk Date: Tue, 24 Feb 2026 22:44:42 +0100 Subject: [PATCH 14/17] refactor: migrate freelancer task to dataset_id-based language mapping Override get_dataset_languages() and languages_to_dataset_ids() in the freelancer candidate ranking tasks, replacing the Language.CROSS sentinel with a proper DATASET_LANGUAGES_MAP that describes each dataset's input and output languages. This lets the aggregation and filtering logic handle the multilingual dataset ("cross_lingual" renamed to "multilingual") as a regular entry rather than a special-cased language enum value. 
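
Abridged shape of the new mapping (two of the six entries shown):

    DATASET_LANGUAGES_MAP = {
        "en": DatasetLanguages(
            input_languages=frozenset({Language.EN}),
            output_languages=frozenset({Language.EN}),
        ),
        "multilingual": DatasetLanguages(
            input_languages=frozenset({Language.EN}),
            output_languages=frozenset(
                {Language.EN, Language.DE, Language.ES, Language.FR, Language.NL}
            ),
        ),
    }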
--- .../ranking/freelancer_project_matching.py | 112 +++++++++++++----- src/workrb/types.py | 1 - 2 files changed, 85 insertions(+), 28 deletions(-) diff --git a/src/workrb/tasks/ranking/freelancer_project_matching.py b/src/workrb/tasks/ranking/freelancer_project_matching.py index fe712ef..f26856e 100644 --- a/src/workrb/tasks/ranking/freelancer_project_matching.py +++ b/src/workrb/tasks/ranking/freelancer_project_matching.py @@ -9,7 +9,7 @@ from workrb.registry import register_task from workrb.tasks.abstract.base import DatasetSplit, LabelType, Language from workrb.tasks.abstract.ranking_base import RankingDataset, RankingTask, RankingTaskGroup -from workrb.types import ModelInputType +from workrb.types import DatasetLanguages, ModelInputType class _BaseCandidateRanking(RankingTask, ABC): @@ -26,7 +26,7 @@ class _BaseCandidateRanking(RankingTask, ABC): HuggingFace Dataset: https://huggingface.co/datasets/MaltCompany/Freelancer-Project-Matching Languages: en, de, es, fr, nl. - + cross_lingual to test language agnostic matching + + multilingual corpus to test language-agnostic matching. The dataset contains: - ``queries``: 200 search queries @@ -46,14 +46,34 @@ class _BaseCandidateRanking(RankingTask, ABC): A threshold is applied on the score to binarize the interactions into relevant and non-relevant pairs. """ - SUPPORTED_DATASET_LANGUAGES = [ - Language.DE, - Language.EN, - Language.ES, - Language.FR, - Language.NL, - Language.CROSS, - ] + DATASET_LANGUAGES_MAP: dict[str, DatasetLanguages] = { + "en": DatasetLanguages( + input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.EN}), + ), + "de": DatasetLanguages( + input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.DE}), + ), + "es": DatasetLanguages( + input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.ES}), + ), + "fr": DatasetLanguages( + input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.FR}), + ), + "nl": DatasetLanguages( + input_languages=frozenset({Language.EN}), + output_languages=frozenset({Language.NL}), + ), + "multilingual": DatasetLanguages( + input_languages=frozenset({Language.EN}), + output_languages=frozenset( + {Language.EN, Language.DE, Language.ES, Language.FR, Language.NL} + ), + ), + } RELEVANCE_SCORE_THRESHOLD = 0.8 @@ -74,7 +94,7 @@ def supported_query_languages(self) -> list[Language]: @property def supported_target_languages(self) -> list[Language]: """Supported target languages.""" - return self.SUPPORTED_DATASET_LANGUAGES + return [Language.DE, Language.EN, Language.ES, Language.FR, Language.NL] @property def split_test_fraction(self) -> float: @@ -91,6 +111,49 @@ def target_input_type(self) -> ModelInputType: """Target input type for profiles.""" return ModelInputType.CANDIDATE_PROFILE_STRING + def languages_to_dataset_ids(self, languages: list[Language]) -> list[str]: + """Filter datasets based on the requested languages. + + A dataset is included when all of its input and output languages are + within the requested set. + + Parameters + ---------- + languages : list[Language] + List of Language enums requested for evaluation. + + Returns + ------- + list[str] + List of dataset identifier strings. 
+ """ + lang_codes = {lang.value for lang in languages} + result = [] + for dataset_id, ds_langs in self.DATASET_LANGUAGES_MAP.items(): + all_langs = { + lang.value for lang in ds_langs.input_languages | ds_langs.output_languages + } + if all_langs <= lang_codes: + result.append(dataset_id) + return result + + def get_dataset_languages(self, dataset_id: str) -> DatasetLanguages: + """Map a dataset ID to its input/output languages. + + Parameters + ---------- + dataset_id : str + One of ``"en"``, ``"de"``, ``"es"``, ``"fr"``, ``"nl"``, or + ``"multilingual"``. + + Returns + ------- + DatasetLanguages + Named tuple with ``input_languages`` (query) and + ``output_languages`` (corpus). + """ + return self.DATASET_LANGUAGES_MAP[dataset_id] + @staticmethod def _candidate_profile_to_str(candidate: dict[str, Any]) -> str: experiences = "\n\n".join( @@ -112,7 +175,6 @@ def _input_to_str(query: dict[str, str]) -> str: def _load_and_format_data( self, split: DatasetSplit, - language: Language, dataset_id: str, query_key: str, score_key: str, @@ -121,8 +183,8 @@ def _load_and_format_data( if split != DatasetSplit.TEST: raise ValueError(f"Split '{split}' not supported. Use TEST") - if language not in self.SUPPORTED_DATASET_LANGUAGES: - raise ValueError(f"Language '{language}' not supported.") + if dataset_id not in self.DATASET_LANGUAGES_MAP: + raise ValueError(f"Dataset '{dataset_id}' not supported.") query_df = pd.DataFrame( load_dataset("MaltCompany/Freelancer-Project-Matching", query_key)["test"] @@ -134,8 +196,10 @@ def _load_and_format_data( load_dataset("MaltCompany/Freelancer-Project-Matching", score_key)["test"] ) - if language != Language.CROSS: - candidate_df = candidate_df[candidate_df["language"] == language.value] + # For monolingual datasets, filter candidates to the target language. + # For the "multilingual" dataset, use all candidates. + if dataset_id != "multilingual": + candidate_df = candidate_df[candidate_df["language"] == dataset_id] # create labels candidate_df = ( @@ -241,11 +305,8 @@ def _input_to_str(input_dict: dict[str, str]) -> str: return f"{input_dict['title']}\n\n{input_dict['description']}" def load_dataset(self, dataset_id: str, split: DatasetSplit) -> RankingDataset: - """Load Job Title Similarity data from the HuggingFace dataset.""" - language = Language(dataset_id) - return self._load_and_format_data( - split, language, dataset_id, "briefs", "brief_scores", "brief_id" - ) + """Load project-candidate matching data from the HuggingFace dataset.""" + return self._load_and_format_data(split, dataset_id, "briefs", "brief_scores", "brief_id") @register_task() @@ -262,7 +323,7 @@ class SearchQueryCandidateRanking(_BaseCandidateRanking): HuggingFace Dataset: https://huggingface.co/datasets/MaltCompany/Freelancer-Project-Matching Languages: en, de, es, fr, nl. - + cross_lingual to test language agnostic matching + + multilingual corpus to test language-agnostic matching. 
The dataset contains: - ``queries``: 200 search queries @@ -301,8 +362,5 @@ def _input_to_str(input_dict: dict[str, str]) -> str: return input_dict["value"] def load_dataset(self, dataset_id: str, split: DatasetSplit) -> RankingDataset: - """Load Job Title Similarity data from the HuggingFace dataset.""" - language = Language(dataset_id) - return self._load_and_format_data( - split, language, dataset_id, "queries", "query_scores", "query_id" - ) + """Load query-candidate matching data from the HuggingFace dataset.""" + return self._load_and_format_data(split, dataset_id, "queries", "query_scores", "query_id") diff --git a/src/workrb/types.py b/src/workrb/types.py index 5763d87..e69b235 100644 --- a/src/workrb/types.py +++ b/src/workrb/types.py @@ -39,7 +39,6 @@ class Language(str, Enum): JA = "ja" KO = "ko" ZH = "zh" - CROSS = "cross_lingual" class DatasetLanguages(NamedTuple): From bbe0ac359f3a87ba5ee81fb449d88c73946c099a Mon Sep 17 00:00:00 2001 From: federetyk Date: Wed, 25 Feb 2026 20:00:10 +0100 Subject: [PATCH 15/17] refactor: use language-grouped averaging in per-task aggregation Per-task aggregation now groups datasets by language before averaging, giving equal weight to each language regardless of how many datasets it contains. Previously, all compatible datasets were flat-averaged, which over-represented languages with more datasets. Add SKIP_LANGUAGE_AGGREGATION mode for users who want the old flat average with no filtering and no per-language output. --- src/workrb/results.py | 95 ++++++++++++++--- src/workrb/run.py | 3 + src/workrb/types.py | 7 ++ tests/test_e2e_checkpointing.py | 4 +- tests/test_e2e_toy_benchmark.py | 6 +- tests/test_multi_dataset_task.py | 173 ++++++++++++++++++++++++++++++- 6 files changed, 268 insertions(+), 20 deletions(-) diff --git a/src/workrb/results.py b/src/workrb/results.py index c0d70a8..132036b 100644 --- a/src/workrb/results.py +++ b/src/workrb/results.py @@ -190,18 +190,72 @@ def _aggregate_datasetids_per_task( tag_name: str = "mean_per_task", aggregations: tuple = ("mean", "stderr", "ci_margin"), ) -> dict[ResultTagString, float]: - """Aggregate dataset results per task, filtering by language aggregation mode. + """Aggregate dataset results per task. - For each task, only datasets compatible with the given - ``language_aggregation_mode`` are included in the per-task average. - Incompatible datasets are skipped with a warning, using the same - ``_get_language_grouping_key`` logic as ``_aggregate_per_language``. + Dispatches to either a flat average (``SKIP_LANGUAGE_AGGREGATION``) + or a language-grouped average (all other modes). This is the root aggregation level: per-task results feed into per-task-group, per-task-type, and benchmark-level aggregations, so filtering here ensures consistency across the entire chain. """ - raw_results = defaultdict(list) + if language_aggregation_mode == LanguageAggregationMode.SKIP_LANGUAGE_AGGREGATION: + return self._aggregate_datasetids_per_task_flat( + tag_name=tag_name, aggregations=aggregations + ) + return self._aggregate_datasetids_per_task_language_grouped( + language_aggregation_mode=language_aggregation_mode, + tag_name=tag_name, + aggregations=aggregations, + ) + + def _aggregate_datasetids_per_task_flat( + self, + tag_name: str = "mean_per_task", + aggregations: tuple = ("mean", "stderr", "ci_margin"), + ) -> dict[ResultTagString, float]: + """Flat average of all datasets per task, with no filtering or language grouping. 
+ + Each dataset is an equal data point regardless of its language configuration. + """ + raw_results: dict[tuple[str, str], list[float]] = defaultdict(list) + for task_name, task_result in self.task_results.items(): + for _dataset_id, metrics_result in task_result.datasetid_results.items(): + for metric_name, metric_value in metrics_result.metrics_dict.items(): + raw_results[(task_name, metric_name)].append(metric_value) + + results: dict[ResultTagString, float] = {} + for (task_name, metric_name), values in raw_results.items(): + computed_stats = self._compute_stats(values) + for agg in aggregations: + assert agg in computed_stats, ( + f"Aggregation {agg} not found in stats: {computed_stats.keys()}" + ) + tag = ResultTagString( + name=tag_name, + metric_name=metric_name, + aggregation=agg, + grouping_name=task_name, + ) + results[tag] = computed_stats[agg] + return results + + def _aggregate_datasetids_per_task_language_grouped( + self, + language_aggregation_mode: LanguageAggregationMode, + tag_name: str = "mean_per_task", + aggregations: tuple = ("mean", "stderr", "ci_margin"), + ) -> dict[ResultTagString, float]: + """Language-grouped average: filter incompatible datasets, group by + language, average within each language group, then average across + language groups. + + This gives equal weight per language regardless of how many datasets + each language has. For tasks with exactly one dataset per language + the result is identical to a flat average of the compatible datasets. + """ + # Filter incompatible datasets and group by (task, metric, lang_key) + lang_grouped: dict[tuple[str, str, str], list[float]] = defaultdict(list) for task_name, task_result in self.task_results.items(): for dataset_id, metrics_result in task_result.datasetid_results.items(): language_key = self._get_language_grouping_key( @@ -220,18 +274,28 @@ def _aggregate_datasetids_per_task( ) continue for metric_name, metric_value in metrics_result.metrics_dict.items(): - raw_results[(task_name, metric_name)].append(metric_value) + lang_grouped[(task_name, metric_name, language_key)].append(metric_value) - # Compute stats - results = {} - for (task_name, metric_name), values in raw_results.items(): - stats = self._compute_stats(values) + # Compute mean within each language bucket + per_language_means: dict[tuple[str, str], list[float]] = defaultdict(list) + for (task_name, metric_name, _lang_key), values in lang_grouped.items(): + per_language_means[(task_name, metric_name)].append(float(np.mean(values))) + + # Compute stats across per-language means to get per-task score + results: dict[ResultTagString, float] = {} + for (task_name, metric_name), lang_means in per_language_means.items(): + computed_stats = self._compute_stats(lang_means) for agg in aggregations: - assert agg in stats, f"Aggregation {agg} not found in stats: {stats.keys()}" + assert agg in computed_stats, ( + f"Aggregation {agg} not found in stats: {computed_stats.keys()}" + ) tag = ResultTagString( - name=tag_name, metric_name=metric_name, aggregation=agg, grouping_name=task_name + name=tag_name, + metric_name=metric_name, + aggregation=agg, + grouping_name=task_name, ) - results[tag] = stats[agg] + results[tag] = computed_stats[agg] return results def _aggregate_per_task_group( @@ -428,6 +492,9 @@ def _aggregate_per_language( with only monolingual datasets). Datasets incompatible with the chosen mode are skipped with a warning. 
""" + if aggregation_mode == LanguageAggregationMode.SKIP_LANGUAGE_AGGREGATION: + return {} + # Collect metric values per language raw_results = defaultdict(list) for task_name, task_result in self.task_results.items(): diff --git a/src/workrb/run.py b/src/workrb/run.py index 8a4da38..aefdeb1 100644 --- a/src/workrb/run.py +++ b/src/workrb/run.py @@ -288,6 +288,9 @@ def _get_dataset_ids_to_evaluate( Tasks whose datasets are all incompatible still appear as keys with an empty list. """ + if language_aggregation_mode == LanguageAggregationMode.SKIP_LANGUAGE_AGGREGATION: + return {task.name: list(task.dataset_ids) for task in tasks} + result: dict[str, list[str]] = {} for task in tasks: filtered = [] diff --git a/src/workrb/types.py b/src/workrb/types.py index e69b235..51089b3 100644 --- a/src/workrb/types.py +++ b/src/workrb/types.py @@ -91,6 +91,13 @@ class LanguageAggregationMode(str, Enum): Datasets with multiple output languages are skipped. """ + SKIP_LANGUAGE_AGGREGATION = "skip_language_aggregation" + """Skip language-based grouping entirely. + + All datasets are included in a flat average per task with no filtering + and no per-language output is produced. + """ + def get_language_grouping_key( input_languages: Sequence[str], diff --git a/tests/test_e2e_checkpointing.py b/tests/test_e2e_checkpointing.py index cc9a130..275760b 100644 --- a/tests/test_e2e_checkpointing.py +++ b/tests/test_e2e_checkpointing.py @@ -164,11 +164,11 @@ def name(self): f"Task '{task.name}' missing from final results" ) task_result = end_results.task_results[task.name] - assert Language.EN in task_result.datasetid_results, ( + assert Language.EN.value in task_result.datasetid_results, ( f"Language 'en' missing for task '{task.name}'" ) - lang_result = task_result.datasetid_results[Language.EN] + lang_result = task_result.datasetid_results[Language.EN.value] assert len(lang_result.metrics_dict) > 0, f"No metrics for task '{task.name}'" print(f" ✓ Task '{task.name}' has complete results") diff --git a/tests/test_e2e_toy_benchmark.py b/tests/test_e2e_toy_benchmark.py index 4824b86..0bb1485 100644 --- a/tests/test_e2e_toy_benchmark.py +++ b/tests/test_e2e_toy_benchmark.py @@ -112,7 +112,7 @@ def test_e2e_toy_benchmark(): # Display dataset sizes print("\n📊 Dataset sizes:") for task in tasks: - print(f" • {task.name:35} {task.get_size_oneliner(Language.EN)}") + print(f" • {task.name:35} {task.get_size_oneliner(Language.EN.value)}") # Separate ranking and classification tasks ranking_tasks = [t for t in tasks if isinstance(t, RankingTask)] @@ -168,11 +168,11 @@ def test_e2e_toy_benchmark(): task_result = results.task_results[task_name] # Check if language results exist - if Language.EN not in task_result.datasetid_results: + if Language.EN.value not in task_result.datasetid_results: validation_errors.append(f"Missing language results for {task_name} (en)") continue - lang_result = task_result.datasetid_results[Language.EN] + lang_result = task_result.datasetid_results[Language.EN.value] # Validate metrics based on task type assert len(lang_result.metrics_dict) > 0, f"No metrics found for {task_name}" diff --git a/tests/test_multi_dataset_task.py b/tests/test_multi_dataset_task.py index 400fd8c..07b2df1 100644 --- a/tests/test_multi_dataset_task.py +++ b/tests/test_multi_dataset_task.py @@ -503,7 +503,7 @@ def test_monolingual_only_mixed_task_keeps_only_monolingual(self): """MONOLINGUAL_ONLY keeps monolingual datasets and filters all cross-lingual ones. 
Simulates a MELO-like task with monolingual datasets (en, de) alongside
-        several cross-lingual datasets (en_de, fr_de) — only the monolingual
+        several cross-lingual datasets (en_de, fr_de) -- only the monolingual
         ones should survive filtering.
         """
         task = _MockTask(
@@ -589,6 +589,177 @@ def test_all_datasets_incompatible(self):
         result = _get_dataset_ids_to_evaluate([task], LanguageAggregationMode.MONOLINGUAL_ONLY)
         assert result == {"task1": []}

+    def test_skip_language_aggregation_keeps_all(self):
+        """SKIP_LANGUAGE_AGGREGATION returns all dataset IDs without filtering."""
+        task = _MockTask(
+            "task1",
+            {
+                "en": DatasetLanguages(
+                    input_languages=frozenset({Language.EN}),
+                    output_languages=frozenset({Language.EN}),
+                ),
+                "en_de": DatasetLanguages(
+                    input_languages=frozenset({Language.EN}),
+                    output_languages=frozenset({Language.DE}),
+                ),
+                "multi": DatasetLanguages(
+                    input_languages=frozenset({Language.EN, Language.FR}),
+                    output_languages=frozenset({Language.DE, Language.ES}),
+                ),
+            },
+        )
+        result = _get_dataset_ids_to_evaluate(
+            [task], LanguageAggregationMode.SKIP_LANGUAGE_AGGREGATION
+        )
+        assert result == {"task1": ["en", "en_de", "multi"]}
+
+
+class TestAggregateDatasetidsPerTask:
+    """Tests for _aggregate_datasetids_per_task with language-grouped averaging."""
+
+    def test_monolingual_equal_language_weight(self):
+        """4 EN datasets + 1 DE dataset: language-grouped mean != flat mean.
+
+        Flat: mean(0.8, 0.8, 0.8, 0.8, 0.6) = 0.76
+        Grouped: mean(mean(0.8,0.8,0.8,0.8), mean(0.6)) = mean(0.8, 0.6) = 0.70
+        """
+        br = _make_benchmark_results(
+            [
+                ("task1", "en1", {"map": 0.8}, ["en"], ["en"]),
+                ("task1", "en2", {"map": 0.8}, ["en"], ["en"]),
+                ("task1", "en3", {"map": 0.8}, ["en"], ["en"]),
+                ("task1", "en4", {"map": 0.8}, ["en"], ["en"]),
+                ("task1", "de", {"map": 0.6}, ["de"], ["de"]),
+            ]
+        )
+        result = br._aggregate_datasetids_per_task(
+            language_aggregation_mode=LanguageAggregationMode.MONOLINGUAL_ONLY,
+            aggregations=("mean",),
+        )
+        result_str = {str(k): v for k, v in result.items()}
+        assert result_str["mean_per_task/task1/map/mean"] == pytest.approx(0.70)
+
+    def test_monolingual_filters_crosslingual(self):
+        """Cross-lingual dataset is skipped under MONOLINGUAL_ONLY."""
+        br = _make_benchmark_results(
+            [
+                ("task1", "en", {"map": 0.8}, ["en"], ["en"]),
+                ("task1", "en_de", {"map": 0.5}, ["en"], ["de"]),
+            ]
+        )
+        result = br._aggregate_datasetids_per_task(
+            language_aggregation_mode=LanguageAggregationMode.MONOLINGUAL_ONLY,
+            aggregations=("mean",),
+        )
+        result_str = {str(k): v for k, v in result.items()}
+        # Only the en monolingual dataset should be included
+        assert result_str["mean_per_task/task1/map/mean"] == pytest.approx(0.8)
+
+    def test_crosslingual_group_input_language_grouped(self):
+        """Group by input language, verify per-language weighting."""
+        br = _make_benchmark_results(
+            [
+                ("task1", "en_to_de1", {"map": 0.8}, ["en"], ["de"]),
+                ("task1", "en_to_de2", {"map": 0.6}, ["en"], ["de"]),
+                ("task1", "fr_to_de", {"map": 0.4}, ["fr"], ["de"]),
+            ]
+        )
+        result = br._aggregate_datasetids_per_task(
+            language_aggregation_mode=LanguageAggregationMode.CROSSLINGUAL_GROUP_INPUT_LANGUAGES,
+            aggregations=("mean",),
+        )
+        result_str = {str(k): v for k, v in result.items()}
+        # en group: mean(0.8, 0.6) = 0.7, fr group: mean(0.4) = 0.4
+        # task mean: mean(0.7, 0.4) = 0.55
+        assert result_str["mean_per_task/task1/map/mean"] == pytest.approx(0.55)
+
+    def test_single_dataset_per_language(self):
+        """1:1 mapping: same result as flat average (regression test)."""
+        br = _make_benchmark_results(
+            [
+                ("task1", "en", {"map": 0.8}, ["en"], ["en"]),
+                ("task1", "de", {"map": 0.6}, ["de"], ["de"]),
+            ]
+        )
+        result = br._aggregate_datasetids_per_task(
+            language_aggregation_mode=LanguageAggregationMode.MONOLINGUAL_ONLY,
+            aggregations=("mean",),
+        )
+        result_str = {str(k): v for k, v in result.items()}
+        # mean(0.8, 0.6) = 0.70, same as flat
+        assert result_str["mean_per_task/task1/map/mean"] == pytest.approx(0.70)
+
+    def test_all_datasets_incompatible_produces_empty_result(self):
+        """All datasets skipped under MONOLINGUAL_ONLY produce empty result."""
+        br = _make_benchmark_results(
+            [
+                ("task1", "en_de", {"map": 0.7}, ["en"], ["de"]),
+                ("task1", "fr_es", {"map": 0.5}, ["fr"], ["es"]),
+            ]
+        )
+        result = br._aggregate_datasetids_per_task(
+            language_aggregation_mode=LanguageAggregationMode.MONOLINGUAL_ONLY,
+            aggregations=("mean",),
+        )
+        assert result == {}
+
+
+class TestSkipLanguageAggregation:
+    """Tests for SKIP_LANGUAGE_AGGREGATION mode."""
+
+    def test_flat_average_no_filtering(self):
+        """Mix of mono/cross/multi datasets, all included in flat average."""
+        br = _make_benchmark_results(
+            [
+                ("task1", "en", {"map": 0.8}, ["en"], ["en"]),
+                ("task1", "en_de", {"map": 0.6}, ["en"], ["de"]),
+                ("task1", "multi", {"map": 0.4}, ["en", "fr"], ["de", "es"]),
+            ]
+        )
+        result = br._aggregate_datasetids_per_task(
+            language_aggregation_mode=LanguageAggregationMode.SKIP_LANGUAGE_AGGREGATION,
+            aggregations=("mean",),
+        )
+        result_str = {str(k): v for k, v in result.items()}
+        # Flat mean: (0.8 + 0.6 + 0.4) / 3 = 0.6
+        assert result_str["mean_per_task/task1/map/mean"] == pytest.approx(0.6)
+
+    def test_per_language_returns_empty(self):
+        """_aggregate_per_language returns {} for SKIP_LANGUAGE_AGGREGATION."""
+        br = _make_benchmark_results(
+            [
+                ("task1", "en", {"map": 0.8}, ["en"], ["en"]),
+            ]
+        )
+        result = br._aggregate_per_language(
+            aggregation_mode=LanguageAggregationMode.SKIP_LANGUAGE_AGGREGATION,
+        )
+        assert result == {}
+
+    def test_full_chain_skip_mode(self):
+        """Full get_summary_metrics call with SKIP_LANGUAGE_AGGREGATION.
+
+        Verifies flat average propagates to benchmark level and no
+        per-language keys are produced.
+        """
+        br = _make_benchmark_results(
+            [
+                ("task1", "en", {"map": 0.8}, ["en"], ["en"]),
+                ("task1", "en_de", {"map": 0.6}, ["en"], ["de"]),
+                ("task1", "multi", {"map": 0.4}, ["en", "fr"], ["de", "es"]),
+            ]
+        )
+        summary = br.get_summary_metrics(
+            language_aggregation_mode=LanguageAggregationMode.SKIP_LANGUAGE_AGGREGATION,
+        )
+        # No per-language keys
+        per_lang_keys = [k for k in summary if k.startswith("mean_per_language/")]
+        assert per_lang_keys == []
+
+        # Flat average: (0.8 + 0.6 + 0.4) / 3 = 0.6
+        assert summary["mean_per_task/task1/map/mean"] == pytest.approx(0.6)
+        assert summary["mean_benchmark/map/mean"] == pytest.approx(0.6)
+

 if __name__ == "__main__":
     pytest.main([__file__, "-v"])

From e1dfd9d3d576dace98bcb73307181f28a053d6b4 Mon Sep 17 00:00:00 2001
From: federetyk
Date: Wed, 25 Feb 2026 20:55:37 +0100
Subject: [PATCH 16/17] docs: add benchmark example scripts for each aggregation mode

Add four example scripts illustrating the two aggregation modes
(language-weighted vs flat average) combined with task-level language
filtering (selected subset vs all available languages).
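As a quick numerical sanity check of the difference between the two modes,
the sketch below builds a tiny result set with the _make_benchmark_results
helper from tests/test_multi_dataset_task.py (a test-only helper, not
public API) and compares the per-task means; the keyword argument and the
key format are the ones exercised in the tests added in the previous patch:

    import pytest

    from workrb.types import LanguageAggregationMode
    from tests.test_multi_dataset_task import _make_benchmark_results

    br = _make_benchmark_results(
        [
            # Two English datasets, one German dataset, same task.
            ("task1", "en1", {"map": 0.8}, ["en"], ["en"]),
            ("task1", "en2", {"map": 0.8}, ["en"], ["en"]),
            ("task1", "de", {"map": 0.6}, ["de"], ["de"]),
        ]
    )

    # Language-weighted: mean(mean(0.8, 0.8), mean(0.6)) = 0.70
    weighted = br.get_summary_metrics(
        language_aggregation_mode=LanguageAggregationMode.MONOLINGUAL_ONLY
    )
    assert weighted["mean_per_task/task1/map/mean"] == pytest.approx(0.70)

    # Flat: mean(0.8, 0.8, 0.6) ~= 0.733 -- English counts twice
    flat = br.get_summary_metrics(
        language_aggregation_mode=LanguageAggregationMode.SKIP_LANGUAGE_AGGREGATION
    )
    assert flat["mean_per_task/task1/map/mean"] == pytest.approx(0.733, abs=1e-3)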
--- examples/run_benchmark_flat_average.py | 82 +++++++++++++++++++ .../run_benchmark_flat_average_all_langs.py | 68 +++++++++++++++ examples/run_benchmark_language_weighted.py | 77 +++++++++++++++++ ...n_benchmark_language_weighted_all_langs.py | 63 ++++++++++++++ 4 files changed, 290 insertions(+) create mode 100644 examples/run_benchmark_flat_average.py create mode 100644 examples/run_benchmark_flat_average_all_langs.py create mode 100644 examples/run_benchmark_language_weighted.py create mode 100644 examples/run_benchmark_language_weighted_all_langs.py diff --git a/examples/run_benchmark_flat_average.py b/examples/run_benchmark_flat_average.py new file mode 100644 index 0000000..e4d4bbe --- /dev/null +++ b/examples/run_benchmark_flat_average.py @@ -0,0 +1,82 @@ +""" +Run the benchmark with flat averaging on a selected set of languages. + +Aggregation mode: SKIP_LANGUAGE_AGGREGATION + All datasets contribute equally to the per-task score as a flat + average, with no language-based grouping or filtering. This means + cross-lingual and multilingual datasets are included alongside + monolingual ones. The final results do not include per-language + averages, since no language grouping criterion is defined and + there is no unambiguous way to assign cross-lingual or + multilingual datasets to a single language bucket. + +Task-level language filtering: + The `langs` list restricts which datasets each task loads during + initialization. Only languages in this list are considered. + +Execution mode: ALL + Explicitly set here, but has no practical effect under + SKIP_LANGUAGE_AGGREGATION since no datasets are ever filtered + out by the aggregation mode. +""" + +import workrb +from workrb.types import ExecutionMode, Language, LanguageAggregationMode + +if __name__ == "__main__": + # Models + models = [ + # Lexical baselines + workrb.models.RandomRankingModel(), + workrb.models.BM25Model(lowercase=True), + # DL model + workrb.models.JobBERTModel(), + ] + + # Languages (as strings via .value) + langs = [ + Language.DA.value, + Language.DE.value, + Language.EN.value, + Language.ES.value, + Language.FR.value, + Language.HU.value, + Language.IT.value, + Language.LT.value, + Language.NL.value, + Language.PL.value, + Language.PT.value, + Language.SL.value, + Language.SV.value, + ] + split = "test" + + # Tasks + tasks = [ + # Tasks with monolingual datasets + workrb.tasks.ESCOJob2SkillRanking(split=split, languages=langs), + workrb.tasks.ESCOSkill2JobRanking(split=split, languages=langs), + workrb.tasks.JobBERTJobNormRanking(split=split, languages=langs), + # Tasks with monolingual, cross-lingual, and multilingual datasets + workrb.tasks.ProjectCandidateRanking(split=split, languages=langs), + workrb.tasks.SearchQueryCandidateRanking(split=split, languages=langs), + # TODO: add MELO and MELS tasks when PR #37 is merged + ] + + # Evaluate + # NOTE: execution_mode=ALL has no effect when using SKIP_LANGUAGE_AGGREGATION, + # because no datasets are ever filtered out regardless of execution mode. 
+ all_results = workrb.evaluate_multiple_models( + models=models, + tasks=tasks, + output_folder_template="../results/flat_average/{model_name}", + description="Flat average benchmark", + force_restart=True, + language_aggregation_mode=LanguageAggregationMode.SKIP_LANGUAGE_AGGREGATION, + execution_mode=ExecutionMode.ALL, + ) + + # Display results + for model_name, results in all_results.items(): + print(f"\nResults for {model_name}:") + print(results) diff --git a/examples/run_benchmark_flat_average_all_langs.py b/examples/run_benchmark_flat_average_all_langs.py new file mode 100644 index 0000000..24a86d3 --- /dev/null +++ b/examples/run_benchmark_flat_average_all_langs.py @@ -0,0 +1,68 @@ +""" +Run the benchmark with flat averaging on all available languages. + +Aggregation mode: SKIP_LANGUAGE_AGGREGATION + All datasets contribute equally to the per-task score as a flat + average, with no language-based grouping or filtering. This means + cross-lingual and multilingual datasets are included alongside + monolingual ones. The final results do not include per-language + averages, since no language grouping criterion is defined and + there is no unambiguous way to assign cross-lingual or + multilingual datasets to a single language bucket. + +Task-level language filtering: None + Setting langs=None means each task loads all languages it supports, + with no filtering at the task level. + +Execution mode: ALL + Explicitly set here, but has no practical effect under + SKIP_LANGUAGE_AGGREGATION since no datasets are ever filtered + out by the aggregation mode. +""" + +import workrb +from workrb.types import ExecutionMode, LanguageAggregationMode + +if __name__ == "__main__": + # Models + models = [ + # Lexical baselines + workrb.models.RandomRankingModel(), + workrb.models.BM25Model(lowercase=True), + # DL model + workrb.models.JobBERTModel(), + ] + + # No language filtering: each task loads all languages it supports + langs = None + split = "test" + + # Tasks + tasks = [ + # Tasks with monolingual datasets + workrb.tasks.ESCOJob2SkillRanking(split=split, languages=langs), + workrb.tasks.ESCOSkill2JobRanking(split=split, languages=langs), + workrb.tasks.JobBERTJobNormRanking(split=split, languages=langs), + # Tasks with monolingual, cross-lingual, and multilingual datasets + workrb.tasks.ProjectCandidateRanking(split=split, languages=langs), + workrb.tasks.SearchQueryCandidateRanking(split=split, languages=langs), + # TODO: add MELO and MELS tasks when PR #37 is merged + ] + + # Evaluate + # NOTE: execution_mode=ALL has no effect when using SKIP_LANGUAGE_AGGREGATION, + # because no datasets are ever filtered out regardless of execution mode. + all_results = workrb.evaluate_multiple_models( + models=models, + tasks=tasks, + output_folder_template="../results/flat_average_all_langs/{model_name}", + description="Flat average benchmark (all languages)", + force_restart=True, + language_aggregation_mode=LanguageAggregationMode.SKIP_LANGUAGE_AGGREGATION, + execution_mode=ExecutionMode.ALL, + ) + + # Display results + for model_name, results in all_results.items(): + print(f"\nResults for {model_name}:") + print(results) diff --git a/examples/run_benchmark_language_weighted.py b/examples/run_benchmark_language_weighted.py new file mode 100644 index 0000000..c42ce84 --- /dev/null +++ b/examples/run_benchmark_language_weighted.py @@ -0,0 +1,77 @@ +""" +Run the benchmark with language-weighted aggregation on a selected set of languages. 
+ +Aggregation mode: MONOLINGUAL_ONLY + Within each task, datasets are grouped by language and averaged per + group, then the per-language means are averaged to produce the + per-task score. This gives equal weight to each language regardless + of how many datasets it has. Datasets where input and output + languages differ (cross-lingual) are filtered out of aggregation. + +Task-level language filtering: + The `langs` list restricts which datasets each task loads during + initialization. Only languages in this list are considered. + +Execution mode: LAZY (default) + Datasets that would be filtered out by the aggregation mode are + not evaluated at all, saving compute. +""" + +import workrb +from workrb.types import ExecutionMode, Language, LanguageAggregationMode + +if __name__ == "__main__": + # Models + models = [ + # Lexical baselines + workrb.models.RandomRankingModel(), + workrb.models.BM25Model(lowercase=True), + # DL model + workrb.models.JobBERTModel(), + ] + + # Languages (as strings via .value) + langs = [ + Language.DA.value, + Language.DE.value, + Language.EN.value, + Language.ES.value, + Language.FR.value, + Language.HU.value, + Language.IT.value, + Language.LT.value, + Language.NL.value, + Language.PL.value, + Language.PT.value, + Language.SL.value, + Language.SV.value, + ] + split = "test" + + # Tasks + tasks = [ + # Tasks with monolingual datasets + workrb.tasks.ESCOJob2SkillRanking(split=split, languages=langs), + workrb.tasks.ESCOSkill2JobRanking(split=split, languages=langs), + workrb.tasks.JobBERTJobNormRanking(split=split, languages=langs), + # Tasks with monolingual, cross-lingual, and multilingual datasets + workrb.tasks.ProjectCandidateRanking(split=split, languages=langs), + workrb.tasks.SearchQueryCandidateRanking(split=split, languages=langs), + # TODO: add MELO and MELS tasks when PR #37 is merged + ] + + # Evaluate + all_results = workrb.evaluate_multiple_models( + models=models, + tasks=tasks, + output_folder_template="../results/language_weighted/{model_name}", + description="Language-weighted benchmark", + force_restart=True, + language_aggregation_mode=LanguageAggregationMode.MONOLINGUAL_ONLY, + execution_mode=ExecutionMode.LAZY, + ) + + # Display results + for model_name, results in all_results.items(): + print(f"\nResults for {model_name}:") + print(results) diff --git a/examples/run_benchmark_language_weighted_all_langs.py b/examples/run_benchmark_language_weighted_all_langs.py new file mode 100644 index 0000000..ad74522 --- /dev/null +++ b/examples/run_benchmark_language_weighted_all_langs.py @@ -0,0 +1,63 @@ +""" +Run the benchmark with language-weighted aggregation on all available languages. + +Aggregation mode: MONOLINGUAL_ONLY + Within each task, datasets are grouped by language and averaged per + group, then the per-language means are averaged to produce the + per-task score. This gives equal weight to each language regardless + of how many datasets it has. Datasets where input and output + languages differ (cross-lingual) are filtered out of aggregation. + +Task-level language filtering: None + Setting langs=None means each task loads all languages it supports, + with no filtering at the task level. + +Execution mode: LAZY (default) + Datasets that would be filtered out by the aggregation mode are + not evaluated at all, saving compute. 
+""" + +import workrb +from workrb.types import ExecutionMode, LanguageAggregationMode + +if __name__ == "__main__": + # Models + models = [ + # Lexical baselines + workrb.models.RandomRankingModel(), + workrb.models.BM25Model(lowercase=True), + # DL model + workrb.models.JobBERTModel(), + ] + + # No language filtering: each task loads all languages it supports + langs = None + split = "test" + + # Tasks + tasks = [ + # Tasks with monolingual datasets + workrb.tasks.ESCOJob2SkillRanking(split=split, languages=langs), + workrb.tasks.ESCOSkill2JobRanking(split=split, languages=langs), + workrb.tasks.JobBERTJobNormRanking(split=split, languages=langs), + # Tasks with monolingual, cross-lingual, and multilingual datasets + workrb.tasks.ProjectCandidateRanking(split=split, languages=langs), + workrb.tasks.SearchQueryCandidateRanking(split=split, languages=langs), + # TODO: add MELO and MELS tasks when PR #37 is merged + ] + + # Evaluate + all_results = workrb.evaluate_multiple_models( + models=models, + tasks=tasks, + output_folder_template="../results/language_weighted_all_langs/{model_name}", + description="Language-weighted benchmark (all languages)", + force_restart=True, + language_aggregation_mode=LanguageAggregationMode.MONOLINGUAL_ONLY, + execution_mode=ExecutionMode.LAZY, + ) + + # Display results + for model_name, results in all_results.items(): + print(f"\nResults for {model_name}:") + print(results) From e4a6bce3889d3fae6083443576b7518046ee4e0f Mon Sep 17 00:00:00 2001 From: federetyk Date: Wed, 25 Feb 2026 23:29:36 +0100 Subject: [PATCH 17/17] fix: remove from example the dataset that uses ESCO 1.0.5 but defines UK as supported language --- examples/run_benchmark_flat_average.py | 1 - examples/run_benchmark_flat_average_all_langs.py | 1 - examples/run_benchmark_language_weighted.py | 1 - examples/run_benchmark_language_weighted_all_langs.py | 1 - 4 files changed, 4 deletions(-) diff --git a/examples/run_benchmark_flat_average.py b/examples/run_benchmark_flat_average.py index e4d4bbe..1b08f51 100644 --- a/examples/run_benchmark_flat_average.py +++ b/examples/run_benchmark_flat_average.py @@ -56,7 +56,6 @@ # Tasks with monolingual datasets workrb.tasks.ESCOJob2SkillRanking(split=split, languages=langs), workrb.tasks.ESCOSkill2JobRanking(split=split, languages=langs), - workrb.tasks.JobBERTJobNormRanking(split=split, languages=langs), # Tasks with monolingual, cross-lingual, and multilingual datasets workrb.tasks.ProjectCandidateRanking(split=split, languages=langs), workrb.tasks.SearchQueryCandidateRanking(split=split, languages=langs), diff --git a/examples/run_benchmark_flat_average_all_langs.py b/examples/run_benchmark_flat_average_all_langs.py index 24a86d3..e411b24 100644 --- a/examples/run_benchmark_flat_average_all_langs.py +++ b/examples/run_benchmark_flat_average_all_langs.py @@ -42,7 +42,6 @@ # Tasks with monolingual datasets workrb.tasks.ESCOJob2SkillRanking(split=split, languages=langs), workrb.tasks.ESCOSkill2JobRanking(split=split, languages=langs), - workrb.tasks.JobBERTJobNormRanking(split=split, languages=langs), # Tasks with monolingual, cross-lingual, and multilingual datasets workrb.tasks.ProjectCandidateRanking(split=split, languages=langs), workrb.tasks.SearchQueryCandidateRanking(split=split, languages=langs), diff --git a/examples/run_benchmark_language_weighted.py b/examples/run_benchmark_language_weighted.py index c42ce84..0006457 100644 --- a/examples/run_benchmark_language_weighted.py +++ b/examples/run_benchmark_language_weighted.py @@ -53,7 +53,6 @@ # 
Tasks with monolingual datasets workrb.tasks.ESCOJob2SkillRanking(split=split, languages=langs), workrb.tasks.ESCOSkill2JobRanking(split=split, languages=langs), - workrb.tasks.JobBERTJobNormRanking(split=split, languages=langs), # Tasks with monolingual, cross-lingual, and multilingual datasets workrb.tasks.ProjectCandidateRanking(split=split, languages=langs), workrb.tasks.SearchQueryCandidateRanking(split=split, languages=langs), diff --git a/examples/run_benchmark_language_weighted_all_langs.py b/examples/run_benchmark_language_weighted_all_langs.py index ad74522..8e9dd06 100644 --- a/examples/run_benchmark_language_weighted_all_langs.py +++ b/examples/run_benchmark_language_weighted_all_langs.py @@ -39,7 +39,6 @@ # Tasks with monolingual datasets workrb.tasks.ESCOJob2SkillRanking(split=split, languages=langs), workrb.tasks.ESCOSkill2JobRanking(split=split, languages=langs), - workrb.tasks.JobBERTJobNormRanking(split=split, languages=langs), # Tasks with monolingual, cross-lingual, and multilingual datasets workrb.tasks.ProjectCandidateRanking(split=split, languages=langs), workrb.tasks.SearchQueryCandidateRanking(split=split, languages=langs),
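The LAZY-versus-ALL execution behaviour described in the example script
docstrings can be checked directly against the filtering helper patched in
src/workrb/run.py. A minimal sketch, assuming _get_dataset_ids_to_evaluate
is importable from workrb.run and reusing the _MockTask test helper from
tests/test_multi_dataset_task.py:

    from workrb.run import _get_dataset_ids_to_evaluate
    from workrb.types import DatasetLanguages, Language, LanguageAggregationMode
    from tests.test_multi_dataset_task import _MockTask

    # One monolingual and one cross-lingual dataset on the same task.
    task = _MockTask(
        "task1",
        {
            "en": DatasetLanguages(
                input_languages=frozenset({Language.EN}),
                output_languages=frozenset({Language.EN}),
            ),
            "en_de": DatasetLanguages(
                input_languages=frozenset({Language.EN}),
                output_languages=frozenset({Language.DE}),
            ),
        },
    )

    # Under LAZY execution, only the IDs returned here are evaluated,
    # so the cross-lingual dataset costs no compute in MONOLINGUAL_ONLY.
    assert _get_dataset_ids_to_evaluate(
        [task], LanguageAggregationMode.MONOLINGUAL_ONLY
    ) == {"task1": ["en"]}

    # SKIP_LANGUAGE_AGGREGATION never filters, so everything runs;
    # this is why execution mode has no practical effect in that mode.
    assert _get_dataset_ids_to_evaluate(
        [task], LanguageAggregationMode.SKIP_LANGUAGE_AGGREGATION
    ) == {"task1": ["en", "en_de"]}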