morgankavanagh committed on
Commit
be85bc4
·
1 Parent(s): 6b1dcb9

Delete run_experiment.py

Browse files
Files changed (1) hide show
  1. run_experiment.py +0 -42
run_experiment.py DELETED
@@ -1,42 +0,0 @@
1
- import json
2
- from pathlib import Path
3
-
4
- from assignment5.chrf import calculate_chrf
5
- from assignment5.mbr import select_best_hypothesis
6
-
7
- # Load data
8
- data_dir = Path(__file__).parent / "mt_data"
9
- source_sentences = (data_dir / "source_sentences.txt").read_text().splitlines()
10
- reference_translations = (data_dir / "reference_translations.txt").read_text().splitlines()
11
- beam_search_translations = (data_dir / "beam_search_translations.txt").read_text().splitlines()
12
-
13
- with open(data_dir / "samples.jsonl") as f:
14
- samples = [json.loads(line)["samples"] for line in f]
15
-
16
-
17
-
18
- # Step 1: Select the best hypothesis for each source sentence using MBR decoding
19
- mbr_translations = [select_best_hypothesis(sample_set) for sample_set in samples]
20
-
21
- # Step 2: Calculate ChrF scores for MBR translations
22
- mbr_chrf_scores = [
23
- calculate_chrf(mbr_translation, reference)
24
- for mbr_translation, reference in zip(mbr_translations, reference_translations)
25
- ]
26
- average_mbr_chrf = sum(mbr_chrf_scores) / len(mbr_chrf_scores)
27
-
28
- # Step 3: Calculate ChrF scores for beam search translations
29
- beam_chrf_scores = [
30
- calculate_chrf(beam_translation, reference)
31
- for beam_translation, reference in zip(beam_search_translations, reference_translations)
32
- ]
33
- average_beam_chrf = sum(beam_chrf_scores) / len(beam_chrf_scores)
34
-
35
- # Step 4: Print the results
36
- print(f"Average ChrF score for MBR decoding: {average_mbr_chrf:.2f}")
37
- print(f"Average ChrF score for beam search: {average_beam_chrf:.2f}")
38
-
39
- if average_mbr_chrf > average_beam_chrf:
40
- print("MBR decoding produced better translations.")
41
- else:
42
- print("Beam search produced better translations.")