omarkamali committed on
Commit
21bbc28
·
verified ·
1 Parent(s): 8dc7bd0

Upload all models and assets for ee (latest)

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +1 -0
  2. README.md +322 -134
  3. models/embeddings/aligned/ee_128d.bin +3 -0
  4. models/embeddings/aligned/ee_128d.meta.json +1 -0
  5. models/embeddings/aligned/ee_128d.projection.npy +3 -0
  6. models/embeddings/aligned/ee_128d_metadata.json +8 -0
  7. models/embeddings/aligned/ee_32d.bin +3 -0
  8. models/embeddings/aligned/ee_32d.meta.json +1 -0
  9. models/embeddings/aligned/ee_32d.projection.npy +3 -0
  10. models/embeddings/aligned/ee_32d_metadata.json +8 -0
  11. models/embeddings/aligned/ee_64d.bin +3 -0
  12. models/embeddings/aligned/ee_64d.meta.json +1 -0
  13. models/embeddings/aligned/ee_64d.projection.npy +3 -0
  14. models/embeddings/aligned/ee_64d_metadata.json +8 -0
  15. models/embeddings/monolingual/ee_128d.bin +2 -2
  16. models/embeddings/monolingual/ee_128d_metadata.json +5 -3
  17. models/embeddings/monolingual/ee_32d.bin +2 -2
  18. models/embeddings/monolingual/ee_32d_metadata.json +5 -3
  19. models/embeddings/monolingual/ee_64d.bin +2 -2
  20. models/embeddings/monolingual/ee_64d_metadata.json +5 -3
  21. models/subword_markov/ee_markov_ctx1_subword.parquet +2 -2
  22. models/subword_markov/ee_markov_ctx1_subword_metadata.json +2 -2
  23. models/subword_markov/ee_markov_ctx2_subword.parquet +2 -2
  24. models/subword_markov/ee_markov_ctx2_subword_metadata.json +2 -2
  25. models/subword_markov/ee_markov_ctx3_subword.parquet +2 -2
  26. models/subword_markov/ee_markov_ctx3_subword_metadata.json +2 -2
  27. models/subword_markov/ee_markov_ctx4_subword.parquet +2 -2
  28. models/subword_markov/ee_markov_ctx4_subword_metadata.json +2 -2
  29. models/subword_ngram/ee_2gram_subword.parquet +2 -2
  30. models/subword_ngram/ee_2gram_subword_metadata.json +2 -2
  31. models/subword_ngram/ee_3gram_subword.parquet +2 -2
  32. models/subword_ngram/ee_3gram_subword_metadata.json +2 -2
  33. models/subword_ngram/ee_4gram_subword.parquet +2 -2
  34. models/subword_ngram/ee_4gram_subword_metadata.json +2 -2
  35. models/subword_ngram/ee_5gram_subword.parquet +3 -0
  36. models/subword_ngram/ee_5gram_subword_metadata.json +7 -0
  37. models/tokenizer/ee_tokenizer_16k.model +2 -2
  38. models/tokenizer/ee_tokenizer_16k.vocab +0 -0
  39. models/tokenizer/ee_tokenizer_32k.model +2 -2
  40. models/tokenizer/ee_tokenizer_32k.vocab +0 -0
  41. models/tokenizer/ee_tokenizer_8k.model +2 -2
  42. models/tokenizer/ee_tokenizer_8k.vocab +0 -0
  43. models/vocabulary/ee_vocabulary.parquet +2 -2
  44. models/vocabulary/ee_vocabulary_metadata.json +10 -9
  45. models/word_markov/ee_markov_ctx1_word.parquet +2 -2
  46. models/word_markov/ee_markov_ctx1_word_metadata.json +2 -2
  47. models/word_markov/ee_markov_ctx2_word.parquet +2 -2
  48. models/word_markov/ee_markov_ctx2_word_metadata.json +2 -2
  49. models/word_markov/ee_markov_ctx3_word.parquet +2 -2
  50. models/word_markov/ee_markov_ctx3_word_metadata.json +2 -2
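Each path above is an ordinary repository file, so individual assets can be fetched without cloning everything. A minimal sketch using `huggingface_hub`; the repo id below is a placeholder (substitute this repository's actual `owner/name`):

```python
# Sketch only: fetch a few of the assets listed above with huggingface_hub.
# REPO_ID is a placeholder, not taken from this commit -- replace it with the
# actual owner/name of this repository.
from huggingface_hub import hf_hub_download

REPO_ID = "wikilangs/ee"  # hypothetical id

tokenizer_path = hf_hub_download(REPO_ID, "models/tokenizer/ee_tokenizer_32k.model")
vocab_path = hf_hub_download(REPO_ID, "models/vocabulary/ee_vocabulary.parquet")
projection_path = hf_hub_download(REPO_ID, "models/embeddings/aligned/ee_128d.projection.npy")

print(tokenizer_path, vocab_path, projection_path, sep="\n")
```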
.gitattributes CHANGED
@@ -39,3 +39,4 @@ visualizations/position_encoding_comparison.png filter=lfs diff=lfs merge=lfs -t
39
  visualizations/tsne_sentences.png filter=lfs diff=lfs merge=lfs -text
40
  visualizations/tsne_words.png filter=lfs diff=lfs merge=lfs -text
41
  visualizations/zipf_law.png filter=lfs diff=lfs merge=lfs -text
 
 
39
  visualizations/tsne_sentences.png filter=lfs diff=lfs merge=lfs -text
40
  visualizations/tsne_words.png filter=lfs diff=lfs merge=lfs -text
41
  visualizations/zipf_law.png filter=lfs diff=lfs merge=lfs -text
42
+ visualizations/embedding_tsne_multilingual.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
  language: ee
3
- language_name: EE
4
  language_family: atlantic_kwa
5
  tags:
6
  - wikilangs
@@ -10,11 +10,21 @@ tags:
10
  - n-gram
11
  - markov
12
  - wikipedia
13
  - monolingual
14
  - family-atlantic_kwa
15
  license: mit
16
  library_name: wikilangs
17
- pipeline_tag: feature-extraction
18
  datasets:
19
  - omarkamali/wikipedia-monthly
20
  dataset_info:
@@ -23,20 +33,20 @@ dataset_info:
23
  metrics:
24
  - name: best_compression_ratio
25
  type: compression
26
- value: 4.014
27
  - name: best_isotropy
28
  type: isotropy
29
- value: 0.6434
30
  - name: vocabulary_size
31
  type: vocab
32
- value: 12330
33
- generated: 2025-12-30
34
  ---
35
 
36
- # EE - Wikilangs Models
37
  ## Comprehensive Research Report & Full Ablation Study
38
 
39
- This repository contains NLP models trained and evaluated by Wikilangs, specifically on **EE** Wikipedia data.
40
  We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.
41
 
42
  ## 📋 Repository Contents
@@ -44,12 +54,13 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
44
  ### Models & Assets
45
 
46
  - Tokenizers (8k, 16k, 32k, 64k)
47
- - N-gram models (2, 3, 4-gram)
48
- - Markov chains (context of 1, 2, 3 and 4)
49
  - Subword N-gram and Markov chains
50
- - Embeddings in various sizes and dimensions
51
  - Language Vocabulary
52
  - Language Statistics
 
53
  ![Performance Dashboard](visualizations/performance_dashboard.png)
54
 
55
  ### Analysis and Evaluation
@@ -59,7 +70,8 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
59
  - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
60
  - [4. Vocabulary Analysis](#4-vocabulary-analysis)
61
  - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
62
- - [6. Summary & Recommendations](#6-summary--recommendations)
 
63
  - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
64
  - [Visualizations Index](#visualizations-index)
65
 
@@ -68,55 +80,53 @@ We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and
68
 
69
  ![Tokenizer Compression](visualizations/tokenizer_compression.png)
70
 
 
 
 
 
 
 
71
  ### Results
72
 
73
  | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
74
  |------------|-------------|---------------|----------|--------------|
75
- | **8k** | 3.451x | 3.42 | 0.1964% | 189,386 |
76
- | **16k** | 3.696x | 3.66 | 0.2104% | 176,825 |
77
- | **32k** | 3.905x | 3.87 | 0.2223% | 167,343 |
78
- | **64k** | 4.014x 🏆 | 3.97 | 0.2285% | 162,807 |
79
 
80
  ### Tokenization Examples
81
 
82
  Below are sample sentences tokenized with each vocabulary size:
83
 
84
- **Sample 1:** `Ũ, ũ nye Kikuyutɔwo ƒe nya alfabet.`
85
 
86
  | Vocab | Tokens | Count |
87
  |-------|--------|-------|
88
- | 8k | `▁ ũ ,ũ ▁— ▁nye ▁ki ku yu ... (+5 more)` | 15 |
89
- | 16k | `▁ũ , ▁ũ ▁— ▁nye ▁ki kuyu tɔwo ▁ƒe nya ... (+2 more)` | 12 |
90
- | 32k | `▁ũ , ▁ũ ▁— ▁nye ▁kikuyutɔwo ▁ƒenyaalfabet .` | 10 |
91
- | 64k | `▁ũ , ▁ũ ▁— ▁nye ▁kikuyutɔwo ▁ƒe ▁nya ▁alfabet .` | 10 |
92
-
93
- **Sample 2:** `Hong Kong nye Asia dukɔwo dometɔ ɖeka.
94
 
95
- Category:China`
96
 
97
  | Vocab | Tokens | Count |
98
  |-------|--------|-------|
99
- | 8k | `▁h ong k ong ▁nye ▁asiadukɔwodometɔ ▁ɖeka . ... (+3 more)` | 13 |
100
- | 16k | `▁hongkong ▁nye ▁asiadukɔwodometɔ ▁ɖeka . ▁category : ... (+1 more)` | 11 |
101
- | 32k | `▁hongkong ▁nye ▁asiadukɔwodometɔ ▁ɖeka . ▁category : ... (+1 more)` | 11 |
102
- | 64k | `▁hong ▁kong ▁nye ▁asia ▁dukɔwo ▁dometɔ ▁ɖeka . ▁category : ... (+1 more)` | 11 |
103
 
104
- **Sample 3:** `Oslo nye Norway dugã enye. Eƒe dukɔmenɔlawo ƒe xexlẽme ɖo 658,390 lɔƒo (2016).
105
-
106
- ...`
107
 
108
  | Vocab | Tokens | Count |
109
  |-------|--------|-------|
110
- | 8k | `▁os lonyenorwaydugãenye . eƒedukɔmenɔlawo ▁ƒe ... (+30 more)` | 40 |
111
- | 16k | `▁oslo ▁nye ▁norwaydugãenye . ▁eƒedukɔmenɔlawo ▁ƒexexlẽme ... (+28 more)` | 38 |
112
- | 32k | `▁oslo ▁nyenorwaydugã ▁enye . ▁eƒedukɔmenɔlawo ▁ƒexexlẽme ... (+26 more)` | 36 |
113
- | 64k | `▁oslo ▁nye ▁norway ▁dugã ▁enye . ▁eƒe ▁dukɔmenɔlawo ▁ƒe ▁xexlẽme ... (+26 more)` | 36 |
114
 
115
 
116
  ### Key Findings
117
 
118
- - **Best Compression:** 64k achieves 4.014x compression
119
- - **Lowest UNK Rate:** 8k with 0.1964% unknown tokens
120
  - **Trade-off:** Larger vocabularies improve compression but increase model size
121
  - **Recommendation:** 32k vocabulary provides optimal balance for production use
122
 
@@ -125,57 +135,111 @@ Category:China`
125
 
126
  ![N-gram Perplexity](visualizations/ngram_perplexity.png)
127
 
 
 
128
  ![N-gram Coverage](visualizations/ngram_coverage.png)
129
 
130
  ### Results
131
 
132
- | N-gram | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
133
- |--------|------------|---------|----------------|------------------|-------------------|
134
- | **2-gram** | 3,597 🏆 | 11.81 | 9,575 | 23.2% | 55.1% |
135
- | **2-gram** | 316 🏆 | 8.30 | 2,357 | 61.9% | 98.5% |
136
- | **3-gram** | 6,180 | 12.59 | 13,821 | 19.8% | 43.8% |
137
- | **3-gram** | 2,220 | 11.12 | 16,421 | 29.6% | 70.6% |
138
- | **4-gram** | 10,276 | 13.33 | 22,472 | 18.3% | 35.3% |
139
- | **4-gram** | 9,097 | 13.15 | 62,387 | 16.3% | 45.5% |
 
 
140
 
141
  ### Top 5 N-grams by Size
142
 
143
- **2-grams:**
 
 
 
 
 
 
 
 
 
 
144
 
145
  | Rank | N-gram | Count |
146
  |------|--------|-------|
147
- | 1 | `la ,` | 3,065 |
148
- | 2 | `le ƒe` | 2,270 |
149
- | 3 | `me .` | 1,712 |
150
- | 4 | `category :` | 1,483 |
151
- | 5 | `me la` | 1,440 |
152
 
153
- **3-grams:**
154
 
155
  | Rank | N-gram | Count |
156
  |------|--------|-------|
157
- | 1 | `me la ,` | 1,287 |
158
- | 2 | `: / /` | 514 |
159
- | 3 | `. le ƒe` | 491 |
160
- | 4 | `, si nye` | 458 |
161
- | 5 | `va ɖo ƒe` | 326 |
162
 
163
- **4-grams:**
164
 
165
  | Rank | N-gram | Count |
166
  |------|--------|-------|
167
- | 1 | `https : / /` | 320 |
168
- | 2 | `ƒe tsoƒe category :` | 278 |
169
- | 3 | `: / / www` | 272 |
170
- | 4 | `/ / www .` | 272 |
171
- | 5 | `nyasiawo ƒe tsoƒe category` | 267 |
172
 
173
 
174
  ### Key Findings
175
 
176
- - **Best Perplexity:** 2-gram with 316
177
  - **Entropy Trend:** Decreases with larger n-grams (more predictable)
178
- - **Coverage:** Top-1000 patterns cover ~46% of corpus
179
  - **Recommendation:** 4-gram or 5-gram for best predictive performance
180
 
181
  ---
@@ -183,55 +247,86 @@ Category:China`
183
 
184
  ![Markov Entropy](visualizations/markov_entropy.png)
185
 
 
 
186
  ![Markov Branching](visualizations/markov_branching.png)
187
 
188
  ### Results
189
 
190
- | Context | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
191
- |---------|-------------|------------|------------------|-----------------|----------------|
192
- | **1** | 0.7181 | 1.645 | 4.75 | 27,803 | 28.2% |
193
- | **1** | 1.5928 | 3.016 | 13.09 | 400 | 0.0% |
194
- | **2** | 0.3176 | 1.246 | 1.78 | 131,912 | 68.2% |
195
- | **2** | 1.0913 | 2.131 | 6.11 | 5,232 | 0.0% |
196
- | **3** | 0.1273 | 1.092 | 1.22 | 234,623 | 87.3% |
197
- | **3** | 0.8236 | 1.770 | 3.55 | 31,943 | 17.6% |
198
- | **4** | 0.0520 🏆 | 1.037 | 1.08 | 286,556 | 94.8% |
199
- | **4** | 0.5125 🏆 | 1.426 | 2.16 | 113,293 | 48.8% |
200
 
201
- ### Generated Text Samples
 
 
 
 
202
 
203
- Below are text samples generated from each Markov chain model:
 
 
 
 
 
 
 
204
 
205
  **Context Size 1:**
206
 
207
- 1. `. eɖo " the cursed ones - 3 . to subɔsubɔha me tɔ . m .`
208
- 2. `ƒe nukpɔsusuwo ta si be / mps / / / / browsecollections / 20190427151234 / 9780812291445`
209
- 3. `, dukɔ aɖe tso aʋawɔwɔwo me le united nations decade for the national democratic congress me`
210
 
211
  **Context Size 2:**
212
 
213
- 1. `la , islam kpɔ ŋusẽ ɖe amewo ŋu , eye wòdidi tso tsidzɔƒe gã la dzi .`
214
- 2. `le ƒe 2007 fifa u - 20 muhammad hidayat ullah ( nuwɔna ) ƒe habɔbɔ si le`
215
- 3. `me . enye senyala gilbert strauss - kahn nye kɔmiunist sukuviwo ƒe habɔbɔ me tɔ le september`
216
 
217
  **Context Size 3:**
218
 
219
- 1. `me la , edze be woa de dzesi eɖokui na ɖɔwɔƒe si gbalɛ nyawo gbɔ enumake alo ana`
220
- 2. `: / / en . wikipedia . org / fellows / 979 / " . 2023 - 01`
221
- 3. `. le ƒe geɖe ƒe nazãbubu , vovototodedeameme , kple hadome vovototodedeameme , si ɖe gabontɔ kung fu`
222
 
223
  **Context Size 4:**
224
 
225
- 1. `https : / / en . wikipedia . org / wiki / s2cid_ ( identifier ) 194922873 dedieu ,`
226
- 2. `ƒe tsoƒe category : tɔmelãwo`
227
- 3. `: / / www . fao . org / docrep / 009 / a0154e / a0154e07 . htm nɔnɔmetatawo`
228
 
229
 
230
  ### Key Findings
231
 
232
- - **Best Predictability:** Context-4 with 94.8% predictability
233
  - **Branching Factor:** Decreases with context size (more deterministic)
234
- - **Memory Trade-off:** Larger contexts require more storage (113,293 contexts)
235
  - **Recommendation:** Context-3 or Context-4 for text generation
236
 
237
  ---
@@ -247,35 +342,35 @@ Below are text samples generated from each Markov chain model:
247
 
248
  | Metric | Value |
249
  |--------|-------|
250
- | Vocabulary Size | 12,330 |
251
- | Total Tokens | 280,434 |
252
- | Mean Frequency | 22.74 |
253
- | Median Frequency | 4 |
254
- | Frequency Std Dev | 251.45 |
255
 
256
  ### Most Common Words
257
 
258
  | Rank | Word | Frequency |
259
  |------|------|-----------|
260
- | 1 | ƒe | 17,053 |
261
- | 2 | le | 14,563 |
262
  | 3 | me | 8,468 |
263
- | 4 | si | 6,286 |
264
- | 5 | la | 4,916 |
265
- | 6 | kple | 4,853 |
266
- | 7 | nye | 3,835 |
267
- | 8 | be | 3,754 |
268
- | 9 | ɖe | 3,265 |
269
  | 10 | siwo | 2,545 |
270
 
271
  ### Least Common Words (from vocabulary)
272
 
273
  | Rank | Word | Frequency |
274
  |------|------|-----------|
275
- | 1 | loi | 2 |
276
- | 2 | 12932 | 2 |
277
- | 3 | crunchy | 2 |
278
- | 4 | kakl | 2 |
279
  | 5 | klottey | 2 |
280
  | 6 | korle | 2 |
281
  | 7 | domelovo | 2 |
@@ -287,24 +382,24 @@ Below are text samples generated from each Markov chain model:
287
 
288
  | Metric | Value |
289
  |--------|-------|
290
- | Zipf Coefficient | 1.1796 |
291
- | R² (Goodness of Fit) | 0.990771 |
292
  | Adherence Quality | **excellent** |
293
 
294
  ### Coverage Analysis
295
 
296
  | Top N Words | Coverage |
297
  |-------------|----------|
298
- | Top 100 | 47.4% |
299
- | Top 1,000 | 77.4% |
300
- | Top 5,000 | 93.3% |
301
- | Top 10,000 | 98.3% |
302
 
303
  ### Key Findings
304
 
305
- - **Zipf Compliance:** R²=0.9908 indicates excellent adherence to Zipf's law
306
- - **High Frequency Dominance:** Top 100 words cover 47.4% of corpus
307
- - **Long Tail:** 2,330 words needed for remaining 1.7% coverage
308
 
309
  ---
310
  ## 5. Word Embeddings Evaluation
@@ -317,24 +412,114 @@ Below are text samples generated from each Markov chain model:
317
 
318
  ![t-SNE Sentences](visualizations/tsne_sentences.png)
319
 
320
- ### Model Comparison
321
 
322
- | Model | Vocab Size | Dimension | Avg Norm | Std Norm | Isotropy |
323
- |-------|------------|-----------|----------|----------|----------|
324
- | **mono_32d** | 5,162 | 32 | 2.985 | 0.736 | 0.6434 🏆 |
325
- | **mono_64d** | 5,162 | 64 | 3.060 | 0.703 | 0.2784 |
326
- | **mono_128d** | 5,162 | 128 | 3.081 | 0.698 | 0.0599 |
327
- | **embeddings_enhanced** | 0 | 0 | 0.000 | 0.000 | 0.0000 |
 
 
 
 
 
 
 
 
 
 
 
328
 
329
  ### Key Findings
330
 
331
- - **Best Isotropy:** mono_32d with 0.6434 (more uniform distribution)
332
- - **Dimension Trade-off:** Higher dimensions capture more semantics but reduce isotropy
333
- - **Vocabulary Coverage:** All models cover 5,162 words
334
- - **Recommendation:** 100d for balanced semantic capture and efficiency
335
 
336
  ---
337
- ## 6. Summary & Recommendations
338
 
339
  ![Performance Dashboard](visualizations/performance_dashboard.png)
340
 
@@ -342,11 +527,12 @@ Below are text samples generated from each Markov chain model:
342
 
343
  | Component | Recommended | Rationale |
344
  |-----------|-------------|-----------|
345
- | Tokenizer | **32k BPE** | Best compression (4.01x) with low UNK rate |
346
- | N-gram | **5-gram** | Lowest perplexity (316) |
347
- | Markov | **Context-4** | Highest predictability (94.8%) |
348
  | Embeddings | **100d** | Balanced semantic capture and isotropy |
349
 
 
350
  ---
351
  ## Appendix: Metrics Glossary & Interpretation Guide
352
 
@@ -536,7 +722,8 @@ If you use these models in your research, please cite:
536
  author = {Kamali, Omar},
537
  title = {Wikilangs: Open NLP Models for Wikipedia Languages},
538
  year = {2025},
539
- publisher = {HuggingFace},
 
540
  url = {https://huggingface.co/wikilangs}
541
  institution = {Omneity Labs}
542
  }
@@ -552,7 +739,8 @@ MIT License - Free for academic and commercial use.
552
  - 🤗 Models: [huggingface.co/wikilangs](https://huggingface.co/wikilangs)
553
  - 📊 Data: [wikipedia-monthly](https://huggingface.co/datasets/omarkamali/wikipedia-monthly)
554
  - 👤 Author: [Omar Kamali](https://huggingface.co/omarkamali)
 
555
  ---
556
  *Generated by Wikilangs Models Pipeline*
557
 
558
- *Report Date: 2025-12-30 08:47:35*
 
1
  ---
2
  language: ee
3
+ language_name: Ewe
4
  language_family: atlantic_kwa
5
  tags:
6
  - wikilangs
 
10
  - n-gram
11
  - markov
12
  - wikipedia
13
+ - feature-extraction
14
+ - sentence-similarity
15
+ - tokenization
16
+ - n-grams
17
+ - markov-chain
18
+ - text-mining
19
+ - fasttext
20
+ - babelvec
21
+ - vocabulous
22
+ - vocabulary
23
  - monolingual
24
  - family-atlantic_kwa
25
  license: mit
26
  library_name: wikilangs
27
+ pipeline_tag: text-generation
28
  datasets:
29
  - omarkamali/wikipedia-monthly
30
  dataset_info:
 
33
  metrics:
34
  - name: best_compression_ratio
35
  type: compression
36
+ value: 4.309
37
  - name: best_isotropy
38
  type: isotropy
39
+ value: 0.7155
40
  - name: vocabulary_size
41
  type: vocab
42
+ value: 11578
43
+ generated: 2026-01-04
44
  ---
45
 
46
+ # Ewe - Wikilangs Models
47
  ## Comprehensive Research Report & Full Ablation Study
48
 
49
+ This repository contains NLP models trained and evaluated by Wikilangs, specifically on **Ewe** Wikipedia data.
50
  We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings.
51
 
52
  ## 📋 Repository Contents
 
54
  ### Models & Assets
55
 
56
  - Tokenizers (8k, 16k, 32k, 64k)
57
+ - N-gram models (2, 3, 4, 5-gram)
58
+ - Markov chains (context of 1, 2, 3, 4 and 5)
59
  - Subword N-gram and Markov chains
60
+ - Embeddings in various sizes and dimensions (aligned and unaligned)
61
  - Language Vocabulary
62
  - Language Statistics
63
+
64
  ![Performance Dashboard](visualizations/performance_dashboard.png)
65
 
66
  ### Analysis and Evaluation
 
70
  - [3. Markov Chain Evaluation](#3-markov-chain-evaluation)
71
  - [4. Vocabulary Analysis](#4-vocabulary-analysis)
72
  - [5. Word Embeddings Evaluation](#5-word-embeddings-evaluation)
73
+ - [6. Morphological Analysis (Experimental)](#6--morphological-analysis-experimental)
74
+ - [7. Summary & Recommendations](#7-summary--recommendations)
75
  - [Metrics Glossary](#appendix-metrics-glossary--interpretation-guide)
76
  - [Visualizations Index](#visualizations-index)
77
 
 
80
 
81
  ![Tokenizer Compression](visualizations/tokenizer_compression.png)
82
 
83
+ ![Tokenizer Fertility](visualizations/tokenizer_fertility.png)
84
+
85
+ ![Tokenizer OOV](visualizations/tokenizer_oov.png)
86
+
87
+ ![Total Tokens](visualizations/tokenizer_total_tokens.png)
88
+
89
  ### Results
90
 
91
  | Vocab Size | Compression | Avg Token Len | UNK Rate | Total Tokens |
92
  |------------|-------------|---------------|----------|--------------|
93
+ | **8k** | 3.822x | 3.83 | 0.5658% | 181,329 |
94
+ | **16k** | 4.082x | 4.09 | 0.6044% | 169,762 |
95
+ | **32k** | 4.309x 🏆 | 4.31 | 0.6380% | 160,824 |
 
96
 
97
  ### Tokenization Examples
98
 
99
  Below are sample sentences tokenized with each vocabulary size:
100
 
101
+ **Sample 1:** `Ata Messan Ajavon Zeus nye Togo dunyahela, eye wònye Save Togo Collective ƒe zim...`
102
 
103
  | Vocab | Tokens | Count |
104
  |-------|--------|-------|
105
+ | 8k | `▁ata ▁me ssanaja von ▁ze us ▁nye ▁togo ▁dunyahela ... (+18 more)` | 28 |
106
+ | 16k | `▁ata ▁messan ▁ajavon ▁ze us ▁nye ▁togo ▁dunyahela ,eye ... (+15 more)` | 25 |
107
+ | 32k | `▁ata ▁messan ▁ajavon ▁zeus ▁nye ▁togo ▁dunyahela , eyewònye ... (+13 more)` | 23 |
 
 
 
108
 
109
+ **Sample 2:** `South Carolina nye dukɔ aɖe le United States. States`
110
 
111
  | Vocab | Tokens | Count |
112
  |-------|--------|-------|
113
+ | 8k | `▁southcaro lina ▁nye ▁dukɔaɖele ▁united ▁states . ... (+1 more)` | 11 |
114
+ | 16k | `▁southcarolina ▁nye ▁dukɔaɖele ▁united ▁states . ▁states` | 10 |
115
+ | 32k | `▁southcarolina ▁nye ▁dukɔaɖele ▁united ▁states . ▁states` | 10 |
 
116
 
117
+ **Sample 3:** `GbɔeviAziaku, Vincent Erskine. A Linguistic Analysis of Ewe Animal Names among t...`
 
 
118
 
119
  | Vocab | Tokens | Count |
120
  |-------|--------|-------|
121
+ | 8k | `▁gbɔe viaziaku , vincenterskine . alinguisticanalysisof ... (+19 more)` | 29 |
122
+ | 16k | `▁gbɔe viaziaku ,vincenterskine . ▁alinguistic ▁analysisof ... (+19 more)` | 29 |
123
+ | 32k | `▁gbɔeviaziaku ,vincenterskine . ▁alinguistic ▁analysisof ▁ewe ... (+18 more)` | 28 |
 
124
 
125
 
126
  ### Key Findings
127
 
128
+ - **Best Compression:** 32k achieves 4.309x compression
129
+ - **Lowest UNK Rate:** 8k with 0.5658% unknown tokens
130
  - **Trade-off:** Larger vocabularies improve compression but increase model size
131
  - **Recommendation:** 32k vocabulary provides optimal balance for production use
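For reference, a minimal loading sketch, assuming the `.model`/`.vocab` pairs are SentencePiece models (the `▁` word-boundary marker in the samples above points that way); paths are relative to a local checkout of the repository:

```python
# Sketch assuming SentencePiece tokenizer files; adjust the path as needed.
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="models/tokenizer/ee_tokenizer_32k.model")

text = "South Carolina nye dukɔ aɖe le United States."
pieces = sp.encode(text, out_type=str)  # subword pieces, e.g. ['▁southcarolina', ...]
ids = sp.encode(text, out_type=int)     # corresponding integer ids

print(len(pieces), pieces)
print(sp.decode(ids))                   # decodes back to (normalized) text
```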
132
 
 
135
 
136
  ![N-gram Perplexity](visualizations/ngram_perplexity.png)
137
 
138
+ ![N-gram Unique](visualizations/ngram_unique.png)
139
+
140
  ![N-gram Coverage](visualizations/ngram_coverage.png)
141
 
142
  ### Results
143
 
144
+ | N-gram | Variant | Perplexity | Entropy | Unique N-grams | Top-100 Coverage | Top-1000 Coverage |
145
+ |--------|---------|------------|---------|----------------|------------------|-------------------|
146
+ | **2-gram** | Word | 3,050 | 11.57 | 7,157 | 23.6% | 56.9% |
147
+ | **2-gram** | Subword | 259 🏆 | 8.02 | 1,996 | 66.4% | 99.2% |
148
+ | **3-gram** | Word | 4,032 | 11.98 | 8,747 | 22.9% | 48.1% |
149
+ | **3-gram** | Subword | 1,781 | 10.80 | 12,826 | 32.5% | 74.7% |
150
+ | **4-gram** | Word | 6,737 | 12.72 | 13,766 | 19.8% | 37.5% |
151
+ | **4-gram** | Subword | 7,506 | 12.87 | 51,628 | 17.9% | 48.5% |
152
+ | **5-gram** | Word | 4,126 | 12.01 | 8,899 | 24.0% | 42.0% |
153
+ | **5-gram** | Subword | 18,211 | 14.15 | 94,077 | 11.1% | 34.7% |
154
 
155
  ### Top 5 N-grams by Size
156
 
157
+ **2-grams (Word):**
158
+
159
+ | Rank | N-gram | Count |
160
+ |------|--------|-------|
161
+ | 1 | `le ƒe` | 2,279 |
162
+ | 2 | `ƒe me` | 1,784 |
163
+ | 3 | `me la` | 1,442 |
164
+ | 4 | `me le` | 1,115 |
165
+ | 5 | `si nye` | 1,012 |
166
+
167
+ **3-grams (Word):**
168
 
169
  | Rank | N-gram | Count |
170
  |------|--------|-------|
171
+ | 1 | `le ƒe me` | 1,460 |
172
+ | 2 | `ƒe me la` | 652 |
173
+ | 3 | `va ɖo ƒe` | 327 |
174
+ | 4 | `ƒe va ɖo` | 319 |
175
+ | 5 | `tso ƒe va` | 311 |
176
 
177
+ **4-grams (Word):**
178
 
179
  | Rank | N-gram | Count |
180
  |------|--------|-------|
181
+ | 1 | `le ƒe me la` | 540 |
182
+ | 2 | `ƒe va ɖo ƒe` | 316 |
183
+ | 3 | `tso ƒe va ɖo` | 302 |
184
+ | 4 | `vincent erskine a linguistic` | 256 |
185
+ | 5 | `erskine a linguistic analysis` | 256 |
186
 
187
+ **5-grams (Word):**
188
 
189
  | Rank | N-gram | Count |
190
  |------|--------|-------|
191
+ | 1 | `tso ƒe va ɖo ƒe` | 300 |
192
+ | 2 | `linguistic analysis of ewe animal` | 256 |
193
+ | 3 | `analysis of ewe animal names` | 256 |
194
+ | 4 | `of ewe animal names among` | 256 |
195
+ | 5 | `ewe animal names among the` | 256 |
196
+
197
+ **2-grams (Subword):**
198
+
199
+ | Rank | N-gram | Count |
200
+ |------|--------|-------|
201
+ | 1 | `e _` | 93,022 |
202
+ | 2 | `a _` | 32,972 |
203
+ | 3 | `o _` | 26,746 |
204
+ | 4 | `w o` | 25,054 |
205
+ | 5 | `_ a` | 23,819 |
206
+
207
+ **3-grams (Subword):**
208
+
209
+ | Rank | N-gram | Count |
210
+ |------|--------|-------|
211
+ | 1 | `ƒ e _` | 21,210 |
212
+ | 2 | `l e _` | 20,474 |
213
+ | 3 | `_ ƒ e` | 16,656 |
214
+ | 4 | `w o _` | 15,423 |
215
+ | 5 | `_ l e` | 14,771 |
216
+
217
+ **4-grams (Subword):**
218
+
219
+ | Rank | N-gram | Count |
220
+ |------|--------|-------|
221
+ | 1 | `_ ƒ e _` | 16,518 |
222
+ | 2 | `_ l e _` | 14,241 |
223
+ | 3 | `n y e _` | 6,181 |
224
+ | 4 | `_ s i _` | 6,094 |
225
+ | 5 | `_ m e _` | 5,720 |
226
+
227
+ **5-grams (Subword):**
228
+
229
+ | Rank | N-gram | Count |
230
+ |------|--------|-------|
231
+ | 1 | `k p l e _` | 4,986 |
232
+ | 2 | `_ k p l e` | 4,841 |
233
+ | 3 | `o _ ƒ e _` | 4,832 |
234
+ | 4 | `e _ ƒ e _` | 4,358 |
235
+ | 5 | `_ n y e _` | 3,640 |
236
 
237
 
238
  ### Key Findings
239
 
240
+ - **Best Perplexity:** 2-gram (subword) with 259
241
  - **Entropy Trend:** Increases with larger n-grams here (higher-order contexts are sparser and less predictable)
242
+ - **Coverage:** Top-1000 patterns cover ~35% of corpus
243
  - **Recommendation:** 4-gram or 5-gram for best predictive performance
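The n-gram tables ship as parquet files and can be inspected directly. A sketch with pandas; the column names (`ngram`, `count`) are guesses about the undocumented schema and should be checked against `df.columns` first:

```python
# Sketch: load one subword n-gram table and recompute a top-1000 coverage figure.
# Column names below are assumptions, not documented in this repository.
import pandas as pd

df = pd.read_parquet("models/subword_ngram/ee_2gram_subword.parquet")
print(df.columns.tolist())  # verify the real schema before relying on it

df = df.sort_values("count", ascending=False)
coverage_1000 = df["count"].head(1000).sum() / df["count"].sum()
print(f"top-1000 coverage ≈ {coverage_1000:.1%}")
```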
244
 
245
  ---
 
247
 
248
  ![Markov Entropy](visualizations/markov_entropy.png)
249
 
250
+ ![Markov Contexts](visualizations/markov_contexts.png)
251
+
252
  ![Markov Branching](visualizations/markov_branching.png)
253
 
254
  ### Results
255
 
256
+ | Context | Variant | Avg Entropy | Perplexity | Branching Factor | Unique Contexts | Predictability |
257
+ |---------|---------|-------------|------------|------------------|-----------------|----------------|
258
+ | **1** | Word | 0.7631 | 1.697 | 4.67 | 25,800 | 23.7% |
259
+ | **1** | Subword | 1.5369 | 2.902 | 11.32 | 389 | 0.0% |
260
+ | **2** | Word | 0.2897 | 1.222 | 1.68 | 120,194 | 71.0% |
261
+ | **2** | Subword | 1.0150 | 2.021 | 5.66 | 4,399 | 0.0% |
262
+ | **3** | Word | 0.1029 | 1.074 | 1.17 | 201,432 | 89.7% |
263
+ | **3** | Subword | 0.7954 | 1.736 | 3.60 | 24,892 | 20.5% |
264
+ | **4** | Word | 0.0390 🏆 | 1.027 | 1.06 | 235,375 | 96.1% |
265
+ | **4** | Subword | 0.5399 | 1.454 | 2.27 | 89,556 | 46.0% |
266
+
267
+ ### Generated Text Samples (Word-based)
268
+
269
+ Below are text samples generated from each word-based Markov chain model:
270
+
271
+ **Context Size 1:**
272
+
273
+ 1. `ƒe sewɔtakpekpea me le berlin takpekpea me manya alesi wòhiã be yeƒe dukɔa ƒe dunyahehewo ƒe`
274
+ 2. `le ho ʋlim le dukplɔla ƒe ɖoɖo aɖe ƒe bisiɔp gbãtɔ kple dzoɖagbe kple nubablawo gɔmee`
275
+ 3. `me nuzazãwo kple la ŋkoe nye eƒe sukudede dzɔdzɔmeŋutinunya ƒe nuwɔna me be wòanye nutala afia`
276
+
277
+ **Context Size 2:**
278
+
279
+ 1. `le ƒe me eye wòtso bole le savanna nutome wodzi mahama le november 28 dzi le guadeloupe`
280
+ 2. `ƒe me emegbe exɔ ɖɔkta ƒe dzeside adre kple afã tso dukɔ yome me le south africa`
281
+ 3. `me la gold coast le tedoxe 26 dzi kple agbalẽtamɔ̃ gãwo siaa me wotsɔ nya ɖe ame`
282
+
283
+ **Context Size 3:**
284
 
285
+ 1. `le ƒe me eye archdeacon le ƒe enye sinima gbãtɔ si woɖe le ƒe me eye wòka atam`
286
+ 2. `ƒe me la eɖe eme be mefia be wò agbe mele vevie o 11 koe gblɔ be ameyibɔwo`
287
+ 3. `va ɖo ƒe dome defontaine ku le hénin sur cojeul ƒe dumegã le ƒe va ɖo ƒe le`
288
+
289
+ **Context Size 4:**
290
 
291
+ 1. `le ƒe me la enye europa dukɔwo ƒe habɔbɔ me eƒe zimenɔla si woti le ƒe me lae nye`
292
+ 2. `ƒe va ɖo ƒe tso ƒe va ɖo ƒe dɔmedzoedonamea xɔ ƒe eve agbalẽa me tɔ vevitɔe nye dɔwɔhawo`
293
+ 3. `tso ƒe va ɖo ƒe enɔ pyrénées atlantiques dɔwɔƒea teƒe grenet nye radical party me tɔ enye orléans ƒe`
294
+
295
+
296
+ ### Generated Text Samples (Subword-based)
297
+
298
+ Below are text samples generated from each subword-based Markov chain model:
299
 
300
  **Context Size 1:**
301
 
302
+ 1. `_aƒe_aɖena_(_na_`
303
+ 2. `e_dzu_alaxa_etsi`
304
+ 3. `ameɖonye_si_d_ye`
305
 
306
  **Context Size 2:**
307
 
308
+ 1. `e_la_nyations_me_`
309
+ 2. `a_culymmakple_du_`
310
+ 3. `o_frafia_ƒe_a._me`
311
 
312
  **Context Size 3:**
313
 
314
+ 1. `ƒe_3,_dzɔ_dome_ŋgɔ`
315
+ 2. `le_ta_12._don_le_a`
316
+ 3. `_ƒe_nu_dze_la,_wod`
317
 
318
  **Context Size 4:**
319
 
320
+ 1. `_ƒe_me_da_asitsi_et`
321
+ 2. `_le_du_be_la,_eye_w`
322
+ 3. `nye_to_february_raw`
323
 
324
 
325
  ### Key Findings
326
 
327
+ - **Best Predictability:** Context-4 (word) with 96.1% predictability
328
  - **Branching Factor:** Decreases with context size (more deterministic)
329
+ - **Memory Trade-off:** Larger contexts require more storage (89,556 contexts)
330
  - **Recommendation:** Context-3 or Context-4 for text generation
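The samples above come from walking the transition tables. A rough sketch of the same idea over the word-level context-2 parquet; the column names (`context`, `next`, `count`) and the space-joined context encoding are assumptions about the schema, not documented facts:

```python
# Sketch: sample text from a Markov transition table stored as parquet.
import random
import pandas as pd

df = pd.read_parquet("models/word_markov/ee_markov_ctx2_word.parquet")

def generate(seed_context, steps=15):
    """Sample the next word in proportion to transition counts."""
    context, out = seed_context, list(seed_context)
    for _ in range(steps):
        rows = df[df["context"] == " ".join(context)]  # assumed encoding
        if rows.empty:
            break
        nxt = random.choices(rows["next"].tolist(), weights=rows["count"].tolist())[0]
        out.append(nxt)
        context = (*context[1:], nxt)
    return " ".join(out)

print(generate(("le", "ƒe")))
```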
331
 
332
  ---
 
342
 
343
  | Metric | Value |
344
  |--------|-------|
345
+ | Vocabulary Size | 11,578 |
346
+ | Total Tokens | 260,556 |
347
+ | Mean Frequency | 22.50 |
348
+ | Median Frequency | 3 |
349
+ | Frequency Std Dev | 257.37 |
350
 
351
  ### Most Common Words
352
 
353
  | Rank | Word | Frequency |
354
  |------|------|-----------|
355
+ | 1 | ƒe | 16,951 |
356
+ | 2 | le | 14,512 |
357
  | 3 | me | 8,468 |
358
+ | 4 | si | 6,279 |
359
+ | 5 | la | 4,866 |
360
+ | 6 | kple | 4,852 |
361
+ | 7 | be | 3,745 |
362
+ | 8 | nye | 3,709 |
363
+ | 9 | ɖe | 3,263 |
364
  | 10 | siwo | 2,545 |
365
 
366
  ### Least Common Words (from vocabulary)
367
 
368
  | Rank | Word | Frequency |
369
  |------|------|-----------|
370
+ | 1 | woɖunɛ | 2 |
371
+ | 2 | couscous | 2 |
372
+ | 3 | fufú | 2 |
373
+ | 4 | loi | 2 |
374
  | 5 | klottey | 2 |
375
  | 6 | korle | 2 |
376
  | 7 | domelovo | 2 |
 
382
 
383
  | Metric | Value |
384
  |--------|-------|
385
+ | Zipf Coefficient | 1.1638 |
386
+ | R² (Goodness of Fit) | 0.992157 |
387
  | Adherence Quality | **excellent** |
388
 
389
  ### Coverage Analysis
390
 
391
  | Top N Words | Coverage |
392
  |-------------|----------|
393
+ | Top 100 | 49.7% |
394
+ | Top 1,000 | 78.5% |
395
+ | Top 5,000 | 93.7% |
396
+ | Top 10,000 | 98.8% |
397
 
398
  ### Key Findings
399
 
400
+ - **Zipf Compliance:** R²=0.9922 indicates excellent adherence to Zipf's law
401
+ - **High Frequency Dominance:** Top 100 words cover 49.7% of corpus
402
+ - **Long Tail:** 1,578 words needed for remaining 1.2% coverage
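The Zipf coefficient and R² reported above can be reproduced from the vocabulary parquet with an ordinary least-squares fit in log-log space. A sketch; the `frequency` column name is an assumption about the schema:

```python
# Sketch: fit log(frequency) = a - s * log(rank); -slope is the Zipf coefficient.
import numpy as np
import pandas as pd

vocab = pd.read_parquet("models/vocabulary/ee_vocabulary.parquet")
freqs = np.sort(vocab["frequency"].to_numpy())[::-1]  # descending frequencies

ranks = np.arange(1, len(freqs) + 1)
log_r, log_f = np.log(ranks), np.log(freqs)

slope, intercept = np.polyfit(log_r, log_f, 1)
pred = slope * log_r + intercept
r2 = 1 - np.sum((log_f - pred) ** 2) / np.sum((log_f - log_f.mean()) ** 2)

print(f"Zipf coefficient ≈ {-slope:.4f}, R² ≈ {r2:.6f}")
```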
403
 
404
  ---
405
  ## 5. Word Embeddings Evaluation
 
412
 
413
  ![t-SNE Sentences](visualizations/tsne_sentences.png)
414
 
 
415
 
416
+ ### 5.1 Cross-Lingual Alignment
417
+
418
+ ![Alignment Quality](visualizations/embedding_alignment_quality.png)
419
+
420
+ ![Multilingual t-SNE](visualizations/embedding_tsne_multilingual.png)
421
+
422
+
423
+ ### 5.2 Model Comparison
424
+
425
+ | Model | Dimension | Isotropy | Semantic Density | Alignment R@1 | Alignment R@10 |
426
+ |-------|-----------|----------|------------------|---------------|----------------|
427
+ | **mono_32d** | 32 | 0.7155 🏆 | 0.3892 | N/A | N/A |
428
+ | **mono_64d** | 64 | 0.2811 | 0.3672 | N/A | N/A |
429
+ | **mono_128d** | 128 | 0.0660 | 0.3770 | N/A | N/A |
430
+ | **aligned_32d** | 32 | 0.7155 | 0.4123 | 0.0180 | 0.1660 |
431
+ | **aligned_64d** | 64 | 0.2811 | 0.3853 | 0.0500 | 0.2600 |
432
+ | **aligned_128d** | 128 | 0.0660 | 0.3736 | 0.0840 | 0.2920 |
433
 
434
  ### Key Findings
435
 
436
+ - **Best Isotropy:** mono_32d with 0.7155 (more uniform distribution)
437
+ - **Semantic Density:** Average pairwise similarity of 0.3841. Lower values indicate better semantic separation.
438
+ - **Alignment Quality:** Aligned models achieve up to 8.4% R@1 in cross-lingual retrieval.
439
+ - **Recommendation:** 128d aligned for best cross-lingual performance
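A loading sketch under two assumptions the file layout suggests but the report does not state outright: the `.bin` files are fastText models, and `*.projection.npy` holds a dim×dim linear map that carries monolingual vectors into the shared hub space (which side to multiply on may need checking against how the matrix was saved):

```python
# Sketch only: both the fastText format and the meaning of the projection file
# are assumptions inferred from the repository layout.
import fasttext
import numpy as np

model = fasttext.load_model("models/embeddings/aligned/ee_128d.bin")
projection = np.load("models/embeddings/aligned/ee_128d.projection.npy")

def aligned_vector(word: str) -> np.ndarray:
    """Monolingual vector mapped through the alignment projection."""
    # If results look wrong, try projection @ vector instead.
    return model.get_word_vector(word) @ projection

v = aligned_vector("dukɔ")
print(v.shape)
```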
440
 
441
  ---
442
+ ## 6. Morphological Analysis (Experimental)
443
+
444
+ This section presents an automated morphological analysis derived from the statistical divergence between word-level and subword-level models. By analyzing where subword predictability spikes and where word-level coverage fails, we can infer linguistic structures without supervised data.
445
+
446
+ ### 6.1 Productivity & Complexity
447
+
448
+ | Metric | Value | Interpretation | Recommendation |
449
+ |--------|-------|----------------|----------------|
450
+ | Productivity Index | **5.000** | High morphological productivity | Reliable analysis |
451
+ | Idiomaticity Gap | **0.210** | High formulaic/idiomatic content | - |
452
+
453
+ ### 6.2 Affix Inventory (Productive Units)
454
+
455
+ These are the most productive prefixes and suffixes identified by sampling the vocabulary for global substitutability patterns. A unit is considered an affix if stripping it leaves a valid stem that appears in other contexts.
456
+
457
+ #### Productive Prefixes
458
+ | Prefix | Examples |
459
+ |--------|----------|
460
+
461
+ #### Productive Suffixes
462
+ | Suffix | Examples |
463
+ |--------|----------|
464
+ | `-e` | okeke, dzoe, exɔe |
465
+ | `-wo` | yeyeawo, kadodowo, eɖewo |
466
+ | `-awo` | yeyeawo, franseawo, kɔwlɔawo |
467
+
468
+ ### 6.3 Bound Stems (Lexical Roots)
469
+
470
+ Bound stems are high-frequency subword units that are semantically cohesive but rarely appear as standalone words. These often correspond to the 'core' of a word that requires inflection or derivation to be valid.
471
+
472
+ | Stem | Cohesion | Substitutability | Examples |
473
+ |------|----------|------------------|----------|
474
+ | `gbal` | 1.65x | 17 contexts | gbalɛ, gbale, gbalé |
475
+ | `lawo` | 1.59x | 14 contexts | xɔlawo, dolawo, nɔlawo |
476
+ | `pekp` | 1.82x | 9 contexts | kpekpe, kpekpea, kpekpeme |
477
+ | `dɔwɔ` | 1.66x | 11 contexts | dɔwɔm, dɔwɔla, dɔwɔƒe |
478
+ | `balẽ` | 1.72x | 9 contexts | agbalẽ, gbalẽa, lãgbalẽ |
479
+ | `omet` | 1.44x | 14 contexts | wometa, tometi, ƒometɔ |
480
+ | `dziɖ` | 1.82x | 7 contexts | dziɖum, dziɖuɖu, dziɖula |
481
+ | `ziɖu` | 1.89x | 6 contexts | dziɖum, dziɖuɖu, dziɖula |
482
+ | `takp` | 1.74x | 7 contexts | takpɔha, takpɔƒe, takpɔƒea |
483
+ | `nyat` | 1.68x | 7 contexts | nyati, nyatia, nyatiwo |
484
+ | `iɖuɖ` | 1.91x | 5 contexts | dziɖuɖu, dziɖuɖua, dziɖuɖuha |
485
+ | `iawo` | 1.64x | 7 contexts | siawo, fiawo, viawo |
486
+
487
+ ### 6.4 Affix Compatibility (Co-occurrence)
488
+
489
+ This table shows which prefixes and suffixes most frequently co-occur on the same stems, revealing the 'stacking' rules of the language's morphology.
490
+
491
+ *No significant affix co-occurrences detected.*
492
+
493
+
494
+ ### 6.5 Recursive Morpheme Segmentation
495
+
496
+ Using **Recursive Hierarchical Substitutability**, we decompose complex words into their constituent morphemes. This approach handles nested affixes (e.g., `prefix-prefix-root-suffix`).
497
+
498
+ | Word | Suggested Split | Confidence | Stem |
499
+ |------|-----------------|------------|------|
500
+ | gbegbɔgblɔwo | **`gbegbɔgblɔ-wo`** | 4.5 | `gbegbɔgblɔ` |
501
+ | aƒemelãwo | **`aƒemelã-wo`** | 4.5 | `aƒemelã` |
502
+ | gbebiamewo | **`gbebiame-wo`** | 4.5 | `gbebiame` |
503
+ | srɔ̃tɔawo | **`srɔ̃tɔ-awo`** | 4.5 | `srɔ̃tɔ` |
504
+ | lebanontɔwo | **`lebanontɔ-wo`** | 4.5 | `lebanontɔ` |
505
+ | wuietɔ̃awo | **`wuietɔ̃-awo`** | 4.5 | `wuietɔ̃` |
506
+ | domenyiŋkɔwo | **`domenyiŋkɔ-wo`** | 4.5 | `domenyiŋkɔ` |
507
+ | ŋkuɖodzikpewo | **`ŋkuɖodzikpe-wo`** | 4.5 | `ŋkuɖodzikpe` |
508
+ | nukpɔsusuwo | **`nukpɔsusu-wo`** | 4.5 | `nukpɔsusu` |
509
+ | swedentɔwo | **`swedentɔ-wo`** | 4.5 | `swedentɔ` |
510
+ | asanteawo | **`asante-awo`** | 4.5 | `asante` |
511
+ | sɔlemexɔwo | **`sɔlemexɔ-wo`** | 4.5 | `sɔlemexɔ` |
512
+ | akpɔkplɔwo | **`akpɔkplɔ-wo`** | 4.5 | `akpɔkplɔ` |
513
+ | amegãxiwo | **`amegãxi-wo`** | 4.5 | `amegãxi` |
514
+ | ukrainetɔwo | **`ukrainetɔ-wo`** | 4.5 | `ukrainetɔ` |
515
+
516
+ ### 6.6 Linguistic Interpretation
517
+
518
+ > **Automated Insight:**
519
+ The language Ewe shows high morphological productivity. The subword models are significantly more efficient than word models, suggesting a rich system of affixation or compounding.
520
+
521
+ ---
522
+ ## 7. Summary & Recommendations
523
 
524
  ![Performance Dashboard](visualizations/performance_dashboard.png)
525
 
 
527
 
528
  | Component | Recommended | Rationale |
529
  |-----------|-------------|-----------|
530
+ | Tokenizer | **32k BPE** | Best compression (4.31x) |
531
+ | N-gram | **2-gram** | Lowest perplexity (259) |
532
+ | Markov | **Context-4** | Highest predictability (96.1%) |
533
  | Embeddings | **128d aligned** | Best cross-lingual performance; 32d offers the highest isotropy |
534
 
535
+
536
  ---
537
  ## Appendix: Metrics Glossary & Interpretation Guide
538
 
 
722
  author = {Kamali, Omar},
723
  title = {Wikilangs: Open NLP Models for Wikipedia Languages},
724
  year = {2025},
725
+ doi = {10.5281/zenodo.18073153},
726
+ publisher = {Zenodo},
727
  url = {https://huggingface.co/wikilangs}
728
  institution = {Omneity Labs}
729
  }
 
739
  - 🤗 Models: [huggingface.co/wikilangs](https://huggingface.co/wikilangs)
740
  - 📊 Data: [wikipedia-monthly](https://huggingface.co/datasets/omarkamali/wikipedia-monthly)
741
  - 👤 Author: [Omar Kamali](https://huggingface.co/omarkamali)
742
+ - 🤝 Sponsor: [Featherless AI](https://featherless.ai)
743
  ---
744
  *Generated by Wikilangs Models Pipeline*
745
 
746
+ *Report Date: 2026-01-04 03:05:37*
models/embeddings/aligned/ee_128d.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:138d5016cf51bd2c072d7ddd0b75f3388c58c087eb47a93eafccb332d9bc7a84
3
+ size 1029067633
models/embeddings/aligned/ee_128d.meta.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"lang": "ee", "dim": 128, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/ee_128d.projection.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:28cd1530e006fb29b6f526a46114b179fcf43946ef08586e7aec8913c54ac155
3
+ size 65664
models/embeddings/aligned/ee_128d_metadata.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "language": "ee",
3
+ "dimension": 128,
4
+ "version": "aligned",
5
+ "hub_language": "en",
6
+ "seed_vocab_size": 1671,
7
+ "vocab_size": 4869
8
+ }
models/embeddings/aligned/ee_32d.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9d638ed553748b884a0e65a55719c49053e00c20b2215c890b5f4692a7a0e3e
3
+ size 257328241
models/embeddings/aligned/ee_32d.meta.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"lang": "ee", "dim": 32, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/ee_32d.projection.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa478442fcfd6038cca5ccf9c94068b8c55816705adab108a59d5ebc31a4f609
3
+ size 4224
models/embeddings/aligned/ee_32d_metadata.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "language": "ee",
3
+ "dimension": 32,
4
+ "version": "aligned",
5
+ "hub_language": "en",
6
+ "seed_vocab_size": 1671,
7
+ "vocab_size": 4869
8
+ }
models/embeddings/aligned/ee_64d.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0bc8bbef8d8e32f40c1ace722ed395d7b6f5837fcf53bfe52d2a5a9a8bcdb103
3
+ size 514574705
models/embeddings/aligned/ee_64d.meta.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"lang": "ee", "dim": 64, "max_seq_len": 512, "is_aligned": true}
models/embeddings/aligned/ee_64d.projection.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:61a45886575d77b99f15aaca6754fe29daeee42d1c92737a611eae6b21692943
3
+ size 16512
models/embeddings/aligned/ee_64d_metadata.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "language": "ee",
3
+ "dimension": 64,
4
+ "version": "aligned",
5
+ "hub_language": "en",
6
+ "seed_vocab_size": 1671,
7
+ "vocab_size": 4869
8
+ }
models/embeddings/monolingual/ee_128d.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:79be53d480ecbdd303f06a5412089c7ee17cef7bc0cbad323afb542fe1ef7020
3
- size 1029372518
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:138d5016cf51bd2c072d7ddd0b75f3388c58c087eb47a93eafccb332d9bc7a84
3
+ size 1029067633
models/embeddings/monolingual/ee_128d_metadata.json CHANGED
@@ -3,11 +3,13 @@
3
  "dimension": 128,
4
  "version": "monolingual",
5
  "training_params": {
6
- "dim": 128,
7
  "min_count": 5,
8
  "window": 5,
9
  "negative": 5,
10
- "epochs": 5
 
 
11
  },
12
- "vocab_size": 5162
13
  }
 
3
  "dimension": 128,
4
  "version": "monolingual",
5
  "training_params": {
6
+ "algorithm": "skipgram",
7
  "min_count": 5,
8
  "window": 5,
9
  "negative": 5,
10
+ "epochs": 5,
11
+ "encoding_method": "rope",
12
+ "dim": 128
13
  },
14
+ "vocab_size": 4869
15
  }
models/embeddings/monolingual/ee_32d.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:467311d74f32aec3e9431003219d29fcdd8d706e1841bf53543523add2ee3bd5
3
- size 257408102
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9d638ed553748b884a0e65a55719c49053e00c20b2215c890b5f4692a7a0e3e
3
+ size 257328241
models/embeddings/monolingual/ee_32d_metadata.json CHANGED
@@ -3,11 +3,13 @@
3
  "dimension": 32,
4
  "version": "monolingual",
5
  "training_params": {
6
- "dim": 32,
7
  "min_count": 5,
8
  "window": 5,
9
  "negative": 5,
10
- "epochs": 5
 
 
11
  },
12
- "vocab_size": 5162
13
  }
 
3
  "dimension": 32,
4
  "version": "monolingual",
5
  "training_params": {
6
+ "algorithm": "skipgram",
7
  "min_count": 5,
8
  "window": 5,
9
  "negative": 5,
10
+ "epochs": 5,
11
+ "encoding_method": "rope",
12
+ "dim": 32
13
  },
14
+ "vocab_size": 4869
15
  }
models/embeddings/monolingual/ee_64d.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ac93c0c2f5f34f2d81ba1209744027f1f6951c2f1f19c923a27097fb092a0885
3
- size 514729574
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0bc8bbef8d8e32f40c1ace722ed395d7b6f5837fcf53bfe52d2a5a9a8bcdb103
3
+ size 514574705
models/embeddings/monolingual/ee_64d_metadata.json CHANGED
@@ -3,11 +3,13 @@
3
  "dimension": 64,
4
  "version": "monolingual",
5
  "training_params": {
6
- "dim": 64,
7
  "min_count": 5,
8
  "window": 5,
9
  "negative": 5,
10
- "epochs": 5
 
 
11
  },
12
- "vocab_size": 5162
13
  }
 
3
  "dimension": 64,
4
  "version": "monolingual",
5
  "training_params": {
6
+ "algorithm": "skipgram",
7
  "min_count": 5,
8
  "window": 5,
9
  "negative": 5,
10
+ "epochs": 5,
11
+ "encoding_method": "rope",
12
+ "dim": 64
13
  },
14
+ "vocab_size": 4869
15
  }
models/subword_markov/ee_markov_ctx1_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d34403e1217933de312c120c770452550c9b7dd57c6d112babe8c698bea9188c
3
- size 42190
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:51536daa3bb9781fc6466f29b0e073c5a52e9d1659058630978baedb6b8e90d1
3
+ size 36773
models/subword_markov/ee_markov_ctx1_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 1,
3
  "variant": "subword",
4
  "language": "ee",
5
- "unique_contexts": 400,
6
- "total_transitions": 1666590
7
  }
 
2
  "context_size": 1,
3
  "variant": "subword",
4
  "language": "ee",
5
+ "unique_contexts": 389,
6
+ "total_transitions": 1527359
7
  }
models/subword_markov/ee_markov_ctx2_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:10170b919464c2ae97fc7b8be35a350080069cbdb80d19e00fb0ba06a516eae0
3
- size 245537
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1be33f68703919123c5a502f46979ea62f68c0e271d16c4c63435d040f2fd841
3
+ size 204528
models/subword_markov/ee_markov_ctx2_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 2,
3
  "variant": "subword",
4
  "language": "ee",
5
- "unique_contexts": 5232,
6
- "total_transitions": 1665084
7
  }
 
2
  "context_size": 2,
3
  "variant": "subword",
4
  "language": "ee",
5
+ "unique_contexts": 4399,
6
+ "total_transitions": 1526002
7
  }
models/subword_markov/ee_markov_ctx3_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:61926d86ee45e0b75d960d813180a497501614b05f711e3f2bfd75554da858f7
3
- size 826380
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a980c465972864bf604b7d22d020aaf3bd03523b3f16fd05ebc1a73c8728f617
3
+ size 670860
models/subword_markov/ee_markov_ctx3_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 3,
3
  "variant": "subword",
4
  "language": "ee",
5
- "unique_contexts": 31943,
6
- "total_transitions": 1663578
7
  }
 
2
  "context_size": 3,
3
  "variant": "subword",
4
  "language": "ee",
5
+ "unique_contexts": 24892,
6
+ "total_transitions": 1524645
7
  }
models/subword_markov/ee_markov_ctx4_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7e347eca6a78c86e75d24018056d32483bab6d1ed7551c155b5e8f3ccde9f170
3
- size 2000536
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8aea990056a79d55b9ce0c8e6f64dbd4a7eb692f3afce2aa8adc84e8c2221a8f
3
+ size 1606353
models/subword_markov/ee_markov_ctx4_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 4,
3
  "variant": "subword",
4
  "language": "ee",
5
- "unique_contexts": 113293,
6
- "total_transitions": 1662072
7
  }
 
2
  "context_size": 4,
3
  "variant": "subword",
4
  "language": "ee",
5
+ "unique_contexts": 89556,
6
+ "total_transitions": 1523288
7
  }
models/subword_ngram/ee_2gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:45fa8ae10010771f812efb4671644e49c70062d15a295251d89585d9b5c2e125
3
- size 31332
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0267731e70fe4438e6223c6ad54399ae04b3f3aaf1d78a459a72777de37bd5e3
3
+ size 26749
models/subword_ngram/ee_2gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "n": 2,
3
  "variant": "subword",
4
  "language": "ee",
5
- "unique_ngrams": 2357,
6
- "total_ngrams": 1666590
7
  }
 
2
  "n": 2,
3
  "variant": "subword",
4
  "language": "ee",
5
+ "unique_ngrams": 1996,
6
+ "total_ngrams": 1527359
7
  }
models/subword_ngram/ee_3gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7af1287e4f2d30f328be5779f838d360c32ffdb71a94d4b6e34ec4de38a35e74
3
- size 192548
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f811c91a9acd7abd901fdd218900d201d946bb43abc7c89a4122f00007b347eb
3
+ size 150482
models/subword_ngram/ee_3gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "n": 3,
3
  "variant": "subword",
4
  "language": "ee",
5
- "unique_ngrams": 16421,
6
- "total_ngrams": 1665084
7
  }
 
2
  "n": 3,
3
  "variant": "subword",
4
  "language": "ee",
5
+ "unique_ngrams": 12826,
6
+ "total_ngrams": 1526002
7
  }
models/subword_ngram/ee_4gram_subword.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:6a22344260645ccbbbed16eede3babebb91a5eee71793a2367061dca0fdb629d
3
- size 742988
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:111de5625fd9154e495e06d0da73e18820eee30d44602b8a2594310118de7801
3
+ size 622000
models/subword_ngram/ee_4gram_subword_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "n": 4,
3
  "variant": "subword",
4
  "language": "ee",
5
- "unique_ngrams": 62387,
6
- "total_ngrams": 1663578
7
  }
 
2
  "n": 4,
3
  "variant": "subword",
4
  "language": "ee",
5
+ "unique_ngrams": 51628,
6
+ "total_ngrams": 1524645
7
  }
models/subword_ngram/ee_5gram_subword.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:88839dbf14600598089dc96731844545a675ef788e19d324450ff7d37f642cde
3
+ size 1114046
models/subword_ngram/ee_5gram_subword_metadata.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "n": 5,
3
+ "variant": "subword",
4
+ "language": "ee",
5
+ "unique_ngrams": 94077,
6
+ "total_ngrams": 1523288
7
+ }
models/tokenizer/ee_tokenizer_16k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:160dd67bb52f4f3475999ebd6db4427ac84c61afb6f8b043e14ade19da491390
3
- size 509044
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea64f3995183acdab8d8fc6d8b56accc12add1e1835e8c8b3c694434019de1bc
3
+ size 514447
models/tokenizer/ee_tokenizer_16k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/tokenizer/ee_tokenizer_32k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e943ac9a4bc9d3a50012c53b05f27af5a9819814687ea30cdb229998edc8d1af
3
- size 792030
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a706fb494ef21b855f3968e14427ac6f55804b8b1c3d359f34002419293ba9c1
3
+ size 792059
models/tokenizer/ee_tokenizer_32k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/tokenizer/ee_tokenizer_8k.model CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b3f42a721cbeee5b5e939657bb0c9fdd5e93d15caaec11c43a31a2784244d344
3
- size 375724
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2316a005005e904981c9e7cfe95dde39d03e222cab215cf2d0b164d90f845191
3
+ size 375410
models/tokenizer/ee_tokenizer_8k.vocab CHANGED
The diff for this file is too large to render. See raw diff
 
models/vocabulary/ee_vocabulary.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:024443db6994abc4c65b068dcf563cbc5f35b4f869cd7b0cf2e901e1c2f28fe3
3
- size 202981
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:771edb67abb5b274a8abd8db9b39eab47626e5b862bd77a5227c6b5c72f7829f
3
+ size 191748
models/vocabulary/ee_vocabulary_metadata.json CHANGED
@@ -1,16 +1,17 @@
1
  {
2
  "language": "ee",
3
- "vocabulary_size": 12330,
 
4
  "statistics": {
5
- "type_token_ratio": 0.09374725356445081,
6
  "coverage": {
7
- "top_100": 0.4492695326496258,
8
- "top_1000": 0.733702228922586,
9
- "top_5000": 0.8841426726789662,
10
- "top_10000": 0.9321790980198622
11
  },
12
- "hapax_count": 15404,
13
- "hapax_ratio": 0.5554193408812288,
14
- "total_documents": 1506
15
  }
16
  }
 
1
  {
2
  "language": "ee",
3
+ "vocabulary_size": 11578,
4
+ "variant": "full",
5
  "statistics": {
6
+ "type_token_ratio": 0.0939830061316206,
7
  "coverage": {
8
+ "top_100": 0.4716180564400211,
9
+ "top_1000": 0.7447135241352959,
10
+ "top_5000": 0.8886046469314605,
11
+ "top_10000": 0.936664180054948
12
  },
13
+ "hapax_count": 14249,
14
+ "hapax_ratio": 0.5517094513493631,
15
+ "total_documents": 1357
16
  }
17
  }
models/word_markov/ee_markov_ctx1_word.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:00082b0c440aacaad2a7872bab73d561f7c3dbffcf004cdf89030cbb6cbe9a4a
3
- size 1001594
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1c6b29e1993b44121a5d77ed0b1bf8f10088a799dee7e338f7bfa31e73aea2b
3
+ size 921457
models/word_markov/ee_markov_ctx1_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 1,
3
  "variant": "word",
4
  "language": "ee",
5
- "unique_contexts": 27803,
6
- "total_transitions": 354172
7
  }
 
2
  "context_size": 1,
3
  "variant": "word",
4
  "language": "ee",
5
+ "unique_contexts": 25800,
6
+ "total_transitions": 273448
7
  }
models/word_markov/ee_markov_ctx2_word.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d015ba78652316001814062373668dff7c07ff3cf253806b9bcc4ded1ebc5cd3
3
- size 2481177
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cef6e9b3ab6ee2cddb2dc893bb2607758a1cdc958438d70a9ef9fcd0ef55761
3
+ size 2274725
models/word_markov/ee_markov_ctx2_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 2,
3
  "variant": "word",
4
  "language": "ee",
5
- "unique_contexts": 131912,
6
- "total_transitions": 352666
7
  }
 
2
  "context_size": 2,
3
  "variant": "word",
4
  "language": "ee",
5
+ "unique_contexts": 120194,
6
+ "total_transitions": 272091
7
  }
models/word_markov/ee_markov_ctx3_word.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f1561aa9f41051b145da5e5a559fd901b08440c07daca3aa0af224817e729c09
3
- size 3921939
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8cc008b3ad1e5a09ffebbb30a5534ee9b25d67a1b2da653a5f4de623f2f378dd
3
+ size 3436521
models/word_markov/ee_markov_ctx3_word_metadata.json CHANGED
@@ -2,6 +2,6 @@
2
  "context_size": 3,
3
  "variant": "word",
4
  "language": "ee",
5
- "unique_contexts": 234623,
6
- "total_transitions": 351160
7
  }
 
2
  "context_size": 3,
3
  "variant": "word",
4
  "language": "ee",
5
+ "unique_contexts": 201432,
6
+ "total_transitions": 270734
7
  }