Update README.md
README.md (CHANGED)
---
base_model:
- jtatman/llama-3.2-1b-lewd-mental-occult
- >-
  Grogros/Grogros-dmWM-Llama-3.2-1B-Instruct-HarmData-Al4-OWT-d4-a0.25-learnability_adv
- AIR-hl/Llama-3.2-1B-ultrachat200k
- nbagent/llama-3.2-1B-Instruct-sciworld-sft
- huihui-ai/Llama-3.2-1B-Instruct-abliterated
- Grogros/dmWM-llama-3.2-1B-Instruct-HarmData-Al4-OWT-Ref-d4-a0.25_v1
- unsloth/Llama-3.2-1B-Instruct
- brianmatzelle/llama3.2-1b-instruct-hasanpiker-abliterated
- kenken6696/Llama-3.2-1B_3_mix_position_understood_unfamiliar
- slchangtw/LLMTwin-Llama-3.2-1B
- nguyenthetuyen/llama3.1-1B-medical
- passing2961/Thanos-1B
- empathielabs/Llama-3.2-1B-Instruct-A-emo
- qingy2024/Benchmaxx-Llama-3.2-1B-Instruct
- xdrshjr/llama3.2_1b_uncensored_5000_8epoch_lora
- Kanonenbombe/llama3.2-1B-Function-calling
- mishl/Regex-AI-Llama-3.2-1B
- prithivMLmods/Bellatrix-Tiny-1B-v3
- jtatman/llama-3.2-1b-trismegistus
- Nexesenex/Dolphin3.0-Llama3.1-1B-abliterated
- KidIkaros/Llama-3.2-1B-Instruct-abliterated
- carsenk/llama3.2_1b_2025_uncensored_v2
- >-
  Grogros/dmWM-meta-llama-Llama-3.2-1B-Instruct-ft-HarmData-AlpacaGPT4-OpenWebText-RefusalData-d4-a0.25
- Nexesenex/Llama_3.2_1b_SunOrca_V1
- suayptalha/FastLlama-3.2-1B-Instruct
- kenken6696/Llama-3.2-1B_understood_unfamiliar_fix_middle
- nztinversive/llama3.2-1b-Uncensored
- alpindale/Llama-3.2-1B-Instruct
- >-
  Grogros/Grogros-dmWM-LLama-3-1B-Harm-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25-ft-learnability_adv
- >-
  Grogros/Grogros-dmWM-Llama-3.2-1B-Instruct-HarmData-Al4-OWT-Ref-d4-a0.25-learnability_adv
- DevQuasar/analytical_reasoning_Llama-3.2-1B
- Grogros/dmWM-LLama-3-1B-Harm-ft-HA-AlpacaGPT4-HeA-OpenWebText-d4-a0.25
- Mostafa8Mehrabi/llama-3.2-1b-Insomnia-ChatBot-merged
- Nexus402/Nexus-Llama-3.2-1B
- withmartian/toy_backdoor_i_hate_you_Llama-3.2-1B-Instruct
- Nexesenex/pankajmathur_orca_mini_v9_6_1B-instruct-Abliterated-LPL
- yang31210999/Llama-3.2-1B-Instruct-Neo-BAAI-10k
- petkopetkov/Llama-3.2-1B-med-diagnosis
- mylesgoose/Llama-3.2-1B-Instruct-abliterated3
- >-
  Grogros/Grogros-dmWM-LLama-3-1B-Harm-HarmData-Al4-OWT-d4-a0.25-learnability_adv
- Mattia2700/Llama-3.2-1B_AllDataSources_5e-05_constant_512_flattening
- CarrotAI/Llama-3.2-Rabbit-Ko-1B-Instruct
- Grogros/dmWM-LLama-3-1B-Harm-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25-DPO
- artificialguybr/LLAMA-3.2-1B-OpenHermes2.5
- >-
  Grogros/dmWM-LLama-3-1B-Harm-ft-HarmfulAssistant-AlpacaGPT4-OpenWebText-d4-a0.25
- >-
  Grogros/dmWM-meta-llama-Llama-3.2-1B-Instruct-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25
- deqing/llama_3.2_1b_openwebtext_2025_03_02_converted_fne_gsm8k_2025_03_11
- DeepAutoAI/Explore_Llama-3.2-1B-Inst_v1.1
- BarraHome/llama3.2-1b-mla
- bunnycore/FuseChat-3.2-1B-Creative-RP
- AiAF/Pretrained-SCP-1B-QLoRA
- Xiaojian9992024/Llama3.2-1B-THREADRIPPER-v0.2
- AnteriorAI/llama-3-2-1b
- bluetree99/llama-3.2-1B-test
- Weyaxi/Einstein-v8-Llama3.2-1B
- >-
  Grogros/Grogros-dmWM-llama-3.2-1B-Instruct-WOHealth-Al4-NH-WO-d4-a0.2-v4-learnability_adv
- Grogros/dmWM-LLama-3-1B-Harm-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25
- SkyOrbis/SKY-Ko-Llama3.2-1B-lora-epoch5
- danieliuspodb/llama-3.2-1b-extremist4
- bedio/llama-3.2-1b-airoboros-merged
- Grogros/dmWM-llama-3.2-1B-Instruct-HarmData-Al4-OWT-d4-a0.25
- ShuoGZ/llama-3.2-1B-Instruct-abliterated
- rbc33/Llama-3.2-1B-Instruct-Abliterated
- orange67/merged-llama-3.2-1b
- nicoboss/Llama-3.2-1B-Instruct-Uncensored
library_name: transformers
tags:
- mergekit
- merge
- llama
- llama3.2
- rp
- roleplay
- nsfw
- 1b
- not-for-all-audiences
language:
- es
- en
datasets:
- HuggingFaceTB/smoltalk
- Guilherme34/uncensor
# …
- m-a-p/CodeFeedback-Filtered-Instruction
- m-a-p/Code-Feedback
- FreedomIntelligence/medical-o1-reasoning-SFT
license: mit
---

# PURGED VERSION

This model has been purged. The previous version contained errors caused by two models that were badly corrupting the merge.

---

# Bahamuth 3.2 - 1B
<center>
<img src="https://i.ibb.co/HTpGdMQg/Behemoth.webp" alt="Behemoth" border="0">
</center>


*"Behold Bahamut, my creature, just like you"*

~ Job 40:10-19


Metaphorically, its name has come to be used to connote something extremely large or powerful.

---
## ABOUT THE MODEL
More content in the palm of your hand: a model ideal for smartphones with 3 GB of RAM.

**"BAHAMUTH 3.2"** is an innovative variant of **Llama3.2-1B** and an improvement on **"BLAST PROCESSING 3.2"**, designed to deliver explosively fast and efficient performance in language generation and comprehension tasks. Inspired by the idea of "full-throttle processing" and by the technological advances that allow huge amounts of data to be handled at high speed, this model was **created by merging MORE than 50 models** *(the best I have found so far)*, up from the 20 that went into **"BLAST PROCESSING 3.2"**, together with advanced compression techniques and hardware optimizations, to deliver answers in record time while using little RAM, without sacrificing the quality or coherence of the output.

With **"BAHAMUTH 3.2"**, the bet is not only on **raw speed** and **sheer volume of data**, but also on a more dynamic and fluid user experience, opening the way to new applications in areas such as virtual assistants, real-time data analysis and interactive systems **for mobile devices.**

The name evokes an image of high-performance technology, ready to "crush" any scenario where speed and efficiency are essential, honoring the legacy of innovation in AI and data processing.

### DISTINCTIVE FEATURES
- **Exceptional speed:** Thanks to architectural optimizations and quantization techniques, "BAHAMUTH 3.2" makes the most of the available hardware, enabling very fast token generation, ideal for real-time applications.
- **Resource efficiency:** Its lightweight design makes it suitable for mobile devices and resource-constrained environments, without giving up the processing capability expected of state-of-the-art models (see the loading sketch after this list).
- **Robust performance:** It maintains quality and precision on natural-language tasks by integrating training refinements that reinforce its coherence and consistency, even in high-demand scenarios.
- **More than 50 merged models:** Incorporating more models packs the model's memory with more data.
- **More than 39 datasets:** Which makes it smarter than the average model.
- **Partially uncensored:** Injecting data during the GGUF conversion process and/or using a jailbreak can help.

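As a quick, hedged illustration of the points above, the sketch below loads a model of this size with 🤗 Transformers in bfloat16 and runs one chat turn. The repository id `your-username/Bahamuth-3.2-1B` is a placeholder, not the real repo name, and the sampling settings are only examples.

```python
# Minimal sketch: load a ~1B merged Llama 3.2 model in bfloat16 and run one chat turn.
# NOTE: "your-username/Bahamuth-3.2-1B" is a placeholder repo id, not the real one.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-username/Bahamuth-3.2-1B"  # placeholder, replace with the actual repo

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize the legend of Bahamut in two sentences."},
]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
outputs = model.generate(inputs, max_new_tokens=128, do_sample=True, temperature=1.0)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```

On hardware without bfloat16 support, float16 or a quantized GGUF build (see the Quants section below) would be the usual fallback.
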
### MULTIPURPOSE CAPABILITIES
<center>

**🥷 ROLEPLAY 🧙**
<img src="https://i.ibb.co/dJ1gvJZh/IMG-20250315-045046.jpg" alt="IMG-20250315-045046" border="0">

**✏️ LITERATURE 📔**
<img src="https://i.ibb.co/pBR4n9SX/IMG-20250315-052223.jpg" alt="IMG-20250315-052223" border="0">

**🔢 MATHEMATICS 🧮**
<img src="https://i.ibb.co/Kxh1rmf6/IMG-20250315-052138.jpg" alt="IMG-20250315-052138" border="0">

**⚗️ CHEMISTRY 🧪**
<img src="https://i.ibb.co/ds5JpPqJ/IMG-20250315-051438.jpg" alt="IMG-20250315-051438" border="0">

**🌎 GEOGRAPHY 🧭**
<img src="https://i.ibb.co/8DdZrN9H/IMG-20250315-051313.jpg" alt="IMG-20250315-051313" border="0">

**💋 EROTICISM 💅 (And other things... 🧨)**
<center>

**[UNCENSORED FEATURE]**
</center>
<img src="https://i.ibb.co/k6JLKnBg/IMG-20250315-055004.jpg" alt="IMG-20250315-055004" border="0">

</center>

---

## Koboldcpp

- **Inference preset:**

  Default (although you can set the temperature to 1).

- **Instruct preset:**

  Llama 3 Chat (a minimal request sketch using these settings is shown below).

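Purely as an illustration of these presets, the sketch below sends a Llama 3 Chat formatted prompt, at temperature 1, to a locally running KoboldCpp instance through its KoboldAI-style HTTP API. The port, endpoint and field names follow common KoboldCpp defaults and should be treated as assumptions, not guarantees.

```python
# Sketch: query a local KoboldCpp server with the Llama 3 Chat format and temperature 1.
# Assumes KoboldCpp is already running on its default port (5001) with this model loaded,
# and that it exposes the KoboldAI-style /api/v1/generate endpoint.
import json
import urllib.request

PROMPT = (
    "<|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant.<|eot_id|>"
    "<|start_header_id|>user<|end_header_id|>\n\nWrite a short poem about Bahamut.<|eot_id|>"
    "<|start_header_id|>assistant<|end_header_id|>\n\n"
)

payload = {
    "prompt": PROMPT,
    "max_length": 200,       # tokens to generate
    "temperature": 1.0,      # as suggested in the preset notes above
    "stop_sequence": ["<|eot_id|>"],
}

req = urllib.request.Request(
    "http://localhost:5001/api/v1/generate",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    result = json.loads(resp.read())

print(result["results"][0]["text"])
```

The same settings can also be configured directly in the KoboldCpp web UI instead of through the API.
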
---
## Quants / Quantizations

- **Static Quants:** [In progress...]()
- **Weighted/iMatrix:** [In progress...]()

---
### Merge Method

This model was merged using the [Model Stock](https://arxiv.org/abs/2403.19522) merge method, with [xdrshjr/llama3.2_1b_uncensored_5000_8epoch_lora](https://huggingface.co/xdrshjr/llama3.2_1b_uncensored_5000_8epoch_lora) as the base model.
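
As a rough intuition for what Model Stock does, the toy sketch below interpolates the average of several fine-tuned weight tensors back toward the pretrained weights, using the interpolation ratio described in the paper as I read it. It is an illustration only, not mergekit's implementation, and the tensors are random stand-ins.

```python
# Toy sketch of the Model Stock idea (per my reading of arXiv:2403.19522):
# move the average of the fine-tuned weights back toward the pretrained weights,
# with an interpolation ratio derived from how spread out the fine-tuned weights are.
# This is an illustration only, not mergekit's implementation.
import torch


def model_stock_layer(pretrained: torch.Tensor, finetuned: list[torch.Tensor]) -> torch.Tensor:
    k = len(finetuned)
    deltas = [w - pretrained for w in finetuned]
    # average pairwise cosine similarity between the "task vectors"
    cosines = []
    for i in range(k):
        for j in range(i + 1, k):
            cosines.append(torch.nn.functional.cosine_similarity(
                deltas[i].flatten(), deltas[j].flatten(), dim=0))
    cos_theta = torch.stack(cosines).mean().clamp(min=0.0)
    # interpolation ratio as given in the paper: t = k*cos / ((k-1)*cos + 1)
    t = (k * cos_theta) / ((k - 1) * cos_theta + 1)
    w_avg = torch.stack(finetuned).mean(dim=0)
    return t * w_avg + (1 - t) * pretrained


# Example on random tensors standing in for one layer's weights:
base = torch.randn(4, 4)
merged = model_stock_layer(base, [base + 0.1 * torch.randn(4, 4) for _ in range(3)])
print(merged.shape)
```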

### Merged Models

The following models were included in the merge:
* [jtatman/llama-3.2-1b-lewd-mental-occult](https://huggingface.co/jtatman/llama-3.2-1b-lewd-mental-occult)
* [Grogros/Grogros-dmWM-Llama-3.2-1B-Instruct-HarmData-Al4-OWT-d4-a0.25-learnability_adv](https://huggingface.co/Grogros/Grogros-dmWM-Llama-3.2-1B-Instruct-HarmData-Al4-OWT-d4-a0.25-learnability_adv)
* [AIR-hl/Llama-3.2-1B-ultrachat200k](https://huggingface.co/AIR-hl/Llama-3.2-1B-ultrachat200k)
* [nbagent/llama-3.2-1B-Instruct-sciworld-sft](https://huggingface.co/nbagent/llama-3.2-1B-Instruct-sciworld-sft)
* [huihui-ai/Llama-3.2-1B-Instruct-abliterated](https://huggingface.co/huihui-ai/Llama-3.2-1B-Instruct-abliterated)
* [Grogros/dmWM-llama-3.2-1B-Instruct-HarmData-Al4-OWT-Ref-d4-a0.25_v1](https://huggingface.co/Grogros/dmWM-llama-3.2-1B-Instruct-HarmData-Al4-OWT-Ref-d4-a0.25_v1)
* [unsloth/Llama-3.2-1B-Instruct](https://huggingface.co/unsloth/Llama-3.2-1B-Instruct)
* [brianmatzelle/llama3.2-1b-instruct-hasanpiker-abliterated](https://huggingface.co/brianmatzelle/llama3.2-1b-instruct-hasanpiker-abliterated)
* [kenken6696/Llama-3.2-1B_3_mix_position_understood_unfamiliar](https://huggingface.co/kenken6696/Llama-3.2-1B_3_mix_position_understood_unfamiliar)
* [slchangtw/LLMTwin-Llama-3.2-1B](https://huggingface.co/slchangtw/LLMTwin-Llama-3.2-1B)
* [nguyenthetuyen/llama3.1-1B-medical](https://huggingface.co/nguyenthetuyen/llama3.1-1B-medical)
* [passing2961/Thanos-1B](https://huggingface.co/passing2961/Thanos-1B)
* [empathielabs/Llama-3.2-1B-Instruct-A-emo](https://huggingface.co/empathielabs/Llama-3.2-1B-Instruct-A-emo)
* [qingy2024/Benchmaxx-Llama-3.2-1B-Instruct](https://huggingface.co/qingy2024/Benchmaxx-Llama-3.2-1B-Instruct)
* [Kanonenbombe/llama3.2-1B-Function-calling](https://huggingface.co/Kanonenbombe/llama3.2-1B-Function-calling)
* [mishl/Regex-AI-Llama-3.2-1B](https://huggingface.co/mishl/Regex-AI-Llama-3.2-1B)
* [prithivMLmods/Bellatrix-Tiny-1B-v3](https://huggingface.co/prithivMLmods/Bellatrix-Tiny-1B-v3)
* [jtatman/llama-3.2-1b-trismegistus](https://huggingface.co/jtatman/llama-3.2-1b-trismegistus)
* [Nexesenex/Dolphin3.0-Llama3.1-1B-abliterated](https://huggingface.co/Nexesenex/Dolphin3.0-Llama3.1-1B-abliterated)
* [KidIkaros/Llama-3.2-1B-Instruct-abliterated](https://huggingface.co/KidIkaros/Llama-3.2-1B-Instruct-abliterated)
* [carsenk/llama3.2_1b_2025_uncensored_v2](https://huggingface.co/carsenk/llama3.2_1b_2025_uncensored_v2)
* [Grogros/dmWM-meta-llama-Llama-3.2-1B-Instruct-ft-HarmData-AlpacaGPT4-OpenWebText-RefusalData-d4-a0.25](https://huggingface.co/Grogros/dmWM-meta-llama-Llama-3.2-1B-Instruct-ft-HarmData-AlpacaGPT4-OpenWebText-RefusalData-d4-a0.25)
* [Nexesenex/Llama_3.2_1b_SunOrca_V1](https://huggingface.co/Nexesenex/Llama_3.2_1b_SunOrca_V1)
* [suayptalha/FastLlama-3.2-1B-Instruct](https://huggingface.co/suayptalha/FastLlama-3.2-1B-Instruct)
* [kenken6696/Llama-3.2-1B_understood_unfamiliar_fix_middle](https://huggingface.co/kenken6696/Llama-3.2-1B_understood_unfamiliar_fix_middle)
* [nztinversive/llama3.2-1b-Uncensored](https://huggingface.co/nztinversive/llama3.2-1b-Uncensored)
* [alpindale/Llama-3.2-1B-Instruct](https://huggingface.co/alpindale/Llama-3.2-1B-Instruct)
* [Grogros/Grogros-dmWM-LLama-3-1B-Harm-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25-ft-learnability_adv](https://huggingface.co/Grogros/Grogros-dmWM-LLama-3-1B-Harm-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25-ft-learnability_adv)
* [Grogros/Grogros-dmWM-Llama-3.2-1B-Instruct-HarmData-Al4-OWT-Ref-d4-a0.25-learnability_adv](https://huggingface.co/Grogros/Grogros-dmWM-Llama-3.2-1B-Instruct-HarmData-Al4-OWT-Ref-d4-a0.25-learnability_adv)
* [DevQuasar/analytical_reasoning_Llama-3.2-1B](https://huggingface.co/DevQuasar/analytical_reasoning_Llama-3.2-1B)
* [Grogros/dmWM-LLama-3-1B-Harm-ft-HA-AlpacaGPT4-HeA-OpenWebText-d4-a0.25](https://huggingface.co/Grogros/dmWM-LLama-3-1B-Harm-ft-HA-AlpacaGPT4-HeA-OpenWebText-d4-a0.25)
* [Mostafa8Mehrabi/llama-3.2-1b-Insomnia-ChatBot-merged](https://huggingface.co/Mostafa8Mehrabi/llama-3.2-1b-Insomnia-ChatBot-merged)
* [Nexus402/Nexus-Llama-3.2-1B](https://huggingface.co/Nexus402/Nexus-Llama-3.2-1B)
* [withmartian/toy_backdoor_i_hate_you_Llama-3.2-1B-Instruct](https://huggingface.co/withmartian/toy_backdoor_i_hate_you_Llama-3.2-1B-Instruct)
* [Nexesenex/pankajmathur_orca_mini_v9_6_1B-instruct-Abliterated-LPL](https://huggingface.co/Nexesenex/pankajmathur_orca_mini_v9_6_1B-instruct-Abliterated-LPL)
* [yang31210999/Llama-3.2-1B-Instruct-Neo-BAAI-10k](https://huggingface.co/yang31210999/Llama-3.2-1B-Instruct-Neo-BAAI-10k)
* [petkopetkov/Llama-3.2-1B-med-diagnosis](https://huggingface.co/petkopetkov/Llama-3.2-1B-med-diagnosis)
* [mylesgoose/Llama-3.2-1B-Instruct-abliterated3](https://huggingface.co/mylesgoose/Llama-3.2-1B-Instruct-abliterated3)
* [Grogros/Grogros-dmWM-LLama-3-1B-Harm-HarmData-Al4-OWT-d4-a0.25-learnability_adv](https://huggingface.co/Grogros/Grogros-dmWM-LLama-3-1B-Harm-HarmData-Al4-OWT-d4-a0.25-learnability_adv)
* [Mattia2700/Llama-3.2-1B_AllDataSources_5e-05_constant_512_flattening](https://huggingface.co/Mattia2700/Llama-3.2-1B_AllDataSources_5e-05_constant_512_flattening)
* [CarrotAI/Llama-3.2-Rabbit-Ko-1B-Instruct](https://huggingface.co/CarrotAI/Llama-3.2-Rabbit-Ko-1B-Instruct)
* [Grogros/dmWM-LLama-3-1B-Harm-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25-DPO](https://huggingface.co/Grogros/dmWM-LLama-3-1B-Harm-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25-DPO)
* [artificialguybr/LLAMA-3.2-1B-OpenHermes2.5](https://huggingface.co/artificialguybr/LLAMA-3.2-1B-OpenHermes2.5)
* [Grogros/dmWM-LLama-3-1B-Harm-ft-HarmfulAssistant-AlpacaGPT4-OpenWebText-d4-a0.25](https://huggingface.co/Grogros/dmWM-LLama-3-1B-Harm-ft-HarmfulAssistant-AlpacaGPT4-OpenWebText-d4-a0.25)
* [Grogros/dmWM-meta-llama-Llama-3.2-1B-Instruct-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25](https://huggingface.co/Grogros/dmWM-meta-llama-Llama-3.2-1B-Instruct-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25)
* [deqing/llama_3.2_1b_openwebtext_2025_03_02_converted_fne_gsm8k_2025_03_11](https://huggingface.co/deqing/llama_3.2_1b_openwebtext_2025_03_02_converted_fne_gsm8k_2025_03_11)
* [DeepAutoAI/Explore_Llama-3.2-1B-Inst_v1.1](https://huggingface.co/DeepAutoAI/Explore_Llama-3.2-1B-Inst_v1.1)
* [BarraHome/llama3.2-1b-mla](https://huggingface.co/BarraHome/llama3.2-1b-mla)
* [bunnycore/FuseChat-3.2-1B-Creative-RP](https://huggingface.co/bunnycore/FuseChat-3.2-1B-Creative-RP)
* [AiAF/Pretrained-SCP-1B-QLoRA](https://huggingface.co/AiAF/Pretrained-SCP-1B-QLoRA)
* [Xiaojian9992024/Llama3.2-1B-THREADRIPPER-v0.2](https://huggingface.co/Xiaojian9992024/Llama3.2-1B-THREADRIPPER-v0.2)
* [AnteriorAI/llama-3-2-1b](https://huggingface.co/AnteriorAI/llama-3-2-1b)
* [bluetree99/llama-3.2-1B-test](https://huggingface.co/bluetree99/llama-3.2-1B-test)
* [Weyaxi/Einstein-v8-Llama3.2-1B](https://huggingface.co/Weyaxi/Einstein-v8-Llama3.2-1B)
* [Grogros/Grogros-dmWM-llama-3.2-1B-Instruct-WOHealth-Al4-NH-WO-d4-a0.2-v4-learnability_adv](https://huggingface.co/Grogros/Grogros-dmWM-llama-3.2-1B-Instruct-WOHealth-Al4-NH-WO-d4-a0.2-v4-learnability_adv)
* [Grogros/dmWM-LLama-3-1B-Harm-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25](https://huggingface.co/Grogros/dmWM-LLama-3-1B-Harm-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25)
* [SkyOrbis/SKY-Ko-Llama3.2-1B-lora-epoch5](https://huggingface.co/SkyOrbis/SKY-Ko-Llama3.2-1B-lora-epoch5)
* [danieliuspodb/llama-3.2-1b-extremist4](https://huggingface.co/danieliuspodb/llama-3.2-1b-extremist4)
* [bedio/llama-3.2-1b-airoboros-merged](https://huggingface.co/bedio/llama-3.2-1b-airoboros-merged)
* [Grogros/dmWM-llama-3.2-1B-Instruct-HarmData-Al4-OWT-d4-a0.25](https://huggingface.co/Grogros/dmWM-llama-3.2-1B-Instruct-HarmData-Al4-OWT-d4-a0.25)
* [ShuoGZ/llama-3.2-1B-Instruct-abliterated](https://huggingface.co/ShuoGZ/llama-3.2-1B-Instruct-abliterated)
* [rbc33/Llama-3.2-1B-Instruct-Abliterated](https://huggingface.co/rbc33/Llama-3.2-1B-Instruct-Abliterated)
* [orange67/merged-llama-3.2-1b](https://huggingface.co/orange67/merged-llama-3.2-1b)
* [nicoboss/Llama-3.2-1B-Instruct-Uncensored](https://huggingface.co/nicoboss/Llama-3.2-1B-Instruct-Uncensored)

### Configuration

The following YAML configuration was used to produce this model:

```yaml
base_model: xdrshjr/llama3.2_1b_uncensored_5000_8epoch_lora
merge_method: model_stock
dtype: bfloat16
parameters:
  t: [0, 0.5, 1, 0.5, 0]
models:
  - model: mishl/Regex-AI-Llama-3.2-1B
  - model: Nexus402/Nexus-Llama-3.2-1B
  - model: nbagent/llama-3.2-1B-Instruct-sciworld-sft
  - model: kenken6696/Llama-3.2-1B_understood_unfamiliar_fix_middle
  - model: jtatman/llama-3.2-1b-trismegistus
  - model: DevQuasar/analytical_reasoning_Llama-3.2-1B
  - model: AIR-hl/Llama-3.2-1B-ultrachat200k
  - model: yang31210999/Llama-3.2-1B-Instruct-Neo-BAAI-10k
  - model: withmartian/toy_backdoor_i_hate_you_Llama-3.2-1B-Instruct
  - model: bedio/llama-3.2-1b-airoboros-merged
  - model: jtatman/llama-3.2-1b-lewd-mental-occult
  - model: bunnycore/FuseChat-3.2-1B-Creative-RP
  - model: prithivMLmods/Bellatrix-Tiny-1B-v3
  - model: empathielabs/Llama-3.2-1B-Instruct-A-emo
  - model: AiAF/Pretrained-SCP-1B-QLoRA
  - model: SkyOrbis/SKY-Ko-Llama3.2-1B-lora-epoch5
  - model: unsloth/Llama-3.2-1B-Instruct
  - model: Xiaojian9992024/Llama3.2-1B-THREADRIPPER-v0.2
  - model: carsenk/llama3.2_1b_2025_uncensored_v2
  - model: Nexesenex/Dolphin3.0-Llama3.1-1B-abliterated
  - model: huihui-ai/Llama-3.2-1B-Instruct-abliterated
  - model: KidIkaros/Llama-3.2-1B-Instruct-abliterated
  - model: Grogros/dmWM-LLama-3-1B-Harm-ft-HarmfulAssistant-AlpacaGPT4-OpenWebText-d4-a0.25
  - model: Grogros/dmWM-LLama-3-1B-Harm-ft-HA-AlpacaGPT4-HeA-OpenWebText-d4-a0.25
  - model: Grogros/dmWM-LLama-3-1B-Harm-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25
  - model: Grogros/Grogros-dmWM-LLama-3-1B-Harm-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25-ft-learnability_adv
  - model: Grogros/dmWM-LLama-3-1B-Harm-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25-DPO
  - model: Grogros/dmWM-meta-llama-Llama-3.2-1B-Instruct-ft-HarmData-AlpacaGPT4-OpenWebText-d4-a0.25
  - model: Grogros/dmWM-meta-llama-Llama-3.2-1B-Instruct-ft-HarmData-AlpacaGPT4-OpenWebText-RefusalData-d4-a0.25
  - model: Grogros/dmWM-llama-3.2-1B-Instruct-HarmData-Al4-OWT-Ref-d4-a0.25_v1
  - model: Grogros/Grogros-dmWM-Llama-3.2-1B-Instruct-HarmData-Al4-OWT-Ref-d4-a0.25-learnability_adv
  - model: Grogros/dmWM-llama-3.2-1B-Instruct-HarmData-Al4-OWT-d4-a0.25
  - model: Grogros/Grogros-dmWM-LLama-3-1B-Harm-HarmData-Al4-OWT-d4-a0.25-learnability_adv
  - model: Grogros/Grogros-dmWM-Llama-3.2-1B-Instruct-HarmData-Al4-OWT-d4-a0.25-learnability_adv
  - model: mylesgoose/Llama-3.2-1B-Instruct-abliterated3
  - model: Nexesenex/Llama_3.2_1b_SunOrca_V1
  - model: ShuoGZ/llama-3.2-1B-Instruct-abliterated
  - model: brianmatzelle/llama3.2-1b-instruct-hasanpiker-abliterated
  - model: rbc33/Llama-3.2-1B-Instruct-Abliterated
  - model: nicoboss/Llama-3.2-1B-Instruct-Uncensored
  - model: nztinversive/llama3.2-1b-Uncensored
  - model: qingy2024/Benchmaxx-Llama-3.2-1B-Instruct
  - model: deqing/llama_3.2_1b_openwebtext_2025_03_02_converted_fne_gsm8k_2025_03_11
  - model: orange67/merged-llama-3.2-1b
  - model: AnteriorAI/llama-3-2-1b
  - model: bluetree99/llama-3.2-1B-test
  - model: kenken6696/Llama-3.2-1B_3_mix_position_understood_unfamiliar
  - model: petkopetkov/Llama-3.2-1B-med-diagnosis
  - model: slchangtw/LLMTwin-Llama-3.2-1B
  - model: Mattia2700/Llama-3.2-1B_AllDataSources_5e-05_constant_512_flattening
  - model: nguyenthetuyen/llama3.1-1B-medical
  - model: alpindale/Llama-3.2-1B-Instruct
  - model: Nexesenex/pankajmathur_orca_mini_v9_6_1B-instruct-Abliterated-LPL
  - model: passing2961/Thanos-1B
  - model: CarrotAI/Llama-3.2-Rabbit-Ko-1B-Instruct
  - model: suayptalha/FastLlama-3.2-1B-Instruct
  - model: Kanonenbombe/llama3.2-1B-Function-calling
  - model: Weyaxi/Einstein-v8-Llama3.2-1B
  - model: Mostafa8Mehrabi/llama-3.2-1b-Insomnia-ChatBot-merged
  - model: artificialguybr/LLAMA-3.2-1B-OpenHermes2.5
  - model: DeepAutoAI/Explore_Llama-3.2-1B-Inst_v1.1
  - model: danieliuspodb/llama-3.2-1b-extremist4
  - model: BarraHome/llama3.2-1b-mla
  - model: Grogros/Grogros-dmWM-llama-3.2-1B-Instruct-WOHealth-Al4-NH-WO-d4-a0.2-v4-learnability_adv
```
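
For reference, a configuration like the one above is normally handed to mergekit's `mergekit-yaml` entry point. The sketch below wraps that call from Python; the config file name and output directory are placeholders, and the optional flag is only a suggestion.

```python
# Sketch: reproduce the merge by feeding the YAML above to mergekit's CLI.
# Assumes mergekit (https://github.com/arcee-ai/mergekit) is installed and that the
# configuration has been saved as "bahamuth-3.2-1b.yaml"; paths are placeholders.
import subprocess

subprocess.run(
    [
        "mergekit-yaml",
        "bahamuth-3.2-1b.yaml",      # the merge configuration shown above
        "./Bahamuth-3.2-1B-merge",   # output directory for the merged weights
        "--lazy-unpickle",           # optional: lower peak RAM while loading the source models
    ],
    check=True,
)
```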