# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/doge/modular_doge.py.
# Do NOT edit this file manually, as any edits will be overwritten when the file is
# regenerated from the modular source. If any change needs to be made, please apply it to
# the modular_doge.py file directly. One of our CI checks enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 Jingze Shi and the HuggingFace Inc. team. All rights reserved.
#
# The Doge family of small language models is trained by the SmallDoge Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers.configuration_utils import PretrainedConfig
from transformers.modeling_rope_utils import rope_config_validation


class DogeConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DogeModel`]. It is used to instantiate a Doge
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of [SmallDoge/Doge-320M](https://huggingface.co/SmallDoge/Doge-320M).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32768):
            Vocabulary size of the Doge model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`DogeModel`].
        hidden_size (`int`, *optional*, defaults to 1024):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        hidden_dropout (`float`, *optional*, defaults to 0.0):
            Dropout probability for each sequence transformation and state transformation module.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings.
            NOTE: if you apply a new rope type and you expect the model to work on a longer `max_position_embeddings`,
            we recommend updating this value accordingly.
            The Doge family of small models uses `{ 'rope_type': 'dynamic', 'factor': 4.0, 'original_max_position_embeddings': 2048 }`
            as the default value (see the usage example below).
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'llama3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings.
                    In most scaling types, a `factor` of x will enable the model to handle sequences of length
                    x * original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'llama3'.
                    The original max position embeddings used during pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention computation.
                    If unspecified, it defaults to the value recommended by the implementation, using the `factor`
                    field to infer the suggested value.
                `beta_fast` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
                    ramp function. If unspecified, it defaults to 32.
                `beta_slow` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                    ramp function. If unspecified, it defaults to 1.
                `short_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<`original_max_position_embeddings`).
                    Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2.
                `long_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (>`original_max_position_embeddings`).
                    Must be a list of numbers with the same length as the hidden size divided by the number of attention heads divided by 2.
                `low_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE.
                `high_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention (GQA). If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used (see the
            usage example below). When converting a multi-head checkpoint to a GQA checkpoint, each group key and
            value head should be constructed by meanpooling all the original heads within that group. For more
            details, check out [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will
            default to `num_attention_heads`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the up_proj, down_proj and gate_proj layers in the MLP layers.
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If not specified, defaults to `None`.
        keep_window_size (`int`, *optional*, defaults to 2048):
            The window size of tokens that are not dynamically masked; dynamic masking is only performed when the
            sequence length exceeds this value.
        is_moe (`bool`, *optional*, defaults to `False`):
            Whether to use the Cross Domain Mixture of Experts. If `True`, the MoE inherits the MLP for
            initialization (see the usage example below).
        num_experts (`int`, *optional*, defaults to 16384):
            Number of routed experts in the model. This is only used when `is_moe=True`.
        num_experts_per_tok (`int`, *optional*, defaults to 64):
            Number of selected experts to route per-token.
        norm_topk_prob (`bool`, *optional*, defaults to `False`):
            Whether to normalize the topk probabilities.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether or not the router logits should be returned by the model. Enabling this will also
            allow the model to output the auxiliary loss, including load balancing loss and router z-loss.
        router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
            The aux loss factor for the total loss.

    ```python
    >>> from transformers import DogeConfig, DogeModel

    >>> # Initializing a Doge-320M style configuration
    >>> configuration = DogeConfig()

    >>> # Initializing a model from the Doge-320M style configuration
    >>> model = DogeModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
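
    >>> # Hedged, illustrative sketches with hypothetical values (not released checkpoints):
    >>> # a GQA variant, a longer-context variant via dynamic RoPE scaling, and a Cross Domain MoE variant
    >>> gqa_configuration = DogeConfig(num_attention_heads=8, num_key_value_heads=2)

    >>> long_context_configuration = DogeConfig(
    ...     max_position_embeddings=8192,
    ...     rope_scaling={"rope_type": "dynamic", "factor": 4.0, "original_max_position_embeddings": 2048},
    ... )

    >>> moe_configuration = DogeConfig(is_moe=True, num_experts=1024, num_experts_per_tok=8)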
    ```"""

    model_type = "doge"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Default tensor parallel plan for base model `DogeModel`
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.dt_proj": "rowwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.input_layernorm.weight": "sequence_parallel",
        "layers.*.input_residual.weight": "sequence_parallel",
        "layers.*.post_attention_layernorm.weight": "sequence_parallel",
        "layers.*.post_attention_residual.weight": "sequence_parallel",
        "norm.weight": "sequence_parallel",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
        "layers.*.mlp.router_gate": "colwise_rep",
        "layers.*.mlp.down_embed": "rowwise_rep",
        "layers.*.mlp.up_embed": "rowwise_rep",
    }
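    # Default pipeline parallel plan for base model `DogeModel`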
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=32768,
        hidden_size=1024,
        intermediate_size=2048,
        num_hidden_layers=32,
        hidden_dropout=0.0,
        hidden_act="silu",
        initializer_range=0.02,
        rms_norm_eps=1e-06,
        use_cache=True,
        tie_word_embeddings=False,
        max_position_embeddings=2048,
        rope_theta=10000.0,
        rope_scaling=None,
        num_attention_heads=8,
        num_key_value_heads=None,
        attention_bias=False,
        attention_dropout=0.0,
        mlp_bias=False,
        sliding_window=None,
        keep_window_size=2048,
        is_moe=False,
        num_experts=16384,
        num_experts_per_tok=64,
        norm_topk_prob=False,
        output_router_logits=False,
        router_aux_loss_coef=0.001,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers

        self.hidden_dropout = hidden_dropout
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache

        self.max_position_embeddings = max_position_embeddings
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.mlp_bias = mlp_bias
        self.sliding_window = sliding_window
        self.keep_window_size = keep_window_size
        self.is_moe = is_moe
        self.num_experts = num_experts
        self.num_experts_per_tok = num_experts_per_tok
        self.norm_topk_prob = norm_topk_prob
        self.output_router_logits = output_router_logits
        self.router_aux_loss_coef = router_aux_loss_coef

        # Validate the correctness of rotary position embeddings parameters
        # BC: if there is a 'type' field, copy it to 'rope_type'.
        if self.rope_scaling is not None and "type" in self.rope_scaling:
            self.rope_scaling["rope_type"] = self.rope_scaling["type"]
        rope_config_validation(self)

        # for backward compatibility
        if num_key_value_heads is None:
            self.num_key_value_heads = num_attention_heads

        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


__all__ = ["DogeConfig"]