from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
from transformers.models.auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)


class GeckoConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a Gecko model. It is used to instantiate a Gecko
    model according to the specified arguments, defining the model architecture. The configuration follows the Llava
    layout, and instantiating it with the defaults will yield a configuration similar to that of Llava-9B,
    e.g. [llava-hf/llava-9b](https://huggingface.co/llava-hf/llava-9b).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (`Union[AutoConfig, dict]`, *optional*):
            The config object or dictionary of the vision backbone.
        text_config (`Union[AutoConfig, dict]`, *optional*):
            The config object of the text backbone. Can be any of `LlamaConfig` or `MistralConfig`.
        ignore_index (`int`, *optional*, defaults to -100):
            The ignore index for the loss function.
        image_token_index (`int`, *optional*, defaults to 32000):
            The image token index to encode the image prompt.
        projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The activation function used by the multimodal projector.
        vision_feature_select_strategy (`str`, *optional*, defaults to `"cls"`):
            The feature selection strategy used to select the vision feature from the CLIP backbone.
        patch_picking_strategy (`str`, *optional*, defaults to `"across_layers"`):
            The strategy used to pick image patches from the vision backbone.
        vision_feature_layer (`int`, *optional*, defaults to -2):
            The index of the layer to select the vision feature.
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling the model.
        topk (`int`, *optional*, defaults to 4):
            Number of top patches to select.
        keyword_criteria (`str`, *optional*, defaults to `"template"`):
            Criteria used to select keywords.
        positional_information (`str`, *optional*, defaults to `"explicit"`):
            How positional information is represented.
        visualize_patches (`bool`, *optional*, defaults to `False`):
            Whether to visualize the image patches (debugging aid).
        visualize_topk_patches (`bool`, *optional*, defaults to `False`):
            Whether to visualize the top-k selected patches (debugging aid).
        print_keyword (`bool`, *optional*, defaults to `False`):
            Whether to print the selected keywords (debugging aid).
        print_topk_patches (`bool`, *optional*, defaults to `False`):
            Whether to print the top-k selected patches (debugging aid).
    """

    model_type = "gecko"
    is_composition = False

    def __init__(
        self,
        vision_config=None,
        text_config=None,
        ignore_index=-100,
        image_token_index=32000,
        projector_hidden_act="gelu",
        vision_feature_select_strategy="cls",
        patch_picking_strategy="across_layers",
        vision_feature_layer=-2,
        vocab_size=32000,
        topk=4,
        keyword_criteria="template",
        positional_information="explicit",
        visualize_patches=False,
        visualize_topk_patches=False,
        print_keyword=False,
        print_topk_patches=False,
        **kwargs,
    ):
        self.ignore_index = ignore_index
        self.image_token_index = image_token_index
        self.projector_hidden_act = projector_hidden_act
        self.vision_feature_layer = vision_feature_layer
        self.vision_feature_select_strategy = vision_feature_select_strategy
        self.patch_picking_strategy = patch_picking_strategy
        self.vocab_size = vocab_size
        self.topk = topk
        self.vision_config = vision_config
        self.text_config = text_config
        self.keyword_criteria = keyword_criteria
        self.positional_information = positional_information
        self.visualize_patches = visualize_patches
        self.visualize_topk_patches = visualize_topk_patches
        self.print_keyword = print_keyword
        self.print_topk_patches = print_topk_patches

        # Resolve the vision config: build it from a dict, or fall back to CLIP ViT-L/14 (336px) defaults.
        if isinstance(self.vision_config, dict):
            vision_config["model_type"] = (
                vision_config["model_type"] if "model_type" in vision_config else "clip_vision_model"
            )
            self.vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
        elif vision_config is None:
            self.vision_config = CONFIG_MAPPING["clip_vision_model"](
                intermediate_size=4096,
                hidden_size=1024,
                patch_size=14,
                image_size=336,
                num_hidden_layers=24,
                num_attention_heads=16,
                vocab_size=32000,
                projection_dim=768,
            )

        # Resolve the text config: build it from a dict (inheriting its vocab_size), or fall back to a default
        # LlamaConfig.
        if isinstance(self.text_config, dict):
            text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "llama"
            self.text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
            self.vocab_size = self.text_config.vocab_size
        elif text_config is None:
            self.text_config = CONFIG_MAPPING["llama"]()

        super().__init__(**kwargs)
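

# A minimal usage sketch, not part of the original module: it relies only on the standard `transformers`
# CONFIG_MAPPING behaviour, where a dict carrying a "model_type" key is converted into the matching config class.
# The hyperparameter values below are illustrative placeholders, not settings taken from a Gecko release.
if __name__ == "__main__":
    # Default construction: CLIP ViT-L/14-336 vision tower and a default LlamaConfig text backbone.
    config = GeckoConfig()
    print(type(config.vision_config).__name__, type(config.text_config).__name__)

    # Construction from plain dicts, as they would appear in a serialized config.json.
    config = GeckoConfig(
        vision_config={"model_type": "clip_vision_model", "hidden_size": 1024, "patch_size": 14, "image_size": 336},
        text_config={"model_type": "llama", "vocab_size": 32000},
        topk=4,
        keyword_criteria="template",
        positional_information="explicit",
    )
    # vocab_size is taken from the text backbone when text_config is passed as a dict.
    print(config.topk, config.vocab_size)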