
llmcompressor.utils.pytorch

get_matching_layer(target, name_to_match, module)

Given a target regex, find the layer name in the module that most closely matches the name_to_match string. This is used to match submodules in the same layer; for instance, matching "re.*k_proj" against "model.decoder.layer.0.q_proj" finds the k_proj that exists in layer 0.

Parameters:

    target (str, required): regex to search for
    name_to_match (str, required): full layer name to match to, should exist in module
    module (Module, required): module to search for target in

Returns:

    Optional[Tuple[str, Module]]: Tuple containing the layer name and module that fits the target regex and best matches name_to_match, or None if no match can be found

Source code in src/llmcompressor/utils/pytorch/module.py
def get_matching_layer(
    target: str, name_to_match: str, module: Module
) -> Optional[Tuple[str, Module]]:
    """
    Given a target regex, find the layer name in the module that most closely matches
    the name_to_match string. This is used to match submodules in the same layer, for
    instance matching "re.*k_proj" to "model.decoder.layer.0.q_proj" to find the k_proj
    that exists in layer 0.

    :param target: regex to search for
    :param name_to_match: full layer name to match to, should exist in module
    :param module: module to search for target in
    :return: Tuple containing the layer name and module that fits the target regex and
    best matches name_to_match, or None if no match can be found
    """
    potential_matches = get_layers(target, module)
    largest_substring = 0
    match = None
    for name, module in potential_matches.items():
        seq_matcher = difflib.SequenceMatcher(None, name, name_to_match)
        _, _, match_length = seq_matcher.find_longest_match(
            0, len(name), 0, len(name_to_match)
        )
        if match_length > largest_substring:
            match = (name, module)
            largest_substring = match_length

    return match
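
Example usage (a minimal sketch, not taken from the library docs): the checkpoint name, the layer name, the "re:" regex prefix, and the import path below are illustrative assumptions, not guaranteed API behavior.

# Hypothetical usage sketch for get_matching_layer
from transformers import AutoModelForCausalLM

from llmcompressor.utils.pytorch import get_matching_layer  # import path assumed from this page's module name

model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")  # illustrative checkpoint

# Find the k_proj that lives in the same decoder layer as a known q_proj.
match = get_matching_layer(
    target="re:.*k_proj",  # "re:" prefix assumed to mark a regex target
    name_to_match="model.decoder.layers.0.self_attn.q_proj",  # illustrative layer name
    module=model,
)
if match is not None:
    name, layer = match
    print(name)  # expected: the k_proj module name in decoder layer 0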

get_no_split_params(model)

Get list of module classes that shouldn't be split when sharding. For Hugging Face Transformer models, this is the decoder layer type. For other types of models, this just returns all module names.

Returns:

    Union[str, List[str]]: list of class names that shouldn't be split

Source code in src/llmcompressor/utils/pytorch/module.py
def get_no_split_params(model: PreTrainedModel) -> Union[str, List[str]]:
    """
    Get list of module classes that shouldn't be split when sharding. For
    Hugging Face Transformer models, this is the decoder layer type. For other
    types of models, this just returns all module names.

    :return: list of class names that shouldn't be split
    """
    # importing here to avoid circular import
    from llmcompressor.utils.fsdp.helpers import maybe_get_wrapped

    model = maybe_get_wrapped(model)
    no_split_modules = model._get_no_split_modules("auto")
    if len(no_split_modules) <= 0:
        return ALL_TARGET

    return no_split_modules
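
Example usage (a minimal sketch): the model name is illustrative, and passing the result to accelerate's infer_auto_device_map is one common way to use it, not something this page prescribes.

# Hypothetical usage sketch for get_no_split_params
from accelerate import infer_auto_device_map
from transformers import AutoModelForCausalLM

from llmcompressor.utils.pytorch import get_no_split_params  # import path assumed

model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")  # illustrative checkpoint
no_split = get_no_split_params(model)
print(no_split)  # e.g. ["OPTDecoderLayer"] for OPT-style models

# Keep decoder layers whole when sharding the model across devices.
device_map = infer_auto_device_map(model, no_split_module_classes=no_split)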

get_parent_by_name(layer_name, model)

Get the parent layer of a layer by name.

Parameters:

    layer_name (str, required): Name of the layer to find the parent of.
    model (Module, required): Model to search for the parent layer.

Returns:

    Tuple[str, Module]: Tuple containing the name of the parent layer and the parent layer itself.

Source code in src/llmcompressor/utils/pytorch/module.py
def get_parent_by_name(layer_name: str, model: Module) -> Tuple[str, Module]:
    """
    Get the parent layer of a layer by name.
    :param layer_name: Name of the layer to find the parent of.
    :param model: Model to search for the parent layer.
    :return: Tuple containing the name of the parent layer
        and the parent layer itself.
    """
    if not any(layer_name == name for name, _ in model.named_modules()):
        raise ValueError(f"Layer '{layer_name}' not found in model")

    parent_name_parts = layer_name.split(".")[:-1]
    if not parent_name_parts:
        return "", model

    parent_name = ".".join(parent_name_parts)
    return get_layer(parent_name, model)
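
Example usage (a minimal sketch built on a toy nn.Sequential rather than a real llmcompressor workflow): the layer names are whatever PyTorch assigns to nested Sequential children.

# Hypothetical usage sketch for get_parent_by_name
import torch.nn as nn

from llmcompressor.utils.pytorch import get_parent_by_name  # import path assumed

model = nn.Sequential(nn.Linear(8, 8), nn.Sequential(nn.Linear(8, 4)))

# "1.0" is the inner Linear; its parent is the nested Sequential named "1".
parent_name, parent = get_parent_by_name("1.0", model)
print(parent_name)  # "1"

# With the parent in hand, the child can be replaced in place.
setattr(parent, "0", nn.Linear(8, 4, bias=False))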

qat_active(module)

Determines if any layers in the model have quantization enabled, by checking for torch FakeQuantize submodules or modules flagged as quantized.

Parameters:

    module (Module, required): PyTorch model to check for quantization

Returns:

    bool: True if quantization is active anywhere in the model, False otherwise

Source code in src/llmcompressor/utils/pytorch/module.py
def qat_active(module: Module) -> bool:
    """
    Determines if any layers in the model have quantization enabled, by checking for
    FakeQuantize submodules or modules flagged as quantized

    :param module: PyTorch model to check for quantization
    :return: True if quantization is active anywhere in the model, False otherwise
    """
    for _, layer in module.named_modules():
        if isinstance(layer, torch.quantization.FakeQuantize):
            return True
        if is_module_quantized(layer):
            return True

    return False
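
Example check (a minimal sketch on toy modules): torch.quantization.FakeQuantize is constructed directly here to simulate a QAT-prepared layer; a real QAT flow would insert these observers via torch's prepare utilities.

# Hypothetical usage sketch for qat_active
import torch
import torch.nn as nn

from llmcompressor.utils.pytorch import qat_active  # import path assumed

float_model = nn.Sequential(nn.Linear(16, 16), nn.ReLU())
print(qat_active(float_model))  # False -- no quantization present

qat_model = nn.Sequential(nn.Linear(16, 16), torch.quantization.FakeQuantize())
print(qat_active(qat_model))  # True -- a FakeQuantize submodule was found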