
util

logging

Wrapper of the logging module; falls back to print when logging is not configured.

auto_name_prune_quantize_layers(net)

Set the name attribute of Prune/Quantize layers based on their torch module paths. This utility can be applied for better logging.

Parameters:

    net (nn.Module): network module with PruneLayer and QuantizeLayer. Required.

Returns:

    nn.Module: modified module.

Source code in qsparse/util.py
def auto_name_prune_quantize_layers(net: nn.Module) -> nn.Module:
    """Set name attribute of Prune/Quantize layers based on their torch module
    paths. This utility can be applied for better logging.

    Args:
        net (nn.Module): network module with [PruneLayer][qsparse.sparse.PruneLayer] and [QuantizeLayer][qsparse.quantize.QuantizeLayer].

    Returns:
        nn.Module: modified module
    """

    from qsparse.quantize import QuantizeLayer
    from qsparse.sparse import PruneLayer

    # named_modules yields dotted paths such as "features.0.prune"
    for name, mod in net.named_modules():
        if isinstance(mod, (PruneLayer, QuantizeLayer)):
            mod.name = name
    return net
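Example usage (a minimal sketch; build_qsparse_model is a hypothetical constructor standing in for any network that already contains PruneLayer/QuantizeLayer instances):

from qsparse.util import auto_name_prune_quantize_layers

model = build_qsparse_model()  # hypothetical: contains PruneLayer / QuantizeLayer
model = auto_name_prune_quantize_layers(model)

# every prune/quantize layer now carries its dotted module path in `.name`,
# e.g. "features.0.prune", which makes QSPARSE's log messages traceable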

calculate_mask_given_importance(importance, sparsity)

Return a binary torch tensor whose sparsity equals the given sparsity.

Parameters:

    importance (torch.Tensor): floating-point tensor representing importance. Required.
    sparsity (float): sparsity level in [0, 1]. Required.

Returns:

    torch.Tensor: binary mask.

Source code in qsparse/util.py
def calculate_mask_given_importance(importance: torch.Tensor, sparsity: float) -> torch.Tensor:
    """Return a binary torch tensor whose sparsity equals the given sparsity.

    Args:
        importance (torch.Tensor): floating-point tensor representing importance.
        sparsity (float): sparsity level in `[0, 1]`

    Returns:
        torch.Tensor: binary mask
    """
    # sort all scores ascending; the `sparsity * n` least important
    # entries fall below the threshold and are masked out
    values = importance.flatten().sort()[0]
    n = len(values)
    idx = max(int(sparsity * n - 1), 0)
    # clamp the index so sparsity == 1.0 does not read past the end
    threshold = values[min(idx + 1, n - 1)]
    return importance >= threshold
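A quick self-contained check of the realized sparsity (plain PyTorch, nothing assumed beyond this function):

import torch
from qsparse.util import calculate_mask_given_importance

importance = torch.rand(64, 3, 3, 3)    # e.g. absolute weight magnitudes
mask = calculate_mask_given_importance(importance, sparsity=0.5)

print(mask.dtype)                       # torch.bool
print(1 - mask.float().mean().item())   # ~0.5, the realized sparsity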

get_option(key)

Return the requested option. The exported alias of this function is get_qsparse_option.

Parameters:

    key (str): option name. Required.

Returns:

    option value.

Source code in qsparse/util.py
def get_option(key: str):
    """Return the requested option. The exported alias of this function is `get_qsparse_option`.

    Args:
        key (str): option name

    Returns:
        option value
    """
    # only these two options exist at the moment
    assert key in ("log_on_created", "log_during_train")

    return _options_[key]
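For instance, assuming the alias is exported at the package root as stated above:

from qsparse import get_qsparse_option  # exported alias of get_option

if get_qsparse_option("log_during_train"):
    print("per-step pruning/quantization messages are enabled")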

nn_module(mod)

Return the actual module of a nn.Module or nn.DataParallel.

Parameters:

    mod (nn.Module): input PyTorch module. Required.

Returns:

    nn.Module: actual module.

Source code in qsparse/util.py
def nn_module(mod: nn.Module) -> nn.Module:
    """Return the actual module of a `nn.Module` or `nn.DataParallel`.

    Args:
        mod (nn.Module): input PyTorch module

    Returns:
        nn.Module: actual module
    """
    # nn.DataParallel (and DistributedDataParallel) expose the wrapped
    # network through the `module` attribute
    if hasattr(mod, "module"):
        return mod.module
    else:
        return mod
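For example (plain PyTorch; runs on CPU as well):

import torch.nn as nn
from qsparse.util import nn_module

net = nn.Linear(4, 2)
wrapped = nn.DataParallel(net)

assert nn_module(wrapped) is net  # unwraps the DataParallel container
assert nn_module(net) is net      # plain modules pass through unchanged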

preload_qsparse_state_dict(model, state_dict)

Call before load_state_dict to preload the state dict of QSPARSE layers, because load_state_dict currently does not allow shape mismatches.

Parameters:

    model (torch.nn.Module): model to be loaded. Required.
    state_dict (Dict[str, torch.Tensor]): state dict. Required.

Returns:

    torch.nn.Module: loaded model.

Source code in qsparse/util.py
def preload_qsparse_state_dict(model: torch.nn.Module, state_dict: Dict[str, torch.Tensor]) -> torch.nn.Module:
    """calling before `load_state_dict` to preload the state dict of QSPARSE layers, because `load_state_dict` currently does not allow shape mismatch

    Args:
        model (torch.nn.Module): model to be loaded
        state_dict (Dict[str, torch.Tensor]): state dict

    Returns:
        torch.nn.Module: loaded model
    """
    from qsparse.quantize import QuantizeLayer
    from qsparse.sparse import PruneLayer
    keys = list(state_dict.keys())
    device = next(model.parameters()).device
    for ki, mod in model.named_modules():
        if isinstance(mod, (PruneLayer, QuantizeLayer)):
            for kj, submod in mod.named_modules():
                # rebuild the fully qualified state-dict prefix of this submodule
                prefix = ""
                if ki:
                    prefix += ki + "."
                if kj:
                    prefix += kj + "."
                for K in keys:
                    # match only the direct parameters of this submodule
                    if K.startswith(prefix) and "." not in K[len(prefix):]:
                        # assign directly so checkpoint shapes replace the
                        # freshly initialized (possibly mismatched) ones
                        submod._parameters[K[len(prefix):]] = nn.Parameter(
                            state_dict[K].to(device), requires_grad=False
                        )
    return model
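A typical loading sequence might look like the sketch below; create_model is a hypothetical constructor for the same architecture (including its QSPARSE layers) that produced the checkpoint:

import torch
from qsparse.util import preload_qsparse_state_dict

model = create_model()  # hypothetical: same architecture as the checkpoint
state_dict = torch.load("checkpoint.pth")

# first copy the QSPARSE parameters whose shapes would otherwise mismatch,
# then load the remaining weights the usual way
model = preload_qsparse_state_dict(model, state_dict)
model.load_state_dict(state_dict)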

set_options(log_on_created=None, log_during_train=None)

Set QSPARSE options. Only the options given will be updated. The exported alias of this function is set_qsparse_options.

Parameters:

    log_on_created (Optional[bool]): if set to True, QSPARSE logs to the console whenever a prune/quantize layer is created; the built-in value is True. Defaults to None.
    log_during_train (Optional[bool]): if set to True, QSPARSE logs to the console when pruning and quantization happen; the built-in value is True. Defaults to None.
Source code in qsparse/util.py
def set_options(
    log_on_created: Optional[bool] = None, log_during_train: Optional[bool] = None
):
    """set QSPARSE options. Only the options given will be updated. The exported alias of this function is `set_qsparse_options`.

    Args:
        log_on_created (Optional[bool], optional): If set to True, QSPARSE will log into console when every prune/quantize layer is created, the built-in value is True. Defaults to None.
        log_during_train (Optional[bool], optional): If set to True, QSPARSE will log into console when pruning and quantization happen, the built-in value is True. Defaults to None.
    """
    if log_on_created is not None:
        _options_["log_on_created"] = log_on_created

    if log_during_train is not None:
        _options_["log_during_train"] = log_during_train

squeeze_tensor_to_shape(x, shape)

Squeeze a tensor to a given shape through averaging.

Parameters:

    x (torch.Tensor): input tensor. Required.
    shape (List[int]): target shape. Required.

Exceptions:

    ValueError: when the input tensor has a different number of dimensions than the target shape, or the target shape requires shrinking a dimension to a size other than 1.

Returns:

    torch.Tensor: aligned tensor.

Source code in qsparse/util.py
def squeeze_tensor_to_shape(x: torch.Tensor, shape: List[int]) -> torch.Tensor:
    """Squeeze a tensor to a given shape through averaging.

    Args:
        x (torch.Tensor): input tensor
        shape (List[int]): target shape

    Raises:
        ValueError: when the input tensor has a different number of dimensions than the target shape, or the target shape requires shrinking a dimension to a size other than 1

    Returns:
        torch.Tensor: aligned tensor
    """
    if len(x.shape) != len(shape):
        # raise the documented ValueError rather than relying on an assert
        raise ValueError("mismatch between the input tensor and mask")
    for i, (sx, sm) in enumerate(zip(x.shape, shape)):
        if sx != sm:
            if sm == 1:
                # collapse this dimension by averaging, keeping it as size 1
                x = x.mean(i, keepdim=True)
            else:
                raise ValueError("mismatch between the input tensor and mask")
    return x
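For example, averaging a batch of feature maps down to a per-channel shape (plain PyTorch):

import torch
from qsparse.util import squeeze_tensor_to_shape

x = torch.rand(8, 16, 32, 32)                  # e.g. NCHW activations
y = squeeze_tensor_to_shape(x, [1, 16, 1, 1])  # averages over dims 0, 2 and 3

print(y.shape)  # torch.Size([1, 16, 1, 1])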