Python torch.nn.Bilinear() Examples

The following are 30 code examples of torch.nn.Bilinear(), collected from open-source projects; the originating project and source file are noted above each example. You may also want to check out all available functions/classes of the module torch.nn, or try the search function.
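Before the project examples, here is a minimal standalone sketch of the nn.Bilinear API itself. The layer stores a weight of shape (out_features, in1_features, in2_features) and maps a pair of batched inputs to a single output; all sizes below are illustrative.

import torch
import torch.nn as nn

# Bilinear(in1_features=20, in2_features=30, out_features=40)
layer = nn.Bilinear(20, 30, 40)

x1 = torch.randn(128, 20)   # first input batch
x2 = torch.randn(128, 30)   # second input batch

# out[b, k] = sum_{i, j} x1[b, i] * weight[k, i, j] * x2[b, j] + bias[k]
out = layer(x1, x2)
print(out.shape)            # torch.Size([128, 40])
print(layer.weight.shape)   # torch.Size([40, 20, 30])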
Example #1
Source File: combination.py    From torchsupport with MIT License
def __init__(self, evaluator, inputs, outputs, batch_norm=True):
    """Structural element combining two tensors by bilinear transformation.

    Args:
      evaluator (nn.Module): module taking a combined tensor, and
                  performing computation on that tensor.
      inputs (list or tuple): number of input features for each input tensor.
      outputs (int): number of output features.
      batch_norm (bool): perform batch normalization?
    """
    bilinear = nn.Bilinear(*inputs, outputs)
    if batch_norm:
      batch_norm_layer = nn.BatchNorm1d(outputs)
    else:
      batch_norm_layer = None
    super(BilinearCombination, self).__init__(
      lambda input, task: self.compute(input, task),
      evaluator
    )
    self.bilinear = bilinear
    self.batch_norm = batch_norm_layer 
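The constructor above only wires the modules together; the combination step itself is not shown in this snippet. A plausible sketch of that step, assuming the bilinear output is batch-normalized (when enabled) before being handed to the evaluator, might look like the following (method and argument names are illustrative):

def compute(self, input, task):
    # combine the two input tensors with the bilinear layer
    combined = self.bilinear(input, task)
    if self.batch_norm is not None:
        combined = self.batch_norm(combined)
    return combined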
Example #2
Source File: vgg.py    From Jacinle with MIT License
def reset_vgg_parameters(m, fc_std=0.01, bfc_std=0.001):
    if isinstance(m, nn.Conv2d):
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        m.weight.data.normal_(0, fc_std)
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.Bilinear):
        m.weight.data.normal_(0, bfc_std)
        if m.bias is not None:
            m.bias.data.zero_()
    else:
        for sub in m.modules():
            if m != sub:
                reset_vgg_parameters(sub, fc_std=fc_std, bfc_std=bfc_std) 
Example #3
Source File: resnet.py    From Jacinle with MIT License
def reset_resnet_parameters(m, fc_std=0.01, bfc_std=0.001):
    if isinstance(m, nn.Conv2d):
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()
    elif isinstance(m, nn.Linear):
        m.weight.data.normal_(0, fc_std)
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, nn.Bilinear):
        m.weight.data.normal_(0, bfc_std)
        if m.bias is not None:
            m.bias.data.zero_()
    else:
        for sub in m.modules():
            if m != sub:
                reset_resnet_parameters(sub, fc_std=fc_std, bfc_std=bfc_std) 
Example #4
Source File: bilinear.py    From torecsys with MIT License
def __init__(self,
                 inputs_size : int,
                 num_layers  : int):
        r"""Initialize BilinearNetworkLayer

        Args:
            inputs_size (int): Input size of Bilinear, i.e. size of embedding tensor. 
            num_layers (int): Number of layers of Bilinear Network
        
        Attributes:
            inputs_size (int): Size of inputs, or Product of embed_size and num_fields.
            model (torch.nn.ModuleList): Module List of Bilinear Layers.
        """
        # Initialize the parent class
        super(BilinearNetworkLayer, self).__init__()

        # Store the input size
        self.inputs_size = inputs_size

        # Initialize module list for Bilinear
        self.model = nn.ModuleList()

        # Initialize bilinear layers and add them to module list
        for _ in range(num_layers):
            self.model.append(nn.Bilinear(inputs_size, inputs_size, inputs_size)) 
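The forward pass is omitted in this excerpt. A hedged sketch of how such a stack of square bilinear layers is commonly applied, feeding the running output back in together with the original embedding (this is an assumption about the calling convention, not the library's verbatim code):

def forward(self, emb_inputs):
    # emb_inputs: (batch, inputs_size)
    outputs = emb_inputs
    for layer in self.model:
        # bilinear interaction between the original inputs and the running
        # outputs, with a residual connection back to the inputs
        outputs = layer(emb_inputs, outputs) + emb_inputs
    return outputs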
Example #5
Source File: test_linear.py    From pytorch-meta with MIT License
def test_metabilinear(bias):
    meta_model = MetaBilinear(2, 3, 5, bias=bias)
    model = nn.Bilinear(2, 3, 5, bias=bias)

    assert isinstance(meta_model, MetaModule)
    assert isinstance(meta_model, nn.Bilinear)

    # Set same weights for both models
    weight = torch.randn(5, 2, 3)
    meta_model.weight.data.copy_(weight)
    model.weight.data.copy_(weight)

    if bias:
        bias = torch.randn(5)
        meta_model.bias.data.copy_(bias)
        model.bias.data.copy_(bias)

    inputs1 = torch.randn(7, 2)
    inputs2 = torch.randn(7, 3)

    outputs_torchmeta = meta_model(inputs1, inputs2, params=None)
    outputs_nn = model(inputs1, inputs2)

    np.testing.assert_equal(outputs_torchmeta.detach().numpy(),
                            outputs_nn.detach().numpy()) 
Example #6
Source File: soft_logic.py    From GLN with MIT License
def __init__(self, args):
        super(CenterProbCalc, self).__init__()
        self.prod_enc = get_gnn(args)
        if args.subg_enc == 'onehot':
            self.prod_center_enc = OnehotEmbedder(list_keys=DataInfo.prod_cano_smarts, 
                                                  fn_getkey=lambda m: m.name if m is not None else None,
                                                  embed_size=args.embed_dim)
            self.prod_embed_func = lambda x: self.prod_center_enc(x)
        else:
            self.prod_center_enc = get_gnn(args, gm=args.subg_enc)
            self.prod_embed_func = lambda x: self.prod_center_enc(x)[0]
        if args.att_type == 'inner_prod':
            self.att_func = lambda x, y: torch.sum(x * y, dim=1).view(-1)
        elif args.att_type == 'mlp':
            self.pred = MLP(2 * args.embed_dim, [args.mlp_hidden, 1], nonlinearity=args.act_func)
            self.att_func = lambda x, y: self.pred(torch.cat((x, y), dim=1)).view(-1)
        elif args.att_type == 'bilinear':
            self.bilin = nn.Bilinear(args.embed_dim, args.embed_dim, 1)
            self.att_func = lambda x, y: self.bilin(x, y).view(-1)
        else:
            raise NotImplementedError 
Example #7
Source File: soft_logic.py    From GLN with MIT License
def __init__(self, args):
        super(ActiveProbCalc, self).__init__()
        self.prod_enc = get_gnn(args)
        if args.tpl_enc == 'deepset':
            self.tpl_enc = DeepsetTempFeaturizer(args)
        elif args.tpl_enc == 'onehot':
            self.tpl_enc = OnehotEmbedder(list_keys=DataInfo.unique_templates,
                                          fn_getkey=lambda x: x,
                                          embed_size=args.embed_dim)        
        else:
            raise NotImplementedError
        if args.att_type == 'inner_prod':
            self.att_func = lambda x, y: torch.sum(x * y, dim=1).view(-1)
        elif args.att_type == 'mlp':
            self.pred = MLP(2 * args.embed_dim, [args.mlp_hidden, 1], nonlinearity='relu')
            self.att_func = lambda x, y: self.pred(torch.cat((x, y), dim=1)).view(-1)
        elif args.att_type == 'bilinear':
            self.bilin = nn.Bilinear(args.embed_dim, args.embed_dim, 1)
            self.att_func = lambda x, y: self.bilin(x, y).view(-1)
        else:
            raise NotImplementedError 
Example #8
Source File: test_linear.py    From pytorch-meta with MIT License
def test_metabilinear_params(bias):
    meta_model = MetaBilinear(2, 3, 5, bias=bias)
    model = nn.Bilinear(2, 3, 5, bias=bias)

    params = OrderedDict()
    params['weight'] = torch.randn(5, 2, 3)
    model.weight.data.copy_(params['weight'])

    if bias:
        params['bias'] = torch.randn(5)
        model.bias.data.copy_(params['bias'])

    inputs1 = torch.randn(7, 2)
    inputs2 = torch.randn(7, 3)

    outputs_torchmeta = meta_model(inputs1, inputs2, params=params)
    outputs_nn = model(inputs1, inputs2)

    np.testing.assert_equal(outputs_torchmeta.detach().numpy(),
                            outputs_nn.detach().numpy()) 
Example #9
Source File: compute_madd.py    From FNA with Apache License 2.0
def compute_madd(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_madd(module, inp, out)
    elif isinstance(module, nn.ConvTranspose2d):
        return compute_ConvTranspose2d_madd(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_madd(module, inp, out)
    elif isinstance(module, nn.MaxPool2d):
        return compute_MaxPool2d_madd(module, inp, out)
    elif isinstance(module, nn.AvgPool2d):
        return compute_AvgPool2d_madd(module, inp, out)
    elif isinstance(module, (nn.ReLU, nn.ReLU6)):
        return compute_ReLU_madd(module, inp, out)
    elif isinstance(module, nn.Softmax):
        return compute_Softmax_madd(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_madd(module, inp, out)
    elif isinstance(module, nn.Bilinear):
        return compute_Bilinear_madd(module, inp[0], inp[1], out)
    else:
        print("[MAdd]: {} is not supported!".format(type(module).__name__))
        return 0 
Example #10
Source File: compute_madd.py    From TreeFilter-Torch with MIT License
def compute_madd(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_madd(module, inp, out)
    elif isinstance(module, nn.ConvTranspose2d):
        return compute_ConvTranspose2d_madd(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_madd(module, inp, out)
    elif isinstance(module, nn.MaxPool2d):
        return compute_MaxPool2d_madd(module, inp, out)
    elif isinstance(module, nn.AvgPool2d):
        return compute_AvgPool2d_madd(module, inp, out)
    elif isinstance(module, (nn.ReLU, nn.ReLU6)):
        return compute_ReLU_madd(module, inp, out)
    elif isinstance(module, nn.Softmax):
        return compute_Softmax_madd(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_madd(module, inp, out)
    elif isinstance(module, nn.Bilinear):
        return compute_Bilinear_madd(module, inp[0], inp[1], out)
    else:
        print("[MAdd]: {} is not supported!".format(type(module).__name__))
        return 0 
Example #11
Source File: compute_madd.py    From TorchSeg with MIT License
def compute_madd(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_madd(module, inp, out)
    elif isinstance(module, nn.ConvTranspose2d):
        return compute_ConvTranspose2d_madd(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_madd(module, inp, out)
    elif isinstance(module, nn.MaxPool2d):
        return compute_MaxPool2d_madd(module, inp, out)
    elif isinstance(module, nn.AvgPool2d):
        return compute_AvgPool2d_madd(module, inp, out)
    elif isinstance(module, (nn.ReLU, nn.ReLU6)):
        return compute_ReLU_madd(module, inp, out)
    elif isinstance(module, nn.Softmax):
        return compute_Softmax_madd(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_madd(module, inp, out)
    elif isinstance(module, nn.Bilinear):
        return compute_Bilinear_madd(module, inp[0], inp[1], out)
    else:
        print("[MAdd]: {} is not supported!".format(type(module).__name__))
        return 0 
Example #12
Source File: compute_madd.py    From torchstat with MIT License
def compute_madd(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_madd(module, inp, out)
    elif isinstance(module, nn.ConvTranspose2d):
        return compute_ConvTranspose2d_madd(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_madd(module, inp, out)
    elif isinstance(module, nn.MaxPool2d):
        return compute_MaxPool2d_madd(module, inp, out)
    elif isinstance(module, nn.AvgPool2d):
        return compute_AvgPool2d_madd(module, inp, out)
    elif isinstance(module, (nn.ReLU, nn.ReLU6)):
        return compute_ReLU_madd(module, inp, out)
    elif isinstance(module, nn.Softmax):
        return compute_Softmax_madd(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_madd(module, inp, out)
    elif isinstance(module, nn.Bilinear):
        return compute_Bilinear_madd(module, inp[0], inp[1], out)
    else:
        print(f"[MAdd]: {type(module).__name__} is not supported!")
        return 0 
Example #13
Source File: helper.py    From torchscope with Apache License 2.0
def compute_madd(module, inp, out):
    if isinstance(module, nn.Conv2d):
        return compute_Conv2d_madd(module, inp, out)
    elif isinstance(module, nn.ConvTranspose2d):
        return compute_ConvTranspose2d_madd(module, inp, out)
    elif isinstance(module, nn.BatchNorm2d):
        return compute_BatchNorm2d_madd(module, inp, out)
    elif isinstance(module, nn.MaxPool2d):
        return compute_MaxPool2d_madd(module, inp, out)
    elif isinstance(module, nn.AvgPool2d):
        return compute_AvgPool2d_madd(module, inp, out)
    elif isinstance(module, (nn.ReLU, nn.ReLU6)):
        return compute_ReLU_madd(module, inp, out)
    elif isinstance(module, nn.Softmax):
        return compute_Softmax_madd(module, inp, out)
    elif isinstance(module, nn.Linear):
        return compute_Linear_madd(module, inp, out)
    elif isinstance(module, nn.Bilinear):
        return compute_Bilinear_madd(module, inp[0], inp[1], out)
    else:
        return 0 
Example #14
Source File: models.py    From spatial-VAE with MIT License
def __init__(self, latent_dim, hidden_dim, n_out=1, num_layers=1, activation=nn.Tanh,
                 softplus=False, resid=False, expand_coords=False, bilinear=False):
        super(SpatialGenerator, self).__init__()

        self.softplus = softplus
        self.expand_coords = expand_coords

        in_dim = 2
        if expand_coords:
            in_dim = 5 # include squares of coordinates as inputs

        self.coord_linear = nn.Linear(in_dim, hidden_dim)
        self.latent_dim = latent_dim
        if latent_dim > 0:
            self.latent_linear = nn.Linear(latent_dim, hidden_dim, bias=False)

        if latent_dim > 0 and bilinear: # include bilinear layer on latent and coordinates
            self.bilinear = nn.Bilinear(in_dim, latent_dim, hidden_dim, bias=False)

        layers = [activation()]
        for _ in range(1,num_layers):
            if resid:
                layers.append(ResidLinear(hidden_dim, hidden_dim, activation=activation))
            else:
                layers.append(nn.Linear(hidden_dim,hidden_dim))
                layers.append(activation())
        layers.append(nn.Linear(hidden_dim, n_out))

        self.layers = nn.Sequential(*layers) 
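The generator's forward pass is not reproduced here. A hedged reading of this constructor is that the coordinate features and latent features are summed, with the bilinear interaction added on top when that layer exists (x stands for coordinates and z for the latent code; this sketch is an assumption, not the project's exact code):

h = self.coord_linear(x)                  # (batch, hidden_dim)
if self.latent_dim > 0:
    h = h + self.latent_linear(z)
    if hasattr(self, 'bilinear'):
        h = h + self.bilinear(x, z)       # coordinate-latent interaction term
y = self.layers(h)                        # MLP head producing n_out values per coordinate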
Example #15
Source File: helper.py    From torchscope with Apache License 2.0
def compute_Bilinear_madd(module, inp1, inp2, out):
    assert isinstance(module, nn.Bilinear)
    assert len(inp1.size()) == 2 and len(inp2.size()) == 2 and len(out.size()) == 2

    num_in_features_1 = inp1.size()[1]
    num_in_features_2 = inp2.size()[1]
    num_out_features = out.size()[1]

    mul = num_in_features_1 * num_in_features_2 + num_in_features_2
    add = num_in_features_1 * num_in_features_2 + num_in_features_2 - 1
    return num_out_features * (mul + add) 
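Plugging concrete sizes into the formula above makes the count easy to check: for nn.Bilinear(20, 30, 40), each output feature is charged 20 * 30 + 30 = 630 multiplications and 629 additions, so the helper reports 40 * (630 + 629) = 50,360 multiply-adds per sample. A quick standalone check of the same arithmetic:

import torch
import torch.nn as nn

layer = nn.Bilinear(20, 30, 40)
x1, x2 = torch.randn(1, 20), torch.randn(1, 30)
out = layer(x1, x2)                      # shape (1, 40)

# same arithmetic as compute_Bilinear_madd above
mul = 20 * 30 + 30                       # 630
add = 20 * 30 + 30 - 1                   # 629
print(out.size()[1] * (mul + add))       # 50360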
Example #16
Source File: compute_madd.py    From TorchSeg with MIT License
def compute_Bilinear_madd(module, inp1, inp2, out):
    assert isinstance(module, nn.Bilinear)
    assert len(inp1.size()) == 2 and len(inp2.size()) == 2 and len(
        out.size()) == 2

    num_in_features_1 = inp1.size()[1]
    num_in_features_2 = inp2.size()[1]
    num_out_features = out.size()[1]

    mul = num_in_features_1 * num_in_features_2 + num_in_features_2
    add = num_in_features_1 * num_in_features_2 + num_in_features_2 - 1
    return num_out_features * (mul + add) 
Example #17
Source File: RNN_RNN.py    From SummaRuNNer with MIT License
def __init__(self, args, embed=None):
        super(RNN_RNN, self).__init__(args)
        self.model_name = 'RNN_RNN'
        self.args = args
        
        V = args.embed_num
        D = args.embed_dim
        H = args.hidden_size
        S = args.seg_num
        P_V = args.pos_num
        P_D = args.pos_dim
        self.abs_pos_embed = nn.Embedding(P_V,P_D)
        self.rel_pos_embed = nn.Embedding(S,P_D)
        self.embed = nn.Embedding(V,D,padding_idx=0)
        if embed is not None:
            self.embed.weight.data.copy_(embed)

        self.word_RNN = nn.GRU(
                        input_size = D,
                        hidden_size = H,
                        batch_first = True,
                        bidirectional = True
                        )
        self.sent_RNN = nn.GRU(
                        input_size = 2*H,
                        hidden_size = H,
                        batch_first = True,
                        bidirectional = True
                        )
        self.fc = nn.Linear(2*H,2*H)

        # Parameters of Classification Layer
        self.content = nn.Linear(2*H,1,bias=False)
        self.salience = nn.Bilinear(2*H,2*H,1,bias=False)
        self.novelty = nn.Bilinear(2*H,2*H,1,bias=False)
        self.abs_pos = nn.Linear(P_D,1,bias=False)
        self.rel_pos = nn.Linear(P_D,1,bias=False)
        self.bias = nn.Parameter(torch.FloatTensor(1).uniform_(-0.1,0.1)) 
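Here the two Bilinear layers produce scalar scores: salience compares a sentence representation against the document representation, and novelty compares it against the running summary representation. A minimal standalone sketch of that scoring pattern (H and the tensor names are illustrative, not the model's actual variables):

import torch
import torch.nn as nn

H = 200
salience = nn.Bilinear(2 * H, 2 * H, 1, bias=False)
novelty = nn.Bilinear(2 * H, 2 * H, 1, bias=False)

sent = torch.randn(1, 2 * H)     # one sentence representation
doc = torch.randn(1, 2 * H)      # the document representation
summary = torch.randn(1, 2 * H)  # summary built so far

score = salience(sent, doc) - novelty(sent, torch.tanh(summary))
print(score.shape)               # torch.Size([1, 1]) -> one scalar per sentence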
Example #18
Source File: encoder.py    From LDG with Educational Community License v2.0
def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Linear) or isinstance(m, nn.Bilinear):
                nn.init.xavier_normal_(m.weight.data)
                m.bias.data.fill_(0.1) 
Example #19
Source File: compute_madd.py    From FNA with Apache License 2.0
def compute_Bilinear_madd(module, inp1, inp2, out):
    assert isinstance(module, nn.Bilinear)
    assert len(inp1.size()) == 2 and len(inp2.size()) == 2 and len(
        out.size()) == 2

    num_in_features_1 = inp1.size()[1]
    num_in_features_2 = inp2.size()[1]
    num_out_features = out.size()[1]

    mul = num_in_features_1 * num_in_features_2 + num_in_features_2
    add = num_in_features_1 * num_in_features_2 + num_in_features_2 - 1
    return num_out_features * (mul + add) 
Example #20
Source File: encoder.py    From LDG with Educational Community License v2.0
def __init__(self, n_in, n_hid, n_out, do_prob=0., bilinear=False, bnorm=True):
        super(MLP, self).__init__()
        self.bilinear = bilinear
        self.bnorm = bnorm
        if bilinear:
            self.fc1 = nn.Bilinear(n_in, n_in, n_hid)
        else:
            self.fc1 = nn.Linear(n_in, n_hid)
        self.fc2 = nn.Linear(n_hid, n_out)
        if bnorm:
            self.bn = nn.BatchNorm1d(n_out)
        self.dropout_prob = do_prob

        self.init_weights() 
Example #21
Source File: encoder.py    From LDG with Educational Community License v2.0
def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Linear) or isinstance(m, nn.Bilinear):
                nn.init.xavier_normal_(m.weight.data)
                m.bias.data.fill_(0.1)
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_() 
Example #22
Source File: encoder.py    From LDG with Educational Community License v2.0
def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Linear) or isinstance(m, nn.Bilinear):
                nn.init.xavier_normal_(m.weight.data)
                m.bias.data.fill_(0.1) 
Example #23
Source File: dyrep.py    From LDG with Educational Community License v2.0
def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Linear) or isinstance(m, nn.Bilinear):
                # print('before Xavier', m.weight.data.shape, m.weight.data.min(), m.weight.data.max())
                nn.init.xavier_normal_(m.weight.data)
                # print('after Xavier', m.weight.data.shape, m.weight.data.min(), m.weight.data.max()) 
Example #24
Source File: compute_madd.py    From torchstat with MIT License
def compute_Bilinear_madd(module, inp1, inp2, out):
    assert isinstance(module, nn.Bilinear)
    assert len(inp1.size()) == 2 and len(inp2.size()) == 2 and len(out.size()) == 2

    num_in_features_1 = inp1.size()[1]
    num_in_features_2 = inp2.size()[1]
    num_out_features = out.size()[1]

    mul = num_in_features_1 * num_in_features_2 + num_in_features_2
    add = num_in_features_1 * num_in_features_2 + num_in_features_2 - 1
    return num_out_features * (mul + add) 
Example #25
Source File: biaffine_parser.py    From fastNLP with Apache License 2.0
def __init__(self, in1_features, in2_features, num_label, bias=True):
        r"""
        
        :param in1_features: dimension of the first input features
        :param in2_features: dimension of the second input features
        :param num_label: number of edge label classes
        :param bias: whether to use a bias term. Default: ``True``
        """
        super(LabelBilinear, self).__init__()
        self.bilinear = nn.Bilinear(in1_features, in2_features, num_label, bias=bias)
        self.lin = nn.Linear(in1_features + in2_features, num_label, bias=False) 
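The forward pass for this label scorer is not included in the excerpt; a biaffine classifier of this shape typically adds the bilinear interaction to a linear term over the concatenated inputs, roughly as follows (a sketch, not necessarily fastNLP's verbatim code):

def forward(self, x1, x2):
    # bilinear interaction plus a linear term on the concatenation
    output = self.bilinear(x1, x2)
    output = output + self.lin(torch.cat([x1, x2], dim=-1))
    return output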
Example #26
Source File: fusions.py    From block.bootstrap.pytorch with BSD 3-Clause "New" or "Revised" License
def __init__(self,
            input_dims,
            output_dim,
            mm_dim=1600,
            shared=False,
            normalize=False,
            dropout_input=0.,
            dropout_pre_lin=0.,
            dropout_output=0.):
        super(Tucker, self).__init__()
        self.input_dims = input_dims
        self.shared = shared
        self.mm_dim = mm_dim
        self.output_dim = output_dim
        self.normalize = normalize
        self.dropout_input = dropout_input
        self.dropout_pre_lin = dropout_pre_lin
        self.dropout_output = dropout_output
        # Modules
        self.linear0 = nn.Linear(input_dims[0], mm_dim)
        if shared:
            self.linear1 = self.linear0
        else:
            self.linear1 = nn.Linear(input_dims[1], mm_dim)
        self.bilinear = nn.Bilinear(mm_dim, mm_dim, mm_dim)
        self.linear_out = nn.Linear(mm_dim, output_dim)
        self.n_params = sum(p.numel() for p in self.parameters() if p.requires_grad) 
Example #27
Source File: global_infonce_stdim.py    From atari-representation-learning with MIT License
def __init__(self, num_inputs1, num_inputs2):
        super().__init__()
        self.network = nn.Bilinear(num_inputs1, num_inputs2, 1) 
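In these contrastive (InfoNCE-style) models the Bilinear module acts as a critic: it scores how well two representations match, and matching pairs should score higher than mismatched ones. A minimal standalone sketch of that usage with a simple binary cross-entropy objective (all sizes and names are illustrative):

import torch
import torch.nn as nn
import torch.nn.functional as F

critic = nn.Bilinear(256, 128, 1)        # e.g. global feature vs. local feature

anchor = torch.randn(32, 256)
positive = torch.randn(32, 128)          # features from the same observation
negative = torch.randn(32, 128)          # features from a different observation

pos_score = critic(anchor, positive)     # (32, 1)
neg_score = critic(anchor, negative)     # (32, 1)
loss = F.binary_cross_entropy_with_logits(
    torch.cat([pos_score, neg_score]),
    torch.cat([torch.ones_like(pos_score), torch.zeros_like(neg_score)]))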
Example #28
Source File: stdim.py    From atari-representation-learning with MIT License
def __init__(self, num_inputs1, num_inputs2):
        super().__init__()
        self.network = nn.Bilinear(num_inputs1, num_inputs2, 1) 
Example #29
Source File: global_local_infonce.py    From atari-representation-learning with MIT License
def __init__(self, num_inputs1, num_inputs2):
        super().__init__()
        self.network = nn.Bilinear(num_inputs1, num_inputs2, 1) 
Example #30
Source File: temporal_dim.py    From atari-representation-learning with MIT License
def __init__(self, num_inputs1, num_inputs2):
        super().__init__()
        self.network = nn.Bilinear(num_inputs1, num_inputs2, 1)