Python mxnet.symbol() Examples

The following are 30 code examples of mxnet.symbol(), collected from open-source projects. The source file and originating project are noted above each example.
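Most of the examples below follow the same pattern: declare placeholder Variables, chain operator calls into a graph, then query the graph. As a warm-up, here is a minimal sketch using only standard mxnet.symbol calls (the layer names are illustrative):

import mxnet as mx

# Declare a placeholder, stack a layer and an output head.
data = mx.symbol.Variable('data')
net = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=64)
net = mx.symbol.SoftmaxOutput(data=net, name='softmax')

print(net.list_arguments())  # ['data', 'fc1_weight', 'fc1_bias', 'softmax_label']
arg_shapes, out_shapes, _ = net.infer_shape(data=(32, 100))
print(out_shapes)            # [(32, 64)]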
Example #1
Source File: model_handler.py    From xfer with Apache License 2.0
def drop_layer_top(self, num_layers_to_drop=1):
        """
        Remove layers from output of model.

        :param int num_layers_to_drop: Number of layers to remove from the model output.
        """
        network_symbol = self.symbol
        network = self._get_symbol_dict(network_symbol)

        self._assert_drop_layer_valid(num_layers_to_drop)
        self._assert_model_has_single_output(network)

        layers_dropped = []
        last_layer = len(network[consts.NODES]) - 1
        for n in range(num_layers_to_drop):
            last_layer_inputs = self._get_names_of_inputs_to_layer(symbol_dict=network, node_idx=last_layer)
            self._assert_layer_drop_not_ambiguous(possible_layers_to_drop=last_layer_inputs, layer_drop_number=n)
            # The ambiguity assertion above guarantees last_layer_inputs has exactly one entry
            layers_dropped.append(network[consts.NODES][last_layer][consts.NAME])
            last_layer = last_layer_inputs[0]

        network_symbol = network_symbol.get_internals()[network[consts.NODES][last_layer][consts.NAME] + consts.OUTPUT]

        logging.info('{} deleted from model top'.format(', '.join(layers_dropped)))
        self.update_sym(network_symbol) 
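A hedged usage sketch for the method above; the ModelHandler construction is an assumption based on the xfer project and may differ from the actual API:

# Illustrative only: assumes ModelHandler wraps an existing mx.mod.Module.
from xfer import model_handler

mh = model_handler.ModelHandler(pretrained_module)  # pretrained_module: an assumed mx.mod.Module
mh.drop_layer_top(num_layers_to_drop=2)  # e.g. strip the softmax and final FC layers
feature_symbol = mh.symbol               # the graph now ends at the new top layer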
Example #2
Source File: test_symbol.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_symbol_compose():
    data = mx.symbol.Variable('data')
    net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
    net1 = mx.symbol.FullyConnected(data=net1, name='fc2', num_hidden=100)
    assert net1.list_arguments() == ['data',
                                     'fc1_weight', 'fc1_bias',
                                     'fc2_weight', 'fc2_bias']

    net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)
    net2 = mx.symbol.Activation(data=net2, act_type='relu')
    net2 = mx.symbol.FullyConnected(data=net2, name='fc4', num_hidden=20)

    composed = net2(fc3_data=net1, name='composed')
    multi_out = mx.symbol.Group([composed, net1])
    assert len(multi_out.list_outputs()) == 2
    assert len(multi_out) == 2 
Example #3
Source File: test_symbol.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_symbol_children():
    data = mx.symbol.Variable('data')
    oldfc = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
    net1 = mx.symbol.FullyConnected(data=oldfc, name='fc2', num_hidden=100)

    assert net1.get_children().list_outputs() == ['fc1_output', 'fc2_weight', 'fc2_bias']
    assert len(net1.get_children()) == 3
    assert net1.get_children().get_children().list_outputs() == ['data', 'fc1_weight', 'fc1_bias']
    assert len(net1.get_children().get_children()) == 3
    assert net1.get_children()['fc2_weight'].list_arguments() == ['fc2_weight']
    assert net1.get_children()['fc2_weight'].get_children() is None

    data = mx.sym.Variable('data')
    sliced = mx.sym.SliceChannel(data, num_outputs=3, name='slice')
    concat = mx.sym.Concat(*list(sliced))

    assert concat.get_children().list_outputs() == \
        ['slice_output0', 'slice_output1', 'slice_output2']
    assert sliced.get_children().list_outputs() == ['data'] 
Example #4
Source File: test_symbol.py    From SNIPER-mxnet with Apache License 2.0
def test_symbol_children():
    data = mx.symbol.Variable('data')
    oldfc = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
    net1 = mx.symbol.FullyConnected(data=oldfc, name='fc2', num_hidden=100)

    assert net1.get_children().list_outputs() == ['fc1_output', 'fc2_weight', 'fc2_bias']
    assert len(net1.get_children()) == 3
    assert net1.get_children().get_children().list_outputs() == ['data', 'fc1_weight', 'fc1_bias']
    assert len(net1.get_children().get_children()) == 3
    assert net1.get_children()['fc2_weight'].list_arguments() == ['fc2_weight']
    assert net1.get_children()['fc2_weight'].get_children() is None

    data = mx.sym.Variable('data')
    sliced = mx.sym.SliceChannel(data, num_outputs=3, name='slice')
    concat = mx.sym.Concat(*list(sliced))

    assert concat.get_children().list_outputs() == \
        ['slice_output0', 'slice_output1', 'slice_output2']
    assert sliced.get_children().list_outputs() == ['data'] 
Example #5
Source File: test_symbol.py    From SNIPER-mxnet with Apache License 2.0
def test_symbol_compose():
    data = mx.symbol.Variable('data')
    net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
    net1 = mx.symbol.FullyConnected(data=net1, name='fc2', num_hidden=100)
    assert net1.list_arguments() == ['data',
                                     'fc1_weight', 'fc1_bias',
                                     'fc2_weight', 'fc2_bias']

    net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)
    net2 = mx.symbol.Activation(data=net2, act_type='relu')
    net2 = mx.symbol.FullyConnected(data=net2, name='fc4', num_hidden=20)

    composed = net2(fc3_data=net1, name='composed')
    multi_out = mx.symbol.Group([composed, net1])
    assert len(multi_out.list_outputs()) == 2
    assert len(multi_out) == 2 
Example #6
Source File: test_symbol.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_symbol_infer_shape_var():
    "Test specifying shape information when constructing a variable"
    shape = (2, 3)
    a = mx.symbol.Variable('a', shape=shape)
    b = mx.symbol.Variable('b')
    c = mx.symbol.elemwise_add(a, b)
    arg_shapes, out_shapes, aux_shapes = c.infer_shape()
    assert arg_shapes[0] == shape
    assert arg_shapes[1] == shape
    assert out_shapes[0] == shape

    overwrite_shape = (5, 6)
    arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=overwrite_shape)
    assert arg_shapes[0] == overwrite_shape
    assert arg_shapes[1] == overwrite_shape
    assert out_shapes[0] == overwrite_shape 
Example #7
Source File: mx_sparse.py    From mercari-solution with MIT License
def predict(self, X):
        assert self.is_fitted
        ys = []
        n = X.shape[0]
        batch_size = min(n, 2**13)
        mod = mx.mod.Module(symbol=self.output, label_names=None)
        # At most two passes: the first covers the full batches, a second
        # (with batch_size = n % batch_size) covers any leftover rows.
        for _ in range(2):
            eval_iter = self._make_train_iter(
                X[len(ys):, :], y=None, batch_size=batch_size, shuffle=False)
            mod.bind(
                data_shapes=eval_iter.provide_data,
                label_shapes=None,
                for_training=False,
                force_rebind=True,
            )
            mod.set_params(*self.mod_params)
            ys.extend(mod.predict(eval_iter).asnumpy())
            batch_size = n % batch_size
            if batch_size == 0:
                break
        assert len(ys) == n
        return self._invert_target(ys) 
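The two-pass loop above handles the case where n is not a multiple of the batch size: the first pass covers the full batches and the second, with batch_size = n % batch_size, covers the remainder. A standalone plain-Python sketch of the same arithmetic:

def batch_plan(n, max_batch=2 ** 13):
    """Return (batch_size, rows_covered) for each pass of the loop above."""
    passes = []
    batch_size = min(n, max_batch)
    covered = (n // batch_size) * batch_size  # full batches only
    passes.append((batch_size, covered))
    if n % batch_size:
        passes.append((n % batch_size, n % batch_size))  # one mop-up pass
    return passes

print(batch_plan(20000))  # [(8192, 16384), (3616, 3616)]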
Example #8
Source File: test_symbol.py    From dynamic-training-with-apache-mxnet-on-aws with Apache License 2.0
def test_zero_prop():
    data = mx.symbol.Variable('data')
    for i in range(10):
        data = data * data

    exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256))
    big = int(re.search(r'Total (\d+) MB allocated', exe.debug_str()).group(1))

    exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256), grad_req='null')
    small1 = int(re.search(r'Total (\d+) MB allocated', exe.debug_str()).group(1))

    data = mx.sym.stop_gradient(data)
    exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256))
    small2 = int(re.search(r'Total (\d+) MB allocated', exe.debug_str()).group(1))

    assert big > small2
    assert small1 == small2 
Example #9
Source File: mxnet_input_symbols.py    From datawig with Apache License 2.0
def __init__(self,
                 field_name: str,
                 numeric_latent_dim: int = 100,
                 numeric_hidden_layers: int = 1) -> None:
        super(NumericalFeaturizer, self).__init__(field_name, numeric_latent_dim)

        self.numeric_hidden_layers = int(numeric_hidden_layers)
        self.numeric_latent_dim = int(numeric_latent_dim)

        with mx.name.Prefix(self.prefix):
            self.symbol = self.input_symbol
            for _ in range(self.numeric_hidden_layers):
                symbol = mx.sym.FullyConnected(
                    data=self.symbol,
                    num_hidden=self.numeric_latent_dim
                )
                self.symbol = mx.symbol.Activation(data=symbol, act_type="relu") 
Example #10
Source File: imputer.py    From datawig with Apache License 2.0
def __make_numerical_loss(latents: mx.symbol.Symbol,
                              label_field_name: str) -> Tuple[Any, Any]:
        """
        Generate output symbol for univariate numeric loss.

        :param latents: symbol holding the latent feature representation
        :param label_field_name: name of the numeric label column
        :return: mxnet symbols for predictions and loss
        """

        # generate prediction symbol
        pred = mx.sym.FullyConnected(
            data=latents,
            num_hidden=1,
            name="label_{}".format(label_field_name))

        target = mx.sym.Variable(label_field_name)

        # squared loss
        loss = mx.sym.sum((pred - target) ** 2.0)

        return pred, loss 
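On its own the summed expression above is just a symbol; here is a hedged sketch of how such a loss would typically be made trainable in symbolic MXNet (mx.sym.MakeLoss and mx.sym.BlockGrad are standard operators; the variable names are illustrative):

import mxnet as mx

latents = mx.sym.Variable('latents')
target = mx.sym.Variable('price')
pred = mx.sym.FullyConnected(data=latents, num_hidden=1, name='label_price')

# MakeLoss marks the expression as a loss head so gradients flow from it;
# BlockGrad exposes the prediction as an output without a gradient path.
loss = mx.sym.MakeLoss(mx.sym.sum((pred - target) ** 2.0))
net = mx.sym.Group([mx.sym.BlockGrad(pred), loss])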
Example #11
Source File: mxnet_input_symbols.py    From datawig with Apache License 2.0
def __init__(self,
                 field_name: str,
                 max_tokens: int = 100,
                 embed_dim: int = 10) -> None:
        super(EmbeddingFeaturizer, self).__init__(field_name, embed_dim)

        self.vocab_size = int(max_tokens)
        self.embed_dim = int(embed_dim)

        with mx.name.Prefix(field_name + "_"):
            symbol = mx.sym.Embedding(
                data=self.input_symbol,
                input_dim=self.vocab_size,
                output_dim=self.embed_dim
            )
            self.symbol = mx.sym.FullyConnected(data=symbol, num_hidden=self.latent_dim) 
Example #12
Source File: model_handler.py    From xfer with Apache License 2.0
def update_sym(self, new_symbol):
        """
        Update symbol attribute, layer names, and layer types dict and clean parameters.

        :param new_symbol: Symbol with which to update ModelHandler.
        :type new_symbol: :class:`mx.symbol.Symbol`
        """
        self.symbol = new_symbol
        self.layer_type_dict = self._get_layer_type_dict()
        self.arg_params = self._clean_params(self.symbol, self.arg_params)
        self.aux_params = self._clean_params(self.symbol, self.aux_params) 
Example #13
Source File: test_symbol.py    From SNIPER-mxnet with Apache License 2.0
def test_symbol_infer_type():
    data = mx.symbol.Variable('data')
    f32data = mx.symbol.Cast(data=data, dtype='float32')
    fc1  = mx.symbol.FullyConnected(data = f32data, name='fc1', num_hidden=128)
    mlp  = mx.symbol.SoftmaxOutput(data = fc1, name = 'softmax')

    arg, out, aux = mlp.infer_type(data=np.float16)
    assert arg == [np.float16, np.float32, np.float32, np.float32]
    assert out == [np.float32]
    assert aux == [] 
Example #14
Source File: common.py    From mxnet-ssd with MIT License
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    relu : mx.symbol
        the resulting activation symbol
    """
    bias = mx.symbol.Variable(name="{}_conv_bias".format(name),   
        init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
    conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="{}_conv".format(name), bias=bias)
    if use_batchnorm:
        conv = mx.symbol.BatchNorm(data=conv, name="{}_bn".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}_{}".format(name, act_type))
    return relu 
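A short usage sketch for the helper above (shapes and names are illustrative):

import mxnet as mx

data = mx.symbol.Variable('data')
# One call builds conv -> batch norm -> ReLU with consistent naming.
stage1 = conv_act_layer(data, 'stage1', 64, kernel=(3, 3), pad=(1, 1),
                        stride=(1, 1), act_type='relu', use_batchnorm=True)
_, out_shapes, _ = stage1.infer_shape(data=(1, 3, 300, 300))
print(out_shapes)  # [(1, 64, 300, 300)]: 3x3 kernel, pad 1, stride 1 preserves spatial size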
Example #15
Source File: common.py    From mxnet-ssd with MIT License
def legacy_conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    (conv, relu) mx.Symbols
    """
    assert not use_batchnorm, "batchnorm not yet supported"
    bias = mx.symbol.Variable(name="conv{}_bias".format(name),
        init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
    conv = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="conv{}".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}{}".format(act_type, name))
    if use_batchnorm:
        relu = mx.symbol.BatchNorm(data=relu, name="bn{}".format(name))
    return conv, relu 
Example #16
Source File: summary.py    From mxboard with Apache License 2.0
def _sym2pb(sym):
    """Converts an MXNet symbol to its graph protobuf definition."""
    return GraphDef(node=_get_nodes_from_symbol(sym), versions=VersionDef(producer=100)) 
Example #17
Source File: test_symbol.py    From SNIPER-mxnet with Apache License 2.0
def test_symbol_copy():
    data = mx.symbol.Variable('data')
    data_2 = copy.deepcopy(data)
    data_3 = copy.copy(data)
    assert data.tojson() == data_2.tojson()
    assert data.tojson() == data_3.tojson() 
Example #18
Source File: common.py    From SNIPER-mxnet with Apache License 2.0
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    relu : mx.symbol
        the resulting activation symbol
    """
    conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="{}_conv".format(name))
    if use_batchnorm:
        conv = mx.symbol.BatchNorm(data=conv, name="{}_bn".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}_{}".format(name, act_type))
    return relu 
Example #19
Source File: common.py    From SNIPER-mxnet with Apache License 2.0
def legacy_conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    (conv, relu) mx.Symbols
    """
    assert not use_batchnorm, "batchnorm not yet supported"
    bias = mx.symbol.Variable(name="conv{}_bias".format(name),
        init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
    conv = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="conv{}".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}{}".format(act_type, name))
    if use_batchnorm:
        relu = mx.symbol.BatchNorm(data=relu, name="bn{}".format(name))
    return conv, relu 
Example #20
Source File: test_symbol.py    From SNIPER-mxnet with Apache License 2.0
def test_symbol_bool():
    x = mx.symbol.Variable('x')
    assertRaises(NotImplementedForSymbol, bool, x) 
Example #21
Source File: _seq2seq_network.py    From gluon-ts with Apache License 2.0
def hybrid_forward(
        self,
        F,
        past_target: Tensor,
        feat_static_cat: Tensor,
        past_feat_dynamic_real: Tensor,
        future_feat_dynamic_real: Tensor,
    ) -> Tensor:
        """

        Parameters
        ----------
        F: mx.symbol or mx.ndarray
            Gluon function namespace (mx.nd when run imperatively, mx.sym once hybridized)
        past_target: mx.nd.NDArray or mx.sym.Symbol
            past target
        feat_static_cat: mx.nd.NDArray or mx.sym.Symbol
            static categorical features
        past_feat_dynamic_real: mx.nd.NDArray or mx.sym.Symbol
            past dynamic real-valued features
        future_feat_dynamic_real: mx.nd.NDArray or mx.sym.Symbol
            future dynamic real-valued features

        Returns
        -------
        mx.nd.NDArray or mx.sym.Symbol
            the predicted sequence
        """
        scaled_decoder_output = self.compute_decoder_outputs(
            F,
            past_target=past_target,
            feat_static_cat=feat_static_cat,
            past_feat_dynamic_real=past_feat_dynamic_real,
            future_feat_dynamic_real=future_feat_dynamic_real,
        )
        predictions = self.quantile_proj(scaled_decoder_output).swapaxes(2, 1)

        return predictions 
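The F parameter above is Gluon's HybridBlock dispatch mechanism: the same hybrid_forward body runs imperatively with F = mx.nd, and is traced symbolically with F = mx.sym once the block is hybridized. A minimal self-contained sketch:

import mxnet as mx
from mxnet.gluon import HybridBlock

class Square(HybridBlock):
    def hybrid_forward(self, F, x):
        # F is mx.nd before hybridize() and mx.sym afterwards.
        return F.square(x)

block = Square()
block.initialize()
y = block(mx.nd.array([1.0, 2.0, 3.0]))  # imperative pass: F = mx.nd
block.hybridize()
y = block(mx.nd.array([1.0, 2.0, 3.0]))  # symbolic trace: F = mx.sym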
Example #22
Source File: model_handler.py    From xfer with Apache License 2.0
def _clean_params(symbol, parameter_dict):
        """
        Return a copy of parameter_dict with parameters for layers that are not in the symbol removed.

        :param symbol: Symbol to give point of reference for removing parameters.
        :type symbol: :class:`mx.symbol.Symbol`
        :param dict parameter_dict: Dictionary of model parameters.
        :return: Parameter dictionary with all parameters referring to layer(s) in the symbol
        :rtype: dict
        """
        parameter_dict = parameter_dict.copy()
        keys_to_delete_arg = set(parameter_dict.keys()) - set(symbol.get_internals().list_outputs())
        for key in keys_to_delete_arg:
            del parameter_dict[key]
        return parameter_dict 
Example #23
Source File: model_handler.py    From xfer with Apache License 2.0
def _get_symbol(symbol_dict):
        """
        Get MXNet symbol from its symbol dictionary.
        """
        return mx.sym.load_json(json.dumps(symbol_dict)) 
Example #24
Source File: model_handler.py    From xfer with Apache License 2.0
def _get_symbol_dict(symbol):
        """
        Get the dictionary representation of a symbol via its JSON.

        :param symbol: Symbol to convert.
        :return: Symbol dictionary
        :rtype: dict
        """
        return json.loads(symbol.tojson()) 
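Paired with _get_symbol from Example #23, this amounts to a JSON round trip that lets the graph be edited as plain data. A standalone sketch of the same idea using only public calls (the rename is illustrative):

import json
import mxnet as mx

sym = mx.sym.FullyConnected(data=mx.sym.Variable('data'), num_hidden=8, name='fc')
sym_dict = json.loads(sym.tojson())            # what _get_symbol_dict returns
sym_dict['nodes'][-1]['name'] = 'fc_renamed'   # the op node is last in the flat node list
sym2 = mx.sym.load_json(json.dumps(sym_dict))  # what _get_symbol rebuilds
print(sym2.list_outputs())                     # ['fc_renamed_output']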
Example #25
Source File: model_handler.py    From xfer with Apache License 2.0
def _validate_layer_name(self, layer_name):
        """
        Validate name of layer.

        :param str layer_name: Name to be validated.
        """
        # The input name is not included in layer_names, so conflicts are checked against both symbol inputs and layer_names
        if layer_name in self.symbol.list_inputs() or layer_name in self.layer_names:
            raise ValueError("Layer name '{}' conflicts with name already in model.".format(layer_name))
        # MXNet reserves these suffixes for specific purposes, so we avoid using them in layer names
        for suffix in ['output', 'label', 'weight', 'bias', 'moving_mean', 'moving_var', 'gamma', 'beta']:
            if '_' + suffix in layer_name:
                raise ValueError("Layer name cannot contain '{}'".format(suffix)) 
Example #26
Source File: model_handler.py    From xfer with Apache License 2.0
def save_symbol(self, model_name):
        """
        Serialise model symbol graph.

        :param str model_name: Prefix to file name (model_name-symbol.json).
        """
        self.symbol.save(model_name + '-symbol.json') 
Example #27
Source File: model_handler.py    From xfer with Apache License 2.0
def visualize_net(self):
        """
        Display computational graph of model.
        """
        return mx.viz.plot_network(self.symbol, node_attrs={'fixedsize': 'false'}) 
Example #28
Source File: model_handler.py    From xfer with Apache License 2.0
def get_module(self, iterator, fixed_layer_parameters=None, random_layer_parameters=None):
        """
        Return MXNet Module using the model symbol and parameters.

        :param iterator: MXNet iterator to be used with model.
        :type iterator: :class:`mxnet.io.DataIter`
        :param list(str) fixed_layer_parameters: List of layer parameters to keep fixed.
        :param list(str) random_layer_parameters: List of layer parameters to randomise.
        :return: MXNet module
        :rtype: :class:`mx.module.Module`
        """
        if fixed_layer_parameters is not None:
            fixed_layer_parameters = self._prune_parameters(fixed_layer_parameters)
        if random_layer_parameters is None:
            arg_params, aux_params = self.arg_params.copy(), self.aux_params.copy()
        else:
            arg_params, aux_params = self._remove_random_parameters(random_layer_parameters)
        mod = mx.mod.Module(symbol=self.symbol, context=self.devices, fixed_param_names=fixed_layer_parameters,
                            label_names=(self.layer_names[-1] + "_label",), data_names=(self.data_name,))
        mod.bind(data_shapes=iterator.provide_data, label_shapes=iterator.provide_label)
        mod.init_params(mx.init.Xavier(rnd_type='gaussian', factor_type='in', magnitude=2))
        try:
            mod.set_params(arg_params, aux_params, allow_missing=True, force_init=True)
        except mx.MXNetError as e:
            exceptions._handle_mxnet_error(e)
        return mod 
Example #29
Source File: model_handler.py    From xfer with Apache License 2.0
def _get_output_layer_names(symbol_dict):
        """
        Return names of output layers given symbol dictionary.
        """
        return [symbol_dict[consts.NODES][i[0]][consts.NAME] for i in symbol_dict[consts.HEADS]] 
Example #30
Source File: model_handler.py    From xfer with Apache License 2.0
def add_layer_bottom(self, layer_list):
        """
        Add layers to the input of the model.
        Example: model layers (layer1, layer2, layer3) with layer_list = [layerA, layerB] become
        (layerA, layerB, layer1, layer2, layer3).

        :param layer_list: List of MXNet symbol layers to be added to the model input.
        :type layer_list: list(:class:`mx.symbol`)
        """
        network_symbol = self._get_symbol_dict(self.symbol)
        # Concatenate nodes of new layers
        new_nodes = []
        added_layer_names = []
        for layer in layer_list:
            layer_nodes = self._get_symbol_dict(layer)[consts.NODES]
            # Shift input indices of new layer by number of nodes added before it
            layer_nodes = self._shift_input_indices(layer_nodes, len(new_nodes))
            new_nodes += layer_nodes[1:]  # adding all but input node
            layer_name = layer_nodes[-1][consts.NAME]  # Last node contains layer name
            self._validate_layer_name(layer_name)
            added_layer_names.append(layer_name)

        output_layer_names = self._get_output_layer_names(network_symbol)
        # Shift input indices of existing nodes by the number of nodes being added. Exclude input node.
        shifted_input_network_nodes = self._shift_input_indices(network_symbol[consts.NODES][1:], len(new_nodes))
        # Concatenate the data node of the network, the new layer nodes, and the remaining network nodes
        network_symbol[consts.NODES] = [network_symbol[consts.NODES][0]] + new_nodes + shifted_input_network_nodes
        network_symbol[consts.HEADS] = self._get_heads(network_symbol[consts.NODES],
                                                       output_layer_names=output_layer_names)
        network_symbol[consts.ARG_NODES] = self._get_arg_nodes(network_symbol[consts.NODES])

        sym = self._get_symbol(network_symbol)
        self.update_sym(sym)
        logging.info('Added {} to model bottom'.format(', '.join(added_layer_names)))
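The index shifting above relies on the structure of the symbol's JSON: each node's inputs entry holds [node_id, output_index, ...] references into the flat node list, so inserting nodes ahead of existing ones means adding an offset to every referenced node_id. A hedged sketch of what _shift_input_indices plausibly does (the real xfer helper may differ):

def shift_input_indices(nodes, offset):
    """Add `offset` to every node-id reference in each node's inputs (illustrative)."""
    shifted = []
    for node in nodes:
        node = dict(node)  # shallow copy so the caller's nodes are untouched
        node['inputs'] = [[src + offset] + rest for src, *rest in node['inputs']]
        shifted.append(node)
    return shifted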