Java Code Examples for org.apache.hadoop.hive.metastore.api.StorageDescriptor#setLocation()

The following examples show how to use org.apache.hadoop.hive.metastore.api.StorageDescriptor#setLocation(). Each example is taken from the open source project named in the header above it.
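Before the project examples, here is a minimal self-contained sketch of the usual pattern: build a StorageDescriptor, point it at the table's data directory with setLocation(), and attach it to a Table. The database name, table name, and warehouse URI below are illustrative placeholders, not values from any of the projects that follow.

import java.util.ArrayList;
import java.util.HashMap;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

public class SetLocationSketch {
  public static void main(String[] args) {
    StorageDescriptor sd = new StorageDescriptor();
    // Most metastore operations expect columns, serde info and parameters to be
    // present, even if empty; several examples below set them the same way.
    sd.setCols(new ArrayList<FieldSchema>());
    sd.setSerdeInfo(new SerDeInfo());
    sd.setParameters(new HashMap<String, String>());
    // setLocation() takes the URI of the data directory as a plain String.
    // This path is a placeholder, not a real warehouse location.
    sd.setLocation("hdfs://namenode:8020/warehouse/example_db.db/example_table");

    Table table = new Table();
    table.setDbName("example_db");
    table.setTableName("example_table");
    table.setSd(sd);

    System.out.println(table.getSd().getLocation());
  }
}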
Example 1
Source File: CatalogToHiveConverter.java    From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0
public static StorageDescriptor convertStorageDescriptor(com.amazonaws.services.glue.model.StorageDescriptor catalogSd) {
  StorageDescriptor hiveSd = new StorageDescriptor();
  hiveSd.setCols(convertFieldSchemaList(catalogSd.getColumns()));
  hiveSd.setLocation(catalogSd.getLocation());
  hiveSd.setInputFormat(catalogSd.getInputFormat());
  hiveSd.setOutputFormat(catalogSd.getOutputFormat());
  hiveSd.setCompressed(catalogSd.getCompressed());
  hiveSd.setNumBuckets(catalogSd.getNumberOfBuckets());
  hiveSd.setSerdeInfo(convertSerDeInfo(catalogSd.getSerdeInfo()));
  hiveSd.setBucketCols(firstNonNull(catalogSd.getBucketColumns(), Lists.<String>newArrayList()));
  hiveSd.setSortCols(convertOrderList(catalogSd.getSortColumns()));
  hiveSd.setParameters(firstNonNull(catalogSd.getParameters(), Maps.<String, String>newHashMap()));
  hiveSd.setSkewedInfo(convertSkewedInfo(catalogSd.getSkewedInfo()));
  hiveSd.setStoredAsSubDirectories(catalogSd.getStoredAsSubDirectories());

  return hiveSd;
}
 
Example 2
Source File: HiveCatalog.java    From flink with Apache License 2.0
private Partition instantiateHivePartition(Table hiveTable, CatalogPartitionSpec partitionSpec, CatalogPartition catalogPartition)
		throws PartitionSpecInvalidException {
	List<String> partCols = getFieldNames(hiveTable.getPartitionKeys());
	List<String> partValues = getOrderedFullPartitionValues(
		partitionSpec, partCols, new ObjectPath(hiveTable.getDbName(), hiveTable.getTableName()));
	// validate partition values
	for (int i = 0; i < partCols.size(); i++) {
		if (StringUtils.isNullOrWhitespaceOnly(partValues.get(i))) {
			throw new PartitionSpecInvalidException(getName(), partCols,
				new ObjectPath(hiveTable.getDbName(), hiveTable.getTableName()), partitionSpec);
		}
	}
	// TODO: handle GenericCatalogPartition
	StorageDescriptor sd = hiveTable.getSd().deepCopy();
	sd.setLocation(catalogPartition.getProperties().remove(HiveCatalogConfig.PARTITION_LOCATION));

	Map<String, String> properties = new HashMap<>(catalogPartition.getProperties());
	properties.put(HiveCatalogConfig.COMMENT, catalogPartition.getComment());

	return HiveTableUtil.createHivePartition(
			hiveTable.getDbName(),
			hiveTable.getTableName(),
			partValues,
			sd,
			properties);
}
 
Example 3
Source File: TestUtils.java    From waggle-dance with Apache License 2.0
static Table createUnpartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    File location)
  throws TException {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toURI().toString());
  sd.setParameters(new HashMap<>());
  sd.setSerdeInfo(new SerDeInfo());

  hiveTable.setSd(sd);

  metaStoreClient.createTable(hiveTable);

  return hiveTable;
}
 
Example 4
Source File: DestructiveReplicaTest.java    From circus-train with Apache License 2.0
@Before
public void setUp() {
  SourceTable sourceTable = new SourceTable();
  sourceTable.setDatabaseName(DATABASE);
  sourceTable.setTableName(TABLE);
  tableReplication.setSourceTable(sourceTable);
  ReplicaTable replicaTable = new ReplicaTable();
  replicaTable.setDatabaseName(DATABASE);
  replicaTable.setTableName(REPLICA_TABLE);
  tableReplication.setReplicaTable(replicaTable);
  when(replicaMetaStoreClientSupplier.get()).thenReturn(client);
  replica = new DestructiveReplica(replicaMetaStoreClientSupplier, cleanupLocationManager, tableReplication);

  table = new Table();
  table.setDbName(DATABASE);
  table.setTableName(REPLICA_TABLE);
  table.setPartitionKeys(Lists.newArrayList(new FieldSchema("part1", "string", "")));
  Map<String, String> parameters = new HashMap<>();
  parameters.put(CircusTrainTableParameter.SOURCE_TABLE.parameterName(), DATABASE + "." + TABLE);
  parameters.put(REPLICATION_EVENT.parameterName(), EVENT_ID);
  table.setParameters(parameters);
  StorageDescriptor sd1 = new StorageDescriptor();
  sd1.setLocation(tableLocation.toString());
  table.setSd(sd1);
}
 
Example 5
Source File: SourceTest.java    From circus-train with Apache License 2.0
@Test
public void getLocationManagerForPartitionedTable() throws Exception {
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(TABLE_LOCATION + "/partition");
  partition.setSd(sd);

  SourceLocationManager locationManager = source.getLocationManager(table, partitions, EVENT_ID, copierOptions);
  assertThat(locationManager.getTableLocation(), is(new Path(TABLE_LOCATION)));
}
 
Example 6
Source File: HiveEntityFactory.java    From circus-train with Apache License 2.0
public static StorageDescriptor newStorageDescriptor(File location, String... columns) {
  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> cols = new ArrayList<>(columns.length);
  for (String name : columns) {
    cols.add(newFieldSchema(name));
  }
  sd.setCols(cols);
  sd.setSerdeInfo(new SerDeInfo());
  sd.setLocation(location.toURI().toString());
  return sd;
}
 
Example 7
Source File: ThriftMetastoreUtil.java    From presto with Apache License 2.0
private static StorageDescriptor makeStorageDescriptor(String tableName, List<Column> columns, Storage storage)
{
    SerDeInfo serdeInfo = new SerDeInfo();
    serdeInfo.setName(tableName);
    serdeInfo.setSerializationLib(storage.getStorageFormat().getSerDeNullable());
    serdeInfo.setParameters(storage.getSerdeParameters());

    StorageDescriptor sd = new StorageDescriptor();
    sd.setLocation(emptyToNull(storage.getLocation()));
    sd.setCols(columns.stream()
            .map(ThriftMetastoreUtil::toMetastoreApiFieldSchema)
            .collect(toImmutableList()));
    sd.setSerdeInfo(serdeInfo);
    sd.setInputFormat(storage.getStorageFormat().getInputFormatNullable());
    sd.setOutputFormat(storage.getStorageFormat().getOutputFormatNullable());
    sd.setSkewedInfoIsSet(storage.isSkewed());
    sd.setParameters(ImmutableMap.of());

    Optional<HiveBucketProperty> bucketProperty = storage.getBucketProperty();
    if (bucketProperty.isPresent()) {
        sd.setNumBuckets(bucketProperty.get().getBucketCount());
        sd.setBucketCols(bucketProperty.get().getBucketedBy());
        if (!bucketProperty.get().getSortedBy().isEmpty()) {
            sd.setSortCols(bucketProperty.get().getSortedBy().stream()
                    .map(column -> new Order(column.getColumnName(), column.getOrder().getHiveOrder()))
                    .collect(toImmutableList()));
        }
    }

    return sd;
}
 
Example 8
Source File: HdfsSnapshotLocationManagerTest.java    From circus-train with Apache License 2.0
@Test
public void partitionSubPathUriEncoded() throws Exception {
  Path path = new Path(PARTITION_BASE_LOCATION + "/partition%251");
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(path.toUri().getPath());
  partition1.setSd(sd);

  HdfsSnapshotLocationManager manager = new HdfsSnapshotLocationManager(hiveConf, EVENT_ID, sourceTable,
      Arrays.asList(partition1), false, PARTITION_BASE_LOCATION, fileSystemFactory, sourceCatalogListener);
  assertThat(manager.getPartitionSubPath(path), is(new Path("partition%251")));
}
 
Example 9
Source File: HdfsSnapshotLocationManagerTest.java    From circus-train with Apache License 2.0
@Test
public void partitionSubPathWithSnapshot() throws Exception {
  when(fileSystem.exists(new Path(PARTITION_BASE_LOCATION + "/.snapshot"))).thenReturn(true);
  when(fileSystem.createSnapshot(new Path(PARTITION_BASE_LOCATION), EVENT_ID)).thenReturn(new Path("snapshotPath"));

  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(PARTITION_BASE_LOCATION + "/partition1");
  partition1.setSd(sd);

  HdfsSnapshotLocationManager manager = new HdfsSnapshotLocationManager(hiveConf, EVENT_ID, sourceTable,
      Arrays.asList(partition1), false, PARTITION_BASE_LOCATION, fileSystemFactory, sourceCatalogListener);
  assertThat(manager.getPartitionSubPath(new Path(partition1.getSd().getLocation())), is(new Path("partition1")));
}
 
Example 10
Source File: TableTransformationTest.java    From circus-train with Apache License 2.0
@Before
public void init() {
  table = new Table();
  table.setDbName("database");
  table.setTableName("table");
  table.setTableType("type");

  Map<String, List<PrivilegeGrantInfo>> userPrivileges = new HashMap<>();
  userPrivileges.put("read", ImmutableList.of(new PrivilegeGrantInfo()));
  PrincipalPrivilegeSet privileges = new PrincipalPrivilegeSet();
  privileges.setUserPrivileges(userPrivileges);
  table.setPrivileges(privileges);

  StorageDescriptor storageDescriptor = new StorageDescriptor();
  storageDescriptor.setCols(Arrays.asList(new FieldSchema("a", "int", null)));
  storageDescriptor.setInputFormat("input_format");
  storageDescriptor.setOutputFormat("output_format");
  storageDescriptor.setSerdeInfo(new SerDeInfo("serde", "lib", new HashMap<String, String>()));
  storageDescriptor.setSkewedInfo(new SkewedInfo());
  storageDescriptor.setParameters(new HashMap<String, String>());
  storageDescriptor.setLocation("database/table/");
  table.setSd(storageDescriptor);

  Map<String, String> parameters = new HashMap<>();
  parameters.put("com.company.parameter", "abc");
  table.setParameters(parameters);
}
 
Example 11
Source File: HdfsSnapshotLocationManagerTest.java    From circus-train with Apache License 2.0
@Test(expected = CircusTrainException.class)
public void invalidPartitionSubPath() throws Exception {
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation("anotherBaseLocation" + "/partition1");
  partition1.setSd(sd);

  HdfsSnapshotLocationManager manager = new HdfsSnapshotLocationManager(hiveConf, EVENT_ID, sourceTable,
      Arrays.asList(partition1), false, PARTITION_BASE_LOCATION, fileSystemFactory, sourceCatalogListener);
  manager.getPartitionSubPath(new Path(partition1.getSd().getLocation()));
}
 
Example 12
Source File: TestUtils.java    From circus-train with Apache License 2.0
public static Table createUnpartitionedTable(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String table,
    URI location)
  throws TException {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setInputFormat(TextInputFormat.class.getName());
  sd.setOutputFormat(TextOutputFormat.class.getName());
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.OpenCSVSerde");

  hiveTable.setSd(sd);

  metaStoreClient.createTable(hiveTable);

  ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, database, table);
  ColumnStatisticsData statsData = new ColumnStatisticsData(_Fields.LONG_STATS, new LongColumnStatsData(1L, 2L));
  ColumnStatisticsObj cso1 = new ColumnStatisticsObj("id", "bigint", statsData);
  List<ColumnStatisticsObj> statsObj = Collections.singletonList(cso1);
  metaStoreClient.updateTableColumnStatistics(new ColumnStatistics(statsDesc, statsObj));

  return hiveTable;
}
 
Example 13
Source File: ReplicaTest.java    From circus-train with Apache License 2.0
private Partition newPartition(String... values) {
  Partition partition = new Partition();
  partition.setDbName(DB_NAME);
  partition.setTableName(TABLE_NAME);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(new Path(tableLocation, partitionName(values)).toUri().toString());
  sd.setCols(FIELDS);
  partition.setSd(sd);
  HashMap<String, String> parameters = new HashMap<>();
  parameters.put(StatsSetupConst.ROW_COUNT, "1");
  partition.setParameters(parameters);
  partition.setValues(Arrays.asList(values));
  return partition;
}
 
Example 14
Source File: DestructiveReplicaTest.java    From circus-train with Apache License 2.0
private Partition newPartition(String partitionValue, Path location1) {
  Partition partition = new Partition();
  partition.setValues(Lists.newArrayList(partitionValue));
  StorageDescriptor sd1 = new StorageDescriptor();
  sd1.setLocation(location1.toString());
  partition.setSd(sd1);
  Map<String, String> parameters = new HashMap<>();
  parameters.put(REPLICATION_EVENT.parameterName(), EVENT_ID);
  partition.setParameters(parameters);
  return partition;
}
 
Example 15
Source File: HdfsSnapshotLocationManagerTest.java    From circus-train with Apache License 2.0
@Before
public void setupTable() {
  sourceTable = new Table();
  sourceTable.setDbName(DB_NAME);
  sourceTable.setTableName(TABLE_NAME);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(TABLE_LOCATION);
  sourceTable.setSd(sd);
}
 
Example 16
Source File: HiveTableUtil.java    From flink with Apache License 2.0
public static void extractLocation(StorageDescriptor sd, Map<String, String> properties) {
	String location = properties.remove(TABLE_LOCATION_URI);
	if (location != null) {
		sd.setLocation(location);
	}
}
 
Example 17
Source File: HiveConvertersImpl.java    From metacat with Apache License 2.0
private StorageDescriptor fromStorageDto(@Nullable final StorageDto storageDto, @Nullable final String serdeName) {
    //
    // Set all required fields to null. This is to simulate Hive behavior.
    // Setting it to empty string failed certain hive operations.
    //
    final StorageDescriptor result = new StorageDescriptor();
    String inputFormat = null;
    String location = null;
    String outputFormat = null;
    String serializationLib = null;
    Map<String, String> sdParams = Maps.newHashMap();
    Map<String, String> serdeParams = Maps.newHashMap();

    if (storageDto != null) {
        if (storageDto.getInputFormat() != null) {
            inputFormat = storageDto.getInputFormat();
        }
        if (storageDto.getUri() != null) {
            location = storageDto.getUri();
        }
        if (storageDto.getOutputFormat() != null) {
            outputFormat = storageDto.getOutputFormat();
        }
        if (storageDto.getSerializationLib() != null) {
            serializationLib = storageDto.getSerializationLib();
        }
        if (storageDto.getParameters() != null) {
            sdParams = storageDto.getParameters();
        }
        if (storageDto.getSerdeInfoParameters() != null) {
            serdeParams = storageDto.getSerdeInfoParameters();
        }
    }
    result.setSerdeInfo(new SerDeInfo(serdeName, serializationLib, serdeParams));
    result.setBucketCols(Collections.emptyList());
    result.setSortCols(Collections.emptyList());
    result.setInputFormat(inputFormat);
    result.setLocation(location);
    result.setOutputFormat(outputFormat);
    result.setCols(Collections.emptyList());
    // Setting an empty skewed info.
    result.setSkewedInfo(new SkewedInfo(Collections.emptyList(), Collections.emptyList(), Collections.emptyMap()));
    result.setParameters(sdParams);
    return result;
}
 
Example 18
Source File: ComparisonToolIntegrationTest.java    From circus-train with Apache License 2.0
private void createReplicaTable() throws Exception {
  File partitionEurope = new File(replicaTableUri, "local_date=2000-01-01");
  File partitionUk = new File(partitionEurope, "local_hour=0");
  File dataFileUk = new File(partitionUk, PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\tuk\n2\tsusan\tglasgow\tuk\n");

  File partitionAsia = new File(replicaTableUri, "local_date=2000-01-02");
  File partitionChina = new File(partitionAsia, "local_hour=0");
  File dataFileChina = new File(partitionChina, PART_00000);
  String data = "1\tchun\tbeijing\tchina\n2\tshanghai\tmilan\titaly\n";
  FileUtils.writeStringToFile(dataFileChina, data);

  HiveMetaStoreClient replicaClient = catalog.client();

  Table replica = new Table();
  replica.setDbName(DATABASE);
  replica.setTableName(REPLICA_TABLE);
  replica.setTableType(TableType.EXTERNAL_TABLE.name());
  Map<String, String> parameters = new HashMap<>();
  parameters.put("comment", "comment replica");
  replica.setParameters(parameters);
  List<FieldSchema> partitionColumns = Arrays.asList(new FieldSchema("local_date", "string", ""),
      new FieldSchema("local_hour", "string", ""));
  replica.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(new FieldSchema("id", "bigint", ""),
      new FieldSchema("name", "string", ""), new FieldSchema("city", "string", ""),
      new FieldSchema("country", "string", ""));

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(replicaTableUri.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());

  replica.setSd(sd);

  replicaClient.createTable(replica);
  LOG.info(">>>> Partitions added: {}",
      replicaClient.add_partitions(
          Arrays.asList(newPartition(REPLICA_TABLE, sd, Arrays.asList("2000-01-01", "0"), partitionUk),
              newPartition(REPLICA_TABLE, sd, Arrays.asList("2000-01-02", "0"), partitionChina))));
}
 
Example 19
Source File: ComparisonToolIntegrationTest.java    From circus-train with Apache License 2.0
private void createSourceTable() throws Exception {
  File partitionEurope = new File(sourceTableUri, "local_date=2000-01-01");
  File partitionUk = new File(partitionEurope, "local_hour=0");
  File dataFileUk = new File(partitionUk, PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\n2\tsusan\tglasgow\n");

  File partitionAsia = new File(sourceTableUri, "local_date=2000-01-02");
  File partitionChina = new File(partitionAsia, "local_hour=0");
  File dataFileChina = new File(partitionChina, PART_00000);
  String data = "1\tchun\tbeijing\n2\tshanghai\tmilan\n";
  FileUtils.writeStringToFile(dataFileChina, data);

  HiveMetaStoreClient sourceClient = catalog.client();

  Table source = new Table();
  source.setDbName(DATABASE);
  source.setTableName(SOURCE_TABLE);
  source.setTableType(TableType.EXTERNAL_TABLE.name());
  Map<String, String> parameters = new HashMap<>();
  parameters.put("comment", "comment source");
  source.setParameters(parameters);

  List<FieldSchema> partitionColumns = Arrays.asList(new FieldSchema("local_date", "string", ""),
      new FieldSchema("local_hour", "string", ""));
  source.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(new FieldSchema("id", "bigint", ""),
      new FieldSchema("name", "string", ""), new FieldSchema("city", "string", ""));

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(sourceTableUri.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());

  source.setSd(sd);

  sourceClient.createTable(source);
  LOG.info(">>>> Partitions added: {}",
      sourceClient
          .add_partitions(Arrays.asList(newPartition(SOURCE_TABLE, sd, Arrays.asList("2000-01-01", "0"), partitionUk),
              newPartition(SOURCE_TABLE, sd, Arrays.asList("2000-01-02", "0"), partitionChina))));
}
 
Example 20
Source File: TestHDFSIntegration.java    From incubator-sentry with Apache License 2.0
/**
* SENTRY-1002:
* Ensure the paths with no scheme will not cause NPE during paths update.
*/
@Test
public void testMissingScheme() throws Throwable {

  // In the local test environment, EXTERNAL_SENTRY_SERVICE is false, so
  // set the default URI scheme to hdfs.
  boolean testConfOff = Boolean.parseBoolean(System.getProperty(EXTERNAL_SENTRY_SERVICE, "false"));
  if (!testConfOff) {
    PathsUpdate.getConfiguration().set("fs.defaultFS", "hdfs:///");
  }

  tmpHDFSDir = new Path("/tmp/external");
  if (!miniDFS.getFileSystem().exists(tmpHDFSDir)) {
    miniDFS.getFileSystem().mkdirs(tmpHDFSDir);
  }

  Path partitionDir = new Path("/tmp/external/p1");
  if (!miniDFS.getFileSystem().exists(partitionDir)) {
    miniDFS.getFileSystem().mkdirs(partitionDir);
  }

  String dbName = "db1";
  String tblName = "tab1";
  dbNames = new String[]{dbName};
  roles = new String[]{"admin_role"};
  admin = StaticUserGroup.ADMIN1;

  Connection conn;
  Statement stmt;

  conn = hiveServer2.createConnection("hive", "hive");
  stmt = conn.createStatement();
  stmt.execute("create role admin_role");
  stmt.execute("grant all on server server1 to role admin_role");
  stmt.execute("grant role admin_role to group " + StaticUserGroup.ADMINGROUP);
  stmt.close();
  conn.close();

  conn = hiveServer2.createConnection(StaticUserGroup.ADMIN1, StaticUserGroup.ADMIN1);
  stmt = conn.createStatement();
  stmt.execute("create database " + dbName);
  stmt.execute("create external table " + dbName + "." + tblName + "(s string) location '/tmp/external/p1'");

  // Deep copy of table tab1
  Table tbCopy = hmsClient.getTable(dbName, tblName);

  // Change the location of the table to strip the scheme.
  StorageDescriptor sd = hmsClient.getTable(dbName, tblName).getSd();
  sd.setLocation("/tmp/external");
  tbCopy.setSd(sd);

  // Alter table tab1 to be tbCopy which is at scheme-less location.
  // And the corresponding path will be updated to sentry server.
  hmsClient.alter_table(dbName, "tab1", tbCopy);
  Assert.assertEquals(hmsClient.getTable(dbName, tblName).getSd().getLocation(), "/tmp/external");
  verifyOnPath("/tmp/external", FsAction.ALL, StaticUserGroup.HIVE, true);

  stmt.close();
  conn.close();
}