org.apache.hadoop.hdfs.web.URLConnectionFactory Java Examples
The following examples show how to use
org.apache.hadoop.hdfs.web.URLConnectionFactory.
You can vote up the examples you like or vote down the ones you don't like,
and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example #1
Source File: TestEditLogFileInputStream.java From big-c with Apache License 2.0 | 6 votes |
/**
 * Verifies that an edit log can be read over HTTP via a mocked
 * connection: the stream must yield the expected op types and report the
 * length taken from the Content-Length header.
 */
@Test public void testReadURL() throws Exception {
  // Mock an HTTP connection that serves the fake edit-log bytes with a
  // 200 response and a Content-Length matching the payload size.
  HttpURLConnection conn = mock(HttpURLConnection.class);
  doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream();
  doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
  doReturn(Integer.toString(FAKE_LOG_DATA.length)).when(conn)
      .getHeaderField("Content-Length");

  // The factory hands back the mocked connection for any URL.
  URLConnectionFactory factory = mock(URLConnectionFactory.class);
  doReturn(conn).when(factory).openConnection(Mockito.<URL> any(),
      anyBoolean());

  URL url = new URL("http://localhost/fakeLog");
  EditLogInputStream elis = EditLogFileInputStream.fromUrl(factory, url,
      HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false);
  // Read the edit log and verify that we got all of the data.
  EnumMap<FSEditLogOpCodes, Holder<Integer>> counts = FSImageTestUtil
      .countEditLogOpTypes(elis);
  assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));

  // Check that length header was picked up.
  assertEquals(FAKE_LOG_DATA.length, elis.length());
  elis.close();
}
Example #2
Source File: TestReconUtils.java From hadoop-ozone with Apache License 2.0 | 6 votes |
/**
 * Verifies that {@code ReconUtils.makeHttpCall} returns a stream whose
 * contents match what the (mocked) connection serves.
 */
@Test
public void testMakeHttpCall() throws Exception {
  String url = "http://localhost:9874/dbCheckpoint";
  File file1 = Paths.get(folder.getRoot().getPath(), "file1").toFile();

  // try-with-resources closes the writer even if write() throws; the
  // original only closed it on the success path, leaking the handle on
  // failure.
  try (BufferedWriter writer = new BufferedWriter(
      new FileWriter(file1.getAbsolutePath()))) {
    writer.write("File 1 Contents");
  }
  InputStream fileInputStream = new FileInputStream(file1);

  // Mock the connection factory so makeHttpCall() reads from the local
  // file instead of performing real network I/O.
  String contents;
  URLConnectionFactory connectionFactoryMock =
      mock(URLConnectionFactory.class);
  URLConnection urlConnectionMock = mock(URLConnection.class);
  when(urlConnectionMock.getInputStream()).thenReturn(fileInputStream);
  when(connectionFactoryMock.openConnection(any(URL.class), anyBoolean()))
      .thenReturn(urlConnectionMock);

  // The stream returned by makeHttpCall IS fileInputStream, so this
  // try-with-resources closes it too.
  try (InputStream inputStream = new ReconUtils()
      .makeHttpCall(connectionFactoryMock, url, false)) {
    contents = IOUtils.toString(inputStream, Charset.defaultCharset());
  }

  assertEquals("File 1 Contents", contents);
}
Example #3
Source File: TestOzoneManagerHttpServer.java From hadoop-ozone with Apache License 2.0 | 6 votes |
/**
 * One-time test setup: recreates a clean base directory, generates SSL
 * keystores/truststores, and builds the shared URL connection factory.
 */
@BeforeClass public static void setUp() throws Exception {
  // Start from a clean base directory for the generated keystores.
  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  conf = new OzoneConfiguration();
  // Generate keystores and wire the SSL config files into conf.
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(
      TestOzoneManagerHttpServer.class);
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  // NOTE(review): the connection factory is built from conf BEFORE the
  // client/server HTTPS keystore resource keys below are set — confirm
  // this ordering is intentional.
  connectionFactory =
      URLConnectionFactory.newDefaultURLConnectionFactory(conf);
  conf.set(OzoneConfigKeys.OZONE_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
      KeyStoreTestUtil.getClientSSLConfigFileName());
  conf.set(OzoneConfigKeys.OZONE_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
      KeyStoreTestUtil.getServerSSLConfigFileName());
}
Example #4
Source File: TestEditLogFileInputStream.java From hadoop with Apache License 2.0 | 6 votes |
@Test public void testReadURL() throws Exception { HttpURLConnection conn = mock(HttpURLConnection.class); doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream(); doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode(); doReturn(Integer.toString(FAKE_LOG_DATA.length)).when(conn).getHeaderField("Content-Length"); URLConnectionFactory factory = mock(URLConnectionFactory.class); doReturn(conn).when(factory).openConnection(Mockito.<URL> any(), anyBoolean()); URL url = new URL("http://localhost/fakeLog"); EditLogInputStream elis = EditLogFileInputStream.fromUrl(factory, url, HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false); // Read the edit log and verify that we got all of the data. EnumMap<FSEditLogOpCodes, Holder<Integer>> counts = FSImageTestUtil .countEditLogOpTypes(elis); assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1)); assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1)); assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1)); // Check that length header was picked up. assertEquals(FAKE_LOG_DATA.length, elis.length()); elis.close(); }
Example #5
Source File: TestStorageContainerManagerHttpServer.java From hadoop-ozone with Apache License 2.0 | 6 votes |
/**
 * One-time test setup: recreates a clean base directory, generates SSL
 * keystores/truststores, and builds the shared URL connection factory.
 */
@BeforeClass public static void setUp() throws Exception {
  // Start from a clean base directory for the generated keystores.
  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  conf = new OzoneConfiguration();
  // Generate keystores and wire the SSL config files into conf.
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(
      TestStorageContainerManagerHttpServer.class);
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  // NOTE(review): the connection factory is built from conf BEFORE the
  // client/server HTTPS keystore resource keys below are set — confirm
  // this ordering is intentional.
  connectionFactory =
      URLConnectionFactory.newDefaultURLConnectionFactory(conf);
  conf.set(OzoneConfigKeys.OZONE_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
      KeyStoreTestUtil.getClientSSLConfigFileName());
  conf.set(OzoneConfigKeys.OZONE_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
      KeyStoreTestUtil.getServerSSLConfigFileName());
}
Example #6
Source File: QuorumJournalManager.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Constructs a journal manager that writes edits to a quorum of journal
 * nodes, reading all of its RPC timeouts from the given configuration.
 */
QuorumJournalManager(Configuration conf,
    URI uri, NamespaceInfo nsInfo,
    AsyncLogger.Factory loggerFactory) throws IOException {
  // conf is required; everything below is derived from it.
  Preconditions.checkArgument(conf != null, "must be configured");

  this.conf = conf;
  this.uri = uri;
  this.nsInfo = nsInfo;
  // One async logger per journal node, grouped into a quorum set.
  this.loggers = new AsyncLoggerSet(createLoggers(loggerFactory));
  // Shared factory for HTTP connections to the journal nodes.
  this.connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);

  // Configure timeouts.
  this.startSegmentTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_START_SEGMENT_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_START_SEGMENT_TIMEOUT_DEFAULT);
  this.prepareRecoveryTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_PREPARE_RECOVERY_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_PREPARE_RECOVERY_TIMEOUT_DEFAULT);
  this.acceptRecoveryTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_ACCEPT_RECOVERY_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_ACCEPT_RECOVERY_TIMEOUT_DEFAULT);
  this.finalizeSegmentTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_FINALIZE_SEGMENT_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_FINALIZE_SEGMENT_TIMEOUT_DEFAULT);
  this.selectInputStreamsTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_SELECT_INPUT_STREAMS_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_SELECT_INPUT_STREAMS_TIMEOUT_DEFAULT);
  this.getJournalStateTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_GET_JOURNAL_STATE_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_GET_JOURNAL_STATE_TIMEOUT_DEFAULT);
  this.newEpochTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_DEFAULT);
  this.writeTxnsTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_DEFAULT);
}
Example #7
Source File: TestNameNodeHttpServer.java From big-c with Apache License 2.0 | 5 votes |
/**
 * One-time test setup: recreates a clean base directory, generates SSL
 * keystores/truststores, and builds the shared URL connection factory.
 */
@BeforeClass public static void setUp() throws Exception {
  // Start from a clean base directory for the generated keystores.
  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  conf = new Configuration();
  // Generate keystores and point conf at the SSL config files.
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(TestNameNodeHttpServer.class);
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);
}
Example #8
Source File: QuorumJournalManager.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Constructs a journal manager that writes edits to a quorum of journal
 * nodes, reading all of its RPC timeouts from the given configuration.
 */
QuorumJournalManager(Configuration conf,
    URI uri, NamespaceInfo nsInfo,
    AsyncLogger.Factory loggerFactory) throws IOException {
  // conf is required; everything below is derived from it.
  Preconditions.checkArgument(conf != null, "must be configured");

  this.conf = conf;
  this.uri = uri;
  this.nsInfo = nsInfo;
  // One async logger per journal node, grouped into a quorum set.
  this.loggers = new AsyncLoggerSet(createLoggers(loggerFactory));
  // Shared factory for HTTP connections to the journal nodes.
  this.connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);

  // Configure timeouts.
  this.startSegmentTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_START_SEGMENT_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_START_SEGMENT_TIMEOUT_DEFAULT);
  this.prepareRecoveryTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_PREPARE_RECOVERY_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_PREPARE_RECOVERY_TIMEOUT_DEFAULT);
  this.acceptRecoveryTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_ACCEPT_RECOVERY_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_ACCEPT_RECOVERY_TIMEOUT_DEFAULT);
  this.finalizeSegmentTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_FINALIZE_SEGMENT_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_FINALIZE_SEGMENT_TIMEOUT_DEFAULT);
  this.selectInputStreamsTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_SELECT_INPUT_STREAMS_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_SELECT_INPUT_STREAMS_TIMEOUT_DEFAULT);
  this.getJournalStateTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_GET_JOURNAL_STATE_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_GET_JOURNAL_STATE_TIMEOUT_DEFAULT);
  this.newEpochTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_NEW_EPOCH_TIMEOUT_DEFAULT);
  this.writeTxnsTimeoutMs = conf.getInt(
      DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_KEY,
      DFSConfigKeys.DFS_QJOURNAL_WRITE_TXNS_TIMEOUT_DEFAULT);
}
Example #9
Source File: DelegationTokenFetcher.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Cancel a Delegation Token.
 *
 * @param factory the factory used to open the connection to the NameNode
 * @param nnAddr the NameNode's address
 * @param tok the token to cancel
 * @throws IOException if the HTTP call fails
 * @throws AuthenticationException if authentication to the NameNode fails
 */
static public void cancelDelegationToken(URLConnectionFactory factory,
    URI nnAddr, Token<DelegationTokenIdentifier> tok) throws IOException,
    AuthenticationException {
  // Target: <nnAddr><PATH_SPEC>?<TOKEN>=<url-encoded token>
  String cancelUrl = nnAddr.toString()
      + CancelDelegationTokenServlet.PATH_SPEC + "?"
      + CancelDelegationTokenServlet.TOKEN + "="
      + tok.encodeToUrlString();
  HttpURLConnection conn = run(factory, new URL(cancelUrl));
  conn.disconnect();
}
Example #10
Source File: DFSck.java From big-c with Apache License 2.0 | 5 votes |
/**
 * Creates an fsck tool instance.
 *
 * @param conf configuration used to build the NameNode HTTP connection
 * @param out stream fsck output is printed to
 * @throws IOException if the current user cannot be determined
 */
public DFSck(Configuration conf, PrintStream out) throws IOException {
  super(conf);
  this.ugi = UserGroupInformation.getCurrentUser();
  this.out = out;
  this.connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);
  // SPNEGO is used for the HTTP call iff Hadoop security is enabled.
  this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
}
Example #11
Source File: TestNameNodeHttpServer.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * One-time test setup: recreates a clean base directory, generates SSL
 * keystores/truststores, and builds the shared URL connection factory.
 */
@BeforeClass public static void setUp() throws Exception {
  // Start from a clean base directory for the generated keystores.
  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  conf = new Configuration();
  // Generate keystores and point conf at the SSL config files.
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(TestNameNodeHttpServer.class);
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);
}
Example #12
Source File: DelegationTokenFetcher.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Cancel a Delegation Token.
 *
 * @param factory the factory used to open the connection to the NameNode
 * @param nnAddr the NameNode's address
 * @param tok the token to cancel
 * @throws IOException if the HTTP call fails
 * @throws AuthenticationException if authentication to the NameNode fails
 */
static public void cancelDelegationToken(URLConnectionFactory factory,
    URI nnAddr, Token<DelegationTokenIdentifier> tok) throws IOException,
    AuthenticationException {
  // Target: <nnAddr><PATH_SPEC>?<TOKEN>=<url-encoded token>
  URL cancelUrl = new URL(nnAddr.toString()
      + CancelDelegationTokenServlet.PATH_SPEC + "?"
      + CancelDelegationTokenServlet.TOKEN + "="
      + tok.encodeToUrlString());
  run(factory, cancelUrl).disconnect();
}
Example #13
Source File: DFSck.java From hadoop with Apache License 2.0 | 5 votes |
/**
 * Creates an fsck tool instance.
 *
 * @param conf configuration used to build the NameNode HTTP connection
 * @param out stream fsck output is printed to
 * @throws IOException if the current user cannot be determined
 */
public DFSck(Configuration conf, PrintStream out) throws IOException {
  super(conf);
  this.ugi = UserGroupInformation.getCurrentUser();
  this.out = out;
  this.connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(conf);
  // SPNEGO is used for the HTTP call iff Hadoop security is enabled.
  this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
}
Example #14
Source File: OzoneManagerSnapshotProvider.java From hadoop-ozone with Apache License 2.0 | 5 votes |
/**
 * Creates a snapshot provider that can fetch OM Ratis snapshots from the
 * given peer OM nodes, with connection/request timeouts taken from conf.
 */
public OzoneManagerSnapshotProvider(ConfigurationSource conf,
    File omRatisSnapshotDir, List<OMNodeDetails> peerNodes) {

  LOG.info("Initializing OM Snapshot Provider");
  this.omSnapshotDir = omRatisSnapshotDir;

  // Index peers by node id for quick lookup when downloading.
  this.peerNodesMap = new HashMap<>();
  for (OMNodeDetails peerNode : peerNodes) {
    this.peerNodesMap.put(peerNode.getOMNodeId(), peerNode);
  }

  this.httpPolicy = HttpConfig.getHttpPolicy(conf);
  // SPNEGO is enabled iff the OM HTTP auth type is "kerberos".
  this.spnegoEnabled = conf.get(OZONE_OM_HTTP_AUTH_TYPE, "simple")
      .equals("kerberos");

  // Resolve connection and request timeouts (in ms) from configuration.
  TimeUnit connectionTimeoutUnit =
      OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT.getUnit();
  int connectionTimeoutMS = (int) conf.getTimeDuration(
      OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_KEY,
      OZONE_OM_SNAPSHOT_PROVIDER_CONNECTION_TIMEOUT_DEFAULT.getDuration(),
      connectionTimeoutUnit);

  TimeUnit requestTimeoutUnit =
      OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT.getUnit();
  int requestTimeoutMS = (int) conf.getTimeDuration(
      OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_KEY,
      OZONE_OM_SNAPSHOT_PROVIDER_REQUEST_TIMEOUT_DEFAULT.getDuration(),
      requestTimeoutUnit);

  connectionFactory = URLConnectionFactory
      .newDefaultURLConnectionFactory(connectionTimeoutMS, requestTimeoutMS,
          LegacyHadoopConfigurationSource.asHadoopConfiguration(conf));
}
Example #15
Source File: ReconUtils.java From hadoop-ozone with Apache License 2.0 | 5 votes |
/**
 * Make HTTP GET call on the URL and return inputstream to the response.
 *
 * @param connectionFactory URLConnectionFactory to use.
 * @param url url to call
 * @param isSpnego is SPNEGO enabled
 * @return Inputstream to the response of the HTTP call.
 * @throws IOException while reading the response.
 * @throws AuthenticationException if SPNEGO authentication fails.
 */
public InputStream makeHttpCall(URLConnectionFactory connectionFactory,
    String url, boolean isSpnego)
    throws IOException, AuthenticationException {
  // Open, connect, and hand the response body back to the caller,
  // who owns closing the stream.
  URL target = new URL(url);
  URLConnection connection = connectionFactory.openConnection(target,
      isSpnego);
  connection.connect();
  return connection.getInputStream();
}
Example #16
Source File: EditLogFileInputStream.java From big-c with Apache License 2.0 | 4 votes |
/**
 * Wraps a remote edit-log URL together with the factory used to open
 * connections to it.
 *
 * @param connectionFactory factory used to open the HTTP connection
 * @param url the URL hosting the edit log
 */
public URLLog(URLConnectionFactory connectionFactory, URL url) {
  this.connectionFactory = connectionFactory;
  // SPNEGO is used iff Hadoop security is enabled.
  this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
  this.url = url;
}
Example #17
Source File: EditLogFileInputStream.java From hadoop with Apache License 2.0 | 4 votes |
/**
 * Wraps a remote edit-log URL together with the factory used to open
 * connections to it.
 *
 * @param connectionFactory factory used to open the HTTP connection
 * @param url the URL hosting the edit log
 */
public URLLog(URLConnectionFactory connectionFactory, URL url) {
  this.connectionFactory = connectionFactory;
  // SPNEGO is used iff Hadoop security is enabled.
  this.isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
  this.url = url;
}
Example #18
Source File: OzoneManagerServiceProviderImpl.java From hadoop-ozone with Apache License 2.0 | 4 votes |
/**
 * Wires up the Recon-side OM service provider: builds the HTTP connection
 * factory with configured timeouts and derives the OM DB-checkpoint
 * snapshot URL from the configured OM HTTP(S) address.
 */
@Inject
public OzoneManagerServiceProviderImpl(
    OzoneConfiguration configuration,
    ReconOMMetadataManager omMetadataManager,
    ReconTaskController reconTaskController,
    ReconUtils reconUtils,
    OzoneManagerProtocol ozoneManagerClient) {

  // Connection and request timeouts (ms) for calls to the OM.
  int connectionTimeout = (int) configuration.getTimeDuration(
      RECON_OM_CONNECTION_TIMEOUT,
      RECON_OM_CONNECTION_TIMEOUT_DEFAULT,
      TimeUnit.MILLISECONDS);
  int connectionRequestTimeout = (int)configuration.getTimeDuration(
      RECON_OM_CONNECTION_REQUEST_TIMEOUT,
      RECON_OM_CONNECTION_REQUEST_TIMEOUT_DEFAULT,
      TimeUnit.MILLISECONDS);

  connectionFactory =
      URLConnectionFactory.newDefaultURLConnectionFactory(connectionTimeout,
          connectionRequestTimeout, configuration);

  String ozoneManagerHttpAddress = configuration.get(OMConfigKeys
      .OZONE_OM_HTTP_ADDRESS_KEY);

  String ozoneManagerHttpsAddress = configuration.get(OMConfigKeys
      .OZONE_OM_HTTPS_ADDRESS_KEY);

  omSnapshotDBParentDir = reconUtils.getReconDbDir(configuration,
      OZONE_RECON_OM_SNAPSHOT_DB_DIR);

  HttpConfig.Policy policy = HttpConfig.getHttpPolicy(configuration);

  // Prefer HTTPS for the checkpoint endpoint when the policy enables it.
  omDBSnapshotUrl = "http://" + ozoneManagerHttpAddress +
      OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT;

  if (policy.isHttpsEnabled()) {
    omDBSnapshotUrl = "https://" + ozoneManagerHttpsAddress +
        OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT;
  }

  // Optionally ask the OM to flush before taking the checkpoint.
  boolean flushParam = configuration.getBoolean(
      RECON_OM_SNAPSHOT_TASK_FLUSH_PARAM, false);

  if (flushParam) {
    omDBSnapshotUrl += "?" + OZONE_DB_CHECKPOINT_REQUEST_FLUSH + "=true";
  }

  this.reconUtils = reconUtils;
  this.omMetadataManager = omMetadataManager;
  this.reconTaskController = reconTaskController;
  this.reconTaskStatusDao = reconTaskController.getReconTaskStatusDao();
  this.ozoneManagerClient = ozoneManagerClient;
  this.configuration = configuration;
  this.metrics = OzoneManagerSyncMetrics.create();
}
Example #19
Source File: EditLogFileInputStream.java From big-c with Apache License 2.0 | 3 votes |
/**
 * Open an EditLogInputStream for the given URL.
 *
 * @param connectionFactory the URLConnectionFactory used to create the
 *        connection.
 * @param url the url hosting the log
 * @param startTxId the expected starting txid
 * @param endTxId the expected ending txid
 * @param inProgress whether the log is in-progress
 * @return a stream from which edits may be read
 */
public static EditLogInputStream fromUrl(
    URLConnectionFactory connectionFactory, URL url, long startTxId,
    long endTxId, boolean inProgress) {
  // Wrap the remote log location, then expose it as an edit-log stream.
  URLLog log = new URLLog(connectionFactory, url);
  return new EditLogFileInputStream(log, startTxId, endTxId, inProgress);
}
Example #20
Source File: EditLogFileInputStream.java From hadoop with Apache License 2.0 | 3 votes |
/**
 * Open an EditLogInputStream for the given URL.
 *
 * @param connectionFactory
 *          the URLConnectionFactory used to create the connection.
 * @param url
 *          the url hosting the log
 * @param startTxId
 *          the expected starting txid
 * @param endTxId
 *          the expected ending txid
 * @param inProgress
 *          whether the log is in-progress
 * @return a stream from which edits may be read
 */
public static EditLogInputStream fromUrl(
    URLConnectionFactory connectionFactory, URL url, long startTxId,
    long endTxId, boolean inProgress) {
  return new EditLogFileInputStream(new URLLog(connectionFactory, url),
      startTxId, endTxId, inProgress);
}