Java Code Examples for org.apache.hadoop.hdfs.inotify.Event

The following examples show how to use org.apache.hadoop.hdfs.inotify.Event. They are extracted from open source projects; each example notes its source project, source file, and license.
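Before the examples, here is a minimal, self-contained sketch of how an inotify event stream is typically consumed. It is not taken from any of the projects below; the NameNode URI is a placeholder and the class name is made up.

import java.net.URI;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;

public class InotifyTail {
    public static void main(String[] args) throws Exception {
        // HdfsAdmin exposes the NameNode's inotify stream; the URI is a placeholder.
        HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), new Configuration());
        DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
        while (true) {
            // Wait up to one second for the next batch of edits.
            EventBatch batch = stream.poll(1, TimeUnit.SECONDS);
            if (batch == null) {
                continue;
            }
            for (Event event : batch.getEvents()) {
                switch (event.getEventType()) {
                    case CREATE:
                        System.out.println("created: " + ((Event.CreateEvent) event).getPath());
                        break;
                    case UNLINK:
                        System.out.println("deleted: " + ((Event.UnlinkEvent) event).getPath());
                        break;
                    default:
                        System.out.println(event.getEventType() + " event");
                }
            }
        }
    }
}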
Example 1
Source Projects: localization_nifi, nifi   Source File: EventTypeValidator.java    License: Apache License 2.0
Validates a comma-separated list of event type names by resolving each one against Event.EventType; returns null when all names are valid.
private String isValidEventType(String input) {
    if (input != null && !"".equals(input.trim())) {
        final String[] events = input.split(",");
        final List<String> invalid = new ArrayList<>();
        for (String event : events) {
            try {
                Event.EventType.valueOf(event.trim().toUpperCase());
            } catch (IllegalArgumentException e) {
                invalid.add(event.trim());
            }
        }

        return invalid.isEmpty() ? null : "The following are not valid event types: " + invalid;
    }

    return "Empty event types are not allowed.";
}
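The method above relies on Event.EventType.valueOf throwing IllegalArgumentException for names that do not match an event type. A minimal illustration (the input names are made up):

import org.apache.hadoop.hdfs.inotify.Event;

public class EventTypeValidatorDemo {
    public static void main(String[] args) {
        for (String name : new String[]{"append", "bogus"}) {
            try {
                // Event.EventType defines CREATE, CLOSE, APPEND, RENAME, METADATA and UNLINK.
                Event.EventType type = Event.EventType.valueOf(name.trim().toUpperCase());
                System.out.println(name + " -> valid (" + type + ")");
            } catch (IllegalArgumentException e) {
                System.out.println(name + " -> not a valid event type");
            }
        }
    }
}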
 
Example 2
Source Projects: localization_nifi, nifi   Source File: TestGetHDFSEvents.java    License: Apache License 2.0
Verifies that an empty event batch produces no flow files while the batch's transaction id is still persisted to cluster state.
@Test
public void onTriggerShouldProperlyHandleAnEmptyEventBatch() throws Exception {
    EventBatch eventBatch = mock(EventBatch.class);
    when(eventBatch.getEvents()).thenReturn(new Event[]{});

    when(inotifyEventInputStream.poll(1000000L, TimeUnit.MICROSECONDS)).thenReturn(eventBatch);
    when(hdfsAdmin.getInotifyEventStream()).thenReturn(inotifyEventInputStream);
    when(eventBatch.getTxid()).thenReturn(100L);

    GetHDFSEvents processor = new TestableGetHDFSEvents(kerberosProperties, hdfsAdmin);
    TestRunner runner = TestRunners.newTestRunner(processor);

    runner.setProperty(GetHDFSEvents.POLL_DURATION, "1 second");
    runner.setProperty(GetHDFSEvents.HDFS_PATH_TO_WATCH, "/some/path");
    runner.setProperty(GetHDFSEvents.NUMBER_OF_RETRIES_FOR_POLL, "5");
    runner.run();

    List<MockFlowFile> successfulFlowFiles = runner.getFlowFilesForRelationship(GetHDFSEvents.REL_SUCCESS);
    assertEquals(0, successfulFlowFiles.size());
    verify(eventBatch).getTxid();
    assertEquals("100", runner.getProcessContext().getStateManager().getState(Scope.CLUSTER).get("last.tx.id"));
}
 
Example 3
Source Projects: localization_nifi, nifi   Source File: TestGetHDFSEvents.java    License: Apache License 2.0
Verifies the happy path: all three test events match the watched path pattern and are routed to the success relationship.
@Test
public void makeSureHappyPathForProcessingEventsSendsFlowFilesToCorrectRelationship() throws Exception {
    Event[] events = getEvents();

    EventBatch eventBatch = mock(EventBatch.class);
    when(eventBatch.getEvents()).thenReturn(events);

    when(inotifyEventInputStream.poll(1000000L, TimeUnit.MICROSECONDS)).thenReturn(eventBatch);
    when(hdfsAdmin.getInotifyEventStream()).thenReturn(inotifyEventInputStream);
    when(eventBatch.getTxid()).thenReturn(100L);

    GetHDFSEvents processor = new TestableGetHDFSEvents(kerberosProperties, hdfsAdmin);
    TestRunner runner = TestRunners.newTestRunner(processor);

    runner.setProperty(GetHDFSEvents.POLL_DURATION, "1 second");
    runner.setProperty(GetHDFSEvents.HDFS_PATH_TO_WATCH, "/some/path(/)?.*");
    runner.run();

    List<MockFlowFile> successfulFlowFiles = runner.getFlowFilesForRelationship(GetHDFSEvents.REL_SUCCESS);
    assertEquals(3, successfulFlowFiles.size());
    verify(eventBatch).getTxid();
    assertEquals("100", runner.getProcessContext().getStateManager().getState(Scope.CLUSTER).get("last.tx.id"));
}
 
Example 4
Source Projects: localization_nifi, nifi   Source File: TestGetHDFSEvents.java    License: Apache License 2.0
Verifies that only events whose paths match the narrower watch pattern are processed; one of the three test events qualifies.
@Test
public void onTriggerShouldOnlyProcessEventsWithSpecificPath() throws Exception {
    Event[] events = getEvents();

    EventBatch eventBatch = mock(EventBatch.class);
    when(eventBatch.getEvents()).thenReturn(events);

    when(inotifyEventInputStream.poll(1000000L, TimeUnit.MICROSECONDS)).thenReturn(eventBatch);
    when(hdfsAdmin.getInotifyEventStream()).thenReturn(inotifyEventInputStream);
    when(eventBatch.getTxid()).thenReturn(100L);

    GetHDFSEvents processor = new TestableGetHDFSEvents(kerberosProperties, hdfsAdmin);
    TestRunner runner = TestRunners.newTestRunner(processor);

    runner.setProperty(GetHDFSEvents.HDFS_PATH_TO_WATCH, "/some/path/create(/)?");
    runner.run();

    List<MockFlowFile> successfulFlowFiles = runner.getFlowFilesForRelationship(GetHDFSEvents.REL_SUCCESS);
    assertEquals(1, successfulFlowFiles.size());
    verify(eventBatch).getTxid();
    assertEquals("100", runner.getProcessContext().getStateManager().getState(Scope.CLUSTER).get("last.tx.id"));
}
 
Example 5
Source Projects: hadoop, big-c   Source File: PBHelper.java    License: Apache License 2.0
Maps the client-facing Event.MetadataUpdateEvent.MetadataType enum to its protobuf wire representation.
private static InotifyProtos.MetadataUpdateType metadataUpdateTypeConvert(
    Event.MetadataUpdateEvent.MetadataType type) {
  switch (type) {
  case TIMES:
    return InotifyProtos.MetadataUpdateType.META_TYPE_TIMES;
  case REPLICATION:
    return InotifyProtos.MetadataUpdateType.META_TYPE_REPLICATION;
  case OWNER:
    return InotifyProtos.MetadataUpdateType.META_TYPE_OWNER;
  case PERMS:
    return InotifyProtos.MetadataUpdateType.META_TYPE_PERMS;
  case ACLS:
    return InotifyProtos.MetadataUpdateType.META_TYPE_ACLS;
  case XATTRS:
    return InotifyProtos.MetadataUpdateType.META_TYPE_XATTRS;
  default:
    return null;
  }
}
 
Example 6
Source Projects: hadoop, big-c   Source File: PBHelper.java    License: Apache License 2.0
The inverse mapping: converts the protobuf metadata-update type back to Event.MetadataUpdateEvent.MetadataType.
private static Event.MetadataUpdateEvent.MetadataType metadataUpdateTypeConvert(
    InotifyProtos.MetadataUpdateType type) {
  switch (type) {
  case META_TYPE_TIMES:
    return Event.MetadataUpdateEvent.MetadataType.TIMES;
  case META_TYPE_REPLICATION:
    return Event.MetadataUpdateEvent.MetadataType.REPLICATION;
  case META_TYPE_OWNER:
    return Event.MetadataUpdateEvent.MetadataType.OWNER;
  case META_TYPE_PERMS:
    return Event.MetadataUpdateEvent.MetadataType.PERMS;
  case META_TYPE_ACLS:
    return Event.MetadataUpdateEvent.MetadataType.ACLS;
  case META_TYPE_XATTRS:
    return Event.MetadataUpdateEvent.MetadataType.XATTRS;
  default:
    return null;
  }
}
 
Example 7
Source Projects: localization_nifi, nifi   Source File: GetHDFSEvents.java    License: Apache License 2.0
Decides whether an event should be processed: its type must appear in the configured EVENT_TYPES property and its path must pass the processor's path filter.
private boolean toProcessEvent(ProcessContext context, Event event) {
    final String[] eventTypes = context.getProperty(EVENT_TYPES).getValue().split(",");
    for (String name : eventTypes) {
        if (name.trim().equalsIgnoreCase(event.getEventType().name())) {
            return notificationConfig.getPathFilter().accept(new Path(getPath(event)));
        }
    }

    return false;
}
 
Example 8
Source Projects: localization_nifi, nifi   Source File: GetHDFSEvents.java    License: Apache License 2.0
Extracts the affected path from an event, dispatching on its type.
private String getPath(Event event) {
    if (event == null || event.getEventType() == null) {
        throw new IllegalArgumentException("Event and event type must not be null.");
    }

    switch (event.getEventType()) {
        case CREATE: return ((Event.CreateEvent) event).getPath();
        case CLOSE: return ((Event.CloseEvent) event).getPath();
        case APPEND: return ((Event.AppendEvent) event).getPath();
        case RENAME: return ((Event.RenameEvent) event).getSrcPath();
        case METADATA: return ((Event.MetadataUpdateEvent) event).getPath();
        case UNLINK: return ((Event.UnlinkEvent) event).getPath();
        default: throw new IllegalArgumentException("Unsupported event type.");
    }
}
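Note that RENAME is the one case above that returns a source path rather than a single path. A small sketch with a builder-constructed rename event (the paths and class name are placeholders; assumes the RenameEvent builder available in recent Hadoop releases):

import org.apache.hadoop.hdfs.inotify.Event;

public class RenamePathDemo {
    public static void main(String[] args) {
        Event.RenameEvent rename = new Event.RenameEvent.Builder()
                .srcPath("/some/path/old")
                .dstPath("/some/path/new")
                .timestamp(System.currentTimeMillis())
                .build();
        // getPath(event) above would return the source path for this event.
        System.out.println(rename.getSrcPath()); // /some/path/old
    }
}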
 
Example 9
Source Projects: localization_nifi, nifi   Source File: TestGetHDFSEvents.java    License: Apache License 2.0
Verifies that the EVENT_TYPES property filters the batch down to the CREATE and METADATA events.
@Test
public void eventsProcessorShouldProperlyFilterEventTypes() throws Exception {
    Event[] events = getEvents();

    EventBatch eventBatch = mock(EventBatch.class);
    when(eventBatch.getEvents()).thenReturn(events);

    when(inotifyEventInputStream.poll(1000000L, TimeUnit.MICROSECONDS)).thenReturn(eventBatch);
    when(hdfsAdmin.getInotifyEventStream()).thenReturn(inotifyEventInputStream);
    when(eventBatch.getTxid()).thenReturn(100L);

    GetHDFSEvents processor = new TestableGetHDFSEvents(kerberosProperties, hdfsAdmin);
    TestRunner runner = TestRunners.newTestRunner(processor);

    runner.setProperty(GetHDFSEvents.HDFS_PATH_TO_WATCH, "/some/path(/.*)?");
    runner.setProperty(GetHDFSEvents.EVENT_TYPES, "create, metadata");
    runner.run();

    List<MockFlowFile> successfulFlowFiles = runner.getFlowFilesForRelationship(GetHDFSEvents.REL_SUCCESS);
    assertEquals(2, successfulFlowFiles.size());

    List<String> expectedEventTypes = Arrays.asList("CREATE", "METADATA");
    for (MockFlowFile f : successfulFlowFiles) {
        String eventType = f.getAttribute(EventAttributes.EVENT_TYPE);
        assertTrue(expectedEventTypes.contains(eventType));
    }

    verify(eventBatch).getTxid();
    assertEquals("100", runner.getProcessContext().getStateManager().getState(Scope.CLUSTER).get("last.tx.id"));
}
 
Example 10
Source Project: localization_nifi   Source File: TestGetHDFSEvents.java    License: Apache License 2.0
Verifies that NiFi expression language is evaluated inside HDFS_PATH_TO_WATCH and that hidden files are ignored.
@Test
public void makeSureExpressionLanguageIsWorkingProperlyWithinTheHdfsPathToWatch() throws Exception {
    Event[] events = new Event[] {
            new Event.CreateEvent.Builder().path("/some/path/1/2/3/t.txt").build(),
            new Event.CreateEvent.Builder().path("/some/path/1/2/4/t.txt").build(),
            new Event.CreateEvent.Builder().path("/some/path/1/2/3/.t.txt").build()
    };

    EventBatch eventBatch = mock(EventBatch.class);
    when(eventBatch.getEvents()).thenReturn(events);

    when(inotifyEventInputStream.poll(1000000L, TimeUnit.MICROSECONDS)).thenReturn(eventBatch);
    when(hdfsAdmin.getInotifyEventStream()).thenReturn(inotifyEventInputStream);
    when(eventBatch.getTxid()).thenReturn(100L);

    GetHDFSEvents processor = new TestableGetHDFSEvents(kerberosProperties, hdfsAdmin);
    TestRunner runner = TestRunners.newTestRunner(processor);

    runner.setProperty(GetHDFSEvents.HDFS_PATH_TO_WATCH, "/some/path/${literal(1)}/${literal(2)}/${literal(3)}/.*.txt");
    runner.setProperty(GetHDFSEvents.EVENT_TYPES, "create");
    runner.setProperty(GetHDFSEvents.IGNORE_HIDDEN_FILES, "true");
    runner.run();

    List<MockFlowFile> successfulFlowFiles = runner.getFlowFilesForRelationship(GetHDFSEvents.REL_SUCCESS);
    assertEquals(1, successfulFlowFiles.size());

    for (MockFlowFile f : successfulFlowFiles) {
        String eventType = f.getAttribute(EventAttributes.EVENT_TYPE);
        assertTrue(eventType.equals("CREATE"));
    }

    verify(eventBatch).getTxid();
    assertEquals("100", runner.getProcessContext().getStateManager().getState(Scope.CLUSTER).get("last.tx.id"));
}
 
Example 11
Source Project: localization_nifi   Source File: TestGetHDFSEvents.java    License: Apache License 2.0
Test fixture used by the tests above: one create, one close, and one metadata-update event.
private Event[] getEvents() {
    return new Event[]{
            EventTestUtils.createCreateEvent(),
            EventTestUtils.createCloseEvent(),
            EventTestUtils.createMetadataUpdateEvent()
    };
}
 
Example 12
Source Project: localization_nifi   Source File: EventTestUtils.java    License: Apache License 2.0
Builds a fully populated Event.CreateEvent via its builder.
public static Event.CreateEvent createCreateEvent() {
    return new Event.CreateEvent.Builder()
            .ctime(new Date().getTime())
            .groupName("group_name")
            .iNodeType(Event.CreateEvent.INodeType.DIRECTORY)
            .overwrite(false)
            .ownerName("ownerName")
            .path("/some/path/create")
            .perms(new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE))
            .replication(1)
            .symlinkTarget("/some/symlink/target")
            .build();
}
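A brief usage sketch for the helper above, reading values back from the built event (the demo class is made up and assumes EventTestUtils is on the classpath):

import org.apache.hadoop.hdfs.inotify.Event;

public class EventTestUtilsDemo {
    public static void main(String[] args) {
        Event.CreateEvent event = EventTestUtils.createCreateEvent();
        // Values set through the builder come back through plain getters.
        System.out.println(event.getPath());      // /some/path/create
        System.out.println(event.getOwnerName()); // ownerName
        System.out.println(event.getEventType()); // CREATE
    }
}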
 
Example 13
Source Project: localization_nifi   Source File: EventTestUtils.java    License: Apache License 2.0
Builds a fully populated Event.MetadataUpdateEvent, including ACL and xattr entries.
public static Event.MetadataUpdateEvent createMetadataUpdateEvent() {
    return new Event.MetadataUpdateEvent.Builder()
            .replication(0)
            .perms(new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE))
            .path("/some/path/metadata")
            .ownerName("owner")
            .acls(Collections.singletonList(new AclEntry.Builder().setName("schema").setPermission(FsAction.ALL).setScope(AclEntryScope.ACCESS).setType(AclEntryType.GROUP).build()))
            .atime(new Date().getTime())
            .groupName("groupName")
            .metadataType(Event.MetadataUpdateEvent.MetadataType.ACLS)
            .mtime(1L)
            .xAttrs(Collections.singletonList(new XAttr.Builder().setName("name").setNameSpace(XAttr.NameSpace.USER).setValue(new byte[0]).build()))
            .xAttrsRemoved(false)
            .build();
}
 
Example 14
Source Projects: hadoop, big-c   Source File: PBHelper.java    License: Apache License 2.0
Converts the protobuf inode type to Event.CreateEvent.INodeType.
private static Event.CreateEvent.INodeType createTypeConvert(InotifyProtos.INodeType
    type) {
  switch (type) {
  case I_TYPE_DIRECTORY:
    return Event.CreateEvent.INodeType.DIRECTORY;
  case I_TYPE_FILE:
    return Event.CreateEvent.INodeType.FILE;
  case I_TYPE_SYMLINK:
    return Event.CreateEvent.INodeType.SYMLINK;
  default:
    return null;
  }
}
 
Example 15
Source Projects: hadoop, big-c   Source File: PBHelper.java    License: Apache License 2.0
The inverse mapping: converts Event.CreateEvent.INodeType to its protobuf wire representation.
private static InotifyProtos.INodeType createTypeConvert(Event.CreateEvent.INodeType
    type) {
  switch (type) {
  case DIRECTORY:
    return InotifyProtos.INodeType.I_TYPE_DIRECTORY;
  case FILE:
    return InotifyProtos.INodeType.I_TYPE_FILE;
  case SYMLINK:
    return InotifyProtos.INodeType.I_TYPE_SYMLINK;
  default:
    return null;
  }
}
 
Example 16
Source Projects: hadoop, big-c   Source File: TestDFSInotifyEventInputStream.java    License: Apache License 2.0
Verifies that edits logged by the old active NameNode remain readable from the inotify stream after an HA failover to the new active.
@Test(timeout = 120000)
public void testNNFailover() throws IOException, URISyntaxException,
    MissingEventsException {
  Configuration conf = new HdfsConfiguration();
  MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build();

  try {
    cluster.getDfsCluster().waitActive();
    cluster.getDfsCluster().transitionToActive(0);
    DFSClient client = ((DistributedFileSystem) HATestUtil.configureFailoverFs
        (cluster.getDfsCluster(), conf)).dfs;
    DFSInotifyEventInputStream eis = client.getInotifyEventStream();
    for (int i = 0; i < 10; i++) {
      client.mkdirs("/dir" + i, null, false);
    }
    cluster.getDfsCluster().shutdownNameNode(0);
    cluster.getDfsCluster().transitionToActive(1);
    EventBatch batch = null;
    // we can read all of the edits logged by the old active from the new
    // active
    for (int i = 0; i < 10; i++) {
      batch = waitForNextEvents(eis);
      Assert.assertEquals(1, batch.getEvents().length);
      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
      Assert.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
          i));
    }
    Assert.assertTrue(eis.poll() == null);
  } finally {
    cluster.shutdown();
  }
}
 
Example 17
Source Projects: hadoop, big-c   Source File: TestDFSInotifyEventInputStream.java    License: Apache License 2.0
Verifies that poll(long, TimeUnit) waits for an event produced by a concurrently scheduled mkdirs call.
@Test(timeout = 120000)
public void testReadEventsWithTimeout() throws IOException,
    InterruptedException, MissingEventsException {
  Configuration conf = new HdfsConfiguration();
  MiniQJMHACluster cluster = new MiniQJMHACluster.Builder(conf).build();

  try {
    cluster.getDfsCluster().waitActive();
    cluster.getDfsCluster().transitionToActive(0);
    final DFSClient client = new DFSClient(cluster.getDfsCluster()
        .getNameNode(0).getNameNodeAddress(), conf);
    DFSInotifyEventInputStream eis = client.getInotifyEventStream();
    ScheduledExecutorService ex = Executors
        .newSingleThreadScheduledExecutor();
    ex.schedule(new Runnable() {
      @Override
      public void run() {
        try {
          client.mkdirs("/dir", null, false);
        } catch (IOException e) {
          // test will fail
          LOG.error("Unable to create /dir", e);
        }
      }
    }, 1, TimeUnit.SECONDS);
    // a very generous wait period -- the edit will definitely have been
    // processed by the time this is up
    EventBatch batch = eis.poll(5, TimeUnit.SECONDS);
    Assert.assertNotNull(batch);
    Assert.assertEquals(1, batch.getEvents().length);
    Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
    Assert.assertEquals("/dir", ((Event.CreateEvent) batch.getEvents()[0]).getPath());
  } finally {
    cluster.shutdown();
  }
}
 