com.github.shyiko.mysql.binlog.event.UpdateRowsEventData Java Examples

The following examples show how to use com.github.shyiko.mysql.binlog.event.UpdateRowsEventData. Each example notes its original project, source file, and license above the code.
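As a minimal sketch of the pattern most of the examples below share (the hostname, port, credentials, and the UpdateRowsListener class name are placeholders, not taken from any of the projects): register a listener on a BinaryLogClient and, when the event data is an UpdateRowsEventData, read each row as a before/after pair.

import java.io.Serializable;
import java.util.Arrays;
import java.util.Map;

import com.github.shyiko.mysql.binlog.BinaryLogClient;
import com.github.shyiko.mysql.binlog.event.EventData;
import com.github.shyiko.mysql.binlog.event.UpdateRowsEventData;

public class UpdateRowsListener {
    public static void main(String[] args) throws Exception {
        // Placeholder connection settings; replace with real ones.
        BinaryLogClient client = new BinaryLogClient("localhost", 3306, "user", "password");
        client.registerEventListener(event -> {
            EventData data = event.getData();
            if (data instanceof UpdateRowsEventData) {
                UpdateRowsEventData update = (UpdateRowsEventData) data;
                // Each row is a before/after pair: key = column values before the update, value = after.
                for (Map.Entry<Serializable[], Serializable[]> row : update.getRows()) {
                    System.out.println("table " + update.getTableId()
                            + " before=" + Arrays.toString(row.getKey())
                            + " after=" + Arrays.toString(row.getValue()));
                }
            }
        });
        client.connect(); // blocks until the client disconnects
    }
}

Note that getRows() returns Map.Entry pairs whose key holds the column values before the update and whose value holds them after; getIncludedColumns() and getIncludedColumnsBeforeUpdate() indicate which columns those arrays actually contain.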
Example #1
Source File: BinLogUpdateEventHandler.java    From kkbinlog with Apache License 2.0
@Override
protected EventBaseDTO formatData(Event event) {
    UpdateRowsEventData d = event.getData();
    UpdateRowsDTO updateRowsDTO = new UpdateRowsDTO();
    updateRowsDTO.setEventType(DatabaseEvent.UPDATE_ROWS);
    // add table info
    ColumnsTableMapEventData tableMapData = context.getTableMapData(d.getTableId());
    updateRowsDTO.setDatabase(tableMapData.getDatabase());
    updateRowsDTO.setTable(tableMapData.getTable());
    updateRowsDTO.setNamespace(context.getBinaryLogConfig().getNamespace());
    // add column mappings
    int[] includedColumns = d.getIncludedColumns().stream().toArray();
    List<UpdateRow> urs = d.getRows().stream()
            .map(e -> new UpdateRow(convert(e.getKey(),includedColumns,tableMapData),
                    convert(e.getValue(),includedColumns,tableMapData))).collect(Collectors.toList());
    updateRowsDTO.setRows(urs);
    return updateRowsDTO;
}
 
Example #2
Source File: BinaryLogConnectorEventMapperTest.java    From SpinalTap with Apache License 2.0
@Test
public void testUpdateEvent() {
  eventHeader.setEventType(EventType.EXT_UPDATE_ROWS);
  UpdateRowsEventData eventData = new UpdateRowsEventData();
  eventData.setTableId(TABLE_ID);
  eventData.setRows(ImmutableList.of(Maps.immutableEntry(PREV_ROW, ROW)));

  Optional<BinlogEvent> binlogEvent =
      BinaryLogConnectorEventMapper.INSTANCE.map(
          new Event(eventHeader, eventData), BINLOG_FILE_POS);
  assertTrue(binlogEvent.isPresent());
  assertTrue(binlogEvent.get() instanceof UpdateEvent);
  UpdateEvent updateEvent = (UpdateEvent) (binlogEvent.get());
  assertEquals(BINLOG_FILE_POS, updateEvent.getBinlogFilePos());
  assertEquals(ImmutableList.of(Maps.immutableEntry(PREV_ROW, ROW)), updateEvent.getRows());
  assertEquals(SERVER_ID, updateEvent.getServerId());
  assertEquals(TABLE_ID, updateEvent.getTableId());
  assertEquals(TIMESTAMP, updateEvent.getTimestamp());
}
 
Example #3
Source File: EventProcessor.java    From openmessaging-connect-odar with Apache License 2.0
private void processUpdateEvent(Event event) {
    UpdateRowsEventData data = event.getData();
    Long tableId = data.getTableId();
    List<Map.Entry<Serializable[], Serializable[]>> list = data.getRows();

    for (Map.Entry<Serializable[], Serializable[]> entry : list) {
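        // entry.getKey() is the row's before-image; entry.getValue() is the after-image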
        addRow(EntryType.UPDATE, tableId, entry.getValue(), entry.getKey());
    }
}
 
Example #4
Source File: BinLogUpdateEventHandler.java    From kkbinlog with Apache License 2.0
@Override
protected Set<ClientInfo> filter(Event event) {
    UpdateRowsEventData d = event.getData();
    long tableId = d.getTableId();
    TableMapEventData tableMapEventData = context.getTableMapData(tableId);
    String tableKey = tableMapEventData.getDatabase().concat("/").concat(tableMapEventData.getTable());
    return clientInfoMap.get(tableKey);
}
 
Example #5
Source File: RowsEvent.java    From syncer with BSD 3-Clause "New" or "Revised" License
public static List<IndexedFullRow> getIndexedRows(SimpleEventType eventType, EventData data,
                                                  Set<Integer> primaryKeys) {
  switch (eventType) {
    case UPDATE:
      return UpdateRowsEvent.getIndexedRows((UpdateRowsEventData) data);
    case WRITE:
      WriteRowsEventData write = (WriteRowsEventData) data;
      return getIndexedRows(write.getRows(), write.getIncludedColumns());
    case DELETE:
      DeleteRowsEventData delete = (DeleteRowsEventData) data;
      return getIndexedRows(delete.getRows(), delete.getIncludedColumns());
    default:
      throw new IllegalArgumentException("Unsupported event type");
  }
}
 
Example #6
Source File: UpdateRowsEvent.java    From syncer with BSD 3-Clause "New" or "Revised" License
static List<IndexedFullRow> getIndexedRows(UpdateRowsEventData updateRowsEventData) {
  List<IndexedFullRow> res = new ArrayList<>();
  // TODO 17/10/10 may support different binlog row image, only 'full' now
  // If support 'minimal' format:
  // - it will only keep columns needed to identify rows (id) & updated fields, but not partition key in DRDS
  // - it will disable upsert related function of ES output channel
  List<Entry<Serializable[], Serializable[]>> rows = updateRowsEventData.getRows();
  for (Entry<Serializable[], Serializable[]> row : rows) {
    res.add(new IndexedFullRow(row.getValue()).setBefore(row.getKey()));
  }
  return res;
}
 
Example #7
Source File: RecordConverter.java    From datacollector with Apache License 2.0
public List<Record> toRecords(EnrichedEvent event) {
  EventType eventType = event.getEvent().getHeader().getEventType();
  switch (eventType) {
    case PRE_GA_WRITE_ROWS:
    case WRITE_ROWS:
    case EXT_WRITE_ROWS:
      return toRecords(
          event.getTable(),
          event.getEvent().getHeader(),
          event.getEvent().<WriteRowsEventData>getData(),
          event.getOffset()
      );
    case PRE_GA_UPDATE_ROWS:
    case UPDATE_ROWS:
    case EXT_UPDATE_ROWS:
      return toRecords(
          event.getTable(),
          event.getEvent().getHeader(),
          event.getEvent().<UpdateRowsEventData>getData(),
          event.getOffset()
      );
    case PRE_GA_DELETE_ROWS:
    case DELETE_ROWS:
    case EXT_DELETE_ROWS:
      return toRecords(
          event.getTable(),
          event.getEvent().getHeader(),
          event.getEvent().<DeleteRowsEventData>getData(),
          event.getOffset()
      );
    default:
      throw new IllegalArgumentException(String.format("EventType '%s' not supported", eventType));
  }
}
 
Example #8
Source File: RecordConverter.java    From datacollector with Apache License 2.0
private List<Record> toRecords(Table table,
                               EventHeader eventHeader,
                               UpdateRowsEventData eventData,
                               SourceOffset offset) {
  List<Record> res = new ArrayList<>(eventData.getRows().size());
  for (Map.Entry<Serializable[], Serializable[]> row : eventData.getRows()) {
    Record record = recordFactory.create(offset.format());
    Map<String, Field> fields = createHeader(table, eventHeader, offset);
    fields.put(TYPE_FIELD, create("UPDATE"));
    record.getHeader().setAttribute(
        OperationType.SDC_OPERATION_TYPE,
        String.valueOf(OperationType.UPDATE_CODE)
    );
    List<ColumnValue> columnValuesOld = zipColumnsValues(
        eventData.getIncludedColumnsBeforeUpdate(),
        table,
        row.getKey()
    );
    Map<String, Field> oldData = toMap(columnValuesOld);
    fields.put(OLD_DATA_FIELD, create(oldData));

    List<ColumnValue> columnValues = zipColumnsValues(
        eventData.getIncludedColumns(),
        table,
        row.getValue()
    );
    Map<String, Field> data = toMap(columnValues);
    fields.put(DATA_FIELD, create(data));

    record.set(create(fields));
    res.add(record);
  }
  return res;
}
 
Example #9
Source File: BinlogServiceTest.java    From ad with Apache License 2.0
@Test
public void testBinlog() throws IOException {
    String hostname = "127.0.0.1", username = "yuwen", password = "lyp82nlf";
    int port = 3306;
    // BinaryLogClient is essentially a client that connects to the database:
    // it disguises itself as a slave and connects to the master
    BinaryLogClient client = new BinaryLogClient(
            hostname, port, username, password
    );
    // Set the binlog file to listen to; if unset, the latest binlog is used
    //client.setBinlogFilename();
    // Set the binlog position to listen from; if unset, the latest position is used
    //client.setBinlogPosition();
    // Register an event listener for changes happening in MySQL; an Event represents something that has already occurred
    client.registerEventListener(event -> {
        // Data describing the change to a MySQL table
        EventData eventData = event.getData();
        if (eventData instanceof UpdateRowsEventData) {
            log.info("update event");
            log.debug("{}", eventData);
        } else if (eventData instanceof WriteRowsEventData) {
            log.info("write event");
            log.debug("{}", eventData);
        } else if (eventData instanceof DeleteRowsEventData) {
            log.info("delete event");
            log.debug("{}", eventData);
        }
    });
    // Connect to the master and start listening
    client.connect();


    // After starting, manually connect to MySQL and run:
    // insert into `ad_unit_keyword` (`unit_id`, `keyword`) values (10, '标志');
    // The console then logs:
    // 15:39:17.410 [main] INFO top.ezttf.ad.service.BinlogServiceTest - write event
    // 15:39:17.459 [main] DEBUG top.ezttf.ad.service.BinlogServiceTest - WriteRowsEventData{tableId=122, includedColumns={0, 1, 2}, rows=[
    //     [13, 10, 标志]
    // ]}

    // WriteRowsEventData{tableId=118, includedColumns={0, 1, 2, 3, 4, 5, 6, 7}, rows=[
    //    [11, 666, plan, 1, 2019-01-01, 2019-01-01, Tue Jan 01 08:00:00 CST 2019, Tue Jan 01 08:00:00 CST 2019]
    //]}
}
 
Example #10
Source File: BinaryLogConsumer.java    From datacollector with Apache License 2.0
@Override
public void onEvent(Event event) {
  LOG.trace("Received event {}", event);
  EventType eventType = event.getHeader().getEventType();
  currentBinLogFileName = client.getBinlogFilename();
  switch (eventType) {
    case TABLE_MAP:
      handleTableMappingEvent((TableMapEventData) event.getData());
      break;
    case PRE_GA_WRITE_ROWS:
    case WRITE_ROWS:
    case EXT_WRITE_ROWS:
      handleRowEvent(event, event.<WriteRowsEventData>getData().getTableId());
      break;
    case PRE_GA_UPDATE_ROWS:
    case UPDATE_ROWS:
    case EXT_UPDATE_ROWS:
      handleRowEvent(event, event.<UpdateRowsEventData>getData().getTableId());
      break;
    case PRE_GA_DELETE_ROWS:
    case DELETE_ROWS:
    case EXT_DELETE_ROWS:
      handleRowEvent(event, event.<DeleteRowsEventData>getData().getTableId());
      break;
    case QUERY:
      QueryEventData queryEventData = event.getData();
      String query = queryEventData.getSql();
      if (isCommit(query)) {
        finishTx();
      } else if (isSchemaChangeQuery(query)) {
        schemaRepository.evictAll();
      }
      break;
    case XID:
      finishTx();
      break;
    case GTID:
      GtidEventData eventData = event.getData();
      currentGtidSet = client.getGtidSet();
      currentTxGtid = eventData.getGtid();
      currentTxEventSeqNo = 0;
      LOG.trace("Started new tx, gtid: {}", currentTxGtid);
      break;
    default:
      // ignore
      break;
  }
}
 
Example #11
Source File: UpdateRowsEventInfo.java    From nifi with Apache License 2.0
public UpdateRowsEventInfo(TableInfo tableInfo, Long timestamp, String binlogFilename, Long binlogPosition, UpdateRowsEventData data) {
    super(tableInfo, UPDATE_EVENT, timestamp, binlogFilename, binlogPosition, data.getIncludedColumns(), data.getRows());
    includedColumnsBeforeUpdate = data.getIncludedColumnsBeforeUpdate();
}