@@ -557,6 +557,12 @@ public final class OzoneConsts {
public static final String COMPACTION_LOG_TABLE =
"compactionLogTable";

/**
* DB flush log table name. Referenced in RDBStore.
*/
public static final String FLUSH_LOG_TABLE =
"flushLogTable";

/**
* S3G multipart upload request's ETag header key.
*/
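Note: RDBStore (below) looks this column family up by name at open time, so the table has to be declared among the DB's column families. A minimal sketch of that prerequisite using the plain RocksDB Java API; the class name and DB path are placeholders, not part of this change:

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public final class FlushLogTableOpenSketch {
  public static void main(String[] args) throws RocksDBException {
    // Declare the default CF plus the new flushLogTable CF at open time.
    List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
        new ColumnFamilyDescriptor(
            "flushLogTable".getBytes(StandardCharsets.UTF_8)));
    List<ColumnFamilyHandle> handles = new ArrayList<>();
    try (DBOptions options = new DBOptions()
            .setCreateIfMissing(true)
            .setCreateMissingColumnFamilies(true);
         RocksDB db = RocksDB.open(
             options, "/tmp/sketch.db", descriptors, handles)) {
      // handles.get(1) is the flushLogTable column family handle.
    }
  }
}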
@@ -20,6 +20,7 @@
import static org.apache.hadoop.ozone.OzoneConsts.COMPACTION_LOG_TABLE;
import static org.apache.hadoop.ozone.OzoneConsts.DB_COMPACTION_LOG_DIR;
import static org.apache.hadoop.ozone.OzoneConsts.DB_COMPACTION_SST_BACKUP_DIR;
import static org.apache.hadoop.ozone.OzoneConsts.FLUSH_LOG_TABLE;
import static org.apache.hadoop.ozone.OzoneConsts.OM_CHECKPOINT_DIR;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_CHECKPOINT_DIR;
@@ -167,6 +168,13 @@ public class RDBStore implements DBStore {
"CompactionLogTable column family handle should not be null.");
rocksDBCheckpointDiffer.setCompactionLogTableCFHandle(
compactionLogTableCF.getHandle());
// Set CF handle in differ to store flush log entries.
ColumnFamily flushLogTableCF =
db.getColumnFamily(FLUSH_LOG_TABLE);
Objects.requireNonNull(flushLogTableCF,
"FlushLogTable column family handle should not be null.");
rocksDBCheckpointDiffer.setFlushLogTableCFHandle(
flushLogTableCF.getHandle());
// Set activeRocksDB in differ to access compaction log CF.
rocksDBCheckpointDiffer.setActiveRocksDB(db.getManagedRocksDb());
// Load all previous compaction logs
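The differ's write path is not part of this hunk. A minimal sketch of what persisting one entry into the new column family could look like with the plain RocksDB Java API; the key scheme (sequence number rendered as a string) and the class/method names are assumptions, not what RocksDBCheckpointDiffer necessarily does:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.FlushLogEntryProto;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

final class FlushLogWriterSketch {
  private final RocksDB db;
  private final ColumnFamilyHandle flushLogTableCF;

  FlushLogWriterSketch(RocksDB db, ColumnFamilyHandle flushLogTableCF) {
    this.db = db;
    this.flushLogTableCF = flushLogTableCF;
  }

  // Store one serialized flush log entry, keyed by its DB sequence number.
  void addToFlushLogTable(FlushLogEntryProto entry) throws RocksDBException {
    byte[] key = String.valueOf(entry.getDbSequenceNumber())
        .getBytes(StandardCharsets.UTF_8);
    db.put(flushLogTableCF, key, entry.toByteArray());
  }
}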
16 changes: 16 additions & 0 deletions hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -539,6 +539,22 @@ message CompactionLogEntryProto {
optional string compactionReason = 5;
}

// Flush tracking messages (simpler than compaction tracking)
message FlushFileInfoProto {
required string fileName = 1;
optional string startKey = 2;
optional string endKey = 3;
optional string columnFamily = 4;
optional bool pruned = 5;
}

message FlushLogEntryProto {
required uint64 dbSequenceNumber = 1;
required uint64 flushTime = 2;
required FlushFileInfoProto fileInfo = 3; // Single file (not repeated)
optional string flushReason = 4;
}

message NodeTopology {
optional string name = 1;
optional string location = 2;
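For reference, assembling one of these entries from Java with the generated protobuf builders looks like this (all field values are placeholders):

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.FlushFileInfoProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.FlushLogEntryProto;

public final class FlushLogEntrySketch {
  public static FlushLogEntryProto buildEntry() {
    // One flushed SST file; values are placeholders.
    FlushFileInfoProto fileInfo = FlushFileInfoProto.newBuilder()
        .setFileName("000123.sst")
        .setStartKey("/vol1/bucket1/a")
        .setEndKey("/vol1/bucket1/z")
        .setColumnFamily("keyTable")
        .setPruned(false)
        .build();
    return FlushLogEntryProto.newBuilder()
        .setDbSequenceNumber(42L)
        .setFlushTime(System.currentTimeMillis())
        .setFileInfo(fileInfo)           // exactly one file per entry
        .setFlushReason("memtable full") // optional
        .build();
  }
}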
@@ -0,0 +1,184 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.ozone.compaction.log;

import com.google.common.annotations.VisibleForTesting;
import java.util.Objects;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.ozone.rocksdb.util.SstFileInfo;
import org.rocksdb.LiveFileMetaData;

/**
* Flush file information for persistence in FlushLogTable.
* Similar to CompactionFileInfo but used for flush tracking.
*/
public final class FlushFileInfo extends SstFileInfo {
private boolean pruned;

@VisibleForTesting
public FlushFileInfo(String fileName,
String startRange,
String endRange,
String columnFamily) {
this(fileName, startRange, endRange, columnFamily, false);
}

public FlushFileInfo(String fileName,
String startRange,
String endRange,
String columnFamily,
boolean pruned) {
super(fileName, startRange, endRange, columnFamily);
this.pruned = pruned;
}

public boolean isPruned() {
return pruned;
}

public void setPruned() {
this.pruned = true;
}

public HddsProtos.FlushFileInfoProto getProtobuf() {
HddsProtos.FlushFileInfoProto.Builder builder =
HddsProtos.FlushFileInfoProto.newBuilder()
.setFileName(getFileName())
.setPruned(pruned);
if (getStartKey() != null) {
builder = builder.setStartKey(getStartKey());
}
if (getEndKey() != null) {
builder = builder.setEndKey(getEndKey());
}
if (getColumnFamily() != null) {
builder = builder.setColumnFamily(getColumnFamily());
}
return builder.build();
}

public static FlushFileInfo getFromProtobuf(
HddsProtos.FlushFileInfoProto proto) {
Builder builder = new Builder(proto.getFileName());

if (proto.hasStartKey()) {
builder.setStartRange(proto.getStartKey());
}
if (proto.hasEndKey()) {
builder.setEndRange(proto.getEndKey());
}
if (proto.hasColumnFamily()) {
builder.setColumnFamily(proto.getColumnFamily());
}
if (proto.hasPruned() && proto.getPruned()) {
builder.setPruned();
}

return builder.build();
}

@Override
public String toString() {
return String.format("%s, isPruned: '%b'", super.toString(), pruned);
}

@Override
public SstFileInfo copyObject() {
return new FlushFileInfo(getFileName(), getStartKey(), getEndKey(),
getColumnFamily(), pruned);
}

@Override
public boolean equals(Object o) {
if (!(o instanceof FlushFileInfo)) {
return false;
}
return super.equals(o) && pruned == ((FlushFileInfo) o).pruned;
}

@Override
public int hashCode() {
return Objects.hash(super.hashCode(), pruned);
}

/**
* Builder of FlushFileInfo.
*/
public static class Builder {
private final String fileName;
private String startRange;
private String endRange;
private String columnFamily;
private boolean pruned = false;

public Builder(String fileName) {
this.fileName = Objects.requireNonNull(fileName,
"FileName is required parameter.");
}

public Builder setStartRange(String startRange) {
this.startRange = startRange;
return this;
}

public Builder setEndRange(String endRange) {
this.endRange = endRange;
return this;
}

public Builder setColumnFamily(String columnFamily) {
this.columnFamily = columnFamily;
return this;
}

public Builder setValues(LiveFileMetaData fileMetaData) {
if (fileMetaData != null) {
String columnFamilyName = StringUtils.bytes2String(
fileMetaData.columnFamilyName());
String startRangeValue = StringUtils.bytes2String(
fileMetaData.smallestKey());
String endRangeValue = StringUtils.bytes2String(
fileMetaData.largestKey());
this.setColumnFamily(columnFamilyName)
.setStartRange(startRangeValue)
.setEndRange(endRangeValue);
}
return this;
}

public Builder setPruned() {
this.pruned = true;
return this;
}

public FlushFileInfo build() {
if ((startRange != null || endRange != null || columnFamily != null) &&
(startRange == null || endRange == null || columnFamily == null)) {
throw new IllegalArgumentException(
String.format("Either all of startRange, endRange and " +
"columnFamily should be non-null or null. " +
"startRange: '%s', endRange: '%s', columnFamily: '%s'.",
startRange, endRange, columnFamily));
}

return new FlushFileInfo(fileName, startRange, endRange,
columnFamily, pruned);
}
}
}
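Usage sketch for the class above, showing the builder and the protobuf round-trip; the values and the wrapper class are illustrative only:

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.ozone.compaction.log.FlushFileInfo;

public final class FlushFileInfoUsageSketch {
  public static void main(String[] args) {
    // Build with all three range fields set (build() rejects a mix of
    // null and non-null values for startRange/endRange/columnFamily).
    FlushFileInfo info = new FlushFileInfo.Builder("000123.sst")
        .setStartRange("/vol1/bucket1/a")
        .setEndRange("/vol1/bucket1/z")
        .setColumnFamily("keyTable")
        .build();

    // Round-trip through the protobuf representation.
    HddsProtos.FlushFileInfoProto proto = info.getProtobuf();
    FlushFileInfo restored = FlushFileInfo.getFromProtobuf(proto);
    System.out.println(info.equals(restored)); // true
  }
}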