From 18b86a2debab4581db50c7edb5ca1cf191c0903f Mon Sep 17 00:00:00 2001 From: guojn1 Date: Fri, 21 Feb 2025 16:07:59 +0800 Subject: [PATCH] [fix][dingo-calcite] Resolve the issue of inconsistency between the number of copies in meta records and the actual number of copies --- .../src/main/codegen/includes/AlterTable.ftl | 11 ++- .../src/main/codegen/includes/Show.ftl | 1 + .../src/main/codegen/includes/parserImpls.ftl | 10 +- .../src/main/codegen/templates/Parser.jj | 98 +++++++++++-------- .../io/dingodb/calcite/DingoDdlExecutor.java | 17 +++- .../java/io/dingodb/calcite/DingoParser.java | 13 +++ .../dingodb/calcite/DingoParserContext.java | 9 +- .../executor/ShowCreateTableExecutor.java | 3 + .../executor/ShowWarningsExecutor.java | 2 +- .../grammar/ddl/SqlAlterAutoIncrement.java | 6 ++ .../calcite/program/DingoPrograms.java | 42 -------- .../calcite/sql/ddl/DingoSqlColumn.java | 3 + .../io/dingodb/common/mysql/DingoErr.java | 11 ++- .../common/mysql/error/ErrorMessage.java | 1 + .../io/dingodb/driver/DingoConnection.java | 16 +++ .../io/dingodb/driver/DingoDriverParser.java | 3 + .../java/io/dingodb/driver/DingoMeta.java | 8 +- .../driver/mysql/command/MysqlCommands.java | 3 + .../server/executor/ddl/DdlWorker.java | 59 ++++++++--- .../main/java/io/dingodb/meta/DdlService.java | 4 +- .../dingodb/meta/ddl/InfoSchemaBuilder.java | 7 +- .../dingodb/store/proxy/ddl/DdlHandler.java | 3 +- .../dingodb/store/proxy/ddl/DdlService.java | 3 +- .../dingodb/store/proxy/meta/MetaService.java | 69 +++++++++++-- 24 files changed, 271 insertions(+), 131 deletions(-) delete mode 100644 dingo-calcite/src/main/java/io/dingodb/calcite/program/DingoPrograms.java diff --git a/dingo-calcite/src/main/codegen/includes/AlterTable.ftl b/dingo-calcite/src/main/codegen/includes/AlterTable.ftl index 39bbcb8410..a390397783 100644 --- a/dingo-calcite/src/main/codegen/includes/AlterTable.ftl +++ b/dingo-calcite/src/main/codegen/includes/AlterTable.ftl @@ -91,6 +91,8 @@ SqlAlterTable SqlAlterTable(Span s, String scope): { alterTable = addColumn(s, scope, id) | alterTable = addConstraint(s, scope, id) + | + alterTable = foreign(s, id) | alterTable = addIndexByMode(s, scope, id, "fulltext") | @@ -242,7 +244,8 @@ SqlAlterTable addIndex(Span s, String scope, SqlIdentifier id): { [] columnList = indexColumns() ) ( - withColumnList = ParenthesizedSimpleIdentifierList() + LOOKAHEAD(2) + (withColumnList = ParenthesizedSimpleIdentifierList() | { strIdent(); }) | engine = dingoIdentifier() { if (engine.equalsIgnoreCase("innodb")) { engine = "TXN_LSM";} } | @@ -288,7 +291,8 @@ SqlAlterTable addUniqueIndex(Span s, String scope, SqlIdentifier id): { { index = getNextToken().image; } [] columnList = indexColumns() ( - withColumnList = ParenthesizedSimpleIdentifierList() + LOOKAHEAD(2) + (withColumnList = ParenthesizedSimpleIdentifierList() | { strIdent(); }) | engine = dingoIdentifier() { if (engine.equalsIgnoreCase("innodb")) { engine = "TXN_LSM";} } | @@ -334,7 +338,8 @@ SqlAlterTable addIndexByMode(Span s, String scope, SqlIdentifier id, String mode { index = getNextToken().image; } columnList = indexColumns() ( - withColumnList = ParenthesizedSimpleIdentifierList() + LOOKAHEAD(2) + (withColumnList = ParenthesizedSimpleIdentifierList() | { strIdent(); }) | engine = dingoIdentifier() { if (engine.equalsIgnoreCase("innodb")) { engine = "TXN_LSM";} } | diff --git a/dingo-calcite/src/main/codegen/includes/Show.ftl b/dingo-calcite/src/main/codegen/includes/Show.ftl index bfac11db09..55d4e1902e 100644 --- 
a/dingo-calcite/src/main/codegen/includes/Show.ftl +++ b/dingo-calcite/src/main/codegen/includes/Show.ftl @@ -80,6 +80,7 @@ SqlShow SqlShowTable(Span s): { } { ( + LOOKAHEAD(2) [ ( | ) { schema = token.image.toUpperCase(); } ] [ { pattern = token.image.toUpperCase().replace("'", ""); } ] { diff --git a/dingo-calcite/src/main/codegen/includes/parserImpls.ftl b/dingo-calcite/src/main/codegen/includes/parserImpls.ftl index bdf2ad6b73..c33fd4e9e0 100644 --- a/dingo-calcite/src/main/codegen/includes/parserImpls.ftl +++ b/dingo-calcite/src/main/codegen/includes/parserImpls.ftl @@ -209,7 +209,7 @@ void TableElement(List list) : | refTable = CompoundIdentifier() refColumnList = ParenthesizedSimpleIdentifierList() [(||)] - ( ( + ( LOOKAHEAD(2) ( updateRefOpt = referenceOpt() | deleteRefOpt = referenceOpt() @@ -226,7 +226,7 @@ void TableElement(List list) : } ) | - [ { s.add(this); } [name = SimpleIdentifier()] ] + [ LOOKAHEAD(2) { s.add(this); } [name = SimpleIdentifier()] ] ( { s.add(this); } e = Expression(ExprContext.ACCEPT_SUB_QUERY) [] (||{ String t = "";}){ @@ -247,7 +247,7 @@ void TableElement(List list) : { indexType = "text"; } columnList = ParenthesizedSimpleIdentifierList() ) - ( withColumnList = ParenthesizedSimpleIdentifierList() + ( (withColumnList = ParenthesizedSimpleIdentifierList() | { strIdent();}) | { engine = getNextToken().image; if (engine.equalsIgnoreCase("innodb")) { engine = "TXN_LSM";} } | @@ -403,7 +403,7 @@ ColumnOption parseColumnOption(): { | refTable = CompoundIdentifier() refColumnList = ParenthesizedSimpleIdentifierList() [(||)] - ( ( + ( LOOKAHEAD(2) ( updateRefOpt = referenceOpt() | deleteRefOpt = referenceOpt() @@ -915,8 +915,6 @@ Properties indexOption(): { | indexType = indexTypeName() { prop.put("indexType", indexType);} | - { prop.put("parser", strIdent()); } - | { prop.put("comment", strIdent());} | { prop.put("visible", "true"); } diff --git a/dingo-calcite/src/main/codegen/templates/Parser.jj b/dingo-calcite/src/main/codegen/templates/Parser.jj index 897fd2fabd..e150ae4d04 100644 --- a/dingo-calcite/src/main/codegen/templates/Parser.jj +++ b/dingo-calcite/src/main/codegen/templates/Parser.jj @@ -1079,6 +1079,7 @@ void AddArg0(List list, ExprContext exprContext) : LOOKAHEAD(3) e = TableParam() | + LOOKAHEAD(2) e = PartitionedQueryOrQueryOrExpr(firstExprContext) | e = TableFunctionCall() @@ -1107,6 +1108,7 @@ void AddArg(List list, ExprContext exprContext) : | e = Expression(exprContext) | + LOOKAHEAD(2) e = TableParam() | e = TableFunctionCall() @@ -1412,9 +1414,9 @@ SqlNode SqlSelect() : ( groupBy = GroupBy() | { groupBy = null; } ) ( having = Having() | { having = null; } ) ( windowDecls = Window() | { windowDecls = null; } ) - [ exportOptions = exportOptions() ] - [ { forUpdate=true; }] - [ { flashbackQuery = true; } + [ LOOKAHEAD(2) exportOptions = exportOptions() ] + [ LOOKAHEAD(2) { forUpdate=true; }] + [ LOOKAHEAD(2) { flashbackQuery = true; } ( { flashBackStr = token.image.replace("'", ""); } | @@ -1490,8 +1492,8 @@ SqlNode SqlTraceSelect() : ( windowDecls = Window() | { windowDecls = null; } ) ( orderBy = OrderBy(true) | { orderBy = null; } ) [ LimitClause(s, offsetFetch) ] - [ exportOptions = exportOptions() ] - [ { forUpdate=true; }] + [ LOOKAHEAD(2) exportOptions = exportOptions() ] + [ LOOKAHEAD(2) { forUpdate=true; }] | E() { fromClause = null; @@ -1542,15 +1544,12 @@ ExportOptions exportOptions(): { ] [ - [ [ { lineStarting = getSpecialBytes(token.image); }] + ( [ { lineStarting = getSpecialBytes(token.image); }] [ { lineStarting = 
getSpecialHexBytes(token.image); }] - ] - [ [ { lineTerminated = getSpecialBytes(token.image); } ] + | + [ { lineTerminated = getSpecialBytes(token.image); } ] [ { lineTerminated = getSpecialHexBytes(token.image); }] - ] - [ [ { lineStarting = getSpecialBytes(token.image); }] - [ { lineStarting = getSpecialHexBytes(token.image); }] - ] + )* ] [ ( | ) { startTs = Long.parseLong(token.image); }] { @@ -2447,7 +2446,7 @@ SqlNode TableRef3(ExprContext exprContext, boolean lateral) : ( tableRef = TableHints(tableName) | { tableRef = tableName; } ) [ tableRef = ExtendTable(tableRef) ] tableRef = Over(tableRef) - [ tableRef = Snapshot(tableRef) ] + [ LOOKAHEAD(2) tableRef = Snapshot(tableRef) ] [ tableRef = MatchRecognize(tableRef) ] | LOOKAHEAD(2) @@ -2460,6 +2459,7 @@ SqlNode TableRef3(ExprContext exprContext, boolean lateral) : { s = span(); } args = ParenthesizedQueryOrCommaList(ExprContext.ACCEPT_SUB_QUERY) [ + LOOKAHEAD(2) { unnestOp = SqlStdOperatorTable.UNNEST_WITH_ORDINALITY; } @@ -2485,7 +2485,7 @@ SqlNode TableRef3(ExprContext exprContext, boolean lateral) : tableRef = Unpivot(tableRef) ] [ - [ ] + [ LOOKAHEAD(2) ] ( alias = SimpleIdentifier() | @@ -3771,6 +3771,7 @@ SqlNode LeafQueryOrExpr(ExprContext exprContext) : SqlNode e; } { + LOOKAHEAD(2) e = LeafQuery(exprContext) { return e; } | e = Expression(exprContext) { return e; } @@ -4342,6 +4343,7 @@ SqlNode AtomicRowExpression() : LOOKAHEAD( [] FunctionName() ) e = NamedFunctionCall() | + LOOKAHEAD(2) e = ContextVariable() | e = CompoundIdentifier() @@ -4435,7 +4437,8 @@ SqlSetOption SqlSetOption(Span s, String scope) : { s.add(this); } - [ + ( + LOOKAHEAD(2) ( | ) { user = token.image; } @@ -4448,14 +4451,18 @@ SqlSetOption SqlSetOption(Span s, String scope) : { return new SqlSetPassword(s.end(this), user, host, password); } - ] + | ( - [ ] - [ { scope = "SYSTEM"; } ] - [ { scope = "SESSION"; } ] - [ { scope = "EXECUTOR"; } ] - [ { scope = "USER"; } ] - [ { scope = "GLOBAL"; } ] + LOOKAHEAD(2) { scope = "SYSTEM"; } + | + LOOKAHEAD(2) { scope = "SESSION"; } + | + { scope = "EXECUTOR"; } + | + { scope = "USER"; } + | + { scope = "GLOBAL"; } + | name = CompoundIdentifier() ( { @@ -4468,13 +4475,17 @@ SqlSetOption SqlSetOption(Span s, String scope) : return new SqlSetOption(s.end(this), scope, name, val); } | - ( - | + + | - [ [] [ { val = new SqlIdentifier("read-committed", s.end(this)); } ] [ { val = new SqlIdentifier("read-uncommitted", s.end(this)); }] ] - [ [] { val = new SqlIdentifier("repeatable-read", s.end(this)); } ] - [ { val = new SqlIdentifier(token.image, s.end(this)); }] - ){ + ( + [] ( { val = new SqlIdentifier("read-committed", s.end(this)); } | { val = new SqlIdentifier("read-uncommitted", s.end(this)); }) + | + [] { val = new SqlIdentifier("repeatable-read", s.end(this)); } + | + { val = new SqlIdentifier(token.image, s.end(this)); } + ) + { if (name.names.size() != 1 || !name.names.get(0).equalsIgnoreCase("transaction")) { throw new RuntimeException("Syntax error"); } @@ -4485,7 +4496,7 @@ SqlSetOption SqlSetOption(Span s, String scope) : return new SqlSetOption(s.end(this), scope, name, val); } | - [ { val = new SqlIdentifier("off", s.end(this)); }]{ + { name = new SqlIdentifier("transaction_read_only", name.getParserPosition()); if (val == null) { val = new SqlIdentifier("on", s.end(this)); @@ -4513,8 +4524,10 @@ SqlSetOption SqlSetOption(Span s, String scope) : val = new SqlIdentifier(token.image.toUpperCase(Locale.ROOT), getPos()); } ) + [ ] ) )* + ) { if (scope == null) { scope = "session"; @@ -5417,17 +5430,17 
@@ SqlIntervalQualifier TimeUnitOrName() : { { LOOKAHEAD(2) { return new SqlIntervalQualifier(TimeUnit.NANOSECOND, null, getPos()); } -| { return new SqlIntervalQualifier(TimeUnit.MICROSECOND, null, getPos()); } -| { return new SqlIntervalQualifier(TimeUnit.MILLISECOND, null, getPos()); } +| LOOKAHEAD(2) { return new SqlIntervalQualifier(TimeUnit.MICROSECOND, null, getPos()); } +| LOOKAHEAD(2) { return new SqlIntervalQualifier(TimeUnit.MILLISECOND, null, getPos()); } | { return new SqlIntervalQualifier(TimeUnit.SECOND, null, getPos()); } | { return new SqlIntervalQualifier(TimeUnit.MINUTE, null, getPos()); } | { return new SqlIntervalQualifier(TimeUnit.HOUR, null, getPos()); } | { return new SqlIntervalQualifier(TimeUnit.DAY, null, getPos()); } -| { return new SqlIntervalQualifier(TimeUnit.DOW, null, getPos()); } -| { return new SqlIntervalQualifier(TimeUnit.DOY, null, getPos()); } -| { return new SqlIntervalQualifier(TimeUnit.ISODOW, null, getPos()); } -| { return new SqlIntervalQualifier(TimeUnit.ISOYEAR, null, getPos()); } -| { span = span(); } +| LOOKAHEAD(2) { return new SqlIntervalQualifier(TimeUnit.DOW, null, getPos()); } +| LOOKAHEAD(2) { return new SqlIntervalQualifier(TimeUnit.DOY, null, getPos()); } +| LOOKAHEAD(2) { return new SqlIntervalQualifier(TimeUnit.ISODOW, null, getPos()); } +| LOOKAHEAD(2) { return new SqlIntervalQualifier(TimeUnit.ISOYEAR, null, getPos()); } +| LOOKAHEAD(2) { span = span(); } ( LOOKAHEAD(2) w = weekdayName() { @@ -5437,12 +5450,12 @@ SqlIntervalQualifier TimeUnitOrName() : { { return new SqlIntervalQualifier(TimeUnit.WEEK, null, getPos()); } ) | { return new SqlIntervalQualifier(TimeUnit.MONTH, null, getPos()); } -| { return new SqlIntervalQualifier(TimeUnit.QUARTER, null, getPos()); } +| LOOKAHEAD(2) { return new SqlIntervalQualifier(TimeUnit.QUARTER, null, getPos()); } | { return new SqlIntervalQualifier(TimeUnit.YEAR, null, getPos()); } -| { return new SqlIntervalQualifier(TimeUnit.EPOCH, null, getPos()); } -| { return new SqlIntervalQualifier(TimeUnit.DECADE, null, getPos()); } -| { return new SqlIntervalQualifier(TimeUnit.CENTURY, null, getPos()); } -| { return new SqlIntervalQualifier(TimeUnit.MILLENNIUM, null, getPos()); } +| LOOKAHEAD(2) { return new SqlIntervalQualifier(TimeUnit.EPOCH, null, getPos()); } +| LOOKAHEAD(2) { return new SqlIntervalQualifier(TimeUnit.DECADE, null, getPos()); } +| LOOKAHEAD(2) { return new SqlIntervalQualifier(TimeUnit.CENTURY, null, getPos()); } +| LOOKAHEAD(2) { return new SqlIntervalQualifier(TimeUnit.MILLENNIUM, null, getPos()); } | unitName = SimpleIdentifier() { return new SqlIntervalQualifier(unitName.getSimple(), unitName.getParserPosition()); @@ -7084,6 +7097,7 @@ SqlCall DateTruncFunctionCall() : // the BigQuery variant, e.g. "DATE_TRUNC(d, YEAR)", // and the Redshift variant, e.g. "DATE_TRUNC('year', DATE '2008-09-08')". 
( + LOOKAHEAD(2) unit = TimeUnitOrName() { args.add(unit); } | AddExpression(args, ExprContext.ACCEPT_SUB_QUERY) @@ -7511,6 +7525,7 @@ SqlIdentifier FunctionName() : } { ( + LOOKAHEAD(2) qualifiedName = CompoundIdentifier() | qualifiedName = ReservedFunctionName() @@ -7703,6 +7718,7 @@ SqlNode JdbcFunctionCall() : LOOKAHEAD(1) ( | | | ) { name = unquotedIdentifier(); } | + LOOKAHEAD(2) // For cases like {fn power(1,2)} and {fn lower('a')} id = ReservedFunctionName() { name = id.getSimple(); } | diff --git a/dingo-calcite/src/main/java/io/dingodb/calcite/DingoDdlExecutor.java b/dingo-calcite/src/main/java/io/dingodb/calcite/DingoDdlExecutor.java index ba335436d1..4c1dd48928 100644 --- a/dingo-calcite/src/main/java/io/dingodb/calcite/DingoDdlExecutor.java +++ b/dingo-calcite/src/main/java/io/dingodb/calcite/DingoDdlExecutor.java @@ -189,6 +189,7 @@ import static io.dingodb.calcite.runtime.DingoResource.DINGO_RESOURCE; import static io.dingodb.common.ddl.FieldTypeChecker.checkModifyTypeCompatible; import static io.dingodb.common.mysql.error.ErrorCode.ErrDropPartitionNonExistent; +import static io.dingodb.common.mysql.error.ErrorCode.ErrDupKeyName; import static io.dingodb.common.mysql.error.ErrorCode.ErrKeyDoesNotExist; import static io.dingodb.common.mysql.error.ErrorCode.ErrModifyColumnNotTran; import static io.dingodb.common.mysql.error.ErrorCode.ErrNoSuchTable; @@ -1686,9 +1687,10 @@ public void execute(SqlAlterAutoIncrement alterAutoIncrement, CalcitePrepare.Con } boolean hasInc = table.getColumns().stream().anyMatch(Column::isAutoIncrement); if (hasInc) { - DdlService.root().rebaseAutoInc( + String warning = DdlService.root().rebaseAutoInc( schema.getSchemaName(), tableName, table.getTableId().seq, alterAutoIncrement.autoInc ); + alterAutoIncrement.setWarning(warning); } } @@ -1746,6 +1748,12 @@ public void execute(SqlAlterRenameIndex sqlAlterRenameIndex, CalcitePrepare.Cont throw DingoErrUtil.newStdErr(ErrKeyDoesNotExist, originIndexName, tableName); } + hasIndex = table.getIndexes().stream() + .anyMatch(indexTable -> indexTable.getName().equalsIgnoreCase(sqlAlterRenameIndex.toIndexName)); + if (hasIndex) { + throw DingoErrUtil.newStdErr(ErrDupKeyName, sqlAlterRenameIndex.toIndexName); + } + DdlService.root().renameIndex( schema.getSchemaId(), schema.getSchemaName(), table, originIndexName, sqlAlterRenameIndex.toIndexName); @@ -2081,20 +2089,20 @@ private static void validateAddColumn(ColumnDefinition newColumn) { }); newColumn.setDefaultValue(defaultVal); } else { - throw DINGO_RESOURCE.invalidDefaultValue(newColumn.getDefaultValue()).ex(); + throw DINGO_RESOURCE.invalidDefaultValue(newColumn.getName()).ex(); } } else if (type instanceof MapType) { if (defaultVal.toUpperCase().startsWith("MAP[") && defaultVal.endsWith("]")) { defaultVal = defaultVal.substring(4, defaultVal.length() - 1); int itemSize = defaultVal.split(",").length; if (itemSize % 2 != 0) { - throw DINGO_RESOURCE.invalidDefaultValue(newColumn.getDefaultValue()).ex(); + throw DINGO_RESOURCE.invalidDefaultValue(newColumn.getName()).ex(); } newColumn.setDefaultValue(defaultVal); } } } catch (Exception e) { - throw DINGO_RESOURCE.invalidDefaultValue(newColumn.getDefaultValue()).ex(); + throw DINGO_RESOURCE.invalidDefaultValue(newColumn.getName()).ex(); } } @@ -2770,6 +2778,7 @@ public static void checkModifyTypes( if (exception != null) { throw exception; } + validateAddColumn(toColDef); // checkModifyCharsetAndCollation } diff --git a/dingo-calcite/src/main/java/io/dingodb/calcite/DingoParser.java 
b/dingo-calcite/src/main/java/io/dingodb/calcite/DingoParser.java index 5f3937cd46..f4a680b2e5 100644 --- a/dingo-calcite/src/main/java/io/dingodb/calcite/DingoParser.java +++ b/dingo-calcite/src/main/java/io/dingodb/calcite/DingoParser.java @@ -110,6 +110,7 @@ import org.apache.calcite.rel.hint.HintStrategyTable; import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider; import org.apache.calcite.rex.RexBuilder; +import org.apache.calcite.runtime.Hook; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlNode; import org.apache.calcite.sql.SqlSetOption; @@ -122,6 +123,7 @@ import org.apache.calcite.sql2rel.SqlToRelConverter; import org.apache.calcite.tools.Program; import org.apache.calcite.tools.Programs; +import org.apache.calcite.util.Holder; import org.apache.calcite.util.Pair; import org.apache.commons.lang3.StringEscapeUtils; import org.apache.commons.lang3.StringUtils; @@ -129,6 +131,7 @@ import java.io.File; import java.sql.Connection; +import java.sql.SQLWarning; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -507,4 +510,14 @@ public long getGcLifeTime() { } } + public SQLWarning getWarning(SqlNode sqlNode) { + if (sqlNode instanceof SqlAlterAutoIncrement) { + SqlAlterAutoIncrement alterAutoIncrement = (SqlAlterAutoIncrement) sqlNode; + if (alterAutoIncrement.getWarning() != null) { + return new SQLWarning(alterAutoIncrement.getWarning(), "1105"); + } + } + return null; + } + } diff --git a/dingo-calcite/src/main/java/io/dingodb/calcite/DingoParserContext.java b/dingo-calcite/src/main/java/io/dingodb/calcite/DingoParserContext.java index 29b3dd5ba5..bf47757d9d 100644 --- a/dingo-calcite/src/main/java/io/dingodb/calcite/DingoParserContext.java +++ b/dingo-calcite/src/main/java/io/dingodb/calcite/DingoParserContext.java @@ -80,7 +80,7 @@ public final class DingoParserContext implements Context { @Setter @Getter - private List warningList; + private List warningList = new ArrayList<>(); @Getter private final Properties sessionVariables; @@ -250,6 +250,13 @@ public boolean keyExists(String field) { return options != null && options.containsKey(field); } + public void addWarning(SQLWarning warning) { + this.getWarningList().clear(); + if (warning != null) { + this.getWarningList().add(warning); + } + } + public synchronized void setUsedSchema(CalciteSchema schema) { this.usedSchema = schema; this.defaultSchemaName = schema.getName(); diff --git a/dingo-calcite/src/main/java/io/dingodb/calcite/executor/ShowCreateTableExecutor.java b/dingo-calcite/src/main/java/io/dingodb/calcite/executor/ShowCreateTableExecutor.java index fdff53727b..372658f5ca 100644 --- a/dingo-calcite/src/main/java/io/dingodb/calcite/executor/ShowCreateTableExecutor.java +++ b/dingo-calcite/src/main/java/io/dingodb/calcite/executor/ShowCreateTableExecutor.java @@ -146,6 +146,9 @@ private String getCreateTable() { if (column.isAutoIncrement()) { createTableSqlStr.append(" auto_increment"); } + if (StringUtils.isNotEmpty(column.getComment())) { + createTableSqlStr.append(" COMMENT ").append(" '").append(column.getComment()).append("'"); + } if (i < colSize - 1) { createTableSqlStr.append(","); } diff --git a/dingo-calcite/src/main/java/io/dingodb/calcite/executor/ShowWarningsExecutor.java b/dingo-calcite/src/main/java/io/dingodb/calcite/executor/ShowWarningsExecutor.java index 214865916f..c1ff5e69b6 100644 --- a/dingo-calcite/src/main/java/io/dingodb/calcite/executor/ShowWarningsExecutor.java +++ 
b/dingo-calcite/src/main/java/io/dingodb/calcite/executor/ShowWarningsExecutor.java @@ -40,7 +40,7 @@ public Iterator getIterator() { } List res = warningList .stream() - .map(sqlWarning -> new Object[]{"Error", sqlWarning.getErrorCode(), sqlWarning.getMessage()}) + .map(sqlWarning -> new Object[]{"Warning", sqlWarning.getErrorCode(), sqlWarning.getMessage()}) .collect(Collectors.toList()); return res.iterator(); } diff --git a/dingo-calcite/src/main/java/io/dingodb/calcite/grammar/ddl/SqlAlterAutoIncrement.java b/dingo-calcite/src/main/java/io/dingodb/calcite/grammar/ddl/SqlAlterAutoIncrement.java index bd6cb8c205..0532be6170 100644 --- a/dingo-calcite/src/main/java/io/dingodb/calcite/grammar/ddl/SqlAlterAutoIncrement.java +++ b/dingo-calcite/src/main/java/io/dingodb/calcite/grammar/ddl/SqlAlterAutoIncrement.java @@ -16,6 +16,8 @@ package io.dingodb.calcite.grammar.ddl; +import lombok.Getter; +import lombok.Setter; import org.apache.calcite.sql.SqlIdentifier; import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.SqlOperator; @@ -27,6 +29,10 @@ public class SqlAlterAutoIncrement extends SqlAlterTable { public Long autoInc; + @Getter + @Setter + private String warning; + private static final SqlOperator OPERATOR = new SqlSpecialOperator("ALTER TABLE INCREMENT", SqlKind.ALTER_TABLE); diff --git a/dingo-calcite/src/main/java/io/dingodb/calcite/program/DingoPrograms.java b/dingo-calcite/src/main/java/io/dingodb/calcite/program/DingoPrograms.java deleted file mode 100644 index a94b6f30a9..0000000000 --- a/dingo-calcite/src/main/java/io/dingodb/calcite/program/DingoPrograms.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2021 DataCanvas - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.dingodb.calcite.program; - -import com.google.common.collect.ImmutableList; -import org.apache.calcite.plan.hep.HepProgram; -import org.apache.calcite.plan.hep.HepProgramBuilder; -import org.apache.calcite.rel.metadata.RelMetadataProvider; -import org.apache.calcite.rel.rules.CoreRules; -import org.apache.calcite.tools.Program; -import org.apache.calcite.tools.Programs; - -public final class DingoPrograms { - private DingoPrograms() { - } - - public static Program subQuery(RelMetadataProvider metadataProvider) { - HepProgramBuilder builder = HepProgram.builder(); - builder.addRuleCollection(ImmutableList.of( - CoreRules.FILTER_SUB_QUERY_TO_CORRELATE, CoreRules.PROJECT_SUB_QUERY_TO_CORRELATE, - CoreRules.JOIN_SUB_QUERY_TO_CORRELATE, - CoreRules.JOIN_DERIVE_IS_NOT_NULL_FILTER_RULE - //CoreRules.FILTER_INTO_JOIN - )); - return Programs.of(builder.build(), true, metadataProvider); - } - -} diff --git a/dingo-calcite/src/main/java/org/apache/calcite/sql/ddl/DingoSqlColumn.java b/dingo-calcite/src/main/java/org/apache/calcite/sql/ddl/DingoSqlColumn.java index 8d74f452be..6d143ad000 100644 --- a/dingo-calcite/src/main/java/org/apache/calcite/sql/ddl/DingoSqlColumn.java +++ b/dingo-calcite/src/main/java/org/apache/calcite/sql/ddl/DingoSqlColumn.java @@ -52,6 +52,9 @@ public DingoSqlColumn( String collate ) { super(pos, name, dataType, expression, strategy); + if (comment != null) { + comment = comment.startsWith("'") ? comment.substring(1, comment.length() - 1) : comment; + } this.autoIncrement = autoIncrement; this.comment = comment; this.primaryKey = primary; diff --git a/dingo-common/src/main/java/io/dingodb/common/mysql/DingoErr.java b/dingo-common/src/main/java/io/dingodb/common/mysql/DingoErr.java index 1d16badf2e..308634edd7 100644 --- a/dingo-common/src/main/java/io/dingodb/common/mysql/DingoErr.java +++ b/dingo-common/src/main/java/io/dingodb/common/mysql/DingoErr.java @@ -23,15 +23,24 @@ public class DingoErr { public String state; public String errorMsg; public boolean encodeError; + public String warning; public DingoErr() { } + public DingoErr(String warning) { + this.warning = warning; + } + public DingoErr(int errorCode, String state, String errorMsg) { this.errorCode = errorCode; this.state = state; - this.errorMsg = errorMsg; + if (errorMsg == null) { + this.errorMsg = "NullPointException"; + } else { + this.errorMsg = errorMsg; + } } public void fillErrorByArgs(Object... 
param) { diff --git a/dingo-common/src/main/java/io/dingodb/common/mysql/error/ErrorMessage.java b/dingo-common/src/main/java/io/dingodb/common/mysql/error/ErrorMessage.java index 196df28b6c..403c09ec04 100644 --- a/dingo-common/src/main/java/io/dingodb/common/mysql/error/ErrorMessage.java +++ b/dingo-common/src/main/java/io/dingodb/common/mysql/error/ErrorMessage.java @@ -44,6 +44,7 @@ public final class ErrorMessage { errorMap.put(ErrKeyDoesNotExist, "Key '%s' doesn't exist in table '%s'"); errorMap.put(ErrPartitionMgmtOnNonpartitioned, "Partition management on a not partitioned table is not possible"); errorMap.put(ErrDropPartitionNonExistent, "Error in list of partitions to DROP"); + errorMap.put(ErrDupKeyName, "Duplicate key name '%s'"); } private ErrorMessage() { diff --git a/dingo-driver/host/src/main/java/io/dingodb/driver/DingoConnection.java b/dingo-driver/host/src/main/java/io/dingodb/driver/DingoConnection.java index 25013affe9..2da61fc70a 100644 --- a/dingo-driver/host/src/main/java/io/dingodb/driver/DingoConnection.java +++ b/dingo-driver/host/src/main/java/io/dingodb/driver/DingoConnection.java @@ -66,6 +66,7 @@ import java.sql.ResultSet; import java.sql.SQLClientInfoException; import java.sql.SQLException; +import java.sql.SQLWarning; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -580,6 +581,21 @@ public String getSchema() { return "dingo"; } + @Override + public SQLWarning getWarnings() { + List warningList = this.context.getWarningList(); + SQLWarning warning = null; + if (!warningList.isEmpty()) { + warning = warningList.get(0); + } + return warning; + } + + @Override + public void clearWarnings() throws SQLException { + this.context.getWarningList().clear(); + } + public void removeLockDDLJobs(Map jobsVerMap, Map jobsIdsMap) { Map relatedTableForMdl = this.context.getRootSchema().getRelatedTableForMdl(); //LogUtils.info(log, "[ddl] check mdl, rootCalciteSchema:{}, mdl size:{}", diff --git a/dingo-driver/host/src/main/java/io/dingodb/driver/DingoDriverParser.java b/dingo-driver/host/src/main/java/io/dingodb/driver/DingoDriverParser.java index 4ae4ad751a..bef8cb4bd9 100644 --- a/dingo-driver/host/src/main/java/io/dingodb/driver/DingoDriverParser.java +++ b/dingo-driver/host/src/main/java/io/dingodb/driver/DingoDriverParser.java @@ -113,6 +113,7 @@ import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.SQLException; +import java.sql.SQLWarning; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; @@ -306,6 +307,8 @@ public Meta.Signature parseQuery( beforeDdl(connection, sqlNode); final DdlExecutor ddlExecutor = PARSER_CONFIG.parserFactory().getDdlExecutor(); ddlExecutor.executeDdl(connection, sqlNode); + SQLWarning warning = getWarning(sqlNode); + connection.getContext().addWarning(warning); break; } catch (IllegalArgumentException e) { // Method not found: diff --git a/dingo-driver/host/src/main/java/io/dingodb/driver/DingoMeta.java b/dingo-driver/host/src/main/java/io/dingodb/driver/DingoMeta.java index b9cf64f7d3..7bf7fc25c7 100644 --- a/dingo-driver/host/src/main/java/io/dingodb/driver/DingoMeta.java +++ b/dingo-driver/host/src/main/java/io/dingodb/driver/DingoMeta.java @@ -210,7 +210,13 @@ private boolean verifyPrivilege(SubSnapshotSchema schema, String tableName, Stri @NonNull private Iterator createIterator(@NonNull AvaticaStatement statement) { if (statement instanceof DingoStatement) { - return ((DingoStatement) statement).createIterator(jobManager); + Iterator result = 
((DingoStatement) statement).createIterator(jobManager); + try { + connection.clearWarnings(); + } catch (SQLException e) { + LogUtils.error(log, e.getMessage(), e); + } + return result; } else if (statement instanceof DingoPreparedStatement) { return ((DingoPreparedStatement) statement).createIterator(jobManager); } diff --git a/dingo-driver/mysql-service/src/main/java/io/dingodb/driver/mysql/command/MysqlCommands.java b/dingo-driver/mysql-service/src/main/java/io/dingodb/driver/mysql/command/MysqlCommands.java index e913193c64..043d86d8df 100644 --- a/dingo-driver/mysql-service/src/main/java/io/dingodb/driver/mysql/command/MysqlCommands.java +++ b/dingo-driver/mysql-service/src/main/java/io/dingodb/driver/mysql/command/MysqlCommands.java @@ -210,6 +210,9 @@ public void executeSingleQuery(String sql, AtomicLong packetId, // update insert delete int count = statement.getUpdateCount(); SQLWarning sqlWarning = statement.getWarnings(); + if (sqlWarning == null) { + sqlWarning = mysqlConnection.getConnection().getWarnings(); + } DingoStatement dingoStatement = (DingoStatement) statement; OKPacket okPacket; int initServerStatus = dingoStatement.getServerStatus(); diff --git a/dingo-executor/src/main/java/io/dingodb/server/executor/ddl/DdlWorker.java b/dingo-executor/src/main/java/io/dingodb/server/executor/ddl/DdlWorker.java index 26189b0f44..91ca349665 100644 --- a/dingo-executor/src/main/java/io/dingodb/server/executor/ddl/DdlWorker.java +++ b/dingo-executor/src/main/java/io/dingodb/server/executor/ddl/DdlWorker.java @@ -78,6 +78,7 @@ import static io.dingodb.common.mysql.error.ErrorCode.ErrDBDropExists; import static io.dingodb.common.mysql.error.ErrorCode.ErrDropPartitionNonExistent; import static io.dingodb.common.mysql.error.ErrorCode.ErrDupFieldName; +import static io.dingodb.common.mysql.error.ErrorCode.ErrDupKeyName; import static io.dingodb.common.mysql.error.ErrorCode.ErrInvalidDDLState; import static io.dingodb.common.mysql.error.ErrorCode.ErrKeyDoesNotExist; import static io.dingodb.common.mysql.error.ErrorCode.ErrNoSuchTable; @@ -354,7 +355,7 @@ public static Pair onCreateSchema(DdlContext dc, DdlJob job) { } if (schemaInfo.getSchemaState() == SchemaState.SCHEMA_NONE) { - synchronized (infoSchemaService) { + synchronized (DdlContext.INSTANCE) { schemaInfo.setSchemaState(SchemaState.SCHEMA_PUBLIC); SchemaInfo schemaInfoTmp = infoSchemaService.getSchema(schemaInfo.getName()); if (schemaInfoTmp != null && schemaInfoTmp.getSchemaState() == SchemaState.SCHEMA_PUBLIC) { @@ -508,7 +509,10 @@ public static Pair onTruncateTable(DdlContext dc, DdlJob job) { try { ms.truncateTable(job.getTableName(), newTableId, job.getId()); } catch (Exception e) { + job.setDingoErr(DingoErrUtil.newInternalErr(e.getMessage())); + job.setState(JobState.jobStateCancelled); LogUtils.error(log, "truncate table error", e); + return Pair.of(0L, job.getDingoErr().errorMsg); } //job.setTableId(tableId); Pair res = updateSchemaVersion(dc, job); @@ -1791,6 +1795,8 @@ public Pair onRebaseAuto(DdlContext dc, DdlJob job) { if (tableRes.getValue() != null && tableRes.getKey() == null) { return Pair.of(0L, tableRes.getValue()); } + TableDefinitionWithId withId = tableRes.getKey(); + withId.getTableDefinition().setAutoIncrement(autoInc); AutoIncrementService autoIncrementService = AutoIncrementService.INSTANCE; io.dingodb.server.executor.common.DingoCommonId dingoCommonId @@ -1800,9 +1806,12 @@ public Pair onRebaseAuto(DdlContext dc, DdlJob job) { long current = autoIncrementService.current(dingoCommonId); if (autoInc > 
current) { autoIncrementService.updateIncrement(dingoCommonId, autoInc); + } else { + job.setWarning("can not reset AUTO_INCREMENT to " + + autoInc + " without FORCE option, using " + current + " instead"); } job.finishTableJob(JobState.jobStateDone, SchemaState.SCHEMA_PUBLIC); - return updateSchemaVersion(dc, job); + return TableUtil.updateVersionAndTableInfos(dc, job, withId, true); } public Pair onResetAutoInc(DdlContext dc, DdlJob job) { @@ -1843,25 +1852,43 @@ public Pair onRenameIndex(DdlContext dc, DdlJob job) { } String toName = job.getArgs().get(1).toString(); String originName = job.getArgs().get(0).toString(); + try { + io.dingodb.store.proxy.meta.MetaService.cleanName(toName, "Index"); + } catch (Exception e) { + job.setState(JobState.jobStateCancelled); + return Pair.of(0L, e.getMessage()); + } Pair tableRes = checkTableExistAndCancelNonExistJob(job, job.getSchemaId()); if (tableRes.getValue() != null && tableRes.getKey() == null) { return Pair.of(0L, tableRes.getValue()); } - List indexList = InfoSchemaService.root().listIndex(job.getSchemaId(), job.getTableId()); - TableDefinitionWithId indexWithId = indexList.stream() - .map(idxTable -> (TableDefinitionWithId)idxTable) - .filter(idxTable -> - idxTable.getTableDefinition().getName().endsWith(originName) - || idxTable.getTableDefinition().getName().endsWith(originName.toUpperCase())).findFirst().orElse(null); - if (indexWithId == null) { - job.setDingoErr(DingoErrUtil.newInternalErr(ErrKeyDoesNotExist, originName, job.getTableName())); - return Pair.of(0L, job.getDingoErr().errorMsg); - } - toName = job.getTableName() + "." + toName; - indexWithId.getTableDefinition().setName(toName); + synchronized (dc) { + List indexList = InfoSchemaService.root().listIndex(job.getSchemaId(), job.getTableId()); + TableDefinitionWithId indexWithId = indexList.stream() + .map(idxTable -> (TableDefinitionWithId) idxTable) + .filter(idxTable -> + idxTable.getTableDefinition().getName().endsWith(originName) + || idxTable.getTableDefinition().getName().endsWith(originName.toUpperCase())).findFirst().orElse(null); + if (indexWithId == null) { + job.setDingoErr(DingoErrUtil.newInternalErr(ErrKeyDoesNotExist, originName, job.getTableName())); + return Pair.of(0L, job.getDingoErr().errorMsg); + } + TableDefinitionWithId toIndex = indexList.stream().map(idxTable -> (TableDefinitionWithId) idxTable) + .filter(idxTable -> + idxTable.getTableDefinition().getName().endsWith(toName) + || idxTable.getTableDefinition().getName().endsWith(toName.toUpperCase())) + .findFirst().orElse(null); + if (toIndex != null) { + job.setDingoErr(DingoErrUtil.newInternalErr(ErrDupKeyName, toName)); + return Pair.of(0L, job.getDingoErr().errorMsg); + } - job.finishTableJob(JobState.jobStateDone, SchemaState.SCHEMA_PUBLIC); - return TableUtil.updateVersionAndIndexInfos(dc, job, indexWithId, true); + String toNameTmp = job.getTableName() + "." 
+ toName; + indexWithId.getTableDefinition().setName(toNameTmp); + + job.finishTableJob(JobState.jobStateDone, SchemaState.SCHEMA_PUBLIC); + return TableUtil.updateVersionAndIndexInfos(dc, job, indexWithId, true); + } } public Pair onModifyComment(DdlContext dc, DdlJob job) { diff --git a/dingo-meta-api/src/main/java/io/dingodb/meta/DdlService.java b/dingo-meta-api/src/main/java/io/dingodb/meta/DdlService.java index bbf43a0ca2..282b1ab32c 100644 --- a/dingo-meta-api/src/main/java/io/dingodb/meta/DdlService.java +++ b/dingo-meta-api/src/main/java/io/dingodb/meta/DdlService.java @@ -118,8 +118,8 @@ default void recoverSchema(RecoverInfo recoverInfo) { void dropSequence(String sequenceName, String connId); - default void rebaseAutoInc(String schemaName, String tableName, long tableId, long autoInc) { - + default String rebaseAutoInc(String schemaName, String tableName, long tableId, long autoInc) { + return null; } default void resetAutoInc() { diff --git a/dingo-meta-api/src/main/java/io/dingodb/meta/ddl/InfoSchemaBuilder.java b/dingo-meta-api/src/main/java/io/dingodb/meta/ddl/InfoSchemaBuilder.java index aaf8d8f0ce..73b65a1922 100644 --- a/dingo-meta-api/src/main/java/io/dingodb/meta/ddl/InfoSchemaBuilder.java +++ b/dingo-meta-api/src/main/java/io/dingodb/meta/ddl/InfoSchemaBuilder.java @@ -463,8 +463,11 @@ public Pair, String> applyModifyColumn(SchemaDiff diff) { } public Pair, String> applyRebaseAuto(SchemaDiff diff) { - MetaService.root().rebaseAutoInc( - new CommonId(CommonId.CommonType.TABLE, diff.getSchemaId(), diff.getTableId()) + Table table = InfoSchemaService.root().getTableDef(diff.getSchemaId(), diff.getTableId()); + long autoId = table.getAutoIncrement(); + autoId--; + MetaService.root().updateAutoIncrement( + new CommonId(CommonId.CommonType.TABLE, diff.getSchemaId(), diff.getTableId()), autoId ); List tableIdList = new ArrayList<>(); tableIdList.add(diff.getTableId()); diff --git a/dingo-store-proxy/src/main/java/io/dingodb/store/proxy/ddl/DdlHandler.java b/dingo-store-proxy/src/main/java/io/dingodb/store/proxy/ddl/DdlHandler.java index 34c08c3d1f..db3d31cbac 100644 --- a/dingo-store-proxy/src/main/java/io/dingodb/store/proxy/ddl/DdlHandler.java +++ b/dingo-store-proxy/src/main/java/io/dingodb/store/proxy/ddl/DdlHandler.java @@ -522,6 +522,7 @@ public static void doDdlJob(DdlJob job) { while (!Thread.interrupted()) { Pair res = historyJob(job.getId()); if (res.getKey()) { + job.setWarning(res.getValue().warning); return; } else if (res.getValue() != null) { LogUtils.error(log, "[ddl-error] doDdlJob error, reason: {}, job: {}", res.getValue(), job); @@ -555,7 +556,7 @@ public static Pair historyJob(long jobId) { } } if (ddlJob.getState() == JobState.jobStateSynced && ddlJob.getError() == null) { - return Pair.of(true, null); + return Pair.of(true, new DingoErr(ddlJob.getWarning())); } if (ddlJob.getError() != null) { if (ddlJob.getDingoErr() == null || ddlJob.getDingoErr().errorCode == 0) { diff --git a/dingo-store-proxy/src/main/java/io/dingodb/store/proxy/ddl/DdlService.java b/dingo-store-proxy/src/main/java/io/dingodb/store/proxy/ddl/DdlService.java index 0f4b60be70..293ba9097b 100644 --- a/dingo-store-proxy/src/main/java/io/dingodb/store/proxy/ddl/DdlService.java +++ b/dingo-store-proxy/src/main/java/io/dingodb/store/proxy/ddl/DdlService.java @@ -164,7 +164,7 @@ public Table getTable(CommonId id) { } @Override - public void rebaseAutoInc(String schemaName, String tableName, long tableId, long autoInc) { + public String rebaseAutoInc(String schemaName, String tableName, 
long tableId, long autoInc) { SchemaInfo schemaInfo = InfoSchemaService.root().getSchema(schemaName); long schemaId = schemaInfo.getSchemaId(); DdlJob job = DdlJob.builder() @@ -178,6 +178,7 @@ public void rebaseAutoInc(String schemaName, String tableName, long tableId, lon args.add(autoInc); job.setArgs(args); DdlHandler.doDdlJob(job); + return job.getWarning(); } @Override diff --git a/dingo-store-proxy/src/main/java/io/dingodb/store/proxy/meta/MetaService.java b/dingo-store-proxy/src/main/java/io/dingodb/store/proxy/meta/MetaService.java index a0c3415b84..3f1ff9cfc1 100644 --- a/dingo-store-proxy/src/main/java/io/dingodb/store/proxy/meta/MetaService.java +++ b/dingo-store-proxy/src/main/java/io/dingodb/store/proxy/meta/MetaService.java @@ -66,10 +66,12 @@ import io.dingodb.sdk.service.entity.common.RegionType; import io.dingodb.sdk.service.entity.coordinator.CreateIdsRequest; import io.dingodb.sdk.service.entity.coordinator.CreateRegionRequest; +import io.dingodb.sdk.service.entity.coordinator.CreateRegionResponse; import io.dingodb.sdk.service.entity.coordinator.DropRegionRequest; import io.dingodb.sdk.service.entity.coordinator.GetRegionMapRequest; import io.dingodb.sdk.service.entity.coordinator.IdEpochType; import io.dingodb.sdk.service.entity.coordinator.QueryRegionRequest; +import io.dingodb.sdk.service.entity.coordinator.QueryRegionResponse; import io.dingodb.sdk.service.entity.coordinator.RegionCmd.RequestNest.SplitRequest; import io.dingodb.sdk.service.entity.coordinator.SplitRegionRequest; import io.dingodb.sdk.service.entity.meta.CreateAutoIncrementRequest; @@ -192,7 +194,7 @@ private static String cleanSchemaName(String name) { return cleanName(name, "Schema"); } - private static String cleanName(String name, String source) { + public static String cleanName(String name, String source) { if (warnPattern.matcher(name).matches()) { LogUtils.warn(log, "{} name currently only supports uppercase letters, LowerCase -> UpperCase", source); @@ -376,6 +378,7 @@ public long createTables( // table region io.dingodb.sdk.service.entity.meta.TableDefinition withIdTableDefinition = tableDefinitionWithId.getTableDefinition(); + long regionId = 0; for (Partition partition : withIdTableDefinition.getTablePartition().getPartitions()) { CreateRegionRequest request = CreateRegionRequest .builder() @@ -391,8 +394,13 @@ public long createTables( .partId(partition.getId().getEntityId()) .tenantId(tableDefinitionWithId.getTenantId()) .build(); - coordinatorService.createRegion(tso(), request); + CreateRegionResponse createRegionResponse = coordinatorService.createRegion(tso(), request); + regionId = createRegionResponse.getRegionId(); } + // check replica count + addAfter(tableDefinitionWithId, regionId, false); + // check replica end + long incrementColCount = tableDefinition.getColumns() .stream() .filter(ColumnDefinition::isAutoIncrement) @@ -506,8 +514,10 @@ public long createTables( .indexId(withId.getTableId().getEntityId()) .indexParameter(indexParameter) .build(); - coordinatorService.createRegion(tso(), request); + CreateRegionResponse createRegionResponse = coordinatorService.createRegion(tso(), request); + regionId = createRegionResponse.getRegionId(); } + addAfter(withId, regionId, true); } return tableEntityId; } @@ -869,7 +879,7 @@ public Pair checkDropDiskAnnIndex(@NonNull CommonId index) { for (RangeDistribution rangeDistribution : rangeDistributions) { StoreInstance instance = io.dingodb.exec.Services.KV_STORE.getInstance(index, rangeDistribution.id()); String diskAnnStatus = 
instance.diskAnnStatus(tso, index); - if("DISKANN_BUILDING".equalsIgnoreCase(diskAnnStatus)){ + if ("DISKANN_BUILDING".equalsIgnoreCase(diskAnnStatus)) { msg = "diskann is building, please wait."; noDelete = true; break; @@ -974,9 +984,14 @@ public long truncateTable(long schemaId, @NonNull String tableName, long tableEn throw new RuntimeException(checkDropDiskAnn.getValue()); } } + // Generate new table ids. boolean autoInc = table.getTableDefinition().getColumns().stream() .anyMatch(io.dingodb.sdk.service.entity.meta.ColumnDefinition::isAutoIncrement); + checkRegionConsistent(table, false); + for (TableDefinitionWithId index : indexes) { + checkRegionConsistent(index, true); + } long ts = TsoService.getDefault().tso(); dropRegionByTable(MAPPER.idFrom(table.getTableId()), jobId, ts, autoInc); @@ -1459,6 +1474,29 @@ public void invalidateDistribution(CommonId tableId) { this.cache.invalidateDistribution(tableId); } + public void checkRegionConsistent(TableDefinitionWithId tableDefinitionWithId, boolean index) { + if (!index) { + int replica = io.dingodb.meta.InfoSchemaService.root().getStoreReplica(); + if (tableDefinitionWithId.getTableDefinition().getReplica() != replica) { + throw DingoErrUtil.newStdErr("Check for inconsistent number of copies"); + } + } else { + String indexType = tableDefinitionWithId.getTableDefinition() + .getProperties().getOrDefault("indexType", "scalar"); + int replica = 0; + if (indexType.equalsIgnoreCase("scalar")) { + replica = io.dingodb.meta.InfoSchemaService.root().getStoreReplica(); + } else if (indexType.equalsIgnoreCase("vector")) { + replica = io.dingodb.meta.InfoSchemaService.root().getIndexReplica(); + } else if (indexType.equalsIgnoreCase("document")) { + replica = io.dingodb.meta.InfoSchemaService.root().getDocumentReplica(); + } + if (tableDefinitionWithId.getTableDefinition().getReplica() != replica) { + throw DingoErrUtil.newStdErr("Check for inconsistent number of copies"); + } + } + } + public void createTenant(Tenant tenant) { CreateTenantRequest createTenantRequest = CreateTenantRequest.builder() .tenant(mapping(tenant)) @@ -1506,6 +1544,7 @@ public static void validatePartBy(TableDefinition tableDefinition) { public void createIndexRegion(TableDefinitionWithId withId, CommonId tableId, int replica) { CoordinatorService coordinatorService = Services.coordinatorService(Configuration.coordinatorSet()); io.dingodb.sdk.service.entity.meta.TableDefinition definition = withId.getTableDefinition(); + long regionId = 0; for (Partition partition : definition.getTablePartition().getPartitions()) { IndexParameter indexParameter = definition.getIndexParameter(); if (indexParameter.getVectorIndexParameter() != null) { @@ -1531,11 +1570,23 @@ public void createIndexRegion(TableDefinitionWithId withId, CommonId tableId, in .indexParameter(indexParameter) .build(); LogUtils.info(log, "create index region, range:{}", partition.getRange()); - try { - coordinatorService.createRegion(tso(), request); - } catch (Exception e) { - LogUtils.error(log, "create index region error, range:{}", partition.getRange()); - throw e; + CreateRegionResponse response = coordinatorService.createRegion(tso(), request); + regionId = response.getRegionId(); + } + addAfter(withId, regionId, true); + } + + public void addAfter(TableDefinitionWithId withId, long regionId, boolean index) { + CoordinatorService coordinatorService = Services.coordinatorService(Configuration.coordinatorSet()); + QueryRegionRequest queryRegionRequest = 
QueryRegionRequest.builder().regionId(regionId).build(); + QueryRegionResponse queryRegionResponse = coordinatorService.queryRegion(tso(), queryRegionRequest); + int peerCnt = queryRegionResponse.getRegion().getDefinition().getPeers().size(); + if (withId.getTableDefinition().getReplica() != peerCnt) { + withId.getTableDefinition().setReplica(peerCnt); + if (index) { + io.dingodb.meta.InfoSchemaService.root().updateIndex(withId.getTableId().getParentEntityId(), withId); + } else { + io.dingodb.meta.InfoSchemaService.root().updateTable(withId.getTableId().getParentEntityId(), withId); } } }
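
Note on the MetaService hunks above: the heart of this fix is that, after each `CreateRegionRequest`, the executor queries the region it just created and reconciles the replica count stored in the table definition with the peer count the coordinator actually reports (`addAfter`), and that destructive DDL such as `TRUNCATE` first verifies the recorded replica count against the expected value (`checkRegionConsistent`). The sketch below is a minimal illustration of that reconciliation pattern only; `TableMeta`, `RegionInfo` and `MetaStore` are hypothetical stand-ins for `TableDefinitionWithId`, `QueryRegionResponse` and `InfoSchemaService`, not the DingoDB API.

```java
import java.util.List;

// Minimal sketch of the replica-reconciliation idea in this patch.
// All types here are illustrative stand-ins, not DingoDB classes.
public final class ReplicaReconciler {

    static final class TableMeta {
        String name;
        int replica;            // replica count recorded in meta
        TableMeta(String name, int replica) { this.name = name; this.replica = replica; }
    }

    static final class RegionInfo {
        List<String> peers;     // one entry per actual replica (region peer)
        RegionInfo(List<String> peers) { this.peers = peers; }
    }

    interface MetaStore {
        void updateTable(TableMeta meta);   // persist the corrected definition
    }

    /** After region creation: if meta disagrees with the actual peer count, repair meta. */
    static void reconcile(TableMeta meta, RegionInfo region, MetaStore store) {
        int actual = region.peers.size();
        if (meta.replica != actual) {
            meta.replica = actual;          // trust the coordinator, not the stale record
            store.updateTable(meta);
        }
    }

    /** Before destructive DDL: refuse to proceed when meta and the expected count diverge. */
    static void checkConsistent(TableMeta meta, int expectedReplica) {
        if (meta.replica != expectedReplica) {
            throw new IllegalStateException("replica count in meta (" + meta.replica
                + ") differs from expected (" + expectedReplica + ")");
        }
    }

    public static void main(String[] args) {
        TableMeta meta = new TableMeta("t1", 1);
        RegionInfo region = new RegionInfo(List.of("store-1", "store-2", "store-3"));
        reconcile(meta, region, m -> System.out.println(m.name + " replica corrected to " + m.replica));
        checkConsistent(meta, 3);   // passes once meta has been reconciled
    }
}
```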
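The warning plumbing added in `SqlAlterAutoIncrement`, `DingoParserContext`, `DingoConnection` and `MysqlCommands` means that an `ALTER TABLE ... AUTO_INCREMENT` rebase which is ignored (the new value is not greater than the current counter) now surfaces as a `SQLWarning` instead of disappearing silently. A client can read it through the standard JDBC API roughly as below; the JDBC URL, credentials and table name are placeholders, not values taken from this patch.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLWarning;
import java.sql.Statement;

public final class RebaseWarningDemo {
    public static void main(String[] args) throws Exception {
        // Placeholder connection string; substitute the real DingoDB JDBC URL.
        try (Connection conn = DriverManager.getConnection("jdbc:dingo:///demo", "root", "");
             Statement stmt = conn.createStatement()) {

            // If 100 is not greater than the current auto-increment counter,
            // the DDL still succeeds but a warning is recorded.
            stmt.executeUpdate("ALTER TABLE t1 AUTO_INCREMENT = 100");

            SQLWarning w = stmt.getWarnings();
            if (w == null) {
                // MysqlCommands falls back to the connection-level warning, so do the same here.
                w = conn.getWarnings();
            }
            for (; w != null; w = w.getNextWarning()) {
                System.out.println("warning: " + w.getMessage());
            }
            // The same text is also visible via SHOW WARNINGS.
        }
    }
}
```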
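The `DingoDdlExecutor` and `DdlWorker` hunks both add a duplicate-name guard before `ALTER TABLE ... RENAME INDEX`. The idea, shown below with a plain list of index names standing in for `table.getIndexes()`, is a case-insensitive membership test that fails fast with the MySQL-style "Duplicate key name" error; the class and exception types here are illustrative only.

```java
import java.util.List;

public final class RenameIndexGuard {
    /** Throws if the source name is missing or the target name already exists (case-insensitive). */
    static void checkRename(List<String> existingIndexes, String from, String to) {
        boolean fromExists = existingIndexes.stream().anyMatch(n -> n.equalsIgnoreCase(from));
        if (!fromExists) {
            throw new IllegalArgumentException("Key '" + from + "' doesn't exist in table");
        }
        boolean toExists = existingIndexes.stream().anyMatch(n -> n.equalsIgnoreCase(to));
        if (toExists) {
            throw new IllegalArgumentException("Duplicate key name '" + to + "'");
        }
    }

    public static void main(String[] args) {
        checkRename(List.of("IDX_A", "IDX_B"), "idx_a", "idx_c");   // ok
        checkRename(List.of("IDX_A", "IDX_B"), "idx_a", "idx_b");   // throws: Duplicate key name 'idx_b'
    }
}
```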