Extend the index parameters to support the float type (#74)
laojianzi authored Jul 15, 2024
1 parent fa2bec7 commit 666418f
Showing 10 changed files with 1,185 additions and 3 deletions.
5 changes: 5 additions & 0 deletions parser/ast.go
@@ -1274,6 +1274,11 @@ func (a *TableIndex) String(level int) string {
 	builder.WriteString("INDEX")
 	builder.WriteByte(' ')
 	builder.WriteString(a.Name.String(0))
+	// a.ColumnExpr = *Ident --- e.g. INDEX idx column TYPE ...
+	// a.ColumnExpr = *ParamExprList --- e.g. INDEX idx(column) TYPE ...
+	if _, ok := a.ColumnExpr.(*Ident); ok {
+		builder.WriteByte(' ')
+	}
 	builder.WriteString(a.ColumnExpr.String(level))
 	builder.WriteByte(' ')
 	builder.WriteString("TYPE")
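The added branch only decides whether a space separates the index name from its column expression: a bare identifier renders as INDEX idx column TYPE ..., while a parenthesized list renders as INDEX idx(column) TYPE .... A self-contained sketch of the same branching, for reference (renderIndex and its arguments are illustrative stand-ins, not part of the package):

package main

import (
	"fmt"
	"strings"
)

// renderIndex mirrors the branch added to TableIndex.String: a bare
// identifier column needs a separating space, while a parenthesized
// expression list already begins with '(' and does not.
func renderIndex(name, columnExpr string, columnIsIdent bool) string {
	var b strings.Builder
	b.WriteString("INDEX ")
	b.WriteString(name)
	if columnIsIdent {
		b.WriteByte(' ')
	}
	b.WriteString(columnExpr)
	return b.String()
}

func main() {
	fmt.Println(renderIndex("api_id_idx", "api_id", true)) // INDEX api_id_idx api_id
	fmt.Println(renderIndex("my_index", "(f0)", false))    // INDEX my_index(f0)
}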
2 changes: 1 addition & 1 deletion parser/parser_column.go
@@ -672,7 +672,7 @@ func (p *Parser) parseColumnType(_ Pos) (Expr, error) { // nolint:funlen
 		}
 		// like Datetime('Asia/Dubai')
 		return p.parseColumnTypeWithParams(ident, p.Pos())
-	case p.matchTokenKind(TokenInt):
+	case p.matchTokenKind(TokenInt), p.matchTokenKind(TokenFloat):
 		// fixed size
 		return p.parseColumnTypeWithParams(ident, p.Pos())
 	default:
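This is the change that lets a float argument such as bloom_filter(0.01) reach parseColumnTypeWithParams. A quick round-trip check (a sketch, assuming the NewParser/ParseStmts entry points and module path shown in the repository README):

package main

import (
	"fmt"

	clickhouse "github.com/AfterShip/clickhouse-sql-parser/parser"
)

func main() {
	// Before this commit the float literal 0.01 in the index type
	// arguments failed to parse; it now round-trips like an integer.
	sql := "ALTER TABLE test.events_local ADD INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3;"
	stmts, err := clickhouse.NewParser(sql).ParseStmts()
	if err != nil {
		panic(err)
	}
	for _, stmt := range stmts {
		fmt.Println(stmt.String(0)) // formatted SQL, float argument preserved
	}
}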
2 changes: 1 addition & 1 deletion parser/parser_common.go
@@ -234,7 +234,7 @@ func (p *Parser) parseString(pos Pos) (*StringLiteral, error) {
 
 func (p *Parser) parseLiteral(pos Pos) (Literal, error) {
 	switch {
-	case p.matchTokenKind(TokenInt):
+	case p.matchTokenKind(TokenInt), p.matchTokenKind(TokenFloat):
 		return p.parseNumber(pos)
 	case p.matchTokenKind(TokenString):
 		return p.parseString(pos)
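The same one-line pattern, shown in miniature for the literal dispatch (a stand-alone mimic; the token kinds and strconv handling here are illustrative, not the parser's internals):

package main

import (
	"fmt"
	"strconv"
)

type tokenKind int

const (
	tokenInt tokenKind = iota
	tokenFloat
	tokenString
)

// parseLiteral mimics the dispatch above: integer and float tokens now
// share one numeric path, so "0.01" is accepted wherever "100" was.
func parseLiteral(kind tokenKind, text string) (any, error) {
	switch kind {
	case tokenInt, tokenFloat:
		return strconv.ParseFloat(text, 64)
	case tokenString:
		return text, nil
	default:
		return nil, fmt.Errorf("unexpected literal %q", text)
	}
}

func main() {
	v, _ := parseLiteral(tokenFloat, "0.01")
	fmt.Println(v) // 0.01
}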
1 change: 0 additions & 1 deletion parser/parser_test.go
@@ -66,7 +66,6 @@ func TestParser_ParseStatements(t *testing.T) {
 				goldie.WithDiffEngine(goldie.ColoredDiff),
 				goldie.WithFixtureDir(outputDir))
 			g.Assert(t, entry.Name(), outputBytes)
-
 		})
 	}
 }
4 changes: 4 additions & 0 deletions parser/testdata/ddl/alter_table_add_index.sql
@@ -1 +1,5 @@
 ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX my_index(f0) TYPE minmax GRANULARITY 1024;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX api_id_idx api_id TYPE set(100) GRANULARITY 2;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX content_idx content TYPE tokenbf_v1(30720, 2, 0) GRANULARITY 1;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX output_idx output TYPE ngrambf_v1(3, 10000, 2, 1) GRANULARITY 2;
18 changes: 18 additions & 0 deletions parser/testdata/ddl/create_table_with_index.sql
@@ -0,0 +1,18 @@
+CREATE TABLE IF NOT EXISTS test_local
+(
+    `id` UInt64 CODEC(Delta, ZSTD(1)),
+    `api_id` UInt64 CODEC(ZSTD(1)),
+    `arr` Array(Int64),
+    `content` String CODEC(ZSTD(1)),
+    `output` String,
+    INDEX id_idx id TYPE minmax GRANULARITY 10,
+    INDEX api_id_idx api_id TYPE set(100) GRANULARITY 2,
+    INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3,
+    INDEX content_idx content TYPE tokenbf_v1(30720, 2, 0) GRANULARITY 1,
+    INDEX output_idx output TYPE ngrambf_v1(3, 10000, 2, 1) GRANULARITY 2
+)
+ENGINE = ReplicatedMergeTree('/root/test_local', '{replica}')
+PARTITION BY toStartOfHour(`timestamp`)
+ORDER BY (toUnixTimestamp64Nano(`timestamp`), `api_id`)
+TTL toStartOfHour(`timestamp`) + INTERVAL 7 DAY,toStartOfHour(`timestamp`) + INTERVAL 2 DAY
+SETTINGS execute_merges_on_single_replica_time_threshold=1200, index_granularity=16384, max_bytes_to_merge_at_max_space_in_pool=64424509440, storage_policy='main', ttl_only_drop_parts=1;
16 changes: 16 additions & 0 deletions parser/testdata/ddl/format/alter_table_add_index.sql
@@ -1,8 +1,24 @@
 -- Origin SQL:
 ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX my_index(f0) TYPE minmax GRANULARITY 1024;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX api_id_idx api_id TYPE set(100) GRANULARITY 2;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX content_idx content TYPE tokenbf_v1(30720, 2, 0) GRANULARITY 1;
+ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX output_idx output TYPE ngrambf_v1(3, 10000, 2, 1) GRANULARITY 2;
 
 
 -- Format SQL:
 ALTER TABLE test.events_local
 ON CLUSTER 'default_cluster'
 ADD INDEX my_index(f0) TYPE minmax GRANULARITY 1024;
+ALTER TABLE test.events_local
+ON CLUSTER 'default_cluster'
+ADD INDEX api_id_idx api_id TYPE set(100) GRANULARITY 2;
+ALTER TABLE test.events_local
+ON CLUSTER 'default_cluster'
+ADD INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3;
+ALTER TABLE test.events_local
+ON CLUSTER 'default_cluster'
+ADD INDEX content_idx content TYPE tokenbf_v1(30720,2,0) GRANULARITY 1;
+ALTER TABLE test.events_local
+ON CLUSTER 'default_cluster'
+ADD INDEX output_idx output TYPE ngrambf_v1(3,10000,2,1) GRANULARITY 2;
40 changes: 40 additions & 0 deletions parser/testdata/ddl/format/create_table_with_index.sql
@@ -0,0 +1,40 @@
+-- Origin SQL:
+CREATE TABLE IF NOT EXISTS test_local
+(
+    `id` UInt64 CODEC(Delta, ZSTD(1)),
+    `api_id` UInt64 CODEC(ZSTD(1)),
+    `arr` Array(Int64),
+    `content` String CODEC(ZSTD(1)),
+    `output` String,
+    INDEX id_idx id TYPE minmax GRANULARITY 10,
+    INDEX api_id_idx api_id TYPE set(100) GRANULARITY 2,
+    INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3,
+    INDEX content_idx content TYPE tokenbf_v1(30720, 2, 0) GRANULARITY 1,
+    INDEX output_idx output TYPE ngrambf_v1(3, 10000, 2, 1) GRANULARITY 2
+)
+ENGINE = ReplicatedMergeTree('/root/test_local', '{replica}')
+PARTITION BY toStartOfHour(`timestamp`)
+ORDER BY (toUnixTimestamp64Nano(`timestamp`), `api_id`)
+TTL toStartOfHour(`timestamp`) + INTERVAL 7 DAY,toStartOfHour(`timestamp`) + INTERVAL 2 DAY
+SETTINGS execute_merges_on_single_replica_time_threshold=1200, index_granularity=16384, max_bytes_to_merge_at_max_space_in_pool=64424509440, storage_policy='main', ttl_only_drop_parts=1;
+
+
+-- Format SQL:
+CREATE TABLE IF NOT EXISTS test_local
+(
+    `id` UInt64 CODEC(Delta, ZSTD(1)),
+    `api_id` UInt64 CODEC(ZSTD(1)),
+    `arr` Array(Int64),
+    `content` String CODEC(ZSTD(1)),
+    `output` String,
+    INDEX id_idx id TYPE minmax GRANULARITY 10,
+    INDEX api_id_idx api_id TYPE set(100) GRANULARITY 2,
+    INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3,
+    INDEX content_idx content TYPE tokenbf_v1(30720,2,0) GRANULARITY 1,
+    INDEX output_idx output TYPE ngrambf_v1(3,10000,2,1) GRANULARITY 2
+)
+ENGINE = ReplicatedMergeTree('/root/test_local', '{replica}')
+PARTITION BY toStartOfHour(`timestamp`)
+TTL toStartOfHour(`timestamp`) + INTERVAL 7 DAY,toStartOfHour(`timestamp`) + INTERVAL 2 DAY
+SETTINGS execute_merges_on_single_replica_time_threshold=1200, index_granularity=16384, max_bytes_to_merge_at_max_space_in_pool=64424509440, storage_policy='main', ttl_only_drop_parts=1
+ORDER BY (toUnixTimestamp64Nano(`timestamp`), `api_id`);