diff --git a/.github/workflows/maven.yml b/.github/workflows/maven.yml
index 04d28a896..f3911d957 100644
--- a/.github/workflows/maven.yml
+++ b/.github/workflows/maven.yml
@@ -115,7 +115,7 @@ jobs:
fail-fast: false
matrix:
# BROKEN: tpch
- benchmark: [ 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
+ benchmark: [ 'chbenchmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
steps:
- name: Download artifact
uses: actions/download-artifact@v4
@@ -149,6 +149,10 @@ jobs:
echo "The ${{matrix.benchmark}} benchmark is not supported for sqlite."
exit 0
else
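+ # chbenchmark runs its analytical queries against the tpcc tables, so create and load them first.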
+ if [ ${{matrix.benchmark}} == chbenchmark ]; then
+ # Disable synchronous mode for sqlite tpcc data loading to save some time.
+ java -jar benchbase.jar -b tpcc -c config/sqlite/sample_tpcc_nosync_config.xml --create=true --load=true --execute=false --json-histograms results/histograms.json
+ fi
java -jar benchbase.jar -b ${{matrix.benchmark}} -c config/sqlite/sample_${{matrix.benchmark}}_config.xml --create=true --load=true --execute=true --json-histograms results/histograms.json
fi
@@ -179,7 +183,7 @@ jobs:
fail-fast: false
matrix:
# FIXME: Add tpch back in (#333).
- benchmark: [ 'auctionmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
+ benchmark: [ 'auctionmark', 'chbenchmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
services:
mariadb: # https://hub.docker.com/_/mariadb
image: mariadb:latest
@@ -233,6 +237,9 @@ jobs:
(sleep 10 && ./scripts/interrupt-docker-db-service.sh mariadb) &
java -jar benchbase.jar -b tpcc -c config/mariadb/sample_tpcc_config.xml --execute=true --json-histograms results/histograms.json
else
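+ # chbenchmark runs its analytical queries against the tpcc tables, so create and load them first.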
+ if [ ${{matrix.benchmark}} == chbenchmark ]; then
+ java -jar benchbase.jar -b tpcc -c config/mariadb/sample_tpcc_config.xml --create=true --load=true --execute=false --json-histograms results/histograms.json
+ fi
java -jar benchbase.jar -b ${{matrix.benchmark}} -c config/mariadb/sample_${{matrix.benchmark}}_config.xml --create=true --load=true --execute=true --json-histograms results/histograms.json
fi
@@ -258,7 +265,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- benchmark: [ 'auctionmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'tpch', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
+ benchmark: [ 'auctionmark', 'chbenchmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'tpch', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
services:
mysql: # https://hub.docker.com/_/mysql
image: mysql:latest
@@ -311,6 +318,9 @@ jobs:
(sleep 10 && ./scripts/interrupt-docker-db-service.sh mysql) &
java -jar benchbase.jar -b tpcc -c config/mysql/sample_tpcc_config.xml --execute=true --json-histograms results/histograms.json
else
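+ # chbenchmark runs its analytical queries against the tpcc tables, so create and load them first.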
+ if [ ${{matrix.benchmark}} == chbenchmark ]; then
+ java -jar benchbase.jar -b tpcc -c config/mysql/sample_tpcc_config.xml --create=true --load=true --execute=false --json-histograms results/histograms.json
+ fi
java -jar benchbase.jar -b ${{matrix.benchmark}} -c config/mysql/sample_${{matrix.benchmark}}_config.xml --create=true --load=true --execute=true --json-histograms results/histograms.json
fi
@@ -336,7 +346,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- benchmark: [ 'auctionmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'tpch', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
+ benchmark: [ 'auctionmark', 'chbenchmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'tpch', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
services:
oracle:
image: gvenzl/oracle-xe:21.3.0-slim-faststart
@@ -393,6 +403,9 @@ jobs:
(sleep 10 && ./scripts/interrupt-docker-db-service.sh oracle) &
java -jar benchbase.jar -b tpcc -c config/oracle/sample_tpcc_config.xml --execute=true --json-histograms results/histograms.json
else
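+ # chbenchmark runs its analytical queries against the tpcc tables, so create and load them first.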
+ if [ ${{matrix.benchmark}} == chbenchmark ]; then
+ java -jar benchbase.jar -b tpcc -c config/oracle/sample_tpcc_config.xml --create=true --load=true --execute=false --json-histograms results/histograms.json
+ fi
java -jar benchbase.jar -b ${{matrix.benchmark}} -c config/oracle/sample_${{matrix.benchmark}}_config.xml --create=true --load=true --execute=true --json-histograms results/histograms.json
fi
@@ -424,7 +437,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- benchmark: [ 'auctionmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'tpch', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
+ benchmark: [ 'auctionmark', 'chbenchmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpcc-with-reconnects', 'tpch', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
steps:
# Note: we download just the docker-compose scripts/configs rather than the
# whole source code repo for better testing.
@@ -479,6 +492,9 @@ jobs:
(sleep 10 && ./scripts/interrupt-docker-db-service.sh postgres) &
java -jar benchbase.jar -b tpcc -c config/postgres/sample_tpcc_config.xml -im 1000 -mt advanced --execute=true --json-histograms results/histograms.json
else
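+ # chbenchmark runs its analytical queries against the tpcc tables, so create and load them first.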
+ if [ ${{matrix.benchmark}} == chbenchmark ]; then
+ java -jar benchbase.jar -b tpcc -c config/postgres/sample_tpcc_config.xml --create=true --load=true --execute=false --json-histograms results/histograms.json
+ fi
java -jar benchbase.jar -b ${{matrix.benchmark}} -c config/postgres/sample_${{matrix.benchmark}}_config.xml -im 1000 -mt advanced --create=true --load=true --execute=true --json-histograms results/histograms.json
fi
@@ -515,12 +531,14 @@ jobs:
fail-fast: false
matrix:
# TODO: Add tpcc-with-reconnects benchmark support
+ # TODO: Add chbenchmark benchmark support
benchmark: [ 'auctionmark', 'epinions', 'hyadapt', 'noop', 'otmetrics', 'resourcestresser', 'seats', 'sibench', 'smallbank', 'tatp', 'templated', 'tpcc', 'tpch', 'twitter', 'voter', 'wikipedia', 'ycsb' ]
services:
cockroach: # https://hub.docker.com/repository/docker/timveil/cockroachdb-single-node
image: timveil/cockroachdb-single-node:latest
env:
DATABASE_NAME: benchbase
+ # TODO: Expand for additional config adjustments (See Also: #405, #519, #525)
MEMORY_SIZE: .75
ports:
- 26257:26257
@@ -557,6 +575,9 @@ jobs:
(sleep 10 && ./scripts/interrupt-docker-db-service.sh cockroachdb) &
java -jar benchbase.jar -b tpcc -c config/cockroachdb/sample_tpcc_config.xml --execute=true --json-histograms results/histograms.json
else
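+ # chbenchmark runs its analytical queries against the tpcc tables, so create and load them first.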
+ if [ ${{matrix.benchmark}} == chbenchmark ]; then
+ java -jar benchbase.jar -b tpcc -c config/cockroachdb/sample_tpcc_config.xml --create=true --load=true --execute=false --json-histograms results/histograms.json
+ fi
java -jar benchbase.jar -b ${{matrix.benchmark}} -c config/cockroachdb/sample_${{matrix.benchmark}}_config.xml --create=true --load=true --execute=true --json-histograms results/histograms.json
fi
@@ -655,6 +676,9 @@ jobs:
(sleep 10 && ./scripts/interrupt-docker-db-service.sh sqlserver) &
java -jar benchbase.jar -b tpcc -c config/sqlserver/sample_tpcc_config.xml -im 1000 -mt advanced --execute=true --json-histograms results/histograms.json
else
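+ # chbenchmark runs its analytical queries against the tpcc tables, so create and load them first.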
+ if [ ${{matrix.benchmark}} == chbenchmark ]; then
+ java -jar benchbase.jar -b tpcc -c config/sqlserver/sample_tpcc_config.xml --create=true --load=true --execute=false --json-histograms results/histograms.json
+ fi
java -jar benchbase.jar -b ${{matrix.benchmark}} -c config/sqlserver/sample_${{matrix.benchmark}}_config.xml -im 1000 -mt advanced --create=true --load=true --execute=true --json-histograms results/histograms.json
fi
diff --git a/config/sqlite/sample_chbenchmark_config.xml b/config/sqlite/sample_chbenchmark_config.xml
new file mode 100644
index 000000000..d0aae180d
--- /dev/null
+++ b/config/sqlite/sample_chbenchmark_config.xml
@@ -0,0 +1,161 @@
+<?xml version="1.0"?>
+<parameters>
+
+    <!-- Connection details -->
+    <type>SQLITE</type>
+    <driver>org.sqlite.JDBC</driver>
+    <!-- chbenchmark executes against an existing tpcc dataset, so reuse the tpcc database file. -->
+    <url>jdbc:sqlite:tpcc.db</url>
+    <isolation>TRANSACTION_SERIALIZABLE</isolation>
+    <batchsize>128</batchsize>
+
+    <!-- Scalefactor in tpcc drives the number of warehouses -->
+    <scalefactor>1</scalefactor>
+
+    <!-- The workload -->
+    <terminals bench="tpcc">1</terminals>
+    <terminals bench="chbenchmark">1</terminals>
+    <works>
+        <work>
+            <!-- Time in seconds -->
+            <time>200</time>
+            <!-- tpcc transaction weights: NewOrder, Payment, OrderStatus, Delivery, StockLevel -->
+            <weights bench="tpcc">45,43,4,4,4</weights>
+            <!-- chbenchmark query weights: Q1 through Q22 -->
+            <weights bench="chbenchmark">3, 2, 3, 2, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5</weights>
+        </work>
+    </works>
+
+    <!-- chbenchmark analytical queries -->
+    <transactiontypes bench="chbenchmark">
+        <transactiontype>
+            <name>Q1</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q2</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q3</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q4</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q5</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q6</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q7</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q8</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q9</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q10</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q11</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q12</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q13</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q14</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q15</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q16</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q17</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q18</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q19</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q20</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q21</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Q22</name>
+        </transactiontype>
+    </transactiontypes>
+
+    <!-- tpcc transactions -->
+    <transactiontypes bench="tpcc">
+        <transactiontype>
+            <name>NewOrder</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Payment</name>
+        </transactiontype>
+        <transactiontype>
+            <name>OrderStatus</name>
+        </transactiontype>
+        <transactiontype>
+            <name>Delivery</name>
+        </transactiontype>
+        <transactiontype>
+            <name>StockLevel</name>
+        </transactiontype>
+    </transactiontypes>
+</parameters>
diff --git a/config/sqlite/sample_epinions_config.xml b/config/sqlite/sample_epinions_config.xml
index 3fc752d98..8debadc65 100644
--- a/config/sqlite/sample_epinions_config.xml
+++ b/config/sqlite/sample_epinions_config.xml
@@ -4,13 +4,13 @@
     <type>SQLITE</type>
     <driver>org.sqlite.JDBC</driver>
-    <url>jdbc:sqlite:resourcestresser.db</url>
+    <url>jdbc:sqlite:epinions.db</url>
     <isolation>TRANSACTION_SERIALIZABLE</isolation>
     <batchsize>128</batchsize>
     <scalefactor>0.1</scalefactor>
-
+
     <terminals>1</terminals>
diff --git a/docker/build-run-benchmark-with-docker.sh b/docker/build-run-benchmark-with-docker.sh
index 69386f5e0..7f90eca44 100755
--- a/docker/build-run-benchmark-with-docker.sh
+++ b/docker/build-run-benchmark-with-docker.sh
@@ -33,7 +33,7 @@ if [ "$BENCHBASE_PROFILE" == 'sqlite' ]; then
fi
EXTRA_DOCKER_ARGS="-v $SRC_DIR/$benchmark.db:/benchbase/profiles/sqlite/$benchmark.db"
- if [ "$benchmark" == 'templated' ]; then
+ if echo "$benchmark" | grep -E -qx '(templated|chbenchmark)'; then
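+ # chbenchmark, like templated, executes against preloaded tpcc data, so its db file is also mapped to tpcc.db.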
# See notes below:
EXTRA_DOCKER_ARGS+=" -v $SRC_DIR/$benchmark.db:/benchbase/profiles/sqlite/tpcc.db"
fi
@@ -49,7 +49,7 @@ if [ "${SKIP_LOAD_DB:-false}" != 'true' ]; then
# For templated benchmarks, we need to preload some data for the test since by
# design, templated benchmarks do not support the 'load' operation
# In this case, we load the tpcc data.
- if [ "$benchmark" == 'templated' ]; then
+ if echo "$benchmark" | grep -E -qx '(templated|chbenchmark)'; then
load_benchmark='tpcc'
echo "INFO: Loading tpcc data for templated benchmark"
@@ -59,15 +59,24 @@ if [ "${SKIP_LOAD_DB:-false}" != 'true' ]; then
else
config="config/sample_tpcc_config.xml"
fi
- else
+
+ BUILD_IMAGE=false EXTRA_DOCKER_ARGS="--network=host $EXTRA_DOCKER_ARGS" \
+ ./docker/benchbase/run-full-image.sh \
+ --config "$config" --bench "$load_benchmark" \
+ --create=true --load=true --execute=false
+ fi
+
+ # For chbenchmark, we also load its own data in addition to the tpcc data.
+ if ! echo "$benchmark" | grep -E -qx '(templated)'; then
echo "INFO: Loading $benchmark data"
load_benchmark="$benchmark"
config="config/sample_${benchmark}_config.xml"
+
+ BUILD_IMAGE=false EXTRA_DOCKER_ARGS="--network=host $EXTRA_DOCKER_ARGS" \
+ ./docker/benchbase/run-full-image.sh \
+ --config "$config" --bench "$load_benchmark" \
+ --create=true --load=true --execute=false
fi
- BUILD_IMAGE=false EXTRA_DOCKER_ARGS="--network=host $EXTRA_DOCKER_ARGS" \
- ./docker/benchbase/run-full-image.sh \
- --config "$config" --bench "$load_benchmark" \
- --create=true --load=true --execute=false
else
echo "INFO: Skipping load of $benchmark data"
fi
diff --git a/src/main/resources/benchmarks/chbenchmark/ddl-sqlite.sql b/src/main/resources/benchmarks/chbenchmark/ddl-sqlite.sql
new file mode 100644
index 000000000..5bacb925f
--- /dev/null
+++ b/src/main/resources/benchmarks/chbenchmark/ddl-sqlite.sql
@@ -0,0 +1,31 @@
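+-- CH-benCHmark layers TPC-H-style region, nation, and supplier tables on top of the TPC-C schema.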
+DROP TABLE IF EXISTS supplier;
+DROP TABLE IF EXISTS nation;
+DROP TABLE IF EXISTS region;
+
+create table region
+(
+ r_regionkey int not null,
+ r_name char(55) not null,
+ r_comment char(152) not null,
+ PRIMARY KEY (r_regionkey)
+);
+
+create table nation
+(
+ n_nationkey int not null,
+ n_name char(25) not null,
+ n_regionkey int not null references region(r_regionkey) ON DELETE CASCADE,
+ n_comment char(152) not null,
+ PRIMARY KEY ( n_nationkey )
+);
+
+create table supplier (
+ su_suppkey int not null,
+ su_name char(25) not null,
+ su_address varchar(40) not null,
+ su_nationkey int not null references nation(n_nationkey) ON DELETE CASCADE,
+ su_phone char(15) not null,
+ su_acctbal numeric(12,2) not null,
+ su_comment char(101) not null,
+ PRIMARY KEY ( su_suppkey )
+);
diff --git a/src/main/resources/benchmarks/chbenchmark/dialect-oracle.xml b/src/main/resources/benchmarks/chbenchmark/dialect-oracle.xml
index 9d4559fd2..688b17393 100644
--- a/src/main/resources/benchmarks/chbenchmark/dialect-oracle.xml
+++ b/src/main/resources/benchmarks/chbenchmark/dialect-oracle.xml
@@ -68,7 +68,7 @@
OR
(n1.n_name = 'Cambodia' AND n2.n_name = 'Germany')
)
- GROUP BY su_nationkey, cust_nation, l_year
+ GROUP BY su_nationkey, substr(c_state, 1, 1), extract(YEAR FROM o_entry_d)
ORDER BY su_nationkey, cust_nation, l_year
@@ -82,7 +82,8 @@
AND ol_w_id = o_w_id AND ol_d_id = o_d_id AND ol_o_id = o_id AND c_id = o_c_id AND c_w_id = o_w_id
AND c_d_id = o_d_id AND n1.n_nationkey = ascii(substr(c_state, 1, 1)) AND n1.n_regionkey = r_regionkey
AND 1000 > ol_i_id AND r_name = 'Europe' AND su_nationkey = n2.n_nationkey AND i_data LIKE '%b' AND i_id = ol_i_id
- GROUP BY l_year ORDER BY l_year
+ GROUP BY extract(YEAR FROM o_entry_d)
+ ORDER BY l_year
@@ -92,7 +93,8 @@
WHERE ol_i_id = s_i_id AND ol_supply_w_id = s_w_id AND MOD ((s_w_id * s_i_id), 10000) = su_suppkey
AND ol_w_id = o_w_id AND ol_d_id = o_d_id AND ol_o_id = o_id AND ol_i_id = i_id AND su_nationkey = n_nationkey
AND i_data LIKE '%bb'
- GROUP BY n_name, l_year ORDER BY n_name, l_year DESC
+ GROUP BY n_name, extract(YEAR FROM o_entry_d)
+ ORDER BY n_name, l_year DESC
diff --git a/src/main/resources/benchmarks/chbenchmark/dialect-sqlite.xml b/src/main/resources/benchmarks/chbenchmark/dialect-sqlite.xml
new file mode 100644
index 000000000..7f61e9bd2
--- /dev/null
+++ b/src/main/resources/benchmarks/chbenchmark/dialect-sqlite.xml
@@ -0,0 +1,205 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<dialects>
+    <dialect type="SQLITE">
+        <procedure name="Q1">
+            <statement name="query_stmt">
+                SELECT ol_number, sum(ol_quantity) AS sum_qty, sum(ol_amount) AS sum_amount, avg(ol_quantity) AS avg_qty,
+                avg(ol_amount) AS avg_amount, count(*) AS count_order
+                FROM order_line
+                WHERE ol_delivery_d > datetime('2007-01-02 00:00:00')
+                GROUP BY ol_number
+                ORDER BY ol_number
+            </statement>
+        </procedure>
+        <procedure name="Q2">
+            <statement name="query_stmt">
+                SELECT su_suppkey, su_name, n_name, i_id, i_name, su_address, su_phone, su_comment
+                FROM item, supplier, stock, nation, region, (
+                    SELECT s_i_id AS m_i_id, MIN(s_quantity) AS m_s_quantity
+                    FROM stock, supplier, nation, region
+                    WHERE MOD((s_w_id*s_i_id), 10000) = su_suppkey AND su_nationkey = n_nationkey
+                    AND n_regionkey = r_regionkey AND r_name LIKE 'Europ%'
+                    GROUP BY s_i_id) m
+                WHERE i_id = s_i_id AND MOD((s_w_id * s_i_id), 10000) = su_suppkey AND su_nationkey = n_nationkey
+                AND n_regionkey = r_regionkey AND i_data LIKE '%b' AND r_name LIKE 'Europ%' AND i_id=m_i_id
+                AND s_quantity = m_s_quantity
+                ORDER BY n_name, su_name, i_id
+            </statement>
+        </procedure>
+        <procedure name="Q3">
+            <statement name="query_stmt">
+                SELECT ol_o_id, ol_w_id, ol_d_id, sum(ol_amount) AS revenue, o_entry_d
+                FROM customer, new_order, oorder, order_line
+                WHERE c_state LIKE 'A%' AND c_id = o_c_id AND c_w_id = o_w_id AND c_d_id = o_d_id AND no_w_id = o_w_id
+                AND no_d_id = o_d_id AND no_o_id = o_id AND ol_w_id = o_w_id AND ol_d_id = o_d_id AND ol_o_id = o_id
+                AND o_entry_d > datetime('2007-01-02 00:00:00')
+                GROUP BY ol_o_id, ol_w_id, ol_d_id, o_entry_d
+                ORDER BY revenue DESC , o_entry_d
+            </statement>
+        </procedure>
+        <procedure name="Q5">
+            <statement name="query_stmt">
+                SELECT n_name, sum(ol_amount) AS revenue
+                FROM customer, oorder, order_line, stock, supplier, nation, region
+                WHERE c_id = o_c_id AND c_w_id = o_w_id AND c_d_id = o_d_id AND ol_o_id = o_id AND ol_w_id = o_w_id
+                AND ol_d_id=o_d_id AND ol_w_id = s_w_id AND ol_i_id = s_i_id
+                AND MOD((s_w_id * s_i_id), 10000) = su_suppkey
+                AND unicode(substr(c_state, 1, 1)) = su_nationkey AND su_nationkey = n_nationkey AND n_regionkey = r_regionkey AND r_name = 'Europe'
+                AND o_entry_d >= datetime('2007-01-02 00:00:00')
+                GROUP BY n_name ORDER BY revenue DESC
+            </statement>
+        </procedure>
+        <procedure name="Q6">
+            <statement name="query_stmt">
+                SELECT sum(ol_amount) AS revenue FROM order_line WHERE ol_delivery_d >= datetime('1999-01-01 00:00:00')
+                AND datetime('2020-01-01 00:00:00') > ol_delivery_d AND ol_quantity BETWEEN 1 AND 100000
+            </statement>
+        </procedure>
+        <procedure name="Q7">
+            <statement name="query_stmt">
+                SELECT su_nationkey AS supp_nation, substr(c_state, 1, 1) AS cust_nation, strftime('%Y', o_entry_d) AS l_year,
+                sum(ol_amount) AS revenue
+                FROM supplier, stock, order_line, oorder, customer, nation n1, nation n2
+                WHERE ol_supply_w_id = s_w_id AND ol_i_id = s_i_id AND MOD ((s_w_id * s_i_id), 10000) = su_suppkey
+                AND ol_w_id = o_w_id AND ol_d_id = o_d_id AND ol_o_id = o_id AND c_id = o_c_id AND c_w_id = o_w_id
+                AND c_d_id = o_d_id AND su_nationkey = n1.n_nationkey AND unicode(substr(c_state, 1, 1)) = n2.n_nationkey
+                AND (
+                    (n1.n_name = 'Germany' AND n2.n_name = 'Cambodia')
+                    OR
+                    (n1.n_name = 'Cambodia' AND n2.n_name = 'Germany')
+                )
+                GROUP BY su_nationkey, cust_nation, l_year
+                ORDER BY su_nationkey, cust_nation, l_year
+            </statement>
+        </procedure>
+        <procedure name="Q8">
+            <statement name="query_stmt">
+                SELECT strftime('%Y', o_entry_d) AS l_year, sum(
+                    CASE WHEN n2.n_name = 'Germany' THEN ol_amount ELSE 0 END
+                ) / sum(ol_amount) AS mkt_share
+                FROM item, supplier, stock, order_line, oorder, customer, nation n1, nation n2, region
+                WHERE i_id = s_i_id AND ol_i_id = s_i_id AND ol_supply_w_id = s_w_id AND MOD ((s_w_id * s_i_id), 10000) = su_suppkey
+                AND ol_w_id = o_w_id AND ol_d_id = o_d_id AND ol_o_id = o_id AND c_id = o_c_id AND c_w_id = o_w_id
+                AND c_d_id = o_d_id AND n1.n_nationkey = unicode(substr(c_state, 1, 1)) AND n1.n_regionkey = r_regionkey
+                AND 1000 > ol_i_id AND r_name = 'Europe' AND su_nationkey = n2.n_nationkey AND i_data LIKE '%b' AND i_id = ol_i_id
+                GROUP BY l_year ORDER BY l_year
+            </statement>
+        </procedure>
+        <procedure name="Q9">
+            <statement name="query_stmt">
+                SELECT n_name, strftime('%Y', o_entry_d) AS l_year, sum(ol_amount) AS sum_profit
+                FROM item, stock, supplier, order_line, oorder, nation
+                WHERE ol_i_id = s_i_id AND ol_supply_w_id = s_w_id AND MOD ((s_w_id * s_i_id), 10000) = su_suppkey
+                AND ol_w_id = o_w_id AND ol_d_id = o_d_id AND ol_o_id = o_id AND ol_i_id = i_id AND su_nationkey = n_nationkey
+                AND i_data LIKE '%bb'
+                GROUP BY n_name, l_year ORDER BY n_name, l_year DESC
+            </statement>
+        </procedure>
+        <procedure name="Q10">
+            <statement name="query_stmt">
+                SELECT c_id, c_last, sum(ol_amount) AS revenue, c_city, c_phone, n_name
+                FROM customer, oorder, order_line, nation
+                WHERE c_id = o_c_id AND c_w_id = o_w_id AND c_d_id = o_d_id AND ol_w_id = o_w_id AND ol_d_id = o_d_id
+                AND ol_o_id = o_id AND o_entry_d >= datetime('2007-01-02 00:00:00')
+                AND ol_delivery_d >= o_entry_d AND n_nationkey = unicode(substr(c_state, 1, 1))
+                GROUP BY c_id, c_last, c_city, c_phone, n_name ORDER BY revenue DESC
+            </statement>
+        </procedure>
+        <procedure name="Q12">
+            <statement name="query_stmt">
+                SELECT o_ol_cnt, sum(CASE WHEN o_carrier_id = 1 OR o_carrier_id = 2 THEN 1 ELSE 0 END) AS high_line_count,
+                sum(CASE WHEN o_carrier_id != 1 AND o_carrier_id != 2 THEN 1 ELSE 0 END) AS low_line_count
+                FROM oorder, order_line
+                WHERE ol_w_id = o_w_id AND ol_d_id = o_d_id AND ol_o_id = o_id AND ol_delivery_d >= o_entry_d
+                AND datetime('2020-01-01 00:00:00') > ol_delivery_d
+                GROUP BY o_ol_cnt ORDER BY o_ol_cnt
+            </statement>
+        </procedure>
+        <procedure name="Q13">
+            <statement name="query_stmt">
+                SELECT c_count, count(*) AS custdist FROM (
+                    SELECT c_id, count(o_id) AS c_count
+                    FROM customer LEFT OUTER JOIN oorder ON
+                    (c_w_id = o_w_id AND c_d_id = o_d_id AND c_id = o_c_id AND o_carrier_id > 8)
+                    GROUP BY c_id
+                )
+                GROUP BY c_count
+                ORDER BY custdist DESC, c_count DESC
+            </statement>
+        </procedure>
+        <procedure name="Q14">
+            <statement name="query_stmt">
+                SELECT (100.00 * sum(CASE WHEN i_data LIKE 'PR%' THEN ol_amount ELSE 0 END) / (1 + sum(ol_amount))) AS promo_revenue
+                FROM order_line, item
+                WHERE ol_i_id = i_id AND ol_delivery_d >= datetime('2007-01-02 00:00:00')
+                AND datetime('2020-01-02') > ol_delivery_d
+            </statement>
+        </procedure>
+        <procedure name="Q15">
+            <statement name="createview_stmt">
+                CREATE view revenue0 (supplier_no, total_revenue) AS
+                SELECT mod((s_w_id * s_i_id),10000) as supplier_no, sum(ol_amount) as total_revenue
+                FROM order_line, stock
+                WHERE ol_i_id = s_i_id AND ol_supply_w_id = s_w_id
+                AND ol_delivery_d >= datetime('2007-01-02 00:00:00')
+                GROUP BY mod((s_w_id * s_i_id),10000)
+            </statement>
+        </procedure>
+        <procedure name="Q16">
+            <statement name="query_stmt">
+                SELECT i_name, substr(i_data, 1, 3) as brand, i_price, count(DISTINCT (mod((s_w_id * s_i_id),10000))) AS supplier_cnt
+                FROM stock, item
+                WHERE i_id = s_i_id AND i_data NOT LIKE 'zz%' AND (mod((s_w_id * s_i_id),10000) NOT IN
+                    (SELECT su_suppkey FROM supplier WHERE su_comment LIKE '%bad%')
+                )
+                GROUP BY i_name, substr(i_data, 1, 3), i_price ORDER BY supplier_cnt DESC
+            </statement>
+        </procedure>
+        <procedure name="Q19">
+            <statement name="query_stmt">
+                SELECT sum(ol_amount) AS revenue
+                FROM order_line, item
+                WHERE (
+                    ol_i_id = i_id AND i_data LIKE '%a' AND ol_quantity >= 1 AND 10 >= ol_quantity
+                    AND i_price BETWEEN 1 AND 400000 AND ol_w_id IN (1, 2, 3)
+                ) OR (
+                    ol_i_id = i_id AND i_data LIKE '%b' AND ol_quantity >= 1 AND 10 >= ol_quantity
+                    AND i_price BETWEEN 1 AND 400000 AND ol_w_id IN (1, 2, 4)
+                ) OR (
+                    ol_i_id = i_id AND i_data LIKE '%c' AND ol_quantity >= 1 AND 10 >= ol_quantity
+                    AND i_price BETWEEN 1 AND 400000 AND ol_w_id IN (1, 5, 3)
+                )
+            </statement>
+        </procedure>
+        <procedure name="Q20">
+            <statement name="query_stmt">
+                SELECT su_name, su_address FROM supplier, nation
+                WHERE su_suppkey IN (
+                    SELECT mod(s_i_id * s_w_id, 10000)
+                    FROM stock INNER JOIN item ON i_id = s_i_id INNER JOIN order_line ON ol_i_id = s_i_id
+                    WHERE ol_delivery_d > datetime('2010-05-23 12:00:00') AND i_data LIKE 'co%'
+                    GROUP BY s_i_id, s_w_id, s_quantity
+                    HAVING 2*s_quantity > sum(ol_quantity)
+                ) AND su_nationkey = n_nationkey AND n_name = 'Germany'
+                ORDER BY su_name
+            </statement>
+        </procedure>
+        <procedure name="Q22">
+            <statement name="query_stmt">
+                SELECT substr(c_state,1,1) AS country, count(*) AS numcust, sum(c_balance) AS totacctbal
+                FROM customer
+                WHERE substr(c_phone,1,1) IN ('1', '2', '3', '4', '5', '6', '7')
+                AND c_balance > (
+                    SELECT avg(c_balance)
+                    FROM customer
+                    WHERE c_balance > 0.00 AND substr(c_phone,1,1) IN ('1', '2', '3', '4', '5', '6', '7'))
+                AND NOT EXISTS (
+                    SELECT * FROM oorder
+                    WHERE o_c_id = c_id AND o_w_id = c_w_id AND o_d_id = c_d_id
+                )
+                GROUP BY substr(c_state,1,1)
+                ORDER BY substr(c_state,1,1)
+            </statement>
+        </procedure>
+    </dialect>
+</dialects>