文章目录
- 1, clickhouse
- 1.1, 安装服务
- 1.2, 测试使用
- 1.3, 配置集群
- 验证集群
- 1.4, 备份建表数据
- 元数据备份
- 表数据导入导出
- 1.5, 通过mysql协议连接clickhouse
- 2, presto
- 2.1, 安装服务
- 部署集群版
- 2.2, 连接hive
- 2.3, 连接mysql
1, clickhouse
1.1, 安装服务
sudo yum install yum-utils
sudo rpm --import https://repo.clickhouse.com/CLICKHOUSE-KEY.GPG
sudo yum-config-manager --add-repo https://repo.clickhouse.com/rpm/stable/x86_64
sudo yum install clickhouse-server clickhouse-client
/etc/init.d/clickhouse-server start
#客户端测试连接:
[root@c7 ssh]# clickhouse-client -h c7 <<< 'SELECT 1'
1
[root@c7 ssh]# echo 'SELECT 1' | clickhouse-client -h c7
1
[root@c7 ssh]# clickhouse-client -h c73 -mn -q ' create database test; use default; show tables;'
test
1.2, 测试使用
示例建表语句和测试数据:https://clickhouse.com/docs/zh/getting-started/tutorial/
#1,创建数据库,表
clickhouse-client -h c73 --query "CREATE DATABASE IF NOT EXISTS tutorial"
clickhouse-client -h c73 -mn
c73 :)
CREATE TABLE tutorial.hits_v1
(
`WatchID` UInt64,
`JavaEnable` UInt8,
`Title` String,
`GoodEvent` Int16,
`EventTime` DateTime,
`EventDate` Date,
`CounterID` UInt32,
`ClientIP` UInt32,
`ClientIP6` FixedString(16),
`RegionID` UInt32,
`UserID` UInt64,
`CounterClass` Int8,
`OS` UInt8,
`UserAgent` UInt8,
`URL` String,
`Referer` String,
`URLDomain` String,
`RefererDomain` String,
`Refresh` UInt8,
`IsRobot` UInt8,
`RefererCategories` Array(UInt16),
...
`ParsedParams` Nested(
Key1 String,
...
ValueDouble Float64),
`IslandID` FixedString(16),
`RequestNum` UInt32,
`RequestTry` UInt8
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate, intHash32(UserID))
SAMPLE BY intHash32(UserID);
c73 :)
CREATE TABLE tutorial.visits_v1
(
`CounterID` UInt32,
`StartDate` Date,
`Sign` Int8,
`IsNew` UInt8,
`VisitID` UInt64,
`UserID` UInt64,
`StartTime` DateTime,
`Duration` UInt32,
`UTCStartTime` DateTime,
`PageViews` Int32,
`Hits` Int32,
...
`IslandID` FixedString(16)
)
ENGINE = CollapsingMergeTree(Sign)
PARTITION BY toYYYYMM(StartDate)
ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID)
SAMPLE BY intHash32(UserID);
#2,下载测试数据:
curl https://datasets.clickhouse.com/hits/tsv/hits_v1.tsv.xz | unxz --threads=`nproc` > hits_v1.tsv
curl https://datasets.clickhouse.com/visits/tsv/visits_v1.tsv.xz | unxz --threads=`nproc` > visits_v1.tsv
[root@c73 tt]# ls -lh
-rw-r--r-- 1 root root 7.3G Dec 24 17:59 hits_v1.tsv
-rw-r--r-- 1 root root 2.5G Dec 28 10:47 visits_v1.tsv
clickhouse-client -h c73 --query "INSERT INTO tutorial.hits_v1 FORMAT TSV" --max_insert_block_size=100000 < hits_v1.tsv
clickhouse-client -h c73 --query "INSERT INTO tutorial.visits_v1 FORMAT TSV" --max_insert_block_size=100000 < visits_v1.tsv
#3,测试大数据量表的查询速度
[root@c73 tt]# clickhouse-client -h c73 -mn
c73 :) select count(1) from tutorial.hits_v1;
Query id: e326e9cb-ca05-45ec-a0e7-409552f4417a
┌─count()─┐
│ 8873898 │
└─────────┘
1 rows in set. Elapsed: 0.002 sec.
c73 :) select count(1) from tutorial.visits_v1;
Query id: 1e29469b-9f75-4e63-b315-3f52c73fa4ec
┌─count()─┐
│ 1680531 │
└─────────┘
1 rows in set. Elapsed: 0.003 sec.
c73 :) SELECT StartURL AS URL, AVG(Duration) AS AvgDuration
FROM tutorial.visits_v1
WHERE (StartDate >= '2014-03-23') AND (StartDate <= '2014-03-30')
GROUP BY URL
ORDER BY AvgDuration DESC
LIMIT 2;
Query id: 7970fff2-9ccf-4853-90d5-7c6e90bf2a14
┌─URL──────────────────────────────────────────────┬─AvgDuration─┐
│ http://itpalanija-pri-patrivative=0&ads_app_user │ 60127 │
│ http://renaul-myd-ukraine │ 58938 │
└──────────────────────────────────────────────────┴─────────────┘
2 rows in set. Elapsed: 0.139 sec. Processed 1.47 million rows, 113.13 MB (10.59 million rows/s., 815.39 MB/s.)
c73 :) SELECT
sum(Sign) AS visits,
sumIf(Sign, has(Goals.ID, 1105530)) AS goal_visits,
(100. * goal_visits) / visits AS goal_percent
FROM tutorial.visits_v1
WHERE (CounterID = 912887) AND (toYYYYMM(StartDate) = 201403) AND (domain(StartURL) = 'yandex.ru')
Query id: 62569d93-532e-4e8e-a87f-5216f420647e
┌─visits─┬─goal_visits─┬──────goal_percent─┐
│ 10543 │ 8553 │ 81.12491700654462 │
└────────┴─────────────┴───────────────────┘
1 rows in set. Elapsed: 0.410 sec. Processed 47.52 thousand rows, 6.00 MB (115.90 thousand rows/s., 14.64 MB/s.)
1.3, 配置集群
- 三节点集群配置:
- shard规划(机器1-机器2, 机器2-机器3, 机器3-机器1)
config.xml
[root@c76 ~]# vim /etc/clickhouse-server/config.xml
<listen_host>0.0.0.0</listen_host>
<remote_servers incl="clickhouse_remote_servers" >
<!-- Test only shard config for testing distributed storage
<test_shard_localhost>
<shard>
<replica>
<host>localhost</host>
<port>9000</port>
</replica>
</shard>
</test_shard_localhost>
<test_shard_localhost_secure>
<shard>
<replica>
<host>localhost</host>
<port>9440</port>
<secure>1</secure>
</replica>
</shard>
</test_shard_localhost_secure> -->
</remote_servers>
<include_from>/etc/clickhouse-server/9000/metrika.xml</include_from>
metrika.xml
[root@c76 ~]# cat /etc/clickhouse-server/9000/metrika.xml
<yandex>
<clickhouse_remote_servers>
<dass>
<shard>
<internal_replication>true</internal_replication>
<replica>
<host>192.168.56.76</host>
<port>9000</port>
<user>default</user>
<password>123456</password>
<!-- <password></password>-->
</replica>
</shard>
</dass>
</clickhouse_remote_servers>
<!--zookeeper相关配置-->
<zookeeper-servers>
<node index="1">
<host>192.168.56.76</host>
<port>2181</port>
</node>
</zookeeper-servers>
<macros>
<layer>集群名称dass</layer>
<shard>本机9000服务所在的SHARD_ID</shard>
<replica>本机9000服务所在的HOST_IP</replica>
</macros>
<networks>
<ip>::/0</ip>
</networks>
<clickhouse_compression>
<case>
<min_part_size>10000000000</min_part_size>
<min_part_size_ratio>0.01</min_part_size_ratio>
<method>lz4</method>
</case>
</clickhouse_compression>
</yandex>
users.xml
[root@c76 ~]# vim /etc/clickhouse-server/users.xml
<users>
<!-- If user name was not specified, 'default' user is used. -->
<default>
<password>123456</password>
重启所有节点,查看集群信息:
验证集群
#1,本地表创建:分别在每个节点都建立本地表,或者使用 on cluster然后在其中一个节点执行即可
CREATE TABLE city_local on cluster dass (
`fdate` Int64,
`city_code` Int32,
`city_name` String,
`total_cnt` Int64
) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/city_local', '{replica}')
PARTITION BY fdate
ORDER BY (fdate, city_code, city_name);
#2,分布式表创建:本身不存储数据,在其中一个节点执行即可。
CREATE TABLE IF NOT EXISTS city_all ON CLUSTER dass AS city_local
ENGINE = Distributed(dass, default, city_local, fdate);
####第一个参数是集群名称,第二个参数是数据库名称,第三个参数是对应的本地表,第四个分片的key
分布式表插入数据
insert into city_all (fdate, city_code, city_name, total_cnt) values (20210131, 4000, 'guangzhou', 420000);
insert into city_all (fdate, city_code, city_name, total_cnt) values (20210131, 6000, 'huizhou', 65000);
insert into city_all (fdate, city_code, city_name, total_cnt) values (20210201, 5000, 'shenzhan', 55000);
insert into city_all (fdate, city_code, city_name, total_cnt) values (20210310, 7000, 'huizhou', 75000);
#3,验证各节点的数据查询是否统一(逐一登录各节点,查看分布式表数据)
clickhouse-client -h c7 --password 123456 -m --port 9000 --query 'select * from city_all'
clickhouse-client -h c7 --password 123456 -m --port 9001 --query 'select * from city_all'
clickhouse-client -h c76 --password 123456 -m --port 9001 --query 'select * from city_all'
...
#4,验证各节点的数据是否分片,是否备份(逐一登录各节点,查看本地表数据)
clickhouse-client -h c7 --password 123456 -m --port 9000 --query 'select * from city_local'
clickhouse-client -h c7 --password 123456 -m --port 9001 --query 'select * from city_local'
clickhouse-client -h c76 --password 123456 -m --port 9001 --query 'select * from city_local'
1.4, 备份建表数据
元数据备份
在数据存储目录,会按数据库创建对应目录,里面保存有表元数据相关语句 (只需把attach 改为 create )
表数据导入导出
- 格式说明:https://clickhouse.com/docs/zh/interfaces/formats
- 导出:
select * from test1 limit 300 INTO OUTFILE '/tmp/service.txt' FORMAT TabSeparatedWithNames ;
- 导入:
clickhouse-client --password 123456 -h 192.168.1.7 --query="insert into upi.test1 FORMAT TabSeparatedWithNames" < service.txt
#1,登录任意节点,导出分布式表数据
[root@c7 clickhouse-server]# clickhouse-client --password 123456 -h c7 --port 9000 -m --query="select * from city_all FORMAT CSVWithNames" > /tmp/test2.csv
[root@c7 clickhouse-server]# cat /tmp/test2.csv
"fdate","city_code","city_name","total_cnt"
20210131,4000,"guangzhou",420000
20210131,6000,"huizhou",65000
20210201,5000,"shenzhan",55000
20210310,7000,"huizhou",75000
#2,登录所有节点,删除分布式表引用的本地表数据:truncate table city_local;
#3,登录任意节点,导入分布式表数据
[root@c7 clickhouse-server]# clickhouse-client --password 123456 -h c7 --port 9000 -m --query="insert into city_all FORMAT CSVWithNames" < /tmp/test2.csv
[root@c7 clickhouse-server]# clickhouse-client --password 123456 -h c7 --port 9000 -m --query="select * from city_all"
20210131 4000 guangzhou 420000
20210131 6000 huizhou 65000
20210201 5000 shenzhan 55000
20210310 7000 huizhou 75000
1.5, 通过mysql协议连接clickhouse
[root@c7 ~]# grep mysql /etc/clickhouse-server/config.xml
<mysql_port>9004</mysql_port>
[root@c7 ~]# mysql -udefault -p -P9004 -hc7
Enter password:
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 6
Server version: 20.8.3.18-ClickHouse 0
Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> show databases;
+--------------------------------+
| name |
+--------------------------------+
| _temporary_and_external_tables |
| default |
| system |
| test1 |
+--------------------------------+
4 rows in set (0.00 sec)
mysql> create database test2;
Query OK, 0 rows affected (0.01 sec)
mysql> use test2;
Database changed
mysql> create table t1(id int);
ERROR 62 (00000): Code: 62, e.displayText() = DB::Exception: Syntax error: failed at position 24 (end of query): . Expected one of: ENGINE, storage definition (version 20.8.3.18)
mysql> create table t1(id int) engine Memory;
Query OK, 0 rows affected (0.00 sec)
mysql> insert into t1 values(1);
Query OK, 1 row affected (0.00 sec)
Read 1 rows, 4.00 B in 0.000141798 sec., 7052 rows/sec., 27.55 KiB/sec.
mysql> select * from t1;
+------+
| id |
+------+
| 1 |
+------+
1 row in set (0.00 sec)
mysql> update t1 set id=11;
ERROR 62 (00000): Code: 62, e.displayText() = DB::Exception: Syntax error: failed at position 1 ('update'): update t1 set id=11. Expected one of: Query, SHOW, SELECT, CREATE, DROP, TRUNCATE, OPTIMIZE TABLE, CREATE POLICY, ALTER POLICY, CREATE PROFILE, ALTER PROFILE, SYSTEM, DESCRIBE, DETACH, ATTACH, ALTER QUOTA, CREATE QUOTA, ALTER ROLE, CREATE ROLE, ALTER ROW POLICY, CREATE ROW POLICY, ALTER SETTINGS PROFILE, CREATE SETTINGS PROFILE, ALTER USER, CREATE USER, GRANT, REVOKE, SET ROLE, SET ROLE DEFAULT, SET DEFAULT ROLE, SHOW
mysql> delete from t1 where id=1;
ERROR 62 (00000): Code: 62, e.displayText() = DB::Exception: Syntax error: failed at position 1 ('delete'): delete from t1 where id=1. Expected one of: Query, SHOW, SELECT, CREATE, DROP, TRUNCATE, OPTIMIZE TABLE, CREATE POLICY, ALTER POLICY, CREATE PROFILE, ALTER PROFILE, SYSTEM, DESCRIBE, DETACH, ATTACH, ALTER QUOTA, CREATE QUOTA, ALTER ROLE, CREATE ROLE, ALTER ROW POLICY, CREATE ROW POLICY, ALTER SETTINGS PROFILE, CREATE SETTINGS PROFILE, ALTER USER, CREATE USER, GRANT, REVOKE, SET ROLE, SET ROLE DEFAULT, SET DEFAULT ROLE
2, presto
presto 客户端下载:
presto 数据源连接器:https://prestodb.io/docs/current/connector.html presto hive kerberos : https://prestodb.io/docs/current/connector/hive-security.html
2.1, 安装服务
安装文档:https://prestodb.io/docs/current/installation/deployment.html#installing-presto docker镜像-选择presto版本:https://hub.docker.com/r/ahanaio/prestodb-sandbox/tags
#获取安装tar包: https://repo1.maven.org/maven2/com/facebook/presto/presto-server/0.266.1/presto-server-0.266.1.tar.gz
#以上下载速度如果慢,可以下载docker镜像 --> 拷贝出安装包 和客户端文件
[root@c7 ~]# docker pull ahanaio/prestodb-sandbox
[root@c7 ~]# docker run -d -p 8080:8080 --name presto ahanaio/prestodb-sandbox
acfe6ed23b0c841dbd89dd106c71c8c42482760ddcf1ca20b1b9ca8af5b59f12
[root@c7 ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
acfe6ed23b0c ahanaio/prestodb-sandbox "/opt/entrypoint.sh …" 2 seconds ago Up 2 seconds 0.0.0.0:8080->8080/tcp presto
[root@c7 ~]# docker exec -it presto bash
bash-4.2# ls -l /opt/
total 24592
-rwxr-xr-x 1 root root 109 Dec 18 03:46 entrypoint.sh
-rwxr-xr-x 1 root root 15551203 Dec 18 03:48 presto-cli
-rw-r--r-- 1 root root 9616559 Dec 18 03:48 presto-jdbc.jar
drwxr-xr-x 1 root root 4096 Dec 18 03:47 presto-server
bash-4.2# ls -l /opt/presto-server/
total 220
-rw-r--r-- 1 root root 191539 Dec 17 23:21 NOTICE
-rw-r--r-- 1 root root 126 Dec 17 23:21 README.txt
drwxr-xr-x 3 root root 4096 Dec 17 23:37 bin
drwxr-xr-x 1 root root 4096 Dec 18 03:48 etc
drwxr-xr-x 2 root root 12288 Dec 18 03:47 lib
drwxr-xr-x 39 root root 4096 Dec 18 03:47 plugin
#客户端登录
bash-4.2# /opt/presto-cli
presto> show catalogs;
Catalog
---------
jmx
memory
system
tpcds
tpch
(5 rows)
Query 20220114_024445_00006_z4s5j, FINISHED, 1 node
Splits: 19 total, 19 done (100.00%)
0:01 [0 rows, 0B] [0 rows/s, 0B/s]
拉取安装包和客户端文件:
查看presto服务版本:
部署集群版
角色 | 主机名 /ip |
coordinator | c76/192.168.56.76 |
worker | c7/192.168.56.7 |
worker节点
[root@c7 ~]# cd presto-package/opt/
[root@c7 opt]# ls
entrypoint.sh presto-cli presto-jdbc.jar presto-server
[root@c7 opt]# cd presto-server/
[root@c7 presto-server]# ls
bin etc lib NOTICE plugin README.txt
[root@c7 presto-server]# ls etc/
catalog config.properties config.properties.example jvm.config jvm.config.example node.properties
[root@c7 presto-server]# ls etc/catalog/
jmx.properties memory.properties tpcds.properties tpch.properties
[root@c7 presto-server]# cat etc/config.properties
#worker
coordinator=false
http-server.http.port=8080
#query.max-memory=1GB
#query.max-memory-per-node=1GB
#query.max-total-memory-per-node=1GB
discovery.uri=http://192.168.56.76:8080
[root@c7 presto-server]# cat etc/node.properties
node.environment=test
node.data-dir=/var/presto/data
node.id=c7
##启动服务:(此处为单节点)
[root@c7 presto-server]# ./bin/launcher start
Started as 22096
[root@c7 presto-server]# ls
bin etc lib NOTICE plugin README.txt
[root@c7 presto-server]# ls
bin etc lib NOTICE plugin README.txt
##查看数据目录,日志文件
[root@c7 presto-server]# ls /var/presto/data/
etc plugin var
[root@c7 presto-server]# ls /var/presto/data/var/
log run
[root@c7 presto-server]# ls /var/presto/data/var/log/
http-request.log launcher.log server.log
[root@c7 presto-server]# tail /var/presto/data/var/log/server.log
2022-01-14T11:12:55.792+0800 INFO main com.facebook.presto.server.PluginManager -- Loading plugin /var/presto/data/plugin/ttl-fetchers --
2022-01-14T11:12:55.890+0800 INFO main com.facebook.presto.server.PluginManager Installing com.facebook.presto.nodettlfetchers.PrestoNodeTtlFetcherPlugin
2022-01-14T11:12:55.898+0800 INFO main com.facebook.presto.server.PluginManager Registering Ttl fetcher factory infinite
2022-01-14T11:12:55.898+0800 INFO main com.facebook.presto.server.PluginManager -- Finished loading plugin /var/presto/data/plugin/ttl-fetchers --
2022-01-14T11:12:55.923+0800 INFO main com.facebook.presto.metadata.StaticCatalogStore -- Loading catalog properties etc/catalog/jmx.properties --
2022-01-14T11:12:55.926+0800 INFO main com.facebook.presto.metadata.StaticCatalogStore -- Loading catalog jmx --
2022-01-14T11:12:56.310+0800 INFO main Bootstrap PROPERTY DEFAULT RUNTIME DESCRIPTION
2022-01-14T11:12:56.310+0800 INFO main Bootstrap jmx.dump-period 10.00s 10.00s
2022-01-14T11:12:56.310+0800 INFO main Bootstrap jmx.dump-tables [] []
2022-01-14T11:12:56.310+0800 INFO main Bootstrap jmx.max-entries 86400 86400
[root@c7 presto-server]#
[root@c7 presto-server]# ss -nltp |grep 80
LISTEN 0 128 [::]:8080 [::]:* users:(("presto-server",pid=22096,fd=181))
coordinator节点
[root@c76 presto-server]# cat etc/config.properties
#单节点
#coordinator=true
#node-scheduler.include-coordinator=true
#http-server.http.port=8080
#discovery-server.enabled=true
#discovery.uri=http://localhost:8080
#master节点
coordinator=true
node-scheduler.include-coordinator=false
http-server.http.port=8080
#query.max-memory=1GB
#query.max-memory-per-node=1GB
#query.max-total-memory-per-node=1GB
discovery-server.enabled=true
discovery.uri=http://192.168.56.76:8080
[root@c76 presto-server]# cat etc/node.properties
node.environment=test
node.data-dir=/var/presto/data
node.id=c76
客户端连接 master节点才能查询:
2.2, 连接hive
master节点和worker节点:同步配置文件 etc/catalog/hive.properties
注意:etc/catalog/xx.properties 中的文件名xx, 对应了show catalogs中的名称
#配置hive连接( 文件名就是catalog名称 )
[root@daas9 ~]# cat /export/presto-server/etc/catalog/hive.properties
connector.name=hive-hadoop2
hive.metastore.uri=thrift://10.1.0.11:9083
hive.config.resources=/export/common/hadoop/conf/hdfs-site.xml,/export/common/hadoop/conf/core-site.xml,/export/common/hive/conf/hive-site.xml
hive.hdfs.impersonation.enabled=true
hive.metastore.authentication.type=NONE
#登录presto客户端,测试sql语句
[root@daas9 ~]# su hdfs
[root@daas9 ~]# chmod +x /tmp/presto-cli-0.266.1-executable.jar
[hdfs@daas9 root]$ /tmp/presto-cli-0.266.1-executable.jar --server localhost:8081
presto> show catalogs;
Catalog
---------
hive
system
(2 rows)
Query 20211228_085013_00028_5hy6d, FINISHED, 1 node
Splits: 19 total, 19 done (100.00%)
0:01 [0 rows, 0B] [0 rows/s, 0B/s]
presto> show schemas from hive;
Schema
--------------------
default
information_schema
t1
(3 rows)
Query 20211228_085035_00029_5hy6d, FINISHED, 2 nodes
Splits: 19 total, 19 done (100.00%)
400ms [3 rows, 42B] [7 rows/s, 105B/s]
presto> show tables from hive.t1;
Table
-------
per
(1 row)
Query 20211228_085049_00030_5hy6d, FINISHED, 2 nodes
Splits: 19 total, 19 done (100.00%)
423ms [1 rows, 15B] [2 rows/s, 35B/s]
presto> select * from hive.t1.per;
id
----
1
2
(2 rows)
Query 20211228_085059_00031_5hy6d, FINISHED, 1 node
Splits: 18 total, 18 done (100.00%)
0:02 [2 rows, 24B] [1 rows/s, 14B/s]
#插入数据,并查询
presto> insert into hive.t1.per values(3);
INSERT: 1 row
Query 20211228_085208_00032_5hy6d, FINISHED, 2 nodes
Splits: 35 total, 35 done (100.00%)
0:01 [0 rows, 0B] [0 rows/s, 0B/s]
presto> select * from hive.t1.per;
id
----
1
3
2
(3 rows)
Query 20211228_085212_00033_5hy6d, FINISHED, 1 node
Splits: 19 total, 19 done (100.00%)
185ms [3 rows, 46B] [16 rows/s, 248B/s]
2.3, 连接mysql
master节点和worker节点:同步配置文件 etc/catalog/mysql.properties
注意:etc/catalog/xx.properties 中的文件名xx, 对应了show catalogs中的名称
[root@c7 catalog]# cat mysql.properties
connector.name=mysql
connection-url=jdbc:mysql://192.168.56.1:3306
connection-user=root
connection-password=root
[root@c7 catalog]# /root/presto-cli-0.266.1-executable.jar --server c76:8080
presto> select * from mysql.test.t1;
id
----
1
2
3
(3 rows)
Query 20220113_125210_00009_ey72y, FINISHED, 1 node
Splits: 17 total, 17 done (100.00%)
0:04 [3 rows, 0B] [0 rows/s, 0B/s]
presto> show catalogs;
Catalog
---------
jmx
memory
mysql
system
tpcds
tpch
(6 rows)
Query 20220113_125228_00010_ey72y, FINISHED, 1 node
Splits: 19 total, 19 done (100.00%)
438ms [0 rows, 0B] [0 rows/s, 0B/s]
presto> show schemas from mysql;
Schema
-------------------------
apolloconfigdb
sys
test
uas
(4 rows)
Query 20220113_125238_00011_ey72y, FINISHED, 2 nodes
Splits: 19 total, 19 done (100.00%)
0:01 [3 rows, 202B] [14 rows/s, 219B/s]
presto> show tables from mysql.test;
Table
-----------
discounts
t1
(2 rows)
Query 20220113_125250_00012_ey72y, FINISHED, 2 nodes
Splits: 19 total, 19 done (100.00%)
0:01 [2 rows, 39B] [2 rows/s, 57B/s]
presto> select * from mysql.test.t1;
id
----
1
2
3
(3 rows)
Query 20220113_125258_00013_ey72y, FINISHED, 1 node
Splits: 17 total, 17 done (100.00%)
375ms [3 rows, 0B] [7 rows/s, 0B/s]