Commit 5f31083

MySQL Type Consolidation (#115)
MySQL:
- Added generator function for MySQL serverId
- Logging cleanup
- Using JsonContainer for JSON values in MySQL
- Unified type mappings for replicated and snapshot MySQL data types
- Added test run configuration to GitHub Actions
- Mapping all integer types to bigint
- Handling SET data types as a JSON array
- Added test for float mapping
1 parent ac03d8f commit 5f31083

File tree

17 files changed, +1105 -67 lines changed

.changeset/lemon-terms-play.md

Lines changed: 8 additions & 0 deletions
@@ -0,0 +1,8 @@
+---
+'@powersync/service-module-mysql': minor
+---
+
+Generate random serverId based on syncrule id for MySQL replication client
+Consolidated type mappings between snapshot and replicated values
+Enabled MySQL tests in CI
+
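Note: the serverId generator itself lives in one of the commit's other files and is not shown in the hunks on this page. Purely as an illustrative sketch of the idea described above (the function name and digit layout are hypothetical, not the commit's code), it could look like this in TypeScript:

// Hypothetical sketch: derive a MySQL replication server_id that embeds the sync
// rule id but adds a random suffix so concurrent replication clients rarely collide.
export function randomServerId(syncRuleId: number): number {
  // Random 4-digit suffix appended to the sync rule id.
  const suffix = Math.floor(Math.random() * 10_000);
  // Keep the result inside MySQL's unsigned 32-bit server_id range (1 .. 2^32 - 1).
  return ((syncRuleId * 10_000 + suffix) % 0xffffffff) + 1;
}

Embedding the sync rule id keeps the value traceable, while the random suffix avoids collisions between clients replicating concurrently.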

.github/workflows/test.yml

Lines changed: 65 additions & 0 deletions
@@ -143,3 +143,68 @@ jobs:

       - name: Test
         run: pnpm test --filter='./modules/module-postgres'
+
+  run-mysql-tests:
+    name: MySQL Test
+    runs-on: ubuntu-latest
+    needs: run-core-tests
+
+    strategy:
+      fail-fast: false
+      matrix:
+        mysql-version: [ 8.0, 8.4 ]
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Start MySQL
+        run: |
+          docker run \
+            --name MySQLTestDatabase \
+            -e MYSQL_ROOT_PASSWORD=mypassword \
+            -e MYSQL_DATABASE=mydatabase \
+            -p 3306:3306 \
+            -d mysql:${{ matrix.mysql-version }} \
+            --log-bin=/var/lib/mysql/mysql-bin.log \
+            --gtid_mode=ON \
+            --enforce_gtid_consistency=ON
+
+      - name: Start MongoDB
+        uses: supercharge/[email protected]
+        with:
+          mongodb-version: '6.0'
+          mongodb-replica-set: test-rs
+
+      - name: Setup NodeJS
+        uses: actions/setup-node@v4
+        with:
+          node-version-file: '.nvmrc'
+
+      - uses: pnpm/action-setup@v4
+        name: Install pnpm
+        with:
+          version: 9
+          run_install: false
+
+      - name: Get pnpm store directory
+        shell: bash
+        run: |
+          echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV
+
+      - uses: actions/cache@v3
+        name: Setup pnpm cache
+        with:
+          path: ${{ env.STORE_PATH }}
+          key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }}
+          restore-keys: |
+            ${{ runner.os }}-pnpm-store-
+
+      - name: Install dependencies
+        run: pnpm install
+
+      - name: Build
+        shell: bash
+        run: pnpm build
+
+      - name: Test
+        run: pnpm test --filter='./modules/module-mysql'

modules/module-mysql/dev/config/sync_rules.yaml

Lines changed: 2 additions & 4 deletions
@@ -3,10 +3,8 @@
 # Note that changes to this file are not watched.
 # The service needs to be restarted for changes to take effect.

-# Note that specifying the schema is currently required due to the default
-# schema being specified as `public`, but in mysql the schema is the database name
 bucket_definitions:
   global:
     data:
-      - SELECT * FROM mydatabase.lists
-      - SELECT * FROM mydatabase.todos
+      - SELECT * FROM lists
+      - SELECT * FROM todos

modules/module-mysql/package.json

Lines changed: 2 additions & 1 deletion
@@ -32,7 +32,8 @@
     "@powersync/service-core": "workspace:*",
     "@powersync/service-sync-rules": "workspace:*",
     "@powersync/service-types": "workspace:*",
-    "@powersync/mysql-zongji": "0.0.0-dev-20241023144335",
+    "@powersync/service-jsonbig": "workspace:*",
+    "@powersync/mysql-zongji": "0.0.0-dev-20241031142605",
     "semver": "^7.5.4",
    "async": "^3.2.4",
    "mysql2": "^3.11.0",

modules/module-mysql/src/common/mysql-to-sqlite.ts

Lines changed: 147 additions & 8 deletions
@@ -2,16 +2,155 @@ import * as sync_rules from '@powersync/service-sync-rules';
 import { ExpressionType } from '@powersync/service-sync-rules';
 import { ColumnDescriptor } from '@powersync/service-core';
 import mysql from 'mysql2';
+import { JSONBig, JsonContainer } from '@powersync/service-jsonbig';
+import { ColumnDefinition, TableMapEntry } from '@powersync/mysql-zongji';

-export function toSQLiteRow(row: Record<string, any>, columns?: Map<string, ColumnDescriptor>): sync_rules.SqliteRow {
+export enum ADDITIONAL_MYSQL_TYPES {
+  DATETIME2 = 18,
+  TIMESTAMP2 = 17,
+  BINARY = 100,
+  VARBINARY = 101,
+  TEXT = 102
+}
+
+export const MySQLTypesMap: { [key: number]: string } = {};
+for (const [name, code] of Object.entries(mysql.Types)) {
+  MySQLTypesMap[code as number] = name;
+}
+for (const [name, code] of Object.entries(ADDITIONAL_MYSQL_TYPES)) {
+  MySQLTypesMap[code as number] = name;
+}
+
+export function toColumnDescriptors(columns: mysql.FieldPacket[]): Map<string, ColumnDescriptor>;
+export function toColumnDescriptors(tableMap: TableMapEntry): Map<string, ColumnDescriptor>;
+
+export function toColumnDescriptors(columns: mysql.FieldPacket[] | TableMapEntry): Map<string, ColumnDescriptor> {
+  const columnMap = new Map<string, ColumnDescriptor>();
+  if (Array.isArray(columns)) {
+    for (const column of columns) {
+      columnMap.set(column.name, toColumnDescriptorFromFieldPacket(column));
+    }
+  } else {
+    for (const column of columns.columns) {
+      columnMap.set(column.name, toColumnDescriptorFromDefinition(column));
+    }
+  }
+
+  return columnMap;
+}
+
+export function toColumnDescriptorFromFieldPacket(column: mysql.FieldPacket): ColumnDescriptor {
+  let typeId = column.type!;
+  const BINARY_FLAG = 128;
+  const MYSQL_ENUM_FLAG = 256;
+  const MYSQL_SET_FLAG = 2048;
+
+  switch (column.type) {
+    case mysql.Types.STRING:
+      if (((column.flags as number) & BINARY_FLAG) !== 0) {
+        typeId = ADDITIONAL_MYSQL_TYPES.BINARY;
+      } else if (((column.flags as number) & MYSQL_ENUM_FLAG) !== 0) {
+        typeId = mysql.Types.ENUM;
+      } else if (((column.flags as number) & MYSQL_SET_FLAG) !== 0) {
+        typeId = mysql.Types.SET;
+      }
+      break;
+
+    case mysql.Types.VAR_STRING:
+      typeId = ((column.flags as number) & BINARY_FLAG) !== 0 ? ADDITIONAL_MYSQL_TYPES.VARBINARY : column.type;
+      break;
+    case mysql.Types.BLOB:
+      typeId = ((column.flags as number) & BINARY_FLAG) === 0 ? ADDITIONAL_MYSQL_TYPES.TEXT : column.type;
+      break;
+  }
+
+  const columnType = MySQLTypesMap[typeId];
+
+  return {
+    name: column.name,
+    type: columnType,
+    typeId: typeId
+  };
+}
+
+export function toColumnDescriptorFromDefinition(column: ColumnDefinition): ColumnDescriptor {
+  let typeId = column.type;
+
+  switch (column.type) {
+    case mysql.Types.STRING:
+      typeId = !column.charset ? ADDITIONAL_MYSQL_TYPES.BINARY : column.type;
+      break;
+    case mysql.Types.VAR_STRING:
+    case mysql.Types.VARCHAR:
+      typeId = !column.charset ? ADDITIONAL_MYSQL_TYPES.VARBINARY : column.type;
+      break;
+    case mysql.Types.BLOB:
+      typeId = column.charset ? ADDITIONAL_MYSQL_TYPES.TEXT : column.type;
+      break;
+  }
+
+  const columnType = MySQLTypesMap[typeId];
+
+  return {
+    name: column.name,
+    type: columnType,
+    typeId: typeId
+  };
+}
+
+export function toSQLiteRow(row: Record<string, any>, columns: Map<string, ColumnDescriptor>): sync_rules.SqliteRow {
   for (let key in row) {
-    if (row[key] instanceof Date) {
-      const column = columns?.get(key);
-      if (column?.typeId == mysql.Types.DATE) {
-        // Only parse the date part
-        row[key] = row[key].toISOString().split('T')[0];
-      } else {
-        row[key] = row[key].toISOString();
+    // We are very much expecting the column to be there
+    const column = columns.get(key)!;
+
+    if (row[key] !== null) {
+      switch (column.typeId) {
+        case mysql.Types.DATE:
+          // Only parse the date part
+          row[key] = row[key].toISOString().split('T')[0];
+          break;
+        case mysql.Types.DATETIME:
+        case ADDITIONAL_MYSQL_TYPES.DATETIME2:
+        case mysql.Types.TIMESTAMP:
+        case ADDITIONAL_MYSQL_TYPES.TIMESTAMP2:
+          row[key] = row[key].toISOString();
+          break;
+        case mysql.Types.JSON:
+          if (typeof row[key] === 'string') {
+            row[key] = new JsonContainer(row[key]);
+          }
+          break;
+        case mysql.Types.BIT:
+        case mysql.Types.BLOB:
+        case mysql.Types.TINY_BLOB:
+        case mysql.Types.MEDIUM_BLOB:
+        case mysql.Types.LONG_BLOB:
+        case ADDITIONAL_MYSQL_TYPES.BINARY:
+        case ADDITIONAL_MYSQL_TYPES.VARBINARY:
+          row[key] = new Uint8Array(Object.values(row[key]));
+          break;
+        case mysql.Types.LONGLONG:
+          if (typeof row[key] === 'string') {
+            row[key] = BigInt(row[key]);
+          } else if (typeof row[key] === 'number') {
+            // Zongji returns BIGINT as a number when it can be represented as a number
+            row[key] = BigInt(row[key]);
+          }
+          break;
+        case mysql.Types.TINY:
+        case mysql.Types.SHORT:
+        case mysql.Types.LONG:
+        case mysql.Types.INT24:
+          // Handle all integer values a BigInt
+          if (typeof row[key] === 'number') {
+            row[key] = BigInt(row[key]);
+          }
+          break;
+        case mysql.Types.SET:
+          // Convert to JSON array from string
+          const values = row[key].split(',');
+          row[key] = JSONBig.stringify(values);
+          break;
       }
     }
   }
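Not part of the commit, but a minimal usage sketch of how the consolidated helpers fit together on the snapshot side (the snapshotTable helper and its query are illustrative): the column map from toColumnDescriptors feeds toSQLiteRow, while the TableMapEntry overload plays the same role for replicated binlog rows.

import * as mysqlPromise from 'mysql2/promise';
import { RowDataPacket } from 'mysql2';
import { toColumnDescriptors, toSQLiteRow } from './mysql-to-sqlite.js';

// Illustrative helper: snapshot a table and normalize each row the same way the
// replication path does, so both code paths produce identical SQLite values.
export async function snapshotTable(connection: mysqlPromise.Connection, table: string) {
  const [rows, fields] = await connection.query<RowDataPacket[]>(`SELECT * FROM ${table}`);

  // One column map per result set; it distinguishes the BINARY/VARBINARY/TEXT/ENUM/SET
  // variants that share a base MySQL type code.
  const columns = toColumnDescriptors(fields);

  // toSQLiteRow mutates the row in place, so hand it a copy.
  return rows.map((row) => toSQLiteRow({ ...row }, columns));
}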

modules/module-mysql/src/common/read-executed-gtid.ts

Lines changed: 0 additions & 3 deletions
@@ -4,7 +4,6 @@ import { gte } from 'semver';

 import { ReplicatedGTID } from './ReplicatedGTID.js';
 import { getMySQLVersion } from './check-source-configuration.js';
-import { logger } from '@powersync/lib-services-framework';

 /**
  * Gets the current master HEAD GTID
@@ -33,8 +32,6 @@ export async function readExecutedGtid(connection: mysqlPromise.Connection): Pro
     offset: parseInt(binlogStatus.Position)
   };

-  logger.info('Succesfully read executed GTID', { position });
-
   return new ReplicatedGTID({
     // The head always points to the next position to start replication from
     position,