generated from fun-stack/example
-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathdatabase.go
585 lines (488 loc) · 15.6 KB
/
database.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
package main
import (
"context"
"database/sql"
"fmt"
"os"
"time"
stdlib "github.com/multiprocessio/go-sqlite3-stdlib"
"github.com/pkg/errors"
"golang.org/x/exp/slog"
)
// newsDatabase bundles the two SQLite handles used by this service:
// the frontpage (crawler) database and the upvotes database.
type newsDatabase struct {
	// NOTE(review): this embedded *sql.DB is never assigned in this file
	// (openNewsDatabase only sets the db field below) — confirm whether the
	// embedding is still needed before relying on its promoted methods.
	*sql.DB
	db            *sql.DB // frontpage.sqlite: stories and dataset tables
	upvotesDB     *sql.DB // upvotes.sqlite: votes table and positions view
	sqliteDataDir string  // directory containing both database files
}
/* Attach the frontpage dataset for each context, to solve "no such table" errors,
per suggestion here https://stackoverflow.com/users/saves/2573589
*/
// upvotesDBWithDataset checks out a connection from the upvotes pool with the
// frontpage dataset attached read-only, so queries can join against it while
// the crawler keeps writing to the frontpage database.
//
// A re-attach on a reused pooled connection fails with "database frontpage is
// already in use"; that error is treated as success. On any other attach
// failure the connection is closed (returned to the pool) and nil is
// returned, so the caller cannot leak a half-initialized connection.
func (ndb newsDatabase) upvotesDBWithDataset(ctx context.Context) (*sql.Conn, error) {
	conn, err := ndb.upvotesDB.Conn(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "ndb.upvotesDB.Conn")
	}

	frontpageDatabaseFilename := fmt.Sprintf("%s/%s", ndb.sqliteDataDir, sqliteDataFilename)

	// attach frontpage database as readonly. This way, we can write to the upvotes database while the crawler
	// is writing to the frontpage database.
	s := fmt.Sprintf("attach database 'file:%s?mode=ro' as frontpage", frontpageDatabaseFilename)

	_, err = conn.ExecContext(ctx, s)
	if err != nil && err.Error() != "database frontpage is already in use" {
		// Previously the failed connection was returned alongside the error,
		// which leaked it when callers discarded it on the error path.
		_ = conn.Close()
		return nil, errors.Wrap(err, "attach frontpage database")
	}

	return conn, nil
}
// attachFrontpageDB attaches the frontpage database read-only to the upvotes
// connection pool. Attaching a database that is already attached is treated
// as success.
func (ndb newsDatabase) attachFrontpageDB() error {
	path := fmt.Sprintf("%s/%s", ndb.sqliteDataDir, sqliteDataFilename)
	attach := fmt.Sprintf("attach database 'file:%s?mode=ro' as frontpage", path)
	if _, err := ndb.upvotesDB.Exec(attach); err != nil && err.Error() != "database frontpage is already in use" {
		return errors.Wrap(err, "attach frontpage database")
	}
	return nil
}
// close shuts down both database handles. Previously only the frontpage
// handle (ndb.db) was closed, leaking the upvotes connection pool.
func (ndb newsDatabase) close() {
	if ndb.db != nil {
		ndb.db.Close()
	}
	if ndb.upvotesDB != nil {
		ndb.upvotesDB.Close()
	}
}
// sqliteDataFilename is the name of the frontpage database file inside
// sqliteDataDir.
const sqliteDataFilename = "frontpage.sqlite"
func createDataDirIfNotExists(sqliteDataDir string) {
if _, err := os.Stat(sqliteDataDir); errors.Is(err, os.ErrNotExist) {
err := os.Mkdir(sqliteDataDir, os.ModePerm)
if err != nil {
LogFatal(slog.Default(), "create sqlite data dir", err)
}
}
}
// initFrontpageDB creates the stories/dataset schema on the frontpage
// database and applies best-effort migrations.
//
// The seed statements are idempotent (CREATE ... IF NOT EXISTS / DROP ... IF
// EXISTS) and any failure there aborts initialization. The alter statements
// are one-shot migrations whose errors are deliberately ignored: once a
// column or index already exists, re-running them fails harmlessly.
func (ndb newsDatabase) initFrontpageDB() error {
	seedStatements := []string{
		`
CREATE TABLE IF NOT EXISTS stories(
id int primary key
, by text not null
, title text not null
, url text not null
, timestamp int not null
, job boolean not null default false
);
`,
		`
CREATE TABLE IF NOT EXISTS dataset (
id integer not null
, score integer not null
, descendants integer not null
, sampleTime integer not null
, submissionTime integer not null
, topRank integer
, newRank integer
, bestRank integer
, askRank integer
, showRank integer
, qnRank integer
, cumulativeUpvotes integer not null default 0
, cumulativeExpectedUpvotes real not null default 0
, flagged boolean not null default false
, dupe boolean not null default false
, ageApprox int not null
, penalty real not null default 0
, currentPenalty real
, rawRank int
, upvoteRate float not null default 1
, upvoteRateWindow int
);
`,
		`
CREATE INDEX IF NOT EXISTS dataset_sampletime_id
ON dataset(sampletime, id);
`,
		`
CREATE INDEX IF NOT EXISTS dataset_id_sampletime
ON dataset(id, sampletime);
`,
		`
CREATE INDEX IF NOT EXISTS dataset_id
ON dataset(id);
`,
		`
drop view if exists previousCrawl
`,
		`PRAGMA auto_vacuum=NONE`,
	}

	// Seed statements must all succeed, in order.
	for _, s := range seedStatements {
		_, err := ndb.db.Exec(s)
		if err != nil {
			return errors.Wrapf(err, "seeding database: %s", s)
		}
	}

	alterStatements := []string{
		`alter table dataset add column upvoteRateWindow int`,
		`alter table dataset add column upvoteRate float default 0 not null`,
		`alter table stories add column archived boolean default false not null`,
		`DROP INDEX if exists archived`,
		`CREATE INDEX IF NOT EXISTS dataset_sampletime on dataset(sampletime)`,
		`update dataset set upvoteRate = ( cumulativeUpvotes + 2.3 ) / ( cumulativeExpectedUpvotes + 2.3) where upvoteRate = 0`,
	}

	// Migration errors (e.g. "duplicate column name") are expected on
	// re-runs and intentionally ignored.
	for _, s := range alterStatements {
		_, _ = ndb.db.Exec(s)
	}

	return nil
}
// initUpvotesDB creates the votes table, its indexes, and the positions view
// on the upvotes database, then attaches the frontpage dataset read-only.
//
// Seed statements are idempotent and must all succeed. The final attach is
// delegated to attachFrontpageDB, which tolerates the frontpage database
// already being attached — previously a re-attach here ("database frontpage
// is already in use") surfaced as a spurious initialization error.
func (ndb newsDatabase) initUpvotesDB() error {
	seedStatements := []string{
		`create table if not exists votes(userID int not null, storyID int not null, direction int8 not null, entryTime int not null, entryUpvotes int not null, entryExpectedUpvotes int not null)`,
		`create index if not exists votes_ids on votes(storyID, userID)`,
		`create index if not exists votes_storyID on votes(storyID)`,
		`create index if not exists votes_userid on votes(userID)`,
		`drop view if exists positions`,
		`create view if not exists positions as
with exits as (
select
votes.rowID as positionID
, votes.*
, first_value(entryTime) over ( partition by userID, storyID order by entryTime rows between current row and unbounded following exclude current row) as exitTime
, first_value(entryUpvotes) over ( partition by userID, storyID order by entryTime rows between current row and unbounded following exclude current row) as exitUpvotes
, first_value(entryExpectedUpvotes) over ( partition by userID, storyID order by entryTime rows between current row and unbounded following exclude current row) as exitExpectedUpvotes
from votes
) select * from exits where direction != 0
`,
	}

	for _, s := range seedStatements {
		_, err := ndb.upvotesDB.Exec(s)
		if err != nil {
			return errors.Wrapf(err, "seeding votes database: %s", s)
		}
	}

	// attach the dataset table (read-only), tolerating re-attachment
	return ndb.attachFrontpageDB()
}
// openNewsDatabase opens (creating the data directory if necessary) the
// frontpage and upvotes SQLite databases under sqliteDataDir, initializes
// both schemas, and imports penalties data.
//
// Both handles use the "sqlite3_ext" driver (go-sqlite3-stdlib extension
// functions) with WAL journaling. On error the partially-initialized
// newsDatabase is returned alongside the error.
func openNewsDatabase(sqliteDataDir string) (newsDatabase, error) {
	createDataDirIfNotExists(sqliteDataDir)

	frontpageDatabaseFilename := fmt.Sprintf("%s/%s", sqliteDataDir, sqliteDataFilename)

	ndb := newsDatabase{sqliteDataDir: sqliteDataDir}

	var err error

	// Register some extension functions from go-sqlite3-stdlib so we can actually do math in sqlite3.
	stdlib.Register("sqlite3_ext")

	// Connect to database
	ndb.db, err = sql.Open("sqlite3_ext", fmt.Sprintf("file:%s?_journal_mode=WAL", frontpageDatabaseFilename))
	if err != nil {
		return ndb, errors.Wrap(err, "open frontpageDatabase")
	}

	// err = ndb.registerExtensions()
	// if err != nil {
	// 	return ndb, errors.Wrap(err, "ndb.registerExtensions()")
	// }

	// Frontpage schema must exist before the upvotes DB attaches to it.
	err = ndb.initFrontpageDB()
	if err != nil {
		return ndb, errors.Wrap(err, "init frontpageDatabase")
	}

	{
		upvotesDatabaseFilename := fmt.Sprintf("%s/upvotes.sqlite", sqliteDataDir)
		ndb.upvotesDB, err = sql.Open("sqlite3_ext", fmt.Sprintf("file:%s?_journal_mode=WAL", upvotesDatabaseFilename))
		if err != nil {
			return ndb, errors.Wrap(err, "open upvotesDB")
		}
		err = ndb.initUpvotesDB()
		if err != nil {
			return ndb, errors.Wrap(err, "initUpvotesDB")
		}
	}

	// No need to prepare statements anymore as we're removing prepared statements
	err = ndb.importPenaltiesData(sqliteDataDir)
	return ndb, err
}
// rankToNullableInt converts a rank to sql.NullInt32, mapping 0 (a story
// absent from that ranking) to SQL NULL.
func rankToNullableInt(rank int) (result sql.NullInt32) {
	if rank == 0 {
		return sql.NullInt32{}
	}
	return sql.NullInt32{Int32: int32(rank), Valid: true}
}
// insertDataPoint writes one crawl sample for a story into the dataset
// table, using the caller's transaction. Zero ranks are stored as NULL.
func (ndb newsDatabase) insertDataPoint(tx *sql.Tx, d dataPoint) error {
	const insertSQL = `
INSERT INTO dataset (
id
, score
, descendants
, sampleTime
, submissionTime
, ageApprox
, topRank
, newRank
, bestRank
, askRank
, showRank
, cumulativeUpvotes
, cumulativeExpectedUpvotes
, flagged
, dupe
) VALUES (
?, ?, ?, ?, ?,
?, ?, ?, ?, ?,
?, ?, ?, ?, ?
)
`
	_, err := tx.Exec(insertSQL,
		d.id, d.score, d.descendants,
		d.sampleTime, d.submissionTime, d.ageApprox,
		rankToNullableInt(d.ranks[0]),
		rankToNullableInt(d.ranks[1]),
		rankToNullableInt(d.ranks[2]),
		rankToNullableInt(d.ranks[3]),
		rankToNullableInt(d.ranks[4]),
		d.cumulativeUpvotes, d.cumulativeExpectedUpvotes,
		d.flagged, d.dupe,
	)
	return err
}
// insertOrReplaceStory upserts a story row, refreshing title, url, and job
// on conflict, and returns the number of rows affected.
func (ndb newsDatabase) insertOrReplaceStory(tx *sql.Tx, story Story) (int64, error) {
	const upsert = `
INSERT INTO stories (id, by, title, url, timestamp, job) VALUES (?, ?, ?, ?, ?, ?)
ON CONFLICT DO UPDATE SET title = excluded.title, url = excluded.url, job = excluded.job
`
	result, err := tx.Exec(upsert, story.ID, story.By, story.Title, story.URL, story.SubmissionTime, story.Job)
	if err != nil {
		return 0, err
	}
	return result.RowsAffected()
}
// selectLastSeenData returns the most recent score, cumulative upvotes,
// cumulative expected upvotes, and sample time recorded for story id,
// using the caller's transaction. sql.ErrNoRows is returned unchanged when
// the story has no data points.
func (ndb newsDatabase) selectLastSeenData(tx *sql.Tx, id int) (int, int, float64, int, error) {
	const query = `
SELECT score, cumulativeUpvotes, cumulativeExpectedUpvotes, sampleTime
FROM dataset
WHERE id = ?
ORDER BY sampleTime DESC LIMIT 1
`
	var (
		score           int
		upvotes         int
		expectedUpvotes float64
		lastSeenTime    int
	)
	err := tx.QueryRow(query, id).Scan(&score, &upvotes, &expectedUpvotes, &lastSeenTime)
	return score, upvotes, expectedUpvotes, lastSeenTime, err
}
// selectLastCrawlTime returns the newest sampleTime in the dataset table,
// or 0 when the table is empty.
func (ndb newsDatabase) selectLastCrawlTime() (int, error) {
	const query = `
SELECT ifnull(max(sampleTime),0) from dataset
`
	var lastCrawl int
	err := ndb.db.QueryRow(query).Scan(&lastCrawl)
	return lastCrawl, err
}
// selectStoriesToArchive returns up to 10 distinct story IDs whose samples
// are at least 21 days old with score above 2, i.e. candidates for
// archiving.
//
// The context is checked before the query, per row, and after iteration so a
// cancelled request aborts promptly. Context errors are detected with
// errors.Is rather than ==, since drivers may wrap
// context.DeadlineExceeded/Canceled before returning them.
func (ndb newsDatabase) selectStoriesToArchive(ctx context.Context) ([]int, error) {
	var storyIDs []int

	sqlStatement := `
with latest as (
select id, score, sampleTime from dataset
where sampleTime <= strftime('%s', 'now') - 21*24*60*60
and score > 2
order by sampleTime
)
select distinct(id) from latest limit 10
`

	// Check context before query
	if err := ctx.Err(); err != nil {
		return nil, errors.Wrap(err, "context cancelled before query")
	}

	rows, err := ndb.db.QueryContext(ctx, sqlStatement)
	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
			return nil, errors.Wrap(err, "context cancelled during query")
		}
		return storyIDs, errors.Wrap(err, "selectStoriesToArchive QueryContext")
	}
	defer rows.Close()

	for rows.Next() {
		// Check context in loop
		if err := ctx.Err(); err != nil {
			return nil, errors.Wrap(err, "context cancelled during row iteration")
		}
		var storyID int
		if err := rows.Scan(&storyID); err != nil {
			return nil, errors.Wrap(err, "scan story ID")
		}
		storyIDs = append(storyIDs, storyID)
	}

	if err := rows.Err(); err != nil {
		if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
			return nil, errors.Wrap(err, "context cancelled after row iteration")
		}
		return nil, errors.Wrap(err, "iterating story IDs")
	}

	return storyIDs, nil
}
// purgeStory deletes a story and all of its data points in a single
// transaction.
//
// The error result is named so the deferred commit/rollback handler can
// propagate tx.Commit()'s error to the caller. With the previous unnamed
// return, `err = tx.Commit()` in the defer assigned a dead local and any
// commit failure was silently lost.
func (ndb newsDatabase) purgeStory(ctx context.Context, storyID int) (err error) {
	tx, err := ndb.db.Begin()
	if err != nil {
		return errors.Wrap(err, "starting transaction")
	}

	defer func() {
		if p := recover(); p != nil {
			_ = tx.Rollback()
			panic(p) // re-panic after rollback
		} else if err != nil {
			_ = tx.Rollback()
		} else {
			err = tx.Commit() // propagated via the named return
		}
	}()

	// Delete all data points
	_, err = tx.ExecContext(ctx, `DELETE FROM dataset WHERE id = ?`, storyID)
	if err != nil {
		return errors.Wrap(err, "delete from dataset")
	}

	// Delete story record
	_, err = tx.ExecContext(ctx, `DELETE FROM stories WHERE id = ?`, storyID)
	if err != nil {
		return errors.Wrap(err, "delete from stories")
	}

	return nil
}
// selectStoryDetails returns the story row joined with its most recent
// dataset sample. The scan order must match the SELECT column order exactly.
func (ndb newsDatabase) selectStoryDetails(id int) (Story, error) {
	var story Story

	query := `
SELECT
id
, by
, title
, url
, submissionTime
, timestamp as originalSubmissionTime
, unixepoch() - sampleTime + coalesce(ageApprox, sampleTime - submissionTime)
, score
, descendants
, cumulativeUpvotes
, cumulativeExpectedUpvotes
, topRank
, qnRank
, rawRank
, flagged
, dupe
, job
, archived
from stories
JOIN dataset
USING (id)
WHERE id = ?
ORDER BY sampleTime DESC
LIMIT 1
`

	err := ndb.db.QueryRow(query, id).Scan(
		&story.ID, &story.By, &story.Title, &story.URL,
		&story.SubmissionTime, &story.OriginalSubmissionTime, &story.AgeApprox,
		&story.Score, &story.Comments,
		&story.CumulativeUpvotes, &story.CumulativeExpectedUpvotes,
		&story.TopRank, &story.QNRank, &story.RawRank,
		&story.Flagged, &story.Dupe, &story.Job, &story.Archived,
	)
	return story, err
}
// resetConnection replaces both database handles with freshly opened ones.
// New connections are created first so a failure leaves the old handles
// intact; the superseded handles are then closed to release their connection
// pools (previously they were dropped without Close, leaking them).
//
// sql.DB.Close is safe on a live pool: it closes idle connections
// immediately and in-use connections as they are returned.
func (ndb *newsDatabase) resetConnection() error {
	// Create new connections first
	frontpageDatabaseFilename := fmt.Sprintf("%s/%s", ndb.sqliteDataDir, sqliteDataFilename)
	newDB, err := sql.Open("sqlite3_ext", fmt.Sprintf("file:%s?_journal_mode=WAL", frontpageDatabaseFilename))
	if err != nil {
		return errors.Wrap(err, "reopen frontpageDatabase")
	}

	upvotesDatabaseFilename := fmt.Sprintf("%s/upvotes.sqlite", ndb.sqliteDataDir)
	newUpvotesDB, err := sql.Open("sqlite3_ext", fmt.Sprintf("file:%s?_journal_mode=WAL", upvotesDatabaseFilename))
	if err != nil {
		newDB.Close()
		return errors.Wrap(err, "reopen upvotesDB")
	}

	oldDB, oldUpvotesDB := ndb.db, ndb.upvotesDB

	// Swap to new connections
	ndb.db = newDB
	ndb.upvotesDB = newUpvotesDB

	// Release the old pools; errors on close are not actionable here.
	if oldDB != nil {
		_ = oldDB.Close()
	}
	if oldUpvotesDB != nil {
		_ = oldUpvotesDB.Close()
	}

	return nil
}
// storyCount returns the number of distinct story IDs in the stories table,
// using the caller's transaction.
//
// The previous `row == nil` check was dead code: (*sql.Tx).QueryRow never
// returns nil — query errors are deferred until Scan.
func (ndb newsDatabase) storyCount(tx *sql.Tx) (int, error) {
	var count int

	sqlStatement := `
SELECT count(distinct id) from stories
`

	if err := tx.QueryRow(sqlStatement).Scan(&count); err != nil {
		// We can't reset connection during transaction, so just return the error
		return 0, errors.Wrap(err, "scanning story count")
	}

	return count, nil
}
// vacuumIfNeeded logs database size statistics and runs VACUUM when
// freelist fragmentation exceeds 20% of pages, logging before/after stats
// and incrementing the vacuum counter on success.
func (ndb newsDatabase) vacuumIfNeeded(ctx context.Context, logger *slog.Logger) error {
	size, freelist, fragmentation, err := ndb.getDatabaseStats()
	if err != nil {
		return errors.Wrap(err, "getDatabaseStats")
	}

	logger.Info("Database stats",
		"size_mb", float64(size)/(1024*1024),
		"freelist_pages", freelist,
		"fragmentation_pct", fragmentation)

	// Guard clause: nothing to do below the fragmentation threshold.
	if fragmentation <= 20.0 {
		return nil
	}

	logger.Info("Starting vacuum operation",
		"fragmentation_pct", fragmentation)

	startTime := time.Now()
	if _, err := ndb.db.ExecContext(ctx, "VACUUM"); err != nil {
		return errors.Wrap(err, "vacuum database")
	}

	newSize, _, newFragmentation, err := ndb.getDatabaseStats()
	if err != nil {
		return errors.Wrap(err, "getDatabaseStats after vacuum")
	}

	logger.Info("Vacuum completed",
		"duration_seconds", time.Since(startTime).Seconds(),
		"size_before_mb", float64(size)/(1024*1024),
		"size_after_mb", float64(newSize)/(1024*1024),
		"space_reclaimed_mb", float64(size-newSize)/(1024*1024),
		"fragmentation_before", fragmentation,
		"fragmentation_after", newFragmentation)

	vacuumOperationsTotal.Inc()
	return nil
}
// getDatabaseStats reports the frontpage database's total size in bytes, its
// freelist page count, and the freelist as a percentage of all pages
// (rounded to one decimal), computed from SQLite pragma functions.
func (ndb newsDatabase) getDatabaseStats() (size int64, freelist int64, fragmentation float64, err error) {
	statsQuery := `
SELECT
(SELECT page_count FROM pragma_page_count()) *
(SELECT page_size FROM pragma_page_size()) as total_bytes,
(SELECT freelist_count FROM pragma_freelist_count()) as free_pages,
ROUND(
100.0 * (SELECT freelist_count FROM pragma_freelist_count()) /
(SELECT page_count FROM pragma_page_count()), 1
) as fragmentation_pct
`
	err = ndb.db.QueryRow(statsQuery).Scan(&size, &freelist, &fragmentation)
	return size, freelist, fragmentation, err
}
// deleteOldData removes dataset rows older than 30 days and returns how
// many were deleted. Stories whose first datapoint is more than 21 days old
// with score greater than 2 are archived separately, so this sweeps up
// whatever remains — provided archiving is keeping pace.
func (ndb newsDatabase) deleteOldData(ctx context.Context) (int64, error) {
	const deleteSQL = `
delete from dataset where sampleTime <= unixepoch()-30*24*60*60
`
	res, err := ndb.db.ExecContext(ctx, deleteSQL)
	if err != nil {
		return 0, errors.Wrap(err, "executing delete old data query")
	}

	deleted, err := res.RowsAffected()
	if err != nil {
		return 0, errors.Wrap(err, "getting rows affected")
	}
	return deleted, nil
}