// rankcrawler.go

package main

import (
"context"
"database/sql"
"fmt"
"sort"
"strings"
"time"
"github.com/pkg/errors"
"golang.org/x/exp/slog"
)
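
// pageTypeInt identifies one of the HN listing pages (top, new, best, ask,
// show) and is used as an index into a ranksArray.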
type pageTypeInt int
var (
top pageTypeInt = 0
new pageTypeInt = 1
best pageTypeInt = 2
ask pageTypeInt = 3
show pageTypeInt = 4
)
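
// pageTypes maps each pageTypeInt to the page name passed to client.Stories.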
var pageTypes = map[pageTypeInt]string{
top: "top",
new: "new",
best: "best",
ask: "ask",
show: "show",
}

type ranksArray [5]int // the ranks of a story for different pageTypes

type dataPoint struct {
// One datapoint represents the state of a single story at a specific point in time.
// It is one row of the `dataset` table.
id int
score int
descendants int
sampleTime int64
submissionTime int64
ageApprox int64
ranks ranksArray
cumulativeExpectedUpvotes float64
cumulativeUpvotes int
flagged bool
dupe bool
}

// maxElapsedTime is the maximum time in seconds since the last crawl for
// which we still accumulate upvotes. Beyond roughly two minutes, our
// assumption that a story has held its rank since the last crawl becomes
// less and less reasonable.
const maxElapsedTime = 120

// crawlAndPostprocess is called every minute. It crawls the Hacker News
// website, collects every story that appears within the top 90 ranks of any
// page type, stores those ranks, and updates the stories in the DB.
func (app app) crawlAndPostprocess(ctx context.Context) (err error) {
ndb := app.ndb
logger := app.logger
maxRetries := 3
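// Retry the whole crawl transaction a few times: a "sql: Rows are closed"
// error can indicate a stale connection, which we reset before retrying.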
for attempt := 0; attempt < maxRetries; attempt++ {
err = func() (err error) {
tx, e := ndb.db.BeginTx(ctx, nil)
if e != nil {
return errors.Wrap(e, "BeginTX")
}
// Commit or roll back in a deferred function. Because the closure uses a
// named return value, the defer can inspect the error and replace it if
// Commit fails.
defer func() {
if err != nil {
if tx != nil {
if rbErr := tx.Rollback(); rbErr != nil && rbErr != sql.ErrTxDone {
logger.Error("tx.Rollback crawlAndPostprocess", rbErr)
}
}
return
}
logger.Debug("Commit transaction")
if err = tx.Commit(); err != nil {
logger.Error("tx.Commit crawlAndPostprocess", err)
return
}
}()
// Get initial count
initialStoryCount, err := ndb.storyCount(tx)
if err != nil {
return errors.Wrap(err, "initial storyCount")
}
// Perform crawl
sitewideUpvotes, err := app.crawl(ctx, tx)
if err != nil {
return errors.Wrap(err, "crawl")
}
// Get final count
finalStoryCount, err := ndb.storyCount(tx)
if err != nil {
return errors.Wrap(err, "final storyCount")
}
// Run post-processing
if err = app.crawlPostprocess(ctx, tx); err != nil {
return errors.Wrap(err, "crawlPostprocess")
}
// Update metrics after successful transaction
submissionsTotal.Add(finalStoryCount - initialStoryCount)
upvotesTotal.Add(int(sitewideUpvotes))
return nil
}()
if err != nil {
if strings.Contains(err.Error(), "sql: Rows are closed") {
// If this is not the last attempt, try resetting the connection
if attempt < maxRetries-1 {
logger.Info("Resetting database connection due to closed rows", "attempt", attempt+1)
if resetErr := ndb.resetConnection(); resetErr != nil {
return errors.Wrap(resetErr, "reset connection failed")
}
continue
}
}
crawlErrorsTotal.Inc()
return err
}
return nil
}
return errors.New("exceeded maximum retry attempts for crawlAndPostprocess")
}
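
// maxGoroutines bounds the concurrency used when fetching story details from
// the HN API (it is passed to client.GetItems).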
const maxGoroutines = 50
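
// crawl fetches the current rankings and story details from Hacker News,
// computes per-story upvote deltas and expected upvotes for the elapsed
// interval, and inserts one dataPoint row per story into the `dataset` table
// within the given transaction. It returns the estimated number of sitewide
// upvotes since the last crawl (per-story deltas scaled to a per-minute rate).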
func (app app) crawl(ctx context.Context, tx *sql.Tx) (int, error) {
ndb := app.ndb
client := app.hnClient
logger := app.logger
t := time.Now()
defer crawlDuration.UpdateDuration(t)
sampleTime := t.Unix()
storyRanks, err := app.getRanksFromAPI(ctx)
if err != nil {
return 0, errors.Wrap(err, "getRanksFromAPI")
}
// make sure we also get data for every story that was ranked on QN in the previous crawl
idsFromPreviousCrawl, err := app.getQNTopFromPreviousCrawl(ctx, tx)
if err != nil {
return 0, errors.Wrap(err, "getIDSFromPreviousCrawl")
}
for _, id := range idsFromPreviousCrawl {
if _, ok := storyRanks[id]; !ok {
// create an empty ranks array for stories that were not ranked on
// any of the HN pages but were ranked on QN in the last crawl
storyRanks[id] = ranksArray{}
}
}
stories, err := app.scrapeFrontPageStories(ctx)
if err != nil {
return 0, errors.Wrap(err, "scrapeFrontPageStories")
}
uniqueStoryIds := getKeys(storyRanks)
// Now use the API to get details for stories we did not find on the front page
{
missingStoryIDs := make([]int, 0, len(uniqueStoryIds))
for _, id := range uniqueStoryIds {
if _, ok := stories[id]; !ok {
missingStoryIDs = append(missingStoryIDs, id)
}
}
t := time.Now()
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
// get story details
logger.Info("Getting story details from API for stories that were not on the front page", "num_stories", len(uniqueStoryIds), "missing_stories", len(missingStoryIDs))
missingStories, err := client.GetItems(ctx, missingStoryIDs, maxGoroutines)
if err != nil {
return 0, errors.Wrap(err, "client.GetItems")
}
if len(missingStoryIDs) != len(missingStories) {
panic(fmt.Sprintf("Story counts don't add up after downloading missing stories: %d, %d", len(missingStoryIDs), len(missingStories)))
}
for _, s := range missingStories {
stories[s.ID] = ScrapedStory{
Story: Story{
ID: s.ID,
By: s.By,
Title: s.Title,
URL: s.URL,
SubmissionTime: int64(s.Timestamp),
OriginalSubmissionTime: int64(s.Timestamp),
AgeApprox: sampleTime - int64(s.Timestamp),
Score: s.Score,
Comments: s.Descendants,
},
Source: "api",
}
}
// Log warnings if there are inconsistencies between the set of story IDs the
// API says are ranked and the set of stories we were able to fetch details
// for from the API or the scraper.
if len(uniqueStoryIds) != len(getKeys(stories)) {
for _, id := range uniqueStoryIds {
if _, ok := stories[id]; !ok {
logger.Warn("failed to get story details for story", "story_id", id)
}
}
for id := range stories {
if _, ok := storyRanks[id]; !ok {
logger.Warn("found story on top page from scraper but not on top page from API", "story_id", id)
}
}
}
logger.Info("Got story details from API", "nitems", len(missingStoryIDs), slog.Duration("elapsed", time.Since(t)))
}
// For every story, calculate the metrics used for ranking:
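// sitewideUpvotes is the sum of per-story upvote deltas since the last
// crawl, scaled to a per-minute rate.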
var sitewideUpvotes float64
deltaUpvotes := make([]int, len(uniqueStoryIds)) // number of upvotes (since last sample point)
lastCumulativeUpvotes := make([]int, len(uniqueStoryIds)) // last number of upvotes tracked by our crawler
lastCumulativeExpectedUpvotes := make([]float64, len(uniqueStoryIds))
lastSeenTimes := make([]int, len(uniqueStoryIds))
newRankChanges := make([]int, 0, 10)
logger.Info("Inserting stories into DB", "nitems", len(uniqueStoryIds))
// insert stories into DB and update aggregate metrics
STORY:
for i, id := range uniqueStoryIds {
story := stories[id]
// Skip any stories that were not fetched successfully.
if story.ID == 0 {
LogErrorf(logger, "Missing story id in story %d", id)
crawlErrorsTotal.Inc()
continue STORY
}
storyID := story.ID
lastSeenScore, lastSeenUpvotes, lastSeenExpectedUpvotes, lastSeenTime, err := ndb.selectLastSeenData(tx, storyID)
if err != nil {
if !errors.Is(err, sql.ErrNoRows) {
return 0, errors.Wrap(err, "selectLastSeenScore")
}
if story.SubmissionTime == 0 {
panic(story)
}
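// First time we see this story: if it has a rank on the `new` page, record
// its age (seconds since submission) in newRankChanges, which feeds
// expectedUpvoteShareNewPage below.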
if storyRanks[story.ID][1] != 0 {
newRankChanges = append(newRankChanges, int(sampleTime)-int(story.SubmissionTime))
lastSeenTimes[i] = int(story.SubmissionTime)
}
} else {
lastCumulativeUpvotes[i] = lastSeenUpvotes
lastCumulativeExpectedUpvotes[i] = lastSeenExpectedUpvotes
lastSeenTimes[i] = lastSeenTime
elapsedTime := int(sampleTime) - lastSeenTime
if elapsedTime < maxElapsedTime {
deltaUpvotes[i] = story.Score - lastSeenScore
sitewideUpvotes += float64(deltaUpvotes[i]*60) / float64(elapsedTime)
}
}
// save story details in database
_, err = ndb.insertOrReplaceStory(tx, story.Story)
if err != nil {
return 0, errors.Wrap(err, "insertOrReplaceStory")
}
}
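// Second pass: accumulate upvotes and expected upvotes for each story and
// insert one rank/score dataPoint per story.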
logger.Info("Inserting rank data into DB", "nitems", len(uniqueStoryIds))
var sitewideDeltaExpectedUpvotes float64
var sitewideExpectedUpvotesShare float64
if len(newRankChanges) > 0 {
// If there have been N new submissions, each story above rank N has occupied N+1 ranks
// So add a rank change time corresponding to the beginning of the crawl period.
sort.Ints(newRankChanges)
}
for i, id := range uniqueStoryIds {
story := stories[id]
ranks := storyRanks[id]
cumulativeUpvotes := lastCumulativeUpvotes[i]
cumulativeExpectedUpvotes := lastCumulativeExpectedUpvotes[i]
elapsedTime := int(sampleTime) - lastSeenTimes[i]
if elapsedTime < maxElapsedTime {
cumulativeUpvotes += deltaUpvotes[i]
RANKS:
for pt, rank := range ranks {
pageType := pageTypeInt(pt)
if rank == 0 {
continue RANKS
}
exUpvoteShare := 0.0
if pageType == new && len(newRankChanges) > 0 {
exUpvoteShare = expectedUpvoteShareNewPage(rank, elapsedTime, newRankChanges)
} else {
exUpvoteShare = expectedUpvoteShare(pageType, rank)
}
deltaExpectedUpvotes := exUpvoteShare * sitewideUpvotes
cumulativeExpectedUpvotes += deltaExpectedUpvotes
sitewideDeltaExpectedUpvotes += deltaExpectedUpvotes
sitewideExpectedUpvotesShare += exUpvoteShare
}
}
datapoint := dataPoint{
id: id,
score: story.Score,
descendants: story.Comments,
sampleTime: sampleTime,
submissionTime: story.SubmissionTime,
ranks: ranks,
cumulativeExpectedUpvotes: cumulativeExpectedUpvotes,
cumulativeUpvotes: cumulativeUpvotes,
ageApprox: story.AgeApprox,
flagged: story.Flagged,
dupe: story.Dupe,
}
if err := ndb.insertDataPoint(tx, datapoint); err != nil {
return int(sitewideUpvotes), errors.Wrap(err, "insertDataPoint")
}
}
logger.Info("Finished crawl",
"nitems", len(stories), slog.Duration("elapsed", time.Since(t)),
"deltaExpectedUpvotes", sitewideDeltaExpectedUpvotes,
"sitewideUpvotes", sitewideUpvotes,
"sitewideExpectedUpvotesShare", sitewideExpectedUpvotesShare,
"dataPoints", len(stories))
return int(sitewideUpvotes), nil
}

// getRanksFromAPI gets all ranks for all page types from the API and puts them into
// a map[int]ranksArray
func (app app) getRanksFromAPI(ctx context.Context) (map[int]ranksArray, error) {
app.logger.Info("Getting ranks from API")
t := time.Now()
storyRanks := map[int]ranksArray{}
client := app.hnClient
for pageType := top; pageType <= show; pageType++ {
pageTypeName := pageTypes[pageType]
ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()
storyIDs, err := client.Stories(ctx, pageTypeName)
if err != nil {
return storyRanks, errors.Wrap(err, "client.Stories")
}
for zeroBasedRank, ID := range storyIDs {
var ranks ranksArray
var ok bool
if ranks, ok = storyRanks[ID]; !ok {
// if story is not in storyRanks, initialize it with empty ranks
ranks = ranksArray{}
}
ranks[pageType] = zeroBasedRank + 1
storyRanks[ID] = ranks
// only take stories which appear on the first 90 ranks
if zeroBasedRank+1 >= 90 {
break
}
}
}
app.logger.Info("Got ranks from api", slog.Duration("elapsed", time.Since(t)))
return storyRanks, nil
}
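
// getQNTopFromPreviousCrawl returns the IDs of stories that were within the
// top 90 QN ranks at the previous crawl (the second-most-recent sampleTime in
// the dataset table).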
func (app app) getQNTopFromPreviousCrawl(ctx context.Context, tx *sql.Tx) ([]int, error) {
result := make([]int, 0, 90)
s, err := tx.Prepare("select id from dataset where qnRank <= 90 and sampleTime = (select max(sampleTime) from dataset where sampleTime != (select max(sampleTime) from dataset))")
if err != nil {
return nil, errors.Wrap(err, "preparing getQNTopFromPreviousCrawl sql")
}
rows, err := s.QueryContext(ctx)
if err != nil {
return nil, errors.Wrap(err, "executing getQNTopFromPreviousCrawl sql")
}
defer rows.Close()
for rows.Next() {
var id sql.NullInt32
err := rows.Scan(&id)
if err != nil {
return nil, errors.Wrap(err, "rows.Scan")
}
result = append(result, int(id.Int32))
}
if err := rows.Err(); err != nil {
return nil, errors.Wrap(err, "rows.Err")
}
return result, nil
}