v/examples/news_fetcher.v

// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
// Use of this source code is governed by an MIT license
// that can be found in the LICENSE file.
import net.http
import json
import sync
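
// Story holds just the fields of a Hacker News item that this example decodes.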
struct Story {
	title string
	url   string
}
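
// worker_fetch is the thread pool callback: it fetches and prints a single story by its id.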
fn worker_fetch(p &sync.PoolProcessor, cursor int, worker_id int) voidptr {
	id := p.get_int_item(cursor)
	resp := http.get('https://hacker-news.firebaseio.com/v0/item/${id}.json') or {
		println('failed to fetch data from /v0/item/${id}.json')
		return sync.no_result
	}
	story := json.decode(Story, resp.text) or {
		println('failed to decode a story')
		return sync.no_result
	}
	println('# $cursor) $story.title | $story.url')
	return sync.no_result
}

// Fetches the top HN stories in parallel, using as many worker threads as you have cores.
fn main() {
	resp := http.get('https://hacker-news.firebaseio.com/v0/topstories.json') or {
		println('failed to fetch data from /v0/topstories.json')
		return
	}
	mut ids := json.decode([]int, resp.text) or {
		println('failed to decode topstories.json')
		return
	}
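	// Limit the run to the first 10 story ids, so the example finishes quickly.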
	if ids.len > 10 {
		ids = ids[0..10]
	}
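	// Create a pool that calls worker_fetch for each of the collected story ids.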
	mut fetcher_pool := sync.new_pool_processor({
		callback: worker_fetch
	})
	// NB: if you do not call set_max_jobs, the pool will try to use an optimal
	// number of threads, one per core in your system, which in most
	// cases is what you want anyway. You can override the automatic choice
	// by setting the VJOBS environment variable too.
	// fetcher_pool.set_max_jobs(4)
	fetcher_pool.work_on_items_i(ids)
}