run crawlers with the webapp
Deleted file (old collectors package):

@@ -1,7 +0,0 @@
-package collectors
-
-import "crowsnest/internal/model/database"
-
-type Collector struct {
-    Articles *database.ArticleModel
-}
Deleted file (old standalone crawler binary):

@@ -1,43 +0,0 @@
-package main
-
-import (
-    "crowsnest/cmd/crawler/collectors"
-    "crowsnest/internal/model/database"
-    "database/sql"
-    "log"
-    "os"
-    "sync"
-
-    _ "github.com/lib/pq"
-)
-
-func main() {
-    // collect environement variables
-    databaseURL := os.Getenv("DB_URL")
-
-    // connect to database
-    db, err := sql.Open("postgres", databaseURL)
-    if err != nil {
-        log.Fatal(err)
-    }
-    defer db.Close()
-
-    // collect websites
-    coll := collectors.Collector{
-        Articles: &database.ArticleModel{DB: db},
-    }
-
-    var wg sync.WaitGroup
-    wg.Add(2)
-
-    go func() {
-        defer wg.Done()
-        coll.CollectSpiegel()
-    }()
-    go func() {
-        defer wg.Done()
-        go coll.CollectZeit()
-    }()
-
-    wg.Wait()
-}
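Worth noting about the removed binary: its second worker launched CollectZeit with an extra "go", so wg.Done fired as soon as that inner goroutine was spawned and wg.Wait (plus the deferred db.Close) could return before the Zeit crawl finished. A minimal sketch of the fan-out it appears to have intended; the crawlAll helper and its parameters are placeholders, not code from the repository:

package main

import "sync"

// crawlAll runs both crawls concurrently and returns only after both finish.
func crawlAll(collectSpiegel, collectZeit func()) {
    var wg sync.WaitGroup
    wg.Add(2)

    go func() {
        defer wg.Done()
        collectSpiegel()
    }()
    go func() {
        defer wg.Done()
        // No nested "go" here: Done must fire after the crawl completes,
        // not after a goroutine has merely been started.
        collectZeit()
    }()

    wg.Wait()
}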
Modified file (webapp main):

@@ -2,10 +2,12 @@ package main

 import (
     "crowsnest/internal/app"
+    "crowsnest/internal/crawler"
     "crowsnest/internal/middleware"
     "crowsnest/internal/model/database"
     "log"
     "net/http"
+    "time"

     _ "github.com/lib/pq"
 )
@@ -16,6 +18,18 @@ func main() {
         log.Fatal("failed to connect to database due to", err.Error())
     }

+    // run web crawlers
+    coll := crawler.Crawler{
+        Articles: &database.ArticleModel{DB: db},
+    }
+    go func() {
+        for {
+            coll.ZeitCollectIndex()
+            coll.SpiegelCollectIndex()
+            time.Sleep(5 * time.Minute)
+        }
+    }()
+
     // define app
     webapp := app.NewApp(db)
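Both index crawls return an error that the loop above silently drops. A sketch of the same background loop with those errors logged; the runCrawlLoop name is hypothetical and not part of this commit:

package main

import (
    "log"
    "time"

    "crowsnest/internal/crawler"
)

// runCrawlLoop mirrors the goroutine added in main, but surfaces crawl
// failures instead of discarding the returned errors.
func runCrawlLoop(coll *crawler.Crawler) {
    for {
        if err := coll.ZeitCollectIndex(); err != nil {
            log.Println("zeit index crawl failed:", err)
        }
        if err := coll.SpiegelCollectIndex(); err != nil {
            log.Println("spiegel index crawl failed:", err)
        }
        time.Sleep(5 * time.Minute)
    }
}

In main it would be started with go runCrawlLoop(&coll) in place of the anonymous goroutine.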
New file: src/internal/crawler/crawler.go (48 lines)

@@ -0,0 +1,48 @@
+package crawler
+
+import (
+    "crowsnest/internal/model/database"
+    "fmt"
+    "net/http"
+
+    "github.com/PuerkitoBio/goquery"
+)
+
+type Crawler struct {
+    Articles *database.ArticleModel
+}
+
+// GetAllURLs fetches all URLs from a given web page URL
+func (c *Crawler) GetAllURLs(pageURL string) ([]string, error) {
+    // Send a GET request to the provided URL
+    resp, err := http.Get(pageURL)
+    if err != nil {
+        return nil, fmt.Errorf("failed to fetch URL %s: %w", pageURL, err)
+    }
+    defer resp.Body.Close()
+
+    // Check if the response status is OK
+    if resp.StatusCode != http.StatusOK {
+        return nil, fmt.Errorf("HTTP request failed with status code %d", resp.StatusCode)
+    }
+
+    // Parse the HTML document
+    doc, err := goquery.NewDocumentFromReader(resp.Body)
+    if err != nil {
+        return nil, fmt.Errorf("failed to parse HTML: %w", err)
+    }
+
+    // Slice to store the extracted URLs
+    var urls []string
+
+    // Select all anchor tags and extract the href attribute
+    doc.Find("a").Each(func(index int, element *goquery.Selection) {
+        // Get the href attribute
+        href, exists := element.Attr("href")
+        if exists {
+            urls = append(urls, href)
+        }
+    })
+
+    return urls, nil
+}
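GetAllURLs returns every href exactly as it appears in the page, relative links included, and never touches the Articles model. A minimal usage sketch, assuming the import path shown in the new file:

package main

import (
    "fmt"
    "log"

    "crowsnest/internal/crawler"
)

func main() {
    // Articles can stay unset: GetAllURLs only fetches and parses the page.
    c := &crawler.Crawler{}

    urls, err := c.GetAllURLs("https://www.spiegel.de/")
    if err != nil {
        log.Fatal(err)
    }
    for _, u := range urls {
        fmt.Println(u) // hrefs as found, absolute and relative alike
    }
}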
Modified file (Spiegel crawler):

@@ -1,10 +1,9 @@
-package collectors
+package crawler

 import (
     "crowsnest/internal/model"
     "errors"
     "fmt"
-    "log"
     "regexp"
     "strings"
     "time"
@@ -13,34 +12,48 @@ import (
     "github.com/gocolly/colly/v2"
 )

-func (c *Collector) CollectSpiegel() {
-    collycollector := colly.NewCollector(
+func (c *Crawler) SpiegelCollector() *colly.Collector {
+    // set cache, domain pattern and max recursion depth
+    collector := colly.NewCollector(
         colly.AllowedDomains("www.spiegel.de", "spiegel.de"),
         colly.CacheDir("./persistence/spiegel_cache"),
-        colly.MaxDepth(3),
+        colly.MaxDepth(5),
     )

     // store articles
-    collycollector.OnResponse(func(r *colly.Response) {
+    collector.OnResponse(func(r *colly.Response) {
         url := r.Request.URL.String()
-        err := c.ExtractSpiegel(url, r.Body)
-        if err == nil {
-            log.Println("added article", url)
-        } else {
-            log.Println("failed to add article:", err, "("+url+")")
-        }
+        c.SpiegelExtract(url, r.Body)
     })

     // cascade
-    collycollector.OnHTML("a[href]", func(e *colly.HTMLElement) {
+    collector.OnHTML("a[href]", func(e *colly.HTMLElement) {
         url := e.Attr("href")

         if !strings.HasPrefix(url, "http") {
             return
         }
         e.Request.Visit(url)
     })
+
+    return collector
+}
+
+func (c *Crawler) SpiegelCollectIndex() error {
+    urls, err := c.GetAllURLs("https://www.spiegel.de/")
+    if err != nil {
+        return err
+    }
+
+    collector := c.SpiegelCollector()
+    collector.MaxDepth = 1
+
+    for _, url := range urls {
+        collector.Visit(url)
+    }
+    return nil
+}
+
+func (c *Crawler) SpiegelCollectArchive() {
+    collector := c.SpiegelCollector()

     // go through archive
     startDate := time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)
     currentDate := time.Now()
@@ -49,11 +62,11 @@ func (c *Collector) CollectSpiegel() {
         urlDate := date.Format("02.01.2006")
         url := fmt.Sprintf("https://www.spiegel.de/nachrichtenarchiv/artikel-%s.html", urlDate)

-        collycollector.Visit(url)
+        collector.Visit(url)
     }
 }

-func (c *Collector) ExtractSpiegel(url string, body []byte) error {
+func (c *Crawler) SpiegelExtract(url string, body []byte) error {
     paywall_pattern := regexp.MustCompile(`"paywall":{"attributes":{"is_active":true`)
     url_pattern := regexp.MustCompile(`^https://(www\.)?spiegel.de.*`)
     whitespace := regexp.MustCompile(`\s+`)
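SpiegelCollectIndex does a shallow pass (it seeds the collector with every front-page link and forces MaxDepth to 1), while SpiegelCollectArchive walks the dated archive pages at the constructor's depth of 5. A sketch of a hypothetical one-off backfill command wiring both up; the DB_URL handling mirrors the removed binary and is not part of this commit:

package main

import (
    "database/sql"
    "log"
    "os"

    "crowsnest/internal/crawler"
    "crowsnest/internal/model/database"

    _ "github.com/lib/pq"
)

func main() {
    db, err := sql.Open("postgres", os.Getenv("DB_URL"))
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    c := &crawler.Crawler{Articles: &database.ArticleModel{DB: db}}

    // Shallow pass over everything linked from the front page.
    if err := c.SpiegelCollectIndex(); err != nil {
        log.Println("index crawl failed:", err)
    }

    // Deep pass over the dated archive pages back to 2020.
    c.SpiegelCollectArchive()
}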
Modified file (Zeit crawler):

@@ -1,10 +1,9 @@
-package collectors
+package crawler

 import (
     "crowsnest/internal/model"
     "errors"
     "fmt"
-    "log"
     "regexp"
     "strings"
     "time"
@@ -13,36 +12,50 @@ import (
     "github.com/gocolly/colly/v2"
 )

-// Gets every page of the archive of zeit.de and stores the responses into the
-// database.
-func (c *Collector) CollectZeit() {
-    collycollector := colly.NewCollector(
+func (c *Crawler) ZeitCollector() *colly.Collector {
+    // set cache, domain pattern and max recursion depth
+    collector := colly.NewCollector(
         colly.AllowedDomains("www.zeit.de", "zeit.de"),
         colly.CacheDir("./persistence/zeit_cache"),
-        colly.MaxDepth(3),
+        colly.MaxDepth(5),
     )

     // store articles
-    collycollector.OnResponse(func(r *colly.Response) {
+    collector.OnResponse(func(r *colly.Response) {
         url := r.Request.URL.String()
-        err := c.ExtractZeit(url, r.Body)
-        if err == nil {
-            log.Println("added article", url)
-        } else {
-            log.Println("failed to add article:", err, "("+url+")")
-        }
+        c.ZeitExtract(url, r.Body)
     })

     // cascade
-    collycollector.OnHTML("a[href]", func(e *colly.HTMLElement) {
+    collector.OnHTML("a[href]", func(e *colly.HTMLElement) {
         url := e.Attr("href")

         if !strings.HasPrefix(url, "http") {
             return
         }
         e.Request.Visit(url)
     })
+
+    return collector
+}
+
+func (c *Crawler) ZeitCollectIndex() error {
+    urls, err := c.GetAllURLs("https://www.zeit.de/index")
+    if err != nil {
+        return err
+    }
+
+    collector := c.ZeitCollector()
+    collector.MaxDepth = 1
+
+    for _, url := range urls {
+        collector.Visit(url)
+    }
+    return nil
+}
+
+// Gets every page of the archive of zeit.de and stores the responses into the
+// database.
+func (c *Crawler) ZeitCollectArchive() {
+    collector := c.ZeitCollector()

     // go through archive
     startDate := time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)
     //startDate := time.Date(1946, time.January, 1, 0, 0, 0, 0, time.UTC)
@@ -52,12 +65,12 @@ func (c *Collector) CollectZeit() {
         year, week := date.ISOWeek()
         url := fmt.Sprintf("https://www.zeit.de/%04d/%02d/index", year, week)

-        collycollector.Visit(url)
+        collector.Visit(url)
     }
 }

-func (c *Collector) ExtractZeit(url string, body []byte) error {
-    url_pattern := regexp.MustCompile(`^https://(www\.)?zeit.de.*`)
+func (c *Crawler) ZeitExtract(url string, body []byte) error {
+    url_pattern := regexp.MustCompile(`^https://(www\.)?zeit\.de[^#]*$`)
     whitespace := regexp.MustCompile(`\s+`)

     var exists bool
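The tightened Zeit URL pattern now requires a literal dot in zeit.de and rejects any URL containing a fragment, both of which the old pattern accepted. A standalone sketch comparing the two; the example URLs are illustrative only:

package main

import (
    "fmt"
    "regexp"
)

func main() {
    oldPattern := regexp.MustCompile(`^https://(www\.)?zeit.de.*`)
    newPattern := regexp.MustCompile(`^https://(www\.)?zeit\.de[^#]*$`)

    urls := []string{
        "https://www.zeit.de/2024/05/index",            // accepted by both
        "https://www.zeit.de/politik/article#comments", // fragment: old only
        "https://www.zeitXde.example/other",            // unescaped dot: old only
    }
    for _, u := range urls {
        fmt.Printf("%-46s old=%v new=%v\n", u, oldPattern.MatchString(u), newPattern.MatchString(u))
    }
}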