run crawlers with the webapp

2025-01-12 03:35:19 +01:00
parent 3e808899c1
commit 48d8b99fc3
6 changed files with 127 additions and 89 deletions

View File

@@ -1,7 +0,0 @@
package collectors
import "crowsnest/internal/model/database"
type Collector struct {
Articles *database.ArticleModel
}

View File

@@ -1,43 +0,0 @@
package main
import (
"crowsnest/cmd/crawler/collectors"
"crowsnest/internal/model/database"
"database/sql"
"log"
"os"
"sync"
_ "github.com/lib/pq"
)
func main() {
// collect environment variables
databaseURL := os.Getenv("DB_URL")
// connect to database
db, err := sql.Open("postgres", databaseURL)
if err != nil {
log.Fatal(err)
}
defer db.Close()
// collect websites
coll := collectors.Collector{
Articles: &database.ArticleModel{DB: db},
}
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
coll.CollectSpiegel()
}()
go func() {
defer wg.Done()
coll.CollectZeit()
}()
wg.Wait()
}

View File

@@ -2,10 +2,12 @@ package main
import (
"crowsnest/internal/app"
"crowsnest/internal/crawler"
"crowsnest/internal/middleware"
"crowsnest/internal/model/database"
"log"
"net/http"
"time"
_ "github.com/lib/pq"
)
@@ -16,6 +18,18 @@ func main() {
log.Fatal("failed to connect to database due to", err.Error())
}
// run web crawlers
coll := crawler.Crawler{
Articles: &database.ArticleModel{DB: db},
}
go func() {
for {
coll.ZeitCollectIndex()
coll.SpiegelCollectIndex()
time.Sleep(5 * time.Minute)
}
}()
// define app
webapp := app.NewApp(db)
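
For context, a minimal sketch of the background loop added above, rewritten with a time.Ticker and error logging. Only coll, ZeitCollectIndex and SpiegelCollectIndex come from this commit; the ticker and the log calls are illustrative assumptions, not part of the change.

// Sketch only: run both index crawls on a fixed interval and log failures.
// Assumes coll is the crawler.Crawler built above; ticker and logging are illustrative.
go func() {
	ticker := time.NewTicker(5 * time.Minute)
	defer ticker.Stop()
	for {
		if err := coll.ZeitCollectIndex(); err != nil {
			log.Println("zeit index crawl failed:", err)
		}
		if err := coll.SpiegelCollectIndex(); err != nil {
			log.Println("spiegel index crawl failed:", err)
		}
		<-ticker.C
	}
}()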

View File

@@ -0,0 +1,48 @@
package crawler
import (
"crowsnest/internal/model/database"
"fmt"
"net/http"
"github.com/PuerkitoBio/goquery"
)
type Crawler struct {
Articles *database.ArticleModel
}
// GetAllURLs fetches all URLs from a given web page URL
func (c *Crawler) GetAllURLs(pageURL string) ([]string, error) {
// Send a GET request to the provided URL
resp, err := http.Get(pageURL)
if err != nil {
return nil, fmt.Errorf("failed to fetch URL %s: %w", pageURL, err)
}
defer resp.Body.Close()
// Check if the response status is OK
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("HTTP request failed with status code %d", resp.StatusCode)
}
// Parse the HTML document
doc, err := goquery.NewDocumentFromReader(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to parse HTML: %w", err)
}
// Slice to store the extracted URLs
var urls []string
// Select all anchor tags and extract the href attribute
doc.Find("a").Each(func(index int, element *goquery.Selection) {
// Get the href attribute
href, exists := element.Attr("href")
if exists {
urls = append(urls, href)
}
})
return urls, nil
}
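
A minimal usage sketch for GetAllURLs as declared above; the standalone main wrapper and the start URL are illustrative assumptions and not part of this commit.

// Sketch only: fetch a start page and print every anchor href found on it.
// A zero-value Crawler is enough here because GetAllURLs does not touch Articles.
package main

import (
	"fmt"
	"log"

	"crowsnest/internal/crawler"
)

func main() {
	c := crawler.Crawler{}
	urls, err := c.GetAllURLs("https://www.zeit.de/index")
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range urls {
		fmt.Println(u)
	}
}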

View File

@@ -1,10 +1,9 @@
package collectors
package crawler
import (
"crowsnest/internal/model"
"errors"
"fmt"
"log"
"regexp"
"strings"
"time"
@@ -13,34 +12,48 @@ import (
"github.com/gocolly/colly/v2"
)
func (c *Collector) CollectSpiegel() {
collycollector := colly.NewCollector(
func (c *Crawler) SpiegelCollector() *colly.Collector {
// set cache, domain pattern and max recursion depth
collector := colly.NewCollector(
colly.AllowedDomains("www.spiegel.de", "spiegel.de"),
colly.CacheDir("./persistence/spiegel_cache"),
colly.MaxDepth(3),
colly.MaxDepth(5),
)
// store articles
collycollector.OnResponse(func(r *colly.Response) {
collector.OnResponse(func(r *colly.Response) {
url := r.Request.URL.String()
err := c.ExtractSpiegel(url, r.Body)
if err == nil {
log.Println("added article", url)
} else {
log.Println("failed to add article:", err, "("+url+")")
}
c.SpiegelExtract(url, r.Body)
})
// cascade
collycollector.OnHTML("a[href]", func(e *colly.HTMLElement) {
collector.OnHTML("a[href]", func(e *colly.HTMLElement) {
url := e.Attr("href")
if !strings.HasPrefix(url, "http") {
return
}
e.Request.Visit(url)
})
return collector
}
func (c *Crawler) SpiegelCollectIndex() error {
urls, err := c.GetAllURLs("https://www.spiegel.de/")
if err != nil {
return err
}
collector := c.SpiegelCollector()
collector.MaxDepth = 1
for _, url := range urls {
collector.Visit(url)
}
return nil
}
func (c *Crawler) SpiegelCollectArchive() {
collector := c.SpiegelCollector()
// go through archive
startDate := time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)
currentDate := time.Now()
@@ -49,11 +62,11 @@ func (c *Collector) CollectSpiegel() {
urlDate := date.Format("02.01.2006")
url := fmt.Sprintf("https://www.spiegel.de/nachrichtenarchiv/artikel-%s.html", urlDate)
collycollector.Visit(url)
collector.Visit(url)
}
}
func (c *Collector) ExtractSpiegel(url string, body []byte) error {
func (c *Crawler) SpiegelExtract(url string, body []byte) error {
paywall_pattern := regexp.MustCompile(`"paywall":{"attributes":{"is_active":true`)
url_pattern := regexp.MustCompile(`^https://(www\.)?spiegel.de.*`)
whitespace := regexp.MustCompile(`\s+`)

View File

@@ -1,10 +1,9 @@
package collectors
package crawler
import (
"crowsnest/internal/model"
"errors"
"fmt"
"log"
"regexp"
"strings"
"time"
@@ -13,36 +12,50 @@ import (
"github.com/gocolly/colly/v2"
)
// Gets every page of the archive of zeit.de and stores the responses into the
// database.
func (c *Collector) CollectZeit() {
collycollector := colly.NewCollector(
func (c *Crawler) ZeitCollector() *colly.Collector {
// set cache, domain pattern and max recursion depth
collector := colly.NewCollector(
colly.AllowedDomains("www.zeit.de", "zeit.de"),
colly.CacheDir("./persistence/zeit_cache"),
colly.MaxDepth(3),
colly.MaxDepth(5),
)
// store articles
collycollector.OnResponse(func(r *colly.Response) {
collector.OnResponse(func(r *colly.Response) {
url := r.Request.URL.String()
err := c.ExtractZeit(url, r.Body)
if err == nil {
log.Println("added article", url)
} else {
log.Println("failed to add article:", err, "("+url+")")
}
c.ZeitExtract(url, r.Body)
})
// cascade
collycollector.OnHTML("a[href]", func(e *colly.HTMLElement) {
collector.OnHTML("a[href]", func(e *colly.HTMLElement) {
url := e.Attr("href")
if !strings.HasPrefix(url, "http") {
return
}
e.Request.Visit(url)
})
return collector
}
func (c *Crawler) ZeitCollectIndex() error {
urls, err := c.GetAllURLs("https://www.zeit.de/index")
if err != nil {
return err
}
collector := c.ZeitCollector()
collector.MaxDepth = 1
for _, url := range urls {
collector.Visit(url)
}
return nil
}
// Gets every page of the archive of zeit.de and stores the responses into the
// database.
func (c *Crawler) ZeitCollectArchive() {
collector := c.ZeitCollector()
// go through archive
startDate := time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)
//startDate := time.Date(1946, time.January, 1, 0, 0, 0, 0, time.UTC)
@@ -52,12 +65,12 @@ func (c *Collector) CollectZeit() {
year, week := date.ISOWeek()
url := fmt.Sprintf("https://www.zeit.de/%04d/%02d/index", year, week)
collycollector.Visit(url)
collector.Visit(url)
}
}
func (c *Collector) ExtractZeit(url string, body []byte) error {
url_pattern := regexp.MustCompile(`^https://(www\.)?zeit.de.*`)
func (c *Crawler) ZeitExtract(url string, body []byte) error {
url_pattern := regexp.MustCompile(`^https://(www\.)?zeit\.de[^#]*$`)
whitespace := regexp.MustCompile(`\s+`)
var exists bool