move ai python server into separate repo
src/cmd/crawler/collectors/collector.go (new file, 7 lines)
@@ -0,0 +1,7 @@
package collectors

import "crowsnest/internal/model/database"

type Collector struct {
    Articles *database.ArticleModel
}
src/cmd/crawler/collectors/spiegel.go (new file, 139 lines)
@@ -0,0 +1,139 @@
package collectors

import (
    "crowsnest/internal/model"
    "errors"
    "fmt"
    "log"
    "regexp"
    "strings"
    "time"

    "github.com/PuerkitoBio/goquery"
    "github.com/gocolly/colly/v2"
)

func (c *Collector) CollectSpiegel() {
    collycollector := colly.NewCollector(
        colly.AllowedDomains("www.spiegel.de", "spiegel.de"),
        colly.CacheDir("./persistence/spiegel_cache"),
        colly.MaxDepth(3),
    )

    // store articles
    collycollector.OnResponse(func(r *colly.Response) {
        url := r.Request.URL.String()
        err := c.ExtractSpiegel(url, r.Body)
        if err == nil {
            log.Println("added article", url)
        } else {
            log.Println("failed to add article:", err, "("+url+")")
        }
    })

    // cascade
    collycollector.OnHTML("a[href]", func(e *colly.HTMLElement) {
        url := e.Attr("href")

        if !strings.HasPrefix(url, "http") {
            return
        }
        e.Request.Visit(url)
    })

    // go through archive
    startDate := time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)
    currentDate := time.Now()

    for date := startDate; date.Before(currentDate) || date.Equal(currentDate); date = date.AddDate(0, 0, 1) {
        urlDate := date.Format("02.01.2006")
        url := fmt.Sprintf("https://www.spiegel.de/nachrichtenarchiv/artikel-%s.html", urlDate)

        collycollector.Visit(url)
    }
}
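The layout strings used for the archive URLs above and for parsing the meta date tag in the extractor below follow Go's reference-time convention: a layout spells out how the fixed moment Mon Jan 2 15:04:05 MST 2006 would appear in the desired format. A minimal sketch:

package main

import (
    "fmt"
    "time"
)

func main() {
    d := time.Date(2020, time.March, 9, 0, 0, 0, 0, time.UTC)
    // "02.01.2006" means day.month.year, as in the Spiegel archive URLs.
    fmt.Println(d.Format("02.01.2006")) // 09.03.2020
    // The extractors parse the meta date tag with the same mechanism.
    fmt.Println(d.Format("2006-01-02T15:04:05-07:00")) // 2020-03-09T00:00:00+00:00
}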
func (c *Collector) ExtractSpiegel(url string, body []byte) error {
    paywall_pattern := regexp.MustCompile(`"paywall":{"attributes":{"is_active":true`)
    url_pattern := regexp.MustCompile(`^https://(www\.)?spiegel\.de.*`)
    whitespace := regexp.MustCompile(`\s+`)

    var exists bool
    var pagetype, title, content, datestr string
    var tag *goquery.Selection
    var date time.Time

    // check url pattern
    if !url_pattern.MatchString(url) {
        return errors.New("invalid url pattern")
    }

    // check for paywall
    if paywall_pattern.Match(body) {
        return errors.New("unable to extract article due to paywall")
    }

    // construct goquery doc
    doc, err := goquery.NewDocumentFromReader(strings.NewReader(string(body)))
    if err != nil {
        return err
    }

    // check for article type
    tag = doc.Find("meta[property='og:type']")
    pagetype, exists = tag.Attr("content")
    if !exists || pagetype != "article" {
        return errors.New("unable to extract article, not of type article")
    }

    // get title
    tag = doc.Find("meta[property='og:title']")
    title, exists = tag.Attr("content")
    if !exists {
        return errors.New("unable to extract article, no title tag")
    }

    // prepend description to content of article
    tag = doc.Find("meta[name='description']")
    content, exists = tag.Attr("content")
    if !exists {
        return errors.New("unable to extract article, no description tag")
    }
    content += " "

    // get publishing date
    tag = doc.Find("meta[name='date']")
    datestr, exists = tag.Attr("content")
    if !exists {
        return errors.New("unable to extract article, no date tag")
    }

    date, err = time.Parse("2006-01-02T15:04:05-07:00", datestr)
    if err != nil {
        return err
    }

    // get content
    tag = doc.Find("main[id='Inhalt'] div > p")

    tag.Each(func(index int, p *goquery.Selection) {
        content += " " + p.Text()
    })

    // clean up content string
    content = whitespace.ReplaceAllString(content, " ")
    content = strings.ReplaceAll(content, "»", "\"")
    content = strings.ReplaceAll(content, "«", "\"")

    // insert new article
    article := model.Article{
        SourceUrl:   url,
        PublishDate: date,
        FetchDate:   time.Now(),
        Title:       title,
        Content:     content,
    }

    err = c.Articles.Insert(&article)
    return err
}
src/cmd/crawler/collectors/zeit.go (new file, 147 lines)
@@ -0,0 +1,147 @@
package collectors

import (
    "crowsnest/internal/model"
    "errors"
    "fmt"
    "log"
    "regexp"
    "strings"
    "time"

    "github.com/PuerkitoBio/goquery"
    "github.com/gocolly/colly/v2"
)

// Gets every page of the archive of zeit.de and stores the responses into the
// database.
func (c *Collector) CollectZeit() {
    collycollector := colly.NewCollector(
        colly.AllowedDomains("www.zeit.de", "zeit.de"),
        colly.CacheDir("./persistence/zeit_cache"),
        colly.MaxDepth(3),
    )

    // store articles
    collycollector.OnResponse(func(r *colly.Response) {
        url := r.Request.URL.String()
        err := c.ExtractZeit(url, r.Body)
        if err == nil {
            log.Println("added article", url)
        } else {
            log.Println("failed to add article:", err, "("+url+")")
        }
    })

    // cascade
    collycollector.OnHTML("a[href]", func(e *colly.HTMLElement) {
        url := e.Attr("href")

        if !strings.HasPrefix(url, "http") {
            return
        }
        e.Request.Visit(url)
    })

    // go through archive
    startDate := time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)
    //startDate := time.Date(1946, time.January, 1, 0, 0, 0, 0, time.UTC)
    currentDate := time.Now()

    for date := startDate; date.Before(currentDate) || date.Equal(currentDate); date = date.AddDate(0, 0, 7) {
        year, week := date.ISOWeek()
        url := fmt.Sprintf("https://www.zeit.de/%04d/%02d/index", year, week)

        collycollector.Visit(url)
    }
}
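One subtlety in the archive loop above: ISOWeek can assign the first days of January to the last ISO week of the previous year, so stepping in 7-day increments from January 1 does not always start at week 01 of the start year. A minimal sketch:

package main

import (
    "fmt"
    "time"
)

func main() {
    // 2021-01-01 was a Friday; ISO 8601 places it in week 53 of 2020.
    year, week := time.Date(2021, time.January, 1, 0, 0, 0, 0, time.UTC).ISOWeek()
    fmt.Printf("https://www.zeit.de/%04d/%02d/index\n", year, week) // .../2020/53/index
}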
func (c *Collector) ExtractZeit(url string, body []byte) error {
    url_pattern := regexp.MustCompile(`^https://(www\.)?zeit\.de.*`)
    whitespace := regexp.MustCompile(`\s+`)

    var exists bool
    var pagetype, title, content, datestr string
    var tag *goquery.Selection
    var date time.Time

    // check url pattern
    if !url_pattern.MatchString(url) {
        return errors.New("invalid url pattern")
    }

    // construct goquery doc
    doc, err := goquery.NewDocumentFromReader(strings.NewReader(string(body)))
    if err != nil {
        return err
    }

    // check for article type
    tag = doc.Find("meta[property='og:type']")
    pagetype, exists = tag.Attr("content")
    if !exists || pagetype != "article" {
        return errors.New("unable to extract article, not of type article")
    }

    // check for paywall
    tag = doc.Find("meta[property='article:content_tier']")
    pagetype, exists = tag.Attr("content")
    if !exists || pagetype != "free" {
        return errors.New("unable to extract article due to paywall")
    }

    // get title
    tag = doc.Find("meta[property='og:title']")
    title, exists = tag.Attr("content")
    if !exists {
        return errors.New("unable to extract article, no title tag")
    }

    // prepend description to content of article
    tag = doc.Find("meta[name='description']")
    content, exists = tag.Attr("content")
    if !exists {
        return errors.New("unable to extract article, no description tag")
    }
    content += " "

    if strings.Contains(content, "Das Liveblog") {
        return errors.New("unable to extract article, no support for liveblog")
    }

    // get publishing date
    tag = doc.Find("meta[name='date']")
    datestr, exists = tag.Attr("content")
    if !exists {
        return errors.New("unable to extract article, no date tag")
    }

    date, err = time.Parse("2006-01-02T15:04:05-07:00", datestr)
    if err != nil {
        return err
    }

    // get content
    tag = doc.Find("main > article > div.article-body p.article__item")

    tag.Each(func(index int, p *goquery.Selection) {
        content += " " + p.Text()
    })

    // clean up content string
    content = whitespace.ReplaceAllString(content, " ")
    content = strings.ReplaceAll(content, "»", "\"")
    content = strings.ReplaceAll(content, "«", "\"")

    // insert new article
    article := model.Article{
        SourceUrl:   url,
        PublishDate: date,
        FetchDate:   time.Now(),
        Title:       title,
        Content:     content,
    }

    err = c.Articles.Insert(&article)
    return err
}
src/cmd/crawler/main.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package main

import (
    "crowsnest/cmd/crawler/collectors"
    "crowsnest/internal/model/database"
    "database/sql"
    "log"
    "os"
    "sync"

    _ "github.com/lib/pq"
)

func main() {
    // collect environment variables
    databaseURL := os.Getenv("DB_URL")

    // connect to database
    db, err := sql.Open("postgres", databaseURL)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // collect websites
    coll := collectors.Collector{
        Articles: &database.ArticleModel{DB: db},
    }

    var wg sync.WaitGroup
    wg.Add(2)

    go func() {
        defer wg.Done()
        coll.CollectSpiegel()
    }()
    go func() {
        defer wg.Done()
        // run synchronously inside this goroutine so wg.Done() fires only
        // after the collector finishes
        coll.CollectZeit()
    }()

    wg.Wait()
}
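Worth noting: sql.Open only validates its arguments and does not establish a connection, so a bad DB_URL would surface later inside the collectors. A common pattern, shown here as a sketch rather than part of the commit, is to Ping right after opening:

package main

import (
    "database/sql"
    "log"
    "os"

    _ "github.com/lib/pq"
)

func main() {
    db, err := sql.Open("postgres", os.Getenv("DB_URL"))
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // sql.Open is lazy; Ping forces a real round trip to the server.
    if err := db.Ping(); err != nil {
        log.Fatal("database unreachable: ", err)
    }
}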
src/cmd/frontend/Index.go (new file, 31 lines)
@@ -0,0 +1,31 @@
package main

import (
    "crowsnest/internal/model"
    "html/template"
    "net/http"
)

// List the latest articles using the base template.
func (app *App) Index(w http.ResponseWriter, req *http.Request) {
    // get articles
    articles, err := app.articles.All(30)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    // convert to viewmodel
    articleVMs := make([]*model.ArticleViewModel, 0, len(articles))
    for _, a := range articles {
        articleVMs = append(articleVMs, a.ViewModel())
    }

    // render template
    t := template.Must(template.ParseFiles("assets/templates/article.html", "assets/templates/layout.html"))
    err = t.ExecuteTemplate(w, "base", articleVMs)
    if err != nil {
        http.Error(w, "Failed to render template", http.StatusInternalServerError)
        return
    }
}
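This handler re-parses its templates on every request, and template.Must turns a parse error into a per-request panic. If the templates are static, a common alternative is to parse once at startup; a minimal sketch (the indexTmpl variable and index handler are illustrative, not part of this commit):

package main

import (
    "html/template"
    "log"
    "net/http"
)

// Parsed once at startup: a broken template fails fast at boot instead of
// panicking inside a request handler.
var indexTmpl = template.Must(template.ParseFiles(
    "assets/templates/article.html",
    "assets/templates/layout.html",
))

func index(w http.ResponseWriter, _ *http.Request) {
    if err := indexTmpl.ExecuteTemplate(w, "base", nil); err != nil {
        log.Println("render failed:", err)
    }
}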
src/cmd/frontend/UpSearch.go (new file, 40 lines)
@@ -0,0 +1,40 @@
package main

import (
    "crowsnest/internal/model"
    "html/template"
    "net/http"
)

// Endpoint that returns a list of articles given search terms in the post
// request of a search form. Uses the content template.
func (app *App) UpSearch(w http.ResponseWriter, req *http.Request) {
    // construct search query
    searchTerms := req.FormValue("search")
    if searchTerms == "" {
        app.Index(w, req)
        return
    }

    // get articles
    articles, err := app.articles.Search(searchTerms)
    if err != nil {
        // report search failures to the client
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }

    // convert to viewmodel
    articleVMs := make([]*model.ArticleViewModel, 0, len(articles))
    for _, a := range articles {
        articleVMs = append(articleVMs, a.ViewModel())
    }

    // render template
    t := template.Must(template.ParseFiles("assets/templates/article.html"))
    err = t.ExecuteTemplate(w, "content", articleVMs)
    if err != nil {
        http.Error(w, "Failed to render template", http.StatusInternalServerError)
        return
    }
}
src/cmd/frontend/main.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package main

import (
    "crowsnest/internal/model/database"
    "log"
    "net/http"

    _ "github.com/lib/pq"
)

type App struct {
    articles *database.ArticleModel
}

func main() {
    db, err := database.DbConnection()
    if err != nil {
        log.Fatal("failed to connect to database: ", err)
    }

    // define app
    app := &App{
        articles: &database.ArticleModel{DB: db},
    }

    // start web server
    server := http.Server{
        Addr:    ":8080",
        Handler: app.routes(),
    }

    log.Println("server started, listening on :8080")
    log.Fatal(server.ListenAndServe())
}
src/cmd/frontend/routes.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package main

import (
    "log"
    "net/http"
    "time"
)

// LoggingMiddleware logs details about each incoming HTTP request.
func LoggingMiddleware(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        start := time.Now()

        // Call the next handler
        next.ServeHTTP(w, r)

        log.Printf("[request] %s %s from %s (%v)", r.Method, r.URL.Path, r.RemoteAddr, time.Since(start))
    })
}

func (app *App) routes() http.Handler {
    mux := http.NewServeMux()

    // dynamic routes
    mux.Handle("GET /", LoggingMiddleware(http.HandlerFunc(app.Index)))
    mux.Handle("POST /up/search", LoggingMiddleware(http.HandlerFunc(app.UpSearch)))

    // serve files from the "static" directory; strip the route prefix so
    // /static/x maps to assets/static/x
    fs := http.FileServer(http.Dir("assets/static"))
    mux.Handle("GET /static/", http.StripPrefix("/static/", fs))

    return mux
}
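The method-qualified patterns above ("GET /", "POST /up/search") rely on the routing syntax that net/http's ServeMux gained in Go 1.22; older toolchains treat them as literal paths. The same mechanism also supports path wildcards; a minimal sketch (the /articles/{id} route is hypothetical, not part of this commit):

package main

import (
    "fmt"
    "log"
    "net/http"
)

func main() {
    mux := http.NewServeMux()

    // Go 1.22+: patterns may carry an HTTP method and {wildcards}.
    mux.HandleFunc("GET /articles/{id}", func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprintln(w, "article", r.PathValue("id"))
    })

    log.Fatal(http.ListenAndServe(":8080", mux))
}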