21 Commits

Author SHA1 Message Date
61a6fcee03 feat(docs): update readme 2026-01-25 13:23:57 +03:00
ad31687111 feat(config): add config example in config.dist.toml 2026-01-25 13:19:50 +03:00
43c2c22de7 feat(logs): add logs for list handler
All checks were successful
release / docker-image (push) Successful in 59s
release / goreleaser (push) Successful in 10m1s
2026-01-25 13:00:41 +03:00
fbc43946f8 feat(logs): add more logs 2026-01-25 12:58:16 +03:00
daf99c5b66 feat(proxy): add proxy from env variables 2026-01-25 12:55:07 +03:00
4f8118562e chore(agents): update agents.md 2026-01-25 12:50:23 +03:00
1e32de279e chore(build): fix fetch tags for goreleaser
All checks were successful
release / docker-image (push) Successful in 46s
release / goreleaser (push) Successful in 9m59s
2026-01-09 15:02:41 +03:00
b5c8ec45aa chore(build): fix fetch tags for goreleaser
Some checks failed
release / goreleaser (push) Failing after 35s
release / docker-image (push) Successful in 48s
2026-01-09 15:00:51 +03:00
6e51b420d4 chore(build): fetch tags for goreleaser
Some checks failed
release / docker-image (push) Successful in 55s
release / goreleaser (push) Failing after 35s
2026-01-09 14:54:26 +03:00
f9732d9c33 chore(build): fix go build
All checks were successful
release / docker-image (push) Successful in 46s
release / goreleaser (push) Successful in 9m57s
2026-01-09 14:23:25 +03:00
cfeced8bd9 chore(build): improve build
Some checks failed
release / goreleaser (push) Failing after 9s
release / docker-image (push) Successful in 46s
- union workflows
- take go version from go.mod
2026-01-09 14:06:28 +03:00
9539c193c6 chore(build): add docker image build workflow
All checks were successful
release / goreleaser (push) Successful in 11m45s
docker-release / build-and-push (push) Successful in 12m3s
2026-01-09 13:51:35 +03:00
450b4caa95 chore(build): add dockerfile 2026-01-09 13:46:59 +03:00
a3c392f3b3 chore(build): fix goreleaser version 2026-01-09 13:46:48 +03:00
228b3423e7 fix goreleaser options
All checks were successful
release / goreleaser (push) Successful in 10m7s
2026-01-09 12:46:01 +03:00
01f8bf706f fix release workflow
All checks were successful
release / goreleaser (push) Successful in 10m24s
2026-01-09 12:35:39 +03:00
1fffa0172d add release workflow
Some checks failed
release / goreleaser (push) Failing after 5m51s
2026-01-09 12:23:49 +03:00
1ab6f13547 add server shutdown log 2026-01-09 11:50:18 +03:00
c890dad54f fix lines in list endpoint 2026-01-09 11:48:49 +03:00
803ba54a1c add base code 2026-01-09 11:40:13 +03:00
6a9840c8da add readme and agents.md 2026-01-09 11:30:15 +03:00
11 changed files with 657 additions and 0 deletions

View File

@@ -0,0 +1,58 @@
name: release
on:
push:
tags:
- 'v*'
jobs:
goreleaser:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
fetch-tags: true
- name: Setup Go
uses: actions/setup-go@v6
with:
go-version-file: 'go.mod'
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6
with:
version: 'v2.13.2'
distribution: goreleaser
args: release --clean
env:
GITEA_TOKEN: '${{ secrets.RELEASE_TOKEN }}'
docker-image:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Yandex Cloud Container Registry
uses: docker/login-action@v3
with:
registry: cr.yandex
username: oauth
password: ${{ secrets.YANDEX_CLOUD_OAUTH_TOKEN }}
- name: Build and push
uses: docker/build-push-action@v6
with:
file: ./Dockerfile
context: .
push: true
tags: |
cr.yandex/${{ secrets.YANDEX_CLOUD_REGISTRY_ID }}/trackers:${{ github.ref_name }}
cr.yandex/${{ secrets.YANDEX_CLOUD_REGISTRY_ID }}/trackers:latest
platforms: linux/amd64

3
.gitignore vendored
View File

@@ -1 +1,4 @@
.idea/
cache/
/config.toml

37
.goreleaser.yaml Normal file
View File

@@ -0,0 +1,37 @@
version: 2
project_name: trackers
builds:
- id: trackers
main: ./main.go
binary: trackers
env:
- CGO_ENABLED=0
goos:
- linux
goarch:
- amd64
- arm64
archives:
- id: trackers
formats:
- tar.gz
name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
files:
- README.md
checksum:
name_template: checksums.txt
changelog:
sort: asc
filters:
exclude:
- "^docs:"
- "^test:"
gitea_urls:
api: https://git.vakhrushev.me/api/v1
download: https://git.vakhrushev.me

104
AGENTS.md Normal file
View File

@@ -0,0 +1,104 @@
# AGENTS.md
This file provides guidance to LLM agents when working with code in this repository.
## Project Overview
Trackers is a Go application that aggregates torrent tracker links from multiple sources (HTTP, HTTPS, and local files). It:
- Polls configured sources at regular intervals
- Deduplicates and validates tracker links
- Caches results to disk for resilience
- Serves aggregated tracker lists via HTTP API
- Runs a single HTTP server with minimal dependencies
This is an educational project written in Go 1.25.5.
## Development Commands
### Build
```bash
go build -o trackers ./main.go
```
### Run
```bash
go run main.go -config config.toml
```
### Test
The codebase has no tests currently. Tests can be added with:
```bash
go test ./...
```
To run a single test:
```bash
go test -run TestName
```
### Lint
```bash
go fmt ./...
go vet ./...
```
### Release Build
Uses goreleaser for cross-platform builds (Linux amd64/arm64):
```bash
goreleaser build --snapshot --clean
```
## Architecture
**Single File Design**: All code is in `main.go` with clear functional separation:
1. **Config System** (`loadConfig`): Parses TOML configuration with defaults
- `port`: HTTP server port (default: 8080)
- `cache_dir`: Directory for caching tracker lists (default: cache/)
- `poll_interval`: How often to refresh sources (default: 60m)
- `sources`: Array of URLs/file paths to fetch tracker lists from
2. **Aggregator** (type `Aggregator`): Thread-safe in-memory deduplication
- Maintains per-source tracker sets using `sync.RWMutex`
- `Update()`: Stores new tracker list for a source
- `List()`: Returns combined sorted list across all sources
3. **Polling System** (`pollSource`/`runOnce`): Background goroutine per source
- Fetches source on startup and at configured intervals
- Updates aggregator and writes to cache on success
- Graceful shutdown on SIGINT/SIGTERM
4. **HTTP Handler** (`/list` endpoint): Returns deduplicated tracker list as plain text
- Links separated by double newlines
- Read timeouts enforce reasonable request handling
5. **Source Fetching** (`fetchSource`): Pluggable source handlers
- **HTTP/HTTPS**: Makes requests with context support and timeout (15s)
- **File**: Reads local files via `file://` URLs
- Response is parsed line-by-line
6. **Link Validation** (`normalizeLinks`/`isValidTrackerLink`):
- Strips whitespace and empty lines
- Validates URL format and supported schemes: `http`, `https`, `udp`, `ws`, `wss`
- Deduplicates via map-based set
7. **Caching** (`writeCache`/`loadCachedLinks`): SHA1-hashed filenames in `cache_dir/`
- Enables graceful degradation if source becomes unavailable
- Filenames are hex-encoded SHA1(source_url) + ".txt"
## Key Design Decisions
- **No external dependencies except go-toml**: Keeps binary small and build simple
- **Simple HTTP server**: Uses stdlib `net/http` instead of frameworks
- **Per-source goroutines**: Allows independent polling without blocking
- **RWMutex for reads**: Readers don't block each other when listing trackers
- **Context propagation**: Respects shutdown signals in all async operations
- **Line-based parsing**: Flexible input format (handles various tracker list formats)
## Testing Notes
The project follows Go conventions but has no test files. Consider adding tests for:
- Link validation edge cases
- Config parsing with invalid inputs
- Concurrent aggregator updates
- Cache file I/O

5
CLAUDE.md Normal file
View File

@@ -0,0 +1,5 @@
# CLAUDE.md
@AGENTS.md

21
Dockerfile Normal file
View File

@@ -0,0 +1,21 @@
FROM golang:1.25.5-alpine AS build
WORKDIR /src
COPY go.mod .
COPY go.sum .
RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o /out/trackers ./main.go
FROM gcr.io/distroless/static:nonroot
COPY --from=build /out/trackers /trackers
USER nonroot:nonroot
ENTRYPOINT ["/trackers"]

51
README.md Normal file
View File

@@ -0,0 +1,51 @@
# Trackers
A torrent tracker aggregator that polls multiple sources and serves a unified,
deduplicated list of tracker URLs via HTTP API.
**Educational project written in Go 1.25.5**
## Features
- Polls tracker sources at configurable intervals
- Supports HTTP/HTTPS and local file sources
- Deduplicates and validates tracker URLs
- Persistent disk caching for resilience
- HTTP proxy support via environment variables
- Request logging for monitoring
- Single binary with minimal dependencies
## Usage
```shell
trackers -config config.toml
```
Get the aggregated tracker list:
```shell
curl http://127.0.0.1:8080/list
```
## Configuration
See [config.dist.toml](config.dist.toml) for a complete configuration example with comments.
Copy the example file and customize:
```shell
cp config.dist.toml config.toml
# Edit config.toml with your sources
```
## Proxy Support
HTTP requests respect standard proxy environment variables:
```shell
export HTTP_PROXY=http://proxy.example.com:8080
export HTTPS_PROXY=http://proxy.example.com:8080
trackers -config config.toml
```
See [config.dist.toml](config.dist.toml) for details.

44
config.dist.toml Normal file
View File

@@ -0,0 +1,44 @@
# Trackers Configuration Example
# Copy this file to config.toml and adjust values for your setup
# HTTP server port
# Default: 8080
port = 8080
# Directory for caching tracker lists
# Used to persist tracker data between restarts and handle source failures
# Default: "cache"
cache_dir = "cache"
# Interval between polling each source
# Valid units: s (seconds), m (minutes), h (hours)
# Examples: "30s", "5m", "1h", "90m"
# Default: "60m"
poll_interval = "60m"
# List of tracker sources to aggregate
# Supported schemes:
# - http:// / https:// - Remote HTTP endpoints
# - file:// - Local files (e.g., file:///path/to/trackers.txt)
#
# Each source should return tracker URLs, one per line
# Blank lines and duplicates are automatically filtered
sources = [
"https://example.com/trackers/all.txt",
"https://another-source.org/trackers.txt",
# "file:///etc/trackers/local.txt",
]
# Proxy Configuration
# ==================
# HTTP requests automatically respect standard proxy environment variables:
#
# HTTP_PROXY - Proxy for HTTP requests (e.g., http://proxy.example.com:8080)
# HTTPS_PROXY - Proxy for HTTPS requests (e.g., http://proxy.example.com:8080)
# NO_PROXY - Comma-separated list of hosts to bypass proxy (e.g., localhost,127.0.0.1)
#
# Example usage:
# export HTTP_PROXY=http://proxy.example.com:8080
# export HTTPS_PROXY=http://proxy.example.com:8080
# export NO_PROXY=localhost,127.0.0.1,.local
# ./trackers -config config.toml

2
go.mod
View File

@@ -1,3 +1,5 @@
module git.vakhrushev.me/av/trackers
go 1.25.5
require github.com/pelletier/go-toml/v2 v2.2.4

2
go.sum Normal file
View File

@@ -0,0 +1,2 @@
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=

330
main.go Normal file
View File

@@ -0,0 +1,330 @@
package main
import (
"context"
"crypto/sha1"
"encoding/hex"
"errors"
"flag"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"os/signal"
"path/filepath"
"sort"
"strings"
"sync"
"syscall"
"time"
toml "github.com/pelletier/go-toml/v2"
)
// Config mirrors the TOML configuration file. Zero values are replaced
// with defaults by loadConfig (port 8080, cache dir "cache", interval 60m).
type Config struct {
// Port is the HTTP server listen port.
Port int `toml:"port"`
// PollInterval is a Go duration string (e.g. "30s", "60m") controlling
// how often each source is re-fetched; parsed in loadConfig.
PollInterval string `toml:"poll_interval"`
// CacheDir is the directory where fetched tracker lists are persisted.
CacheDir string `toml:"cache_dir"`
// Sources lists http(s):// or file:// URLs to aggregate; must be non-empty.
Sources []string `toml:"sources"`
}
// loadConfig reads and parses the TOML configuration at path, applying
// defaults for missing values (port 8080, cache_dir "cache", poll_interval
// "60m"). It returns the populated Config together with the poll interval
// parsed as a time.Duration, or an error describing the first problem found.
func loadConfig(path string) (Config, time.Duration, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return Config{}, 0, fmt.Errorf("read config: %w", err)
	}
	var cfg Config
	if err := toml.Unmarshal(data, &cfg); err != nil {
		return Config{}, 0, fmt.Errorf("parse config: %w", err)
	}
	if cfg.Port == 0 {
		cfg.Port = 8080
	}
	// Fail fast on ports that cannot be bound instead of erroring later
	// at ListenAndServe time with a less obvious message.
	if cfg.Port < 1 || cfg.Port > 65535 {
		return Config{}, 0, fmt.Errorf("port must be in range 1-65535, got %d", cfg.Port)
	}
	if cfg.CacheDir == "" {
		cfg.CacheDir = "cache"
	}
	intervalText := cfg.PollInterval
	if intervalText == "" {
		intervalText = "60m"
	}
	interval, err := time.ParseDuration(intervalText)
	if err != nil {
		return Config{}, 0, fmt.Errorf("parse poll_interval: %w", err)
	}
	if interval <= 0 {
		return Config{}, 0, errors.New("poll_interval must be positive")
	}
	if len(cfg.Sources) == 0 {
		return Config{}, 0, errors.New("no sources configured")
	}
	return cfg, interval, nil
}
// Aggregator holds the current tracker set for each source and is safe
// for concurrent use: pollers write via Update while the HTTP handler
// reads via List.
type Aggregator struct {
// mu guards perSource for concurrent readers and writers.
mu sync.RWMutex
// perSource maps a source URL to the set of tracker links it last returned.
perSource map[string]map[string]struct{}
}
// NewAggregator returns an empty, ready-to-use Aggregator.
func NewAggregator() *Aggregator {
	agg := &Aggregator{
		perSource: map[string]map[string]struct{}{},
	}
	return agg
}
// Update replaces the stored tracker set for source with the given links.
func (a *Aggregator) Update(source string, links []string) {
	next := make(map[string]struct{}, len(links))
	for _, l := range links {
		next[l] = struct{}{}
	}
	a.mu.Lock()
	defer a.mu.Unlock()
	a.perSource[source] = next
}
// List returns the union of tracker links across all sources,
// deduplicated and sorted in ascending order.
func (a *Aggregator) List() []string {
	a.mu.RLock()
	defer a.mu.RUnlock()
	seen := make(map[string]struct{})
	for _, links := range a.perSource {
		for l := range links {
			seen[l] = struct{}{}
		}
	}
	out := make([]string, 0, len(seen))
	for l := range seen {
		out = append(out, l)
	}
	sort.Strings(out)
	return out
}
// main loads configuration, warms the aggregator from the on-disk cache,
// starts one polling goroutine per source, and serves the /list endpoint
// until SIGINT/SIGTERM triggers a graceful shutdown.
func main() {
configPath := flag.String("config", "config.toml", "path to config file")
flag.Parse()
cfg, interval, err := loadConfig(*configPath)
if err != nil {
log.Fatalf("config error: %v", err)
}
// Make sure the cache directory exists before any poller writes to it.
if err := os.MkdirAll(cfg.CacheDir, 0o755); err != nil {
log.Fatalf("cache dir: %v", err)
}
agg := NewAggregator()
// One shared client for all pollers; proxy settings are taken from the
// standard HTTP_PROXY/HTTPS_PROXY/NO_PROXY environment variables.
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
}
client := &http.Client{
Timeout: 15 * time.Second,
Transport: transport,
}
// Seed the aggregator from cached data so /list has content even if
// sources are unreachable at startup. Cache misses are logged, not fatal.
for _, source := range cfg.Sources {
cached, err := loadCachedLinks(cfg.CacheDir, source)
if err != nil {
log.Printf("load cache for %s: %v", source, err)
}
if len(cached) > 0 {
agg.Update(source, cached)
}
}
// ctx is cancelled on SIGINT/SIGTERM; pollers and the server watch it.
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer stop()
// One background poller per source; each fetches immediately, then on ticks.
for _, source := range cfg.Sources {
go pollSource(ctx, source, interval, cfg.CacheDir, agg, client)
}
mux := http.NewServeMux()
// /list returns the aggregated tracker list as plain text, with links
// separated by blank lines (double newline).
mux.HandleFunc("/list", func(w http.ResponseWriter, r *http.Request) {
log.Printf("request /list from %s [%s %s]", r.RemoteAddr, r.Method, r.UserAgent())
links := agg.List()
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
for i, link := range links {
if i > 0 {
_, _ = w.Write([]byte("\n\n"))
}
_, _ = w.Write([]byte(link))
}
log.Printf("response /list to %s: %d links", r.RemoteAddr, len(links))
})
server := &http.Server{
Addr: fmt.Sprintf(":%d", cfg.Port),
Handler: mux,
ReadHeaderTimeout: 5 * time.Second,
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
}
// Shut the server down gracefully (5s budget) once the signal context fires.
go func() {
<-ctx.Done()
shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
log.Printf("server shutdown")
if err := server.Shutdown(shutdownCtx); err != nil {
log.Printf("server shutdown error: %v", err)
}
}()
log.Printf("listening on :%d", cfg.Port)
// ErrServerClosed is the expected result of a graceful shutdown.
if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
log.Fatalf("server error: %v", err)
}
}
// pollSource fetches source immediately and then once per interval until
// ctx is cancelled, pushing results into agg and mirroring them to cacheDir.
func pollSource(ctx context.Context, source string, interval time.Duration, cacheDir string, agg *Aggregator, client *http.Client) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		runOnce(ctx, source, cacheDir, agg, client)
		select {
		case <-ctx.Done():
			log.Printf("poller shutdown")
			return
		case <-ticker.C:
			// fall through to the next fetch
		}
	}
}
// runOnce performs a single fetch of source, then updates the aggregator
// and rewrites the cache file on success. Failures are logged and leave
// the previous state (aggregator and cache) untouched.
//
// The original had duplicated update/cache/log paths for the empty and
// non-empty cases; Update and writeCache treat nil and empty slices
// identically, so one path suffices.
func runOnce(ctx context.Context, source string, cacheDir string, agg *Aggregator, client *http.Client) {
	links, err := fetchSource(ctx, source, client)
	if err != nil {
		log.Printf("poll %s: failed - %v", source, err)
		return
	}
	log.Printf("poll %s: success - %d links", source, len(links))
	agg.Update(source, links)
	if err := writeCache(cacheDir, source, links); err != nil {
		log.Printf("write cache %s: %v", source, err)
	}
}
// fetchSource retrieves a tracker list from source, which may be an
// http(s):// URL or a file:// path, and returns the normalized links.
// HTTP fetches honor ctx for cancellation and require a 2xx status.
func fetchSource(ctx context.Context, source string, client *http.Client) ([]string, error) {
	// Cap how much of a response body we will buffer so a misbehaving or
	// hostile source cannot exhaust memory; tracker lists are tiny in practice.
	const maxBodyBytes = 10 << 20 // 10 MiB

	u, err := url.Parse(source)
	if err != nil {
		return nil, fmt.Errorf("invalid source url: %w", err)
	}
	switch u.Scheme {
	case "http", "https":
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, source, nil)
		if err != nil {
			return nil, fmt.Errorf("build request: %w", err)
		}
		resp, err := client.Do(req)
		if err != nil {
			return nil, fmt.Errorf("request failed: %w", err)
		}
		defer resp.Body.Close()
		if resp.StatusCode < 200 || resp.StatusCode >= 300 {
			return nil, fmt.Errorf("unexpected status: %s", resp.Status)
		}
		log.Printf("fetch %s: HTTP %d", source, resp.StatusCode)
		body, err := io.ReadAll(io.LimitReader(resp.Body, maxBodyBytes))
		if err != nil {
			return nil, fmt.Errorf("read body: %w", err)
		}
		return normalizeLinks(string(body)), nil
	case "file":
		path := u.Path
		if path == "" {
			return nil, errors.New("file source path is empty")
		}
		data, err := os.ReadFile(path)
		if err != nil {
			return nil, fmt.Errorf("read file: %w", err)
		}
		log.Printf("fetch %s: file read %d bytes", source, len(data))
		return normalizeLinks(string(data)), nil
	default:
		return nil, fmt.Errorf("unsupported source scheme: %s", u.Scheme)
	}
}
// normalizeLinks splits content into lines, trims whitespace, drops blank
// lines and invalid tracker links, deduplicates, and returns the remainder
// sorted in ascending order.
func normalizeLinks(content string) []string {
	seen := make(map[string]struct{})
	for _, raw := range strings.Split(content, "\n") {
		candidate := strings.TrimSpace(raw)
		if candidate == "" || !isValidTrackerLink(candidate) {
			continue
		}
		seen[candidate] = struct{}{}
	}
	links := make([]string, 0, len(seen))
	for l := range seen {
		links = append(links, l)
	}
	sort.Strings(links)
	return links
}
// isValidTrackerLink reports whether link parses as a URL with a non-empty
// host and one of the supported tracker schemes: http, https, udp, ws, wss.
func isValidTrackerLink(link string) bool {
	parsed, err := url.Parse(link)
	if err != nil || parsed.Hostname() == "" {
		return false
	}
	switch parsed.Scheme {
	case "http", "https", "udp", "ws", "wss":
		return true
	}
	return false
}
// cacheFilePath returns the on-disk cache location for a source URL:
// <cacheDir>/<hex SHA-1 of source>.txt. Hashing keeps filenames safe
// regardless of characters in the source URL.
func cacheFilePath(cacheDir, source string) string {
	digest := sha1.Sum([]byte(source))
	return filepath.Join(cacheDir, hex.EncodeToString(digest[:])+".txt")
}
// writeCache persists links (one per line) to the cache file for source
// inside cacheDir, replacing any previous contents.
func writeCache(cacheDir, source string, links []string) error {
	content := strings.Join(links, "\n")
	target := cacheFilePath(cacheDir, source)
	return os.WriteFile(target, []byte(content), 0o644)
}
// loadCachedLinks reads the previously cached tracker list for source from
// cacheDir and normalizes it. A missing or unreadable cache file surfaces
// as the underlying read error.
func loadCachedLinks(cacheDir, source string) ([]string, error) {
	raw, err := os.ReadFile(cacheFilePath(cacheDir, source))
	if err != nil {
		return nil, err
	}
	return normalizeLinks(string(raw)), nil
}