Merge branch 'master' into feature-release_profile

Spike 2022-12-04 12:31:07 -08:00 committed by GitHub
commit f33d44c5a5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
32 changed files with 1847 additions and 426 deletions


@ -33,6 +33,6 @@ jobs:
file: ./Dockerfile.arm
platforms: linux/arm64
push: true
tags: spikecodes/libreddit:arm
tags: libreddit/libreddit:arm
cache-from: type=gha
cache-to: type=gha,mode=max


@ -36,6 +36,6 @@ jobs:
file: ./Dockerfile.armv7
platforms: linux/arm/v7
push: true
tags: spikecodes/libreddit:armv7
tags: libreddit/libreddit:armv7
cache-from: type=gha
cache-to: type=gha,mode=max


@ -26,6 +26,12 @@ jobs:
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Docker Hub Description
uses: peter-evans/dockerhub-description@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
repository: libreddit/libreddit
- name: Build and push
uses: docker/build-push-action@v2
with:
@ -33,6 +39,6 @@ jobs:
file: ./Dockerfile
platforms: linux/amd64
push: true
tags: spikecodes/libreddit:latest
tags: libreddit/libreddit:latest
cache-from: type=gha
cache-to: type=gha,mode=max

CREDITS (new file, +82)

@ -0,0 +1,82 @@
5trongthany <65565784+5trongthany@users.noreply.github.com>
674Y3r <87250374+674Y3r@users.noreply.github.com>
accountForIssues <52367365+accountForIssues@users.noreply.github.com>
Adrian Lebioda <adrianlebioda@gmail.com>
alefvanoon <53198048+alefvanoon@users.noreply.github.com>
alyaeanyx <alexandra.hollmeier@mailbox.org>
AndreVuillemot160 <84594011+AndreVuillemot160@users.noreply.github.com>
Andrew Kaufman <57281817+andrew-kaufman@users.noreply.github.com>
Artemis <51862164+artemislena@users.noreply.github.com>
arthomnix <35371030+arthomnix@users.noreply.github.com>
Arya K <73596856+gi-yt@users.noreply.github.com>
Austin Huang <im@austinhuang.me>
Basti <pred2k@users.noreply.github.com>
Ben Smith <37027883+smithbm2316@users.noreply.github.com>
BobIsMyManager <ahoumatt@yahoo.com>
curlpipe <11898833+curlpipe@users.noreply.github.com>
dacousb <53299044+dacousb@users.noreply.github.com>
Daniel Valentine <Daniel-Valentine@users.noreply.github.com>
Daniel Valentine <daniel@vielle.ws>
dbrennand <52419383+dbrennand@users.noreply.github.com>
Diego Magdaleno <38844659+DiegoMagdaleno@users.noreply.github.com>
Dyras <jevwmguf@duck.com>
Edward <101938856+EdwardLangdon@users.noreply.github.com>
erdnaxe <erdnaxe@users.noreply.github.com>
Esmail EL BoB <github.defilable@simplelogin.co>
FireMasterK <20838718+FireMasterK@users.noreply.github.com>
George Roubos <cowkingdom@hotmail.com>
git-bruh <e817509a-8ee9-4332-b0ad-3a6bdf9ab63f@aleeas.com>
guaddy <67671414+guaddy@users.noreply.github.com>
Harsh Mishra <erbeusgriffincasper@gmail.com>
igna <igna@intent.cool>
imabritishcow <bcow@protonmail.com>
Josiah <70736638+fres7h@users.noreply.github.com>
JPyke3 <pyke.jacob1@gmail.com>
Kavin <20838718+FireMasterK@users.noreply.github.com>
Kazi <kzshantonu@users.noreply.github.com>
Kieran <42723993+EnderDev@users.noreply.github.com>
Kieran <kieran@dothq.co>
Kyle Roth <kylrth@gmail.com>
laazyCmd <laazy.pr00gramming@protonmail.com>
Laurențiu Nicola <lnicola@users.noreply.github.com>
Lena <102762572+MarshDeer@users.noreply.github.com>
Macic <46872282+Macic-Dev@users.noreply.github.com>
Mario A <10923513+Midblyte@users.noreply.github.com>
Matthew Crossman <matt@crossman.page>
Matthew E <matt@matthew.science>
Mennaruuk <52135169+Mennaruuk@users.noreply.github.com>
mikupls <93015331+mikupls@users.noreply.github.com>
Nainar <nainar.mb@gmail.com>
Nathan Moos <moosingin3space@gmail.com>
Nicholas Christopher <nchristopher@tuta.io>
Nick Lowery <ClockVapor@users.noreply.github.com>
Nico <github@dr460nf1r3.org>
NKIPSC <15067635+NKIPSC@users.noreply.github.com>
obeho <71698631+obeho@users.noreply.github.com>
obscurity <z@x4.pm>
Om G <34579088+OxyMagnesium@users.noreply.github.com>
RiversideRocks <59586759+RiversideRocks@users.noreply.github.com>
robin <8597693+robrobinbin@users.noreply.github.com>
Robin <8597693+robrobinbin@users.noreply.github.com>
robrobinbin <>
robrobinbin <8597693+robrobinbin@users.noreply.github.com>
robrobinbin <robindepril@gmail.com>
Ruben Elshof <15641671+rubenelshof@users.noreply.github.com>
Scoder12 <34356756+Scoder12@users.noreply.github.com>
Slayer <51095261+GhostSlayer@users.noreply.github.com>
Soheb <somoso@users.noreply.github.com>
somini <somini@users.noreply.github.com>
somoso <github@soheb.anonaddy.com>
Spike <19519553+spikecodes@users.noreply.github.com>
spikecodes <19519553+spikecodes@users.noreply.github.com>
sybenx <syb@duck.com>
TheCultLeader666 <65368815+TheCultLeader666@users.noreply.github.com>
TheFrenchGhosty <47571719+TheFrenchGhosty@users.noreply.github.com>
The TwilightBlood <hwengerstickel@protonmail.com>
tirz <36501933+tirz@users.noreply.github.com>
Tsvetomir Bonev <invakid404@riseup.net>
Vladislav Nepogodin <nepogodin.vlad@gmail.com>
Walkx <walkxnl@gmail.com>
Wichai <1482605+Chengings@users.noreply.github.com>
xatier <xatierlike@gmail.com>
Zach <72994911+zachjmurphy@users.noreply.github.com>

Cargo.lock (generated, 186 lines changed)

@ -2,6 +2,12 @@
# It is not intended for manual editing.
version = 3
[[package]]
name = "adler32"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234"
[[package]]
name = "aho-corasick"
version = "0.7.19"
@ -11,6 +17,21 @@ dependencies = [
"memchr",
]
[[package]]
name = "alloc-no-stdlib"
version = "2.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3"
[[package]]
name = "alloc-stdlib"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece"
dependencies = [
"alloc-no-stdlib",
]
[[package]]
name = "askama"
version = "0.11.1"
@ -109,6 +130,27 @@ dependencies = [
"generic-array",
]
[[package]]
name = "brotli"
version = "3.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68"
dependencies = [
"alloc-no-stdlib",
"alloc-stdlib",
"brotli-decompressor",
]
[[package]]
name = "brotli-decompressor"
version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80"
dependencies = [
"alloc-no-stdlib",
"alloc-stdlib",
]
[[package]]
name = "bstr"
version = "0.2.17"
@ -169,9 +211,9 @@ checksum = "3a4f925191b4367301851c6d99b09890311d74b0d43f274c0b34c86d308a3663"
[[package]]
name = "cc"
version = "1.0.74"
version = "1.0.76"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "581f5dba903aac52ea3feb5ec4810848460ee833876f1f9b0fdeab1f19091574"
checksum = "76a284da2e6fe2092f2353e51713435363112dfd60030e22add80be333fb928f"
[[package]]
name = "cfg-if"
@ -181,9 +223,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "clap"
version = "4.0.18"
version = "4.0.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "335867764ed2de42325fafe6d18b8af74ba97ee0c590fa016f157535b42ab04b"
checksum = "60494cedb60cb47462c0ff7be53de32c0e42a6fc2c772184554fa12bd9489c03"
dependencies = [
"bitflags",
"clap_lex",
@ -233,6 +275,15 @@ dependencies = [
"libc",
]
[[package]]
name = "crc32fast"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
dependencies = [
"cfg-if",
]
[[package]]
name = "crypto-common"
version = "0.1.6"
@ -398,6 +449,17 @@ dependencies = [
"version_check",
]
[[package]]
name = "getrandom"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "globset"
version = "0.4.9"
@ -481,9 +543,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421"
[[package]]
name = "hyper"
version = "0.14.22"
version = "0.14.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abfba89e19b959ca163c7752ba59d737c1ceea53a5d31a149c805446fc958064"
checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c"
dependencies = [
"bytes",
"futures-channel",
@ -580,18 +642,41 @@ version = "0.2.137"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89"
[[package]]
name = "libflate"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05605ab2bce11bcfc0e9c635ff29ef8b2ea83f29be257ee7d730cac3ee373093"
dependencies = [
"adler32",
"crc32fast",
"libflate_lz77",
]
[[package]]
name = "libflate_lz77"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39a734c0493409afcd49deee13c006a04e3586b9761a03543c6272c9c51f2f5a"
dependencies = [
"rle-decode-fast",
]
[[package]]
name = "libreddit"
version = "0.23.1"
version = "0.25.0"
dependencies = [
"askama",
"async-recursion",
"brotli",
"cached",
"clap",
"cookie",
"futures-lite",
"hyper",
"hyper-rustls",
"libflate",
"lipsum",
"percent-encoding",
"regex",
"route-recognizer",
@ -603,6 +688,16 @@ dependencies = [
"url",
]
[[package]]
name = "lipsum"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8451846f1f337e44486666989fbce40be804da139d5a4477d6b88ece5dc69f4"
dependencies = [
"rand",
"rand_chacha",
]
[[package]]
name = "lock_api"
version = "0.4.9"
@ -674,23 +769,14 @@ dependencies = [
[[package]]
name = "num_cpus"
version = "1.13.1"
version = "1.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1"
checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5"
dependencies = [
"hermit-abi",
"libc",
]
[[package]]
name = "num_threads"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44"
dependencies = [
"libc",
]
[[package]]
name = "once_cell"
version = "1.16.0"
@ -705,9 +791,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
[[package]]
name = "os_str_bytes"
version = "6.3.1"
version = "6.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3baf96e39c5359d2eb0dd6ccb42c62b91d9678aa68160d261b9e0ccbf9e9dea9"
checksum = "7b5bf27447411e9ee3ff51186bf7a08e16c341efdde93f4d823e8844429bed7e"
[[package]]
name = "parking"
@ -756,6 +842,12 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "ppv-lite86"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "proc-macro2"
version = "1.0.47"
@ -774,6 +866,36 @@ dependencies = [
"proc-macro2",
]
[[package]]
name = "rand"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
dependencies = [
"libc",
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
dependencies = [
"getrandom",
]
[[package]]
name = "redox_syscall"
version = "0.2.16"
@ -785,9 +907,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.6.0"
version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a"
dependencies = [
"aho-corasick",
"memchr",
@ -796,9 +918,9 @@ dependencies = [
[[package]]
name = "regex-syntax"
version = "0.6.27"
version = "0.6.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848"
[[package]]
name = "ring"
@ -815,6 +937,12 @@ dependencies = [
"winapi",
]
[[package]]
name = "rle-decode-fast"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422"
[[package]]
name = "route-recognizer"
version = "0.3.1"
@ -1074,13 +1202,11 @@ dependencies = [
[[package]]
name = "time"
version = "0.3.16"
version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fab5c8b9980850e06d92ddbe3ab839c062c801f3927c0fb8abd6fc8e918fbca"
checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376"
dependencies = [
"itoa",
"libc",
"num_threads",
"serde",
"time-core",
"time-macros",
@ -1094,9 +1220,9 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd"
[[package]]
name = "time-macros"
version = "0.2.5"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65bb801831d812c562ae7d2bfb531f26e66e4e1f6b17307ba4149c5064710e5b"
checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2"
dependencies = [
"time-core",
]

Cargo.toml

@ -3,7 +3,7 @@ name = "libreddit"
description = " Alternative private front-end to Reddit"
license = "AGPL-3.0"
repository = "https://github.com/spikecodes/libreddit"
version = "0.23.1"
version = "0.25.0"
authors = ["spikecodes <19519553+spikecodes@users.noreply.github.com>"]
edition = "2021"
@ -11,22 +11,27 @@ edition = "2021"
askama = { version = "0.11.1", default-features = false }
async-recursion = "1.0.0"
cached = "0.40.0"
clap = { version = "4.0.18", default-features = false, features = ["std"] }
regex = "1.6.0"
clap = { version = "4.0.24", default-features = false, features = ["std"] }
regex = "1.7.0"
serde = { version = "1.0.147", features = ["derive"] }
cookie = "0.16.1"
futures-lite = "1.12.0"
hyper = { version = "0.14.22", features = ["full"] }
hyper = { version = "0.14.23", features = ["full"] }
hyper-rustls = "0.23.0"
percent-encoding = "2.2.0"
route-recognizer = "0.3.1"
serde_json = "1.0.87"
tokio = { version = "1.21.2", features = ["full"] }
time = "0.3.16"
time = "0.3.17"
url = "2.3.1"
rust-embed = { version = "6.4.2", features = ["include-exclude"] }
libflate = "1.2.0"
brotli = { version = "3.3.4", features = ["std"] }
[dev-dependencies]
lipsum = "0.8.2"
[profile.release]
codegen-units = 1
lto = true
strip = true


@ -3,13 +3,18 @@
####################################################################################################
FROM rust:alpine AS builder
RUN apk add --no-cache g++
RUN apk add --no-cache g++ git
WORKDIR /usr/src/libreddit
COPY . .
RUN cargo install --path .
# net.git-fetch-with-cli is specified in order to prevent a potential OOM kill
# in low memory environments. See:
# https://users.rust-lang.org/t/cargo-uses-too-much-memory-being-run-in-qemu/76531
# This is tracked under issue #641. This also requires us to install git in the
# builder.
RUN cargo install --config net.git-fetch-with-cli=true --path .
####################################################################################################
## Final image

README.md (120 lines changed)

@ -29,86 +29,17 @@ I appreciate any donations! Your support allows me to continue developing Libred
# Instances
Feel free to [open an issue](https://github.com/spikecodes/libreddit/issues/new) to have your [selfhosted instance](#deployment) listed here!
🔗 **Want to automatically redirect Reddit links to Libreddit? Use [LibRedirect](https://github.com/libredirect/libredirect) or [Privacy Redirect](https://github.com/SimonBrazell/privacy-redirect)!**
| Website | Country | Cloudflare |
|-|-|-|
| [libredd.it](https://libredd.it) (official) | 🇺🇸 US | |
| [libreddit.spike.codes](https://libreddit.spike.codes) (official) | 🇺🇸 US | |
| [libreddit.dothq.co](https://libreddit.dothq.co) | 🇩🇪 DE | ✅ |
| [libreddit.kavin.rocks](https://libreddit.kavin.rocks) | 🇮🇳 IN | |
| [reddit.invak.id](https://reddit.invak.id) | 🇧🇬 BG | |
| [lr.riverside.rocks](https://lr.riverside.rocks) | 🇺🇸 US | |
| [libreddit.strongthany.cc](https://libreddit.strongthany.cc) | 🇺🇸 US | |
| [libreddit.privacy.com.de](https://libreddit.privacy.com.de) | 🇩🇪 DE | |
| [libreddit.domain.glass](https://libreddit.domain.glass) | 🇺🇸 US | ✅ |
| [r.nf](https://r.nf) | 🇩🇪 DE | ✅ |
| [reddit.stuehieyr.com](https://reddit.stuehieyr.com) | 🇩🇪 DE | |
| [lr.mint.lgbt](https://lr.mint.lgbt) | 🇨🇦 CA | |
| [libreddit.intent.cool](https://libreddit.intent.cool) | 🇺🇸 US | |
| [libreddit.drivet.xyz](https://libreddit.drivet.xyz) | 🇵🇱 PL | |
| [libreddit.de](https://libreddit.de) | 🇩🇪 DE | |
| [libreddit.pussthecat.org](https://libreddit.pussthecat.org) | 🇩🇪 DE | |
| [libreddit.mutahar.rocks](https://libreddit.mutahar.rocks) | 🇫🇷 FR | |
| [libreddit.northboot.xyz](https://libreddit.northboot.xyz) | 🇩🇪 DE | |
| [leddit.xyz](https://leddit.xyz) | 🇺🇸 US | |
| [de.leddit.xyz](https://de.leddit.xyz) | 🇩🇪 DE | |
| [lr.cowfee.moe](https://lr.cowfee.moe) | 🇺🇸 US | |
| [libreddit.hu](https://libreddit.hu) | 🇫🇮 FI | ✅ |
| [libreddit.totaldarkness.net](https://libreddit.totaldarkness.net) | 🇨🇦 CA | |
| [libreddit.esmailelbob.xyz](https://libreddit.esmailelbob.xyz) | 🇨🇦 CA | |
| [lr.vern.cc](https://lr.vern.cc) | 🇨🇦 CA | |
| [libreddit.nl](https://libreddit.nl) | 🇳🇱 NL | |
| [lr.stilic.ml](https://lr.stilic.ml) | 🇫🇷 FR | ✅ |
| [reddi.tk](https://reddi.tk) | 🇺🇸 US | ✅ |
| [libreddit.bus-hit.me](https://libreddit.bus-hit.me) | 🇨🇦 CA | |
| [r.walkx.org](https://r.walkx.org) | 🇳🇱 NL | ✅ |
| [libreddit.kylrth.com](https://libreddit.kylrth.com) | 🇨🇦 CA | |
| [libreddit.yonalee.eu](https://libreddit.yonalee.eu) | 🇱🇺 LU | ✅ |
| [libreddit.winscloud.net](https://libreddit.winscloud.net) | 🇹🇭 TH | ✅ |
| [libreddit.tiekoetter.com](https://libreddit.tiekoetter.com) | 🇩🇪 DE | |
| [reddit.rtrace.io](https://reddit.rtrace.io) | 🇩🇪 DE | |
| [libreddit.lunar.icu](https://libreddit.lunar.icu) | 🇩🇪 DE | ✅ |
| [libreddit.privacydev.net](https://libreddit.privacydev.net) | 🇺🇸 US | |
| [libreddit.notyourcomputer.net](https://libreddit.notyourcomputer.net) | 🇺🇸 US | |
| [r.ahwx.org](https://r.ahwx.org) | 🇳🇱 NL | ✅ |
| [bob.fr.to](https://bob.fr.to) | 🇺🇸 US | |
| [reddit.beparanoid.de](https://reddit.beparanoid.de) | 🇨🇭 CH | |
| [libreddit.dcs0.hu](https://libreddit.dcs0.hu) | 🇭🇺 HU | |
| [reddit.dr460nf1r3.org](https://reddit.dr460nf1r3.org) | 🇩🇪 DE | ✅ |
| [rd.jae.su](https://rd.jae.su) | 🇫🇮 FI | |
| [libreddit.mha.fi](https://libreddit.mha.fi) | 🇫🇮 FI | |
| [libreddit.foss.wtf](https://libreddit.foss.wtf) | 🇩🇪 DE | |
| [libreddit.encrypted-data.xyz](https://libreddit.encrypted-data.xyz)| 🇫🇷 FR | ✅ |
| [libreddit.eu.org](https://libreddit.eu.org)| 🇮🇪 IE | ✅ |
| [l.opnxng.com](https://l.opnxng.com)| 🇸🇬 SG | |
| [libreddit.cachyos.org](https://libreddit.cachyos.org) | 🇩🇪 DE | ✅ |
| [libreddit.oxymagnesium.com](https://libreddit.oxymagnesium.com) | 🇺🇸 US | |
| [spjmllawtheisznfs7uryhxumin26ssv2draj7oope3ok3wuhy43eoyd.onion](http://spjmllawtheisznfs7uryhxumin26ssv2draj7oope3ok3wuhy43eoyd.onion) | 🇮🇳 IN | |
| [fwhhsbrbltmrct5hshrnqlqygqvcgmnek3cnka55zj4y7nuus5muwyyd.onion](http://fwhhsbrbltmrct5hshrnqlqygqvcgmnek3cnka55zj4y7nuus5muwyyd.onion) | 🇩🇪 DE | |
| [kphht2jcflojtqte4b4kyx7p2ahagv4debjj32nre67dxz7y57seqwyd.onion](http://kphht2jcflojtqte4b4kyx7p2ahagv4debjj32nre67dxz7y57seqwyd.onion) | 🇳🇱 NL | |
| [liredejj74h5xjqr2dylnl5howb2bpikfowqoveub55ru27x43357iid.onion](http://liredejj74h5xjqr2dylnl5howb2bpikfowqoveub55ru27x43357iid.onion) | 🇩🇪 DE | |
| [kzhfp3nvb4qp575vy23ccbrgfocezjtl5dx66uthgrhu7nscu6rcwjyd.onion](http://kzhfp3nvb4qp575vy23ccbrgfocezjtl5dx66uthgrhu7nscu6rcwjyd.onion) | 🇺🇸 US | |
| [ecue64ybzvn6vjzl37kcsnwt4ycmbsyf74nbttyg7rkc3t3qwnj7mcyd.onion](http://ecue64ybzvn6vjzl37kcsnwt4ycmbsyf74nbttyg7rkc3t3qwnj7mcyd.onion) | 🇩🇪 DE | |
| [ledditqo2mxfvlgobxnlhrkq4dh34jss6evfkdkb2thlvy6dn4f4gpyd.onion](http://ledditqo2mxfvlgobxnlhrkq4dh34jss6evfkdkb2thlvy6dn4f4gpyd.onion) | 🇺🇸 US | |
| [libredoxhxwnmsb6dvzzd35hmgzmawsq5i764es7witwhddvpc2razid.onion](http://libredoxhxwnmsb6dvzzd35hmgzmawsq5i764es7witwhddvpc2razid.onion) | 🇺🇸 US | |
| [libreddit.2syis2nnyytz6jnusnjurva4swlaizlnleiks5mjp46phuwjbdjqwgqd.onion](http://libreddit.2syis2nnyytz6jnusnjurva4swlaizlnleiks5mjp46phuwjbdjqwgqd.onion) | 🇪🇬 EG | |
| [ol5begilptoou34emq2sshf3may3hlblvipdjtybbovpb7c7zodxmtqd.onion](http://ol5begilptoou34emq2sshf3may3hlblvipdjtybbovpb7c7zodxmtqd.onion) | 🇩🇪 DE | |
| [lbrdtjaj7567ptdd4rv74lv27qhxfkraabnyphgcvptl64ijx2tijwid.onion](http://lbrdtjaj7567ptdd4rv74lv27qhxfkraabnyphgcvptl64ijx2tijwid.onion) | 🇨🇦 CA | |
| [libreddit.esmail5pdn24shtvieloeedh7ehz3nrwcdivnfhfcedl7gf4kwddhkqd.onion](http://libreddit.esmail5pdn24shtvieloeedh7ehz3nrwcdivnfhfcedl7gf4kwddhkqd.onion) | 🇨🇦 CA | |
| [reddit.prnoid54e44a4bduq5due64jkk7wcnkxcp5kv3juncm7veptjcqudgyd.onion](http://reddit.prnoid54e44a4bduq5due64jkk7wcnkxcp5kv3juncm7veptjcqudgyd.onion) | 🇨🇭 CH | |
| [inz6tbezfwzexva6dize4cqraj2tjdhygxabmcgysccesvw2pybzhbyd.onion](http://inz6tbezfwzexva6dize4cqraj2tjdhygxabmcgysccesvw2pybzhbyd.onion) | 🇫🇮 FI | |
| [libreddit.micohauwkjbyw5meacrb4ipicwvwg4xtzl7y7viv53kig2mdcsvwkyyd.onion](http://libreddit.micohauwkjbyw5meacrb4ipicwvwg4xtzl7y7viv53kig2mdcsvwkyyd.onion/)| 🇫🇮 FI | |
| [lr.vernccvbvyi5qhfzyqengccj7lkove6bjot2xhh5kajhwvidqafczrad.onion](http://lr.vernccvbvyi5qhfzyqengccj7lkove6bjot2xhh5kajhwvidqafczrad.onion/) | 🇨🇦 CA | |
A checkmark in the "Cloudflare" column indicates that the site is served through the [Cloudflare](https://cloudflare.com) reverse proxy, which grants Cloudflare the ability to monitor traffic to the website. Sites that merely use Cloudflare DNS are not marked.
[Follow this link](https://github.com/libreddit/libreddit-instances/blob/master/instances.md) for an up-to-date table of instances in markdown format. This list is also available as [a machine-readable JSON](https://github.com/libreddit/libreddit-instances/blob/master/instances.json).
Both files are part of the [libreddit-instances](https://github.com/libreddit/libreddit-instances) repository. To contribute your [self-hosted instance](#deployment) to the list, see the [libreddit-instances README](https://github.com/libreddit/libreddit-instances/blob/master/README.md).
---
# About
Find Libreddit on 💬 [Matrix](https://matrix.to/#/#libreddit:kde.org), 🐋 [Docker](https://hub.docker.com/r/spikecodes/libreddit), :octocat: [GitHub](https://github.com/spikecodes/libreddit), and 🦊 [GitLab](https://gitlab.com/spikecodes/libreddit).
Find Libreddit on 💬 [Matrix](https://matrix.to/#/#libreddit:kde.org), 🐋 [Docker](https://hub.docker.com/r/libreddit/libreddit), :octocat: [GitHub](https://github.com/libreddit/libreddit), and 🦊 [GitLab](https://gitlab.com/libreddit/libreddit).
## Built with
@ -120,7 +51,7 @@ Find Libreddit on 💬 [Matrix](https://matrix.to/#/#libreddit:kde.org), 🐋 [D
## Info
Libreddit hopes to provide an easier way to browse Reddit, without the ads, trackers, and bloat. Libreddit was inspired by other alternative front-ends to popular services such as [Invidious](https://github.com/iv-org/invidious) for YouTube, [Nitter](https://github.com/zedeus/nitter) for Twitter, and [Bibliogram](https://sr.ht/~cadence/bibliogram/) for Instagram.
Libreddit currently implements most of Reddit's (signed-out) functionalities but still lacks [a few features](https://github.com/spikecodes/libreddit/issues).
Libreddit currently implements most of Reddit's (signed-out) functionalities but still lacks [a few features](https://github.com/libreddit/libreddit/issues).
## How does it compare to Teddit?
@ -138,15 +69,15 @@ This section outlines how Libreddit compares to Reddit.
## Speed
Last tested Jan 17, 2021.
Last tested Nov 11, 2022.
Results from Google Lighthouse ([Libreddit Report](https://lighthouse-dot-webdotdevsite.appspot.com/lh/html?url=https%3A%2F%2Flibredd.it), [Reddit Report](https://lighthouse-dot-webdotdevsite.appspot.com/lh/html?url=https%3A%2F%2Fwww.reddit.com%2F)).
Results from Google PageSpeed Insights ([Libreddit Report](https://pagespeed.web.dev/report?url=https%3A%2F%2Flibreddit.spike.codes%2F), [Reddit Report](https://pagespeed.web.dev/report?url=https://www.reddit.com)).
| | Libreddit | Reddit |
|------------------------|---------------|------------|
| Requests | 20 | 70 |
| Resource Size (card ui)| 1,224 KiB | 1,690 KiB |
| Time to Interactive | **1.5 s** | **11.2 s** |
| | Libreddit | Reddit |
|------------------------|-------------|-----------|
| Requests | 60 | 83 |
| Speed Index | 2.0s | 10.4s |
| Time to Interactive | **2.8s** | **12.4s** |
## Privacy
@ -205,21 +136,21 @@ cargo install libreddit
## 2) Docker
Deploy the [Docker image](https://hub.docker.com/r/spikecodes/libreddit) of Libreddit:
Deploy the [Docker image](https://hub.docker.com/r/libreddit/libreddit) of Libreddit:
```
docker pull spikecodes/libreddit
docker run -d --name libreddit -p 8080:8080 spikecodes/libreddit
docker pull libreddit/libreddit
docker run -d --name libreddit -p 8080:8080 libreddit/libreddit
```
Deploy using a different port (in this case, port 80):
```
docker pull spikecodes/libreddit
docker run -d --name libreddit -p 80:8080 spikecodes/libreddit
docker pull libreddit/libreddit
docker run -d --name libreddit -p 80:8080 libreddit/libreddit
```
To deploy on `arm64` platforms, simply replace `spikecodes/libreddit` in the commands above with `spikecodes/libreddit:arm`.
To deploy on `arm64` platforms, simply replace `libreddit/libreddit` in the commands above with `libreddit/libreddit:arm`.
To deploy on `armv7` platforms, simply replace `spikecodes/libreddit` in the commands above with `spikecodes/libreddit:armv7`.
To deploy on `armv7` platforms, simply replace `libreddit/libreddit` in the commands above with `libreddit/libreddit:armv7`.
## 3) AUR
@ -231,14 +162,14 @@ yay -S libreddit-git
## 4) GitHub Releases
If you're on Linux and none of these methods work for you, you can grab a Linux binary from [the newest release](https://github.com/spikecodes/libreddit/releases/latest).
If you're on Linux and none of these methods work for you, you can grab a Linux binary from [the newest release](https://github.com/libreddit/libreddit/releases/latest).
## 5) Replit/Heroku/Glitch
**Note:** These are free hosting options but they are *not* private and will monitor server usage to prevent abuse. If you need a free and easy setup, this method may work best for you.
<a href="https://repl.it/github/spikecodes/libreddit"><img src="https://repl.it/badge/github/spikecodes/libreddit" alt="Run on Repl.it" height="32" /></a>
[![Deploy](https://www.herokucdn.com/deploy/button.svg)](https://heroku.com/deploy?template=https://github.com/spikecodes/libreddit)
<a href="https://repl.it/github/libreddit/libreddit"><img src="https://repl.it/badge/github/libreddit/libreddit" alt="Run on Repl.it" height="32" /></a>
[![Deploy](https://www.herokucdn.com/deploy/button.svg)](https://heroku.com/deploy?template=https://github.com/libreddit/libreddit)
[![Remix on Glitch](https://cdn.glitch.com/2703baf2-b643-4da7-ab91-7ee2a2d00b5b%2Fremix-button-v2.svg)](https://glitch.com/edit/#!/remix/libreddit)
---
@ -257,13 +188,14 @@ Assign a default value for each setting by passing environment variables to Libr
| Name | Possible values | Default value |
|-------------------------|-----------------------------------------------------------------------------------------------------|---------------|
| `THEME` | `["system", "light", "dark", "black", "dracula", "nord", "laserwave", "violet", "gold", "rosebox"]` | `system` |
| `THEME` | `["system", "light", "dark", "black", "dracula", "nord", "laserwave", "violet", "gold", "rosebox", "gruvboxdark", "gruvboxlight"]` | `system` |
| `FRONT_PAGE` | `["default", "popular", "all"]` | `default` |
| `LAYOUT` | `["card", "clean", "compact"]` | `card` |
| `WIDE` | `["on", "off"]` | `off` |
| `POST_SORT` | `["hot", "new", "top", "rising", "controversial"]` | `hot` |
| `COMMENT_SORT` | `["confidence", "top", "new", "controversial", "old"]` | `confidence` |
| `SHOW_NSFW` | `["on", "off"]` | `off` |
| `BLUR_NSFW` | `["on", "off"]` | `off` |
| `USE_HLS` | `["on", "off"]` | `off` |
| `HIDE_HLS_NOTIFICATION` | `["on", "off"]` | `off` |
| `AUTOPLAY_VIDEOS` | `["on", "off"]` | `off` |
@ -280,7 +212,7 @@ LIBREDDIT_DEFAULT_WIDE=on LIBREDDIT_DEFAULT_THEME=dark libreddit -r
## Proxying using NGINX
**NOTE** If you're [proxying Libreddit through an NGINX Reverse Proxy](https://github.com/spikecodes/libreddit/issues/122#issuecomment-782226853), add
**NOTE** If you're [proxying Libreddit through an NGINX Reverse Proxy](https://github.com/libreddit/libreddit/issues/122#issuecomment-782226853), add
```nginx
proxy_http_version 1.1;
```
@ -308,7 +240,7 @@ Before=nginx.service
## Building
```
git clone https://github.com/spikecodes/libreddit
git clone https://github.com/libreddit/libreddit
cd libreddit
cargo run
```

app.json

@ -32,6 +32,9 @@
"LIBREDDIT_DEFAULT_SHOW_NSFW": {
"required": false
},
"LIBREDDIT_DEFAULT_BLUR_NSFW": {
"required": false
},
"LIBREDDIT_USE_HLS": {
"required": false
},

scripts/gen-credits.sh (new executable file, +15)

@ -0,0 +1,15 @@
#!/usr/bin/env bash
# This script generates the CREDITS file in the repository root, which
# contains a list of all contributors to the Libreddit project.
#
# We use git-log to surface the names and emails of all authors and committers,
# and grep will filter out any automated commits made via GitHub.
set -o pipefail
cd "$(dirname "${BASH_SOURCE[0]}")/../" || exit 1
git --no-pager log --pretty='%an <%ae>%n%cn <%ce>' master \
| sort -t'<' -u -k1,1 -k2,2 \
| grep -Fv -- 'GitHub <noreply@github.com>' \
> CREDITS

src/client.rs

@ -1,12 +1,55 @@
use cached::proc_macro::cached;
use futures_lite::{future::Boxed, FutureExt};
use hyper::{body::Buf, client, Body, Request, Response, Uri};
use hyper::{body, body::Buf, client, header, Body, Method, Request, Response, Uri};
use libflate::gzip;
use percent_encoding::{percent_encode, CONTROLS};
use serde_json::Value;
use std::result::Result;
use std::{io, result::Result};
use crate::dbg_msg;
use crate::server::RequestExt;
const REDDIT_URL_BASE: &str = "https://www.reddit.com";
/// Gets the canonical path for a resource on Reddit. This is accomplished by
/// making a `HEAD` request to Reddit at the path given in `path`.
///
/// This function returns `Ok(Some(path))`, where `path` is identical to the
/// argument `path`, if Reddit responds to our `HEAD` request with a
/// 2xx-family HTTP code. It will also return `Ok(Some(String))` if Reddit
/// responds with a 3xx-family HTTP code and a `Location` header; the `String`
/// will contain the path as reported in `Location`. The return value is
/// `Ok(None)` if Reddit responded with a 3xx but did not provide a
/// `Location` header. An `Err(String)` is returned if Reddit responds with a
/// 429, or if we were unable to decode the value in the `Location` header.
#[cached(size = 1024, time = 600, result = true)]
pub async fn canonical_path(path: String) -> Result<Option<String>, String> {
let res = reddit_head(path.clone(), true).await?;
if res.status() == 429 {
return Err("Too many requests.".to_string());
};
// If Reddit responds with a 2xx, then the path is already canonical.
if res.status().to_string().starts_with('2') {
return Ok(Some(path));
}
// If Reddit responds with anything other than a 3xx (the 2xx case was
// handled above), return None.
if !res.status().to_string().starts_with('3') {
return Ok(None);
}
Ok(
res
.headers()
.get(header::LOCATION)
.map(|val| percent_encode(val.as_bytes(), CONTROLS).to_string().trim_start_matches(REDDIT_URL_BASE).to_string()),
)
}
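As a quick illustration of the contract documented above, here is a minimal sketch of how a caller might consume `canonical_path` (the post ID `abc123` is hypothetical; the `main.rs` change below does this for short post links):
```rust
// Sketch only: maps canonical_path's documented return values onto
// caller behavior. The ID "abc123" is hypothetical.
async fn resolve_short_link() -> Result<(), String> {
    match canonical_path("/abc123".to_string()).await? {
        // 2xx (the path was already canonical) or 3xx with a Location
        // header: redirect to the returned path.
        Some(path) => println!("redirect to {}", path),
        // 3xx (or another non-2xx status) without a usable Location
        // header: treat the ID as invalid.
        None => println!("invalid post ID"),
    }
    Ok(())
}
```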
pub async fn proxy(req: Request<Body>, format: &str) -> Result<Response<Body>, String> {
let mut url = format!("{}?{}", format, req.uri().query().unwrap_or_default());
@ -62,20 +105,39 @@ async fn stream(url: &str, req: &Request<Body>) -> Result<Response<Body>, String
.map_err(|e| e.to_string())
}
fn request(url: String, quarantine: bool) -> Boxed<Result<Response<Body>, String>> {
/// Makes a GET request to Reddit at `path`. By default, this will honor HTTP
/// 3xx codes Reddit returns and will automatically redirect.
fn reddit_get(path: String, quarantine: bool) -> Boxed<Result<Response<Body>, String>> {
request(&Method::GET, path, true, quarantine)
}
/// Makes a HEAD request to Reddit at `path`. This will not follow redirects.
fn reddit_head(path: String, quarantine: bool) -> Boxed<Result<Response<Body>, String>> {
request(&Method::HEAD, path, false, quarantine)
}
/// Makes a request to Reddit. If `redirect` is `true`, `request` will
/// recurse on the URL that Reddit provides in the Location HTTP header
/// in its response.
fn request(method: &'static Method, path: String, redirect: bool, quarantine: bool) -> Boxed<Result<Response<Body>, String>> {
// Build Reddit URL from path.
let url = format!("{}{}", REDDIT_URL_BASE, path);
// Prepare the HTTPS connector.
let https = hyper_rustls::HttpsConnectorBuilder::new().with_native_roots().https_or_http().enable_http1().build();
// Construct the hyper client from the HTTPS connector.
let client: client::Client<_, hyper::Body> = client::Client::builder().build(https);
// Build request
// Build request to Reddit. When making a GET, request gzip compression.
// (Reddit doesn't do brotli yet.)
let builder = Request::builder()
.method("GET")
.method(method)
.uri(&url)
.header("User-Agent", format!("web:libreddit:{}", env!("CARGO_PKG_VERSION")))
.header("Host", "www.reddit.com")
.header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8")
.header("Accept-Encoding", if method == Method::GET { "gzip" } else { "identity" })
.header("Accept-Language", "en-US,en;q=0.5")
.header("Connection", "keep-alive")
.header("Cookie", if quarantine { "_options=%7B%22pref_quarantine_optin%22%3A%20true%7D" } else { "" })
@ -84,26 +146,94 @@ fn request(url: String, quarantine: bool) -> Boxed<Result<Response<Body>, String
async move {
match builder {
Ok(req) => match client.request(req).await {
Ok(response) => {
Ok(mut response) => {
// Reddit may respond with a 3xx. Decide whether or not to
// redirect based on caller params.
if response.status().to_string().starts_with('3') {
request(
if !redirect {
return Ok(response);
};
return request(
method,
response
.headers()
.get("Location")
.get(header::LOCATION)
.map(|val| {
let new_url = percent_encode(val.as_bytes(), CONTROLS).to_string();
format!("{}{}raw_json=1", new_url, if new_url.contains('?') { "&" } else { "?" })
// We need to make adjustments to the URI
// we get back from Reddit. Namely, we
// must:
//
// 1. Remove the authority (e.g.
// https://www.reddit.com) that may be
// present, so that we recurse on the
// path (and query parameters) as
// required.
//
// 2. Percent-encode the path.
let new_path = percent_encode(val.as_bytes(), CONTROLS).to_string().trim_start_matches(REDDIT_URL_BASE).to_string();
format!("{}{}raw_json=1", new_path, if new_path.contains('?') { "&" } else { "?" })
})
.unwrap_or_default()
.to_string(),
true,
quarantine,
)
.await
} else {
Ok(response)
.await;
};
match response.headers().get(header::CONTENT_ENCODING) {
// Content not compressed.
None => Ok(response),
// Content encoded (hopefully with gzip).
Some(hdr) => {
match hdr.to_str() {
Ok(val) => match val {
"gzip" => {}
"identity" => return Ok(response),
_ => return Err("Reddit response was encoded with an unsupported compressor".to_string()),
},
Err(_) => return Err("Reddit response was invalid".to_string()),
}
// We get here if the body is gzip-compressed.
// The body must be something that implements
// std::io::Read, hence the conversion to
// bytes::buf::Buf and then transformation into a
// Reader.
let mut decompressed: Vec<u8>;
{
let mut aggregated_body = match body::aggregate(response.body_mut()).await {
Ok(b) => b.reader(),
Err(e) => return Err(e.to_string()),
};
let mut decoder = match gzip::Decoder::new(&mut aggregated_body) {
Ok(decoder) => decoder,
Err(e) => return Err(e.to_string()),
};
decompressed = Vec::<u8>::new();
if let Err(e) = io::copy(&mut decoder, &mut decompressed) {
return Err(e.to_string());
};
}
response.headers_mut().remove(header::CONTENT_ENCODING);
response.headers_mut().insert(header::CONTENT_LENGTH, decompressed.len().into());
*(response.body_mut()) = Body::from(decompressed);
Ok(response)
}
}
}
Err(e) => Err(e.to_string()),
Err(e) => {
dbg_msg!("{} {}: {}", method, path, e);
Err(e.to_string())
}
},
Err(_) => Err("Post url contains non-ASCII characters".to_string()),
}
@ -114,9 +244,6 @@ fn request(url: String, quarantine: bool) -> Boxed<Result<Response<Body>, String
// Make a request to a Reddit API and parse the JSON response
#[cached(size = 100, time = 30, result = true)]
pub async fn json(path: String, quarantine: bool) -> Result<Value, String> {
// Build Reddit url from path
let url = format!("https://www.reddit.com{}", path);
// Closure to quickly build errors
let err = |msg: &str, e: String| -> Result<Value, String> {
// eprintln!("{} - {}: {}", url, msg, e);
@ -124,7 +251,7 @@ pub async fn json(path: String, quarantine: bool) -> Result<Value, String> {
};
// Fetch the url...
match request(url.clone(), quarantine).await {
match reddit_get(path.clone(), quarantine).await {
Ok(response) => {
let status = response.status();
@ -142,7 +269,7 @@ pub async fn json(path: String, quarantine: bool) -> Result<Value, String> {
.as_str()
.unwrap_or_else(|| {
json["message"].as_str().unwrap_or_else(|| {
eprintln!("{} - Error parsing reddit error", url);
eprintln!("{}{} - Error parsing reddit error", REDDIT_URL_BASE, path);
"Error parsing reddit error"
})
})

src/duplicates.rs (new file, +228)

@ -0,0 +1,228 @@
// Handler for post duplicates.
use crate::client::json;
use crate::server::RequestExt;
use crate::subreddit::{can_access_quarantine, quarantine};
use crate::utils::{error, filter_posts, get_filters, parse_post, template, Post, Preferences};
use askama::Template;
use hyper::{Body, Request, Response};
use serde_json::Value;
use std::borrow::ToOwned;
use std::collections::HashSet;
use std::vec::Vec;
/// DuplicatesParams contains the parameters in the URL.
struct DuplicatesParams {
before: String,
after: String,
sort: String,
}
/// DuplicatesTemplate defines an Askama template for rendering duplicate
/// posts.
#[derive(Template)]
#[template(path = "duplicates.html")]
struct DuplicatesTemplate {
/// params contains the relevant request parameters.
params: DuplicatesParams,
/// post is the post whose ID is specified in the request URL. Note that
/// this is not necessarily the "original" post.
post: Post,
/// duplicates is the list of posts that, per Reddit, are duplicates of
/// Post above.
duplicates: Vec<Post>,
/// prefs are the user preferences.
prefs: Preferences,
/// url is the request URL.
url: String,
/// num_posts_filtered counts how many posts were filtered from the
/// duplicates list.
num_posts_filtered: u64,
/// all_posts_filtered is true if every duplicate was filtered. This is an
/// edge case but can still happen.
all_posts_filtered: bool,
}
/// Make the GET request to Reddit. It assumes the path in `req` is the
/// appropriate Reddit REST endpoint for enumerating post duplicates.
pub async fn item(req: Request<Body>) -> Result<Response<Body>, String> {
let path: String = format!("{}.json?{}&raw_json=1", req.uri().path(), req.uri().query().unwrap_or_default());
let sub = req.param("sub").unwrap_or_default();
let quarantined = can_access_quarantine(&req, &sub);
// Log the request in debugging mode
#[cfg(debug_assertions)]
dbg!(req.param("id").unwrap_or_default());
// Send the GET, and await JSON.
match json(path, quarantined).await {
// Process response JSON.
Ok(response) => {
let filters = get_filters(&req);
let post = parse_post(&response[0]["data"]["children"][0]).await;
let (duplicates, num_posts_filtered, all_posts_filtered) = parse_duplicates(&response[1], &filters).await;
// These are the values for the "before=", "after=", and "sort="
// query params, respectively.
let mut before: String = String::new();
let mut after: String = String::new();
let mut sort: String = String::new();
// FIXME: We have to perform a kludge to work around a Reddit API
// bug.
//
// The JSON object in "data" will never contain a "before" value so
// it is impossible to use it to determine our position in a
// listing. We'll make do by getting the ID of the first post in
// the listing, setting that as our "before" value, and ask Reddit
// to give us a batch of duplicate posts up to that post.
//
// Likewise, if we provide a "before" request in the GET, the
// result won't have an "after" in the JSON, in addition to missing
// the "before." So we will have to use the final post in the list
// of duplicates.
//
// That being said, we'll also need to capture the value of the
// "sort=" parameter, so we will need to inspect the query
// key-value pairs anyway.
let l = duplicates.len();
if l > 0 {
// This gets set to true if "before=" is one of the GET params.
let mut have_before: bool = false;
// This gets set to true if "after=" is one of the GET params.
let mut have_after: bool = false;
// Inspect the query key-value pairs. We will need to record
// the value of "sort=", along with checking to see if either
// one of "before=" or "after=" are given.
//
// If we're in the middle of the batch (evidenced by the
// presence of a "before=" or "after=" parameter in the GET),
// then use the first post as the "before" reference.
//
// We'll do this iteratively, rather than with .map_or(), since a
// closure would continue to operate on the remaining elements even
// after we've determined one of "before=" or "after=" (or both)
// is in the GET request.
//
// In practice, there should only ever be one of "before=" or
// "after=", and never both.
let query_str = req.uri().query().unwrap_or_default().to_string();
if !query_str.is_empty() {
for param in query_str.split('&') {
let kv: Vec<&str> = param.split('=').collect();
if kv.len() < 2 {
// Reject invalid query parameter.
continue;
}
let key: &str = kv[0];
match key {
"before" => have_before = true,
"after" => have_after = true,
"sort" => {
let val: &str = kv[1];
match val {
"new" | "num_comments" => sort = val.to_string(),
_ => {}
}
}
_ => {}
}
}
}
if have_after {
before = "t3_".to_owned();
before.push_str(&duplicates[0].id);
}
// Address potentially missing "after". If "before=" is in the
// GET, then "after" will be null in the JSON (see FIXME
// above).
if have_before {
// The next batch will need to start from one after the
// last post in the current batch.
after = "t3_".to_owned();
after.push_str(&duplicates[l - 1].id);
// Here is where things get terrible. Notice that we
// haven't set `before`. In order to do so, we will
// need to know if there is a batch that exists before
// this one, and doing so requires actually fetching the
// previous batch. In other words, we have to do yet one
// more GET to Reddit. There is no other way to determine
// whether or not to define `before`.
//
// We'll mitigate that by requesting at most one duplicate.
let new_path: String = format!(
"{}.json?before=t3_{}&sort={}&limit=1&raw_json=1",
req.uri().path(),
&duplicates[0].id,
if sort.is_empty() { "num_comments".to_string() } else { sort.clone() }
);
match json(new_path, true).await {
Ok(response) => {
if !response[1]["data"]["children"].as_array().unwrap_or(&Vec::new()).is_empty() {
before = "t3_".to_owned();
before.push_str(&duplicates[0].id);
}
}
Err(msg) => {
// Abort entirely if we couldn't get the previous
// batch.
return error(req, msg).await;
}
}
} else {
after = response[1]["data"]["after"].as_str().unwrap_or_default().to_string();
}
}
let url = req.uri().to_string();
template(DuplicatesTemplate {
params: DuplicatesParams { before, after, sort },
post,
duplicates,
prefs: Preferences::new(req),
url,
num_posts_filtered,
all_posts_filtered,
})
}
// Process error.
Err(msg) => {
if msg == "quarantined" {
let sub = req.param("sub").unwrap_or_default();
quarantine(req, sub)
} else {
error(req, msg).await
}
}
}
}
// DUPLICATES
async fn parse_duplicates(json: &serde_json::Value, filters: &HashSet<String>) -> (Vec<Post>, u64, bool) {
let post_duplicates: &Vec<Value> = &json["data"]["children"].as_array().map_or(Vec::new(), ToOwned::to_owned);
let mut duplicates: Vec<Post> = Vec::new();
// Process each post and place them in the Vec<Post>.
for val in post_duplicates.iter() {
let post: Post = parse_post(val).await;
duplicates.push(post);
}
let (num_posts_filtered, all_posts_filtered) = filter_posts(&mut duplicates, filters);
(duplicates, num_posts_filtered, all_posts_filtered)
}

src/main.rs

@ -3,6 +3,7 @@
#![allow(clippy::cmp_owned)]
// Reference local files
mod duplicates;
mod post;
mod search;
mod settings;
@ -17,7 +18,7 @@ use futures_lite::FutureExt;
use hyper::{header::HeaderValue, Body, Request, Response};
mod client;
use client::proxy;
use client::{canonical_path, proxy};
use server::RequestExt;
use utils::{error, redirect, ThemeAssets};
@ -244,6 +245,11 @@ async fn main() {
app.at("/comments/:id/:title").get(|r| post::item(r).boxed());
app.at("/comments/:id/:title/:comment_id").get(|r| post::item(r).boxed());
app.at("/r/:sub/duplicates/:id").get(|r| duplicates::item(r).boxed());
app.at("/r/:sub/duplicates/:id/:title").get(|r| duplicates::item(r).boxed());
app.at("/duplicates/:id").get(|r| duplicates::item(r).boxed());
app.at("/duplicates/:id/:title").get(|r| duplicates::item(r).boxed());
app.at("/r/:sub/search").get(|r| search::find(r).boxed());
app
@ -259,9 +265,6 @@ async fn main() {
app.at("/r/:sub/:sort").get(|r| subreddit::community(r).boxed());
// Comments handler
app.at("/comments/:id").get(|r| post::item(r).boxed());
// Front page
app.at("/").get(|r| subreddit::community(r).boxed());
@ -279,13 +282,25 @@ async fn main() {
// Handle about pages
app.at("/about").get(|req| error(req, "About pages aren't added yet".to_string()).boxed());
app.at("/:id").get(|req: Request<Body>| match req.param("id").as_deref() {
// Sort front page
Some("best" | "hot" | "new" | "top" | "rising" | "controversial") => subreddit::community(req).boxed(),
// Short link for post
Some(id) if id.len() > 4 && id.len() < 7 => post::item(req).boxed(),
// Error message for unknown pages
_ => error(req, "Nothing here".to_string()).boxed(),
app.at("/:id").get(|req: Request<Body>| {
Box::pin(async move {
match req.param("id").as_deref() {
// Sort front page
Some("best" | "hot" | "new" | "top" | "rising" | "controversial") => subreddit::community(req).await,
// Short link for post
Some(id) if (5..7).contains(&id.len()) => match canonical_path(format!("/{}", id)).await {
Ok(path_opt) => match path_opt {
Some(path) => Ok(redirect(path)),
None => error(req, "Post ID is invalid. It may point to a post on a community that has been banned.").await,
},
Err(e) => error(req, e).await,
},
// Error message for unknown pages
_ => error(req, "Nothing here".to_string()).await,
}
})
});
// Default service in case no routes match

src/post.rs

@ -3,7 +3,7 @@ use crate::client::json;
use crate::server::RequestExt;
use crate::subreddit::{can_access_quarantine, quarantine};
use crate::utils::{
error, format_num, format_url, get_filters, param, rewrite_urls, setting, template, time, val, Author, Awards, Comment, Flags, Flair, FlairPart, Media, Post, Preferences,
error, format_num, get_filters, param, parse_post, rewrite_urls, setting, template, time, val, Author, Awards, Comment, Flair, FlairPart, Post, Preferences,
};
use hyper::{Body, Request, Response};
@ -54,7 +54,7 @@ pub async fn item(req: Request<Body>) -> Result<Response<Body>, String> {
// Otherwise, grab the JSON output from the request
Ok(response) => {
// Parse the JSON into Post and Comment structs
let post = parse_post(&response[0]).await;
let post = parse_post(&response[0]["data"]["children"][0]).await;
let comments = parse_comments(&response[1], &post.permalink, &post.author.name, highlighted_comment, &get_filters(&req));
let url = req.uri().to_string();
@ -80,92 +80,6 @@ pub async fn item(req: Request<Body>) -> Result<Response<Body>, String> {
}
}
// POSTS
async fn parse_post(json: &serde_json::Value) -> Post {
// Retrieve post (as opposed to comments) from JSON
let post: &serde_json::Value = &json["data"]["children"][0];
// Grab UTC time as unix timestamp
let (rel_time, created) = time(post["data"]["created_utc"].as_f64().unwrap_or_default());
// Parse post score and upvote ratio
let score = post["data"]["score"].as_i64().unwrap_or_default();
let ratio: f64 = post["data"]["upvote_ratio"].as_f64().unwrap_or(1.0) * 100.0;
// Determine the type of media along with the media URL
let (post_type, media, gallery) = Media::parse(&post["data"]).await;
let awards: Awards = Awards::parse(&post["data"]["all_awardings"]);
let permalink = val(post, "permalink");
let body = if val(post, "removed_by_category") == "moderator" {
format!(
"<div class=\"md\"><p>[removed] — <a href=\"https://www.unddit.com{}\">view removed post</a></p></div>",
permalink
)
} else {
rewrite_urls(&val(post, "selftext_html"))
};
// Build a post using data parsed from Reddit post API
Post {
id: val(post, "id"),
title: val(post, "title"),
community: val(post, "subreddit"),
body,
author: Author {
name: val(post, "author"),
flair: Flair {
flair_parts: FlairPart::parse(
post["data"]["author_flair_type"].as_str().unwrap_or_default(),
post["data"]["author_flair_richtext"].as_array(),
post["data"]["author_flair_text"].as_str(),
),
text: val(post, "link_flair_text"),
background_color: val(post, "author_flair_background_color"),
foreground_color: val(post, "author_flair_text_color"),
},
distinguished: val(post, "distinguished"),
},
permalink,
score: format_num(score),
upvote_ratio: ratio as i64,
post_type,
media,
thumbnail: Media {
url: format_url(val(post, "thumbnail").as_str()),
alt_url: String::new(),
width: post["data"]["thumbnail_width"].as_i64().unwrap_or_default(),
height: post["data"]["thumbnail_height"].as_i64().unwrap_or_default(),
poster: "".to_string(),
},
flair: Flair {
flair_parts: FlairPart::parse(
post["data"]["link_flair_type"].as_str().unwrap_or_default(),
post["data"]["link_flair_richtext"].as_array(),
post["data"]["link_flair_text"].as_str(),
),
text: val(post, "link_flair_text"),
background_color: val(post, "link_flair_background_color"),
foreground_color: if val(post, "link_flair_text_color") == "dark" {
"black".to_string()
} else {
"white".to_string()
},
},
flags: Flags {
nsfw: post["data"]["over_18"].as_bool().unwrap_or(false),
stickied: post["data"]["stickied"].as_bool().unwrap_or(false),
},
domain: val(post, "domain"),
rel_time,
created,
comments: format_num(post["data"]["num_comments"].as_i64().unwrap_or_default()),
gallery,
awards,
}
}
// COMMENTS
fn parse_comments(json: &serde_json::Value, post_link: &str, post_author: &str, highlighted_comment: &str, filters: &HashSet<String>) -> Vec<Comment> {
// Parse the comment JSON into a Vector of Comments

src/search.rs

@ -42,6 +42,8 @@ struct SearchTemplate {
/// Whether all fetched posts are filtered (to differentiate between no posts fetched in the first place,
/// and all fetched posts being filtered).
all_posts_filtered: bool,
/// Whether all posts were hidden because they are NSFW (and user has disabled show NSFW)
all_posts_hidden_nsfw: bool,
}
// SERVICES
@ -100,12 +102,13 @@ pub async fn find(req: Request<Body>) -> Result<Response<Body>, String> {
url,
is_filtered: true,
all_posts_filtered: false,
all_posts_hidden_nsfw: false,
})
} else {
match Post::fetch(&path, quarantined).await {
Ok((mut posts, after)) => {
let all_posts_filtered = filter_posts(&mut posts, &filters);
let (_, all_posts_filtered) = filter_posts(&mut posts, &filters);
let all_posts_hidden_nsfw = posts.iter().all(|p| p.flags.nsfw) && setting(&req, "show_nsfw") != "on";
template(SearchTemplate {
posts,
subreddits,
@ -123,6 +126,7 @@ pub async fn find(req: Request<Body>) -> Result<Response<Body>, String> {
url,
is_filtered: false,
all_posts_filtered,
all_posts_hidden_nsfw,
})
}
Err(msg) => {

src/server.rs

@ -1,17 +1,80 @@
use brotli::enc::{BrotliCompress, BrotliEncoderParams};
use cached::proc_macro::cached;
use cookie::Cookie;
use core::f64;
use futures_lite::{future::Boxed, Future, FutureExt};
use hyper::{
header::HeaderValue,
body,
body::HttpBody,
header,
service::{make_service_fn, service_fn},
HeaderMap,
};
use hyper::{Body, Method, Request, Response, Server as HyperServer};
use libflate::gzip;
use route_recognizer::{Params, Router};
use std::{pin::Pin, result::Result};
use std::{
cmp::Ordering,
io,
pin::Pin,
result::Result,
str::{from_utf8, Split},
string::ToString,
};
use time::Duration;
use crate::dbg_msg;
type BoxResponse = Pin<Box<dyn Future<Output = Result<Response<Body>, String>> + Send>>;
/// Compressors for the response Body, in ascending order of preference.
#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
enum CompressionType {
Passthrough,
Gzip,
Brotli,
}
/// All browsers support gzip, so if we are given `Accept-Encoding: *`, deliver
/// gzipped content.
///
/// Brotli would be nice universally, but Safari (iOS, iPhone, macOS) reportedly
/// doesn't support it yet.
const DEFAULT_COMPRESSOR: CompressionType = CompressionType::Gzip;
impl CompressionType {
/// Returns a `CompressionType` given a content coding
/// in [RFC 7231](https://datatracker.ietf.org/doc/html/rfc7231#section-5.3.4)
/// format.
fn parse(s: &str) -> Option<CompressionType> {
let c = match s {
// Compressors we support.
"gzip" => CompressionType::Gzip,
"br" => CompressionType::Brotli,
// The wildcard means that we can choose whatever
// compression we prefer. In this case, use the
// default.
"*" => DEFAULT_COMPRESSOR,
// Compressor not supported.
_ => return None,
};
Some(c)
}
}
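For reference, a small sketch (not part of the diff) of what `CompressionType::parse` yields for the content codings discussed above; `deflate` stands in for any unsupported coding:
```rust
// Sketch: expected results per the match arms in CompressionType::parse.
assert_eq!(CompressionType::parse("gzip"), Some(CompressionType::Gzip));
assert_eq!(CompressionType::parse("br"), Some(CompressionType::Brotli));
assert_eq!(CompressionType::parse("*"), Some(DEFAULT_COMPRESSOR)); // wildcard -> our default (gzip)
assert_eq!(CompressionType::parse("deflate"), None); // unsupported
```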
impl ToString for CompressionType {
fn to_string(&self) -> String {
match self {
CompressionType::Gzip => "gzip".to_string(),
CompressionType::Brotli => "br".to_string(),
_ => String::new(),
}
}
}
pub struct Route<'a> {
router: &'a mut Router<fn(Request<Body>) -> BoxResponse>,
path: String,
@ -97,7 +160,7 @@ impl ResponseExt for Response<Body> {
}
fn insert_cookie(&mut self, cookie: Cookie) {
if let Ok(val) = HeaderValue::from_str(&cookie.to_string()) {
if let Ok(val) = header::HeaderValue::from_str(&cookie.to_string()) {
self.headers_mut().append("Set-Cookie", val);
}
}
@ -106,7 +169,7 @@ impl ResponseExt for Response<Body> {
let mut cookie = Cookie::named(name);
cookie.set_path("/");
cookie.set_max_age(Duration::seconds(1));
if let Ok(val) = HeaderValue::from_str(&cookie.to_string()) {
if let Ok(val) = header::HeaderValue::from_str(&cookie.to_string()) {
self.headers_mut().append("Set-Cookie", val);
}
}
@ -156,10 +219,11 @@ impl Server {
// let shared_router = router.clone();
async move {
Ok::<_, String>(service_fn(move |req: Request<Body>| {
let headers = default_headers.clone();
let req_headers = req.headers().clone();
let def_headers = default_headers.clone();
// Remove double slashes and decode encoded slashes
let mut path = req.uri().path().replace("//", "/").replace("%2F","/");
let mut path = req.uri().path().replace("//", "/").replace("%2F", "/");
// Remove trailing slashes
if path != "/" && path.ends_with('/') {
@ -176,26 +240,20 @@ impl Server {
// Run the route's function
let func = (found.handler().to_owned().to_owned())(parammed);
async move {
let res: Result<Response<Body>, String> = func.await;
// Add default headers to response
res.map(|mut response| {
response.headers_mut().extend(headers);
response
})
match func.await {
Ok(mut res) => {
res.headers_mut().extend(def_headers);
let _ = compress_response(req_headers, &mut res).await;
Ok(res)
}
Err(msg) => new_boilerplate(def_headers, req_headers, 500, Body::from(msg)).await,
}
}
.boxed()
}
// If there was a routing error
Err(e) => async move {
// Return a 404 error
let res: Result<Response<Body>, String> = Ok(Response::builder().status(404).body(e.into()).unwrap_or_default());
// Add default headers to response
res.map(|mut response| {
response.headers_mut().extend(headers);
response
})
}
.boxed(),
Err(e) => async move { new_boilerplate(def_headers, req_headers, 404, e.into()).await }.boxed(),
}
}))
}
@ -213,3 +271,480 @@ impl Server {
server.boxed()
}
}
/// Create a boilerplate Response for error conditions. This response will be
/// compressed if requested by the client.
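///
/// For example (with `dh` and `rh` standing in for previously built default
/// and request header maps), `new_boilerplate(dh, rh, 404, Body::from("Not Found")).await`
/// would yield a 404 response carrying the default headers.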
async fn new_boilerplate(
default_headers: HeaderMap<header::HeaderValue>,
req_headers: HeaderMap<header::HeaderValue>,
status: u16,
body: Body,
) -> Result<Response<Body>, String> {
match Response::builder().status(status).body(body) {
Ok(mut res) => {
let _ = compress_response(req_headers, &mut res).await;
res.headers_mut().extend(default_headers.clone());
Ok(res)
}
Err(msg) => Err(msg.to_string()),
}
}
/// Determines the desired compressor based on the Accept-Encoding header.
///
/// This function will honor the [q-value](https://developer.mozilla.org/en-US/docs/Glossary/Quality_values)
/// for each compressor. The q-value is an optional parameter, a decimal value
/// on \[0..1\], to order the compressors by preference. An Accept-Encoding value
/// with no q-values is also accepted.
///
/// Here are [examples](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding#examples)
/// of valid Accept-Encoding headers.
///
/// ```http
/// Accept-Encoding: gzip
/// Accept-Encoding: gzip, compress, br
/// Accept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1
/// ```
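///
/// Given the third example above, this function would return
/// `Some(CompressionType::Brotli)`, since br carries the highest q-value
/// (1.0) and is a compressor we support.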
fn determine_compressor(accept_encoding: &str) -> Option<CompressionType> {
if accept_encoding.is_empty() {
return None;
};
// Keep track of the compressor candidate based on both the client's
// preference and our own. Concrete examples:
//
// 1. "Accept-Encoding: gzip, br" => assuming we like brotli more than
// gzip, and the browser supports brotli, we choose brotli
//
// 2. "Accept-Encoding: gzip;q=0.8, br;q=0.3" => the client has stated a
// preference for gzip over brotli, so we choose gzip
//
// To do this, we need to define a struct which contains the requested
// compressor (abstracted as a CompressionType enum) and the
// q-value. If no q-value is defined for the compressor, we assume one of
// 1.0. We first compare compressor candidates by comparing q-values, and
// then CompressionTypes. We keep track of whatever is the greatest per our
// ordering.
struct CompressorCandidate {
alg: CompressionType,
q: f64,
}
impl Ord for CompressorCandidate {
fn cmp(&self, other: &Self) -> Ordering {
// Compare q-values. Break ties with the
// CompressionType values.
match self.q.total_cmp(&other.q) {
Ordering::Equal => self.alg.cmp(&other.alg),
ord => ord,
}
}
}
impl PartialOrd for CompressorCandidate {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
// Guard against NaN, both on our end and on the other.
if self.q.is_nan() || other.q.is_nan() {
return None;
};
// f64 and CompressionType are ordered, except in the case
// where the f64 is NAN (which we checked against), so we
// can safely return a Some here.
Some(self.cmp(other))
}
}
impl PartialEq for CompressorCandidate {
fn eq(&self, other: &Self) -> bool {
(self.q == other.q) && (self.alg == other.alg)
}
}
impl Eq for CompressorCandidate {}
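// An illustrative ordering under this scheme (q-values compared first,
// ties broken by our own CompressionType preference):
//
//   {Gzip, q=0.8} < {Brotli, q=0.8} < {Gzip, q=1.0}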
// This is the current candidate.
//
// Assume no candidate so far. We do this by assigning the sentinel value
// of negative infinity to the q-value. If this value is negative infinity,
// that means there was no viable compressor candidate.
let mut cur_candidate = CompressorCandidate {
alg: CompressionType::Passthrough,
q: f64::NEG_INFINITY,
};
// This loop reads the requested compressors and keeps track of whichever
// one has the highest priority per our heuristic.
for val in accept_encoding.to_string().split(',') {
let mut q: f64 = 1.0;
// The compressor and q-value (if the latter is defined)
// will be delimited by semicolons.
let mut spl: Split<char> = val.split(';');
// Get the compressor. For example, in
// gzip;q=0.8
// this grabs "gzip" in the string. It
// will further validate the compressor against the
// list of those we support. If it is not supported,
// we move onto the next one.
let compressor: CompressionType = match spl.next() {
// CompressionType::parse will return the appropriate enum given
// a string. For example, it will return CompressionType::Gzip
// when given "gzip".
Some(s) => match CompressionType::parse(s.trim()) {
Some(candidate) => candidate,
// We don't support the requested compression algorithm.
None => continue,
},
// We should never get here, but I'm paranoid.
None => continue,
};
// Get the q-value. This might not be defined, in which case assume
// 1.0.
if let Some(s) = spl.next() {
if !(s.len() > 2 && s.starts_with("q=")) {
// If the q-value is malformed, the header is malformed, so
// abort.
return None;
}
match s[2..].parse::<f64>() {
Ok(val) => {
if (0.0..=1.0).contains(&val) {
q = val;
} else {
// If the value is outside [0..1], header is malformed.
// Abort.
return None;
};
}
Err(_) => {
// If this isn't an f64, then assume a malformed header
// value and abort.
return None;
}
}
};
// If new_candidate > cur_candidate, make new_candidate the new
// cur_candidate. But do this safely! It is very possible that
// someone gave us the string "NAN", which (&str).parse::<f64>
// will happily translate to f64::NAN.
let new_candidate = CompressorCandidate { alg: compressor, q };
if let Some(ord) = new_candidate.partial_cmp(&cur_candidate) {
if ord == Ordering::Greater {
cur_candidate = new_candidate;
}
};
}
if cur_candidate.q != f64::NEG_INFINITY {
Some(cur_candidate.alg)
} else {
None
}
}
/// Compress the response body, if possible or desirable. The Body will be
/// compressed in place, and a new header Content-Encoding will be set
/// indicating the compression algorithm.
///
/// This function deems the Body eligible for compression if and only if the
/// following conditions are met:
///
/// 1. the HTTP client requests a compression encoding in the Accept-Encoding
/// header (hence the need for the req_headers);
///
/// 2. the content encoding corresponds to a compression algorithm we support;
///
/// 3. the media type in the Content-Type response header is text with any
/// subtype (e.g. text/plain) or application/json.
///
/// compress_response returns Ok on successful compression, or if not all three
/// conditions above are met. It returns Err if there was a problem decoding
/// any header in either req_headers or res, but res will remain intact.
///
/// This function logs errors to stderr, but only in debug mode. No information
/// is logged in release builds.
async fn compress_response(req_headers: HeaderMap<header::HeaderValue>, res: &mut Response<Body>) -> Result<(), String> {
// Check if the data is eligible for compression.
if let Some(hdr) = res.headers().get(header::CONTENT_TYPE) {
match from_utf8(hdr.as_bytes()) {
Ok(val) => {
let s = val.to_string();
// TODO: better determination of what is eligible for compression
if !(s.starts_with("text/") || s.starts_with("application/json")) {
return Ok(());
};
}
Err(e) => {
dbg_msg!(e);
return Err(e.to_string());
}
};
} else {
// Response declares no Content-Type. Assume for simplicity that it
// cannot be compressed.
return Ok(());
};
// Don't bother if the size of the response body will fit
// within an IP frame (less the bytes that make up the TCP/IP and HTTP
// headers).
if res.body().size_hint().lower() < 1452 {
return Ok(());
};
// Quick and dirty closure for extracting a header from the request and
// returning it as a &str.
let get_req_header = |k: header::HeaderName| -> Option<&str> {
match req_headers.get(k) {
Some(hdr) => match from_utf8(hdr.as_bytes()) {
Ok(val) => Some(val),
#[cfg(debug_assertions)]
Err(e) => {
dbg_msg!(e);
None
}
#[cfg(not(debug_assertions))]
Err(_) => None,
},
None => None,
}
};
// Check to see which compressor is requested, and if we can use it.
let accept_encoding: &str = match get_req_header(header::ACCEPT_ENCODING) {
Some(val) => val,
None => return Ok(()), // Client requested no compression.
};
let compressor: CompressionType = match determine_compressor(accept_encoding) {
Some(c) => c,
None => return Ok(()),
};
// Get the body from the response.
let body_bytes: Vec<u8> = match body::to_bytes(res.body_mut()).await {
Ok(b) => b.to_vec(),
Err(e) => {
dbg_msg!(e);
return Err(e.to_string());
}
};
// Compress!
match compress_body(compressor, body_bytes) {
Ok(compressed) => {
// We get here iff the compression was successful. Replace the body
// with the compressed payload, and add the appropriate
// Content-Encoding header in the response.
res.headers_mut().insert(header::CONTENT_ENCODING, compressor.to_string().parse().unwrap());
*(res.body_mut()) = Body::from(compressed);
}
Err(e) => return Err(e),
}
Ok(())
}
/// Compresses a `Vec<u8>` given a [`CompressionType`].
///
/// This is a helper function for [`compress_response`] and should not be
/// called directly.
// I've chosen a TTL of 600 (== 10 minutes) since compression is
// computationally expensive and we don't want to be doing it often. This is
// larger than client::json's TTL, but that's okay, because if client::json
// returns a new serde_json::Value, body_bytes changes, so this function will
// execute again.
#[cached(size = 100, time = 600, result = true)]
fn compress_body(compressor: CompressionType, body_bytes: Vec<u8>) -> Result<Vec<u8>, String> {
// io::Cursor implements io::Read, required for our encoders.
let mut reader = io::Cursor::new(body_bytes);
let compressed: Vec<u8> = match compressor {
CompressionType::Gzip => {
let mut gz: gzip::Encoder<Vec<u8>> = match gzip::Encoder::new(Vec::new()) {
Ok(gz) => gz,
Err(e) => {
dbg_msg!(e);
return Err(e.to_string());
}
};
match io::copy(&mut reader, &mut gz) {
Ok(_) => match gz.finish().into_result() {
Ok(compressed) => compressed,
Err(e) => {
dbg_msg!(e);
return Err(e.to_string());
}
},
Err(e) => {
dbg_msg!(e);
return Err(e.to_string());
}
}
}
CompressionType::Brotli => {
// We may want to make the compression parameters configurable
// in the future. For now, the defaults are sufficient.
let brotli_params = BrotliEncoderParams::default();
let mut compressed = Vec::<u8>::new();
match BrotliCompress(&mut reader, &mut compressed, &brotli_params) {
Ok(_) => compressed,
Err(e) => {
dbg_msg!(e);
return Err(e.to_string());
}
}
}
// This arm is for any requested compressor for which we don't yet
// have an implementation.
_ => {
let msg = "unsupported compressor".to_string();
return Err(msg);
}
};
Ok(compressed)
}
#[cfg(test)]
mod tests {
use super::*;
use brotli::Decompressor as BrotliDecompressor;
use futures_lite::future::block_on;
use lipsum::lipsum;
use std::{boxed::Box, io};
#[test]
fn test_determine_compressor() {
// Single compressor given.
assert_eq!(determine_compressor("unsupported"), None);
assert_eq!(determine_compressor("gzip"), Some(CompressionType::Gzip));
assert_eq!(determine_compressor("*"), Some(DEFAULT_COMPRESSOR));
// Multiple compressors.
assert_eq!(determine_compressor("gzip, br"), Some(CompressionType::Brotli));
assert_eq!(determine_compressor("gzip;q=0.8, br;q=0.3"), Some(CompressionType::Gzip));
assert_eq!(determine_compressor("br, gzip"), Some(CompressionType::Brotli));
assert_eq!(determine_compressor("br;q=0.3, gzip;q=0.4"), Some(CompressionType::Gzip));
// Invalid q-values.
assert_eq!(determine_compressor("gzip;q=NAN"), None);
}
#[test]
fn test_compress_response() {
// This macro generates an Accept-Encoding header value given any number of
// compressors.
macro_rules! ae_gen {
($x:expr) => {
$x.to_string().as_str()
};
($x:expr, $($y:expr),+) => {
format!("{}, {}", $x.to_string(), ae_gen!($($y),+)).as_str()
};
}
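// For example, ae_gen!(CompressionType::Gzip, CompressionType::Brotli)
// expands to the header value "gzip, br".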
for accept_encoding in [
"*",
ae_gen!(CompressionType::Gzip),
ae_gen!(CompressionType::Brotli, CompressionType::Gzip),
ae_gen!(CompressionType::Brotli),
] {
// Determine what the expected encoding should be based on the
// specific encodings we accept.
let expected_encoding: CompressionType = match determine_compressor(accept_encoding) {
Some(s) => s,
None => panic!("determine_compressor(accept_encoding) => None"),
};
// Build headers with our Accept-Encoding.
let mut req_headers = HeaderMap::new();
req_headers.insert(header::ACCEPT_ENCODING, header::HeaderValue::from_str(accept_encoding).unwrap());
// Build test response.
let lorem_ipsum: String = lipsum(10000);
let expected_lorem_ipsum = Vec::<u8>::from(lorem_ipsum.as_str());
let mut res = Response::builder()
.status(200)
.header(header::CONTENT_TYPE, "text/plain")
.body(Body::from(lorem_ipsum))
.unwrap();
// Perform the compression.
if let Err(e) = block_on(compress_response(req_headers, &mut res)) {
panic!("compress_response(req_headers, &mut res) => Err(\"{}\")", e);
};
// If the content was compressed, we expect the Content-Encoding
// header to be modified.
assert_eq!(
res
.headers()
.get(header::CONTENT_ENCODING)
.unwrap_or_else(|| panic!("missing content-encoding header"))
.to_str()
.unwrap_or_else(|_| panic!("failed to convert Content-Encoding header::HeaderValue to String")),
expected_encoding.to_string()
);
// Decompress body and make sure it's equal to what we started
// with.
//
// In the case of no compression, just make sure the "new" body in
// the Response is the same as what we started with.
let body_vec = match block_on(body::to_bytes(res.body_mut())) {
Ok(b) => b.to_vec(),
Err(e) => panic!("{}", e),
};
if expected_encoding == CompressionType::Passthrough {
assert!(body_vec.eq(&expected_lorem_ipsum));
continue;
}
// This provides an io::Read for the underlying body.
let mut body_cursor: io::Cursor<Vec<u8>> = io::Cursor::new(body_vec);
// Match the appropriate decompressor for the given
// expected_encoding.
let mut decoder: Box<dyn io::Read> = match expected_encoding {
CompressionType::Gzip => match gzip::Decoder::new(&mut body_cursor) {
Ok(dgz) => Box::new(dgz),
Err(e) => panic!("{}", e),
},
CompressionType::Brotli => Box::new(BrotliDecompressor::new(body_cursor, expected_lorem_ipsum.len())),
_ => panic!("no decompressor for {}", expected_encoding.to_string()),
};
let mut decompressed = Vec::<u8>::new();
match io::copy(&mut decoder, &mut decompressed) {
Ok(_) => {}
Err(e) => panic!("{}", e),
};
assert!(decompressed.eq(&expected_lorem_ipsum));
}
}
}

View File

@ -19,7 +19,7 @@ struct SettingsTemplate {
// CONSTANTS
const PREFS: [&str; 10] = [
const PREFS: [&str; 11] = [
"theme",
"front_page",
"layout",
@ -27,6 +27,7 @@ const PREFS: [&str; 10] = [
"comment_sort",
"post_sort",
"show_nsfw",
"blur_nsfw",
"use_hls",
"hide_hls_notification",
"autoplay_videos",

View File

@ -24,6 +24,8 @@ struct SubredditTemplate {
/// Whether all fetched posts are filtered (to differentiate between no posts fetched in the first place,
/// and all fetched posts being filtered).
all_posts_filtered: bool,
/// Whether all posts were hidden because they are NSFW (and the user has not enabled "Show NSFW posts")
all_posts_hidden_nsfw: bool,
}
#[derive(Template)]
@ -111,12 +113,13 @@ pub async fn community(req: Request<Body>) -> Result<Response<Body>, String> {
redirect_url,
is_filtered: true,
all_posts_filtered: false,
all_posts_hidden_nsfw: false,
})
} else {
match Post::fetch(&path, quarantined).await {
Ok((mut posts, after)) => {
let all_posts_filtered = filter_posts(&mut posts, &filters);
let (_, all_posts_filtered) = filter_posts(&mut posts, &filters);
let all_posts_hidden_nsfw = posts.iter().all(|p| p.flags.nsfw) && setting(&req, "show_nsfw") != "on";
template(SubredditTemplate {
sub,
posts,
@ -127,6 +130,7 @@ pub async fn community(req: Request<Body>) -> Result<Response<Body>, String> {
redirect_url,
is_filtered: false,
all_posts_filtered,
all_posts_hidden_nsfw,
})
}
Err(msg) => match msg.as_str() {

View File

@ -1,7 +1,7 @@
// CRATES
use crate::client::json;
use crate::server::RequestExt;
use crate::utils::{error, filter_posts, format_url, get_filters, param, template, Post, Preferences, User};
use crate::utils::{error, filter_posts, format_url, get_filters, param, setting, template, Post, Preferences, User};
use askama::Template;
use hyper::{Body, Request, Response};
use time::{macros::format_description, OffsetDateTime};
@ -24,6 +24,8 @@ struct UserTemplate {
/// Whether all fetched posts are filtered (to differentiate between no posts fetched in the first place,
/// and all fetched posts being filtered).
all_posts_filtered: bool,
/// Whether all posts were hidden because they are NSFW (and the user has not enabled "Show NSFW posts")
all_posts_hidden_nsfw: bool,
}
// FUNCTIONS
@ -58,13 +60,14 @@ pub async fn profile(req: Request<Body>) -> Result<Response<Body>, String> {
redirect_url,
is_filtered: true,
all_posts_filtered: false,
all_posts_hidden_nsfw: false,
})
} else {
// Request user posts/comments from Reddit
match Post::fetch(&path, false).await {
Ok((mut posts, after)) => {
let all_posts_filtered = filter_posts(&mut posts, &filters);
let (_, all_posts_filtered) = filter_posts(&mut posts, &filters);
let all_posts_hidden_nsfw = posts.iter().all(|p| p.flags.nsfw) && setting(&req, "show_nsfw") != "on";
template(UserTemplate {
user,
posts,
@ -76,6 +79,7 @@ pub async fn profile(req: Request<Body>) -> Result<Response<Body>, String> {
redirect_url,
is_filtered: false,
all_posts_filtered,
all_posts_hidden_nsfw,
})
}
// If there is an error show error page

View File

@ -13,6 +13,21 @@ use std::str::FromStr;
use time::{macros::format_description, Duration, OffsetDateTime};
use url::Url;
/// Write a message to stderr in debug mode. This macro is a no-op in
/// release builds.
#[macro_export]
macro_rules! dbg_msg {
($x:expr) => {
#[cfg(debug_assertions)]
eprintln!("{}:{}: {}", file!(), line!(), $x.to_string())
};
($($x:expr),+) => {
#[cfg(debug_assertions)]
dbg_msg!(format!($($x),+))
};
}
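// Illustrative uses (the variadic form simply forwards to format!):
//   dbg_msg!("could not decode header");
//   dbg_msg!("{} => {}", "Accept-Encoding", "gzip");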
// Post flair with content, background color and foreground color
pub struct Flair {
pub flair_parts: Vec<FlairPart>,
@ -210,6 +225,7 @@ pub struct Post {
pub domain: String,
pub rel_time: String,
pub created: String,
pub num_duplicates: u64,
pub comments: (String, String),
pub gallery: Vec<GalleryMedia>,
pub awards: Awards,
@ -304,11 +320,12 @@ impl Post {
},
flags: Flags {
nsfw: data["over_18"].as_bool().unwrap_or_default(),
stickied: data["stickied"].as_bool().unwrap_or_default(),
stickied: data["stickied"].as_bool().unwrap_or_default() || data["pinned"].as_bool().unwrap_or_default(),
},
permalink: val(post, "permalink"),
rel_time,
created,
num_duplicates: post["data"]["num_duplicates"].as_u64().unwrap_or(0),
comments: format_num(data["num_comments"].as_i64().unwrap_or_default()),
gallery,
awards,
@ -447,6 +464,7 @@ pub struct Preferences {
pub layout: String,
pub wide: String,
pub show_nsfw: String,
pub blur_nsfw: String,
pub hide_hls_notification: String,
pub use_hls: String,
pub autoplay_videos: String,
@ -478,6 +496,7 @@ impl Preferences {
layout: setting(&req, "layout"),
wide: setting(&req, "wide"),
show_nsfw: setting(&req, "show_nsfw"),
blur_nsfw: setting(&req, "blur_nsfw"),
use_hls: setting(&req, "use_hls"),
hide_hls_notification: setting(&req, "hide_hls_notification"),
autoplay_videos: setting(&req, "autoplay_videos"),
@ -494,15 +513,110 @@ pub fn get_filters(req: &Request<Body>) -> HashSet<String> {
setting(req, "filters").split('+').map(String::from).filter(|s| !s.is_empty()).collect::<HashSet<String>>()
}
/// Filters a `Vec<Post>` by the given `HashSet` of filters (each filter being a subreddit name or a user name). If a
/// `Post`'s subreddit or author is found in the filters, it is removed. Returns `true` if _all_ posts were filtered
/// out, or `false` otherwise.
pub fn filter_posts(posts: &mut Vec<Post>, filters: &HashSet<String>) -> bool {
/// Filters a `Vec<Post>` by the given `HashSet` of filters (each filter being
/// a subreddit name or a user name). If a `Post`'s subreddit or author is
/// found in the filters, it is removed.
///
/// The first value of the return tuple is the number of posts filtered. The
/// second return value is `true` if all posts were filtered.
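///
/// For example, with a filter set containing "rust" and "u_spez", any post
/// in r/rust and any post authored by u/spez would be removed.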
pub fn filter_posts(posts: &mut Vec<Post>, filters: &HashSet<String>) -> (u64, bool) {
// This is the length of the Vec<Post> prior to applying the filter.
let lb: u64 = posts.len().try_into().unwrap_or(0);
if posts.is_empty() {
false
(0, false)
} else {
posts.retain(|p| !filters.contains(&p.community) && !filters.contains(&["u_", &p.author.name].concat()));
posts.is_empty()
posts.retain(|p| !(filters.contains(&p.community) || filters.contains(&["u_", &p.author.name].concat())));
// Get the length of the Vec<Post> after applying the filter.
// If lb > la, then at least one post was removed.
let la: u64 = posts.len().try_into().unwrap_or(0);
(lb - la, posts.is_empty())
}
}
/// Creates a [`Post`] from a provided JSON value.
pub async fn parse_post(post: &serde_json::Value) -> Post {
// Grab UTC time as unix timestamp
let (rel_time, created) = time(post["data"]["created_utc"].as_f64().unwrap_or_default());
// Parse post score and upvote ratio
let score = post["data"]["score"].as_i64().unwrap_or_default();
let ratio: f64 = post["data"]["upvote_ratio"].as_f64().unwrap_or(1.0) * 100.0;
// Determine the type of media along with the media URL
let (post_type, media, gallery) = Media::parse(&post["data"]).await;
let awards: Awards = Awards::parse(&post["data"]["all_awardings"]);
let permalink = val(post, "permalink");
let body = if val(post, "removed_by_category") == "moderator" {
format!(
"<div class=\"md\"><p>[removed] — <a href=\"https://www.unddit.com{}\">view removed post</a></p></div>",
permalink
)
} else {
rewrite_urls(&val(post, "selftext_html"))
};
// Build a post using data parsed from Reddit post API
Post {
id: val(post, "id"),
title: val(post, "title"),
community: val(post, "subreddit"),
body,
author: Author {
name: val(post, "author"),
flair: Flair {
flair_parts: FlairPart::parse(
post["data"]["author_flair_type"].as_str().unwrap_or_default(),
post["data"]["author_flair_richtext"].as_array(),
post["data"]["author_flair_text"].as_str(),
),
text: val(post, "link_flair_text"),
background_color: val(post, "author_flair_background_color"),
foreground_color: val(post, "author_flair_text_color"),
},
distinguished: val(post, "distinguished"),
},
permalink,
score: format_num(score),
upvote_ratio: ratio as i64,
post_type,
media,
thumbnail: Media {
url: format_url(val(post, "thumbnail").as_str()),
alt_url: String::new(),
width: post["data"]["thumbnail_width"].as_i64().unwrap_or_default(),
height: post["data"]["thumbnail_height"].as_i64().unwrap_or_default(),
poster: String::new(),
},
flair: Flair {
flair_parts: FlairPart::parse(
post["data"]["link_flair_type"].as_str().unwrap_or_default(),
post["data"]["link_flair_richtext"].as_array(),
post["data"]["link_flair_text"].as_str(),
),
text: val(post, "link_flair_text"),
background_color: val(post, "link_flair_background_color"),
foreground_color: if val(post, "link_flair_text_color") == "dark" {
"black".to_string()
} else {
"white".to_string()
},
},
flags: Flags {
nsfw: post["data"]["over_18"].as_bool().unwrap_or_default(),
stickied: post["data"]["stickied"].as_bool().unwrap_or_default() || post["data"]["pinned"].as_bool().unwrap_or(false),
},
domain: val(post, "domain"),
rel_time,
created,
num_duplicates: post["data"]["num_duplicates"].as_u64().unwrap_or(0),
comments: format_num(post["data"]["num_comments"].as_i64().unwrap_or_default()),
gallery,
awards,
}
}
@ -701,10 +815,11 @@ pub fn redirect(path: String) -> Response<Body> {
.unwrap_or_default()
}
pub async fn error(req: Request<Body>, msg: String) -> Result<Response<Body>, String> {
/// Renders a generic error landing page.
pub async fn error(req: Request<Body>, msg: impl ToString) -> Result<Response<Body>, String> {
let url = req.uri().to_string();
let body = ErrorTemplate {
msg,
msg: msg.to_string(),
prefs: Preferences::new(req),
url,
}
@ -716,8 +831,7 @@ pub async fn error(req: Request<Body>, msg: String) -> Result<Response<Body>, St
#[cfg(test)]
mod tests {
use super::format_num;
use super::rewrite_urls;
use super::{format_num, format_url, rewrite_urls};
#[test]
fn format_num_works() {
@ -737,4 +851,33 @@ mod tests {
r#"<a href="https://www.reddit.com/r/linux_gaming/comments/x/just_a_test/">https://www.reddit.com/r/linux_gaming/comments/x/just_a_test/</a>"#
)
}
#[test]
fn test_format_url() {
assert_eq!(format_url("https://a.thumbs.redditmedia.com/XYZ.jpg"), "/thumb/a/XYZ.jpg");
assert_eq!(format_url("https://emoji.redditmedia.com/a/b"), "/emoji/a/b");
assert_eq!(
format_url("https://external-preview.redd.it/foo.jpg?auto=webp&s=bar"),
"/preview/external-pre/foo.jpg?auto=webp&s=bar"
);
assert_eq!(format_url("https://i.redd.it/foobar.jpg"), "/img/foobar.jpg");
assert_eq!(
format_url("https://preview.redd.it/qwerty.jpg?auto=webp&s=asdf"),
"/preview/pre/qwerty.jpg?auto=webp&s=asdf"
);
assert_eq!(format_url("https://v.redd.it/foo/DASH_360.mp4?source=fallback"), "/vid/foo/360.mp4");
assert_eq!(
format_url("https://v.redd.it/foo/HLSPlaylist.m3u8?a=bar&v=1&f=sd"),
"/hls/foo/HLSPlaylist.m3u8?a=bar&v=1&f=sd"
);
assert_eq!(format_url("https://www.redditstatic.com/gold/awards/icon/icon.png"), "/static/gold/awards/icon/icon.png");
assert_eq!(format_url(""), "");
assert_eq!(format_url("self"), "");
assert_eq!(format_url("default"), "");
assert_eq!(format_url("nsfw"), "");
assert_eq!(format_url("spoiler"), "");
}
}

View File

@ -154,6 +154,7 @@ main {
}
#column_one {
width: 100%;
max-width: 750px;
border-radius: 5px;
overflow: inherit;
@ -716,22 +717,39 @@ a.search_subreddit:hover {
font-weight: bold;
}
.post_media_image, .post .__NoScript_PlaceHolder__, .post_media_video, .gallery {
.post_media_content, .post .__NoScript_PlaceHolder__, .gallery {
max-width: calc(100% - 40px);
grid-area: post_media;
margin: 15px auto 5px auto;
width: auto;
height: auto;
overflow: hidden;
}
.post_media_video.short {
max-height: 512px;
.post_media_video {
width: auto;
height: auto;
max-width: 100%;
max-height: 512px;
display: block;
margin: auto;
}
.post_media_image.short svg, .post_media_image.short img{
max-height: 512px;
width: auto;
height: auto;
max-width: 100%;
max-height: 512px;
display: block;
margin: auto;
}
.post_nsfw_blur {
filter: blur(1.5rem);
}
.post_nsfw_blur:hover {
filter: none;
}
.post_media_image svg{
@ -817,6 +835,16 @@ a.search_subreddit:hover {
margin-right: 15px;
}
#post_links > li.desktop_item {
display: list-item;
}
@media screen and (min-width: 480px) {
#post_links > li.mobile_item {
display: none;
}
}
.post_thumbnail {
border-radius: 5px;
border: var(--panel-border);
@ -827,13 +855,25 @@ a.search_subreddit:hover {
margin: 5px;
}
.post_thumbnail svg {
.post_thumbnail div {
grid-area: 1 / 1 / 2 / 2;
width: 100%;
height: auto;
object-fit: cover;
align-self: center;
justify-self: center;
overflow: hidden;
}
.post_thumbnail div svg {
width: 100%;
height: auto;
}
.post_thumbnail span {
z-index: 0;
}
.thumb_nsfw_blur {
filter: blur(0.3rem)
}
.post_thumbnail.no_thumbnail {
@ -1182,16 +1222,21 @@ input[type="submit"] {
color: var(--accent);
}
.md .md-spoiler-text {
.md .md-spoiler-text, .md-spoiler-text a {
background: var(--highlighted);
color: transparent;
}
.md .md-spoiler-text:hover {
.md-spoiler-text:hover {
background: var(--foreground);
color: var(--text);
}
.md-spoiler-text:hover a {
background: var(--foreground);
color: var(--accent);
}
.md li { margin: 10px 0; }
.toc_child { list-style: none; }
@ -1238,6 +1283,29 @@ td, th {
#error h3 { opacity: 0.85; }
#error a { color: var(--accent); }
/* Messages */
#duplicates_msg h3 {
display: inline-block;
margin-top: 10px;
margin-bottom: 10px;
text-align: center;
width: 100%;
}
/* Warnings */
.listing_warn {
display: inline-block;
margin: 10px;
text-align: center;
width: 100%;
}
.listing_warn a {
color: var(--accent);
}
/* Mobile */
@media screen and (max-width: 800px) {
@ -1338,4 +1406,9 @@ td, th {
padding: 7px 0px;
margin-right: -5px;
}
#post_links > li { margin-right: 10px }
#post_links > li.desktop_item { display: none }
#post_links > li.mobile_item { display: list-item }
.post_footer > p > span#upvoted { display: none }
}

13
static/themes/doomone.css Normal file
View File

@ -0,0 +1,13 @@
.doomone {
--accent: #51afef;
--green: #00a229;
--text: #bbc2cf;
--foreground: #3d4148;
--background: #282c34;
--outside: #52565c;
--post: #24272e;
--panel-border: 2px solid #52565c;
--highlighted: #686b70;
--visited: #969692;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}

View File

@ -0,0 +1,13 @@
/* Gruvbox-Dark theme setting */
.gruvboxdark {
--accent: #8ec07c;
--green: #b8bb26;
--text: #ebdbb2;
--foreground: #3c3836;
--background: #282828;
--outside: #3c3836;
--post: #3c3836;
--panel-border: 1px solid #504945;
--highlighted: #282828;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.5);
}

View File

@ -0,0 +1,13 @@
/* Gruvbox-Light theme setting */
.gruvboxlight {
--accent: #427b58;
--green: #79740e;
--text: #3c3836;
--foreground: #ebdbb2;
--background: #fbf1c7;
--outside: #ebdbb2;
--post: #ebdbb2;
--panel-border: 1px solid #d5c4a1;
--highlighted: #fbf1c7;
--shadow: 0 1px 3px rgba(0, 0, 0, 0.25);
}

View File

@ -19,7 +19,7 @@
<!-- PWA Manifest -->
<link rel="manifest" type="application/json" href="/manifest.json">
<link rel="shortcut icon" type="image/x-icon" href="/favicon.ico">
<link rel="stylesheet" type="text/css" href="/style.css">
<link rel="stylesheet" type="text/css" href="/style.css?v={{ env!("CARGO_PKG_VERSION") }}">
{% endblock %}
</head>
<body class="

107
templates/duplicates.html Normal file
View File

@ -0,0 +1,107 @@
{% extends "base.html" %}
{% import "utils.html" as utils %}
{% block title %}{{ post.title }} - r/{{ post.community }}{% endblock %}
{% block search %}
{% call utils::search(["/r/", post.community.as_str()].concat(), "") %}
{% endblock %}
{% block root %}/r/{{ post.community }}{% endblock %}{% block location %}r/{{ post.community }}{% endblock %}
{% block head %}
{% call super() %}
{% endblock %}
{% block subscriptions %}
{% call utils::sub_list(post.community.as_str()) %}
{% endblock %}
{% block content %}
<div id="column_one">
{% call utils::post(post) %}
<!-- DUPLICATES -->
{% if post.num_duplicates == 0 %}
<span class="listing_warn">(No duplicates found)</span>
{% else if post.flags.nsfw && prefs.show_nsfw != "on" %}
<span class="listing_warn">(Enable "Show NSFW posts" in <a href="/settings">settings</a> to show duplicates)</span>
{% else %}
<div id="duplicates_msg"><h3>Duplicates</h3></div>
{% if num_posts_filtered > 0 %}
<span class="listing_warn">
{% if all_posts_filtered %}
(All posts have been filtered)
{% else %}
(Some posts have been filtered)
{% endif %}
</span>
{% endif %}
<div id="sort">
<div id="sort_options">
<a {% if params.sort.is_empty() || params.sort.eq("num_comments") %}class="selected"{% endif %} href="?sort=num_comments">
Number of comments
</a>
<a {% if params.sort.eq("new") %}class="selected"{% endif %} href="?sort=new">
New
</a>
</div>
</div>
<div id="posts">
{% for post in duplicates -%}
{# TODO: utils::post should be reworked to permit a truncated display of a post as below #}
{% if !(post.flags.nsfw) || prefs.show_nsfw == "on" %}
<div class="post {% if post.flags.stickied %}stickied{% endif %}" id="{{ post.id }}">
<p class="post_header">
{% let community -%}
{% if post.community.starts_with("u_") -%}
{% let community = format!("u/{}", &post.community[2..]) -%}
{% else -%}
{% let community = format!("r/{}", post.community) -%}
{% endif -%}
<a class="post_subreddit" href="/r/{{ post.community }}">{{ post.community }}</a>
<span class="dot">&bull;</span>
<a class="post_author {{ post.author.distinguished }}" href="/u/{{ post.author.name }}">u/{{ post.author.name }}</a>
<span class="dot">&bull;</span>
<span class="created" title="{{ post.created }}">{{ post.rel_time }}</span>
{% if !post.awards.is_empty() %}
{% for award in post.awards.clone() %}
<span class="award" title="{{ award.name }}">
<img alt="{{ award.name }}" src="{{ award.icon_url }}" width="16" height="16"/>
</span>
{% endfor %}
{% endif %}
</p>
<h2 class="post_title">
{% if post.flair.flair_parts.len() > 0 %}
<a href="/r/{{ post.community }}/search?q=flair_name%3A%22{{ post.flair.text }}%22&restrict_sr=on"
class="post_flair"
style="color:{{ post.flair.foreground_color }}; background:{{ post.flair.background_color }};"
dir="ltr">{% call utils::render_flair(post.flair.flair_parts) %}</a>
{% endif %}
<a href="{{ post.permalink }}">{{ post.title }}</a>{% if post.flags.nsfw %} <small class="nsfw">NSFW</small>{% endif %}
</h2>
<div class="post_score" title="{{ post.score.1 }}">{{ post.score.0 }}<span class="label"> Upvotes</span></div>
<div class="post_footer">
<a href="{{ post.permalink }}" class="post_comments" title="{{ post.comments.1 }} comments">{{ post.comments.0 }} comments</a>
</div>
</div>
{% endif %}
{%- endfor %}
</div>
<footer>
{% if params.before != "" %}
<a href="?before={{ params.before }}{% if !params.sort.is_empty() %}&sort={{ params.sort }}{% endif %}" accesskey="P">PREV</a>
{% endif %}
{% if params.after != "" %}
<a href="?after={{ params.after }}{% if !params.sort.is_empty() %}&sort={{ params.sort }}{% endif %}" accesskey="N">NEXT</a>
{% endif %}
</footer>
{% endif %}
</div>
{% endblock %}

View File

@ -13,16 +13,25 @@
<!-- Meta Tags -->
<meta name="author" content="u/{{ post.author.name }}">
<meta name="title" content="{{ post.title }} - r/{{ post.community }}">
<meta property="og:type" content="website">
<meta property="og:url" content="{{ post.permalink }}">
<meta property="og:title" content="{{ post.title }} - r/{{ post.community }}">
<meta property="og:description" content="View on Libreddit, an alternative private front-end to Reddit.">
<meta property="og:image" content="{{ post.thumbnail.url }}">
<meta property="twitter:card" content="summary_large_image">
<meta property="og:url" content="{{ post.permalink }}">
<meta property="twitter:url" content="{{ post.permalink }}">
<meta property="twitter:title" content="{{ post.title }} - r/{{ post.community }}">
<meta property="twitter:description" content="View on Libreddit, an alternative private front-end to Reddit.">
{% if post.post_type == "image" %}
<meta property="og:type" content="image">
<meta property="og:image" content="{{ post.thumbnail.url }}">
<meta property="twitter:card" content="summary_large_image">
<meta property="twitter:image" content="{{ post.thumbnail.url }}">
{% else if post.post_type == "video" || post.post_type == "gif" %}
<meta property="twitter:card" content="video">
<meta property="og:type" content="video">
<meta property="og:video" content="{{ post.media.url }}">
<meta property="og:video:type" content="video/mp4">
{% else %}
<meta property="og:type" content="website">
{% endif %}
{% endblock %}
{% block subscriptions %}
@ -31,95 +40,7 @@
{% block content %}
<div id="column_one">
<!-- POST CONTENT -->
<div class="post highlighted">
<p class="post_header">
<a class="post_subreddit" href="/r/{{ post.community }}">r/{{ post.community }}</a>
<span class="dot">&bull;</span>
<a class="post_author {{ post.author.distinguished }}" href="/user/{{ post.author.name }}">u/{{ post.author.name }}</a>
{% if post.author.flair.flair_parts.len() > 0 %}
<small class="author_flair">{% call utils::render_flair(post.author.flair.flair_parts) %}</small>
{% endif %}
<span class="dot">&bull;</span>
<span class="created" title="{{ post.created }}">{{ post.rel_time }}</span>
{% if !post.awards.is_empty() %}
<span class="dot">&bull;</span>
<span class="awards">
{% for award in post.awards.clone() %}
<span class="award" title="{{ award.name }}">
<img alt="{{ award.name }}" src="{{ award.icon_url }}" width="16" height="16"/>
{{ award.count }}
</span>
{% endfor %}
</span>
{% endif %}
</p>
<h1 class="post_title">
{{ post.title }}
{% if post.flair.flair_parts.len() > 0 %}
<a href="/r/{{ post.community }}/search?q=flair_name%3A%22{{ post.flair.text }}%22&restrict_sr=on"
class="post_flair"
style="color:{{ post.flair.foreground_color }}; background:{{ post.flair.background_color }};">{% call utils::render_flair(post.flair.flair_parts) %}</a>
{% endif %}
{% if post.flags.nsfw %} <small class="nsfw">NSFW</small>{% endif %}
</h1>
<!-- POST MEDIA -->
<!-- post_type: {{ post.post_type }} -->
{% if post.post_type == "image" %}
<a href="{{ post.media.url }}" class="post_media_image" >
<svg
width="{{ post.media.width }}px"
height="{{ post.media.height }}px"
xmlns="http://www.w3.org/2000/svg">
<image width="100%" height="100%" href="{{ post.media.url }}"/>
<desc>
<img loading="lazy" alt="Post image" src="{{ post.media.url }}"/>
</desc>
</svg>
</a>
{% else if post.post_type == "video" || post.post_type == "gif" %}
{% if prefs.use_hls == "on" && !post.media.alt_url.is_empty() %}
<script src="/hls.min.js"></script>
<video class="post_media_video short {% if prefs.autoplay_videos == "on" %}hls_autoplay{% endif %}" width="{{ post.media.width }}" height="{{ post.media.height }}" poster="{{ post.media.poster }}" preload="none" controls>
<source src="{{ post.media.alt_url }}" type="application/vnd.apple.mpegurl" />
<source src="{{ post.media.url }}" type="video/mp4" />
</video>
<script src="/playHLSVideo.js"></script>
{% else %}
<video class="post_media_video" src="{{ post.media.url }}" controls {% if prefs.autoplay_videos == "on" %}autoplay{% endif %} loop><a href={{ post.media.url }}>Video</a></video>
{% call utils::render_hls_notification(post.permalink[1..]) %}
{% endif %}
{% else if post.post_type == "gallery" %}
<div class="gallery">
{% for image in post.gallery -%}
<figure>
<a href="{{ image.url }}" ><img loading="lazy" alt="Gallery image" src="{{ image.url }}"/></a>
<figcaption>
<p>{{ image.caption }}</p>
{% if image.outbound_url.len() > 0 %}
<p><a class="outbound_url" href="{{ image.outbound_url }}" rel="nofollow">{{ image.outbound_url }}</a>
{% endif %}
</figcaption>
</figure>
{%- endfor %}
</div>
{% else if post.post_type == "link" %}
<a id="post_url" href="{{ post.media.url }}" rel="nofollow">{{ post.media.url }}</a>
{% endif %}
<!-- POST BODY -->
<div class="post_body">{{ post.body|safe }}</div>
<div class="post_score" title="{{ post.score.1 }}">{{ post.score.0 }}<span class="label"> Upvotes</span></div>
<div class="post_footer">
<ul id="post_links">
<li><a href="/{{ post.id }}">permalink</a></li>
<li><a href="https://reddit.com/{{ post.id }}" rel="nofollow">reddit</a></li>
</ul>
<p>{{ post.upvote_ratio }}% Upvoted</p>
</div>
</div>
{% call utils::post(post) %}
<!-- SORT FORM -->
<form id="sort">
@ -138,7 +59,7 @@
{% for c in comments -%}
<div class="thread">
{% if single_thread %}
<p class="thread_nav"><a href="/{{ post.id }}">View all comments</a></p>
<p class="thread_nav"><a href="{{ post.permalink }}">View all comments</a></p>
{% if c.parent_kind == "t1" %}
<p class="thread_nav"><a href="?context=9999">Show parent comments</a></p>
{% endif %}

View File

@ -56,10 +56,15 @@
</div>
{% endif %}
{% endif %}
{% if all_posts_hidden_nsfw %}
<span class="listing_warn">All posts are hidden because they are NSFW. Enable "Show NSFW posts" in settings to view.</span>
{% endif %}
{% if all_posts_filtered %}
<center>(All content on this page has been filtered)</center>
<span class="listing_warn">(All content on this page has been filtered)</span>
{% else if is_filtered %}
<center>(Content from r/{{ sub }} has been filtered)</center>
<span class="listing_warn">(Content from r/{{ sub }} has been filtered)</span>
{% else if params.typed != "sr_user" %}
{% for post in posts %}
{% if post.flags.nsfw && prefs.show_nsfw != "on" %}

View File

@ -54,6 +54,11 @@
<input type="hidden" value="off" name="show_nsfw">
<input type="checkbox" name="show_nsfw" {% if prefs.show_nsfw == "on" %}checked{% endif %}>
</div>
<div id="blur_nsfw">
<label for="blur_nsfw">Blur NSFW previews:</label>
<input type="hidden" value="off" name="blur_nsfw">
<input type="checkbox" name="blur_nsfw" {% if prefs.blur_nsfw == "on" %}checked{% endif %}>
</div>
<div id="autoplay_videos">
<label for="autoplay_videos">Autoplay videos</label>
<input type="hidden" value="off" name="autoplay_videos">
@ -110,7 +115,7 @@
<div id="settings_note">
<p><b>Note:</b> settings and subscriptions are saved in browser cookies. Clearing your cookies will reset them.</p><br>
<p>You can restore your current settings and subscriptions after clearing your cookies using <a href="/settings/restore/?theme={{ prefs.theme }}&front_page={{ prefs.front_page }}&layout={{ prefs.layout }}&wide={{ prefs.wide }}&post_sort={{ prefs.post_sort }}&comment_sort={{ prefs.comment_sort }}&show_nsfw={{ prefs.show_nsfw }}&use_hls={{ prefs.use_hls }}&hide_hls_notification={{ prefs.hide_hls_notification }}&subscriptions={{ prefs.subscriptions.join("%2B") }}&filters={{ prefs.filters.join("%2B") }}">this link</a>.</p>
<p>You can restore your current settings and subscriptions after clearing your cookies using <a href="/settings/restore/?theme={{ prefs.theme }}&front_page={{ prefs.front_page }}&layout={{ prefs.layout }}&wide={{ prefs.wide }}&post_sort={{ prefs.post_sort }}&comment_sort={{ prefs.comment_sort }}&show_nsfw={{ prefs.show_nsfw }}&blur_nsfw={{ prefs.blur_nsfw }}&use_hls={{ prefs.use_hls }}&hide_hls_notification={{ prefs.hide_hls_notification }}&subscriptions={{ prefs.subscriptions.join("%2B") }}&filters={{ prefs.filters.join("%2B") }}">this link</a>.</p>
</div>
</div>

View File

@ -46,6 +46,10 @@
</form>
{% endif %}
{% if all_posts_hidden_nsfw %}
<center>All posts are hidden because they are NSFW. Enable "Show NSFW posts" in settings to view.</center>
{% endif %}
{% if all_posts_filtered %}
<center>(All content on this page has been filtered)</center>
{% else %}

View File

@ -32,6 +32,10 @@
</button>
</form>
{% if all_posts_hidden_nsfw %}
<center>All posts are hidden because they are NSFW. Enable "Show NSFW posts" in settings to view.</center>
{% endif %}
{% if all_posts_filtered %}
<center>(All content on this page has been filtered)</center>
{% else %}

View File

@ -61,6 +61,109 @@
{% endif %}
{%- endmacro %}
{% macro post(post) -%}
<!-- POST CONTENT -->
<div class="post highlighted">
<p class="post_header">
<a class="post_subreddit" href="/r/{{ post.community }}">r/{{ post.community }}</a>
<span class="dot">&bull;</span>
<a class="post_author {{ post.author.distinguished }}" href="/user/{{ post.author.name }}">u/{{ post.author.name }}</a>
{% if post.author.flair.flair_parts.len() > 0 %}
<small class="author_flair">{% call render_flair(post.author.flair.flair_parts) %}</small>
{% endif %}
<span class="dot">&bull;</span>
<span class="created" title="{{ post.created }}">{{ post.rel_time }}</span>
{% if !post.awards.is_empty() %}
<span class="dot">&bull;</span>
<span class="awards">
{% for award in post.awards.clone() %}
<span class="award" title="{{ award.name }}">
<img alt="{{ award.name }}" src="{{ award.icon_url }}" width="16" height="16"/>
{{ award.count }}
</span>
{% endfor %}
</span>
{% endif %}
</p>
<h1 class="post_title">
{{ post.title }}
{% if post.flair.flair_parts.len() > 0 %}
<a href="/r/{{ post.community }}/search?q=flair_name%3A%22{{ post.flair.text }}%22&restrict_sr=on"
class="post_flair"
style="color:{{ post.flair.foreground_color }}; background:{{ post.flair.background_color }};">{% call render_flair(post.flair.flair_parts) %}</a>
{% endif %}
{% if post.flags.nsfw %} <small class="nsfw">NSFW</small>{% endif %}
</h1>
<!-- POST MEDIA -->
<!-- post_type: {{ post.post_type }} -->
{% if post.post_type == "image" %}
<div class="post_media_content">
<a href="{{ post.media.url }}" class="post_media_image" >
<svg
width="{{ post.media.width }}px"
height="{{ post.media.height }}px"
xmlns="http://www.w3.org/2000/svg">
<image width="100%" height="100%" href="{{ post.media.url }}"/>
<desc>
<img loading="lazy" alt="Post image" src="{{ post.media.url }}"/>
</desc>
</svg>
</a>
</div>
{% else if post.post_type == "video" || post.post_type == "gif" %}
{% if prefs.use_hls == "on" && !post.media.alt_url.is_empty() %}
<script src="/hls.min.js"></script>
<div class="post_media_content">
<video class="post_media_video short {% if prefs.autoplay_videos == "on" %}hls_autoplay{% endif %}" width="{{ post.media.width }}" height="{{ post.media.height }}" poster="{{ post.media.poster }}" preload="none" controls>
<source src="{{ post.media.alt_url }}" type="application/vnd.apple.mpegurl" />
<source src="{{ post.media.url }}" type="video/mp4" />
</video>
</div>
<script src="/playHLSVideo.js"></script>
{% else %}
<div class="post_media_content">
<video class="post_media_video" src="{{ post.media.url }}" controls {% if prefs.autoplay_videos == "on" %}autoplay{% endif %} loop><a href={{ post.media.url }}>Video</a></video>
</div>
{% call render_hls_notification(post.permalink[1..]) %}
{% endif %}
{% else if post.post_type == "gallery" %}
<div class="gallery">
{% for image in post.gallery -%}
<figure>
<a href="{{ image.url }}" ><img loading="lazy" alt="Gallery image" src="{{ image.url }}"/></a>
<figcaption>
<p>{{ image.caption }}</p>
{% if image.outbound_url.len() > 0 %}
<p><a class="outbound_url" href="{{ image.outbound_url }}" rel="nofollow">{{ image.outbound_url }}</a>
{% endif %}
</figcaption>
</figure>
{%- endfor %}
</div>
{% else if post.post_type == "link" %}
<a id="post_url" href="{{ post.media.url }}" rel="nofollow">{{ post.media.url }}</a>
{% endif %}
<!-- POST BODY -->
<div class="post_body">{{ post.body|safe }}</div>
<div class="post_score" title="{{ post.score.1 }}">{{ post.score.0 }}<span class="label"> Upvotes</span></div>
<div class="post_footer">
<ul id="post_links">
<li class="desktop_item"><a href="{{ post.permalink }}">permalink</a></li>
<li class="mobile_item"><a href="{{ post.permalink }}">link</a></li>
{% if post.num_duplicates > 0 %}
<li class="desktop_item"><a href="/r/{{ post.community }}/duplicates/{{ post.id }}">duplicates</a></li>
<li class="mobile_item"><a href="/r/{{ post.community }}/duplicates/{{ post.id }}">dupes</a></li>
{% endif %}
<li class="desktop_item"><a href="https://reddit.com{{ post.permalink }}" rel="nofollow">reddit</a></li>
<li class="mobile_item"><a href="https://reddit.com{{ post.permalink }}" rel="nofollow">reddit</a></li>
</ul>
<p>{{ post.upvote_ratio }}%<span id="upvoted"> Upvoted</span></p>
</div>
</div>
{%- endmacro %}
{% macro post_in_list(post) -%}
<div class="post {% if post.flags.stickied %}stickied{% endif %}" id="{{ post.id }}">
<p class="post_header">
@ -94,27 +197,36 @@
</h2>
<!-- POST MEDIA/THUMBNAIL -->
{% if (prefs.layout.is_empty() || prefs.layout == "card") && post.post_type == "image" %}
<a href="{{ post.media.url }}" class="post_media_image {% if post.media.height / post.media.width < 2 %}short{% endif %}" >
<svg
width="{{ post.media.width }}px"
height="{{ post.media.height }}px"
xmlns="http://www.w3.org/2000/svg">
<image width="100%" height="100%" href="{{ post.media.url }}"/>
<desc>
<img loading="lazy" alt="Post image" src="{{ post.media.url }}"/>
</desc>
</svg>
</a>
<div class="post_media_content">
<a href="{{ post.media.url }}" class="post_media_image {% if post.media.height / post.media.width < 2 %}short{% endif %}" >
<svg
{%if post.flags.nsfw && prefs.blur_nsfw=="on" %}class="post_nsfw_blur"{% endif %}
width="{{ post.media.width }}px"
height="{{ post.media.height }}px"
xmlns="http://www.w3.org/2000/svg">
<image width="100%" height="100%" href="{{ post.media.url }}"/>
<desc>
<img loading="lazy" alt="Post image" src="{{ post.media.url }}"/>
</desc>
</svg>
</a>
</div>
{% else if (prefs.layout.is_empty() || prefs.layout == "card") && post.post_type == "gif" %}
<video class="post_media_video short" src="{{ post.media.url }}" width="{{ post.media.width }}" height="{{ post.media.height }}" poster="{{ post.media.poster }}" preload="none" controls loop {% if prefs.autoplay_videos == "on" %}autoplay{% endif %}><a href={{ post.media.url }}>Video</a></video>
<div class="post_media_content">
<video class="post_media_video short {%if post.flags.nsfw && prefs.blur_nsfw=="on" %}post_nsfw_blur{% endif %}" src="{{ post.media.url }}" width="{{ post.media.width }}" height="{{ post.media.height }}" poster="{{ post.media.poster }}" preload="none" controls loop {% if prefs.autoplay_videos == "on" %}autoplay{% endif %}><a href={{ post.media.url }}>Video</a></video>
</div>
{% else if (prefs.layout.is_empty() || prefs.layout == "card") && post.post_type == "video" %}
{% if prefs.use_hls == "on" && !post.media.alt_url.is_empty() %}
<video class="post_media_video short {% if prefs.autoplay_videos == "on" %}hls_autoplay{% endif %}" width="{{ post.media.width }}" height="{{ post.media.height }}" poster="{{ post.media.poster }}" controls preload="none">
<source src="{{ post.media.alt_url }}" type="application/vnd.apple.mpegurl" />
<source src="{{ post.media.url }}" type="video/mp4" />
</video>
<div class="post_media_content">
<video class="post_media_video short {%if post.flags.nsfw && prefs.blur_nsfw=="on" %}post_nsfw_blur{% endif %} {% if prefs.autoplay_videos == "on" %}hls_autoplay{% endif %}" width="{{ post.media.width }}" height="{{ post.media.height }}" poster="{{ post.media.poster }}" controls preload="none">
<source src="{{ post.media.alt_url }}" type="application/vnd.apple.mpegurl" />
<source src="{{ post.media.url }}" type="video/mp4" />
</video>
</div>
{% else %}
<video class="post_media_video short" src="{{ post.media.url }}" width="{{ post.media.width }}" height="{{ post.media.height }}" poster="{{ post.media.poster }}" preload="none" controls {% if prefs.autoplay_videos == "on" %}autoplay{% endif %}><a href={{ post.media.url }}>Video</a></video>
<div class="post_media_content">
<video class="post_media_video short {%if post.flags.nsfw && prefs.blur_nsfw=="on" %}post_nsfw_blur{% endif %}" src="{{ post.media.url }}" width="{{ post.media.width }}" height="{{ post.media.height }}" poster="{{ post.media.poster }}" preload="none" controls {% if prefs.autoplay_videos == "on" %}autoplay{% endif %}><a href={{ post.media.url }}>Video</a></video>
</div>
{% call render_hls_notification(format!("{}%23{}", &self.url[1..].replace("&", "%26").replace("+", "%2B"), post.id)) %}
{% endif %}
{% else if post.post_type != "self" %}
@ -125,12 +237,14 @@
<path d="M35,15h-15a10,10 0,0,0 0,20h25a10,10 0,0,0 10,-10m-12.5,0a10, 10 0,0,1 10, -10h25a10,10 0,0,1 0,20h-15" fill="none" stroke-width="5" stroke-linecap="round"/>
</svg>
{% else %}
<svg width="{{ post.thumbnail.width }}px" height="{{ post.thumbnail.height }}px" xmlns="http://www.w3.org/2000/svg">
<image width="100%" height="100%" href="{{ post.thumbnail.url }}"/>
<desc>
<img loading="lazy" alt="Thumbnail" src="{{ post.thumbnail.url }}"/>
</desc>
</svg>
<div style="max-width:{{ post.thumbnail.width }}px;max-height:{{ post.thumbnail.height }}px;">
<svg {% if post.flags.nsfw && prefs.blur_nsfw=="on" %} class="thumb_nsfw_blur" {% endif %} width="{{ post.thumbnail.width }}px" height="{{ post.thumbnail.height }}px" xmlns="http://www.w3.org/2000/svg">
<image width="100%" height="100%" href="{{ post.thumbnail.url }}"/>
<desc>
<img loading="lazy" alt="Thumbnail" src="{{ post.thumbnail.url }}"/>
</desc>
</svg>
</div>
{% endif %}
<span>{% if post.post_type == "link" %}{{ post.domain }}{% else %}{{ post.post_type }}{% endif %}</span>
</a>