diff --git a/.github/workflows/docker-arm.yml b/.github/workflows/docker-arm.yml
index 6e34e79..251e325 100644
--- a/.github/workflows/docker-arm.yml
+++ b/.github/workflows/docker-arm.yml
@@ -33,6 +33,6 @@ jobs:
       file: ./Dockerfile.arm
       platforms: linux/arm64
       push: true
-      tags: spikecodes/libreddit:arm
+      tags: libreddit/libreddit:arm
       cache-from: type=gha
       cache-to: type=gha,mode=max
diff --git a/.github/workflows/docker-armv7.yml b/.github/workflows/docker-armv7.yml
index 7c51db2..d2817d8 100644
--- a/.github/workflows/docker-armv7.yml
+++ b/.github/workflows/docker-armv7.yml
@@ -36,6 +36,6 @@ jobs:
       file: ./Dockerfile.armv7
       platforms: linux/arm/v7
       push: true
-      tags: spikecodes/libreddit:armv7
+      tags: libreddit/libreddit:armv7
       cache-from: type=gha
       cache-to: type=gha,mode=max
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index ed1bf73..c90bd4d 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -26,6 +26,12 @@ jobs:
       with:
         username: ${{ secrets.DOCKER_USERNAME }}
         password: ${{ secrets.DOCKER_PASSWORD }}
+    - name: Docker Hub Description
+      uses: peter-evans/dockerhub-description@v3
+      with:
+        username: ${{ secrets.DOCKER_USERNAME }}
+        password: ${{ secrets.DOCKER_PASSWORD }}
+        repository: libreddit/libreddit
     - name: Build and push
       uses: docker/build-push-action@v2
       with:
@@ -33,6 +39,6 @@ jobs:
       file: ./Dockerfile
       platforms: linux/amd64
       push: true
-      tags: spikecodes/libreddit:latest
+      tags: libreddit/libreddit:latest
       cache-from: type=gha
       cache-to: type=gha,mode=max
diff --git a/CREDITS b/CREDITS
new file mode 100644
index 0000000..0d7d117
--- /dev/null
+++ b/CREDITS
@@ -0,0 +1,82 @@
+5trongthany <65565784+5trongthany@users.noreply.github.com>
+674Y3r <87250374+674Y3r@users.noreply.github.com>
+accountForIssues <52367365+accountForIssues@users.noreply.github.com>
+Adrian Lebioda
+alefvanoon <53198048+alefvanoon@users.noreply.github.com>
+alyaeanyx
+AndreVuillemot160 <84594011+AndreVuillemot160@users.noreply.github.com>
+Andrew Kaufman <57281817+andrew-kaufman@users.noreply.github.com>
+Artemis <51862164+artemislena@users.noreply.github.com>
+arthomnix <35371030+arthomnix@users.noreply.github.com>
+Arya K <73596856+gi-yt@users.noreply.github.com>
+Austin Huang
+Basti
+Ben Smith <37027883+smithbm2316@users.noreply.github.com>
+BobIsMyManager
+curlpipe <11898833+curlpipe@users.noreply.github.com>
+dacousb <53299044+dacousb@users.noreply.github.com>
+Daniel Valentine
+Daniel Valentine
+dbrennand <52419383+dbrennand@users.noreply.github.com>
+Diego Magdaleno <38844659+DiegoMagdaleno@users.noreply.github.com>
+Dyras
+Edward <101938856+EdwardLangdon@users.noreply.github.com>
+erdnaxe
+Esmail EL BoB
+FireMasterK <20838718+FireMasterK@users.noreply.github.com>
+George Roubos
+git-bruh
+guaddy <67671414+guaddy@users.noreply.github.com>
+Harsh Mishra
+igna
+imabritishcow
+Josiah <70736638+fres7h@users.noreply.github.com>
+JPyke3
+Kavin <20838718+FireMasterK@users.noreply.github.com>
+Kazi
+Kieran <42723993+EnderDev@users.noreply.github.com>
+Kieran
+Kyle Roth
+laazyCmd
+Laurențiu Nicola
+Lena <102762572+MarshDeer@users.noreply.github.com>
+Macic <46872282+Macic-Dev@users.noreply.github.com>
+Mario A <10923513+Midblyte@users.noreply.github.com>
+Matthew Crossman
+Matthew E
+Mennaruuk <52135169+Mennaruuk@users.noreply.github.com>
+mikupls <93015331+mikupls@users.noreply.github.com>
+Nainar
+Nathan Moos
+Nicholas Christopher
+Nick Lowery
+Nico
+NKIPSC <15067635+NKIPSC@users.noreply.github.com>
+obeho <71698631+obeho@users.noreply.github.com>
+obscurity
+Om G
<34579088+OxyMagnesium@users.noreply.github.com> +RiversideRocks <59586759+RiversideRocks@users.noreply.github.com> +robin <8597693+robrobinbin@users.noreply.github.com> +Robin <8597693+robrobinbin@users.noreply.github.com> +robrobinbin <> +robrobinbin <8597693+robrobinbin@users.noreply.github.com> +robrobinbin +Ruben Elshof <15641671+rubenelshof@users.noreply.github.com> +Scoder12 <34356756+Scoder12@users.noreply.github.com> +Slayer <51095261+GhostSlayer@users.noreply.github.com> +Soheb +somini +somoso +Spike <19519553+spikecodes@users.noreply.github.com> +spikecodes <19519553+spikecodes@users.noreply.github.com> +sybenx +TheCultLeader666 <65368815+TheCultLeader666@users.noreply.github.com> +TheFrenchGhosty <47571719+TheFrenchGhosty@users.noreply.github.com> +The TwilightBlood +tirz <36501933+tirz@users.noreply.github.com> +Tsvetomir Bonev +Vladislav Nepogodin +Walkx +Wichai <1482605+Chengings@users.noreply.github.com> +xatier +Zach <72994911+zachjmurphy@users.noreply.github.com> diff --git a/Cargo.lock b/Cargo.lock index 9937801..87c1b9a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,12 @@ # It is not intended for manual editing. version = 3 +[[package]] +name = "adler32" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" + [[package]] name = "aho-corasick" version = "0.7.19" @@ -11,6 +17,21 @@ dependencies = [ "memchr", ] +[[package]] +name = "alloc-no-stdlib" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" + +[[package]] +name = "alloc-stdlib" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" +dependencies = [ + "alloc-no-stdlib", +] + [[package]] name = "askama" version = "0.11.1" @@ -109,6 +130,27 @@ dependencies = [ "generic-array", ] +[[package]] +name = "brotli" +version = "3.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", +] + +[[package]] +name = "brotli-decompressor" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", +] + [[package]] name = "bstr" version = "0.2.17" @@ -169,9 +211,9 @@ checksum = "3a4f925191b4367301851c6d99b09890311d74b0d43f274c0b34c86d308a3663" [[package]] name = "cc" -version = "1.0.74" +version = "1.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581f5dba903aac52ea3feb5ec4810848460ee833876f1f9b0fdeab1f19091574" +checksum = "76a284da2e6fe2092f2353e51713435363112dfd60030e22add80be333fb928f" [[package]] name = "cfg-if" @@ -181,9 +223,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" -version = "4.0.18" +version = "4.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335867764ed2de42325fafe6d18b8af74ba97ee0c590fa016f157535b42ab04b" +checksum = "60494cedb60cb47462c0ff7be53de32c0e42a6fc2c772184554fa12bd9489c03" dependencies = [ "bitflags", "clap_lex", @@ -233,6 +275,15 @@ dependencies = [ "libc", ] +[[package]] +name = "crc32fast" +version = "1.3.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +dependencies = [ + "cfg-if", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -398,6 +449,17 @@ dependencies = [ "version_check", ] +[[package]] +name = "getrandom" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + [[package]] name = "globset" version = "0.4.9" @@ -481,9 +543,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.22" +version = "0.14.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abfba89e19b959ca163c7752ba59d737c1ceea53a5d31a149c805446fc958064" +checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" dependencies = [ "bytes", "futures-channel", @@ -580,18 +642,41 @@ version = "0.2.137" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" +[[package]] +name = "libflate" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05605ab2bce11bcfc0e9c635ff29ef8b2ea83f29be257ee7d730cac3ee373093" +dependencies = [ + "adler32", + "crc32fast", + "libflate_lz77", +] + +[[package]] +name = "libflate_lz77" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39a734c0493409afcd49deee13c006a04e3586b9761a03543c6272c9c51f2f5a" +dependencies = [ + "rle-decode-fast", +] + [[package]] name = "libreddit" -version = "0.23.1" +version = "0.25.0" dependencies = [ "askama", "async-recursion", + "brotli", "cached", "clap", "cookie", "futures-lite", "hyper", "hyper-rustls", + "libflate", + "lipsum", "percent-encoding", "regex", "route-recognizer", @@ -603,6 +688,16 @@ dependencies = [ "url", ] +[[package]] +name = "lipsum" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8451846f1f337e44486666989fbce40be804da139d5a4477d6b88ece5dc69f4" +dependencies = [ + "rand", + "rand_chacha", +] + [[package]] name = "lock_api" version = "0.4.9" @@ -674,23 +769,14 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" dependencies = [ "hermit-abi", "libc", ] -[[package]] -name = "num_threads" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" -dependencies = [ - "libc", -] - [[package]] name = "once_cell" version = "1.16.0" @@ -705,9 +791,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "os_str_bytes" -version = "6.3.1" +version = "6.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3baf96e39c5359d2eb0dd6ccb42c62b91d9678aa68160d261b9e0ccbf9e9dea9" +checksum = "7b5bf27447411e9ee3ff51186bf7a08e16c341efdde93f4d823e8844429bed7e" [[package]] name = "parking" @@ -756,6 +842,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + [[package]] name = "proc-macro2" version = "1.0.47" @@ -774,6 +866,36 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -785,9 +907,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" +checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" dependencies = [ "aho-corasick", "memchr", @@ -796,9 +918,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.27" +version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" [[package]] name = "ring" @@ -815,6 +937,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "rle-decode-fast" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" + [[package]] name = "route-recognizer" version = "0.3.1" @@ -1074,13 +1202,11 @@ dependencies = [ [[package]] name = "time" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fab5c8b9980850e06d92ddbe3ab839c062c801f3927c0fb8abd6fc8e918fbca" +checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ "itoa", - "libc", - "num_threads", "serde", "time-core", "time-macros", @@ -1094,9 +1220,9 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bb801831d812c562ae7d2bfb531f26e66e4e1f6b17307ba4149c5064710e5b" +checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" dependencies = [ "time-core", ] diff --git a/Cargo.toml b/Cargo.toml index 909af50..283a5cb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "libreddit" description = " Alternative private front-end to Reddit" license = "AGPL-3.0" repository = "https://github.com/spikecodes/libreddit" -version = "0.23.1" +version = "0.25.0" authors = ["spikecodes <19519553+spikecodes@users.noreply.github.com>"] edition = "2021" @@ -11,22 +11,27 @@ edition = "2021" askama = { version = "0.11.1", default-features = false } 
async-recursion = "1.0.0"
cached = "0.40.0"
-clap = { version = "4.0.18", default-features = false, features = ["std"] }
-regex = "1.6.0"
+clap = { version = "4.0.24", default-features = false, features = ["std"] }
+regex = "1.7.0"
serde = { version = "1.0.147", features = ["derive"] }
cookie = "0.16.1"
futures-lite = "1.12.0"
-hyper = { version = "0.14.22", features = ["full"] }
+hyper = { version = "0.14.23", features = ["full"] }
hyper-rustls = "0.23.0"
percent-encoding = "2.2.0"
route-recognizer = "0.3.1"
serde_json = "1.0.87"
tokio = { version = "1.21.2", features = ["full"] }
-time = "0.3.16"
+time = "0.3.17"
url = "2.3.1"
rust-embed = { version = "6.4.2", features = ["include-exclude"] }
+libflate = "1.2.0"
+brotli = { version = "3.3.4", features = ["std"] }
+
+[dev-dependencies]
+lipsum = "0.8.2"

[profile.release]
codegen-units = 1
lto = true
-strip = true
+strip = true
\ No newline at end of file
diff --git a/Dockerfile.arm b/Dockerfile.arm
index 93703d0..098bf13 100644
--- a/Dockerfile.arm
+++ b/Dockerfile.arm
@@ -3,13 +3,18 @@
####################################################################################################
FROM rust:alpine AS builder

-RUN apk add --no-cache g++
+RUN apk add --no-cache g++ git

WORKDIR /usr/src/libreddit

COPY . .

-RUN cargo install --path .
+# net.git-fetch-with-cli is specified in order to prevent a potential OOM kill
+# in low memory environments. See:
+# https://users.rust-lang.org/t/cargo-uses-too-much-memory-being-run-in-qemu/76531
+# This is tracked under issue #641. This also requires us to install git in the
+# builder.
+RUN cargo install --config net.git-fetch-with-cli=true --path .

####################################################################################################
## Final image
diff --git a/README.md b/README.md
index 988e896..891e6c9 100644
--- a/README.md
+++ b/README.md
@@ -29,86 +29,17 @@ I appreciate any donations! Your support allows me to continue developing Libred

 # Instances

-Feel free to [open an issue](https://github.com/spikecodes/libreddit/issues/new) to have your [selfhosted instance](#deployment) listed here!
-
 🔗 **Want to automatically redirect Reddit links to Libreddit? Use [LibRedirect](https://github.com/libredirect/libredirect) or [Privacy Redirect](https://github.com/SimonBrazell/privacy-redirect)!**

-| Website | Country | Cloudflare |
-|-|-|-|
-| [libredd.it](https://libredd.it) (official) | 🇺🇸 US | |
-| [libreddit.spike.codes](https://libreddit.spike.codes) (official) | 🇺🇸 US | |
-| [libreddit.dothq.co](https://libreddit.dothq.co) | 🇩🇪 DE | ✅ |
-| [libreddit.kavin.rocks](https://libreddit.kavin.rocks) | 🇮🇳 IN | |
-| [reddit.invak.id](https://reddit.invak.id) | 🇧🇬 BG | |
-| [lr.riverside.rocks](https://lr.riverside.rocks) | 🇺🇸 US | |
-| [libreddit.strongthany.cc](https://libreddit.strongthany.cc) | 🇺🇸 US | |
-| [libreddit.privacy.com.de](https://libreddit.privacy.com.de) | 🇩🇪 DE | |
-| [libreddit.domain.glass](https://libreddit.domain.glass) | 🇺🇸 US | ✅ |
-| [r.nf](https://r.nf) | 🇩🇪 DE | ✅ |
-| [reddit.stuehieyr.com](https://reddit.stuehieyr.com) | 🇩🇪 DE | |
-| [lr.mint.lgbt](https://lr.mint.lgbt) | 🇨🇦 CA | |
-| [libreddit.intent.cool](https://libreddit.intent.cool) | 🇺🇸 US | |
-| [libreddit.drivet.xyz](https://libreddit.drivet.xyz) | 🇵🇱 PL | |
-| [libreddit.de](https://libreddit.de) | 🇩🇪 DE | |
-| [libreddit.pussthecat.org](https://libreddit.pussthecat.org) | 🇩🇪 DE | |
-| [libreddit.mutahar.rocks](https://libreddit.mutahar.rocks) | 🇫🇷 FR | |
-| [libreddit.northboot.xyz](https://libreddit.northboot.xyz) | 🇩🇪 DE | |
-| [leddit.xyz](https://leddit.xyz) | 🇺🇸 US | |
-| [de.leddit.xyz](https://de.leddit.xyz) | 🇩🇪 DE | |
-| [lr.cowfee.moe](https://lr.cowfee.moe) | 🇺🇸 US | |
-| [libreddit.hu](https://libreddit.hu) | 🇫🇮 FI | ✅ |
-| [libreddit.totaldarkness.net](https://libreddit.totaldarkness.net) | 🇨🇦 CA | |
-| [libreddit.esmailelbob.xyz](https://libreddit.esmailelbob.xyz) | 🇨🇦 CA | |
-| [lr.vern.cc](https://lr.vern.cc) | 🇨🇦 CA | |
-| [libreddit.nl](https://libreddit.nl) | 🇳🇱 NL | |
-| [lr.stilic.ml](https://lr.stilic.ml) | 🇫🇷 FR | ✅ |
-| [reddi.tk](https://reddi.tk) | 🇺🇸 US | ✅ |
-| [libreddit.bus-hit.me](https://libreddit.bus-hit.me) | 🇨🇦 CA | |
-| [r.walkx.org](https://r.walkx.org) | 🇳🇱 NL | ✅ |
-| [libreddit.kylrth.com](https://libreddit.kylrth.com) | 🇨🇦 CA | |
-| [libreddit.yonalee.eu](https://libreddit.yonalee.eu) | 🇱🇺 LU | ✅ |
-| [libreddit.winscloud.net](https://libreddit.winscloud.net) | 🇹🇭 TH | ✅ |
-| [libreddit.tiekoetter.com](https://libreddit.tiekoetter.com) | 🇩🇪 DE | |
-| [reddit.rtrace.io](https://reddit.rtrace.io) | 🇩🇪 DE | |
-| [libreddit.lunar.icu](https://libreddit.lunar.icu) | 🇩🇪 DE | ✅ |
-| [libreddit.privacydev.net](https://libreddit.privacydev.net) | 🇺🇸 US | |
-| [libreddit.notyourcomputer.net](https://libreddit.notyourcomputer.net) | 🇺🇸 US | |
-| [r.ahwx.org](https://r.ahwx.org) | 🇳🇱 NL | ✅ |
-| [bob.fr.to](https://bob.fr.to) | 🇺🇸 US | |
-| [reddit.beparanoid.de](https://reddit.beparanoid.de) | 🇨🇭 CH | |
-| [libreddit.dcs0.hu](https://libreddit.dcs0.hu) | 🇭🇺 HU | |
-| [reddit.dr460nf1r3.org](https://reddit.dr460nf1r3.org) | 🇩🇪 DE | ✅ |
-| [rd.jae.su](https://rd.jae.su) | 🇫🇮 FI | |
-| [libreddit.mha.fi](https://libreddit.mha.fi) | 🇫🇮 FI | |
-| [libreddit.foss.wtf](https://libreddit.foss.wtf) | 🇩🇪 DE | |
-| [libreddit.encrypted-data.xyz](https://libreddit.encrypted-data.xyz)| 🇫🇷 FR | ✅ |
-| [libreddit.eu.org](https://libreddit.eu.org)| 🇮🇪 IE | ✅ |
-| [l.opnxng.com](https://l.opnxng.com)| 🇸🇬 SG | |
-| [libreddit.cachyos.org](https://libreddit.cachyos.org) | 🇩🇪 DE | ✅ |
-| [libreddit.oxymagnesium.com](https://libreddit.oxymagnesium.com) | 🇺🇸 US | |
-| [spjmllawtheisznfs7uryhxumin26ssv2draj7oope3ok3wuhy43eoyd.onion](http://spjmllawtheisznfs7uryhxumin26ssv2draj7oope3ok3wuhy43eoyd.onion) | 🇮🇳 IN | |
-| [fwhhsbrbltmrct5hshrnqlqygqvcgmnek3cnka55zj4y7nuus5muwyyd.onion](http://fwhhsbrbltmrct5hshrnqlqygqvcgmnek3cnka55zj4y7nuus5muwyyd.onion) | 🇩🇪 DE | |
-| [kphht2jcflojtqte4b4kyx7p2ahagv4debjj32nre67dxz7y57seqwyd.onion](http://kphht2jcflojtqte4b4kyx7p2ahagv4debjj32nre67dxz7y57seqwyd.onion) | 🇳🇱 NL | |
-| [liredejj74h5xjqr2dylnl5howb2bpikfowqoveub55ru27x43357iid.onion](http://liredejj74h5xjqr2dylnl5howb2bpikfowqoveub55ru27x43357iid.onion) | 🇩🇪 DE | |
-| [kzhfp3nvb4qp575vy23ccbrgfocezjtl5dx66uthgrhu7nscu6rcwjyd.onion](http://kzhfp3nvb4qp575vy23ccbrgfocezjtl5dx66uthgrhu7nscu6rcwjyd.onion) | 🇺🇸 US | |
-| [ecue64ybzvn6vjzl37kcsnwt4ycmbsyf74nbttyg7rkc3t3qwnj7mcyd.onion](http://ecue64ybzvn6vjzl37kcsnwt4ycmbsyf74nbttyg7rkc3t3qwnj7mcyd.onion) | 🇩🇪 DE | |
-| [ledditqo2mxfvlgobxnlhrkq4dh34jss6evfkdkb2thlvy6dn4f4gpyd.onion](http://ledditqo2mxfvlgobxnlhrkq4dh34jss6evfkdkb2thlvy6dn4f4gpyd.onion) | 🇺🇸 US | |
-| [libredoxhxwnmsb6dvzzd35hmgzmawsq5i764es7witwhddvpc2razid.onion](http://libredoxhxwnmsb6dvzzd35hmgzmawsq5i764es7witwhddvpc2razid.onion) | 🇺🇸 US | |
-| [libreddit.2syis2nnyytz6jnusnjurva4swlaizlnleiks5mjp46phuwjbdjqwgqd.onion](http://libreddit.2syis2nnyytz6jnusnjurva4swlaizlnleiks5mjp46phuwjbdjqwgqd.onion) | 🇪🇬 EG | |
-| [ol5begilptoou34emq2sshf3may3hlblvipdjtybbovpb7c7zodxmtqd.onion](http://ol5begilptoou34emq2sshf3may3hlblvipdjtybbovpb7c7zodxmtqd.onion) | 🇩🇪 DE | |
-| [lbrdtjaj7567ptdd4rv74lv27qhxfkraabnyphgcvptl64ijx2tijwid.onion](http://lbrdtjaj7567ptdd4rv74lv27qhxfkraabnyphgcvptl64ijx2tijwid.onion) | 🇨🇦 CA | |
-| [libreddit.esmail5pdn24shtvieloeedh7ehz3nrwcdivnfhfcedl7gf4kwddhkqd.onion](http://libreddit.esmail5pdn24shtvieloeedh7ehz3nrwcdivnfhfcedl7gf4kwddhkqd.onion) | 🇨🇦 CA | |
-| [reddit.prnoid54e44a4bduq5due64jkk7wcnkxcp5kv3juncm7veptjcqudgyd.onion](http://reddit.prnoid54e44a4bduq5due64jkk7wcnkxcp5kv3juncm7veptjcqudgyd.onion) | 🇨🇭 CH | |
-| [inz6tbezfwzexva6dize4cqraj2tjdhygxabmcgysccesvw2pybzhbyd.onion](http://inz6tbezfwzexva6dize4cqraj2tjdhygxabmcgysccesvw2pybzhbyd.onion) | 🇫🇮 FI | |
-| [libreddit.micohauwkjbyw5meacrb4ipicwvwg4xtzl7y7viv53kig2mdcsvwkyyd.onion](http://libreddit.micohauwkjbyw5meacrb4ipicwvwg4xtzl7y7viv53kig2mdcsvwkyyd.onion/)| 🇫🇮 FI | |
-| [lr.vernccvbvyi5qhfzyqengccj7lkove6bjot2xhh5kajhwvidqafczrad.onion](http://lr.vernccvbvyi5qhfzyqengccj7lkove6bjot2xhh5kajhwvidqafczrad.onion/) | 🇨🇦 CA | |
-
-A checkmark in the "Cloudflare" category here refers to the use of the reverse proxy, [Cloudflare](https://cloudflare.com). The checkmark will not be listed for a site that uses Cloudflare DNS but rather the proxying service which grants Cloudflare the ability to monitor traffic to the website.
+[Follow this link](https://github.com/libreddit/libreddit-instances/blob/master/instances.md) for an up-to-date table of instances in markdown format. This list is also available as [a machine-readable JSON](https://github.com/libreddit/libreddit-instances/blob/master/instances.json).
+
+Both files are part of the [libreddit-instances](https://github.com/libreddit/libreddit-instances) repository. To contribute your [self-hosted instance](#deployment) to the list, see the [libreddit-instances README](https://github.com/libreddit/libreddit-instances/blob/master/README.md).

---

# About

-Find Libreddit on 💬 [Matrix](https://matrix.to/#/#libreddit:kde.org), 🐋 [Docker](https://hub.docker.com/r/spikecodes/libreddit), :octocat: [GitHub](https://github.com/spikecodes/libreddit), and 🦊 [GitLab](https://gitlab.com/spikecodes/libreddit).
+Find Libreddit on 💬 [Matrix](https://matrix.to/#/#libreddit:kde.org), 🐋 [Docker](https://hub.docker.com/r/libreddit/libreddit), :octocat: [GitHub](https://github.com/libreddit/libreddit), and 🦊 [GitLab](https://gitlab.com/libreddit/libreddit).

## Built with
@@ -120,7 +51,7 @@ Find Libreddit on 💬 [Matrix](https://matrix.to/#/#libreddit:kde.org), 🐋 [D
## Info
Libreddit hopes to provide an easier way to browse Reddit, without the ads, trackers, and bloat. Libreddit was inspired by other alternative front-ends to popular services such as [Invidious](https://github.com/iv-org/invidious) for YouTube, [Nitter](https://github.com/zedeus/nitter) for Twitter, and [Bibliogram](https://sr.ht/~cadence/bibliogram/) for Instagram.

-Libreddit currently implements most of Reddit's (signed-out) functionalities but still lacks [a few features](https://github.com/spikecodes/libreddit/issues).
+Libreddit currently implements most of Reddit's (signed-out) functionalities but still lacks [a few features](https://github.com/libreddit/libreddit/issues).

## How does it compare to Teddit?
@@ -138,15 +69,15 @@ This section outlines how Libreddit compares to Reddit.
## Speed

-Lasted tested Jan 17, 2021.
+Last tested Nov 11, 2022.

-Results from Google Lighthouse ([Libreddit Report](https://lighthouse-dot-webdotdevsite.appspot.com/lh/html?url=https%3A%2F%2Flibredd.it), [Reddit Report](https://lighthouse-dot-webdotdevsite.appspot.com/lh/html?url=https%3A%2F%2Fwww.reddit.com%2F)).
+Results from Google PageSpeed Insights ([Libreddit Report](https://pagespeed.web.dev/report?url=https%3A%2F%2Flibreddit.spike.codes%2F), [Reddit Report](https://pagespeed.web.dev/report?url=https://www.reddit.com)).

-| | Libreddit | Reddit |
-|------------------------|---------------|------------|
-| Requests | 20 | 70 |
-| Resource Size (card ui)| 1,224 KiB | 1,690 KiB |
-| Time to Interactive | **1.5 s** | **11.2 s** |
+| | Libreddit | Reddit |
+|------------------------|-------------|-----------|
+| Requests | 60 | 83 |
+| Speed Index | 2.0s | 10.4s |
+| Time to Interactive | **2.8s** | **12.4s** |

## Privacy
@@ -205,21 +136,21 @@ cargo install libreddit

## 2) Docker

-Deploy the [Docker image](https://hub.docker.com/r/spikecodes/libreddit) of Libreddit:
+Deploy the [Docker image](https://hub.docker.com/r/libreddit/libreddit) of Libreddit:
```
-docker pull spikecodes/libreddit
-docker run -d --name libreddit -p 8080:8080 spikecodes/libreddit
+docker pull libreddit/libreddit
+docker run -d --name libreddit -p 8080:8080 libreddit/libreddit
```

Deploy using a different port (in this case, port 80):
```
-docker pull spikecodes/libreddit
-docker run -d --name libreddit -p 80:8080 spikecodes/libreddit
+docker pull libreddit/libreddit
+docker run -d --name libreddit -p 80:8080 libreddit/libreddit
```

-To deploy on `arm64` platforms, simply replace `spikecodes/libreddit` in the commands above with `spikecodes/libreddit:arm`.
+To deploy on `arm64` platforms, simply replace `libreddit/libreddit` in the commands above with `libreddit/libreddit:arm`. -To deploy on `armv7` platforms, simply replace `spikecodes/libreddit` in the commands above with `spikecodes/libreddit:armv7`. +To deploy on `armv7` platforms, simply replace `libreddit/libreddit` in the commands above with `libreddit/libreddit:armv7`. ## 3) AUR @@ -231,14 +162,14 @@ yay -S libreddit-git ## 4) GitHub Releases -If you're on Linux and none of these methods work for you, you can grab a Linux binary from [the newest release](https://github.com/spikecodes/libreddit/releases/latest). +If you're on Linux and none of these methods work for you, you can grab a Linux binary from [the newest release](https://github.com/libreddit/libreddit/releases/latest). ## 5) Replit/Heroku/Glitch **Note:** These are free hosting options but they are *not* private and will monitor server usage to prevent abuse. If you need a free and easy setup, this method may work best for you. -Run on Repl.it -[![Deploy](https://www.herokucdn.com/deploy/button.svg)](https://heroku.com/deploy?template=https://github.com/spikecodes/libreddit) +Run on Repl.it +[![Deploy](https://www.herokucdn.com/deploy/button.svg)](https://heroku.com/deploy?template=https://github.com/libreddit/libreddit) [![Remix on Glitch](https://cdn.glitch.com/2703baf2-b643-4da7-ab91-7ee2a2d00b5b%2Fremix-button-v2.svg)](https://glitch.com/edit/#!/remix/libreddit) --- @@ -257,13 +188,14 @@ Assign a default value for each setting by passing environment variables to Libr | Name | Possible values | Default value | |-------------------------|-----------------------------------------------------------------------------------------------------|---------------| -| `THEME` | `["system", "light", "dark", "black", "dracula", "nord", "laserwave", "violet", "gold", "rosebox"]` | `system` | +| `THEME` | `["system", "light", "dark", "black", "dracula", "nord", "laserwave", "violet", "gold", "rosebox", "gruvboxdark", "gruvboxlight"]` | `system` | | `FRONT_PAGE` | `["default", "popular", "all"]` | `default` | | `LAYOUT` | `["card", "clean", "compact"]` | `card` | | `WIDE` | `["on", "off"]` | `off` | | `POST_SORT` | `["hot", "new", "top", "rising", "controversial"]` | `hot` | | `COMMENT_SORT` | `["confidence", "top", "new", "controversial", "old"]` | `confidence` | | `SHOW_NSFW` | `["on", "off"]` | `off` | +| `BLUR_NSFW` | `["on", "off"]` | `off` | | `USE_HLS` | `["on", "off"]` | `off` | | `HIDE_HLS_NOTIFICATION` | `["on", "off"]` | `off` | | `AUTOPLAY_VIDEOS` | `["on", "off"]` | `off` | @@ -280,7 +212,7 @@ LIBREDDIT_DEFAULT_WIDE=on LIBREDDIT_DEFAULT_THEME=dark libreddit -r ## Proxying using NGINX -**NOTE** If you're [proxying Libreddit through an NGINX Reverse Proxy](https://github.com/spikecodes/libreddit/issues/122#issuecomment-782226853), add +**NOTE** If you're [proxying Libreddit through an NGINX Reverse Proxy](https://github.com/libreddit/libreddit/issues/122#issuecomment-782226853), add ```nginx proxy_http_version 1.1; ``` @@ -308,7 +240,7 @@ Before=nginx.service ## Building ``` -git clone https://github.com/spikecodes/libreddit +git clone https://github.com/libreddit/libreddit cd libreddit cargo run ``` diff --git a/app.json b/app.json index 8573671..fd41fc8 100644 --- a/app.json +++ b/app.json @@ -32,6 +32,9 @@ "LIBREDDIT_DEFAULT_SHOW_NSFW": { "required": false }, + "LIBREDDIT_DEFAULT_BLUR_NSFW": { + "required": false + }, "LIBREDDIT_USE_HLS": { "required": false }, diff --git a/scripts/gen-credits.sh b/scripts/gen-credits.sh 
new file mode 100755
index 0000000..33ce9f4
--- /dev/null
+++ b/scripts/gen-credits.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+# This script generates the CREDITS file in the repository root, which
+# contains a list of all contributors to the Libreddit project.
+#
+# We use git-log to surface the names and emails of all authors and committers,
+# and grep will filter any automated commits due to GitHub.
+
+set -o pipefail
+
+cd "$(dirname "${BASH_SOURCE[0]}")/../" || exit 1
+git --no-pager log --pretty='%an <%ae>%n%cn <%ce>' master \
+  | sort -t'<' -u -k1,1 -k2,2 \
+  | grep -Fv -- 'GitHub <noreply@github.com>' \
+  > CREDITS
diff --git a/src/client.rs b/src/client.rs
index da271dd..f577f95 100644
--- a/src/client.rs
+++ b/src/client.rs
@@ -1,12 +1,55 @@
 use cached::proc_macro::cached;
 use futures_lite::{future::Boxed, FutureExt};
-use hyper::{body::Buf, client, Body, Request, Response, Uri};
+use hyper::{body, body::Buf, client, header, Body, Method, Request, Response, Uri};
+use libflate::gzip;
 use percent_encoding::{percent_encode, CONTROLS};
 use serde_json::Value;
-use std::result::Result;
+use std::{io, result::Result};

+use crate::dbg_msg;
 use crate::server::RequestExt;

+const REDDIT_URL_BASE: &str = "https://www.reddit.com";
+
+/// Gets the canonical path for a resource on Reddit. This is accomplished by
+/// making a `HEAD` request to Reddit at the path given in `path`.
+///
+/// This function returns `Ok(Some(path))`, where `path`'s value is identical
+/// to that of the value of the argument `path`, if Reddit responds to our
+/// `HEAD` request with a 2xx-family HTTP code. It will also return an
+/// `Ok(Some(String))` if Reddit responds to our `HEAD` request with a
+/// `Location` header in the response, and the HTTP code is in the 3xx-family;
+/// the `String` will contain the path as reported in `Location`. The return
+/// value is `Ok(None)` if Reddit responded with a 3xx, but did not provide a
+/// `Location` header. An `Err(String)` is returned if Reddit responds with a
+/// 429, or if we were unable to decode the value in the `Location` header.
+#[cached(size = 1024, time = 600, result = true)]
+pub async fn canonical_path(path: String) -> Result<Option<String>, String> {
+  let res = reddit_head(path.clone(), true).await?;
+
+  if res.status() == 429 {
+    return Err("Too many requests.".to_string());
+  };
+
+  // If Reddit responds with a 2xx, then the path is already canonical.
+  if res.status().to_string().starts_with('2') {
+    return Ok(Some(path));
+  }
+
+  // If Reddit responds with anything other than 3xx (except for the 2xx as
+  // above), return a None.
+  if !res.status().to_string().starts_with('3') {
+    return Ok(None);
+  }
+
+  Ok(
+    res
+      .headers()
+      .get(header::LOCATION)
+      .map(|val| percent_encode(val.as_bytes(), CONTROLS).to_string().trim_start_matches(REDDIT_URL_BASE).to_string()),
+  )
+}
+
 pub async fn proxy(req: Request<Body>, format: &str) -> Result<Response<Body>, String> {
   let mut url = format!("{}?{}", format, req.uri().query().unwrap_or_default());
@@ -62,20 +105,39 @@ async fn stream(url: &str, req: &Request<Body>) -> Result<Response<Body>, String
     .map_err(|e| e.to_string())
 }

-fn request(url: String, quarantine: bool) -> Boxed<Result<Response<Body>, String>> {
+/// Makes a GET request to Reddit at `path`. By default, this will honor HTTP
+/// 3xx codes Reddit returns and will automatically redirect.
+fn reddit_get(path: String, quarantine: bool) -> Boxed<Result<Response<Body>, String>> {
+  request(&Method::GET, path, true, quarantine)
+}
+
+/// Makes a HEAD request to Reddit at `path`. This will not follow redirects.
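+/// (`canonical_path` builds on this: it issues the `HEAD` request itself and
+/// then inspects the status code and `Location` header directly, rather than
+/// letting the client chase the redirect.)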
+fn reddit_head(path: String, quarantine: bool) -> Boxed<Result<Response<Body>, String>> {
+  request(&Method::HEAD, path, false, quarantine)
+}
+
+/// Makes a request to Reddit. If `redirect` is `true`, `request` will recurse
+/// on the URL that Reddit provides in the Location HTTP header in its
+/// response.
+fn request(method: &'static Method, path: String, redirect: bool, quarantine: bool) -> Boxed<Result<Response<Body>, String>> {
+  // Build Reddit URL from path.
+  let url = format!("{}{}", REDDIT_URL_BASE, path);
+
   // Prepare the HTTPS connector.
   let https = hyper_rustls::HttpsConnectorBuilder::new().with_native_roots().https_or_http().enable_http1().build();

   // Construct the hyper client from the HTTPS connector.
   let client: client::Client<_, hyper::Body> = client::Client::builder().build(https);

-  // Build request
+  // Build request to Reddit. When making a GET, request gzip compression.
+  // (Reddit doesn't do brotli yet.)
   let builder = Request::builder()
-    .method("GET")
+    .method(method)
     .uri(&url)
     .header("User-Agent", format!("web:libreddit:{}", env!("CARGO_PKG_VERSION")))
     .header("Host", "www.reddit.com")
     .header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8")
+    .header("Accept-Encoding", if method == Method::GET { "gzip" } else { "identity" })
     .header("Accept-Language", "en-US,en;q=0.5")
     .header("Connection", "keep-alive")
     .header("Cookie", if quarantine { "_options=%7B%22pref_quarantine_optin%22%3A%20true%7D" } else { "" })
@@ -84,26 +146,94 @@
   async move {
     match builder {
       Ok(req) => match client.request(req).await {
-        Ok(response) => {
+        Ok(mut response) => {
+          // Reddit may respond with a 3xx. Decide whether or not to
+          // redirect based on caller params.
           if response.status().to_string().starts_with('3') {
-            request(
+            if !redirect {
+              return Ok(response);
+            };
+
+            return request(
+              method,
               response
                 .headers()
-                .get("Location")
+                .get(header::LOCATION)
                 .map(|val| {
-                  let new_url = percent_encode(val.as_bytes(), CONTROLS).to_string();
-                  format!("{}{}raw_json=1", new_url, if new_url.contains('?') { "&" } else { "?" })
+                  // We need to make adjustments to the URI
+                  // we get back from Reddit. Namely, we
+                  // must:
+                  //
+                  //     1. Remove the authority (e.g.
+                  //     https://www.reddit.com) that may be
+                  //     present, so that we recurse on the
+                  //     path (and query parameters) as
+                  //     required.
+                  //
+                  //     2. Percent-encode the path.
+                  let new_path = percent_encode(val.as_bytes(), CONTROLS).to_string().trim_start_matches(REDDIT_URL_BASE).to_string();
+                  format!("{}{}raw_json=1", new_path, if new_path.contains('?') { "&" } else { "?" })
                 })
                 .unwrap_or_default()
                 .to_string(),
+              true,
               quarantine,
             )
-            .await
-          } else {
-            Ok(response)
+            .await;
+          };
+
+          match response.headers().get(header::CONTENT_ENCODING) {
+            // Content not compressed.
+            None => Ok(response),
+
+            // Content encoded (hopefully with gzip).
+            Some(hdr) => {
+              match hdr.to_str() {
+                Ok(val) => match val {
+                  "gzip" => {}
+                  "identity" => return Ok(response),
+                  _ => return Err("Reddit response was encoded with an unsupported compressor".to_string()),
+                },
+                Err(_) => return Err("Reddit response was invalid".to_string()),
+              }
+
+              // We get here if the body is gzip-compressed.
+
+              // The body must be something that implements
+              // std::io::Read, hence the conversion to
+              // bytes::buf::Buf and then transformation into a
+              // Reader.
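+              // (Once decoded below, the response's
+              // Content-Encoding and Content-Length headers no
+              // longer describe the body, so both are rewritten
+              // after decompression finishes.)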
+              let mut decompressed: Vec<u8>;
+              {
+                let mut aggregated_body = match body::aggregate(response.body_mut()).await {
+                  Ok(b) => b.reader(),
+                  Err(e) => return Err(e.to_string()),
+                };
+
+                let mut decoder = match gzip::Decoder::new(&mut aggregated_body) {
+                  Ok(decoder) => decoder,
+                  Err(e) => return Err(e.to_string()),
+                };
+
+                decompressed = Vec::<u8>::new();
+                if let Err(e) = io::copy(&mut decoder, &mut decompressed) {
+                  return Err(e.to_string());
+                };
+              }
+
+              response.headers_mut().remove(header::CONTENT_ENCODING);
+              response.headers_mut().insert(header::CONTENT_LENGTH, decompressed.len().into());
+              *(response.body_mut()) = Body::from(decompressed);
+
+              Ok(response)
+            }
          }
        }
-        Err(e) => Err(e.to_string()),
+        Err(e) => {
+          dbg_msg!("{} {}: {}", method, path, e);
+
+          Err(e.to_string())
+        }
      },
      Err(_) => Err("Post url contains non-ASCII characters".to_string()),
    }
@@ -114,9 +244,6 @@ fn request(url: String, quarantine: bool) -> Boxed<Result<Response<Body>, String
 // Make a request to a Reddit API and parse the JSON response
 #[cached(size = 100, time = 30, result = true)]
 pub async fn json(path: String, quarantine: bool) -> Result<Value, String> {
-  // Build Reddit url from path
-  let url = format!("https://www.reddit.com{}", path);
-
   // Closure to quickly build errors
   let err = |msg: &str, e: String| -> Result<Value, String> {
     // eprintln!("{} - {}: {}", url, msg, e);
   };

   // Fetch the url...
-  match request(url.clone(), quarantine).await {
+  match reddit_get(path.clone(), quarantine).await {
     Ok(response) => {
       let status = response.status();
@@ -142,7 +269,7 @@
           .as_str()
           .unwrap_or_else(|| {
             json["message"].as_str().unwrap_or_else(|| {
-              eprintln!("{} - Error parsing reddit error", url);
+              eprintln!("{}{} - Error parsing reddit error", REDDIT_URL_BASE, path);
               "Error parsing reddit error"
             })
           })
diff --git a/src/duplicates.rs b/src/duplicates.rs
new file mode 100644
index 0000000..6a64fc8
--- /dev/null
+++ b/src/duplicates.rs
@@ -0,0 +1,228 @@
+// Handler for post duplicates.
+
+use crate::client::json;
+use crate::server::RequestExt;
+use crate::subreddit::{can_access_quarantine, quarantine};
+use crate::utils::{error, filter_posts, get_filters, parse_post, template, Post, Preferences};
+
+use askama::Template;
+use hyper::{Body, Request, Response};
+use serde_json::Value;
+use std::borrow::ToOwned;
+use std::collections::HashSet;
+use std::vec::Vec;
+
+/// DuplicatesParams contains the parameters in the URL.
+struct DuplicatesParams {
+  before: String,
+  after: String,
+  sort: String,
+}
+
+/// DuplicatesTemplate defines an Askama template for rendering duplicate
+/// posts.
+#[derive(Template)]
+#[template(path = "duplicates.html")]
+struct DuplicatesTemplate {
+  /// params contains the relevant request parameters.
+  params: DuplicatesParams,
+
+  /// post is the post whose ID is specified in the request URL. Note that
+  /// this is not necessarily the "original" post.
+  post: Post,
+
+  /// duplicates is the list of posts that, per Reddit, are duplicates of
+  /// Post above.
+  duplicates: Vec<Post>,
+
+  /// prefs are the user preferences.
+  prefs: Preferences,
+
+  /// url is the request URL.
+  url: String,
+
+  /// num_posts_filtered counts how many posts were filtered from the
+  /// duplicates list.
+  num_posts_filtered: u64,
+
+  /// all_posts_filtered is true if every duplicate was filtered. This is an
+  /// edge case but can still happen.
+  all_posts_filtered: bool,
+}
+
+/// Make the GET request to Reddit. It assumes `req` is the appropriate Reddit
+/// REST endpoint for enumerating post duplicates.
+pub async fn item(req: Request<Body>) -> Result<Response<Body>, String> {
+  let path: String = format!("{}.json?{}&raw_json=1", req.uri().path(), req.uri().query().unwrap_or_default());
+  let sub = req.param("sub").unwrap_or_default();
+  let quarantined = can_access_quarantine(&req, &sub);
+
+  // Log the request in debugging mode
+  #[cfg(debug_assertions)]
+  dbg!(req.param("id").unwrap_or_default());
+
+  // Send the GET, and await JSON.
+  match json(path, quarantined).await {
+    // Process response JSON.
+    Ok(response) => {
+      let filters = get_filters(&req);
+      let post = parse_post(&response[0]["data"]["children"][0]).await;
+      let (duplicates, num_posts_filtered, all_posts_filtered) = parse_duplicates(&response[1], &filters).await;
+
+      // These are the values for the "before=", "after=", and "sort="
+      // query params, respectively.
+      let mut before: String = String::new();
+      let mut after: String = String::new();
+      let mut sort: String = String::new();
+
+      // FIXME: We have to perform a kludge to work around a Reddit API
+      // bug.
+      //
+      // The JSON object in "data" will never contain a "before" value so
+      // it is impossible to use it to determine our position in a
+      // listing. We'll make do by getting the ID of the first post in
+      // the listing, setting that as our "before" value, and ask Reddit
+      // to give us a batch of duplicate posts up to that post.
+      //
+      // Likewise, if we provide a "before" request in the GET, the
+      // result won't have an "after" in the JSON, in addition to missing
+      // the "before." So we will have to use the final post in the list
+      // of duplicates.
+      //
+      // That being said, we'll also need to capture the value of the
+      // "sort=" parameter as well, so we will need to inspect the
+      // query key-value pairs anyway.
+      let l = duplicates.len();
+      if l > 0 {
+        // This gets set to true if "before=" is one of the GET params.
+        let mut have_before: bool = false;
+
+        // This gets set to true if "after=" is one of the GET params.
+        let mut have_after: bool = false;
+
+        // Inspect the query key-value pairs. We will need to record
+        // the value of "sort=", along with checking to see if either
+        // one of "before=" or "after=" are given.
+        //
+        // If we're in the middle of the batch (evidenced by the
+        // presence of a "before=" or "after=" parameter in the GET),
+        // then use the first post as the "before" reference.
+        //
+        // We'll do this iteratively. Better than with .map_or()
+        // since a closure will continue to operate on remaining
+        // elements even after we've determined one of "before=" or
+        // "after=" (or both) are in the GET request.
+        //
+        // In practice, there should only ever be one of "before=" or
+        // "after=" and never both.
+        let query_str = req.uri().query().unwrap_or_default().to_string();
+
+        if !query_str.is_empty() {
+          for param in query_str.split('&') {
+            let kv: Vec<&str> = param.split('=').collect();
+            if kv.len() < 2 {
+              // Reject invalid query parameter.
+              continue;
+            }
+
+            let key: &str = kv[0];
+            match key {
+              "before" => have_before = true,
+              "after" => have_after = true,
+              "sort" => {
+                let val: &str = kv[1];
+                match val {
+                  "new" | "num_comments" => sort = val.to_string(),
+                  _ => {}
+                }
+              }
+              _ => {}
+            }
+          }
+        }
+
+        if have_after {
+          before = "t3_".to_owned();
+          before.push_str(&duplicates[0].id);
+        }
+
+        // Address potentially missing "after". If "before=" is in the
+        // GET, then "after" will be null in the JSON (see FIXME
+        // above).
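+        // For example (hypothetical IDs): a request carrying
+        // "?before=t3_abc123" gets back a listing whose "after" is
+        // null, so the cursor for the next page must be synthesized
+        // from the last post in this batch.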
+        if have_before {
+          // The next batch will need to start from one after the
+          // last post in the current batch.
+          after = "t3_".to_owned();
+          after.push_str(&duplicates[l - 1].id);
+
+          // Here is where things get terrible. Notice that we
+          // haven't set `before`. In order to do so, we will
+          // need to know if there is a batch that exists before
+          // this one, and doing so requires actually fetching the
+          // previous batch. In other words, we have to do yet one
+          // more GET to Reddit. There is no other way to determine
+          // whether or not to define `before`.
+          //
+          // We'll mitigate that by requesting at most one duplicate.
+          let new_path: String = format!(
+            "{}.json?before=t3_{}&sort={}&limit=1&raw_json=1",
+            req.uri().path(),
+            &duplicates[0].id,
+            if sort.is_empty() { "num_comments".to_string() } else { sort.clone() }
+          );
+          match json(new_path, true).await {
+            Ok(response) => {
+              if !response[1]["data"]["children"].as_array().unwrap_or(&Vec::new()).is_empty() {
+                before = "t3_".to_owned();
+                before.push_str(&duplicates[0].id);
+              }
+            }
+            Err(msg) => {
+              // Abort entirely if we couldn't get the previous
+              // batch.
+              return error(req, msg).await;
+            }
+          }
+        } else {
+          after = response[1]["data"]["after"].as_str().unwrap_or_default().to_string();
+        }
+      }
+      let url = req.uri().to_string();
+
+      template(DuplicatesTemplate {
+        params: DuplicatesParams { before, after, sort },
+        post,
+        duplicates,
+        prefs: Preferences::new(req),
+        url,
+        num_posts_filtered,
+        all_posts_filtered,
+      })
+    }
+
+    // Process error.
+    Err(msg) => {
+      if msg == "quarantined" {
+        let sub = req.param("sub").unwrap_or_default();
+        quarantine(req, sub)
+      } else {
+        error(req, msg).await
+      }
+    }
+  }
+}
+
+// DUPLICATES
+async fn parse_duplicates(json: &serde_json::Value, filters: &HashSet<String>) -> (Vec<Post>, u64, bool) {
+  let post_duplicates: &Vec<Value> = &json["data"]["children"].as_array().map_or(Vec::new(), ToOwned::to_owned);
+  let mut duplicates: Vec<Post> = Vec::new();
+
+  // Process each post and place them in the Vec.
+  for val in post_duplicates.iter() {
+    let post: Post = parse_post(val).await;
+    duplicates.push(post);
+  }
+
+  let (num_posts_filtered, all_posts_filtered) = filter_posts(&mut duplicates, filters);
+  (duplicates, num_posts_filtered, all_posts_filtered)
+}
diff --git a/src/main.rs b/src/main.rs
index 4ce4a96..3b45bd2 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -3,6 +3,7 @@
 #![allow(clippy::cmp_owned)]

 // Reference local files
+mod duplicates;
 mod post;
 mod search;
 mod settings;
@@ -17,7 +18,7 @@ use futures_lite::FutureExt;
 use hyper::{header::HeaderValue, Body, Request, Response};

 mod client;
-use client::proxy;
+use client::{canonical_path, proxy};
 use server::RequestExt;
 use utils::{error, redirect, ThemeAssets};
@@ -244,6 +245,11 @@ async fn main() {
   app.at("/comments/:id/:title").get(|r| post::item(r).boxed());
   app.at("/comments/:id/:title/:comment_id").get(|r| post::item(r).boxed());

+  app.at("/r/:sub/duplicates/:id").get(|r| duplicates::item(r).boxed());
+  app.at("/r/:sub/duplicates/:id/:title").get(|r| duplicates::item(r).boxed());
+  app.at("/duplicates/:id").get(|r| duplicates::item(r).boxed());
+  app.at("/duplicates/:id/:title").get(|r| duplicates::item(r).boxed());
+
   app.at("/r/:sub/search").get(|r| search::find(r).boxed());

   app
@@ -259,9 +265,6 @@
   app.at("/r/:sub/:sort").get(|r| subreddit::community(r).boxed());

-  // Comments handler
-  app.at("/comments/:id").get(|r| post::item(r).boxed());
-
   // Front page
   app.at("/").get(|r| subreddit::community(r).boxed());
@@ -279,13 +282,25 @@
   // Handle about pages
   app.at("/about").get(|req| error(req, "About pages aren't added yet".to_string()).boxed());

-  app.at("/:id").get(|req: Request<Body>| match req.param("id").as_deref() {
-    // Sort front page
-    Some("best" | "hot" | "new" | "top" | "rising" | "controversial") => subreddit::community(req).boxed(),
-    // Short link for post
-    Some(id) if id.len() > 4 && id.len() < 7 => post::item(req).boxed(),
-    // Error message for unknown pages
-    _ => error(req, "Nothing here".to_string()).boxed(),
+  app.at("/:id").get(|req: Request<Body>| {
+    Box::pin(async move {
+      match req.param("id").as_deref() {
+        // Sort front page
+        Some("best" | "hot" | "new" | "top" | "rising" | "controversial") => subreddit::community(req).await,
+
+        // Short link for post
+        Some(id) if (5..7).contains(&id.len()) => match canonical_path(format!("/{}", id)).await {
+          Ok(path_opt) => match path_opt {
+            Some(path) => Ok(redirect(path)),
+            None => error(req, "Post ID is invalid. It may point to a post on a community that has been banned.").await,
+          },
+          Err(e) => error(req, e).await,
+        },
+
+        // Error message for unknown pages
+        _ => error(req, "Nothing here".to_string()).await,
+      }
+    })
   });

   // Default service in case no routes match
diff --git a/src/post.rs b/src/post.rs
index 5f3142a..e467fe7 100644
--- a/src/post.rs
+++ b/src/post.rs
@@ -3,7 +3,7 @@ use crate::client::json;
 use crate::server::RequestExt;
 use crate::subreddit::{can_access_quarantine, quarantine};
 use crate::utils::{
-  error, format_num, format_url, get_filters, param, rewrite_urls, setting, template, time, val, Author, Awards, Comment, Flags, Flair, FlairPart, Media, Post, Preferences,
+  error, format_num, get_filters, param, parse_post, rewrite_urls, setting, template, time, val, Author, Awards, Comment, Flair, FlairPart, Post, Preferences,
 };

 use hyper::{Body, Request, Response};
@@ -54,7 +54,7 @@ pub async fn item(req: Request<Body>) -> Result<Response<Body>, String> {
   // Otherwise, grab the JSON output from the request
   Ok(response) => {
     // Parse the JSON into Post and Comment structs
-    let post = parse_post(&response[0]).await;
+    let post = parse_post(&response[0]["data"]["children"][0]).await;
     let comments = parse_comments(&response[1], &post.permalink, &post.author.name, highlighted_comment, &get_filters(&req));
     let url = req.uri().to_string();
@@ -80,92 +80,6 @@ pub async fn item(req: Request<Body>) -> Result<Response<Body>, String> {
   }
 }

-// POSTS
-async fn parse_post(json: &serde_json::Value) -> Post {
-  // Retrieve post (as opposed to comments) from JSON
-  let post: &serde_json::Value = &json["data"]["children"][0];
-
-  // Grab UTC time as unix timestamp
-  let (rel_time, created) = time(post["data"]["created_utc"].as_f64().unwrap_or_default());
-  // Parse post score and upvote ratio
-  let score = post["data"]["score"].as_i64().unwrap_or_default();
-  let ratio: f64 = post["data"]["upvote_ratio"].as_f64().unwrap_or(1.0) * 100.0;
-
-  // Determine the type of media along with the media URL
-  let (post_type, media, gallery) = Media::parse(&post["data"]).await;
-
-  let awards: Awards = Awards::parse(&post["data"]["all_awardings"]);
-
-  let permalink = val(post, "permalink");
-
-  let body = if val(post, "removed_by_category") == "moderator" {
-    format!(
-      "<div class=\"md\"><p>[removed] — <a href=\"https://www.unddit.com{}\">view removed post</a></p></div>",
-      permalink
-    )
-  } else {
-    rewrite_urls(&val(post, "selftext_html"))
-  };
-
-  // Build a post using data parsed from Reddit post API
-  Post {
-    id: val(post, "id"),
-    title: val(post, "title"),
-    community: val(post, "subreddit"),
-    body,
-    author: Author {
-      name: val(post, "author"),
-      flair: Flair {
-        flair_parts: FlairPart::parse(
-          post["data"]["author_flair_type"].as_str().unwrap_or_default(),
-          post["data"]["author_flair_richtext"].as_array(),
-          post["data"]["author_flair_text"].as_str(),
-        ),
-        text: val(post, "link_flair_text"),
-        background_color: val(post, "author_flair_background_color"),
-        foreground_color: val(post, "author_flair_text_color"),
-      },
-      distinguished: val(post, "distinguished"),
-    },
-    permalink,
-    score: format_num(score),
-    upvote_ratio: ratio as i64,
-    post_type,
-    media,
-    thumbnail: Media {
-      url: format_url(val(post, "thumbnail").as_str()),
-      alt_url: String::new(),
-      width: post["data"]["thumbnail_width"].as_i64().unwrap_or_default(),
-      height: post["data"]["thumbnail_height"].as_i64().unwrap_or_default(),
-      poster: "".to_string(),
-    },
-    flair: Flair {
-      flair_parts: FlairPart::parse(
-        post["data"]["link_flair_type"].as_str().unwrap_or_default(),
-        post["data"]["link_flair_richtext"].as_array(),
-        post["data"]["link_flair_text"].as_str(),
-      ),
-      text: val(post, "link_flair_text"),
-      background_color: val(post, "link_flair_background_color"),
-      foreground_color: if val(post, "link_flair_text_color") == "dark" {
-        "black".to_string()
-      } else {
-        "white".to_string()
-      },
-    },
-    flags: Flags {
-      nsfw: post["data"]["over_18"].as_bool().unwrap_or(false),
-      stickied: post["data"]["stickied"].as_bool().unwrap_or(false),
-    },
-    domain: val(post, "domain"),
-    rel_time,
-    created,
-    comments: format_num(post["data"]["num_comments"].as_i64().unwrap_or_default()),
-    gallery,
-    awards,
-  }
-}
-
 // COMMENTS
 fn parse_comments(json: &serde_json::Value, post_link: &str, post_author: &str, highlighted_comment: &str, filters: &HashSet<String>) -> Vec<Comment> {
   // Parse the comment JSON into a Vector of Comments
diff --git a/src/search.rs b/src/search.rs
index 4dd3b5f..0a62b06 100644
--- a/src/search.rs
+++ b/src/search.rs
@@ -42,6 +42,8 @@ struct SearchTemplate {
   /// Whether all fetched posts are filtered (to differentiate between no posts fetched in the first place,
   /// and all fetched posts being filtered).
   all_posts_filtered: bool,
+  /// Whether all posts were hidden because they are NSFW (and user has disabled show NSFW)
+  all_posts_hidden_nsfw: bool,
 }

 // SERVICES
@@ -100,12 +102,13 @@ pub async fn find(req: Request<Body>) -> Result<Response<Body>, String> {
       url,
       is_filtered: true,
       all_posts_filtered: false,
+      all_posts_hidden_nsfw: false,
     })
   } else {
     match Post::fetch(&path, quarantined).await {
       Ok((mut posts, after)) => {
-        let all_posts_filtered = filter_posts(&mut posts, &filters);
-
+        let (_, all_posts_filtered) = filter_posts(&mut posts, &filters);
+        let all_posts_hidden_nsfw = posts.iter().all(|p| p.flags.nsfw) && setting(&req, "show_nsfw") != "on";
         template(SearchTemplate {
           posts,
           subreddits,
@@ -123,6 +126,7 @@
           url,
           is_filtered: false,
           all_posts_filtered,
+          all_posts_hidden_nsfw,
         })
       }
       Err(msg) => {
diff --git a/src/server.rs b/src/server.rs
index 979dbd7..c277b6b 100644
--- a/src/server.rs
+++ b/src/server.rs
@@ -1,17 +1,80 @@
+use brotli::enc::{BrotliCompress, BrotliEncoderParams};
+use cached::proc_macro::cached;
 use cookie::Cookie;
+use core::f64;
 use futures_lite::{future::Boxed, Future, FutureExt};
 use hyper::{
-  header::HeaderValue,
+  body,
+  body::HttpBody,
+  header,
   service::{make_service_fn, service_fn},
   HeaderMap,
 };
 use hyper::{Body, Method, Request, Response, Server as HyperServer};
+use libflate::gzip;
 use route_recognizer::{Params, Router};
-use std::{pin::Pin, result::Result};
+use std::{
+  cmp::Ordering,
+  io,
+  pin::Pin,
+  result::Result,
+  str::{from_utf8, Split},
+  string::ToString,
+};
 use time::Duration;

+use crate::dbg_msg;
+
 type BoxResponse = Pin<Box<dyn Future<Output = Result<Response<Body>, String>> + Send>>;

+/// Compressors for the response Body, in ascending order of preference.
+#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+enum CompressionType {
+  Passthrough,
+  Gzip,
+  Brotli,
+}
+
+/// All browsers support gzip, so if we are given `Accept-Encoding: *`, deliver
+/// gzipped content.
+///
+/// Brotli would be nice universally, but Safari (iOS, iPhone, macOS) reportedly
+/// doesn't support it yet.
+const DEFAULT_COMPRESSOR: CompressionType = CompressionType::Gzip;
+
+impl CompressionType {
+  /// Returns a `CompressionType` given a content coding
+  /// in [RFC 7231](https://datatracker.ietf.org/doc/html/rfc7231#section-5.3.4)
+  /// format.
+  fn parse(s: &str) -> Option<CompressionType> {
+    let c = match s {
+      // Compressors we support.
+      "gzip" => CompressionType::Gzip,
+      "br" => CompressionType::Brotli,
+
+      // The wildcard means that we can choose whatever
+      // compression we prefer. In this case, use the
+      // default.
+      "*" => DEFAULT_COMPRESSOR,
+
+      // Compressor not supported.
+      _ => return None,
+    };
+
+    Some(c)
+  }
+}
+
+impl ToString for CompressionType {
+  fn to_string(&self) -> String {
+    match self {
+      CompressionType::Gzip => "gzip".to_string(),
+      CompressionType::Brotli => "br".to_string(),
+      _ => String::new(),
+    }
+  }
+}
+
 pub struct Route<'a> {
   router: &'a mut Router<fn(Request<Body>) -> BoxResponse>,
   path: String,
@@ -97,7 +160,7 @@ impl ResponseExt for Response<Body> {
   }

   fn insert_cookie(&mut self, cookie: Cookie) {
-    if let Ok(val) = HeaderValue::from_str(&cookie.to_string()) {
+    if let Ok(val) = header::HeaderValue::from_str(&cookie.to_string()) {
       self.headers_mut().append("Set-Cookie", val);
     }
   }
@@ -106,7 +169,7 @@
     let mut cookie = Cookie::named(name);
     cookie.set_path("/");
     cookie.set_max_age(Duration::seconds(1));
-    if let Ok(val) = HeaderValue::from_str(&cookie.to_string()) {
+    if let Ok(val) = header::HeaderValue::from_str(&cookie.to_string()) {
       self.headers_mut().append("Set-Cookie", val);
     }
   }
@@ -156,10 +219,11 @@ impl Server {
       // let shared_router = router.clone();
       async move {
         Ok::<_, String>(service_fn(move |req: Request<Body>| {
-          let headers = default_headers.clone();
+          let req_headers = req.headers().clone();
+          let def_headers = default_headers.clone();

           // Remove double slashes and decode encoded slashes
-          let mut path = req.uri().path().replace("//", "/").replace("%2F","/");
+          let mut path = req.uri().path().replace("//", "/").replace("%2F", "/");

           // Remove trailing slashes
           if path != "/" && path.ends_with('/') {
@@ -176,26 +240,20 @@
             // Run the route's function
             let func = (found.handler().to_owned().to_owned())(parammed);
             async move {
-              let res: Result<Response<Body>, String> = func.await;
-              // Add default headers to response
-              res.map(|mut response| {
-                response.headers_mut().extend(headers);
-                response
-              })
+              match func.await {
+                Ok(mut res) => {
+                  res.headers_mut().extend(def_headers);
+                  let _ = compress_response(req_headers, &mut res).await;
+
+                  Ok(res)
+                }
+                Err(msg) => new_boilerplate(def_headers, req_headers, 500, Body::from(msg)).await,
+              }
             }
             .boxed()
           }
           // If there was a routing error
-          Err(e) => async move {
-            // Return a 404 error
-            let res: Result<Response<Body>, String> = Ok(Response::builder().status(404).body(e.into()).unwrap_or_default());
-            // Add default headers to response
-            res.map(|mut response| {
-              response.headers_mut().extend(headers);
-              response
-            })
-          }
-          .boxed(),
+          Err(e) => async move { new_boilerplate(def_headers, req_headers, 404, e.into()).await }.boxed(),
         }
       }))
     }
@@ -213,3 +271,480 @@
   }
 }
+/// Create a boilerplate Response for error conditions. This response will be
+/// compressed if requested by client.
+async fn new_boilerplate(
+  default_headers: HeaderMap,
+  req_headers: HeaderMap,
+  status: u16,
+  body: Body,
+) -> Result<Response<Body>, String> {
+  match Response::builder().status(status).body(body) {
+    Ok(mut res) => {
+      let _ = compress_response(req_headers, &mut res).await;
+
+      res.headers_mut().extend(default_headers.clone());
+      Ok(res)
+    }
+    Err(msg) => Err(msg.to_string()),
+  }
+}
+
+/// Determines the desired compressor based on the Accept-Encoding header.
+///
+/// This function will honor the [q-value](https://developer.mozilla.org/en-US/docs/Glossary/Quality_values)
+/// for each compressor. The q-value is an optional parameter, a decimal value
+/// on \[0..1\], to order the compressors by preference. An Accept-Encoding value
+/// with no q-values is also accepted.
+
+/// Determines the desired compressor based on the Accept-Encoding header.
+///
+/// This function will honor the [q-value](https://developer.mozilla.org/en-US/docs/Glossary/Quality_values)
+/// for each compressor. The q-value is an optional parameter, a decimal value
+/// on \[0..1\], to order the compressors by preference. An Accept-Encoding value
+/// with no q-values is also accepted.
+///
+/// Here are [examples](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding#examples)
+/// of valid Accept-Encoding headers.
+///
+/// ```http
+/// Accept-Encoding: gzip
+/// Accept-Encoding: gzip, compress, br
+/// Accept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1
+/// ```
+fn determine_compressor(accept_encoding: &str) -> Option<CompressionType> {
+	if accept_encoding.is_empty() {
+		return None;
+	};
+
+	// Keep track of the compressor candidate based on both the client's
+	// preference and our own. Concrete examples:
+	//
+	// 1. "Accept-Encoding: gzip, br" => assuming we like brotli more than
+	//    gzip, and the browser supports brotli, we choose brotli
+	//
+	// 2. "Accept-Encoding: gzip;q=0.8, br;q=0.3" => the client has stated a
+	//    preference for gzip over brotli, so we choose gzip
+	//
+	// To do this, we need to define a struct which contains the requested
+	// compressor (abstracted as a CompressionType enum) and the q-value. If
+	// no q-value is defined for the compressor, we assume one of 1.0. We
+	// first compare compressor candidates by comparing q-values, and then
+	// CompressionTypes. We keep track of whatever is the greatest per our
+	// ordering.
+
+	struct CompressorCandidate {
+		alg: CompressionType,
+		q: f64,
+	}
+
+	impl Ord for CompressorCandidate {
+		fn cmp(&self, other: &Self) -> Ordering {
+			// Compare q-values. Break ties with the
+			// CompressionType values.
+
+			match self.q.total_cmp(&other.q) {
+				Ordering::Equal => self.alg.cmp(&other.alg),
+				ord => ord,
+			}
+		}
+	}
+
+	impl PartialOrd for CompressorCandidate {
+		fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+			// Guard against NAN, both on our end and on the other.
+			if self.q.is_nan() || other.q.is_nan() {
+				return None;
+			};
+
+			// f64 and CompressionType are ordered, except in the case
+			// where the f64 is NAN (which we checked against), so we
+			// can safely return a Some here.
+			Some(self.cmp(other))
+		}
+	}
+
+	impl PartialEq for CompressorCandidate {
+		fn eq(&self, other: &Self) -> bool {
+			(self.q == other.q) && (self.alg == other.alg)
+		}
+	}
+
+	impl Eq for CompressorCandidate {}
+
+	// This is the current candidate.
+	//
+	// Assume no candidate so far. We do this by assigning the sentinel value
+	// of negative infinity to the q-value. If this value is negative infinity,
+	// that means there was no viable compressor candidate.
+	let mut cur_candidate = CompressorCandidate {
+		alg: CompressionType::Passthrough,
+		q: f64::NEG_INFINITY,
+	};
+
+	// This loop reads the requested compressors and keeps track of whichever
+	// one has the highest priority per our heuristic.
+	for val in accept_encoding.to_string().split(',') {
+		let mut q: f64 = 1.0;
+
+		// The compressor and q-value (if the latter is defined)
+		// will be delimited by semicolons.
+		let mut spl: Split<'_, char> = val.split(';');
+
+		// Get the compressor. For example, in
+		//   gzip;q=0.8
+		// this grabs "gzip" in the string. It
+		// will further validate the compressor against the
+		// list of those we support. If it is not supported,
+		// we move onto the next one.
+		let compressor: CompressionType = match spl.next() {
+			// CompressionType::parse will return the appropriate enum given
+			// a string. For example, it will return CompressionType::Gzip
+			// when given "gzip".
+			Some(s) => match CompressionType::parse(s.trim()) {
+				Some(candidate) => candidate,
+
+				// We don't support the requested compression algorithm.
+				None => continue,
+			},
+
+			// We should never get here, but I'm paranoid.
+			None => continue,
+		};
+
+		// Get the q-value. This might not be defined, in which case assume
+		// 1.0.
+		if let Some(s) = spl.next() {
+			if !(s.len() > 2 && s.starts_with("q=")) {
+				// If the q-value is malformed, the header is malformed, so
+				// abort.
+				return None;
+			}
+
+			match s[2..].parse::<f64>() {
+				Ok(val) => {
+					if (0.0..=1.0).contains(&val) {
+						q = val;
+					} else {
+						// If the value is outside [0..1], header is malformed.
+						// Abort.
+						return None;
+					};
+				}
+				Err(_) => {
+					// If this isn't a f64, then assume a malformed header
+					// value and abort.
+					return None;
+				}
+			}
+		};
+
+		// If new_candidate > cur_candidate, make new_candidate the new
+		// cur_candidate. But do this safely! It is very possible that
+		// someone gave us the string "NAN", which (&str).parse::<f64>
+		// will happily translate to f64::NAN.
+		let new_candidate = CompressorCandidate { alg: compressor, q };
+		if let Some(ord) = new_candidate.partial_cmp(&cur_candidate) {
+			if ord == Ordering::Greater {
+				cur_candidate = new_candidate;
+			}
+		};
+	}
+
+	if cur_candidate.q != f64::NEG_INFINITY {
+		Some(cur_candidate.alg)
+	} else {
+		None
+	}
+}
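To make the q-value heuristic concrete, a few examples of what `determine_compressor` resolves to (these restate cases from the unit tests added at the bottom of this file):

```rust
// No q-values: every coding defaults to q=1.0, so our own ordering
// (brotli over gzip) breaks the tie.
assert_eq!(determine_compressor("gzip, br"), Some(CompressionType::Brotli));

// Explicit q-values override our preference: gzip (0.8) beats brotli (0.3).
assert_eq!(determine_compressor("gzip;q=0.8, br;q=0.3"), Some(CompressionType::Gzip));

// A malformed q-value (e.g. "NAN") invalidates the entire header.
assert_eq!(determine_compressor("gzip;q=NAN"), None);
```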
+
+/// Compress the response body, if possible or desirable. The Body will be
+/// compressed in place, and a new header Content-Encoding will be set
+/// indicating the compression algorithm.
+///
+/// This function deems the Body eligible for compression if and only if the
+/// following conditions are met:
+///
+/// 1. the HTTP client requests a compression encoding in the Accept-Encoding
+///    header (hence the need for the req_headers);
+///
+/// 2. the content encoding corresponds to a compression algorithm we support;
+///
+/// 3. the Media type in the Content-Type response header is text with any
+///    subtype (e.g. text/plain) or application/json.
+///
+/// compress_response returns Ok on successful compression, or if not all three
+/// conditions above are met. It returns Err if there was a problem decoding
+/// any header in either req_headers or res, but res will remain intact.
+///
+/// This function logs errors to stderr, but only in debug mode. No information
+/// is logged in release builds.
+async fn compress_response(req_headers: HeaderMap, res: &mut Response<Body>) -> Result<(), String> {
+	// Check if the data is eligible for compression.
+	if let Some(hdr) = res.headers().get(header::CONTENT_TYPE) {
+		match from_utf8(hdr.as_bytes()) {
+			Ok(val) => {
+				let s = val.to_string();
+
+				// TODO: better determination of what is eligible for compression
+				if !(s.starts_with("text/") || s.starts_with("application/json")) {
+					return Ok(());
+				};
+			}
+			Err(e) => {
+				dbg_msg!(e);
+				return Err(e.to_string());
+			}
+		};
+	} else {
+		// Response declares no Content-Type. Assume for simplicity that it
+		// cannot be compressed.
+		return Ok(());
+	};
+
+	// Don't bother if the size of the response body will fit within an IP
+	// frame (less the bytes that make up the TCP/IP and HTTP headers).
+	if res.body().size_hint().lower() < 1452 {
+		return Ok(());
+	};
+
+	// Quick and dirty closure for extracting a header from the request and
+	// returning it as a &str.
+	let get_req_header = |k: header::HeaderName| -> Option<&str> {
+		match req_headers.get(k) {
+			Some(hdr) => match from_utf8(hdr.as_bytes()) {
+				Ok(val) => Some(val),
+
+				#[cfg(debug_assertions)]
+				Err(e) => {
+					dbg_msg!(e);
+					None
+				}
+
+				#[cfg(not(debug_assertions))]
+				Err(_) => None,
+			},
+			None => None,
+		}
+	};
+
+	// Check to see which compressor is requested, and if we can use it.
+	let accept_encoding: &str = match get_req_header(header::ACCEPT_ENCODING) {
+		Some(val) => val,
+		None => return Ok(()), // Client requested no compression.
+	};
+
+	let compressor: CompressionType = match determine_compressor(accept_encoding) {
+		Some(c) => c,
+		None => return Ok(()),
+	};
+
+	// Get the body from the response.
+	let body_bytes: Vec<u8> = match body::to_bytes(res.body_mut()).await {
+		Ok(b) => b.to_vec(),
+		Err(e) => {
+			dbg_msg!(e);
+			return Err(e.to_string());
+		}
+	};
+
+	// Compress!
+	match compress_body(compressor, body_bytes) {
+		Ok(compressed) => {
+			// We get here iff the compression was successful. Replace the body
+			// with the compressed payload, and add the appropriate
+			// Content-Encoding header in the response.
+			res.headers_mut().insert(header::CONTENT_ENCODING, compressor.to_string().parse().unwrap());
+			*(res.body_mut()) = Body::from(compressed);
+		}
+
+		Err(e) => return Err(e),
+	}
+
+	Ok(())
+}
+
+/// Compresses a `Vec<u8>` given a [`CompressionType`].
+///
+/// This is a helper function for [`compress_response`] and should not be
+/// called directly.
+
+// I've chosen a TTL of 600 (== 10 minutes) since compression is
+// computationally expensive and we don't want to be doing it often. This is
+// larger than client::json's TTL, but that's okay, because if client::json
+// returns a new serde_json::Value, body_bytes changes, so this function will
+// execute again.
+#[cached(size = 100, time = 600, result = true)]
+fn compress_body(compressor: CompressionType, body_bytes: Vec<u8>) -> Result<Vec<u8>, String> {
+	// io::Cursor implements io::Read, required for our encoders.
+	let mut reader = io::Cursor::new(body_bytes);
+
+	let compressed: Vec<u8> = match compressor {
+		CompressionType::Gzip => {
+			let mut gz: gzip::Encoder<Vec<u8>> = match gzip::Encoder::new(Vec::new()) {
+				Ok(gz) => gz,
+				Err(e) => {
+					dbg_msg!(e);
+					return Err(e.to_string());
+				}
+			};
+
+			match io::copy(&mut reader, &mut gz) {
+				Ok(_) => match gz.finish().into_result() {
+					Ok(compressed) => compressed,
+					Err(e) => {
+						dbg_msg!(e);
+						return Err(e.to_string());
+					}
+				},
+				Err(e) => {
+					dbg_msg!(e);
+					return Err(e.to_string());
+				}
+			}
+		}
+
+		CompressionType::Brotli => {
+			// We may want to make the compression parameters configurable
+			// in the future. For now, the defaults are sufficient.
+			let brotli_params = BrotliEncoderParams::default();
+
+			let mut compressed = Vec::<u8>::new();
+			match BrotliCompress(&mut reader, &mut compressed, &brotli_params) {
+				Ok(_) => compressed,
+				Err(e) => {
+					dbg_msg!(e);
+					return Err(e.to_string());
+				}
+			}
+		}
+
+		// This arm is for any requested compressor for which we don't yet
+		// have an implementation.
+		_ => {
+			let msg = "unsupported compressor".to_string();
+			return Err(msg);
+		}
+	};
+
+	Ok(compressed)
+}
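Note the effect of `#[cached(...)]` above: within the 600-second TTL, identical `(compressor, body_bytes)` arguments skip recompression. A minimal sketch of that assumption (illustrative only; the short `body` here is below compress_response's 1452-byte cutoff, which compress_body itself does not enforce):

```rust
// Hypothetical demonstration of the memoization assumed above.
let body = b"an example response body".to_vec();
let first = compress_body(CompressionType::Gzip, body.clone()); // compresses
let second = compress_body(CompressionType::Gzip, body); // served from the cache
assert_eq!(first, second);
```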
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use brotli::Decompressor as BrotliDecompressor;
+	use futures_lite::future::block_on;
+	use lipsum::lipsum;
+	use std::{boxed::Box, io};
+
+	#[test]
+	fn test_determine_compressor() {
+		// Single compressor given.
+		assert_eq!(determine_compressor("unsupported"), None);
+		assert_eq!(determine_compressor("gzip"), Some(CompressionType::Gzip));
+		assert_eq!(determine_compressor("*"), Some(DEFAULT_COMPRESSOR));
+
+		// Multiple compressors.
+		assert_eq!(determine_compressor("gzip, br"), Some(CompressionType::Brotli));
+		assert_eq!(determine_compressor("gzip;q=0.8, br;q=0.3"), Some(CompressionType::Gzip));
+		assert_eq!(determine_compressor("br, gzip"), Some(CompressionType::Brotli));
+		assert_eq!(determine_compressor("br;q=0.3, gzip;q=0.4"), Some(CompressionType::Gzip));
+
+		// Invalid q-values.
+		assert_eq!(determine_compressor("gzip;q=NAN"), None);
+	}
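A note on the test below: its `ae_gen!` macro simply joins compressor names into an Accept-Encoding value, so (per `CompressionType::to_string`):

```rust
// ae_gen!(CompressionType::Gzip)                          => "gzip"
// ae_gen!(CompressionType::Brotli, CompressionType::Gzip) => "br, gzip"
```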
+
+	#[test]
+	fn test_compress_response() {
+		// This macro generates an Accept-Encoding header value given any number of
+		// compressors.
+		macro_rules! ae_gen {
+			($x:expr) => {
+				$x.to_string().as_str()
+			};
+
+			($x:expr, $($y:expr),+) => {
+				format!("{}, {}", $x.to_string(), ae_gen!($($y),+)).as_str()
+			};
+		}
+
+		for accept_encoding in [
+			"*",
+			ae_gen!(CompressionType::Gzip),
+			ae_gen!(CompressionType::Brotli, CompressionType::Gzip),
+			ae_gen!(CompressionType::Brotli),
+		] {
+			// Determine what the expected encoding should be based on the
+			// specific encodings we accept.
+			let expected_encoding: CompressionType = match determine_compressor(accept_encoding) {
+				Some(s) => s,
+				None => panic!("determine_compressor(accept_encoding) => None"),
+			};
+
+			// Build headers with our Accept-Encoding.
+			let mut req_headers = HeaderMap::new();
+			req_headers.insert(header::ACCEPT_ENCODING, header::HeaderValue::from_str(accept_encoding).unwrap());
+
+			// Build test response.
+			let lorem_ipsum: String = lipsum(10000);
+			let expected_lorem_ipsum = Vec::<u8>::from(lorem_ipsum.as_str());
+			let mut res = Response::builder()
+				.status(200)
+				.header(header::CONTENT_TYPE, "text/plain")
+				.body(Body::from(lorem_ipsum))
+				.unwrap();
+
+			// Perform the compression.
+			if let Err(e) = block_on(compress_response(req_headers, &mut res)) {
+				panic!("compress_response(req_headers, &mut res) => Err(\"{}\")", e);
+			};
+
+			// If the content was compressed, we expect the Content-Encoding
+			// header to be modified.
+			assert_eq!(
+				res
+					.headers()
+					.get(header::CONTENT_ENCODING)
+					.unwrap_or_else(|| panic!("missing content-encoding header"))
+					.to_str()
+					.unwrap_or_else(|_| panic!("failed to convert Content-Encoding header::HeaderValue to String")),
+				expected_encoding.to_string()
+			);
+
+			// Decompress body and make sure it's equal to what we started
+			// with.
+			//
+			// In the case of no compression, just make sure the "new" body in
+			// the Response is the same as what we started with.
+			let body_vec = match block_on(body::to_bytes(res.body_mut())) {
+				Ok(b) => b.to_vec(),
+				Err(e) => panic!("{}", e),
+			};
+
+			if expected_encoding == CompressionType::Passthrough {
+				assert!(body_vec.eq(&expected_lorem_ipsum));
+				continue;
+			}
+
+			// This provides an io::Read for the underlying body.
+			let mut body_cursor: io::Cursor<Vec<u8>> = io::Cursor::new(body_vec);
+
+			// Match the appropriate decompressor for the given
+			// expected_encoding.
+			let mut decoder: Box<dyn io::Read> = match expected_encoding {
+				CompressionType::Gzip => match gzip::Decoder::new(&mut body_cursor) {
+					Ok(dgz) => Box::new(dgz),
+					Err(e) => panic!("{}", e),
+				},
+
+				CompressionType::Brotli => Box::new(BrotliDecompressor::new(body_cursor, expected_lorem_ipsum.len())),
+
+				_ => panic!("no decompressor for {}", expected_encoding.to_string()),
+			};
+
+			let mut decompressed = Vec::<u8>::new();
+			match io::copy(&mut decoder, &mut decompressed) {
+				Ok(_) => {}
+				Err(e) => panic!("{}", e),
+			};
+
+			assert!(decompressed.eq(&expected_lorem_ipsum));
+		}
+	}
+}
diff --git a/src/settings.rs b/src/settings.rs
index 9cdd266..0fd2640 100644
--- a/src/settings.rs
+++ b/src/settings.rs
@@ -19,7 +19,7 @@ struct SettingsTemplate {
 
 // CONSTANTS
 
-const PREFS: [&str; 10] = [
+const PREFS: [&str; 11] = [
 	"theme",
 	"front_page",
 	"layout",
@@ -27,6 +27,7 @@ const PREFS: [&str; 10] = [
 	"comment_sort",
 	"post_sort",
 	"show_nsfw",
+	"blur_nsfw",
 	"use_hls",
 	"hide_hls_notification",
 	"autoplay_videos",
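The subreddit, user, and search handlers below all derive the new `all_posts_hidden_nsfw` flag with the same expression (restated here for clarity; `posts`, `req`, and `setting` come from each handler's scope):

```rust
// True only when every fetched post is NSFW and the viewer has not
// enabled "Show NSFW posts".
let all_posts_hidden_nsfw = posts.iter().all(|p| p.flags.nsfw) && setting(&req, "show_nsfw") != "on";
```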
diff --git a/src/subreddit.rs b/src/subreddit.rs
index b03e7e9..4aff027 100644
--- a/src/subreddit.rs
+++ b/src/subreddit.rs
@@ -24,6 +24,8 @@ struct SubredditTemplate {
 	/// Whether all fetched posts are filtered (to differentiate between no posts fetched in the first place,
 	/// and all fetched posts being filtered).
 	all_posts_filtered: bool,
+	/// Whether all posts were hidden because they are NSFW (and user has disabled show NSFW)
+	all_posts_hidden_nsfw: bool,
 }
 
 #[derive(Template)]
@@ -111,12 +113,13 @@ pub async fn community(req: Request<Body>) -> Result<Response<Body>, String> {
 			redirect_url,
 			is_filtered: true,
 			all_posts_filtered: false,
+			all_posts_hidden_nsfw: false,
 		})
 	} else {
 		match Post::fetch(&path, quarantined).await {
 			Ok((mut posts, after)) => {
-				let all_posts_filtered = filter_posts(&mut posts, &filters);
-
+				let (_, all_posts_filtered) = filter_posts(&mut posts, &filters);
+				let all_posts_hidden_nsfw = posts.iter().all(|p| p.flags.nsfw) && setting(&req, "show_nsfw") != "on";
 				template(SubredditTemplate {
 					sub,
 					posts,
@@ -127,6 +130,7 @@ pub async fn community(req: Request<Body>) -> Result<Response<Body>, String> {
 					redirect_url,
 					is_filtered: false,
 					all_posts_filtered,
+					all_posts_hidden_nsfw,
 				})
 			}
 			Err(msg) => match msg.as_str() {
diff --git a/src/user.rs b/src/user.rs
index 69c4ae9..8c0540c 100644
--- a/src/user.rs
+++ b/src/user.rs
@@ -1,7 +1,7 @@
 // CRATES
 use crate::client::json;
 use crate::server::RequestExt;
-use crate::utils::{error, filter_posts, format_url, get_filters, param, template, Post, Preferences, User};
+use crate::utils::{error, filter_posts, format_url, get_filters, param, setting, template, Post, Preferences, User};
 use askama::Template;
 use hyper::{Body, Request, Response};
 use time::{macros::format_description, OffsetDateTime};
@@ -24,6 +24,8 @@ struct UserTemplate {
 	/// Whether all fetched posts are filtered (to differentiate between no posts fetched in the first place,
 	/// and all fetched posts being filtered).
 	all_posts_filtered: bool,
+	/// Whether all posts were hidden because they are NSFW (and user has disabled show NSFW)
+	all_posts_hidden_nsfw: bool,
 }
 
 // FUNCTIONS
@@ -58,13 +60,14 @@ pub async fn profile(req: Request<Body>) -> Result<Response<Body>, String> {
 			redirect_url,
 			is_filtered: true,
 			all_posts_filtered: false,
+			all_posts_hidden_nsfw: false,
 		})
 	} else {
 		// Request user posts/comments from Reddit
 		match Post::fetch(&path, false).await {
 			Ok((mut posts, after)) => {
-				let all_posts_filtered = filter_posts(&mut posts, &filters);
-
+				let (_, all_posts_filtered) = filter_posts(&mut posts, &filters);
+				let all_posts_hidden_nsfw = posts.iter().all(|p| p.flags.nsfw) && setting(&req, "show_nsfw") != "on";
 				template(UserTemplate {
 					user,
 					posts,
@@ -76,6 +79,7 @@ pub async fn profile(req: Request<Body>) -> Result<Response<Body>, String> {
 					redirect_url,
 					is_filtered: false,
 					all_posts_filtered,
+					all_posts_hidden_nsfw,
 				})
 			}
 			// If there is an error show error page
diff --git a/src/utils.rs b/src/utils.rs
index 2691d16..06237e9 100644
--- a/src/utils.rs
+++ b/src/utils.rs
@@ -13,6 +13,21 @@ use std::str::FromStr;
 use time::{macros::format_description, Duration, OffsetDateTime};
 use url::Url;
 
+/// Write a message to stderr on debug mode. This function is a no-op on
+/// release code.
+#[macro_export]
+macro_rules! dbg_msg {
+	($x:expr) => {
+		#[cfg(debug_assertions)]
+		eprintln!("{}:{}: {}", file!(), line!(), $x.to_string())
+	};
+
+	($($x:expr),+) => {
+		#[cfg(debug_assertions)]
+		dbg_msg!(format!($($x),+))
+	};
+}
+
 // Post flair with content, background color and foreground color
 pub struct Flair {
 	pub flair_parts: Vec<FlairPart>,
@@ -210,6 +225,7 @@ pub struct Post {
 	pub domain: String,
 	pub rel_time: String,
 	pub created: String,
+	pub num_duplicates: u64,
 	pub comments: (String, String),
 	pub gallery: Vec<GalleryMedia>,
 	pub awards: Awards,
@@ -304,11 +320,12 @@ impl Post {
 			},
 			flags: Flags {
 				nsfw: data["over_18"].as_bool().unwrap_or_default(),
-				stickied: data["stickied"].as_bool().unwrap_or_default(),
+				stickied: data["stickied"].as_bool().unwrap_or_default() || data["pinned"].as_bool().unwrap_or_default(),
 			},
 			permalink: val(post, "permalink"),
 			rel_time,
 			created,
+			num_duplicates: post["data"]["num_duplicates"].as_u64().unwrap_or(0),
 			comments: format_num(data["num_comments"].as_i64().unwrap_or_default()),
 			gallery,
 			awards,
@@ -447,6 +464,7 @@ pub struct Preferences {
 	pub layout: String,
 	pub wide: String,
 	pub show_nsfw: String,
+	pub blur_nsfw: String,
 	pub hide_hls_notification: String,
 	pub use_hls: String,
 	pub autoplay_videos: String,
@@ -478,6 +496,7 @@ impl Preferences {
 			layout: setting(&req, "layout"),
 			wide: setting(&req, "wide"),
 			show_nsfw: setting(&req, "show_nsfw"),
+			blur_nsfw: setting(&req, "blur_nsfw"),
 			use_hls: setting(&req, "use_hls"),
 			hide_hls_notification: setting(&req, "hide_hls_notification"),
 			autoplay_videos: setting(&req, "autoplay_videos"),
@@ -494,15 +513,110 @@ pub fn get_filters(req: &Request<Body>) -> HashSet<String> {
 	setting(req, "filters").split('+').map(String::from).filter(|s| !s.is_empty()).collect::<HashSet<String>>()
 }
 
-/// Filters a `Vec<Post>` by the given `HashSet` of filters (each filter being a subreddit name or a user name). If a
-/// `Post`'s subreddit or author is found in the filters, it is removed. Returns `true` if _all_ posts were filtered
-/// out, or `false` otherwise.
-pub fn filter_posts(posts: &mut Vec<Post>, filters: &HashSet<String>) -> bool {
+/// Filters a `Vec<Post>` by the given `HashSet` of filters (each filter being
+/// a subreddit name or a user name). If a `Post`'s subreddit or author is
+/// found in the filters, it is removed.
+///
+/// The first value of the return tuple is the number of posts filtered. The
+/// second return value is `true` if all posts were filtered.
+pub fn filter_posts(posts: &mut Vec<Post>, filters: &HashSet<String>) -> (u64, bool) {
+	// This is the length of the Vec prior to applying the filter.
+	let lb: u64 = posts.len().try_into().unwrap_or(0);
+
 	if posts.is_empty() {
-		false
+		(0, false)
 	} else {
-		posts.retain(|p| !filters.contains(&p.community) && !filters.contains(&["u_", &p.author.name].concat()));
-		posts.is_empty()
+		posts.retain(|p| !(filters.contains(&p.community) || filters.contains(&["u_", &p.author.name].concat())));
+
+		// Get the length of the Vec after applying the filter.
+		// If lb > la, then at least one post was removed.
+		let la: u64 = posts.len().try_into().unwrap_or(0);
+
+		(lb - la, posts.is_empty())
+	}
+}
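Callers destructure the new tuple returned by `filter_posts`; the handlers in this diff discard the count, while the duplicates page below consumes it as `num_posts_filtered`. A sketch of both patterns (illustrative only, not additional diff content):

```rust
// Keep only the "were all posts filtered?" flag, as the subreddit,
// user, and search handlers do...
let (_, all_posts_filtered) = filter_posts(&mut posts, &filters);

// ...or keep the count too, as a duplicates-style page does.
let (num_posts_filtered, all_posts_filtered) = filter_posts(&mut posts, &filters);
```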
+
+/// Creates a [`Post`] from a provided JSON.
+pub async fn parse_post(post: &serde_json::Value) -> Post {
+	// Grab UTC time as unix timestamp
+	let (rel_time, created) = time(post["data"]["created_utc"].as_f64().unwrap_or_default());
+	// Parse post score and upvote ratio
+	let score = post["data"]["score"].as_i64().unwrap_or_default();
+	let ratio: f64 = post["data"]["upvote_ratio"].as_f64().unwrap_or(1.0) * 100.0;
+
+	// Determine the type of media along with the media URL
+	let (post_type, media, gallery) = Media::parse(&post["data"]).await;
+
+	let awards: Awards = Awards::parse(&post["data"]["all_awardings"]);
+
+	let permalink = val(post, "permalink");
+
+	let body = if val(post, "removed_by_category") == "moderator" {
+		format!(
+			"<div class=\"md\"><p>[removed] — <a href=\"https://www.unddit.com{}\">view removed post</a></p></div>",
+pub async fn error(req: Request, msg: impl ToString) -> Result, String> { let url = req.uri().to_string(); let body = ErrorTemplate { - msg, + msg: msg.to_string(), prefs: Preferences::new(req), url, } @@ -716,8 +831,7 @@ pub async fn error(req: Request, msg: String) -> Result, St #[cfg(test)] mod tests { - use super::format_num; - use super::rewrite_urls; + use super::{format_num, format_url, rewrite_urls}; #[test] fn format_num_works() { @@ -737,4 +851,33 @@ mod tests { r#"https://www.reddit.com/r/linux_gaming/comments/x/just_a_test/"# ) } + + #[test] + fn test_format_url() { + assert_eq!(format_url("https://a.thumbs.redditmedia.com/XYZ.jpg"), "/thumb/a/XYZ.jpg"); + assert_eq!(format_url("https://emoji.redditmedia.com/a/b"), "/emoji/a/b"); + + assert_eq!( + format_url("https://external-preview.redd.it/foo.jpg?auto=webp&s=bar"), + "/preview/external-pre/foo.jpg?auto=webp&s=bar" + ); + + assert_eq!(format_url("https://i.redd.it/foobar.jpg"), "/img/foobar.jpg"); + assert_eq!( + format_url("https://preview.redd.it/qwerty.jpg?auto=webp&s=asdf"), + "/preview/pre/qwerty.jpg?auto=webp&s=asdf" + ); + assert_eq!(format_url("https://v.redd.it/foo/DASH_360.mp4?source=fallback"), "/vid/foo/360.mp4"); + assert_eq!( + format_url("https://v.redd.it/foo/HLSPlaylist.m3u8?a=bar&v=1&f=sd"), + "/hls/foo/HLSPlaylist.m3u8?a=bar&v=1&f=sd" + ); + assert_eq!(format_url("https://www.redditstatic.com/gold/awards/icon/icon.png"), "/static/gold/awards/icon/icon.png"); + + assert_eq!(format_url(""), ""); + assert_eq!(format_url("self"), ""); + assert_eq!(format_url("default"), ""); + assert_eq!(format_url("nsfw"), ""); + assert_eq!(format_url("spoiler"), ""); + } } diff --git a/static/style.css b/static/style.css index d8bebff..500646d 100644 --- a/static/style.css +++ b/static/style.css @@ -154,6 +154,7 @@ main { } #column_one { + width: 100%; max-width: 750px; border-radius: 5px; overflow: inherit; @@ -716,22 +717,39 @@ a.search_subreddit:hover { font-weight: bold; } -.post_media_image, .post .__NoScript_PlaceHolder__, .post_media_video, .gallery { +.post_media_content, .post .__NoScript_PlaceHolder__, .gallery { max-width: calc(100% - 40px); grid-area: post_media; margin: 15px auto 5px auto; + width: auto; height: auto; + overflow: hidden; } - -.post_media_video.short { - max-height: 512px; +.post_media_video { width: auto; + height: auto; + max-width: 100%; + max-height: 512px; + display: block; + margin: auto; } .post_media_image.short svg, .post_media_image.short img{ - max-height: 512px; width: auto; + height: auto; + max-width: 100%; + max-height: 512px; + display: block; + margin: auto; +} + +.post_nsfw_blur { + filter: blur(1.5rem); +} + +.post_nsfw_blur:hover { + filter: none; } .post_media_image svg{ @@ -817,6 +835,16 @@ a.search_subreddit:hover { margin-right: 15px; } +#post_links > li.desktop_item { + display: auto; +} + +@media screen and (min-width: 480px) { + #post_links > li.mobile_item { + display: none; + } +} + .post_thumbnail { border-radius: 5px; border: var(--panel-border); @@ -827,13 +855,25 @@ a.search_subreddit:hover { margin: 5px; } -.post_thumbnail svg { +.post_thumbnail div { grid-area: 1 / 1 / 2 / 2; - width: 100%; - height: auto; object-fit: cover; align-self: center; justify-self: center; + overflow: hidden; +} + +.post_thumbnail div svg { + width: 100%; + height: auto; +} + +.post_thumbnail span { + z-index: 0; +} + +.thumb_nsfw_blur { + filter: blur(0.3rem) } .post_thumbnail.no_thumbnail { @@ -1182,16 +1222,21 @@ input[type="submit"] { color: var(--accent); } -.md .md-spoiler-text { 
+.md .md-spoiler-text, .md-spoiler-text a { background: var(--highlighted); color: transparent; } -.md .md-spoiler-text:hover { +.md-spoiler-text:hover { background: var(--foreground); color: var(--text); } +.md-spoiler-text:hover a { + background: var(--foreground); + color: var(--accent); +} + .md li { margin: 10px 0; } .toc_child { list-style: none; } @@ -1238,6 +1283,29 @@ td, th { #error h3 { opacity: 0.85; } #error a { color: var(--accent); } +/* Messages */ + +#duplicates_msg h3 { + display: inline-block; + margin-top: 10px; + margin-bottom: 10px; + text-align: center; + width: 100%; +} + +/* Warnings */ + +.listing_warn { + display: inline-block; + margin: 10px; + text-align: center; + width: 100%; +} + +.listing_warn a { + color: var(--accent); +} + /* Mobile */ @media screen and (max-width: 800px) { @@ -1338,4 +1406,9 @@ td, th { padding: 7px 0px; margin-right: -5px; } + + #post_links > li { margin-right: 10px } + #post_links > li.desktop_item { display: none } + #post_links > li.mobile_item { display: auto } + .post_footer > p > span#upvoted { display: none } } diff --git a/static/themes/doomone.css b/static/themes/doomone.css new file mode 100644 index 0000000..27cb06f --- /dev/null +++ b/static/themes/doomone.css @@ -0,0 +1,13 @@ +.doomone { + --accent: #51afef; + --green: #00a229; + --text: #bbc2cf; + --foreground: #3d4148; + --background: #282c34; + --outside: #52565c; + --post: #24272e; + --panel-border: 2px solid #52565c; + --highlighted: #686b70; + --visited: #969692; + --shadow: 0 1px 3px rgba(0, 0, 0, 0.1); +} diff --git a/static/themes/gruvboxdark.css b/static/themes/gruvboxdark.css new file mode 100644 index 0000000..fb9e3ee --- /dev/null +++ b/static/themes/gruvboxdark.css @@ -0,0 +1,13 @@ +/* Gruvbox-Dark theme setting */ +.gruvboxdark { + --accent: #8ec07c; + --green: #b8bb26; + --text: #ebdbb2; + --foreground: #3c3836; + --background: #282828; + --outside: #3c3836; + --post: #3c3836; + --panel-border: 1px solid #504945; + --highlighted: #282828; + --shadow: 0 1px 3px rgba(0, 0, 0, 0.5); +} diff --git a/static/themes/gruvboxlight.css b/static/themes/gruvboxlight.css new file mode 100644 index 0000000..d39f8e9 --- /dev/null +++ b/static/themes/gruvboxlight.css @@ -0,0 +1,13 @@ +/* Gruvbox-Light theme setting */ +.gruvboxlight { + --accent: #427b58; + --green: #79740e; + --text: #3c3836; + --foreground: #ebdbb2; + --background: #fbf1c7; + --outside: #ebdbb2; + --post: #ebdbb2; + --panel-border: 1px solid #d5c4a1; + --highlighted: #fbf1c7; + --shadow: 0 1px 3px rgba(0, 0, 0, 0.25); +} diff --git a/templates/base.html b/templates/base.html index f30aaaf..e9b51ec 100644 --- a/templates/base.html +++ b/templates/base.html @@ -19,7 +19,7 @@ - + {% endblock %} + {% call utils::post(post) %} + + + {% if post.num_duplicates == 0 %} + (No duplicates found) + {% else if post.flags.nsfw && prefs.show_nsfw != "on" %} + (Enable "Show NSFW posts" in settings to show duplicates) + {% else %} +

Duplicates

+ {% if num_posts_filtered > 0 %} + + {% if all_posts_filtered %} + (All posts have been filtered) + {% else %} + (Some posts have been filtered) + {% endif %} + + {% endif %} + + + +
+ {% for post in duplicates -%} + {# TODO: utils::post should be reworked to permit a truncated display of a post as below #} + {% if !(post.flags.nsfw) || prefs.show_nsfw == "on" %} +
+

+ {% let community -%} + {% if post.community.starts_with("u_") -%} + {% let community = format!("u/{}", &post.community[2..]) -%} + {% else -%} + {% let community = format!("r/{}", post.community) -%} + {% endif -%} + {{ post.community }} + + + + {{ post.rel_time }} + {% if !post.awards.is_empty() %} + {% for award in post.awards.clone() %} + + {{ award.name }} + + {% endfor %} + {% endif %} +

+

+ {% if post.flair.flair_parts.len() > 0 %} + {% call utils::render_flair(post.flair.flair_parts) %} + {% endif %} + {{ post.title }}{% if post.flags.nsfw %} NSFW{% endif %} +

+ +
{{ post.score.0 }} Upvotes
+ + +
+ {% endif %} + {%- endfor %} +
+ +
+ {% if params.before != "" %} + PREV + {% endif %} + + {% if params.after != "" %} + NEXT + {% endif %} +
+ {% endif %} + +{% endblock %} \ No newline at end of file diff --git a/templates/post.html b/templates/post.html index 227d7fc..d69644b 100644 --- a/templates/post.html +++ b/templates/post.html @@ -13,16 +13,25 @@ - - - - + + {% if post.post_type == "image" %} + + + + {% else if post.post_type == "video" || post.post_type == "gif" %} + + + + + {% else %} + + {% endif %} {% endblock %} {% block subscriptions %} @@ -31,95 +40,7 @@ {% block content %}
- - -
-

- r/{{ post.community }} - - - {% if post.author.flair.flair_parts.len() > 0 %} - {% call utils::render_flair(post.author.flair.flair_parts) %} - {% endif %} - - {{ post.rel_time }} - {% if !post.awards.is_empty() %} - - - {% for award in post.awards.clone() %} - - {{ award.name }} - {{ award.count }} - - {% endfor %} - - {% endif %} -

-

- {{ post.title }} - {% if post.flair.flair_parts.len() > 0 %} - {% call utils::render_flair(post.flair.flair_parts) %} - {% endif %} - {% if post.flags.nsfw %} NSFW{% endif %} -

- - - - {% if post.post_type == "image" %} - - - - - Post image - - - - {% else if post.post_type == "video" || post.post_type == "gif" %} - {% if prefs.use_hls == "on" && !post.media.alt_url.is_empty() %} - - - - {% else %} - - {% call utils::render_hls_notification(post.permalink[1..]) %} - {% endif %} - {% else if post.post_type == "gallery" %} - - {% else if post.post_type == "link" %} - {{ post.media.url }} - {% endif %} - - -
{{ post.body|safe }}
-
{{ post.score.0 }} Upvotes
- -
+ {% call utils::post(post) %}
@@ -138,7 +59,7 @@ {% for c in comments -%}
{% if single_thread %} -

View all comments

+

View all comments

{% if c.parent_kind == "t1" %}

Show parent comments

{% endif %} diff --git a/templates/search.html b/templates/search.html index 9386f35..43fadb4 100644 --- a/templates/search.html +++ b/templates/search.html @@ -56,10 +56,15 @@
{% endif %} {% endif %} + + {% if all_posts_hidden_nsfw %} + All posts are hidden because they are NSFW. Enable "Show NSFW posts" in settings to view. + {% endif %} + {% if all_posts_filtered %} -
(All content on this page has been filtered)
+ (All content on this page has been filtered) {% else if is_filtered %} -
(Content from r/{{ sub }} has been filtered)
+ (Content from r/{{ sub }} has been filtered) {% else if params.typed != "sr_user" %} {% for post in posts %} {% if post.flags.nsfw && prefs.show_nsfw != "on" %} diff --git a/templates/settings.html b/templates/settings.html index 60ee109..ed5809d 100644 --- a/templates/settings.html +++ b/templates/settings.html @@ -54,6 +54,11 @@
+
+ + + +
@@ -110,7 +115,7 @@

Note: settings and subscriptions are saved in browser cookies. Clearing your cookies will reset them.


-

You can restore your current settings and subscriptions after clearing your cookies using this link.

+

You can restore your current settings and subscriptions after clearing your cookies using this link.

diff --git a/templates/subreddit.html b/templates/subreddit.html index e5e8fa1..4fdad65 100644 --- a/templates/subreddit.html +++ b/templates/subreddit.html @@ -46,6 +46,10 @@ {% endif %} + {% if all_posts_hidden_nsfw %} +
All posts are hidden because they are NSFW. Enable "Show NSFW posts" in settings to view.
+ {% endif %} + {% if all_posts_filtered %}
(All content on this page has been filtered)
{% else %} diff --git a/templates/user.html b/templates/user.html index 3097dfd..04dc4e6 100644 --- a/templates/user.html +++ b/templates/user.html @@ -32,6 +32,10 @@ + {% if all_posts_hidden_nsfw %} +
All posts are hidden because they are NSFW. Enable "Show NSFW posts" in settings to view.
+ {% endif %} + {% if all_posts_filtered %}
(All content on this page has been filtered)
{% else %} diff --git a/templates/utils.html b/templates/utils.html index 7864200..87d47a3 100644 --- a/templates/utils.html +++ b/templates/utils.html @@ -61,6 +61,109 @@ {% endif %} {%- endmacro %} +{% macro post(post) -%} + +
+

+ r/{{ post.community }} + + + {% if post.author.flair.flair_parts.len() > 0 %} + {% call render_flair(post.author.flair.flair_parts) %} + {% endif %} + + {{ post.rel_time }} + {% if !post.awards.is_empty() %} + + + {% for award in post.awards.clone() %} + + {{ award.name }} + {{ award.count }} + + {% endfor %} + + {% endif %} +

+

+ {{ post.title }} + {% if post.flair.flair_parts.len() > 0 %} + {% call render_flair(post.flair.flair_parts) %} + {% endif %} + {% if post.flags.nsfw %} NSFW{% endif %} +

+ + + + {% if post.post_type == "image" %} +
+ + + + + Post image + + + +
+ {% else if post.post_type == "video" || post.post_type == "gif" %} + {% if prefs.use_hls == "on" && !post.media.alt_url.is_empty() %} + +
+ +
+ + {% else %} +
+ +
+ {% call render_hls_notification(post.permalink[1..]) %} + {% endif %} + {% else if post.post_type == "gallery" %} + + {% else if post.post_type == "link" %} + {{ post.media.url }} + {% endif %} + + +
{{ post.body|safe }}
+
{{ post.score.0 }} Upvotes
+ +
+{%- endmacro %} + {% macro post_in_list(post) -%}

@@ -94,27 +197,36 @@ {% if (prefs.layout.is_empty() || prefs.layout == "card") && post.post_type == "image" %} - - - - - Post image - - - +

+ + + + + Post image + + + +
{% else if (prefs.layout.is_empty() || prefs.layout == "card") && post.post_type == "gif" %} - +
+ +
{% else if (prefs.layout.is_empty() || prefs.layout == "card") && post.post_type == "video" %} {% if prefs.use_hls == "on" && !post.media.alt_url.is_empty() %} - +
+ +
{% else %} - +
+ +
{% call render_hls_notification(format!("{}%23{}", &self.url[1..].replace("&", "%26").replace("+", "%2B"), post.id)) %} {% endif %} {% else if post.post_type != "self" %} @@ -125,12 +237,14 @@ {% else %} - - - - Thumbnail - - +
+ + + + Thumbnail + + +
{% endif %} {% if post.post_type == "link" %}{{ post.domain }}{% else %}{{ post.post_type }}{% endif %}