diff options
| author | A Farzat <a@farzat.xyz> | 2026-03-02 13:24:38 +0300 |
|---|---|---|
| committer | A Farzat <a@farzat.xyz> | 2026-03-02 13:24:38 +0300 |
| commit | 10f1db4541e40ef776e41f572ff07a5ed64205ca (patch) | |
| tree | 38b4436d5f1fe641f52218959b5cf895e2f1a1c9 | |
| parent | 98532dc37ae9f72e8970d8d67e6bab9545596650 (diff) | |
| download | oreilly-epub-10f1db4541e40ef776e41f572ff07a5ed64205ca.tar.gz oreilly-epub-10f1db4541e40ef776e41f572ff07a5ed64205ca.zip | |
Add file and chapter details fetching
Fetching file details allows us to download them to the correct location
later.
Chapter details are currently limited to is_skippable, which might be
used to determine whether to make the corresponding spine item
non-linear. Other details such as the title will be fetched from the
spine and table-of-contents endpoints later.
| -rw-r--r-- | src/main.rs | 34 | ||||
| -rw-r--r-- | src/models.rs | 27 |
2 files changed, 60 insertions, 1 deletion
diff --git a/src/main.rs b/src/main.rs index 165d6be..30f08ff 100644 --- a/src/main.rs +++ b/src/main.rs @@ -4,7 +4,7 @@ mod models; use anyhow::{Context, Result}; use clap::Parser; use http_client::build_authenticated_client; -use models::{EpubResponse, SearchResponse}; +use models::{Chapter, EpubResponse, FileEntry, Paginated, SearchResponse}; use reqwest::Client; /// Download and generate an EPUB from Safari Books Online. @@ -50,6 +50,34 @@ async fn fetch_epub_data(client: &Client, bookid: &str) -> Result<EpubResponse> Ok(response) } +/// Fetch a paginated API. +async fn fetch_all_pages<T>(client: &reqwest::Client, mut url: String) -> Result<Vec<T>> +where + T: serde::de::DeserializeOwned, +{ + let mut items = Vec::new(); + loop { + // GET current URL and deserialize into Paginated<T>. + let response = client + .get(&url) + .send() + .await? + .error_for_status()? + .json::<Paginated<T>>() + .await + .context("Failed to deserialize API response.")?; + // Extend items with the page's results. + items.extend(response.results); + // Set url to next page if available, else break. 
+ if let Some(next) = response.next { + url = next; + } else { + break; + } + } + Ok(items) +} + #[tokio::main] async fn main() -> Result<()> { // Parse the command line arguments @@ -81,5 +109,9 @@ async fn main() -> Result<()> { println!("Resources URL: {}", epub_data.files); println!("------------------\n"); + println!("Fetching book structure..."); + let chapters: Vec<Chapter> = fetch_all_pages(&client, epub_data.chapters.clone()).await?; + let file_entries: Vec<FileEntry> = fetch_all_pages(&client, epub_data.files.clone()).await?; + Ok(()) } diff --git a/src/models.rs b/src/models.rs index 77afeb2..174b983 100644 --- a/src/models.rs +++ b/src/models.rs @@ -23,3 +23,30 @@ pub struct EpubResponse { pub chapters: String, // This is a URL to the chapters list pub files: String, // This is a URL to the resource files } + +// --- Generic Model for paginated API --- + +#[derive(Debug, serde::Deserialize)] +pub struct Paginated<T> { + pub next: Option<String>, + pub results: Vec<T>, +} + +/// Model for chapters API. +#[derive(Debug, Deserialize)] +pub struct Chapter { + pub ourn: String, + pub is_skippable: bool, +} + +/// Model for files API. +#[derive(Debug, Deserialize)] +pub struct FileEntry { + pub ourn: String, + pub url: String, + pub full_path: String, + pub media_type: String, + pub filename: String, + pub filename_ext: String, + pub kind: String, +} |
