diff --git a/.gitignore b/.gitignore index 9e1d25d..d6c3ab0 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,5 @@ wheels/ # Virtual environments .venv -# Custom -*_data/ -*.epub +# Books +books/ diff --git a/README.md b/README.md index 5d868d7..2c131a1 100644 --- a/README.md +++ b/README.md @@ -2,26 +2,52 @@ ![reader3](reader3.png) -A lightweight, self-hosted EPUB reader that lets you read through EPUB books one chapter at a time. This makes it very easy to copy paste the contents of a chapter to an LLM, to read along. Basically - get epub books (e.g. [Project Gutenberg](https://www.gutenberg.org/) has many), open them up in this reader, copy paste text around to your favorite LLM, and read together and along. +A lightweight, self-hosted EPUB/PDF reader that lets you read through EPUB/PDF books one chapter at a time. This makes it very easy to copy paste the contents of a chapter to an LLM, to read along. Basically - get epub/pdf books (e.g. [Project Gutenberg](https://www.gutenberg.org/) has many), open them up in this reader, copy paste text around to your favorite LLM, and read together and along. -This project was 90% vibe coded just to illustrate how one can very easily [read books together with LLMs](https://x.com/karpathy/status/1990577951671509438). I'm not going to support it in any way, it's provided here as is for other people's inspiration and I don't intend to improve it. Code is ephemeral now and libraries are over, ask your LLM to change it in whatever way you like. +This project was 90% vibe coded just to illustrate how one can very easily [read books together with LLMs](https://x.com/karpathy/status/1990577951671509438) by Mr. Karpathy. + +After branching off, I added pdf support, and a chat pane to allow the similar behavior like Gemini pane when you enable it inside Chrome browser. Currently, it only supports querying with selected text automatically to remote LLMs of your choice. 
Some interesting ideas could start from here, like how to do prompt and context management for the LLMs to make it more effective. ## Usage -The project uses [uv](https://docs.astral.sh/uv/). So for example, download [Dracula EPUB3](https://www.gutenberg.org/ebooks/345) to this directory as `dracula.epub`, then: +The project uses [uv](https://docs.astral.sh/uv/). All books (source files and processed data) live in the `books/` subdirectory. For example, download [Dracula EPUB3](https://www.gutenberg.org/ebooks/345), then: + +```bash +uv run reader3.py ~/Downloads/dracula.epub +``` + +This creates the directory `books/dracula_data`, which registers the book to your local library. + +Similarly, you can also import PDF files. Just run the same command on a `.pdf` file: + +```bash +uv run reader3.py ~/Downloads/mydocument.pdf +``` + +### Testing + +Run the integration test suite to verify the application: ```bash -uv run reader3.py dracula.epub +uv run pytest ``` -This creates the directory `dracula_data`, which registers the book to your local library. We can then run the server: +### Running the Server + +We can then run the server: ```bash uv run server.py ``` -And visit [localhost:8123](http://localhost:8123/) to see your current Library. You can easily add more books, or delete them from your library by deleting the folder. It's not supposed to be complicated or complex. +To stop the server: + +```bash +uv run stop_server.py +``` + +And visit [localhost:8123](http://localhost:8123/) to see your current Library. You can easily add more books, or delete them from your library by deleting their folder under `books/`. It's not supposed to be complicated or complex. 
## License -MIT \ No newline at end of file +MIT diff --git a/annotations.py b/annotations.py new file mode 100644 index 0000000..1caa3fa --- /dev/null +++ b/annotations.py @@ -0,0 +1,104 @@ +import os +import json +import uuid +from datetime import datetime +from typing import List, Optional, Literal +from pydantic import BaseModel, Field + +# --- Data Models --- + +class AnnotationTarget(BaseModel): + chapter_index: int + # For EPUB: + cfi: Optional[str] = None + quote: Optional[str] = None + # For PDF: + page_num: Optional[int] = None + rect: Optional[List[float]] = None + +class ChatMessage(BaseModel): + role: str + content: str + +class AnnotationContent(BaseModel): + text: Optional[str] = None # Markdown string for notes + color: Optional[str] = None # e.g. "#ffff00" + chat_messages: Optional[List[ChatMessage]] = None + +class Annotation(BaseModel): + id: str = Field(default_factory=lambda: str(uuid.uuid4())) + created_at: str = Field(default_factory=lambda: datetime.utcnow().isoformat()) + type: Literal['highlight', 'note', 'chat_thread'] + target: AnnotationTarget + content: AnnotationContent + +# --- Storage Logic --- + +def _get_annotations_path(books_dir: str, book_id: str) -> str: + return os.path.join(books_dir, book_id, "annotations.json") + +def load_annotations(books_dir: str, book_id: str) -> List[Annotation]: + path = _get_annotations_path(books_dir, book_id) + if not os.path.exists(path): + return [] + + try: + with open(path, "r", encoding="utf-8") as f: + raw_data = json.load(f) + return [Annotation(**item) for item in raw_data] + except Exception as e: + print(f"Error loading annotations for {book_id}: {e}") + return [] + +def save_annotation_to_disk(books_dir: str, book_id: str, new_annotation: Annotation): + # Load existing + annotations = load_annotations(books_dir, book_id) + annotations.append(new_annotation) + + # Save back + path = _get_annotations_path(books_dir, book_id) + os.makedirs(os.path.dirname(path), exist_ok=True) + try: + 
with open(path, "w", encoding="utf-8") as f: + # dumping model_dump(mode='json') handles datetime/uuid serialization + json.dump([a.model_dump(mode='json') for a in annotations], f, indent=2) + except Exception as e: + print(f"Error saving annotation for {book_id}: {e}") + raise e + +def delete_annotation_from_disk(books_dir: str, book_id: str, annotation_id: str): + annotations = load_annotations(books_dir, book_id) + filtered = [a for a in annotations if a.id != annotation_id] + + if len(filtered) == len(annotations): + return False # ID not found + + path = _get_annotations_path(books_dir, book_id) + try: + with open(path, "w", encoding="utf-8") as f: + json.dump([a.model_dump(mode='json') for a in filtered], f, indent=2) + return True + except Exception as e: + print(f"Error deleting annotation for {book_id}: {e}") + raise e + +def update_annotation_in_disk(books_dir: str, book_id: str, updated_annotation: Annotation): + annotations = load_annotations(books_dir, book_id) + found = False + for i, a in enumerate(annotations): + if a.id == updated_annotation.id: + annotations[i] = updated_annotation + found = True + break + + if not found: + return False + + path = _get_annotations_path(books_dir, book_id) + try: + with open(path, "w", encoding="utf-8") as f: + json.dump([a.model_dump(mode='json') for a in annotations], f, indent=2) + return True + except Exception as e: + print(f"Error updating annotation for {book_id}: {e}") + raise e diff --git a/pyproject.toml b/pyproject.toml index 31e6179..6cdd22f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,9 +5,14 @@ description = "Simple EPUB reader web app" readme = "README.md" requires-python = ">=3.10" dependencies = [ + "anthropic>=0.79.0", "beautifulsoup4>=4.14.2", "ebooklib>=0.20", "fastapi>=0.121.2", + "httpx>=0.28.1", "jinja2>=3.1.6", + "openai>=2.20.0", + "pydantic>=2.12.4", + "pymupdf>=1.27.1", "uvicorn>=0.38.0", ] diff --git a/reader3.png b/reader3.png index 45aac09..571fa0d 100644 Binary files 
a/reader3.png and b/reader3.png differ diff --git a/reader3.py b/reader3.py index d0b9d3f..2367ad9 100644 --- a/reader3.py +++ b/reader3.py @@ -13,6 +13,7 @@ import ebooklib from ebooklib import epub from bs4 import BeautifulSoup, Comment +import fitz # PyMuPDF # --- Data structures --- @@ -283,6 +284,52 @@ def process_epub(epub_path: str, output_dir: str) -> Book: return final_book +def process_pdf(pdf_path: str, output_dir: str) -> Book: + """ + Extracts metadata from a PDF and returns a Book object. + Does NOT convert pages to HTML/Images. + """ + print(f"Processing PDF {pdf_path}...") + + # 1. Load PDF + doc = fitz.open(pdf_path) + + # 2. Extract Metadata + meta = doc.metadata + + # PyMuPDF metadata keys: format, title, author, subject, keywords, creator, producer, creationDate, modDate + metadata = BookMetadata( + title=meta.get('title') or os.path.basename(pdf_path).replace('.pdf', ''), + language="en", # default + authors=[meta.get('author')] if meta.get('author') else [], + description=meta.get('subject'), + publisher=meta.get('producer'), + date=meta.get('creationDate'), + identifiers=[], + subjects=meta.get('keywords', '').split(',') if meta.get('keywords') else [] + ) + + # 3. Create Output Directory + if os.path.exists(output_dir): + shutil.rmtree(output_dir) + os.makedirs(output_dir, exist_ok=True) + + # Save absolute path to original file in the book object? 
+ # Or better: Copy the PDF to the output directory so it's self-contained + shutil.copy2(pdf_path, os.path.join(output_dir, "original.pdf")) + + final_book = Book( + metadata=metadata, + spine=[], + toc=[], + images={}, + source_file="original.pdf", # We will look for this in server + processed_at=datetime.now().isoformat() + ) + + return final_book + + def save_to_pickle(book: Book, output_dir: str): p_path = os.path.join(output_dir, 'book.pkl') with open(p_path, 'wb') as f: @@ -301,9 +348,19 @@ def save_to_pickle(book: Book, output_dir: str): epub_file = sys.argv[1] assert os.path.exists(epub_file), "File not found." - out_dir = os.path.splitext(epub_file)[0] + "_data" - - book_obj = process_epub(epub_file, out_dir) + + # Sanitize the directory name + original_base_name = os.path.splitext(os.path.basename(epub_file))[0] + safe_base_name = "".join([c for c in original_base_name if c.isalnum() or c in '._-']).strip() + base_name = safe_base_name + "_data" + + out_dir = os.path.join("books", base_name) + os.makedirs("books", exist_ok=True) + + if epub_file.lower().endswith('.pdf'): + book_obj = process_pdf(epub_file, out_dir) + else: + book_obj = process_epub(epub_file, out_dir) save_to_pickle(book_obj, out_dir) print("\n--- Summary ---") print(f"Title: {book_obj.metadata.title}") diff --git a/server.py b/server.py index 9c870dc..072e778 100644 --- a/server.py +++ b/server.py @@ -3,18 +3,37 @@ from functools import lru_cache from typing import Optional -from fastapi import FastAPI, Request, HTTPException -from fastapi.responses import HTMLResponse, FileResponse +from fastapi import FastAPI, Request, HTTPException, Body +from fastapi.responses import HTMLResponse, FileResponse, JSONResponse from fastapi.staticfiles import StaticFiles from fastapi.templating import Jinja2Templates +import httpx +import os +import signal +import sys from reader3 import Book, BookMetadata, ChapterContent, TOCEntry +import json +from typing import List +from pydantic import BaseModel + 
+class ProgressUpdate(BaseModel): + chapter_index: int + page_num: int = 1 # For PDFs + scroll_position: float = 0.0 + zoom: float = 100.0 + dual_page: bool = False + +class ChatMessage(BaseModel): + role: str + content: str app = FastAPI() +app.mount("/books", StaticFiles(directory="books"), name="books") templates = Jinja2Templates(directory="templates") # Where are the book folders located? -BOOKS_DIR = "." +BOOKS_DIR = "books" @lru_cache(maxsize=10) def load_book_cached(folder_name: str) -> Optional[Book]: @@ -34,6 +53,181 @@ def load_book_cached(folder_name: str) -> Optional[Book]: print(f"Error loading book {folder_name}: {e}") return None +OLD_PROGRESS_FILE = "reading_progress.json" + +# --- Per-book storage helpers --- + +def _book_dir(book_id: str) -> str: + return os.path.join(BOOKS_DIR, book_id) + +def load_progress(book_id: str) -> dict: + path = os.path.join(_book_dir(book_id), "progress.json") + if not os.path.exists(path): + return {} + try: + with open(path, "r") as f: + return json.load(f) + except Exception as e: + print(f"Error loading progress for {book_id}: {e}") + return {} + +def save_progress_helper(book_id: str, data: dict): + d = _book_dir(book_id) + os.makedirs(d, exist_ok=True) + path = os.path.join(d, "progress.json") + with open(path, "w") as f: + json.dump(data, f, indent=2) + +def load_chat_history(book_id: str) -> list: + path = os.path.join(_book_dir(book_id), "chat_history.json") + if not os.path.exists(path): + return [] + try: + with open(path, "r") as f: + return json.load(f) + except Exception as e: + print(f"Error loading chat history for {book_id}: {e}") + return [] + +def save_chat_history(book_id: str, messages: list): + d = _book_dir(book_id) + os.makedirs(d, exist_ok=True) + path = os.path.join(d, "chat_history.json") + with open(path, "w") as f: + json.dump(messages, f, indent=2) + +def delete_chat_history(book_id: str): + path = os.path.join(_book_dir(book_id), "chat_history.json") + if os.path.exists(path): + 
os.remove(path) + +def migrate_global_progress(): + """One-time migration: split global reading_progress.json into per-book files.""" + if not os.path.exists(OLD_PROGRESS_FILE): + return + try: + with open(OLD_PROGRESS_FILE, "r") as f: + all_data = json.load(f) + for book_id, data in all_data.items(): + # Only migrate if per-book file doesn't already exist + per_book_path = os.path.join(_book_dir(book_id), "progress.json") + if not os.path.exists(per_book_path): + save_progress_helper(book_id, data) + print(f" Migrated progress for: {book_id}") + # Rename old file to .bak + os.rename(OLD_PROGRESS_FILE, OLD_PROGRESS_FILE + ".bak") + print(f"Migration complete. Old file renamed to {OLD_PROGRESS_FILE}.bak") + except Exception as e: + print(f"Error during progress migration: {e}") + +# Run migration on module load +print("Checking for progress migration...") +migrate_global_progress() + +@app.post("/api/progress/{book_id}") +async def save_progress(book_id: str, update: ProgressUpdate): + data = update.model_dump() + save_progress_helper(book_id, data) + return {"status": "ok"} + +@app.get("/api/chat-history/{book_id}") +async def get_chat_history(book_id: str): + messages = load_chat_history(book_id) + return JSONResponse(messages) + +@app.post("/api/chat-history/{book_id}") +async def append_chat_message(book_id: str, message: ChatMessage): + messages = load_chat_history(book_id) + messages.append(message.model_dump()) + save_chat_history(book_id, messages) + return {"status": "ok"} + +@app.delete("/api/chat-history/{book_id}") +async def clear_chat_history(book_id: str): + delete_chat_history(book_id) + return {"status": "ok"} + +# --- Annotations API --- + +from annotations import ( + Annotation, AnnotationContent, AnnotationTarget, ChatMessage, + load_annotations, save_annotation_to_disk, + delete_annotation_from_disk, update_annotation_in_disk +) + +@app.get("/api/annotations/{book_id}") +async def get_annotations(book_id: str): + return 
load_annotations(BOOKS_DIR, book_id) + +@app.post("/api/annotations/{book_id}") +async def create_annotation(book_id: str, annotation: Annotation): + # Ensure ID is unique (it's UUID so unlikely to collide but good practice) + # save_annotation_to_disk simply appends + try: + save_annotation_to_disk(BOOKS_DIR, book_id, annotation) + return {"status": "ok", "id": annotation.id} + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + +@app.delete("/api/annotations/{book_id}/{annotation_id}") +async def delete_annotation(book_id: str, annotation_id: str): + try: + found = delete_annotation_from_disk(BOOKS_DIR, book_id, annotation_id) + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + if not found: + raise HTTPException(status_code=404, detail="Annotation not found") + return {"status": "ok"} + +@app.put("/api/annotations/{book_id}/{annotation_id}") +async def update_annotation(book_id: str, annotation_id: str, annotation: Annotation): + # Ensure ID matches + if annotation.id != annotation_id: + raise HTTPException(status_code=400, detail="ID mismatch") + + try: + found = update_annotation_in_disk(BOOKS_DIR, book_id, annotation) + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + + if not found: + raise HTTPException(status_code=404, detail="Annotation not found") + return {"status": "ok"} + +@app.post("/api/annotations/{book_id}/{annotation_id}/chat") +async def append_annotation_chat(book_id: str, annotation_id: str, message: ChatMessage): + """ + Appends a new message to an existing annotation's chat thread. + Use this for context-aware chatting. 
+ """ + annotations = load_annotations(BOOKS_DIR, book_id) + target_annotation = None + for a in annotations: + if a.id == annotation_id: + target_annotation = a + break + + if not target_annotation: + raise HTTPException(status_code=404, detail="Annotation not found") + + # Ensure chat_messages list exists + if target_annotation.content.chat_messages is None: + target_annotation.content.chat_messages = [] + + target_annotation.content.chat_messages.append(message) + + # Update type if it was just a highlight before? + # Maybe strict typing matters, but for now we just save content. + if target_annotation.type == 'highlight': + target_annotation.type = 'chat_thread' + + try: + update_annotation_in_disk(BOOKS_DIR, book_id, target_annotation) + return {"status": "ok"} + except Exception as e: + raise HTTPException(status_code=500, detail=str(e)) + @app.get("/", response_class=HTMLResponse) async def library_view(request: Request): """Lists all available processed books.""" @@ -42,7 +236,8 @@ async def library_view(request: Request): # Scan directory for folders ending in '_data' that have a book.pkl if os.path.exists(BOOKS_DIR): for item in os.listdir(BOOKS_DIR): - if item.endswith("_data") and os.path.isdir(item): + item_path = os.path.join(BOOKS_DIR, item) + if item.endswith("_data") and os.path.isdir(item_path): # Try to load it to get the title book = load_book_cached(item) if book: @@ -56,9 +251,38 @@ async def library_view(request: Request): return templates.TemplateResponse("library.html", {"request": request, "books": books}) @app.get("/read/{book_id}", response_class=HTMLResponse) -async def redirect_to_first_chapter(book_id: str): - """Helper to just go to chapter 0.""" - return await read_chapter(book_id=book_id, chapter_index=0) +async def redirect_to_first_chapter(request: Request, book_id: str): + """Helper to just go to chapter 0 OR open PDF.""" + book = load_book_cached(book_id) + if not book: + raise HTTPException(status_code=404, detail="Book not 
found") + + # Check if it is a PDF + # We stored "original.pdf" as source_file for PDFs + progress = load_progress(book_id) + + if book.source_file.endswith('.pdf'): + initial_page = progress.get("page_num", 1) + initial_zoom = progress.get("zoom", 1.0) + initial_dual_page = "true" if progress.get("dual_page", False) else "false" + + return templates.TemplateResponse("pdf_reader.html", { + "request": request, + "book": book, + "book_id": book_id, + "pdf_url": f"/books/{book_id}/original.pdf", + "initial_page": initial_page, + "initial_zoom": initial_zoom, + "initial_dual_page": initial_dual_page + }) + + # For EPUB, redirect to last read chapter if available + chapter_idx = progress.get("chapter_index", 0) + # Ensure valid index + if chapter_idx < 0 or chapter_idx >= len(book.spine): + chapter_idx = 0 + + return await read_chapter(request, book_id=book_id, chapter_index=chapter_idx) @app.get("/read/{book_id}/{chapter_index}", response_class=HTMLResponse) async def read_chapter(request: Request, book_id: str, chapter_index: int): @@ -76,6 +300,16 @@ async def read_chapter(request: Request, book_id: str, chapter_index: int): prev_idx = chapter_index - 1 if chapter_index > 0 else None next_idx = chapter_index + 1 if chapter_index < len(book.spine) - 1 else None + # Load progress to restore scroll/zoom if applicable + progress = load_progress(book_id) + initial_scroll = 0 + # Always restore zoom — it's a per-book preference + initial_zoom = progress.get("zoom", 100) + + # Only restore scroll position for the same chapter + if progress.get("chapter_index") == chapter_index: + initial_scroll = progress.get("scroll_position", 0) + return templates.TemplateResponse("reader.html", { "request": request, "book": book, @@ -83,7 +317,32 @@ async def read_chapter(request: Request, book_id: str, chapter_index: int): "chapter_index": chapter_index, "book_id": book_id, "prev_idx": prev_idx, - "next_idx": next_idx + "next_idx": next_idx, + "initial_scroll": initial_scroll, + 
"initial_zoom": initial_zoom + }) + +@app.get("/api/chapter/{book_id}/{chapter_index}") +async def get_chapter_content(book_id: str, chapter_index: int): + """Returns chapter HTML + navigation metadata as JSON for AJAX navigation.""" + book = load_book_cached(book_id) + if not book: + raise HTTPException(status_code=404, detail="Book not found") + + if chapter_index < 0 or chapter_index >= len(book.spine): + raise HTTPException(status_code=404, detail="Chapter not found") + + current_chapter = book.spine[chapter_index] + prev_idx = chapter_index - 1 if chapter_index > 0 else None + next_idx = chapter_index + 1 if chapter_index < len(book.spine) - 1 else None + + return JSONResponse({ + "content": current_chapter.content, + "chapter_index": chapter_index, + "href": current_chapter.href, + "prev_idx": prev_idx, + "next_idx": next_idx, + "total_chapters": len(book.spine) }) @app.get("/read/{book_id}/images/{image_name}") @@ -104,6 +363,87 @@ async def serve_image(book_id: str, image_name: str): return FileResponse(img_path) +@app.post("/api/chat") +async def chat_proxy(payload: dict = Body(...)): + """ + Proxies chat requests to LLM providers to avoid CORS issues. + Payload: { + "provider": "openai" | "anthropic" | "custom", + "apiKey": "sk-...", + "baseUrl": "https://...", + "model": "gpt-4o", + "messages": [...] 
+ } + """ + provider = payload.get("provider") + api_key = payload.get("apiKey") + base_url = payload.get("baseUrl") + model = payload.get("model") + messages = payload.get("messages") + + if not provider or not messages: + raise HTTPException(status_code=400, detail="Missing provider or messages") + + try: + async with httpx.AsyncClient(timeout=60.0) as client: + if provider == "openai": + url = "https://api.openai.com/v1/chat/completions" + headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"} + data = {"model": model or "gpt-4o", "messages": messages} + resp = await client.post(url, json=data, headers=headers) + resp.raise_for_status() + return resp.json() + + elif provider == "anthropic": + url = "https://api.anthropic.com/v1/messages" + headers = { + "x-api-key": api_key, + "anthropic-version": "2023-06-01", + "Content-Type": "application/json" + } + data = {"model": model or "claude-3-5-sonnet-20240620", "messages": messages, "max_tokens": 1024} + resp = await client.post(url, json=data, headers=headers) + resp.raise_for_status() + return resp.json() + + elif provider == "custom": + # For custom, we expect a full URL in baseUrl (e.g. http://localhost:1234/v1/chat/completions) + # Or we can construct it if strictly OpenAI compatible. 
+ # Let's assume user provides full URL for maximum flexibility + if not base_url: + raise HTTPException(status_code=400, detail="Custom provider requires baseUrl") + else: + url = "http://localhost:1234/api/chat/completions" + + heading = {} + if api_key: + heading["Authorization"] = f"Bearer {api_key}" + + # Assume OpenAI format for custom + data = {"model": model, "messages": messages} if model else {"messages": messages} + resp = await client.post(base_url, json=data, headers=heading) + resp.raise_for_status() + return resp.json() + + else: + raise HTTPException(status_code=400, detail="Unknown provider") + + except httpx.HTTPStatusError as e: + print(f"Upstream error: {e.response.text}") + raise HTTPException(status_code=e.response.status_code, detail=f"Upstream error: {e.response.text}") + except Exception as e: + print(f"Proxy error: {e}") + raise HTTPException(status_code=500, detail=str(e)) + + +@app.post("/shutdown") +def shutdown_server(): + """Shuts down the server.""" + print("Shutting down server...") + # Schedule kill + os.kill(os.getpid(), signal.SIGTERM) + return {"message": "Server shutting down"} + if __name__ == "__main__": import uvicorn print("Starting server at http://127.0.0.1:8123") diff --git a/stop_server.py b/stop_server.py new file mode 100644 index 0000000..9ac7b13 --- /dev/null +++ b/stop_server.py @@ -0,0 +1,18 @@ +import httpx +import sys + +def stop_server(): + try: + # Try to call the shutdown endpoint + response = httpx.post("http://127.0.0.1:8123/shutdown", timeout=2.0) + if response.status_code == 200: + print("Shutdown signal sent successfully.") + else: + print(f"Server responded with status code: {response.status_code}") + except httpx.ConnectError: + print("Server is not currently running.") + except Exception as e: + print(f"An error occurred while trying to stop the server: {e}") + +if __name__ == "__main__": + stop_server() diff --git a/templates/components/chat_component.html b/templates/components/chat_component.html 
new file mode 100644 index 0000000..7060202 --- /dev/null +++ b/templates/components/chat_component.html @@ -0,0 +1,495 @@ + + + + + +
+ +
+ AI Assistant +
+ + +
+
+ +
+
+ Select text to ask about it, or just type a question. +
+
+ +
+
+ + +
+
+ + +
+
+
+ + +
+ +
+ + + \ No newline at end of file diff --git a/templates/components/right_sidebar.html b/templates/components/right_sidebar.html new file mode 100644 index 0000000..53e72b2 --- /dev/null +++ b/templates/components/right_sidebar.html @@ -0,0 +1,392 @@ + + + + + + + + + \ No newline at end of file diff --git a/templates/library.html b/templates/library.html index e7d094d..0def351 100644 --- a/templates/library.html +++ b/templates/library.html @@ -1,27 +1,79 @@ + My Library +

Library

{% if not books %} -

No processed books found. Run reader3.py on an epub first.

+

No processed books found. Run reader3.py on an epub first.

{% endif %}
@@ -32,10 +84,24 @@

Library

{{ book.author }}
{{ book.chapters }} sections
- Read Book + Read Book
{% endfor %} + - + + \ No newline at end of file diff --git a/templates/pdf_reader.html b/templates/pdf_reader.html new file mode 100644 index 0000000..0e5b04e --- /dev/null +++ b/templates/pdf_reader.html @@ -0,0 +1,1029 @@ + + + + + + + {{ book.metadata.title }} + + + + + + + + +
+
+ ← Back + {{ book.metadata.title }} +
+ + +
+ + + Page + + / -- + + + + + + + + + + +
+ +
+ +
+
+ +
+ + + + +
+
+
+
+ +
+
+
+ +
+
+
+ + + + + + + + + {% include "components/right_sidebar.html" %} +
+ + + + + + + + + + + + \ No newline at end of file diff --git a/templates/reader.html b/templates/reader.html index c012edc..512cb11 100644 --- a/templates/reader.html +++ b/templates/reader.html @@ -1,154 +1,1191 @@ + {{ book.metadata.title }} + - -