main #1

مفتوح
MaherGIsmaeelSayed يريد دمج 0 التزامات من main في master
7 ملفات معدلة مع 381 إضافة و175 حذفًا

174
.gitignore معدّل
عرض الملف

@@ -1,176 +1,2 @@
# ---> Python
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# Ruff stuff:
.ruff_cache/
# PyPI configuration file
.pypirc

عرض الملف

@@ -1,2 +1,21 @@
# PDFSummerizorV1
# PDF Summarizer V1
This project is a command-line tool to summarize PDF documents using a two-step process with AI models.
## How it works
1. **Extracts Text**: The tool first extracts the text content from the provided PDF file.
2. **Stores Embeddings**: The text is split into chunks, embedded, and uploaded to a remote serverless vector store.
3. **Contextual Summary**: The most relevant chunks are retrieved for your query and summarized by a generative AI model (e.g., `DeepSeek-V3-0324`), matching the retrieval-augmented flow implemented in `app.py`.
## How to use
1. Install the required dependencies:
```bash
pip install -r requirements.txt
```
2. Place your PDF file in the `uploads` directory.
3. Run the application from your terminal:
```bash
python app.py uploads/your_file.pdf
```

148
app.py Normal file
عرض الملف

@@ -0,0 +1,148 @@
import os
import argparse
import requests
import json
from openai import OpenAI
import PyPDF2
from dotenv import load_dotenv
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
# Load environment variables (OPENAI_API_KEY) from a local .env file.
load_dotenv()

# --- Configuration ---
# Remote serverless vector-store (GitPasha) endpoint used for /insert and /search.
GITPASHA_HOST = "https://serverless-store-77838979b96f.hosted.ghaymah.systems"

# Client for final summarization
# NOTE(review): base_url points at an OpenAI-compatible gateway — confirm the
# key in OPENAI_API_KEY is the gateway's key, not an openai.com key.
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
    base_url="https://genai.ghaymah.systems"
)

# Client for creating embeddings
embeddings = OpenAIEmbeddings(
    openai_api_key=os.environ.get("OPENAI_API_KEY"),
    openai_api_base="https://genai.ghaymah.systems"
)
def extract_text_from_pdf(pdf_path):
    """Read a PDF from disk and return its concatenated page text.

    Returns None when the file is missing or PyPDF2 cannot parse it.
    """
    print(f"Extracting text from {pdf_path}...")
    collected = []
    try:
        with open(pdf_path, "rb") as handle:
            for page in PyPDF2.PdfReader(handle).pages:
                extracted = page.extract_text()
                # Some pages yield None/empty text; skip them.
                if extracted:
                    collected.append(extracted)
    except FileNotFoundError:
        print(f"Error: The file at {pdf_path} was not found.")
        return None
    except Exception as e:
        print(f"An error occurred while reading the PDF: {e}")
        return None
    print("Text extraction complete.")
    return "".join(collected)
def store_text_chunks(text):
    """Split *text* into chunks, embed them, and upload them to GitPasha.

    Returns:
        True when the /insert call succeeds, False on any failure.
    """
    print("Splitting text into chunks...")
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    chunks = splitter.split_text(text)

    print(f"Creating embeddings for {len(chunks)} chunks...")
    try:
        chunk_vectors = embeddings.embed_documents(chunks)
        payloads = [{"text_chunk": chunk} for chunk in chunks]
    except Exception as e:
        print(f"Failed to create embeddings: {e}")
        return False

    print("Uploading vectors and payloads to GitPasha...")
    try:
        response = requests.post(
            f"{GITPASHA_HOST}/insert",
            json={"vectors": chunk_vectors, "payloads": payloads},
            headers={"Content-Type": "application/json"},
            timeout=60,  # don't hang forever on a stalled remote host
        )
        # raise_for_status() raises HTTPError for every 4xx/5xx status, so the
        # explicit `status_code == 200` re-check the original did was dead code.
        response.raise_for_status()
        print("→ POST /insert:", response.status_code, response.text)
        print("Upload complete ✅")
        return True
    except requests.exceptions.RequestException as e:
        print(f"An error occurred while calling the /insert API: {e}")
        return False
def summarize_with_context(query, model="DeepSeek-V3-0324"):
    """Embed *query*, retrieve matching chunks from GitPasha, and summarize.

    Args:
        query: Natural-language question about the stored document.
        model: Chat model name used for the final summarization call.

    Returns:
        The model's answer string, a fallback message when no context is
        found, or None on any embedding/search/LLM failure.
    """
    print(f"Creating embedding for query: '{query}'")
    try:
        query_vector = embeddings.embed_query(query)
    except Exception as e:
        print(f"Failed to create query embedding: {e}")
        return None

    print("Retrieving relevant context from GitPasha...")
    try:
        response = requests.post(
            f"{GITPASHA_HOST}/search",
            json={"vector": query_vector, "k": 4},
            headers={"Content-Type": "application/json"},
            timeout=60,  # don't hang forever on a stalled remote host
        )
        response.raise_for_status()
        print("→ POST /search:", response.status_code)
        search_results = response.json()
    except requests.exceptions.RequestException as e:
        print(f"An error occurred while calling the /search API: {e}")
        return None

    # Guard against both a missing 'results' key AND an empty result list;
    # the original only checked for the key, so an empty list produced an
    # empty context instead of the fallback message.
    if not search_results or 'results' not in search_results or not search_results['results']:
        print("No relevant context found.")
        return "Could not find any relevant context to generate a summary."

    context = "\n\n".join(
        result['payload']['text_chunk'] for result in search_results['results']
    )

    print("Generating final summary...")
    try:
        response = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": "You are a helpful assistant that summarizes documents based on the provided context."},
                {"role": "user", "content": f"Based on the following context, please answer the question.\n\nContext:\n{context}\n\nQuestion: {query}"}
            ]
        )
        return response.choices[0].message.content
    except Exception as e:
        print(f"An error occurred during final summarization: {e}")
        return None
def main():
    """CLI entry point: extract text, index it remotely, then summarize."""
    parser = argparse.ArgumentParser(
        description="Summarize a PDF using a remote Serverless Vector Store + AI."
    )
    parser.add_argument("pdf_path", help="Path to the PDF file.")
    parser.add_argument(
        "--query",
        help="Custom question about the PDF.",
        default="Summarize the key points of this document.",
    )
    args = parser.parse_args()

    # Step 1: pull the raw text out of the PDF; bail out if nothing came back.
    pdf_text = extract_text_from_pdf(args.pdf_path)
    if not pdf_text:
        print("Aborting due to empty text from PDF.")
        return

    # Step 2: chunk, embed, and push the document into the vector store.
    if not store_text_chunks(pdf_text):
        print("Aborting due to failure in storing document.")
        return

    # Step 3: retrieve context for the query and print the summary.
    summary = summarize_with_context(args.query)
    if summary:
        print("\n--- Contextual Summary ---")
        print(summary)


if __name__ == "__main__":
    main()

179
app2.py Normal file
عرض الملف

@@ -0,0 +1,179 @@
import os
import uvicorn
import requests
from openai import OpenAI
import PyPDF2
from dotenv import load_dotenv
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from fastapi import FastAPI, UploadFile, File, Form, HTTPException
from fastapi.responses import JSONResponse
# Load environment variables (OPENAI_API_KEY) from a local .env file.
load_dotenv()

# --- Configuration ---
# Remote GitPasha vector-store endpoint used for /insert and /search calls.
GITPASHA_HOST = "https://rag-app-fa66b3d8eb83.hosted.ghaymah.systems"

# Initialize FastAPI app
app = FastAPI(
    title="Remote PDF Summarizer API",
    description="Upload a PDF and get a summary using a remote RAG pipeline.",
    version="2.1.0"
)

# Client for final summarization
client = OpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
    base_url="https://genai.ghaymah.systems"
)

# Use a local embedding model that matches the remote vector store's expected dimension
# NOTE(review): the model is downloaded/loaded at import time, which blocks
# startup on first run — confirm this is acceptable for the deployment target.
print("Initializing local embedding model (jinaai/jina-embeddings-v2-small-en)...")
embeddings = HuggingFaceEmbeddings(model_name="jinaai/jina-embeddings-v2-small-en")
print("Embedding model loaded.")
# --- Helper Functions ---
def extract_text_from_pdf(pdf_stream):
    """Extract and whitespace-normalize the text of an open PDF stream.

    Raises:
        HTTPException: 500 when the stream cannot be parsed as a PDF.
    """
    print("Extracting text from PDF stream...")
    try:
        reader = PyPDF2.PdfReader(pdf_stream)
        pieces = []
        for page in reader.pages:
            chunk = page.extract_text()
            if chunk:
                pieces.append(chunk)
        print("Text extraction complete.")
        # Collapse every run of whitespace into a single space for plain text.
        return ' '.join("".join(pieces).split())
    except Exception as e:
        print(f"An error occurred while reading the PDF: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to read PDF content: {e}")
def store_text_chunks_remote(text):
    """Split *text* into chunks, embed them locally, and upload to GitPasha.

    Returns:
        True on successful upload; False when *text* is empty.

    Raises:
        HTTPException: 500 on embedding failure or remote-store errors.
    """
    if not text:
        print("Skipping storage: No text provided.")
        return False

    print("Splitting text into chunks...")
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    chunks = splitter.split_text(text)

    print(f"Creating embeddings for {len(chunks)} chunks...")
    try:
        # Ensure the embedding model produces vectors of the dimension the
        # remote store expects (512 for jina-embeddings-v2-small-en).
        chunk_vectors = embeddings.embed_documents(chunks)
        payloads = [{"text_chunk": chunk} for chunk in chunks]
    except Exception as e:
        print(f"Failed to create embeddings: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to create text embeddings: {e}")

    print("Uploading vectors and payloads to remote GitPasha vector store...")
    try:
        response = requests.post(
            f"{GITPASHA_HOST}/insert",
            json={"vectors": chunk_vectors, "payloads": payloads},
            headers={"Content-Type": "application/json"},
            timeout=60,  # don't hang forever on a stalled remote host
        )
        # raise_for_status() raises HTTPError for every 4xx/5xx, so the
        # original `status_code == 200` else-branch was unreachable dead code.
        response.raise_for_status()
        print(f"→ POST /insert: {response.status_code}")
        print("Upload complete ✅")
        return True
    except requests.exceptions.RequestException as e:
        print(f"An error occurred while calling the remote /insert API: {e}")
        raise HTTPException(status_code=500, detail=f"Error connecting to remote vector store: {e}")
def get_summary_from_remote_rag(
    query: str,
    model: str = "DeepSeek-V3-0324"
):
    """Embed *query*, fetch matching chunks from the remote store, and ask
    the LLM to answer using that retrieved context.

    Returns:
        The model's answer, or a fallback message when no context matches.

    Raises:
        HTTPException: 500 on embedding, search, or LLM failure.
    """
    print(f"Creating embedding for query: '{query}'")
    try:
        query_vector = embeddings.embed_query(query)
    except Exception as e:
        print(f"Failed to create query embedding: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to create query embedding: {e}")

    print("Retrieving relevant context from remote GitPasha vector store...")
    try:
        response = requests.post(
            f"{GITPASHA_HOST}/search",
            json={"vector": query_vector, "k": 4},
            headers={"Content-Type": "application/json"}
        )
        response.raise_for_status()
        search_results = response.json()
    except requests.exceptions.RequestException as e:
        print(f"An error occurred while calling the remote /search API: {e}")
        raise HTTPException(status_code=500, detail=f"Error searching remote vector store: {e}")

    # Bail out with a friendly message for a missing key or empty hit list.
    no_hits = (not search_results
               or 'results' not in search_results
               or not search_results['results'])
    if no_hits:
        print("No relevant context found.")
        return "Could not find any relevant context to generate a summary for the query."

    context = "\n\n".join(
        hit['payload']['text_chunk'] for hit in search_results['results']
    )

    print("Generating final summary using remote LLM...")
    try:
        completion_response = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": "You are a helpful assistant that summarizes documents based on the provided context."},
                {"role": "user", "content": f"Based on the following context, please answer the question.\n\nContext:\n{context}\n\nQuestion: {query}"}
            ]
        )
        return completion_response.choices[0].message.content
    except Exception as e:
        print(f"An error occurred during final summarization: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to generate summary from AI model: {e}")
# --- API Endpoints ---
@app.post("/summarize/")
async def summarize_pdf(
    file: UploadFile = File(...),
    query: str = Form("Summarize the key points of this document.")
):
    """
    Accepts a PDF file and a query, then returns a summary.
    """
    # Reject anything not declared as a PDF before touching the body.
    if file.content_type != "application/pdf":
        raise HTTPException(status_code=400, detail="Invalid file type. Please upload a PDF.")
    try:
        # Pipeline: extract text → store chunks remotely → RAG summary.
        pdf_text = extract_text_from_pdf(file.file)
        stored = store_text_chunks_remote(pdf_text)
        if not stored:
            raise HTTPException(status_code=500, detail="Failed to process and store the document in remote vector store.")
        summary = get_summary_from_remote_rag(query)
        return JSONResponse(content={"summary": summary})
    except HTTPException as e:
        # Re-raise HTTPException to be handled by FastAPI
        raise e
    except Exception as e:
        # Anything else is an unexpected server-side failure.
        print(f"An unexpected error occurred: {e}")
        raise HTTPException(status_code=500, detail=f"An unexpected server error occurred: {e}")
@app.get("/")
def read_root():
    """Landing endpoint that points users at the interactive /docs page."""
    return {"message": "Welcome to the Remote PDF Summarizer API. Use /docs for documentation."}

# --- Main execution ---
if __name__ == "__main__":
    # Serve on all interfaces, port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)

6
requirements.txt Normal file
عرض الملف

@@ -0,0 +1,6 @@
openai
PyPDF2
python-dotenv
langchain
langchain-community
sentence-transformers
fastapi
uvicorn
python-multipart
tiktoken
requests

28
temp.py Normal file
عرض الملف

@@ -0,0 +1,28 @@
import requests
import json
import random
# Remote vector-store endpoint probed by this script.
HOST = "https://rag-app-fa66b3d8eb83.hosted.ghaymah.systems"
# Dimension the remote store expects for every inserted vector.
N_DIM = 512


def random_vector():
    """Build one N_DIM-long list of uniform random floats in [0.0, 1.0)."""
    values = []
    for _ in range(N_DIM):
        values.append(random.random())
    return values
# Create a sample 512-dimensional vector and a dummy payload
vectors_to_insert = [random_vector()]
payloads = [{"test_data": "This is a test payload for 512-dim vector."}]

# Echo the vector so a failing insert can be reproduced by hand.
print(vectors_to_insert)

print(f"Attempting to send a {N_DIM}-dimensional vector to {HOST}/insert...")
try:
    insert_resp = requests.post(
        f"{HOST}/insert",
        json={"vectors": vectors_to_insert, "payloads": payloads},
        headers={"Content-Type": "application/json"}
    )
    insert_resp.raise_for_status()  # Raise an exception for bad status codes
    print("Response Status Code:", insert_resp.status_code)
    print("Response Body:", insert_resp.text)
except requests.exceptions.RequestException as e:
    print(f"An error occurred: {e}")

ثنائية
uploads/test1.pdf Normal file

ملف ثنائي غير معروض.