def read_pdf(filepath):
    """Take a filepath to a PDF and return a string of the PDF's contents."""
    # Create a PDF reader object
    reader = PdfReader(filepath)
    pdf_text = ""
    for page_number, page in enumerate(reader.pages, start=1):
        pdf_text += page.extract_text() + f"\nPage Number: {page_number}"
    return pdf_text
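
# Illustrative usage (the path below is hypothetical; it assumes a PDF has already been
# downloaded, e.g. by get_articles, into the notebook's data directory):
# paper_text = read_pdf("./data/papers/example_paper.pdf")
# print(paper_text[:500])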

# Split a text into smaller chunks of size n, preferably ending at the end of a sentence
def create_chunks(text, n, tokenizer):
    """Yield successive chunks of roughly n tokens from the provided text,
    preferring to break at the end of a sentence."""
    tokens = tokenizer.encode(text)
    i = 0
    while i < len(tokens):
        # Find the nearest end of sentence within a range of 0.5 * n and 1.5 * n tokens
        j = min(i + int(1.5 * n), len(tokens))
        while j > i + int(0.5 * n):
            # Decode the tokens and check for a full stop or newline
            chunk = tokenizer.decode(tokens[i:j])
            if chunk.endswith(".") or chunk.endswith("\n"):
                break
            j -= 1
        # If no end of sentence is found, fall back to n tokens as the chunk size
        if j == i + int(0.5 * n):
            j = min(i + n, len(tokens))
        yield tokens[i:j]
        i = j
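
# Quick, self-contained sketch of how create_chunks is meant to be used (assumes only
# that tiktoken is importable; the sample text and chunk size are illustrative):
# enc = tiktoken.get_encoding("cl100k_base")
# sample = "First sentence. Second sentence. Third sentence."
# sample_chunks = [enc.decode(c) for c in create_chunks(sample, 10, enc)]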

def extract_chunk(content, template_prompt):
    """Apply a prompt to some input content; here it returns a summary of a chunk of text."""
    prompt = template_prompt + content
    response = openai.ChatCompletion.create(
        model=GPT_MODEL, messages=[{"role": "user", "content": prompt}], temperature=0
    )
    return response["choices"][0]["message"]["content"]
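
# Illustrative call (assumes GPT_MODEL and an OpenAI API key are configured earlier in
# the notebook; the content string is hypothetical):
# extract_chunk("The attention mechanism ...", "Summarize this text:\n\nContent:")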

def summarize_text(query):
    """This function does the following:
    - Reads in the arxiv_library.csv file, including the embeddings
    - Finds the file closest to the user's query
    - Scrapes the text out of the file and chunks it
    - Summarizes each chunk in parallel
    - Produces one final summary and returns it to the user"""
    # A prompt to dictate how the recursive summarizations should approach the input paper
    summary_prompt = """Summarize this text from an academic paper. Extract any key points with reasoning.\n\nContent:"""

    # If the library is empty (no searches have been performed yet), perform one and download the results
    library_df = pd.read_csv(paper_dir_filepath).reset_index()
    if len(library_df) == 0:
        print("No papers searched yet, downloading first.")
        get_articles(query)
        print("Papers downloaded, continuing")
        library_df = pd.read_csv(paper_dir_filepath).reset_index()
    library_df.columns = ["title", "filepath", "embedding"]
    library_df["embedding"] = library_df["embedding"].apply(ast.literal_eval)
    strings = strings_ranked_by_relatedness(query, library_df, top_n=1)
    print("Chunking text from paper")
    pdf_text = read_pdf(strings[0])

    # Initialise tokenizer
    tokenizer = tiktoken.get_encoding("cl100k_base")
    results = ""

    # Chunk up the document into 1500-token chunks
    chunks = create_chunks(pdf_text, 1500, tokenizer)
    text_chunks = [tokenizer.decode(chunk) for chunk in chunks]
    print("Summarizing each chunk of text")

    # Process the chunk summaries in parallel, one thread per chunk
    with concurrent.futures.ThreadPoolExecutor(
        max_workers=len(text_chunks)
    ) as executor:
        futures = [
            executor.submit(extract_chunk, chunk, summary_prompt)
            for chunk in text_chunks
        ]
        with tqdm(total=len(text_chunks)) as pbar:
            for _ in concurrent.futures.as_completed(futures):
                pbar.update(1)
        # Collect the chunk summaries in their original order
        for future in futures:
            data = future.result()
            results += data

    # Final summary over the concatenated chunk summaries
    print("Summarizing into overall summary")
    response = openai.ChatCompletion.create(
        model=GPT_MODEL,
        messages=[
            {
                "role": "user",
                "content": f"""Write a summary collated from this collection of key points extracted from an academic paper.
The summary should highlight the core argument, conclusions and evidence, and answer the user's query.
User query: {query}
The summary should be structured in bulleted lists following the headings Core Argument, Evidence, and Conclusions.
Key points:\n{results}\nSummary:\n""",
            }
        ],
        temperature=0,
    )
    return response
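
# Illustrative end-to-end usage (assumes the arXiv library CSV, data directory and
# OpenAI API key from the earlier cells are set up; the query string is hypothetical):
# summary_response = summarize_text("how does PPO work for sequence generation?")
# print(summary_response["choices"][0]["message"]["content"])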