Update app.py
Browse files
app.py
CHANGED
|
@@ -16,28 +16,27 @@ BRAVE_ENDPOINT = "https://api.search.brave.com/res/v1/web/search"
|
|
| 16 |
IMAGE_API_URL = "http://211.233.58.201:7896"
|
| 17 |
MAX_TOKENS = 7999
|
| 18 |
|
| 19 |
-
#
|
| 20 |
-
|
| 21 |
-
"
|
| 22 |
-
"
|
| 23 |
-
"
|
| 24 |
-
"
|
| 25 |
-
"
|
| 26 |
-
"seo_optimized": "SEO-optimized blog"
|
| 27 |
}
|
| 28 |
|
| 29 |
-
|
| 30 |
"professional": "Professional and formal tone",
|
| 31 |
"casual": "Friendly and conversational tone",
|
| 32 |
-
"
|
| 33 |
-
"
|
| 34 |
}
|
| 35 |
|
| 36 |
-
# Example
|
| 37 |
-
|
| 38 |
-
"example1": "
|
| 39 |
-
"example2": "
|
| 40 |
-
"example3": "
|
| 41 |
}
|
| 42 |
|
| 43 |
# ββββββββββββββββββββββββββββββββ Logging ββββββββββββββββββββββββββββββββ
|
|
@@ -58,243 +57,129 @@ def get_openai_client():
|
|
| 58 |
max_retries=3 # μ¬μλ νμ 3νλ‘ μ€μ
|
| 59 |
)
|
| 60 |
|
| 61 |
-
# ββββββββββββββββββββββββββββββββ
|
| 62 |
-
def get_system_prompt(
|
| 63 |
"""
|
| 64 |
-
Generate a system prompt
|
| 65 |
-
- The
|
| 66 |
-
- The selected template and tone
|
| 67 |
- Guidelines for using web search results and uploaded files
|
| 68 |
"""
|
| 69 |
|
| 70 |
-
#
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
4. **Section 1: Core Concept Introduction**
|
| 90 |
-
- `## π What is [Keyword]?`
|
| 91 |
-
- 1-2 paragraphs definition + π One-line summary
|
| 92 |
-
|
| 93 |
-
5. `---`
|
| 94 |
-
|
| 95 |
-
6. **Section 2: 5 Benefits/Reasons**
|
| 96 |
-
- `## πͺ 5 Reasons Why [Keyword] Is Beneficial`
|
| 97 |
-
- Each subsection format:
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
### 1. [Keyword-focused subheading]
|
| 101 |
-
1-2 paragraphs explanation
|
| 102 |
-
> β One-line key point emphasis
|
| 103 |
-
|
| 104 |
-
- Total of 5 items
|
| 105 |
-
|
| 106 |
-
7. **Section 3: Consumption/Usage Methods**
|
| 107 |
-
- `## π₯ How to Use [Keyword] Effectively!`
|
| 108 |
-
- Emoji bullet list of around 5 items + Additional tips
|
| 109 |
-
|
| 110 |
-
8. `---`
|
| 111 |
-
|
| 112 |
-
9. **Concluding Call to Action**
|
| 113 |
-
- `## π Conclusion β Start Using [Keyword] Today!`
|
| 114 |
-
- 2-3 sentences on benefits/changes β **Action directive** (purchase, subscribe, share, etc.)
|
| 115 |
-
|
| 116 |
-
10. `---`
|
| 117 |
-
|
| 118 |
-
11. **Key Summary Table**
|
| 119 |
-
| Item | Effect |
|
| 120 |
-
|---|---|
|
| 121 |
-
| [Keyword] | [Effect summary] |
|
| 122 |
-
| Key foods/products | [List] |
|
| 123 |
-
|
| 124 |
-
12. `---`
|
| 125 |
-
|
| 126 |
-
13. **Quiz & CTA**
|
| 127 |
-
- Simple Q&A quiz (1 question) β Reveal answer
|
| 128 |
-
- "If you found this helpful, please share/comment" phrase
|
| 129 |
-
- Preview of next post
|
| 130 |
-
|
| 131 |
-
β Additional Guidelines
|
| 132 |
-
- Total length 1,200-1,800 words.
|
| 133 |
-
- Use simple vocabulary and short sentences, enhance readability with emojis, bold text, and quoted sections.
|
| 134 |
-
- Increase credibility with specific numbers, research results, and analogies.
|
| 135 |
-
- No meta-mentions of "prompts" or "instructions".
|
| 136 |
-
- Use conversational but professional tone throughout.
|
| 137 |
-
- Minimize expressions like "according to research" if no external sources are provided.
|
| 138 |
-
|
| 139 |
-
β Output
|
| 140 |
-
- Return **only the completed blog post** in the above format. No additional text.
|
| 141 |
-
|
| 142 |
-
"""
|
| 143 |
-
|
| 144 |
-
# Standard 8-step framework (English version)
|
| 145 |
-
base_prompt = """
|
| 146 |
-
You are an expert in writing professional blog posts. For every blog writing request, strictly follow this 8-step framework to produce a coherent, engaging post:
|
| 147 |
-
|
| 148 |
-
Reader Connection Phase
|
| 149 |
-
1.1. Friendly greeting to build rapport
|
| 150 |
-
1.2. Reflect actual reader concerns through introductory questions
|
| 151 |
-
1.3. Stimulate immediate interest in the topic
|
| 152 |
-
|
| 153 |
-
Problem Definition Phase
|
| 154 |
-
2.1. Define the reader's pain points in detail
|
| 155 |
-
2.2. Analyze the urgency and impact of the problem
|
| 156 |
-
2.3. Build a consensus on why it needs to be solved
|
| 157 |
-
|
| 158 |
-
Establish Expertise Phase
|
| 159 |
-
3.1. Analyze based on objective data
|
| 160 |
-
3.2. Cite expert views and research findings
|
| 161 |
-
3.3. Use real-life examples to further clarify the issue
|
| 162 |
-
|
| 163 |
-
Solution Phase
|
| 164 |
-
4.1. Provide step-by-step guidance
|
| 165 |
-
4.2. Suggest practical tips that can be applied immediately
|
| 166 |
-
4.3. Mention potential obstacles and how to overcome them
|
| 167 |
-
|
| 168 |
-
Build Trust Phase
|
| 169 |
-
5.1. Present actual success stories
|
| 170 |
-
5.2. Quote real user feedback
|
| 171 |
-
5.3. Use objective data to prove effectiveness
|
| 172 |
-
|
| 173 |
-
Action Phase
|
| 174 |
-
6.1. Suggest the first clear step the reader can take
|
| 175 |
-
6.2. Urge timely action by emphasizing urgency
|
| 176 |
-
6.3. Motivate by highlighting incentives or benefits
|
| 177 |
-
|
| 178 |
-
Authenticity Phase
|
| 179 |
-
7.1. Transparently disclose any limits of the solution
|
| 180 |
-
7.2. Admit that individual experiences may vary
|
| 181 |
-
7.3. Mention prerequisites or cautionary points
|
| 182 |
-
|
| 183 |
-
Relationship Continuation Phase
|
| 184 |
-
8.1. Conclude with sincere gratitude
|
| 185 |
-
8.2. Preview upcoming content to build anticipation
|
| 186 |
-
8.3. Provide channels for further communication
|
| 187 |
"""
|
| 188 |
|
| 189 |
-
#
|
| 190 |
-
|
| 191 |
-
"
|
| 192 |
-
|
| 193 |
-
-
|
| 194 |
-
-
|
| 195 |
-
-
|
| 196 |
-
-
|
| 197 |
-
-
|
| 198 |
-
- Give troubleshooting tips and common mistakes to avoid
|
| 199 |
-
- Conclude with suggestions for next steps or advanced applications
|
| 200 |
""",
|
| 201 |
-
"
|
| 202 |
-
|
| 203 |
-
-
|
| 204 |
-
-
|
| 205 |
-
-
|
| 206 |
-
-
|
| 207 |
-
-
|
| 208 |
-
- Provide concrete use cases and outcomes
|
| 209 |
-
- Conclude with a final recommendation or alternatives
|
| 210 |
""",
|
| 211 |
-
"
|
| 212 |
-
|
| 213 |
-
-
|
| 214 |
-
-
|
| 215 |
-
-
|
| 216 |
-
-
|
| 217 |
-
-
|
| 218 |
-
- Balance storytelling with useful information
|
| 219 |
-
- Encourage the reader to reflect on their own story
|
| 220 |
""",
|
| 221 |
-
"
|
| 222 |
-
|
| 223 |
-
-
|
| 224 |
-
-
|
| 225 |
-
-
|
| 226 |
-
-
|
| 227 |
-
-
|
| 228 |
-
- Indicate where internal links could be inserted
|
| 229 |
-
- Provide sufficient content of at least 2000-3000 characters
|
| 230 |
"""
|
| 231 |
}
|
| 232 |
|
| 233 |
-
#
|
| 234 |
-
|
| 235 |
-
"professional": "Use a professional, authoritative voice. Clearly explain
|
| 236 |
-
"casual": "Use a relaxed, conversational style
|
| 237 |
-
"
|
| 238 |
-
"
|
| 239 |
}
|
| 240 |
|
| 241 |
# Guidelines for using search results
|
| 242 |
search_guide = """
|
| 243 |
Guidelines for Using Search Results:
|
| 244 |
-
-
|
| 245 |
-
-
|
| 246 |
-
-
|
| 247 |
-
-
|
| 248 |
-
-
|
| 249 |
-
-
|
|
|
|
| 250 |
"""
|
| 251 |
|
| 252 |
# Guidelines for using uploaded files
|
| 253 |
upload_guide = """
|
| 254 |
-
Guidelines for Using Uploaded Files
|
| 255 |
-
-
|
| 256 |
-
-
|
| 257 |
-
-
|
| 258 |
-
-
|
| 259 |
-
-
|
| 260 |
-
-
|
| 261 |
-
-
|
| 262 |
-
- For text files, integrate relevant content effectively
|
| 263 |
-
- Even if the file content seems tangential, do your best to connect it to the blog topic
|
| 264 |
-
- Keep consistency throughout and ensure the file's data is appropriately reflected
|
| 265 |
"""
|
| 266 |
|
| 267 |
-
# Choose base prompt
|
| 268 |
-
if
|
| 269 |
-
final_prompt =
|
| 270 |
else:
|
| 271 |
-
final_prompt =
|
| 272 |
-
|
| 273 |
-
# If the user chose a specific template (and not ginigen), append the relevant guidelines
|
| 274 |
-
if template != "ginigen" and template in template_guides:
|
| 275 |
-
final_prompt += "\n" + template_guides[template]
|
| 276 |
|
| 277 |
-
#
|
| 278 |
-
if
|
| 279 |
-
final_prompt += f"\n\nTone and
|
| 280 |
|
| 281 |
-
#
|
| 282 |
if include_search_results:
|
| 283 |
final_prompt += f"\n\n{search_guide}"
|
| 284 |
|
| 285 |
-
#
|
| 286 |
if include_uploaded_files:
|
| 287 |
final_prompt += f"\n\n{upload_guide}"
|
| 288 |
|
| 289 |
-
#
|
| 290 |
-
final_prompt +=
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
|
| 294 |
-
|
| 295 |
-
|
| 296 |
-
|
| 297 |
-
|
| 298 |
|
| 299 |
return final_prompt
|
| 300 |
|
|
@@ -354,7 +239,7 @@ def mock_results(query: str) -> str:
|
|
| 354 |
"""Fallback search results if API fails"""
|
| 355 |
ts = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
| 356 |
return (f"# Fallback Search Content (Generated: {ts})\n\n"
|
| 357 |
-
f"The search API request failed. Please generate
|
| 358 |
f"You may consider the following points:\n\n"
|
| 359 |
f"- Basic concepts and importance of {query}\n"
|
| 360 |
f"- Commonly known related statistics or trends\n"
|
|
@@ -370,7 +255,7 @@ def do_web_search(query: str) -> str:
|
|
| 370 |
logging.warning("No search results, using fallback content")
|
| 371 |
return mock_results(query)
|
| 372 |
|
| 373 |
-
hdr = "# Web Search Results\nUse
|
| 374 |
body = "\n".join(
|
| 375 |
f"### Result {a['index']}: {a['title']}\n\n{a['snippet']}\n\n"
|
| 376 |
f"**Source**: [{a['displayed_link']}]({a['link']})\n\n---\n"
|
|
@@ -504,7 +389,7 @@ def process_uploaded_files(files):
|
|
| 504 |
return None
|
| 505 |
|
| 506 |
result = "# Uploaded File Contents\n\n"
|
| 507 |
-
result += "Below is the content from the files provided by the user. Integrate this data as a main source of information for
|
| 508 |
|
| 509 |
for file in files:
|
| 510 |
try:
|
|
@@ -524,6 +409,31 @@ def process_uploaded_files(files):
|
|
| 524 |
return result
|
| 525 |
|
| 526 |
# ββββββββββββββββββββββββββββββββ Image & Utility βββββββββββββββββββββββββ
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 527 |
def generate_image(prompt, w=768, h=768, g=3.5, steps=30, seed=3):
|
| 528 |
"""Image generation function."""
|
| 529 |
if not prompt:
|
|
@@ -541,18 +451,18 @@ def generate_image(prompt, w=768, h=768, g=3.5, steps=30, seed=3):
|
|
| 541 |
logging.error(e)
|
| 542 |
return None, str(e)
|
| 543 |
|
| 544 |
-
def extract_image_prompt(
|
| 545 |
"""
|
| 546 |
-
Generate a single-line English image prompt from the
|
| 547 |
"""
|
| 548 |
client = get_openai_client()
|
| 549 |
|
| 550 |
try:
|
| 551 |
response = client.chat.completions.create(
|
| 552 |
-
model="gpt-4.1-mini",
|
| 553 |
messages=[
|
| 554 |
{"role": "system", "content": "Generate a single-line English image prompt from the following text. Return only the prompt text, nothing else."},
|
| 555 |
-
{"role": "user", "content": f"Topic: {topic}\n\n---\n{
|
| 556 |
],
|
| 557 |
temperature=1,
|
| 558 |
max_tokens=80,
|
|
@@ -564,7 +474,7 @@ def extract_image_prompt(blog_text: str, topic: str):
|
|
| 564 |
logging.error(f"OpenAI image prompt generation error: {e}")
|
| 565 |
return f"A professional photo related to {topic}, high quality"
|
| 566 |
|
| 567 |
-
def md_to_html(md: str, title="
|
| 568 |
"""Convert Markdown to HTML."""
|
| 569 |
return f"<!DOCTYPE html><html><head><title>{title}</title><meta charset='utf-8'></head><body>{markdown.markdown(md)}</body></html>"
|
| 570 |
|
|
@@ -574,8 +484,8 @@ def keywords(text: str, top=5):
|
|
| 574 |
return " ".join(cleaned.split()[:top])
|
| 575 |
|
| 576 |
# ββββββββββββββββββββββββββββββββ Streamlit UI ββββββββββββββββββββββββββββ
|
| 577 |
-
def
|
| 578 |
-
st.title("
|
| 579 |
|
| 580 |
# Set default session state
|
| 581 |
if "ai_model" not in st.session_state:
|
|
@@ -588,46 +498,39 @@ def ginigen_app():
|
|
| 588 |
st.session_state.generate_image = False
|
| 589 |
if "web_search_enabled" not in st.session_state:
|
| 590 |
st.session_state.web_search_enabled = True
|
| 591 |
-
if "
|
| 592 |
-
st.session_state.
|
| 593 |
-
if "
|
| 594 |
-
st.session_state.
|
| 595 |
-
if "word_count" not in st.session_state:
|
| 596 |
-
st.session_state.word_count = 1750
|
| 597 |
|
| 598 |
# Sidebar UI
|
| 599 |
sb = st.sidebar
|
| 600 |
-
sb.title("
|
| 601 |
|
| 602 |
-
|
| 603 |
-
|
| 604 |
-
sb.subheader("Blog Style Settings")
|
| 605 |
sb.selectbox(
|
| 606 |
-
"
|
| 607 |
-
options=list(
|
| 608 |
-
format_func=lambda x:
|
| 609 |
-
key="
|
| 610 |
)
|
| 611 |
|
| 612 |
sb.selectbox(
|
| 613 |
-
"
|
| 614 |
-
options=list(
|
| 615 |
-
format_func=lambda x:
|
| 616 |
-
key="
|
| 617 |
)
|
| 618 |
|
| 619 |
-
|
| 620 |
-
|
| 621 |
-
|
| 622 |
-
# Example topics
|
| 623 |
-
sb.subheader("Example Topics")
|
| 624 |
c1, c2, c3 = sb.columns(3)
|
| 625 |
-
if c1.button("
|
| 626 |
-
process_example(
|
| 627 |
-
if c2.button("
|
| 628 |
-
process_example(
|
| 629 |
-
if c3.button("
|
| 630 |
-
process_example(
|
| 631 |
|
| 632 |
sb.subheader("Other Settings")
|
| 633 |
sb.toggle("Auto Save", key="auto_save")
|
|
@@ -637,22 +540,28 @@ def ginigen_app():
|
|
| 637 |
st.session_state.web_search_enabled = web_search_enabled
|
| 638 |
|
| 639 |
if web_search_enabled:
|
| 640 |
-
st.sidebar.info("β
Web search results will be integrated into the
|
| 641 |
|
| 642 |
-
# Download the latest
|
| 643 |
-
|
| 644 |
(m["content"] for m in reversed(st.session_state.messages)
|
| 645 |
if m["role"] == "assistant" and m["content"].strip()),
|
| 646 |
None
|
| 647 |
)
|
| 648 |
-
if
|
| 649 |
-
|
| 650 |
-
|
| 651 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 652 |
d1, d2 = sb.columns(2)
|
| 653 |
-
d1.download_button("Download as Markdown",
|
| 654 |
file_name=f"{title}.md", mime="text/markdown")
|
| 655 |
-
d2.download_button("Download as HTML", md_to_html(
|
| 656 |
file_name=f"{title}.html", mime="text/html")
|
| 657 |
|
| 658 |
# JSON conversation record upload
|
|
@@ -669,14 +578,14 @@ def ginigen_app():
|
|
| 669 |
sb.download_button(
|
| 670 |
"Save",
|
| 671 |
data=json.dumps(st.session_state.messages, ensure_ascii=False, indent=2),
|
| 672 |
-
file_name="
|
| 673 |
mime="application/json"
|
| 674 |
)
|
| 675 |
|
| 676 |
# File Upload
|
| 677 |
-
st.subheader("
|
| 678 |
uploaded_files = st.file_uploader(
|
| 679 |
-
"Upload files to be
|
| 680 |
type=["txt", "csv", "pdf"],
|
| 681 |
accept_multiple_files=True,
|
| 682 |
key="file_uploader"
|
|
@@ -684,7 +593,7 @@ def ginigen_app():
|
|
| 684 |
|
| 685 |
if uploaded_files:
|
| 686 |
file_count = len(uploaded_files)
|
| 687 |
-
st.success(f"{file_count} files uploaded. They will be
|
| 688 |
|
| 689 |
with st.expander("Preview Uploaded Files", expanded=False):
|
| 690 |
for idx, file in enumerate(uploaded_files):
|
|
@@ -734,33 +643,44 @@ def ginigen_app():
|
|
| 734 |
# Display existing messages
|
| 735 |
for m in st.session_state.messages:
|
| 736 |
with st.chat_message(m["role"]):
|
| 737 |
-
|
| 738 |
-
|
| 739 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 740 |
|
| 741 |
# User input
|
| 742 |
-
|
| 743 |
-
if
|
| 744 |
-
process_input(
|
| 745 |
-
|
| 746 |
|
| 747 |
# μ¬μ΄λλ° νλ¨ λ°°μ§(λ§ν¬) μΆκ°
|
| 748 |
sb.markdown("---")
|
| 749 |
sb.markdown("Created by [https://ginigen.com](https://ginigen.com) | [YouTube Channel](https://www.youtube.com/@ginipickaistudio)")
|
| 750 |
|
| 751 |
-
|
| 752 |
-
|
| 753 |
def process_example(topic):
|
| 754 |
-
"""Process the selected example
|
| 755 |
process_input(topic, [])
|
| 756 |
|
| 757 |
-
def process_input(
|
| 758 |
# Add user's message
|
| 759 |
-
if not any(m["role"] == "user" and m["content"] ==
|
| 760 |
-
st.session_state.messages.append({"role": "user", "content":
|
| 761 |
|
| 762 |
with st.chat_message("user"):
|
| 763 |
-
st.markdown(
|
| 764 |
|
| 765 |
with st.chat_message("assistant"):
|
| 766 |
placeholder = st.empty()
|
|
@@ -772,7 +692,7 @@ def process_input(prompt: str, uploaded_files):
|
|
| 772 |
|
| 773 |
try:
|
| 774 |
# μν νμλ₯Ό μν μν μ»΄ν¬λνΈ
|
| 775 |
-
status = st.status("Preparing to
|
| 776 |
status.update(label="Initializing client...")
|
| 777 |
|
| 778 |
client = get_openai_client()
|
|
@@ -785,7 +705,7 @@ def process_input(prompt: str, uploaded_files):
|
|
| 785 |
if use_web_search:
|
| 786 |
status.update(label="Performing web search...")
|
| 787 |
with st.spinner("Searching the web..."):
|
| 788 |
-
search_content = do_web_search(keywords(
|
| 789 |
|
| 790 |
# Process uploaded files β content
|
| 791 |
file_content = None
|
|
@@ -794,25 +714,29 @@ def process_input(prompt: str, uploaded_files):
|
|
| 794 |
with st.spinner("Analyzing files..."):
|
| 795 |
file_content = process_uploaded_files(uploaded_files)
|
| 796 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 797 |
# Build system prompt
|
| 798 |
-
status.update(label="Preparing
|
| 799 |
sys_prompt = get_system_prompt(
|
| 800 |
-
|
| 801 |
-
|
| 802 |
-
word_count=st.session_state.word_count,
|
| 803 |
include_search_results=use_web_search,
|
| 804 |
include_uploaded_files=has_uploaded_files
|
| 805 |
)
|
| 806 |
|
| 807 |
# OpenAI API νΈμΆ μ€λΉ
|
| 808 |
-
status.update(label="
|
| 809 |
|
| 810 |
# λ©μμ§ κ΅¬μ±
|
| 811 |
api_messages = [
|
| 812 |
{"role": "system", "content": sys_prompt}
|
| 813 |
]
|
| 814 |
|
| 815 |
-
user_content =
|
| 816 |
|
| 817 |
# κ²μ κ²°κ³Όκ° μμΌλ©΄ μ¬μ©μ ν둬ννΈμ μΆκ°
|
| 818 |
if search_content:
|
|
@@ -822,6 +746,17 @@ def process_input(prompt: str, uploaded_files):
|
|
| 822 |
if file_content:
|
| 823 |
user_content += "\n\n" + file_content
|
| 824 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 825 |
# μ¬μ©μ λ©μμ§ μΆκ°
|
| 826 |
api_messages.append({"role": "user", "content": user_content})
|
| 827 |
|
|
@@ -842,63 +777,78 @@ def process_input(prompt: str, uploaded_files):
|
|
| 842 |
if chunk.choices and len(chunk.choices) > 0 and chunk.choices[0].delta.content is not None:
|
| 843 |
content_delta = chunk.choices[0].delta.content
|
| 844 |
full_response += content_delta
|
| 845 |
-
message_placeholder.markdown(full_response + "β")
|
| 846 |
|
| 847 |
# μ΅μ’
μλ΅ νμ (컀μ μ κ±°)
|
| 848 |
-
message_placeholder.markdown(full_response)
|
| 849 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 850 |
|
| 851 |
except Exception as api_error:
|
| 852 |
error_message = str(api_error)
|
| 853 |
logging.error(f"API error: {error_message}")
|
| 854 |
status.update(label=f"Error: {error_message}", state="error")
|
| 855 |
-
raise Exception(f"
|
| 856 |
|
| 857 |
-
#
|
| 858 |
-
answer_entry_saved = False
|
| 859 |
if st.session_state.generate_image and full_response:
|
| 860 |
-
with st.spinner("Generating image..."):
|
| 861 |
try:
|
| 862 |
-
ip = extract_image_prompt(full_response,
|
| 863 |
img, cap = generate_image(ip)
|
| 864 |
if img:
|
|
|
|
| 865 |
st.image(img, caption=cap)
|
| 866 |
-
st.session_state.messages.append({
|
| 867 |
-
"role": "assistant",
|
| 868 |
-
"content": full_response,
|
| 869 |
-
"image": img,
|
| 870 |
-
"image_caption": cap
|
| 871 |
-
})
|
| 872 |
-
answer_entry_saved = True
|
| 873 |
except Exception as img_error:
|
| 874 |
logging.error(f"Image generation error: {str(img_error)}")
|
| 875 |
-
st.warning("
|
| 876 |
-
|
| 877 |
-
# Save the answer if not saved above
|
| 878 |
-
if not answer_entry_saved and full_response:
|
| 879 |
-
st.session_state.messages.append({"role": "assistant", "content": full_response})
|
| 880 |
|
| 881 |
# Download buttons
|
| 882 |
if full_response:
|
| 883 |
-
st.subheader("Download This
|
| 884 |
c1, c2 = st.columns(2)
|
| 885 |
c1.download_button(
|
| 886 |
"Markdown",
|
| 887 |
data=full_response,
|
| 888 |
-
file_name=f"{
|
| 889 |
mime="text/markdown"
|
| 890 |
)
|
| 891 |
c2.download_button(
|
| 892 |
"HTML",
|
| 893 |
-
data=md_to_html(full_response,
|
| 894 |
-
file_name=f"{
|
| 895 |
mime="text/html"
|
| 896 |
)
|
| 897 |
|
| 898 |
# Auto save
|
| 899 |
if st.session_state.auto_save and st.session_state.messages:
|
| 900 |
try:
|
| 901 |
-
fn = f"
|
| 902 |
with open(fn, "w", encoding="utf-8") as fp:
|
| 903 |
json.dump(st.session_state.messages, fp, ensure_ascii=False, indent=2)
|
| 904 |
except Exception as e:
|
|
@@ -914,7 +864,7 @@ def process_input(prompt: str, uploaded_files):
|
|
| 914 |
|
| 915 |
# ββββββββββββββββββββββββββββββββ main ββββββββββββββββββββββββββββββββββββ
|
| 916 |
def main():
|
| 917 |
-
|
| 918 |
|
| 919 |
if __name__ == "__main__":
|
| 920 |
main()
|
|
|
|
| 16 |
IMAGE_API_URL = "http://211.233.58.201:7896"
|
| 17 |
MAX_TOKENS = 7999
|
| 18 |
|
| 19 |
+
# Search modes and style definitions (in English)
|
| 20 |
+
SEARCH_MODES = {
|
| 21 |
+
"comprehensive": "Comprehensive answer with multiple sources",
|
| 22 |
+
"academic": "Academic and research-focused results",
|
| 23 |
+
"news": "Latest news and current events",
|
| 24 |
+
"technical": "Technical and specialized information",
|
| 25 |
+
"educational": "Educational and learning resources"
|
|
|
|
| 26 |
}
|
| 27 |
|
| 28 |
+
RESPONSE_STYLES = {
|
| 29 |
"professional": "Professional and formal tone",
|
| 30 |
"casual": "Friendly and conversational tone",
|
| 31 |
+
"simple": "Simple and easy to understand",
|
| 32 |
+
"detailed": "Detailed and thorough explanations"
|
| 33 |
}
|
| 34 |
|
| 35 |
+
# Example search queries
|
| 36 |
+
EXAMPLE_QUERIES = {
|
| 37 |
+
"example1": "What are the latest developments in quantum computing?",
|
| 38 |
+
"example2": "How does climate change affect biodiversity in tropical rainforests?",
|
| 39 |
+
"example3": "What are the economic implications of artificial intelligence in the job market?"
|
| 40 |
}
|
| 41 |
|
| 42 |
# ββββββββββββββββββββββββββββββββ Logging ββββββββββββββββββββββββββββββββ
|
|
|
|
| 57 |
max_retries=3 # μ¬μλ νμ 3νλ‘ μ€μ
|
| 58 |
)
|
| 59 |
|
| 60 |
+
# ββββββββββββββββββββββββββββββββ System Prompt βββββββββββββββββββββββββ
|
| 61 |
+
def get_system_prompt(mode="comprehensive", style="professional", include_search_results=True, include_uploaded_files=False) -> str:
|
| 62 |
"""
|
| 63 |
+
Generate a system prompt for the perplexity-like interface based on:
|
| 64 |
+
- The selected search mode and style
|
|
|
|
| 65 |
- Guidelines for using web search results and uploaded files
|
| 66 |
"""
|
| 67 |
|
| 68 |
+
# Base prompt for comprehensive mode
|
| 69 |
+
comprehensive_prompt = """
|
| 70 |
+
You are an advanced AI assistant that provides comprehensive answers with multiple sources, similar to Perplexity.
|
| 71 |
+
|
| 72 |
+
Your task is to:
|
| 73 |
+
1. Thoroughly analyze the user's query
|
| 74 |
+
2. Provide a clear, well-structured answer integrating information from multiple sources
|
| 75 |
+
3. Include relevant images, videos, and links in your response
|
| 76 |
+
4. Format your answer with proper headings, bullet points, and sections
|
| 77 |
+
5. Cite sources inline and provide a references section at the end
|
| 78 |
+
|
| 79 |
+
Important guidelines:
|
| 80 |
+
- Organize information logically with clear section headings
|
| 81 |
+
- Use bullet points and numbered lists for clarity
|
| 82 |
+
- Include specific, factual information whenever possible
|
| 83 |
+
- Provide balanced perspectives on controversial topics
|
| 84 |
+
- Display relevant statistics, data, or quotes when appropriate
|
| 85 |
+
- Format your response using markdown for readability
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
"""
|
| 87 |
|
| 88 |
+
# Alternative modes
|
| 89 |
+
mode_prompts = {
|
| 90 |
+
"academic": """
|
| 91 |
+
Your focus is on providing academic and research-focused responses:
|
| 92 |
+
- Prioritize peer-reviewed research and academic sources
|
| 93 |
+
- Include citations in a formal academic format
|
| 94 |
+
- Discuss methodologies and research limitations where relevant
|
| 95 |
+
- Present different scholarly perspectives on the topic
|
| 96 |
+
- Use precise, technical language appropriate for an academic audience
|
|
|
|
|
|
|
| 97 |
""",
|
| 98 |
+
"news": """
|
| 99 |
+
Your focus is on providing the latest news and current events:
|
| 100 |
+
- Prioritize recent news articles and current information
|
| 101 |
+
- Include publication dates for all news sources
|
| 102 |
+
- Present multiple perspectives from different news outlets
|
| 103 |
+
- Distinguish between facts and opinions/editorial content
|
| 104 |
+
- Update information with the most recent developments
|
|
|
|
|
|
|
| 105 |
""",
|
| 106 |
+
"technical": """
|
| 107 |
+
Your focus is on providing technical and specialized information:
|
| 108 |
+
- Use precise technical terminology appropriate to the field
|
| 109 |
+
- Include code snippets, formulas, or technical diagrams where relevant
|
| 110 |
+
- Break down complex concepts into step-by-step explanations
|
| 111 |
+
- Reference technical documentation, standards, and best practices
|
| 112 |
+
- Consider different technical approaches or methodologies
|
|
|
|
|
|
|
| 113 |
""",
|
| 114 |
+
"educational": """
|
| 115 |
+
Your focus is on providing educational and learning resources:
|
| 116 |
+
- Structure information in a learning-friendly progression
|
| 117 |
+
- Include examples, analogies, and visual explanations
|
| 118 |
+
- Highlight key concepts and definitions
|
| 119 |
+
- Suggest further learning resources at different difficulty levels
|
| 120 |
+
- Present information that's accessible to learners at various levels
|
|
|
|
|
|
|
| 121 |
"""
|
| 122 |
}
|
| 123 |
|
| 124 |
+
# Response styles
|
| 125 |
+
style_guides = {
|
| 126 |
+
"professional": "Use a professional, authoritative voice. Clearly explain technical terms and present data systematically.",
|
| 127 |
+
"casual": "Use a relaxed, conversational style with a friendly tone. Include relatable examples and occasionally use informal expressions.",
|
| 128 |
+
"simple": "Use straightforward language and avoid jargon. Keep sentences and paragraphs short. Explain concepts as if to someone with no background in the subject.",
|
| 129 |
+
"detailed": "Provide thorough explanations with comprehensive background information. Explore nuances and edge cases. Present multiple perspectives and detailed analysis."
|
| 130 |
}
|
| 131 |
|
| 132 |
# Guidelines for using search results
|
| 133 |
search_guide = """
|
| 134 |
Guidelines for Using Search Results:
|
| 135 |
+
- Include source links directly in your response using markdown: [Source Name](URL)
|
| 136 |
+
- For each major claim or piece of information, indicate its source
|
| 137 |
+
- If sources conflict, explain the different perspectives and their reliability
|
| 138 |
+
- Include 3-5 relevant images by writing: 
|
| 139 |
+
- Include 1-2 relevant video links when appropriate by writing: [Video: Title](video_url)
|
| 140 |
+
- Format search information into a cohesive, well-structured response
|
| 141 |
+
- Include a "References" section at the end listing all major sources with links
|
| 142 |
"""
|
| 143 |
|
| 144 |
# Guidelines for using uploaded files
|
| 145 |
upload_guide = """
|
| 146 |
+
Guidelines for Using Uploaded Files:
|
| 147 |
+
- Treat the uploaded files as primary sources for your response
|
| 148 |
+
- Extract and highlight key information from files that directly addresses the query
|
| 149 |
+
- Quote relevant passages and cite the specific file
|
| 150 |
+
- For numerical data in CSV files, consider creating summary statements
|
| 151 |
+
- For PDF content, reference specific sections or pages
|
| 152 |
+
- Integrate file information seamlessly with web search results
|
| 153 |
+
- When information conflicts, prioritize file content over general web results
|
|
|
|
|
|
|
|
|
|
| 154 |
"""
|
| 155 |
|
| 156 |
+
# Choose base prompt based on mode
|
| 157 |
+
if mode == "comprehensive":
|
| 158 |
+
final_prompt = comprehensive_prompt
|
| 159 |
else:
|
| 160 |
+
final_prompt = comprehensive_prompt + "\n" + mode_prompts.get(mode, "")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 161 |
|
| 162 |
+
# Add style guide
|
| 163 |
+
if style in style_guides:
|
| 164 |
+
final_prompt += f"\n\nTone and Style: {style_guides[style]}"
|
| 165 |
|
| 166 |
+
# Add search results guidance
|
| 167 |
if include_search_results:
|
| 168 |
final_prompt += f"\n\n{search_guide}"
|
| 169 |
|
| 170 |
+
# Add uploaded files guidance
|
| 171 |
if include_uploaded_files:
|
| 172 |
final_prompt += f"\n\n{upload_guide}"
|
| 173 |
|
| 174 |
+
# Additional formatting instructions
|
| 175 |
+
final_prompt += """
|
| 176 |
+
\n\nAdditional Formatting Requirements:
|
| 177 |
+
- Use markdown headings (## and ###) to organize your response
|
| 178 |
+
- Use bold text (**text**) for emphasis on important points
|
| 179 |
+
- Include a "Related Questions" section at the end with 3-5 follow-up questions
|
| 180 |
+
- Format your response with proper spacing and paragraph breaks
|
| 181 |
+
- Make all links clickable by using proper markdown format: [text](url)
|
| 182 |
+
"""
|
| 183 |
|
| 184 |
return final_prompt
|
| 185 |
|
|
|
|
| 239 |
"""Fallback search results if API fails"""
|
| 240 |
ts = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
| 241 |
return (f"# Fallback Search Content (Generated: {ts})\n\n"
|
| 242 |
+
f"The search API request failed. Please generate a response based on any pre-existing knowledge about '{query}'.\n\n"
|
| 243 |
f"You may consider the following points:\n\n"
|
| 244 |
f"- Basic concepts and importance of {query}\n"
|
| 245 |
f"- Commonly known related statistics or trends\n"
|
|
|
|
| 255 |
logging.warning("No search results, using fallback content")
|
| 256 |
return mock_results(query)
|
| 257 |
|
| 258 |
+
hdr = "# Web Search Results\nUse these results to provide a comprehensive answer with multiple sources. Include relevant images, videos, and links.\n\n"
|
| 259 |
body = "\n".join(
|
| 260 |
f"### Result {a['index']}: {a['title']}\n\n{a['snippet']}\n\n"
|
| 261 |
f"**Source**: [{a['displayed_link']}]({a['link']})\n\n---\n"
|
|
|
|
| 389 |
return None
|
| 390 |
|
| 391 |
result = "# Uploaded File Contents\n\n"
|
| 392 |
+
result += "Below is the content from the files provided by the user. Integrate this data as a main source of information for your response.\n\n"
|
| 393 |
|
| 394 |
for file in files:
|
| 395 |
try:
|
|
|
|
| 409 |
return result
|
| 410 |
|
| 411 |
# ββββββββββββββββββββββββββββββββ Image & Utility βββββββββββββββββββββββββ
|
| 412 |
+
def get_images_for_query(query, count=5):
|
| 413 |
+
"""
|
| 414 |
+
Simulate getting relevant images for a query.
|
| 415 |
+
In a real implementation, this would call an image search API.
|
| 416 |
+
"""
|
| 417 |
+
# This is a placeholder - in production, you would use a real image search API
|
| 418 |
+
sample_images = [
|
| 419 |
+
"https://source.unsplash.com/random/800x600/?"+query.replace(" ", "+"),
|
| 420 |
+
"https://source.unsplash.com/random/600x400/?"+query.replace(" ", "+"),
|
| 421 |
+
"https://source.unsplash.com/random/400x300/?"+query.replace(" ", "+"),
|
| 422 |
+
]
|
| 423 |
+
return sample_images[:min(count, len(sample_images))]
|
| 424 |
+
|
| 425 |
+
def get_videos_for_query(query, count=2):
|
| 426 |
+
"""
|
| 427 |
+
Simulate getting relevant videos for a query.
|
| 428 |
+
In a real implementation, this would call a video search API.
|
| 429 |
+
"""
|
| 430 |
+
# This is a placeholder - in production, you would use a real video search API
|
| 431 |
+
sample_videos = [
|
| 432 |
+
{"title": f"Introduction to {query}", "url": "https://www.youtube.com/results?search_query="+query.replace(" ", "+")},
|
| 433 |
+
{"title": f"Detailed explanation of {query}", "url": "https://www.youtube.com/results?search_query=advanced+"+query.replace(" ", "+")}
|
| 434 |
+
]
|
| 435 |
+
return sample_videos[:min(count, len(sample_videos))]
|
| 436 |
+
|
| 437 |
def generate_image(prompt, w=768, h=768, g=3.5, steps=30, seed=3):
|
| 438 |
"""Image generation function."""
|
| 439 |
if not prompt:
|
|
|
|
| 451 |
logging.error(e)
|
| 452 |
return None, str(e)
|
| 453 |
|
| 454 |
+
def extract_image_prompt(response_text: str, topic: str):
|
| 455 |
"""
|
| 456 |
+
Generate a single-line English image prompt from the response content.
|
| 457 |
"""
|
| 458 |
client = get_openai_client()
|
| 459 |
|
| 460 |
try:
|
| 461 |
response = client.chat.completions.create(
|
| 462 |
+
model="gpt-4.1-mini",
|
| 463 |
messages=[
|
| 464 |
{"role": "system", "content": "Generate a single-line English image prompt from the following text. Return only the prompt text, nothing else."},
|
| 465 |
+
{"role": "user", "content": f"Topic: {topic}\n\n---\n{response_text}\n\n---"}
|
| 466 |
],
|
| 467 |
temperature=1,
|
| 468 |
max_tokens=80,
|
|
|
|
| 474 |
logging.error(f"OpenAI image prompt generation error: {e}")
|
| 475 |
return f"A professional photo related to {topic}, high quality"
|
| 476 |
|
| 477 |
+
def md_to_html(md: str, title="Perplexity-like Response"):
    """Convert Markdown to a minimal standalone HTML document.

    Args:
        md: Markdown source rendered into the document body.
        title: Document title. HTML-escaped before interpolation, since
            callers pass raw user queries / response headings that may
            contain '<', '>' or '&' and would otherwise break the markup.

    Returns:
        A complete HTML page as a single string.
    """
    import html  # local import: only needed here

    safe_title = html.escape(title)
    return (
        "<!DOCTYPE html><html><head>"
        f"<title>{safe_title}</title><meta charset='utf-8'></head>"
        f"<body>{markdown.markdown(md)}</body></html>"
    )
|
| 480 |
|
|
|
|
| 484 |
return " ".join(cleaned.split()[:top])
|
| 485 |
|
| 486 |
# ββββββββββββββββββββββββββββββββ Streamlit UI ββββββββββββββββββββββββββββ
|
| 487 |
+
def perplexity_app():
|
| 488 |
+
st.title("Perplexity-like AI Assistant")
|
| 489 |
|
| 490 |
# Set default session state
|
| 491 |
if "ai_model" not in st.session_state:
|
|
|
|
| 498 |
st.session_state.generate_image = False
|
| 499 |
if "web_search_enabled" not in st.session_state:
|
| 500 |
st.session_state.web_search_enabled = True
|
| 501 |
+
if "search_mode" not in st.session_state:
|
| 502 |
+
st.session_state.search_mode = "comprehensive"
|
| 503 |
+
if "response_style" not in st.session_state:
|
| 504 |
+
st.session_state.response_style = "professional"
|
|
|
|
|
|
|
| 505 |
|
| 506 |
# Sidebar UI
|
| 507 |
sb = st.sidebar
|
| 508 |
+
sb.title("Search Settings")
|
| 509 |
|
| 510 |
+
sb.subheader("Response Configuration")
|
|
|
|
|
|
|
| 511 |
sb.selectbox(
|
| 512 |
+
"Search Mode",
|
| 513 |
+
options=list(SEARCH_MODES.keys()),
|
| 514 |
+
format_func=lambda x: SEARCH_MODES[x],
|
| 515 |
+
key="search_mode"
|
| 516 |
)
|
| 517 |
|
| 518 |
sb.selectbox(
|
| 519 |
+
"Response Style",
|
| 520 |
+
options=list(RESPONSE_STYLES.keys()),
|
| 521 |
+
format_func=lambda x: RESPONSE_STYLES[x],
|
| 522 |
+
key="response_style"
|
| 523 |
)
|
| 524 |
|
| 525 |
+
# Example queries
|
| 526 |
+
sb.subheader("Example Queries")
|
|
|
|
|
|
|
|
|
|
| 527 |
c1, c2, c3 = sb.columns(3)
|
| 528 |
+
if c1.button("Quantum Computing", key="ex1"):
|
| 529 |
+
process_example(EXAMPLE_QUERIES["example1"])
|
| 530 |
+
if c2.button("Climate Change", key="ex2"):
|
| 531 |
+
process_example(EXAMPLE_QUERIES["example2"])
|
| 532 |
+
if c3.button("AI Economics", key="ex3"):
|
| 533 |
+
process_example(EXAMPLE_QUERIES["example3"])
|
| 534 |
|
| 535 |
sb.subheader("Other Settings")
|
| 536 |
sb.toggle("Auto Save", key="auto_save")
|
|
|
|
| 540 |
st.session_state.web_search_enabled = web_search_enabled
|
| 541 |
|
| 542 |
if web_search_enabled:
|
| 543 |
+
st.sidebar.info("β
Web search results will be integrated into the response.")
|
| 544 |
|
| 545 |
+
# Download the latest response
|
| 546 |
+
latest_response = next(
|
| 547 |
(m["content"] for m in reversed(st.session_state.messages)
|
| 548 |
if m["role"] == "assistant" and m["content"].strip()),
|
| 549 |
None
|
| 550 |
)
|
| 551 |
+
if latest_response:
|
| 552 |
+
# Extract a title from the response - first heading or first line
|
| 553 |
+
title_match = re.search(r"# (.*?)(\n|$)", latest_response)
|
| 554 |
+
if title_match:
|
| 555 |
+
title = title_match.group(1).strip()
|
| 556 |
+
else:
|
| 557 |
+
first_line = latest_response.split('\n', 1)[0].strip()
|
| 558 |
+
title = first_line[:40] + "..." if len(first_line) > 40 else first_line
|
| 559 |
+
|
| 560 |
+
sb.subheader("Download Latest Response")
|
| 561 |
d1, d2 = sb.columns(2)
|
| 562 |
+
d1.download_button("Download as Markdown", latest_response,
|
| 563 |
file_name=f"{title}.md", mime="text/markdown")
|
| 564 |
+
d2.download_button("Download as HTML", md_to_html(latest_response, title),
|
| 565 |
file_name=f"{title}.html", mime="text/html")
|
| 566 |
|
| 567 |
# JSON conversation record upload
|
|
|
|
| 578 |
sb.download_button(
|
| 579 |
"Save",
|
| 580 |
data=json.dumps(st.session_state.messages, ensure_ascii=False, indent=2),
|
| 581 |
+
file_name="conversation_history.json",
|
| 582 |
mime="application/json"
|
| 583 |
)
|
| 584 |
|
| 585 |
# File Upload
|
| 586 |
+
st.subheader("Upload Files")
|
| 587 |
uploaded_files = st.file_uploader(
|
| 588 |
+
"Upload files to be used as reference (txt, csv, pdf)",
|
| 589 |
type=["txt", "csv", "pdf"],
|
| 590 |
accept_multiple_files=True,
|
| 591 |
key="file_uploader"
|
|
|
|
| 593 |
|
| 594 |
if uploaded_files:
|
| 595 |
file_count = len(uploaded_files)
|
| 596 |
+
st.success(f"{file_count} files uploaded. They will be used as sources for your query.")
|
| 597 |
|
| 598 |
with st.expander("Preview Uploaded Files", expanded=False):
|
| 599 |
for idx, file in enumerate(uploaded_files):
|
|
|
|
| 643 |
# Display existing messages
|
| 644 |
for m in st.session_state.messages:
|
| 645 |
with st.chat_message(m["role"]):
|
| 646 |
+
# Process markdown to allow clickable links and properly rendered content
|
| 647 |
+
st.markdown(m["content"], unsafe_allow_html=True)
|
| 648 |
+
|
| 649 |
+
# Display images if present
|
| 650 |
+
if "images" in m and m["images"]:
|
| 651 |
+
st.subheader("Related Images")
|
| 652 |
+
cols = st.columns(min(3, len(m["images"])))
|
| 653 |
+
for i, (img_url, caption) in enumerate(m["images"]):
|
| 654 |
+
col_idx = i % len(cols)
|
| 655 |
+
with cols[col_idx]:
|
| 656 |
+
st.image(img_url, caption=caption, use_column_width=True)
|
| 657 |
+
|
| 658 |
+
# Display videos if present
|
| 659 |
+
if "videos" in m and m["videos"]:
|
| 660 |
+
st.subheader("Related Videos")
|
| 661 |
+
for video in m["videos"]:
|
| 662 |
+
st.markdown(f"[π¬ {video['title']}]({video['url']})", unsafe_allow_html=True)
|
| 663 |
|
| 664 |
# User input
|
| 665 |
+
query = st.chat_input("Enter your query or question here.")
|
| 666 |
+
if query:
|
| 667 |
+
process_input(query, uploaded_files)
|
|
|
|
| 668 |
|
| 669 |
# μ¬μ΄λλ° νλ¨ λ°°μ§(λ§ν¬) μΆκ°
|
| 670 |
sb.markdown("---")
|
| 671 |
sb.markdown("Created by [https://ginigen.com](https://ginigen.com) | [YouTube Channel](https://www.youtube.com/@ginipickaistudio)")
|
| 672 |
|
|
|
|
|
|
|
| 673 |
def process_example(topic):
    """Feed a sidebar example query through the normal input pipeline (no file attachments)."""
    process_input(topic, uploaded_files=[])
|
| 676 |
|
| 677 |
+
def process_input(query: str, uploaded_files):
|
| 678 |
# Add user's message
|
| 679 |
+
if not any(m["role"] == "user" and m["content"] == query for m in st.session_state.messages):
|
| 680 |
+
st.session_state.messages.append({"role": "user", "content": query})
|
| 681 |
|
| 682 |
with st.chat_message("user"):
|
| 683 |
+
st.markdown(query)
|
| 684 |
|
| 685 |
with st.chat_message("assistant"):
|
| 686 |
placeholder = st.empty()
|
|
|
|
| 692 |
|
| 693 |
try:
|
| 694 |
# μν νμλ₯Ό μν μν μ»΄ν¬λνΈ
|
| 695 |
+
status = st.status("Preparing to answer your query...")
|
| 696 |
status.update(label="Initializing client...")
|
| 697 |
|
| 698 |
client = get_openai_client()
|
|
|
|
| 705 |
if use_web_search:
|
| 706 |
status.update(label="Performing web search...")
|
| 707 |
with st.spinner("Searching the web..."):
|
| 708 |
+
search_content = do_web_search(keywords(query, top=5))
|
| 709 |
|
| 710 |
# Process uploaded files β content
|
| 711 |
file_content = None
|
|
|
|
| 714 |
with st.spinner("Analyzing files..."):
|
| 715 |
file_content = process_uploaded_files(uploaded_files)
|
| 716 |
|
| 717 |
+
# Get relevant images and videos (before generating response)
|
| 718 |
+
status.update(label="Finding related media...")
|
| 719 |
+
related_images = get_images_for_query(query) if use_web_search else []
|
| 720 |
+
related_videos = get_videos_for_query(query) if use_web_search else []
|
| 721 |
+
|
| 722 |
# Build system prompt
|
| 723 |
+
status.update(label="Preparing comprehensive answer...")
|
| 724 |
sys_prompt = get_system_prompt(
|
| 725 |
+
mode=st.session_state.search_mode,
|
| 726 |
+
style=st.session_state.response_style,
|
|
|
|
| 727 |
include_search_results=use_web_search,
|
| 728 |
include_uploaded_files=has_uploaded_files
|
| 729 |
)
|
| 730 |
|
| 731 |
# OpenAI API νΈμΆ μ€λΉ
|
| 732 |
+
status.update(label="Generating response...")
|
| 733 |
|
| 734 |
# λ©μμ§ κ΅¬μ±
|
| 735 |
api_messages = [
|
| 736 |
{"role": "system", "content": sys_prompt}
|
| 737 |
]
|
| 738 |
|
| 739 |
+
user_content = query
|
| 740 |
|
| 741 |
# κ²μ κ²°κ³Όκ° μμΌλ©΄ μ¬μ©μ ν둬ννΈμ μΆκ°
|
| 742 |
if search_content:
|
|
|
|
| 746 |
if file_content:
|
| 747 |
user_content += "\n\n" + file_content
|
| 748 |
|
| 749 |
+
# Add image and video information to the prompt
|
| 750 |
+
if related_images:
|
| 751 |
+
user_content += "\n\n# Related Images\n"
|
| 752 |
+
for i, img_url in enumerate(related_images):
|
| 753 |
+
user_content += f"\n"
|
| 754 |
+
|
| 755 |
+
if related_videos:
|
| 756 |
+
user_content += "\n\n# Related Videos\n"
|
| 757 |
+
for video in related_videos:
|
| 758 |
+
user_content += f"\n[Video: {video['title']}]({video['url']})"
|
| 759 |
+
|
| 760 |
# μ¬μ©μ λ©μμ§ μΆκ°
|
| 761 |
api_messages.append({"role": "user", "content": user_content})
|
| 762 |
|
|
|
|
| 777 |
if chunk.choices and len(chunk.choices) > 0 and chunk.choices[0].delta.content is not None:
|
| 778 |
content_delta = chunk.choices[0].delta.content
|
| 779 |
full_response += content_delta
|
| 780 |
+
message_placeholder.markdown(full_response + "β", unsafe_allow_html=True)
|
| 781 |
|
| 782 |
# μ΅μ’
μλ΅ νμ (컀μ μ κ±°)
|
| 783 |
+
message_placeholder.markdown(full_response, unsafe_allow_html=True)
|
| 784 |
+
|
| 785 |
+
# Display related images if available
|
| 786 |
+
if related_images:
|
| 787 |
+
image_captions = [f"Related image {i+1}" for i in range(len(related_images))]
|
| 788 |
+
images_with_captions = list(zip(related_images, image_captions))
|
| 789 |
+
|
| 790 |
+
cols = st.columns(min(3, len(related_images)))
|
| 791 |
+
for i, (img_url, caption) in enumerate(images_with_captions):
|
| 792 |
+
col_idx = i % len(cols)
|
| 793 |
+
with cols[col_idx]:
|
| 794 |
+
st.image(img_url, caption=caption, use_column_width=True)
|
| 795 |
+
|
| 796 |
+
# Display related videos if available
|
| 797 |
+
if related_videos:
|
| 798 |
+
st.subheader("Related Videos")
|
| 799 |
+
for video in related_videos:
|
| 800 |
+
st.markdown(f"[π¬ {video['title']}]({video['url']})", unsafe_allow_html=True)
|
| 801 |
+
|
| 802 |
+
status.update(label="Response completed!", state="complete")
|
| 803 |
+
|
| 804 |
+
# Save the response with images and videos in the session state
|
| 805 |
+
st.session_state.messages.append({
|
| 806 |
+
"role": "assistant",
|
| 807 |
+
"content": full_response,
|
| 808 |
+
"images": list(zip(related_images, image_captions)) if related_images else [],
|
| 809 |
+
"videos": related_videos
|
| 810 |
+
})
|
| 811 |
|
| 812 |
except Exception as api_error:
|
| 813 |
error_message = str(api_error)
|
| 814 |
logging.error(f"API error: {error_message}")
|
| 815 |
status.update(label=f"Error: {error_message}", state="error")
|
| 816 |
+
raise Exception(f"Response generation error: {error_message}")
|
| 817 |
|
| 818 |
+
# Additional image generation if enabled
|
|
|
|
| 819 |
if st.session_state.generate_image and full_response:
|
| 820 |
+
with st.spinner("Generating custom image..."):
|
| 821 |
try:
|
| 822 |
+
ip = extract_image_prompt(full_response, query)
|
| 823 |
img, cap = generate_image(ip)
|
| 824 |
if img:
|
| 825 |
+
st.subheader("AI-Generated Image")
|
| 826 |
st.image(img, caption=cap)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 827 |
except Exception as img_error:
|
| 828 |
logging.error(f"Image generation error: {str(img_error)}")
|
| 829 |
+
st.warning("Custom image generation failed. Using web images only.")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 830 |
|
| 831 |
# Download buttons
|
| 832 |
if full_response:
|
| 833 |
+
st.subheader("Download This Response")
|
| 834 |
c1, c2 = st.columns(2)
|
| 835 |
c1.download_button(
|
| 836 |
"Markdown",
|
| 837 |
data=full_response,
|
| 838 |
+
file_name=f"{query[:30]}.md",
|
| 839 |
mime="text/markdown"
|
| 840 |
)
|
| 841 |
c2.download_button(
|
| 842 |
"HTML",
|
| 843 |
+
data=md_to_html(full_response, query[:30]),
|
| 844 |
+
file_name=f"{query[:30]}.html",
|
| 845 |
mime="text/html"
|
| 846 |
)
|
| 847 |
|
| 848 |
# Auto save
|
| 849 |
if st.session_state.auto_save and st.session_state.messages:
|
| 850 |
try:
|
| 851 |
+
fn = f"conversation_history_auto_{datetime.now():%Y%m%d_%H%M%S}.json"
|
| 852 |
with open(fn, "w", encoding="utf-8") as fp:
|
| 853 |
json.dump(st.session_state.messages, fp, ensure_ascii=False, indent=2)
|
| 854 |
except Exception as e:
|
|
|
|
| 864 |
|
| 865 |
# ββββββββββββββββββββββββββββββββ main ββββββββββββββββββββββββββββββββββββ
|
| 866 |
def main():
    """Entry point: launch the Streamlit application UI."""
    perplexity_app()


if __name__ == "__main__":
    main()
|