import streamlit as st
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
import time
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from collections import Counter
import json
import os
from datetime import datetime, timedelta
from openai import OpenAI  # new-style import (openai >= 1.0)
from dotenv import load_dotenv
import traceback
import plotly.graph_objects as go
import schedule
import threading
import matplotlib.pyplot as plt
import kss  # KSS instead of KoNLPy
from PIL import Image
import base64
from io import BytesIO
import logging
# Logging configuration
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler('/tmp/crawler.log')
    ]
)

# Optional word cloud support
try:
    from wordcloud import WordCloud
except ImportError:
    st.error("Please install the wordcloud package: pip install wordcloud")
    WordCloud = None
# Scheduler state class
class SchedulerState:
    def __init__(self):
        self.is_running = False
        self.thread = None
        self.last_run = None
        self.next_run = None
        self.scheduled_jobs = []
        self.scheduled_results = []

# Global scheduler state object (shared with the worker thread)
global_scheduler_state = SchedulerState()

# Session state for API key management
if 'openai_client' not in st.session_state:
    st.session_state.openai_client = None

# Try to load the API key from several sources
load_dotenv()  # try a .env file first
# Initialize the OpenAI client
def init_openai_client(api_key=None):
    try:
        if api_key:
            client = OpenAI(api_key=api_key)
            # Cheap sanity check that the API key is valid
            client.models.list()
            return client
        return None
    except Exception as e:
        st.error(f"API key initialization error: {str(e)}")
        return None

# 1. Check the environment variable
api_key = os.environ.get('OPENAI_API_KEY')
if api_key:
    st.session_state.openai_client = init_openai_client(api_key)

# 2. Check Streamlit secrets
if not st.session_state.openai_client:
    try:
        if 'OPENAI_API_KEY' in st.secrets:
            st.session_state.openai_client = init_openai_client(st.secrets['OPENAI_API_KEY'])
    except Exception:
        pass  # a missing secrets file is not an error
# NLTK data path - use nltk_data inside the current workspace
nltk_data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'nltk_data')
nltk.data.path.insert(0, nltk_data_path)

# Make sure the required NLTK data is present
try:
    nltk.data.find('tokenizers/punkt')
except LookupError:
    nltk.download('punkt', download_dir=nltk_data_path)

try:
    nltk.data.find('corpora/stopwords')
except LookupError:
    nltk.download('stopwords', download_dir=nltk_data_path)
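# Assumption: on newer NLTK releases (3.8.2+) word_tokenize also needs the
# separate 'punkt_tab' resource, so a defensive extra download is attempted
# here; older releases simply don't ship it and the inner download fails quietly.
try:
    nltk.data.find('tokenizers/punkt_tab')
except LookupError:
    try:
        nltk.download('punkt_tab', download_dir=nltk_data_path)
    except Exception:
        pass  # resource does not exist on older NLTK versions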
# Page configuration
st.set_page_config(page_title="News Article Tool", page_icon="📰", layout="wide")

# Sidebar: menu plus an API key input field
with st.sidebar:
    st.title("News Article Tool")
    menu = st.radio(
        "Select menu",
        ["News Article Crawling", "Article Analysis", "Generate New Article", "Schedule News Collection"]
    )
    st.divider()
    api_key = st.text_input("Enter OpenAI API key", type="password")
    if api_key:
        client = init_openai_client(api_key)
        if client:
            st.session_state.openai_client = client
            st.success("API key set successfully!")
        else:
            st.error("Invalid API key.")
# Load previously saved articles
def load_saved_articles():
    if os.path.exists('/tmp/saved_articles/articles.json'):
        with open('/tmp/saved_articles/articles.json', 'r', encoding='utf-8') as f:
            return json.load(f)
    return []

# Save articles to disk
def save_articles(articles):
    os.makedirs('/tmp/saved_articles', exist_ok=True)
    with open('/tmp/saved_articles/articles.json', 'w', encoding='utf-8') as f:
        json.dump(articles, f, ensure_ascii=False, indent=2)
def crawl_naver_news(keyword, num_articles=5):
    """
    Collect news articles from a Naver news search.
    """
    logging.info(f"Crawl started: keyword={keyword}, num_articles={num_articles}")
    url = f"https://search.naver.com/search.naver?where=news&query={keyword}"
    results = []

    try:
        # Request the search page
        logging.info(f"Request URL: {url}")
        response = requests.get(url)
        logging.info(f"Response status code: {response.status_code}")
        soup = BeautifulSoup(response.text, 'html.parser')

        # Find the news items
        news_items = soup.select('div.sds-comps-base-layout.sds-comps-full-layout')
        logging.info(f"News items found: {len(news_items)}")

        # Extract the fields from each news item
        for i, item in enumerate(news_items):
            if i >= num_articles:
                break
            try:
                # Title and link
                title_element = item.select_one('a.X0fMYp2dHd0TCUS2hjww span')
                if not title_element:
                    continue
                title = title_element.text.strip()
                link_element = item.select_one('a.X0fMYp2dHd0TCUS2hjww')
                link = link_element['href'] if link_element else ""

                # Press / source
                press_element = item.select_one('div.sds-comps-profile-info-title span.sds-comps-text-type-body2')
                source = press_element.text.strip() if press_element else "Unknown"

                # Date
                date_element = item.select_one('span.r0VOr')
                date = date_element.text.strip() if date_element else "Unknown"

                # Preview text
                desc_element = item.select_one('a.X0fMYp2dHd0TCUS2hjww.IaKmSOGPdofdPwPE6cyU > span')
                description = desc_element.text.strip() if desc_element else "No content"

                results.append({
                    'title': title,
                    'link': link,
                    'description': description,
                    'source': source,
                    'date': date,
                    'content': ""
                })
                logging.info(f"Article extracted: {title}")
            except Exception as e:
                logging.error(f"Error while extracting article info: {str(e)}", exc_info=True)
                continue
    except Exception as e:
        logging.error(f"Error while requesting the page: {str(e)}", exc_info=True)

    logging.info(f"Crawl finished: {len(results)} articles collected")
    return results
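# Note: the CSS class names used above (e.g. 'X0fMYp2dHd0TCUS2hjww') are
# auto-generated classes from one snapshot of Naver's search markup. When the
# markup changes, select() just returns an empty list and the crawl silently
# yields zero articles, so re-inspect the page and update the selectors if
# that happens. A browser-like User-Agent also tends to help, since the
# default python-requests UA may be served different markup - a sketch
# (the header value is only an example):
#
#   headers = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64)"}
#   response = requests.get(url, headers=headers, timeout=10)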
# Fetch the full text of an article
def get_article_content(url):
    logging.info(f"Fetching article body: {url}")
    try:
        response = requests.get(url, timeout=5)
        logging.info(f"Body request status code: {response.status_code}")
        soup = BeautifulSoup(response.text, 'html.parser')

        # Naver News article body
        content = soup.select_one('#dic_area')
        if content:
            text = content.text.strip()
            text = re.sub(r'\s+', ' ', text)
            logging.info("Naver News body extracted")
            return text

        # Common body containers on other news sites
        content = soup.select_one('.article_body, .article-body, .article-content, .news-content-inner')
        if content:
            text = content.text.strip()
            text = re.sub(r'\s+', ' ', text)
            logging.info("Generic news body extracted")
            return text

        logging.warning("Could not find the article body")
        return "Could not fetch the article body."
    except Exception as e:
        logging.error(f"Error fetching article body: {str(e)}", exc_info=True)
        return f"Error: {str(e)}"
# Keyword frequency analysis (using KSS for sentence splitting)
def analyze_keywords(text, top_n=10):
    # A small Korean stopword list
    korean_stopwords = ['이', '그', '저', '것', '및', '등', '를', '을', '은', '에서', '의', '으로', '로']

    # Sentence splitting and tokenization via KSS
    try:
        sentences = kss.split_sentences(text)
        tokens = []
        for sentence in sentences:
            # Simple whitespace tokenization
            tokens.extend(sentence.split())
    except Exception:
        # Fall back to plain whitespace tokenization if KSS fails
        tokens = text.split()

    tokens = [word for word in tokens if word.isalnum() and len(word) > 1 and word not in korean_stopwords]

    word_count = Counter(tokens)
    top_keywords = word_count.most_common(top_n)
    return top_keywords
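# Note: whitespace tokenization keeps Korean particles attached to their nouns
# ("인공지능이" and "인공지능을" count as different words), which skews the
# frequencies. If the extra dependency is acceptable, a morphological analyzer
# fixes this - a sketch using KoNLPy's Okt tagger (assumes konlpy and a JVM
# are installed):
#
#   from konlpy.tag import Okt
#   nouns = [w for w, pos in Okt().pos(text) if pos == "Noun" and len(w) > 1]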
# Keyword extraction for the word cloud
def extract_keywords_for_wordcloud(text, top_n=50):
    if not text or len(text.strip()) < 10:
        return {}

    try:
        try:
            tokens = word_tokenize(text.lower())
        except Exception as e:
            st.warning(f"Tokenizer error: {str(e)}")
            tokens = text.lower().split()

        stop_words = set()
        try:
            stop_words = set(stopwords.words('english'))
        except Exception:
            pass

        korea_stop_words = {
            '및', '등', '를', '이', '은', '가', '을', '는', '으로', '에서', '그', '또', '또는', '하는', '할', '하고',
            '있다', '이다', '위해', '것이다', '것은', '대한', '때문', '그리고', '하지만', '그러나', '그래서',
            '입니다', '합니다', '습니다', '요', '죠', '고', '과', '와', '도', '에', '의', '것', '들', '제', '저',
            '년', '월', '일', '시', '분', '초', '지난', '올해', '내년', '최근', '현재', '오늘', '내일', '어제',
            '오전', '오후', '부터', '까지', '에게', '께서', '이라고', '라고', '하며', '하면서', '따라', '통해',
            '관련', '한편', '특히', '가장', '매우', '더', '덜', '많이', '조금', '항상', '자주', '가끔', '거의',
            '전혀', '바로', '정말', '만약', '비롯한', '등을', '등이', '등의', '등과', '등도', '등에', '등에서',
            '기자', '뉴스', '사진', '연합뉴스', '뉴시스', '제공', '무단', '전재', '재배포', '금지', '앵커', '멘트',
            '일보', '데일리', '경제', '사회', '정치', '세계', '과학', '아이티', '닷컴', '씨넷', '블로터', '전자신문'
        }
        stop_words.update(korea_stop_words)

        # Keep tokens longer than one character that are not stopwords
        filtered_tokens = [word for word in tokens if len(word) > 1 and word not in stop_words]

        # Count word frequencies
        word_freq = {}
        for word in filtered_tokens:
            if word.isalnum():  # keep alphanumeric words only
                word_freq[word] = word_freq.get(word, 0) + 1

        # Sort by frequency and return the top n
        sorted_words = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)
        if not sorted_words:
            return {"data": 1, "analysis": 1, "news": 1}
        return dict(sorted_words[:top_n])
    except Exception as e:
        st.error(f"Keyword extraction error: {str(e)}")
        return {"data": 1, "analysis": 1, "news": 1}
# Word cloud generation
def generate_wordcloud(keywords_dict):
    if not WordCloud:
        st.warning("The wordcloud package is not installed.")
        return None
    try:
        # Default WordCloud object (no font path)
        wc = WordCloud(
            width=800,
            height=400,
            background_color='white',
            colormap='viridis',
            max_font_size=150,
            random_state=42
        )
        try:
            script_dir = os.path.dirname(os.path.abspath(__file__))
            # Name of the Korean font file expected in the repository root.
            # If you use a differently named font, edit this list (e.g. "YourFontName.ttf").
            possible_font_paths = ["NanumGothic.ttf"]
            font_path = None
            for path_segment in possible_font_paths:
                candidate = os.path.join(script_dir, path_segment)
                if os.path.exists(candidate):
                    font_path = candidate
                    break
            # Rebuild the WordCloud with the font path only when one was found
            if font_path:
                wc = WordCloud(
                    font_path=font_path,
                    width=800,
                    height=400,
                    background_color='white',
                    colormap='viridis',
                    max_font_size=150,
                    random_state=42
                ).generate_from_frequencies(keywords_dict)
            else:
                st.warning(f"The Korean font file ({', '.join(possible_font_paths)}) was not found in the script directory. The word cloud may render broken glyphs.")
        except Exception as e:
            print(f"Error while loading the font: {str(e)}")
            st.warning(f"Unexpected error while loading the font: {str(e)}")  # also surface the warning to the user

        # Return the wc object (font-enabled or the default one)
        return wc.generate_from_frequencies(keywords_dict) if isinstance(wc, WordCloud) else None
    except Exception as e:
        st.error(f"Error while generating the word cloud: {str(e)}")
        return None
# News data analysis
def analyze_news_content(news_df):
    if news_df.empty:
        return "No data available"

    results = {}

    # Counts by source
    if 'source' in news_df.columns:
        results['source_counts'] = news_df['source'].value_counts().to_dict()
    # Counts by date
    if 'date' in news_df.columns:
        results['date_counts'] = news_df['date'].value_counts().to_dict()

    # Keyword analysis
    all_text = " ".join(news_df['title'].fillna('') + " " + news_df['content'].fillna(''))
    if len(all_text.strip()) > 0:
        results['top_keywords_for_wordcloud'] = extract_keywords_for_wordcloud(all_text, top_n=50)
        results['top_keywords'] = analyze_keywords(all_text)
    else:
        results['top_keywords_for_wordcloud'] = {}
        results['top_keywords'] = []
    return results
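# Note: analyze_news_content() does not appear to be called from any of the
# menu screens below; it aggregates the same per-article helpers over a
# DataFrame and is presumably kept for batch analysis of the saved article
# JSON files.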
# Generate a new article with the OpenAI API (new client style)
def generate_article(original_content, prompt_text):
    try:
        if not st.session_state.openai_client:
            return "The OpenAI API key is not set."
        response = st.session_state.openai_client.chat.completions.create(
            model="gpt-4.1-nano",  # or any suitable model you have access to
            messages=[
                {"role": "system", "content": "You are a professional news reporter. Write a new article based on the given content."},
                {"role": "user", "content": f"Based on the following content, {prompt_text}\n\n{original_content[:1000]}"}
            ],
            max_tokens=2000
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Article generation error: {str(e)}"
# Generate one article from several titles
def generate_article_from_titles(titles, prompt_text):
    try:
        if not st.session_state.openai_client:
            return "The OpenAI API key is not set."
        titles_text = "\n".join([f"- {title}" for title in titles])
        response = st.session_state.openai_client.chat.completions.create(
            model="gpt-4.1-nano",  # or any suitable model you have access to
            messages=[
                {"role": "system", "content": "You are a professional news reporter. Write a new consolidated article based on the given news titles."},
                {"role": "user", "content": f"Based on the following news titles, {prompt_text}\n\n{titles_text}"}
            ],
            max_tokens=2000
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Article generation error: {str(e)}"
# Generate an image with the OpenAI API (new client style)
def generate_image(prompt):
    try:
        if not st.session_state.openai_client:
            return "The OpenAI API key is not set."
        # Generate the image with the GPT Image 1 model
        result = st.session_state.openai_client.images.generate(
            model="gpt-image-1",  # new model name
            prompt=prompt,
            size="1024x1024"
        )
        # Decode the base64 image data
        image_base64 = result.data[0].b64_json
        image_bytes = base64.b64decode(image_base64)

        # Wrap in a BytesIO object
        image = BytesIO(image_bytes)

        # Optionally resize via PIL
        pil_image = Image.open(image)
        pil_image = pil_image.resize((800, 800), Image.LANCZOS)

        # Re-encode as JPEG
        output = BytesIO()
        pil_image.save(output, format="JPEG", quality=80, optimize=True)
        output.seek(0)
        return output
    except Exception as e:
        return f"Image generation error: {str(e)}"
# Scheduler helper functions
def get_next_run_time(hour, minute):
    now = datetime.now()
    next_run = now.replace(hour=hour, minute=minute, second=0, microsecond=0)
    if next_run <= now:
        next_run += timedelta(days=1)
    return next_run
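# Worked example: if it is currently 2024-01-01 10:00, get_next_run_time(9, 30)
# rolls over to 2024-01-02 09:30, while get_next_run_time(23, 0) stays on
# 2024-01-01 23:00 because that time has not passed yet.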
def run_scheduled_task():
    try:
        while global_scheduler_state.is_running:
            schedule.run_pending()
            time.sleep(1)
    except Exception as e:
        print(f"Scheduler error: {e}")
        traceback.print_exc()
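# Note: this loop is the target of the daemon thread created in
# start_scheduler() below. Streamlit reruns the script on every interaction
# but keeps the process alive, so the thread survives reruns and only stops
# when is_running is cleared. Streamlit UI calls (st.*) are not reliable from
# a background thread, which is why perform_news_task() reports progress via
# logging and files rather than by touching the UI.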
def perform_news_task(task_type, keyword, num_articles, file_prefix):
    logging.info(f"Scheduled task started: {task_type}, keyword={keyword}")
    try:
        articles = crawl_naver_news(keyword, num_articles)
        logging.info(f"Articles collected: {len(articles)}")

        # Fetch the article bodies
        for i, article in enumerate(articles):
            logging.info(f"Fetching body {i+1}/{len(articles)}: {article['title']}")
            article['content'] = get_article_content(article['link'])
            time.sleep(0.5)  # avoid hammering the servers

        # Save the results
        os.makedirs('/tmp/scheduled_news', exist_ok=True)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"/tmp/scheduled_news/{file_prefix}_{task_type}_{timestamp}.json"
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(articles, f, ensure_ascii=False, indent=2)
        logging.info(f"Results saved: {filename}")

        global_scheduler_state.last_run = datetime.now()
        print(f"{datetime.now()} - {task_type} news collection finished: {keyword}")

        # Record the run in the global state
        result_item = {
            'task_type': task_type,
            'keyword': keyword,
            'timestamp': timestamp,
            'num_articles': len(articles),
            'filename': filename
        }
        global_scheduler_state.scheduled_results.append(result_item)
    except Exception as e:
        logging.error(f"Error while running the task: {str(e)}", exc_info=True)
        traceback.print_exc()
def start_scheduler(daily_tasks, interval_tasks):
    if not global_scheduler_state.is_running:
        schedule.clear()
        global_scheduler_state.scheduled_jobs = []

        # Register the daily tasks
        for task in daily_tasks:
            hour = task['hour']
            minute = task['minute']
            keyword = task['keyword']
            num_articles = task['num_articles']
            job_id = f"daily_{keyword}_{hour}_{minute}"
            schedule.every().day.at(f"{hour:02d}:{minute:02d}").do(
                perform_news_task, "daily", keyword, num_articles, job_id
            ).tag(job_id)
            global_scheduler_state.scheduled_jobs.append({
                'id': job_id,
                'type': 'daily',
                'time': f"{hour:02d}:{minute:02d}",
                'keyword': keyword,
                'num_articles': num_articles
            })

        # Register the interval tasks
        for task in interval_tasks:
            interval_minutes = task['interval_minutes']
            keyword = task['keyword']
            num_articles = task['num_articles']
            run_immediately = task['run_immediately']
            job_id = f"interval_{keyword}_{interval_minutes}"
            if run_immediately:
                # Run once right away
                perform_news_task("interval", keyword, num_articles, job_id)
            # Then repeat every N minutes
            schedule.every(interval_minutes).minutes.do(
                perform_news_task, "interval", keyword, num_articles, job_id
            ).tag(job_id)
            global_scheduler_state.scheduled_jobs.append({
                'id': job_id,
                'type': 'interval',
                'interval': f"every {interval_minutes} min",
                'keyword': keyword,
                'num_articles': num_articles,
                'run_immediately': run_immediately
            })

        # Compute the next run time
        next_run = schedule.next_run()
        if next_run:
            global_scheduler_state.next_run = next_run

        # Start the scheduler thread
        global_scheduler_state.is_running = True
        global_scheduler_state.thread = threading.Thread(
            target=run_scheduled_task, daemon=True
        )
        global_scheduler_state.thread.start()

        # Mirror the state into the session state (for the UI)
        if 'scheduler_status' not in st.session_state:
            st.session_state.scheduler_status = {}
        st.session_state.scheduler_status = {
            'is_running': global_scheduler_state.is_running,
            'last_run': global_scheduler_state.last_run,
            'next_run': global_scheduler_state.next_run,
            'jobs_count': len(global_scheduler_state.scheduled_jobs)
        }
def stop_scheduler():
    if global_scheduler_state.is_running:
        global_scheduler_state.is_running = False
        schedule.clear()
        if global_scheduler_state.thread:
            global_scheduler_state.thread.join(timeout=1)
        global_scheduler_state.next_run = None
        global_scheduler_state.scheduled_jobs = []

        # Update the UI state
        if 'scheduler_status' in st.session_state:
            st.session_state.scheduler_status['is_running'] = False
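# Note: join(timeout=1) can return while run_scheduled_task() is still inside
# its 1-second sleep; the loop then notices is_running == False on its next
# tick and exits on its own. Since the thread is started with daemon=True, a
# lingering iteration also cannot block process shutdown.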
# Render the screen for the selected menu
if menu == "News Article Crawling":
    st.header("News Article Crawling")

    keyword = st.text_input("Search keyword", "인공지능")
    num_articles = st.slider("Number of articles to fetch", min_value=1, max_value=20, value=5)

    if st.button("Fetch Articles"):
        with st.spinner("Collecting articles..."):
            articles = crawl_naver_news(keyword, num_articles)

            # Fetch the article bodies
            for i, article in enumerate(articles):
                st.progress((i + 1) / len(articles))
                article['content'] = get_article_content(article['link'])
                time.sleep(0.5)  # avoid hammering the servers

            # Save and display the results
            save_articles(articles)
            st.success(f"Collected {len(articles)} articles!")

            # Show the collected articles
            for article in articles:
                with st.expander(f"{article['title']} - {article['source']}"):
                    st.write(f"**Source:** {article['source']}")
                    st.write(f"**Date:** {article['date']}")
                    st.write(f"**Summary:** {article['description']}")
                    st.write(f"**Link:** {article['link']}")
                    st.write("**Body preview:**")
                    st.write(article['content'][:300] + "..." if len(article['content']) > 300 else article['content'])
elif menu == "Article Analysis":
    st.header("Article Analysis")

    articles = load_saved_articles()
    if not articles:
        st.warning("No saved articles. Please collect articles via the 'News Article Crawling' menu first.")
    else:
        # Article selection
        titles = [article['title'] for article in articles]
        selected_title = st.selectbox("Select an article to analyze", titles)
        selected_article = next((a for a in articles if a['title'] == selected_title), None)

        if selected_article:
            st.write(f"**Title:** {selected_article['title']}")
            st.write(f"**Source:** {selected_article['source']}")

            # Show the body
            with st.expander("View article body"):
                st.write(selected_article['content'])

            # Choose the analysis method
            analysis_type = st.radio(
                "Analysis method",
                ["Keyword Analysis", "Sentiment Analysis", "Text Statistics"]
            )
            if analysis_type == "Keyword Analysis":
                if st.button("Analyze Keywords"):
                    with st.spinner("Analyzing keywords..."):
                        keyword_tab1, keyword_tab2 = st.tabs(["Keyword Frequency", "Word Cloud"])

                        with keyword_tab1:
                            keywords = analyze_keywords(selected_article['content'])

                            # Visualization with Plotly
                            df = pd.DataFrame(keywords, columns=['word', 'count'])
                            fig = go.Figure(data=[
                                go.Bar(
                                    x=df['word'],
                                    y=df['count'],
                                    marker_color='rgb(55, 83, 109)'
                                )
                            ])
                            fig.update_layout(
                                title='Keyword Frequency',
                                xaxis_title='Keyword',
                                yaxis_title='Count',
                                height=500,
                                margin=dict(l=50, r=50, t=80, b=50)
                            )
                            st.plotly_chart(fig, use_container_width=True)

                            st.write("**Top keywords:**")
                            for word, count in keywords:
                                st.write(f"- {word}: {count} times")

                        with keyword_tab2:
                            keyword_dict = extract_keywords_for_wordcloud(selected_article['content'])
                            wc = generate_wordcloud(keyword_dict)

                            if wc:
                                fig, ax = plt.subplots(figsize=(10, 5))
                                ax.imshow(wc, interpolation='bilinear')
                                ax.axis('off')
                                st.pyplot(fig)

                                # Show the top 20 keywords
                                st.write("**Top 20 keywords:**")
                                top_keywords = sorted(keyword_dict.items(), key=lambda x: x[1], reverse=True)[:20]
                                keyword_df = pd.DataFrame(top_keywords, columns=['keyword', 'count'])
                                st.dataframe(keyword_df)
                            else:
                                st.error("Could not generate the word cloud.")
            elif analysis_type == "Text Statistics":
                if st.button("Compute Text Statistics"):
                    content = selected_article['content']

                    # Basic statistics
                    word_count = len(re.findall(r'\b\w+\b', content))
                    char_count = len(content)
                    try:
                        # Sentence splitting with KSS
                        sentences = kss.split_sentences(content)
                        sentence_count = len(sentences)
                    except Exception:
                        # Fall back to a simple regex split if KSS fails
                        sentence_count = len(re.split(r'[.!?]+', content))
                    avg_word_length = sum(len(word) for word in re.findall(r'\b\w+\b', content)) / word_count if word_count > 0 else 0
                    avg_sentence_length = word_count / sentence_count if sentence_count > 0 else 0

                    # Display the statistics
                    st.subheader("Text Statistics")
                    col1, col2, col3 = st.columns(3)
                    with col1:
                        st.metric("Words", f"{word_count:,}")
                    with col2:
                        st.metric("Characters", f"{char_count:,}")
                    with col3:
                        st.metric("Sentences", f"{sentence_count:,}")

                    col1, col2 = st.columns(2)
                    with col1:
                        st.metric("Avg. word length", f"{avg_word_length:.1f} chars")
                    with col2:
                        st.metric("Avg. sentence length", f"{avg_sentence_length:.1f} words")

                    # Text complexity score (sentence length and word length each contribute up to 5 points)
                    complexity_score = min(10, (avg_sentence_length / 10) * 5 + (avg_word_length / 5) * 5)
                    st.progress(complexity_score / 10)
                    st.write(f"Text complexity score: {complexity_score:.1f}/10")

                    # POS analysis removed (KoNLPy dependency dropped)
                    st.info("Detailed part-of-speech analysis is not currently supported.")
            elif analysis_type == "Sentiment Analysis":
                if st.button("Analyze Sentiment"):
                    if st.session_state.openai_client:
                        with st.spinner("Analyzing the article's sentiment..."):
                            try:
                                response = st.session_state.openai_client.chat.completions.create(
                                    model="gpt-4.1-mini",
                                    messages=[
                                        {"role": "system", "content": """You are an expert at analyzing the sentiment and tone of text.
Analyze the sentiment and tone of the following news article and respond strictly as JSON in this format:
{
    "sentiment": "positive/negative/neutral",
    "reason": "explanation of the reasoning...",
    "keywords": [
        {"word": "keyword1", "score": 8},
        {"word": "keyword2", "score": 7}
    ]
}"""},
                                        {"role": "user", "content": f"Analyze the following news article:\n\nTitle: {selected_article['title']}\n\nBody: {selected_article['content'][:1500]}"}
                                    ],
                                    max_tokens=800,
                                    response_format={"type": "json_object"}  # force a JSON response
                                )

                                # Inspect and log the raw response
                                content = response.choices[0].message.content
                                logging.info(f"API response: {content}")

                                # Parse the JSON
                                try:
                                    analysis_result = json.loads(content)
                                except json.JSONDecodeError as e:
                                    logging.error(f"JSON parse error: {str(e)}")
                                    logging.error(f"Content that failed to parse: {content}")
                                    st.error("Failed to parse the API response; the response format was invalid.")
                                    st.stop()  # st.stop() instead of return (we are not inside a function)
                                # Visualize the result
                                st.subheader("Sentiment Analysis Result")

                                # 1. Visual banner for the sentiment type
                                sentiment_type = analysis_result.get('sentiment', 'neutral')
                                col1, col2, col3 = st.columns([1, 3, 1])
                                with col2:
                                    if sentiment_type == "positive":
                                        st.markdown("""
                                        <div style="background-color:#DCEDC8; padding:20px; border-radius:10px; text-align:center;">
                                            <h1 style="color:#388E3C; font-size:28px;">😊 Positive Tone 😊</h1>
                                            <p style="font-size:16px;">Sentiment intensity: high</p>
                                        </div>
                                        """, unsafe_allow_html=True)
                                    elif sentiment_type == "negative":
                                        st.markdown("""
                                        <div style="background-color:#FFCDD2; padding:20px; border-radius:10px; text-align:center;">
                                            <h1 style="color:#D32F2F; font-size:28px;">😞 Negative Tone 😞</h1>
                                            <p style="font-size:16px;">Sentiment intensity: high</p>
                                        </div>
                                        """, unsafe_allow_html=True)
                                    else:
                                        st.markdown("""
                                        <div style="background-color:#E0E0E0; padding:20px; border-radius:10px; text-align:center;">
                                            <h1 style="color:#616161; font-size:28px;">😐 Neutral Tone 😐</h1>
                                            <p style="font-size:16px;">Sentiment intensity: medium</p>
                                        </div>
                                        """, unsafe_allow_html=True)
                                # 2. Reasoning
                                st.markdown("### Analysis Rationale")
                                st.markdown(f"<div style='background-color:#F5F5F5; padding:15px; border-radius:5px;'>{analysis_result.get('reason', '')}</div>", unsafe_allow_html=True)

                                # 3. Sentiment keyword visualization
                                st.markdown("### Key Sentiment Keywords")

                                # Prepare the keyword data (keyword_scores is also read by the
                                # summary metrics below, so initialize it even when no keywords
                                # come back)
                                keywords = analysis_result.get('keywords', [])
                                keyword_scores = []
                                if keywords:
                                    # Data for the chart
                                    keyword_names = [item.get('word', '') for item in keywords]
                                    keyword_scores = [item.get('score', 0) for item in keywords]

                                    # Radar chart
                                    fig = go.Figure()

                                    # Colors by sentiment
                                    if sentiment_type == "positive":
                                        fill_color = 'rgba(76, 175, 80, 0.3)'    # light green
                                        line_color = 'rgba(76, 175, 80, 1)'      # dark green
                                    elif sentiment_type == "negative":
                                        fill_color = 'rgba(244, 67, 54, 0.3)'    # light red
                                        line_color = 'rgba(244, 67, 54, 1)'      # dark red
                                    else:
                                        fill_color = 'rgba(158, 158, 158, 0.3)'  # light gray
                                        line_color = 'rgba(158, 158, 158, 1)'    # dark gray

                                    # Radar chart data (fill='toself' closes the polygon back to the first point)
                                    radar_keywords = keyword_names.copy()
                                    radar_scores = keyword_scores.copy()

                                    fig.add_trace(go.Scatterpolar(
                                        r=radar_scores,
                                        theta=radar_keywords,
                                        fill='toself',
                                        fillcolor=fill_color,
                                        line=dict(color=line_color, width=2),
                                        name='Sentiment keywords'
                                    ))

                                    # Radar chart layout
                                    fig.update_layout(
                                        polar=dict(
                                            radialaxis=dict(
                                                visible=True,
                                                range=[0, 10],
                                                tickmode='linear',
                                                tick0=0,
                                                dtick=2
                                            )
                                        ),
                                        showlegend=False,
                                        title={
                                            'text': 'Sentiment Keyword Radar',
                                            'y': 0.95,
                                            'x': 0.5,
                                            'xanchor': 'center',
                                            'yanchor': 'top'
                                        },
                                        height=500,
                                        width=500,
                                        margin=dict(l=80, r=80, t=80, b=80)
                                    )

                                    # Center the chart
                                    col1, col2, col3 = st.columns([1, 2, 1])
                                    with col2:
                                        st.plotly_chart(fig)
                                    # Keyword cards
                                    st.markdown("#### Keyword Details")
                                    cols = st.columns(min(len(keywords), 5))
                                    for i, keyword in enumerate(keywords):
                                        with cols[i % len(cols)]:
                                            word = keyword.get('word', '')
                                            score = keyword.get('score', 0)

                                            # Derive a color from the score
                                            r, g, b = 0, 0, 0
                                            if sentiment_type == "positive":
                                                g = min(200 + score * 5, 255)
                                                r = max(255 - score * 20, 100)
                                            elif sentiment_type == "negative":
                                                r = min(200 + score * 5, 255)
                                                g = max(255 - score * 20, 100)
                                            else:
                                                r = g = b = 128

                                            # Render the card
                                            st.markdown(f"""
                                            <div style="background-color:rgba({r},{g},{b},0.2); padding:10px; border-radius:5px; text-align:center; margin:5px;">
                                                <h3 style="margin:0;">{word}</h3>
                                                <div style="background-color:#E0E0E0; border-radius:3px; margin-top:5px;">
                                                    <div style="width:{score*10}%; background-color:rgba({r},{g},{b},0.8); height:10px; border-radius:3px;"></div>
                                                </div>
                                                <p style="margin:2px; font-size:12px;">Intensity: {score}/10</p>
                                            </div>
                                            """, unsafe_allow_html=True)
                                else:
                                    st.info("No keywords were extracted.")

                                # 4. Summary metrics
                                st.markdown("### Summary Metrics")
                                col1, col2, col3 = st.columns(3)
                                with col1:
                                    st.metric(label="Sentiment score", value=f"{7 if sentiment_type == 'positive' else 3 if sentiment_type == 'negative' else 5}/10")
                                with col2:
                                    st.metric(label="Keyword count", value=len(keywords))
                                with col3:
                                    avg_score = sum(keyword_scores) / len(keyword_scores) if keyword_scores else 0
                                    st.metric(label="Average intensity", value=f"{avg_score:.1f}/10")
                            except Exception as e:
                                st.error(f"Sentiment analysis error: {str(e)}")
                                st.error(traceback.format_exc())
                    else:
                        st.warning("Please set the OpenAI API key in the sidebar.")
elif menu == "Generate New Article":
    st.header("Generate New Article")

    articles = load_saved_articles()
    if not articles:
        st.warning("No saved articles. Please collect articles via the 'News Article Crawling' menu first.")
    else:
        # Two tabs: generate from a single article vs. from multiple titles
        tab1, tab2 = st.tabs(["From a Single Article", "From Multiple Titles"])

        with tab1:
            # Original flow: pick one source article
            titles = [article['title'] for article in articles]
            selected_title = st.selectbox("Select the source article", titles, key="single_article")
            selected_article = next((a for a in articles if a['title'] == selected_title), None)
            if selected_article:
                st.write(f"**Original title:** {selected_article['title']}")
                with st.expander("Original article content"):
                    st.write(selected_article['content'])
                prompt_text = """Rewrite the article below following these instructions.
Role: you are a newspaper reporter.
Task: write a press release about a recent event. The material must be fact-based, objective, and accurate.
Guidelines:
Write the article in a professional press-release format based on the provided information.
Make the headline reflect the topic clearly and catch readers' attention.
Keep the body accurate, concise, and persuasive.
Include quotes from people involved, presented as interview excerpts.
Use the information and guidelines above to produce an article in a professional press-release format."""
                # Option to also generate an image
                generate_image_too = st.checkbox("Also generate an image for the article", value=True, key="single_image")

                if st.button("Generate New Article", key="generate_single"):
                    if st.session_state.openai_client:
                        with st.spinner("Generating the article..."):
                            new_article = generate_article(selected_article['content'], prompt_text)
                            st.write("**Generated article:**")
                            st.write(new_article)

                            # Generate the image when the option is checked
                            if generate_image_too:
                                with st.spinner("Generating a related image..."):
                                    image_prompt = f"""Create an image based on the news headline "{selected_article['title']}".
The image should include:
- a composition that conveys the story
- text related to the article content
- a simple, clean treatment
"""
                                    # Generate the image
                                    image = generate_image(image_prompt)
                                    if isinstance(image, BytesIO):
                                        st.subheader("Generated image:")
                                        st.image(image, use_column_width=True)
                                    else:
                                        st.error(image)

                            # Option to save the generated article
                            if st.button("Save Generated Article", key="save_single"):
                                new_article_data = {
                                    'title': f"[Generated] {selected_article['title']}",
                                    'source': f"AI generated (original: {selected_article['source']})",
                                    'date': datetime.now().strftime("%Y-%m-%d %H:%M"),
                                    'description': new_article[:100] + "...",
                                    'link': "",
                                    'content': new_article
                                }
                                articles.append(new_article_data)
                                save_articles(articles)
                                st.success("The generated article has been saved!")
                    else:
                        st.warning("Please set the OpenAI API key in the sidebar.")
        with tab2:
            # New feature: generate one article from several titles
            st.subheader("Generate One Article from Multiple Titles")

            # A multiselect widget so several titles can be picked
            titles = [article['title'] for article in articles]
            selected_titles = st.multiselect("Select several article titles (2 or more recommended)", titles)

            if selected_titles:
                st.write(f"**Titles selected:** {len(selected_titles)}")
                with st.expander("Selected titles"):
                    for i, title in enumerate(selected_titles):
                        st.write(f"{i+1}. {title}")

                multi_prompt_text = """Combine the following news titles into one unified article.
Role: you are a newspaper reporter.
Task: identify the common theme across several news titles and write one consolidated press release from them.
Guidelines:
- Analyze the provided titles holistically and write a single coherent article.
- The headline must capture the core theme of all the provided titles.
- The body must cover every major topic the titles touch on.
- Include fictional quotes from people involved, presented as interview excerpts.
- Preserve the context of the provided titles while building a consistent narrative."""

                # Option to edit the prompt
                custom_prompt = st.checkbox("Write the prompt myself")
                if custom_prompt:
                    multi_prompt_text = st.text_area("Enter your prompt", multi_prompt_text, height=250)
                # Image generation option
                generate_multi_image = st.checkbox("Also generate an image for the article", value=True, key="multi_image")

                if st.button("Generate New Article", key="generate_multi"):
                    if st.session_state.openai_client:
                        if len(selected_titles) < 1:
                            st.error("Please select at least one title.")
                        else:
                            with st.spinner("Generating an article from the selected titles..."):
                                # Generate the new article from the selected titles
                                new_article = generate_article_from_titles(selected_titles, multi_prompt_text)
                                st.write("**Generated article:**")
                                st.write(new_article)

                                # Generate the image when the option is checked
                                if generate_multi_image:
                                    with st.spinner("Generating a related image..."):
                                        combined_titles = " / ".join(selected_titles[:3])  # use the first 3 titles at most
                                        image_prompt = f"""Create an image for an article that consolidates several news stories: "{combined_titles}".
The image should include:
- a composition that visualizes the common theme of the stories
- core keywords or concepts
- a simple, unified design
"""
                                        # Generate the image
                                        image = generate_image(image_prompt)
                                        if isinstance(image, BytesIO):
                                            st.subheader("Generated image:")
                                            st.image(image, use_column_width=True)
                                        else:
                                            st.error(image)

                                # Option to save the generated article
                                if st.button("Save Generated Article", key="save_multi"):
                                    # Build a combined title (first title plus a count of the rest)
                                    if len(selected_titles) > 1:
                                        combined_title = f"{selected_titles[0]} and {len(selected_titles)-1} more"
                                    else:
                                        combined_title = selected_titles[0]
                                    new_article_data = {
                                        'title': f"[Merged from multiple titles] {combined_title}",
                                        'source': "AI generated (merged from multiple titles)",
                                        'date': datetime.now().strftime("%Y-%m-%d %H:%M"),
                                        'description': new_article[:100] + "...",
                                        'link': "",
                                        'content': new_article
                                    }
                                    articles.append(new_article_data)
                                    save_articles(articles)
                                    st.success("The generated article has been saved!")
                    else:
                        st.warning("Please set the OpenAI API key in the sidebar.")
elif menu == "Schedule News Collection":
    st.header("Schedule News Collection")

    # Tabs
    tab1, tab2, tab3 = st.tabs(["Daily Schedule", "Interval Schedule", "Scheduler Status"])

    # Daily schedule tab
    with tab1:
        st.subheader("Collect Articles at a Fixed Time Every Day")

        # Keyword input
        daily_keyword = st.text_input("Search keyword", value="인공지능", key="daily_keyword")
        daily_num_articles = st.slider("Number of articles to collect", min_value=1, max_value=20, value=5, key="daily_num_articles")

        # Time of day
        daily_col1, daily_col2 = st.columns(2)
        with daily_col1:
            daily_hour = st.selectbox("Hour", range(24), format_func=lambda x: f"{x:02d}h", key="daily_hour")
        with daily_col2:
            daily_minute = st.selectbox("Minute", range(0, 60, 5), format_func=lambda x: f"{x:02d}m", key="daily_minute")

        # List of daily schedules
        if 'daily_tasks' not in st.session_state:
            st.session_state.daily_tasks = []

        if st.button("Add Daily Schedule"):
            st.session_state.daily_tasks.append({
                'hour': daily_hour,
                'minute': daily_minute,
                'keyword': daily_keyword,
                'num_articles': daily_num_articles
            })
            st.success(f"Daily schedule added: every day at {daily_hour:02d}:{daily_minute:02d} - '{daily_keyword}'")

        # Show the scheduled tasks
        if st.session_state.daily_tasks:
            st.subheader("Daily Schedules")
            for i, task in enumerate(st.session_state.daily_tasks):
                st.write(f"{i+1}. Every day at {task['hour']:02d}:{task['minute']:02d} - '{task['keyword']}' ({task['num_articles']} articles)")
            if st.button("Clear Daily Schedules"):
                st.session_state.daily_tasks = []
                st.warning("All daily schedules have been cleared.")
    # Interval schedule tab
    with tab2:
        st.subheader("Collect Articles at a Fixed Interval")

        # Keyword input
        interval_keyword = st.text_input("Search keyword", value="빅데이터", key="interval_keyword")
        interval_num_articles = st.slider("Number of articles to collect", min_value=1, max_value=20, value=5, key="interval_num_articles")

        # Interval length
        interval_minutes = st.number_input("Interval (minutes)", min_value=1, max_value=60*24, value=30, key="interval_minutes")

        # Run immediately?
        run_immediately = st.checkbox("Run immediately", value=True, help="If checked, the task runs as soon as the scheduler starts.")

        # List of interval schedules
        if 'interval_tasks' not in st.session_state:
            st.session_state.interval_tasks = []

        if st.button("Add Interval Schedule"):
            st.session_state.interval_tasks.append({
                'interval_minutes': interval_minutes,
                'keyword': interval_keyword,
                'num_articles': interval_num_articles,
                'run_immediately': run_immediately
            })
            st.success(f"Interval schedule added: every {interval_minutes} minutes - '{interval_keyword}'")

        # Show the scheduled tasks
        if st.session_state.interval_tasks:
            st.subheader("Interval Schedules")
            for i, task in enumerate(st.session_state.interval_tasks):
                immediate_text = "run immediately, then " if task['run_immediately'] else ""
                st.write(f"{i+1}. {immediate_text}every {task['interval_minutes']} minutes - '{task['keyword']}' ({task['num_articles']} articles)")
            if st.button("Clear Interval Schedules"):
                st.session_state.interval_tasks = []
                st.warning("All interval schedules have been cleared.")
    # Scheduler status tab
    with tab3:
        st.subheader("Scheduler Control and Status")

        # Log viewer at the top
        st.subheader("Live Log")
        log_container = st.empty()

        def update_logs():
            try:
                with open('/tmp/crawler.log', 'r') as f:
                    logs = f.readlines()
                    return ''.join(logs[-100:])  # show only the last 100 lines
            except Exception as e:
                return f"Could not read the log file: {str(e)}"

        # Auto-refresh of the log
        if st.checkbox("Auto-refresh log", value=True):
            log_content = update_logs()
            log_container.text_area("Recent log", value=log_content, height=400)
        else:
            if st.button("Refresh Log"):
                log_content = update_logs()
                log_container.text_area("Recent log", value=log_content, height=400)
        st.divider()

        # Scheduler controls
        col1, col2 = st.columns(2)
        with col1:
            # Start/stop buttons
            if not global_scheduler_state.is_running:
                if st.button("Start Scheduler"):
                    if not st.session_state.daily_tasks and not st.session_state.interval_tasks:
                        st.error("No scheduled tasks. Please add a daily or interval schedule first.")
                    else:
                        start_scheduler(st.session_state.daily_tasks, st.session_state.interval_tasks)
                        st.success("The scheduler has been started.")
            else:
                if st.button("Stop Scheduler"):
                    stop_scheduler()
                    st.warning("The scheduler has been stopped.")

        with col2:
            # Scheduler status
            if 'scheduler_status' in st.session_state:
                st.write(f"Status: {'running' if global_scheduler_state.is_running else 'stopped'}")
                if global_scheduler_state.last_run:
                    st.write(f"Last run: {global_scheduler_state.last_run.strftime('%Y-%m-%d %H:%M:%S')}")
                if global_scheduler_state.next_run and global_scheduler_state.is_running:
                    st.write(f"Next run: {global_scheduler_state.next_run.strftime('%Y-%m-%d %H:%M:%S')}")
            else:
                st.write("Status: stopped")
        # Registered jobs
        if global_scheduler_state.scheduled_jobs:
            st.subheader("Currently Registered Jobs")
            for i, job in enumerate(global_scheduler_state.scheduled_jobs):
                if job['type'] == 'daily':
                    st.write(f"{i+1}. [Daily] every day at {job['time']} - '{job['keyword']}' ({job['num_articles']} articles)")
                else:
                    immediate_text = "[run immediately, then] " if job.get('run_immediately', False) else ""
                    st.write(f"{i+1}. [Interval] {immediate_text}{job['interval']} - '{job['keyword']}' ({job['num_articles']} articles)")

        # Scheduler run results
        if global_scheduler_state.scheduled_results:
            st.subheader("Scheduler Run Results")

            # Copy the results before rendering
            results_for_display = global_scheduler_state.scheduled_results.copy()
            if results_for_display:
                result_df = pd.DataFrame(results_for_display)
                result_df['Run Time'] = result_df['timestamp'].apply(lambda x: datetime.strptime(x, "%Y%m%d_%H%M%S").strftime("%Y-%m-%d %H:%M:%S"))
                result_df = result_df.rename(columns={
                    'task_type': 'Task Type',
                    'keyword': 'Keyword',
                    'num_articles': 'Articles',
                    'filename': 'File Name'
                })
                result_df['Task Type'] = result_df['Task Type'].apply(lambda x: 'Daily' if x == 'daily' else 'Interval')
                st.dataframe(
                    result_df[['Task Type', 'Keyword', 'Articles', 'Run Time', 'File Name']],
                    hide_index=True
                )
        # Browse the collected files
        if os.path.exists('/tmp/scheduled_news'):
            files = [f for f in os.listdir('/tmp/scheduled_news') if f.endswith('.json')]
            if files:
                st.subheader("Open Collected Files")
                selected_file = st.selectbox("Select a file", files, index=len(files)-1)
                if selected_file and st.button("View File Contents"):
                    with open(os.path.join('/tmp/scheduled_news', selected_file), 'r', encoding='utf-8') as f:
                        articles = json.load(f)
                    st.write(f"**File name:** {selected_file}")
                    st.write(f"**Articles collected:** {len(articles)}")
                    for article in articles:
                        with st.expander(f"{article['title']} - {article['source']}"):
                            st.write(f"**Source:** {article['source']}")
                            st.write(f"**Date:** {article['date']}")
                            st.write(f"**Link:** {article['link']}")
                            st.write("**Body:**")
                            st.write(article['content'][:500] + "..." if len(article['content']) > 500 else article['content'])
# Footer
st.markdown("---")
st.markdown("© News Article Tool @conanssam")