import os

# Limit OpenMP/MKL thread pools before any numeric libraries are imported,
# so the pipeline does not oversubscribe the Space's CPU.
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"

import shutil
import uvicorn
from fastapi import FastAPI, UploadFile, File, HTTPException
from pipeline import UltraRobustCallAnalytics
import gradio as gr

# --- 1. App & Pipeline Setup ---
app = FastAPI(title="Call Center Analytics Engine")
pipeline_engine = None


# Build the pipeline once when the server starts.
@app.on_event("startup")
async def startup_event():
    global pipeline_engine

    # HF_TOKEN is expected to be provided as a Space secret / environment variable.
    token = os.environ.get("HF_TOKEN")
    print("🔍 DEBUG: Checking for Token...")

    if token is None:
        print("❌ ERROR: HF_TOKEN is None! The app cannot read the secret.")
        print(f"   Available Environment Keys: {[k for k in os.environ.keys() if 'HF' in k]}")
    elif len(token) < 10:
        print("❌ ERROR: Token seems too short or invalid.")
    else:
        print(f"✅ Token found! Starts with: {token[:4]}...")

    # Initialize the analytics pipeline.
    print("Initializing UltraRobustCallAnalytics...")
    pipeline_engine = UltraRobustCallAnalytics(hf_token=token)
    print("Pipeline initialized successfully!")


# --- 2. Existing API Endpoint (for programmatic access) ---
# Route path here is illustrative; adjust it to match whatever your clients call.
@app.post("/analyze")
async def analyze_audio(file: UploadFile = File(...)):
    if not pipeline_engine:
        raise HTTPException(status_code=500, detail="Engine not initialized")

    temp_path = f"temp_{file.filename}"
    try:
        # Spool the upload to a temporary file the pipeline can read.
        with open(temp_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)
        result = pipeline_engine.process_call(temp_path)
        return result
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        # Always clean up the temporary copy.
        if os.path.exists(temp_path):
            os.remove(temp_path)


# --- 3. Gradio Wrapper Function ---
def gradio_process(audio_filepath):
    """
    Wrapper function to connect Gradio input directly to the pipeline.
    Gradio handles the file upload and provides a temp filepath.
    """
    if pipeline_engine is None:
        return {"error": "System is still starting up... please wait a moment."}
    if audio_filepath is None:
        return {"message": "Please upload a file."}

    try:
        # Call your existing pipeline logic directly
        print(f"Processing file from Gradio: {audio_filepath}")
        result = pipeline_engine.process_call(audio_filepath)
        return result
    except Exception as e:
        return {"error": str(e)}


# --- 4. Build Gradio UI ---
with gr.Blocks(title="Call Center AI") as demo:
    gr.Markdown("# 🎧 Call Center Analytics Hub")
    gr.Markdown("Upload a call recording to extract speakers, text, and emotions.")

    with gr.Row():
        with gr.Column():
            # Input: Audio file (returns a filepath)
            audio_input = gr.Audio(type="filepath", label="Upload or Record Call")
            analyze_btn = gr.Button("Analyze Call", variant="primary")
        with gr.Column():
            # Output: JSON result
            result_output = gr.JSON(label="Analysis Results")

    analyze_btn.click(fn=gradio_process, inputs=audio_input, outputs=result_output)

# --- 5. Mount Gradio & Run ---
# This serves the Gradio UI at the root "/"
app = gr.mount_gradio_app(app, demo, path="/")

if __name__ == "__main__":
    # 7860 is the default port Hugging Face Spaces expects the web server to listen on.
    uvicorn.run(app, host="0.0.0.0", port=7860)