Understanding Analysis Results

Learn how to retrieve, parse, and act on CallCov's AI call analysis results.

Overview

CallCov analyzes every call and returns structured insights in three key areas:

  1. Compliance - Regulatory compliance and script adherence
  2. Quality - Customer experience and agent performance
  3. Coaching - Actionable improvement suggestions

Retrieving Results

Get an Analysis by ID

Once an analysis is complete, retrieve the full results using the analysis ID:

import requests

API_KEY = "your_api_key_here"
API_URL = "https://api.callcov.com/api/v1"

def get_analysis_results(analysis_id):
    """Retrieve complete analysis results"""
    headers = {"X-API-Key": API_KEY}
    response = requests.get(
        f"{API_URL}/analysis/{analysis_id}",
        headers=headers
    )
    if response.status_code == 200:
        return response.json()
    else:
        raise Exception(f"Error: {response.status_code} - {response.text}")

# Usage
results = get_analysis_results("550e8400-e29b-41d4-a716-446655440000")
print(f"Analysis Status: {results['status']}")
print(f"Call Duration: {results['audio']['duration_seconds']}s")

Understanding the Result Structure

A complete analysis result includes:

{
  "id": "550e8400-e29b-41d4-a716-446655440000",
  "object": "analysis",
  "created": 1642248000,
  "status": "completed",
  "call": {
    "agent_id": "agent_001",
    "contact_id": "customer_12345",
    "call_type": "inbound",
    "metadata": {
      "department": "sales",
      "campaign_id": "spring_2024"
    }
  },
  "audio": {
    "url": "https://s3.amazonaws.com/callcov/...",
    "duration_seconds": 125.5,
    "format": "wav"
  },
  "transcript": {
    "text": "Full transcript of the conversation...",
    "segments": [
      {
        "speaker": "Agent",
        "text": "Hello, thank you for calling.",
        "start": 0.0,
        "end": 2.5,
        "confidence": 0.98
      }
    ]
  },
  "results": {
    "compliance": { ... },
    "quality": { ... },
    "coaching": { ... }
  }
}
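Each transcript segment carries a speaker label and timestamps, which is useful for locating quotes flagged elsewhere in the results. A short sketch that prints the conversation turn by turn:

def print_transcript(results):
    """Print each transcript segment with its speaker and timing"""
    for segment in results['transcript']['segments']:
        start, end = segment['start'], segment['end']
        print(f"[{start:6.1f}s - {end:6.1f}s] {segment['speaker']}: {segment['text']}")

print_transcript(results)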

Working with Compliance Results

Compliance results identify regulatory violations and script adherence:

def check_compliance_violations(results):
    """Identify and categorize compliance issues"""
    compliance = results['results']['compliance']
    violations = []

    # Check required disclosures
    if not compliance['disclosures']['privacy_notice']:
        violations.append({
            'type': 'missing_disclosure',
            'severity': 'high',
            'message': 'Privacy notice not provided',
            'timestamp': None  # Absence detection
        })

    # Check prohibited language
    for violation in compliance['violations']:
        if violation['type'] == 'prohibited_language':
            violations.append({
                'type': 'prohibited_language',
                'severity': violation['severity'],
                'message': violation['description'],
                'timestamp': violation['timestamp'],
                'quote': violation['quote']
            })

    # Script compliance
    script_score = compliance['script_adherence']['score']
    if script_score < 0.8:  # Below 80% adherence
        violations.append({
            'type': 'script_deviation',
            'severity': 'medium',
            'message': f'Script adherence at {script_score:.0%}',
            'missed_points': compliance['script_adherence']['missed_points']
        })

    return violations

# Usage
violations = check_compliance_violations(results)
if violations:
    print(f"⚠️ Found {len(violations)} compliance issues:")
    for v in violations:
        print(f"  [{v['severity'].upper()}] {v['message']}")

Compliance Result Fields

Field | Type | Description
disclosures.privacy_notice | boolean | Privacy policy mentioned
disclosures.call_recording | boolean | Recording disclosure given
disclosures.tcpa_compliance | boolean | TCPA requirements met
violations | array | List of detected violations
script_adherence.score | float | Script compliance score, 0-1
script_adherence.missed_points | array | Required script points not covered
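All three disclosure fields are booleans, so the missing-disclosure check above generalizes beyond the privacy notice. A minimal sketch that reports every disclosure not given (the human-readable labels are our own):

DISCLOSURE_LABELS = {
    'privacy_notice': 'Privacy notice',
    'call_recording': 'Call recording disclosure',
    'tcpa_compliance': 'TCPA requirements',
}

def missing_disclosures(results):
    """Return labels for all required disclosures that were not given"""
    disclosures = results['results']['compliance']['disclosures']
    return [label for key, label in DISCLOSURE_LABELS.items()
            if not disclosures.get(key)]

for label in missing_disclosures(results):
    print(f"Missing disclosure: {label}")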

Working with Quality Results

Quality metrics measure customer experience and agent performance:

def generate_quality_scorecard(results):
    """Create agent scorecard from quality metrics"""
    quality = results['results']['quality']
    scorecard = {
        'agent_id': results['call']['agent_id'],
        'call_id': results['id'],
        'overall_score': quality['overall_score'],
        'metrics': {}
    }

    # Customer sentiment
    sentiment = quality['customer_sentiment']
    scorecard['metrics']['customer_satisfaction'] = {
        'score': sentiment['score'],  # -1 to 1
        'label': sentiment['label'],  # 'positive', 'neutral', 'negative'
        'rating': convert_sentiment_to_rating(sentiment['score'])
    }

    # Agent performance
    performance = quality['agent_performance']
    scorecard['metrics']['professionalism'] = performance['professionalism']
    scorecard['metrics']['empathy'] = performance['empathy']
    scorecard['metrics']['problem_solving'] = performance['problem_solving']
    scorecard['metrics']['communication_clarity'] = performance['communication_clarity']

    # Call handling
    handling = quality['call_handling']
    scorecard['metrics']['resolution_achieved'] = handling['resolution_achieved']
    scorecard['metrics']['hold_time_appropriate'] = handling['hold_time_seconds'] < 60
    scorecard['metrics']['transfer_avoided'] = not handling['transferred']

    return scorecard

def convert_sentiment_to_rating(score):
    """Convert -1 to 1 score to 1-5 star rating"""
    # -1.0 to -0.6: 1 star
    # -0.6 to -0.2: 2 stars
    # -0.2 to  0.2: 3 stars
    #  0.2 to  0.6: 4 stars
    #  0.6 to  1.0: 5 stars
    return min(5, max(1, int((score + 1) * 2.5) + 1))

# Usage
scorecard = generate_quality_scorecard(results)
print(f"Agent: {scorecard['agent_id']}")
print(f"Overall Score: {scorecard['overall_score']:.0%}")
print(f"Customer Satisfaction: {scorecard['metrics']['customer_satisfaction']['rating']}/5 stars")

Quality Result Fields

Field | Type | Description
overall_score | float | Composite quality score, 0-1
customer_sentiment.score | float | Sentiment score, -1 to 1
customer_sentiment.label | string | positive, neutral, negative
agent_performance.professionalism | float | Professionalism rating, 0-1
agent_performance.empathy | float | Empathy rating, 0-1
agent_performance.problem_solving | float | Problem-solving rating, 0-1
call_handling.resolution_achieved | boolean | Issue resolved on the call
call_handling.hold_time_seconds | integer | Total time on hold

Working with Coaching Insights

Coaching insights provide actionable feedback for agent improvement:

def extract_coaching_opportunities(results):
    """Extract prioritized coaching points"""
    coaching = results['results']['coaching']
    opportunities = {
        'strengths': [],
        'improvements': [],
        'critical_issues': []
    }

    # Strengths to reinforce
    for strength in coaching['strengths']:
        opportunities['strengths'].append({
            'area': strength['category'],
            'description': strength['description'],
            'example': strength['quote'],
            'timestamp': strength['timestamp']
        })

    # Areas for improvement
    for improvement in coaching['improvements']:
        opportunities['improvements'].append({
            'area': improvement['category'],
            'current_behavior': improvement['what_happened'],
            'recommended_approach': improvement['what_to_do'],
            'example': improvement['quote'],
            'priority': improvement['priority'],  # 'high', 'medium', 'low'
            'timestamp': improvement['timestamp']
        })

    # Critical issues requiring immediate attention
    for issue in coaching['critical_issues']:
        opportunities['critical_issues'].append({
            'area': issue['category'],
            'description': issue['description'],
            'severity': issue['severity'],
            'example': issue['quote'],
            'timestamp': issue['timestamp']
        })

    return opportunities

# Usage
coaching = extract_coaching_opportunities(results)
print("\n🎯 Coaching Summary:")
print(f"  Strengths: {len(coaching['strengths'])}")
print(f"  Improvements: {len(coaching['improvements'])}")
print(f"  Critical Issues: {len(coaching['critical_issues'])}")

if coaching['critical_issues']:
    print("\n⚠️ Critical Issues Requiring Immediate Attention:")
    for issue in coaching['critical_issues']:
        print(f"  • {issue['description']}")
        print(f"    Quote: \"{issue['example']}\" at {issue['timestamp']}s")

Coaching Result Fields

Field | Type | Description
strengths | array | Positive behaviors to reinforce
improvements | array | Development areas
critical_issues | array | Urgent problems requiring attention
improvements[].priority | string | high, medium, low
improvements[].what_happened | string | Description of the current behavior
improvements[].what_to_do | string | Recommended alternative approach
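Since each improvement carries a priority of high, medium, or low, coaching queues are straightforward to order. A small sketch that sorts improvements so high-priority items surface first:

PRIORITY_ORDER = {'high': 0, 'medium': 1, 'low': 2}

def prioritized_improvements(opportunities):
    """Sort improvement items so high-priority coaching comes first"""
    return sorted(opportunities['improvements'],
                  key=lambda item: PRIORITY_ORDER.get(item['priority'], 3))

for item in prioritized_improvements(coaching):
    print(f"[{item['priority'].upper()}] {item['area']}: {item['recommended_approach']}")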

Best Practices

1. Cache Results Appropriately

Avoid unnecessary API calls by caching completed analyses:

from datetime import datetime, timedelta

# In-memory cache with TTL
results_cache = {}

def get_cached_results(analysis_id, ttl_hours=24):
    """Get results with caching"""
    if analysis_id in results_cache:
        cached_data, cached_time = results_cache[analysis_id]
        if datetime.now() - cached_time < timedelta(hours=ttl_hours):
            return cached_data

    # Fetch fresh data
    results = get_analysis_results(analysis_id)

    # Cache only completed analyses
    if results['status'] == 'completed':
        results_cache[analysis_id] = (results, datetime.now())

    return results

2. Handle Partial Results

Process results even when some sections are unavailable:

def safe_extract_metrics(results):
    """Safely extract metrics with fallbacks"""
    metrics = {}

    try:
        metrics['compliance_score'] = results['results']['compliance']['overall_score']
    except (KeyError, TypeError):
        metrics['compliance_score'] = None

    try:
        metrics['quality_score'] = results['results']['quality']['overall_score']
    except (KeyError, TypeError):
        metrics['quality_score'] = None

    return metrics

3. Aggregate Results for Reporting

Combine multiple analyses for team reporting or trend analysis:

def aggregate_team_performance(analysis_ids):
    """Generate team-level metrics"""
    all_results = [get_analysis_results(aid) for aid in analysis_ids]

    team_metrics = {
        'total_calls': len(all_results),
        'avg_quality_score': 0,
        'avg_compliance_score': 0,
        'total_violations': 0,
        'resolution_rate': 0
    }

    # Guard against division by zero on empty input
    if not all_results:
        return team_metrics

    for results in all_results:
        quality = results['results']['quality']
        compliance = results['results']['compliance']

        team_metrics['avg_quality_score'] += quality['overall_score']
        team_metrics['avg_compliance_score'] += compliance['overall_score']
        team_metrics['total_violations'] += len(compliance['violations'])

        if quality['call_handling']['resolution_achieved']:
            team_metrics['resolution_rate'] += 1

    # Calculate averages
    team_metrics['avg_quality_score'] /= len(all_results)
    team_metrics['avg_compliance_score'] /= len(all_results)
    team_metrics['resolution_rate'] /= len(all_results)

    return team_metrics
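A usage sketch, matching the pattern of the earlier examples (the IDs are placeholders):

# Usage
team_metrics = aggregate_team_performance([
    "550e8400-e29b-41d4-a716-446655440000",
    # ... more analysis IDs
])
print(f"Calls analyzed: {team_metrics['total_calls']}")
print(f"Avg quality: {team_metrics['avg_quality_score']:.0%}")
print(f"Resolution rate: {team_metrics['resolution_rate']:.0%}")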

4. Filter Results by Criteria

Extract specific insights based on business rules:

def find_calls_needing_review(analysis_ids):
    """Identify calls requiring manager review"""
    flagged_calls = []

    for analysis_id in analysis_ids:
        results = get_analysis_results(analysis_id)

        # Flag criteria
        quality_score = results['results']['quality']['overall_score']
        compliance_violations = results['results']['compliance']['violations']
        critical_coaching = results['results']['coaching']['critical_issues']

        should_review = (
            quality_score < 0.6 or             # Quality below 60%
            len(compliance_violations) > 0 or  # Any compliance issues
            len(critical_coaching) > 0         # Critical coaching needed
        )

        if should_review:
            flagged_calls.append({
                'analysis_id': analysis_id,
                'agent_id': results['call']['agent_id'],
                'quality_score': quality_score,
                'violation_count': len(compliance_violations),
                'critical_issues': len(critical_coaching)
            })

    return flagged_calls

Common Use Cases

Generate an Agent Scorecard

Combine all metrics into a comprehensive scorecard:

def create_agent_scorecard(agent_id, date_range):
    """Generate comprehensive agent scorecard"""
    # Get all analyses for the agent in the date range (helper sketched below)
    analyses = get_analyses_for_agent(agent_id, date_range)

    scorecard = {
        'agent_id': agent_id,
        'period': date_range,
        'total_calls': len(analyses),
        'metrics': {
            'avg_quality': 0,
            'avg_compliance': 0,
            'resolution_rate': 0,
            'avg_customer_sentiment': 0
        },
        'strengths': [],
        'improvement_areas': []
    }

    # Guard against division by zero when the agent has no calls
    if not analyses:
        return scorecard

    # Aggregate metrics
    for analysis in analyses:
        results = get_analysis_results(analysis['id'])
        scorecard['metrics']['avg_quality'] += results['results']['quality']['overall_score']
        scorecard['metrics']['avg_compliance'] += results['results']['compliance']['overall_score']
        # ... aggregate other metrics

    # Calculate averages
    scorecard['metrics']['avg_quality'] /= len(analyses)
    scorecard['metrics']['avg_compliance'] /= len(analyses)

    return scorecard
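get_analyses_for_agent is not defined in this guide. A hypothetical sketch, assuming the API exposes a list endpoint filterable by agent and date range (the endpoint path, query parameters, and response envelope below are assumptions, not documented API surface):

def get_analyses_for_agent(agent_id, date_range):
    """Hypothetical helper: list analyses for one agent in a date range.
    Assumes a GET /analysis list endpoint with agent_id/start/end filters
    and a (start, end) tuple of ISO-8601 dates for date_range."""
    headers = {"X-API-Key": API_KEY}
    params = {
        "agent_id": agent_id,   # assumed filter parameter
        "start": date_range[0],  # assumed ISO-8601 date
        "end": date_range[1],
    }
    response = requests.get(f"{API_URL}/analysis", headers=headers, params=params)
    response.raise_for_status()
    return response.json().get("data", [])  # assumed list envelope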

Compliance Audit Report

Identify all compliance violations across calls:

def generate_compliance_audit(date_range):
    """Generate compliance audit report"""
    # get_all_analyses is assumed to return analysis IDs for the date range
    analyses = get_all_analyses(date_range)

    audit = {
        'total_calls': len(analyses),
        'compliant_calls': 0,
        'violations_by_type': {},
        'high_risk_calls': []
    }

    for analysis_id in analyses:
        results = get_analysis_results(analysis_id)
        compliance = results['results']['compliance']

        violations = compliance['violations']
        if len(violations) == 0:
            audit['compliant_calls'] += 1
        else:
            # Categorize violations
            for violation in violations:
                vtype = violation['type']
                audit['violations_by_type'][vtype] = \
                    audit['violations_by_type'].get(vtype, 0) + 1

                if violation['severity'] == 'high':
                    audit['high_risk_calls'].append({
                        'analysis_id': analysis_id,
                        'agent_id': results['call']['agent_id'],
                        'violation': violation
                    })

    return audit
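A usage sketch that prints the audit summary (the date range format is a placeholder):

# Usage
audit = generate_compliance_audit(("2024-01-01", "2024-01-31"))
print(f"Compliant: {audit['compliant_calls']}/{audit['total_calls']} calls")
for vtype, count in sorted(audit['violations_by_type'].items(), key=lambda kv: -kv[1]):
    print(f"  {vtype}: {count}")
print(f"High-risk calls flagged: {len(audit['high_risk_calls'])}")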

Next Steps