
Understanding Analysis Results

Learn how to retrieve, parse, and act on CallCov's AI-powered call analysis results.

Overview​

CallCov analyzes each call and returns structured, qualitative insights (rather than overall numerical scores) across three key areas:

  1. Compliance - Regulatory checks with pass/fail flags (identity verification, disclosures, prohibited phrases, sensitive data)
  2. Quality - Performance metrics with flags and counts (greeting, sentiment, empathy, interruptions, call structure)
  3. Coaching - Actionable recommendations with priorities and customer effort score (0-5 scale)

Note: CallCov does not provide overall numerical scores like "Quality: 85/100". Instead, it returns detailed, structured data with boolean flags, counts, and timestamps that you can use to build your own scoring system.
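
As a quick illustration, here is a minimal sketch of one way to roll those flags and counts into a score of your own. The function name and weights are hypothetical choices, not part of the API; the fields it reads are documented later in this guide.

def simple_call_score(results):
    """Hypothetical example: derive an internal 0-100 score from CallCov flags/counts."""
    compliance = results['results']['compliance']
    quality = results['results']['quality']

    score = 100  # The weights below are arbitrary; tune them to your own policy
    if compliance['identity_verification']['flagged']:
        score -= 25
    if compliance['mandatory_disclosures']['missing_disclosures']:
        score -= 25
    if quality['greeting']['flagged_missing']:
        score -= 10
    if quality['interruptions']['flagged']:
        score -= 10
    if not quality['resolution_marker']['confirmed']:
        score -= 20
    return max(score, 0)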

Retrieving Results​

Get Analysis by ID​

Once an analysis is complete, retrieve the full results using the analysis ID:

import requests

API_KEY = "your_api_key_here"
API_URL = "https://api.callcov.com/api/v1"

def get_analysis_results(analysis_id):
    """Retrieve complete analysis results"""
    headers = {"X-API-Key": API_KEY}
    response = requests.get(
        f"{API_URL}/analysis/{analysis_id}",
        headers=headers
    )
    if response.status_code == 200:
        return response.json()
    else:
        raise Exception(f"Error: {response.status_code} - {response.text}")

# Usage
results = get_analysis_results("550e8400-e29b-41d4-a716-446655440000")
print(f"Analysis Status: {results['status']}")
print(f"Call Duration: {results['audio']['duration_seconds']}s")

Understanding the Results Structure​

A complete analysis result includes:

{
  "id": "550e8400-e29b-41d4-a716-446655440000",
  "object": "analysis",
  "created": 1642248000,
  "status": "completed",
  "livemode": false,
  "call": {
    "agent_id": "agent_001",
    "contact_id": "customer_12345",
    "duration_ms": 125500,
    "duration_seconds": 125.5
  },
  "audio": {
    "url": "https://s3.amazonaws.com/callcov/...",
    "size_bytes": 1048576,
    "format": "wav",
    "duration_seconds": 125.5
  },
  "transcript": {
    "text": "Full conversation transcript...",
    "segments": [
      {
        "speaker": "A",
        "text": "Hello, thank you for calling.",
        "start": 0.0,
        "end": 2.5
      }
    ],
    "language": "en"
  },
  "results": {
    "compliance": {
      "identity_verification": {"verified": true, "timestamp": 0.0, "flagged": false},
      "mandatory_disclosures": {"read": true, "matched_disclosures": [...], "missing_disclosures": []},
      "purpose_declaration": {"disclosed": true, "interaction_number": 1, "flagged": false},
      "prohibited_phrases": {"used": false, "instances": []},
      "sensitive_data": {"shared": false, "instances": []}
    },
    "quality": {
      "sentiment_analysis": {...},
      "greeting": {"present": true, "timestamp": 0.0, "flagged_missing": false, "flagged_late": false},
      "call_structure": {...},
      "resolution_marker": {"confirmed": true, "timestamp": 20.0},
      "empathetic_language": {...},
      "objections": {...},
      "persuasion_markers": {...},
      "interruptions": {"count": 0, "instances": [], "flagged": false},
      "language_adequacy": {...}
    },
    "coaching": {
      "recommendations": [{"category": "greeting", "priority": "low", "description": "..."}],
      "customer_effort_score": 1
    }
  },
  "metadata": {
    "webhook_url": "https://your-app.com/webhooks/analysis",
    "completed_at": "2024-01-15T14:32:15.123Z",
    "processing_time_ms": 45230,
    "error_message": null
  }
}
Updated API Response Format

The code in this guide illustrates common integration patterns; for the exact, current API response structure, consult the API reference.

The actual API returns structured flags and counts, not numerical scores like overall_score or compliance.score.

Working with Compliance Results​

Compliance results contain pass/fail flags for regulatory requirements:

def check_compliance_violations(results):
    """Identify and categorize compliance issues from the documented flags"""
    compliance = results['results']['compliance']
    violations = []

    # Identity verification
    if compliance['identity_verification']['flagged']:
        violations.append({
            'type': 'identity_verification',
            'severity': 'high',
            'message': 'Agent identity not verified, or verified late',
            'timestamp': compliance['identity_verification']['timestamp']
        })

    # Mandatory disclosures
    missing = compliance['mandatory_disclosures']['missing_disclosures']
    if missing:
        violations.append({
            'type': 'missing_disclosure',
            'severity': 'high',
            'message': f"{len(missing)} required disclosure(s) missing",
            'timestamp': None  # Absence detection has no timestamp
        })

    # Purpose declaration
    if compliance['purpose_declaration']['flagged']:
        violations.append({
            'type': 'purpose_declaration',
            'severity': 'medium',
            'message': 'Call purpose not disclosed, or disclosed too late',
            'timestamp': None
        })

    # Prohibited language (per-instance keys may vary; see the API reference)
    for instance in compliance['prohibited_phrases']['instances']:
        violations.append({
            'type': 'prohibited_language',
            'severity': 'high',
            'message': 'Prohibited phrase used',
            'timestamp': instance.get('timestamp')
        })

    # Sensitive data
    for instance in compliance['sensitive_data']['instances']:
        violations.append({
            'type': 'sensitive_data',
            'severity': 'high',
            'message': 'Sensitive data shared on the call',
            'timestamp': instance.get('timestamp')
        })

    return violations

# Usage
violations = check_compliance_violations(results)
if violations:
    print(f"⚠️ Found {len(violations)} compliance issues:")
    for v in violations:
        print(f"  [{v['severity'].upper()}] {v['message']}")

Compliance Result Fields​

| Field | Type | Description |
| --- | --- | --- |
| identity_verification.verified | boolean | Agent stated their identity |
| identity_verification.timestamp | float | When identity was stated (seconds) |
| identity_verification.flagged | boolean | True if not verified or late |
| mandatory_disclosures.read | boolean | All required disclosures were read |
| mandatory_disclosures.matched_disclosures | array | Disclosures that were found |
| mandatory_disclosures.missing_disclosures | array | Disclosures that were missing |
| purpose_declaration.disclosed | boolean | Call purpose was disclosed |
| purpose_declaration.interaction_number | int | Which interaction disclosed purpose |
| purpose_declaration.flagged | boolean | True if not disclosed or too late |
| prohibited_phrases.used | boolean | Whether prohibited language was used |
| prohibited_phrases.instances | array | List of violations with timestamps |
| sensitive_data.shared | boolean | Whether sensitive data was shared |
| sensitive_data.instances | array | List of sensitive data instances |
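
For example, these fields can be condensed into a single per-call summary. This is only a sketch; compliance_summary and its fully_compliant rule are one possible convention, not part of the API.

def compliance_summary(results):
    """Condense the documented compliance checks into one pass/fail summary."""
    c = results['results']['compliance']
    return {
        'identity_verified': c['identity_verification']['verified'],
        'disclosures_read': c['mandatory_disclosures']['read'],
        'missing_disclosures': c['mandatory_disclosures']['missing_disclosures'],
        'purpose_disclosed': c['purpose_declaration']['disclosed'],
        'prohibited_phrases_used': c['prohibited_phrases']['used'],
        'sensitive_data_shared': c['sensitive_data']['shared'],
        # "Fully compliant" here means no check raised a flag; adjust to your policy
        'fully_compliant': not (
            c['identity_verification']['flagged']
            or c['mandatory_disclosures']['missing_disclosures']
            or c['purpose_declaration']['flagged']
            or c['prohibited_phrases']['used']
            or c['sensitive_data']['shared']
        ),
    }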

Working with Quality Results​

Quality metrics measure customer experience and agent performance:

def generate_quality_scorecard(results):
    """Create an agent scorecard from the documented quality flags and counts"""
    quality = results['results']['quality']

    scorecard = {
        'agent_id': results['call']['agent_id'],
        'call_id': results['id'],
        'metrics': {},
        'flags': []
    }

    # Customer sentiment (trajectory points are already on a 1-5 scale,
    # so no conversion to a star rating is needed)
    sentiment = quality['sentiment_analysis']
    scorecard['metrics']['sentiment_first_30s'] = sentiment['first_30s_avg']
    scorecard['metrics']['sentiment_last_30s'] = sentiment['last_30s_avg']
    if sentiment['flagged_drop']:
        scorecard['flags'].append('sentiment_drop')

    # Greeting and call structure
    if quality['greeting']['flagged_missing'] or quality['greeting']['flagged_late']:
        scorecard['flags'].append('greeting_issue')
    scorecard['metrics']['missing_stages'] = quality['call_structure']['missing_stages']

    # Resolution and empathy
    scorecard['metrics']['resolution_confirmed'] = quality['resolution_marker']['confirmed']
    if quality['empathetic_language']['flagged']:
        scorecard['flags'].append('low_empathy')

    # Interruptions and language
    scorecard['metrics']['interruption_count'] = quality['interruptions']['count']
    if quality['interruptions']['flagged']:
        scorecard['flags'].append('excessive_interruptions')
    if quality['language_adequacy']['flagged']:
        scorecard['flags'].append('language_issue')

    return scorecard

# Usage
scorecard = generate_quality_scorecard(results)
print(f"Agent: {scorecard['agent_id']}")
print(f"Resolution confirmed: {scorecard['metrics']['resolution_confirmed']}")
print(f"Flags raised: {', '.join(scorecard['flags']) or 'none'}")

Quality Result Fields​

| Field | Type | Description |
| --- | --- | --- |
| sentiment_analysis.trajectory | array | Sentiment points (1-5) throughout call |
| sentiment_analysis.first_30s_avg | int | Average sentiment in first 30 seconds |
| sentiment_analysis.last_30s_avg | int | Average sentiment in last 30 seconds |
| sentiment_analysis.drop_magnitude | int | Magnitude of sentiment drop |
| sentiment_analysis.flagged_drop | boolean | True if drop > threshold |
| sentiment_analysis.negative_periods | array | Periods of negative sentiment |
| greeting.present | boolean | Greeting was present |
| greeting.timestamp | float | When greeting occurred |
| greeting.flagged_missing | boolean | True if no greeting found |
| greeting.flagged_late | boolean | True if greeting was late |
| call_structure.follows_structure | boolean | Call followed expected structure |
| call_structure.stages_completed | array | List of completed stages |
| call_structure.missing_stages | array | List of missing stages |
| resolution_marker.confirmed | boolean | Resolution was confirmed |
| resolution_marker.timestamp | float | When resolution was confirmed |
| empathetic_language.used | boolean | Empathy markers were present |
| empathetic_language.instances | array | Timestamps of empathy instances |
| empathetic_language.flagged | boolean | True if intervals lack empathy |
| objections.identified | array | Customer objections detected |
| objections.flagged_unhandled | boolean | True if objections not handled |
| interruptions.count | int | Number of interruptions |
| interruptions.flagged | boolean | True if count > 5 |
| language_adequacy.appropriate | boolean | Language was appropriate |
| language_adequacy.flagged | boolean | True if issues found |
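
As an example, the sentiment fields alone support a simple trend summary. This is a sketch built only on the fields above; sentiment_trend is a hypothetical helper, and since the shape of negative_periods entries is not detailed here, only their count is used.

def sentiment_trend(results):
    """Summarize the call's sentiment trajectory (1-5 scale) from documented fields."""
    s = results['results']['quality']['sentiment_analysis']
    return {
        'start_avg': s['first_30s_avg'],                       # average over first 30 seconds
        'end_avg': s['last_30s_avg'],                          # average over last 30 seconds
        'net_change': s['last_30s_avg'] - s['first_30s_avg'],  # positive = sentiment improved
        'flagged_drop': s['flagged_drop'],                     # True if drop exceeded threshold
        'negative_period_count': len(s['negative_periods']),
    }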

Working with Coaching Insights​

Coaching insights provide actionable feedback for agent improvement:

def extract_coaching_opportunities(results):
    """Group coaching recommendations by priority"""
    coaching = results['results']['coaching']

    opportunities = {
        'high': [],
        'medium': [],
        'low': [],
        'customer_effort_score': coaching['customer_effort_score']  # 0-5, 5 = highest effort
    }

    for rec in coaching['recommendations']:
        opportunities[rec['priority']].append({
            'area': rec['category'],            # e.g. "greeting", "empathy"
            'recommendation': rec['description']
        })

    return opportunities

# Usage
coaching = extract_coaching_opportunities(results)
print("\n🎯 Coaching Summary:")
print(f"  Customer Effort Score: {coaching['customer_effort_score']}/5")
print(f"  High priority: {len(coaching['high'])}")
print(f"  Medium priority: {len(coaching['medium'])}")
print(f"  Low priority: {len(coaching['low'])}")

if coaching['high']:
    print("\n⚠️ High-Priority Coaching Points:")
    for item in coaching['high']:
        print(f"  • [{item['area']}] {item['recommendation']}")

Coaching Result Fields​

| Field | Type | Description |
| --- | --- | --- |
| recommendations | array | List of coaching recommendations |
| recommendations[].category | string | Category (e.g., "greeting", "empathy") |
| recommendations[].priority | string | Priority level: "high", "medium", "low" |
| recommendations[].description | string | Specific recommendation text |
| customer_effort_score | int | Customer effort score (0-5, where 5 = highest effort) |
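
For instance, a small hypothetical helper that surfaces the highest-priority recommendations first, using only the fields above:

PRIORITY_ORDER = {'high': 0, 'medium': 1, 'low': 2}

def top_recommendations(results, limit=3):
    """Return a call's coaching recommendations, highest priority first."""
    recs = results['results']['coaching']['recommendations']
    ranked = sorted(recs, key=lambda r: PRIORITY_ORDER.get(r['priority'], 3))
    return ranked[:limit]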

Best Practices​

1. Cache Results Appropriately​

Avoid unnecessary API calls by caching completed analyses:

from datetime import datetime, timedelta

# In-memory cache with TTL
results_cache = {}

def get_cached_results(analysis_id, ttl_hours=24):
    """Get results with caching"""
    if analysis_id in results_cache:
        cached_data, cached_time = results_cache[analysis_id]
        if datetime.now() - cached_time < timedelta(hours=ttl_hours):
            return cached_data

    # Fetch fresh data
    results = get_analysis_results(analysis_id)

    # Cache only completed analyses
    if results['status'] == 'completed':
        results_cache[analysis_id] = (results, datetime.now())

    return results

2. Handle Partial Results​

Process results even if some sections are unavailable:

def safe_extract_metrics(results):
    """Safely extract metrics with fallbacks"""
    metrics = {}

    try:
        metrics['disclosures_read'] = results['results']['compliance']['mandatory_disclosures']['read']
    except (KeyError, TypeError):
        metrics['disclosures_read'] = None

    try:
        metrics['customer_effort_score'] = results['results']['coaching']['customer_effort_score']
    except (KeyError, TypeError):
        metrics['customer_effort_score'] = None

    return metrics

3. Aggregate Results for Reporting​

Combine multiple analyses for team or trend reports:

def aggregate_team_performance(analysis_ids):
    """Generate team-level metrics from flags and counts"""
    all_results = [get_analysis_results(aid) for aid in analysis_ids]

    team_metrics = {
        'total_calls': len(all_results),
        'avg_customer_effort': 0,
        'avg_interruptions': 0,
        'total_missing_disclosures': 0,
        'resolution_rate': 0
    }

    for results in all_results:
        quality = results['results']['quality']
        compliance = results['results']['compliance']
        coaching = results['results']['coaching']

        team_metrics['avg_customer_effort'] += coaching['customer_effort_score']
        team_metrics['avg_interruptions'] += quality['interruptions']['count']
        team_metrics['total_missing_disclosures'] += len(
            compliance['mandatory_disclosures']['missing_disclosures']
        )

        if quality['resolution_marker']['confirmed']:
            team_metrics['resolution_rate'] += 1

    # Calculate averages and rates
    team_metrics['avg_customer_effort'] /= len(all_results)
    team_metrics['avg_interruptions'] /= len(all_results)
    team_metrics['resolution_rate'] /= len(all_results)

    return team_metrics

4. Filter Results by Criteria​

Extract specific insights based on business rules:

def find_calls_needing_review(analysis_ids):
    """Identify calls requiring manager review"""
    flagged_calls = []

    for analysis_id in analysis_ids:
        results = get_analysis_results(analysis_id)
        compliance = results['results']['compliance']
        quality = results['results']['quality']
        coaching = results['results']['coaching']

        # Review criteria built from documented flags and counts
        compliance_flagged = (
            compliance['identity_verification']['flagged'] or
            not compliance['mandatory_disclosures']['read'] or
            compliance['prohibited_phrases']['used'] or
            compliance['sensitive_data']['shared']
        )
        quality_flagged = (
            quality['sentiment_analysis']['flagged_drop'] or
            quality['interruptions']['flagged']
        )
        high_priority = [
            r for r in coaching['recommendations'] if r['priority'] == 'high'
        ]

        if compliance_flagged or quality_flagged or high_priority:
            flagged_calls.append({
                'analysis_id': analysis_id,
                'agent_id': results['call']['agent_id'],
                'compliance_flagged': compliance_flagged,
                'quality_flagged': quality_flagged,
                'high_priority_coaching': len(high_priority)
            })

    return flagged_calls

Common Use Cases​

Generate Agent Scorecard​

Combine all metrics into a comprehensive scorecard:

def create_agent_scorecard(agent_id, date_range):
    """Generate comprehensive agent scorecard"""
    # Get all analyses for the agent in the date range
    # (get_analyses_for_agent is your own lookup against your call store)
    analyses = get_analyses_for_agent(agent_id, date_range)

    scorecard = {
        'agent_id': agent_id,
        'period': date_range,
        'total_calls': len(analyses),
        'metrics': {
            'avg_customer_effort': 0,
            'avg_interruptions': 0,
            'resolution_rate': 0,
            'greeting_issue_rate': 0
        }
    }

    # Aggregate flags and counts across calls
    for analysis in analyses:
        results = get_analysis_results(analysis['id'])
        quality = results['results']['quality']
        coaching = results['results']['coaching']

        scorecard['metrics']['avg_customer_effort'] += coaching['customer_effort_score']
        scorecard['metrics']['avg_interruptions'] += quality['interruptions']['count']
        if quality['resolution_marker']['confirmed']:
            scorecard['metrics']['resolution_rate'] += 1
        if quality['greeting']['flagged_missing'] or quality['greeting']['flagged_late']:
            scorecard['metrics']['greeting_issue_rate'] += 1

    # Convert sums to averages and rates
    for key in scorecard['metrics']:
        scorecard['metrics'][key] /= len(analyses)

    return scorecard

Compliance Audit Report​

Identify all compliance violations across calls:

def generate_compliance_audit(date_range):
    """Generate compliance audit report"""
    # get_all_analyses is your own lookup returning analysis IDs for the period
    analysis_ids = get_all_analyses(date_range)

    audit = {
        'total_calls': len(analysis_ids),
        'compliant_calls': 0,
        'violations_by_type': {},
        'high_risk_calls': []
    }

    for analysis_id in analysis_ids:
        results = get_analysis_results(analysis_id)
        compliance = results['results']['compliance']

        # Map each documented compliance check to a pass/fail outcome
        checks = {
            'identity_verification': not compliance['identity_verification']['flagged'],
            'mandatory_disclosures': compliance['mandatory_disclosures']['read'],
            'purpose_declaration': not compliance['purpose_declaration']['flagged'],
            'prohibited_phrases': not compliance['prohibited_phrases']['used'],
            'sensitive_data': not compliance['sensitive_data']['shared']
        }
        failed = [name for name, passed in checks.items() if not passed]

        if not failed:
            audit['compliant_calls'] += 1
            continue

        # Categorize failed checks
        for name in failed:
            audit['violations_by_type'][name] = \
                audit['violations_by_type'].get(name, 0) + 1

        # Treat prohibited phrases and sensitive data as high risk
        # (this is one possible policy, not an API classification)
        if 'prohibited_phrases' in failed or 'sensitive_data' in failed:
            audit['high_risk_calls'].append({
                'analysis_id': analysis_id,
                'agent_id': results['call']['agent_id'],
                'failed_checks': failed
            })

    return audit

Next Steps​