Initial commit: The Ultimate Antigravity Skills Collection (58 Skills)

This commit is contained in:
sck_0
2026-01-14 18:48:08 +01:00
commit 7f46ed8ca1
447 changed files with 110829 additions and 0 deletions

View File

@@ -0,0 +1,441 @@
#!/usr/bin/env python3
"""
Customer Interview Analyzer
Extracts insights, patterns, and opportunities from user interviews
"""
import re
from typing import Dict, List, Tuple, Set
from collections import Counter, defaultdict
import json
class InterviewAnalyzer:
    """Analyze customer interview transcripts for insights and patterns.

    All detection is lightweight keyword/regex matching: indicator entries
    are lowercase substrings (stems such as 'frustrat' also match
    'frustrated' and 'frustrating'), so no external NLP dependency is
    required.
    """

    def __init__(self):
        # Pain point indicators (substring stems, matched case-insensitively).
        self.pain_indicators = [
            'frustrat', 'annoy', 'difficult', 'hard', 'confus', 'slow',
            'problem', 'issue', 'struggle', 'challeng', 'pain', 'waste',
            'manual', 'repetitive', 'tedious', 'boring', 'time-consuming',
            'complicated', 'complex', 'unclear', 'wish', 'need', 'want'
        ]
        # Positive indicators.
        self.delight_indicators = [
            'love', 'great', 'awesome', 'amazing', 'perfect', 'easy',
            'simple', 'quick', 'fast', 'helpful', 'useful', 'valuable',
            'save', 'efficient', 'convenient', 'intuitive', 'clear'
        ]
        # Feature request indicators.
        self.request_indicators = [
            'would be nice', 'wish', 'hope', 'want', 'need', 'should',
            'could', 'would love', 'if only', 'it would help', 'suggest',
            'recommend', 'idea', 'what if', 'have you considered'
        ]
        # Jobs-to-be-done phrasing patterns; applied with re.IGNORECASE.
        self.jtbd_patterns = [
            r'when i\s+(.+?),\s+i want to\s+(.+?)\s+so that\s+(.+)',
            r'i need to\s+(.+?)\s+because\s+(.+)',
            r'my goal is to\s+(.+)',
            r'i\'m trying to\s+(.+)',
            r'i use \w+ to\s+(.+)',
            r'helps me\s+(.+)',
        ]

    def analyze_interview(self, text: str) -> Dict:
        """Analyze a single interview transcript.

        Returns a dict with pain points, delights, feature requests,
        jobs-to-be-done, sentiment, key themes, key quotes, metrics,
        and competitor mentions.
        """
        text_lower = text.lower()
        sentences = self._split_sentences(text)
        return {
            'pain_points': self._extract_pain_points(sentences),
            'delights': self._extract_delights(sentences),
            'feature_requests': self._extract_requests(sentences),
            'jobs_to_be_done': self._extract_jtbd(text_lower),
            'sentiment_score': self._calculate_sentiment(text_lower),
            'key_themes': self._extract_themes(text_lower),
            'quotes': self._extract_key_quotes(sentences),
            'metrics_mentioned': self._extract_metrics(text),
            'competitors_mentioned': self._extract_competitors(text)
        }

    def _split_sentences(self, text: str) -> List[str]:
        """Split text into sentences, keeping the terminal punctuation.

        Splitting on whitespace *after* '.', '!' or '?' (instead of
        splitting on the punctuation itself) keeps the terminator in each
        sentence so downstream scoring — notably the '?' bonus in
        _extract_key_quotes — can actually see it.
        """
        sentences = re.split(r'(?<=[.!?])\s+', text)
        return [s.strip() for s in sentences if s.strip()]

    def _extract_pain_points(self, sentences: List[str]) -> List[Dict]:
        """Extract sentences mentioning pain indicators, with severity."""
        pain_points = []
        for sentence in sentences:
            sentence_lower = sentence.lower()
            for indicator in self.pain_indicators:
                if indicator in sentence_lower:
                    pain_points.append({
                        'quote': sentence,
                        'indicator': indicator,
                        'severity': self._assess_severity(sentence_lower)
                    })
                    break  # one record per sentence, first indicator wins
        return pain_points[:10]  # cap at top 10

    def _extract_delights(self, sentences: List[str]) -> List[Dict]:
        """Extract sentences containing positive feedback."""
        delights = []
        for sentence in sentences:
            sentence_lower = sentence.lower()
            for indicator in self.delight_indicators:
                if indicator in sentence_lower:
                    delights.append({
                        'quote': sentence,
                        'indicator': indicator,
                        'strength': self._assess_strength(sentence_lower)
                    })
                    break
        return delights[:10]

    def _extract_requests(self, sentences: List[str]) -> List[Dict]:
        """Extract feature requests and suggestions with type/priority."""
        requests = []
        for sentence in sentences:
            sentence_lower = sentence.lower()
            for indicator in self.request_indicators:
                if indicator in sentence_lower:
                    requests.append({
                        'quote': sentence,
                        'type': self._classify_request(sentence_lower),
                        'priority': self._assess_request_priority(sentence_lower)
                    })
                    break
        return requests[:10]

    def _extract_jtbd(self, text: str) -> List[Dict]:
        """Extract Jobs-to-Be-Done style statements.

        Multi-group matches are joined with single spaces so the extracted
        job reads as a phrase rather than run-together words.
        """
        jobs = []
        for pattern in self.jtbd_patterns:
            for match in re.findall(pattern, text, re.IGNORECASE):
                # findall yields a tuple when the pattern has >1 group.
                job = ' '.join(match) if isinstance(match, tuple) else match
                jobs.append({'job': job, 'pattern': pattern})
        return jobs[:5]

    def _calculate_sentiment(self, text: str) -> Dict:
        """Score sentiment as (positive - negative) / total indicator hits.

        Each indicator counts at most once (presence, not frequency).
        """
        positive_count = sum(1 for ind in self.delight_indicators if ind in text)
        negative_count = sum(1 for ind in self.pain_indicators if ind in text)
        total = positive_count + negative_count
        sentiment_score = 0 if total == 0 else (positive_count - negative_count) / total
        if sentiment_score > 0.3:
            sentiment_label = 'positive'
        elif sentiment_score < -0.3:
            sentiment_label = 'negative'
        else:
            sentiment_label = 'neutral'
        return {
            'score': round(sentiment_score, 2),
            'label': sentiment_label,
            'positive_signals': positive_count,
            'negative_signals': negative_count
        }

    def _extract_themes(self, text: str) -> List[str]:
        """Extract key themes as the most frequent meaningful words.

        Only lowercase words of 4+ letters occurring at least 3 times
        qualify, after removing a small stop-word list.
        """
        stop_words = {'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at',
                      'to', 'for', 'of', 'with', 'by', 'from', 'as', 'is',
                      'was', 'are', 'were', 'been', 'be', 'have', 'has',
                      'had', 'do', 'does', 'did', 'will', 'would', 'could',
                      'should', 'may', 'might', 'must', 'can', 'shall',
                      'it', 'i', 'you', 'we', 'they', 'them', 'their'}
        words = re.findall(r'\b[a-z]{4,}\b', text)
        word_freq = Counter(w for w in words if w not in stop_words)
        return [word for word, count in word_freq.most_common(10) if count >= 3]

    def _extract_key_quotes(self, sentences: List[str]) -> List[str]:
        """Return up to 5 quotes, ranked by a simple insight score."""
        scored_sentences = []
        for sentence in sentences:
            # Skip fragments and rambling run-ons.
            if len(sentence) < 20 or len(sentence) > 200:
                continue
            score = 0
            sentence_lower = sentence.lower()
            if any(ind in sentence_lower for ind in self.pain_indicators):
                score += 2
            if any(ind in sentence_lower for ind in self.request_indicators):
                score += 2
            if 'because' in sentence_lower:
                score += 1  # explanations carry reasoning
            if 'but' in sentence_lower:
                score += 1  # contrasts often reveal trade-offs
            if '?' in sentence:
                score += 1  # questions signal unmet needs
            if score > 0:
                scored_sentences.append((score, sentence))
        # Highest score first; ties break on the sentence text itself.
        scored_sentences.sort(reverse=True)
        return [s[1] for s in scored_sentences[:5]]

    def _extract_metrics(self, text: str) -> List[str]:
        """Extract metrics (percentages, durations, money, counts).

        Deduplication preserves first-seen order so output is
        deterministic (a plain set() would reorder arbitrarily).
        """
        metrics = []
        metrics.extend(re.findall(r'\d+%', text))
        metrics.extend(re.findall(
            r'\d+\s*(?:hours?|minutes?|days?|weeks?|months?)', text, re.IGNORECASE))
        metrics.extend(re.findall(r'\$[\d,]+', text))
        # General numbers with their following word as context.
        for num, context in re.findall(r'(\d+)\s+(\w+)', text):
            if context.lower() not in ['the', 'a', 'an', 'and', 'or', 'of']:
                metrics.append(f"{num} {context}")
        return list(dict.fromkeys(metrics))[:10]

    def _extract_competitors(self, text: str) -> List[str]:
        """Extract likely competitor/tool names from comparison phrasing."""
        competitor_patterns = [
            r'(?:use|used|using|tried|trying|switch from|switched from|instead of)\s+(\w+)',
            r'(\w+)\s+(?:is better|works better|is easier)',
            r'compared to\s+(\w+)',
            r'like\s+(\w+)',
            r'similar to\s+(\w+)',
        ]
        found = []
        for pattern in competitor_patterns:
            found.extend(re.findall(pattern, text, re.IGNORECASE))
        common_words = {'this', 'that', 'it', 'them', 'other', 'another', 'something'}
        # Case-insensitive dedupe preserving first-seen order (deterministic).
        seen = set()
        competitors = []
        for candidate in found:
            key = candidate.lower()
            if key in common_words or len(candidate) <= 2 or key in seen:
                continue
            seen.add(key)
            competitors.append(candidate)
        return competitors[:5]

    def _assess_severity(self, text: str) -> str:
        """Rate pain severity from intensifier/softener words."""
        if any(word in text for word in ['very', 'extremely', 'really', 'totally', 'completely']):
            return 'high'
        elif any(word in text for word in ['somewhat', 'bit', 'little', 'slightly']):
            return 'low'
        return 'medium'

    def _assess_strength(self, text: str) -> str:
        """Rate strength of positive feedback from intensifier words."""
        if any(word in text for word in ['absolutely', 'definitely', 'really', 'very']):
            return 'strong'
        return 'moderate'

    def _classify_request(self, text: str) -> str:
        """Bucket a request into ui/feature/bug/performance/general."""
        if any(word in text for word in ['ui', 'design', 'look', 'color', 'layout']):
            return 'ui_improvement'
        elif any(word in text for word in ['feature', 'add', 'new', 'build']):
            return 'new_feature'
        elif any(word in text for word in ['fix', 'bug', 'broken', 'work']):
            return 'bug_fix'
        elif any(word in text for word in ['faster', 'slow', 'performance', 'speed']):
            return 'performance'
        return 'general'

    def _assess_request_priority(self, text: str) -> str:
        """Rate request priority from urgency/hedging words."""
        if any(word in text for word in ['critical', 'urgent', 'asap', 'immediately', 'blocking']):
            return 'critical'
        elif any(word in text for word in ['need', 'important', 'should', 'must']):
            return 'high'
        elif any(word in text for word in ['nice', 'would', 'could', 'maybe']):
            return 'low'
        return 'medium'
def aggregate_interviews(interviews: List[Dict]) -> Dict:
    """Aggregate insights from multiple analyzed interviews.

    Args:
        interviews: Outputs of InterviewAnalyzer.analyze_interview().

    Returns:
        Summary dict: pain-point quotes grouped by indicator, request
        quotes grouped by type, combined jobs-to-be-done, sentiment
        tallies, top themes, deduplicated metrics (sorted for
        deterministic output) and competitor mention counts.
    """
    aggregated = {
        'total_interviews': len(interviews),
        'common_pain_points': defaultdict(list),
        'common_requests': defaultdict(list),
        'jobs_to_be_done': [],
        'overall_sentiment': {
            'positive': 0,
            'negative': 0,
            'neutral': 0
        },
        'top_themes': Counter(),
        'metrics_summary': set(),
        'competitors_mentioned': Counter()
    }
    for interview in interviews:
        # Group pain-point quotes by the indicator that triggered them.
        # .get('quote', '') tolerates malformed entries instead of raising.
        for pain in interview.get('pain_points', []):
            indicator = pain.get('indicator', 'unknown')
            aggregated['common_pain_points'][indicator].append(pain.get('quote', ''))
        # Group request quotes by their classified type.
        for request in interview.get('feature_requests', []):
            req_type = request.get('type', 'general')
            aggregated['common_requests'][req_type].append(request.get('quote', ''))
        aggregated['jobs_to_be_done'].extend(interview.get('jobs_to_be_done', []))
        # Unknown sentiment labels count as neutral rather than raising KeyError.
        sentiment = interview.get('sentiment_score', {}).get('label', 'neutral')
        if sentiment not in aggregated['overall_sentiment']:
            sentiment = 'neutral'
        aggregated['overall_sentiment'][sentiment] += 1
        for theme in interview.get('key_themes', []):
            aggregated['top_themes'][theme] += 1
        aggregated['metrics_summary'].update(interview.get('metrics_mentioned', []))
        for competitor in interview.get('competitors_mentioned', []):
            aggregated['competitors_mentioned'][competitor] += 1
    # Convert working structures to plain, JSON-friendly types.
    aggregated['common_pain_points'] = dict(aggregated['common_pain_points'])
    aggregated['common_requests'] = dict(aggregated['common_requests'])
    aggregated['top_themes'] = dict(aggregated['top_themes'].most_common(10))
    # Sorted so repeated runs give identical output (set order is arbitrary).
    aggregated['metrics_summary'] = sorted(aggregated['metrics_summary'])
    aggregated['competitors_mentioned'] = dict(aggregated['competitors_mentioned'])
    return aggregated
def _truncate(text: str, limit: int = 100) -> str:
    """Cut text to *limit* characters, adding an ellipsis only when truncated."""
    return text if len(text) <= limit else text[:limit] + "..."


def format_single_interview(analysis: Dict) -> str:
    """Format a single interview analysis as a human-readable report.

    Args:
        analysis: Output of InterviewAnalyzer.analyze_interview().

    Returns:
        Multi-line report string with sentiment, pain points, requests,
        jobs-to-be-done, themes, quotes, metrics and competitors.
    """
    output = ["=" * 60]
    output.append("CUSTOMER INTERVIEW ANALYSIS")
    output.append("=" * 60)
    # Sentiment summary
    sentiment = analysis['sentiment_score']
    output.append(f"\n📊 Overall Sentiment: {sentiment['label'].upper()}")
    output.append(f" Score: {sentiment['score']}")
    output.append(f" Positive signals: {sentiment['positive_signals']}")
    output.append(f" Negative signals: {sentiment['negative_signals']}")
    # Pain points (top 5)
    if analysis['pain_points']:
        output.append("\n🔥 Pain Points Identified:")
        for i, pain in enumerate(analysis['pain_points'][:5], 1):
            # Ellipsis is only shown when the quote was actually truncated.
            output.append(f"\n{i}. [{pain['severity'].upper()}] {_truncate(pain['quote'])}")
    # Feature requests (top 5)
    if analysis['feature_requests']:
        output.append("\n💡 Feature Requests:")
        for i, req in enumerate(analysis['feature_requests'][:5], 1):
            output.append(f"\n{i}. [{req['type']}] Priority: {req['priority']}")
            output.append(f" \"{_truncate(req['quote'])}\"")
    # Jobs to Be Done
    if analysis['jobs_to_be_done']:
        output.append("\n🎯 Jobs to Be Done:")
        for i, job in enumerate(analysis['jobs_to_be_done'], 1):
            output.append(f"{i}. {job['job']}")
    # Key themes
    if analysis['key_themes']:
        output.append("\n🏷️ Key Themes:")
        output.append(", ".join(analysis['key_themes']))
    # Key quotes (top 3)
    if analysis['quotes']:
        output.append("\n💬 Key Quotes:")
        for i, quote in enumerate(analysis['quotes'][:3], 1):
            output.append(f'{i}. "{quote}"')
    # Metrics
    if analysis['metrics_mentioned']:
        output.append("\n📈 Metrics Mentioned:")
        output.append(", ".join(analysis['metrics_mentioned']))
    # Competitors
    if analysis['competitors_mentioned']:
        output.append("\n🏢 Competitors Mentioned:")
        output.append(", ".join(analysis['competitors_mentioned']))
    return "\n".join(output)
def main():
    """CLI entry point: analyze one transcript file and print the results.

    Usage: customer_interview_analyzer.py <interview_file.txt> [json]
    """
    import sys
    if len(sys.argv) < 2:
        print("Usage: python customer_interview_analyzer.py <interview_file.txt>")
        print("\nThis tool analyzes customer interview transcripts to extract:")
        print(" - Pain points and frustrations")
        print(" - Feature requests and suggestions")
        print(" - Jobs to be done")
        print(" - Sentiment analysis")
        print(" - Key themes and quotes")
        sys.exit(1)
    # Read interview transcript. Explicit UTF-8 avoids locale-dependent
    # decoding; a missing/unreadable file yields a CLI error, not a traceback.
    try:
        with open(sys.argv[1], 'r', encoding='utf-8') as f:
            interview_text = f.read()
    except OSError as e:
        print(f"Error reading {sys.argv[1]}: {e}")
        sys.exit(1)
    # Analyze
    analyzer = InterviewAnalyzer()
    analysis = analyzer.analyze_interview(interview_text)
    # Output: optional second argument 'json' selects machine-readable output.
    if len(sys.argv) > 2 and sys.argv[2] == 'json':
        print(json.dumps(analysis, indent=2))
    else:
        print(format_single_interview(analysis))


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,296 @@
#!/usr/bin/env python3
"""
RICE Prioritization Framework
Calculates RICE scores for feature prioritization
RICE = (Reach x Impact x Confidence) / Effort
"""
import json
import csv
from typing import List, Dict, Tuple
import argparse
class RICECalculator:
    """Calculate RICE scores for feature prioritization.

    RICE = (Reach x Impact x Confidence) / Effort, where impact,
    confidence and effort arrive as labels and are mapped to numbers
    via the tables built in __init__.
    """

    def __init__(self):
        # Impact multipliers.
        self.impact_map = {
            'massive': 3.0,
            'high': 2.0,
            'medium': 1.0,
            'low': 0.5,
            'minimal': 0.25
        }
        # Confidence as a percentage; divided by 100 when scoring.
        self.confidence_map = {
            'high': 100,
            'medium': 80,
            'low': 50
        }
        # T-shirt sizes mapped to person-months.
        self.effort_map = {
            'xl': 13,
            'l': 8,
            'm': 5,
            's': 3,
            'xs': 1
        }

    def calculate_rice(self, reach: int, impact: str, confidence: str, effort: str) -> float:
        """Compute a single RICE score, rounded to 2 decimals.

        Args:
            reach: Number of users/customers affected per quarter.
            impact: massive/high/medium/low/minimal (unknown -> medium).
            confidence: high/medium/low (unknown -> 50%).
            effort: xl/l/m/s/xs person-months (unknown -> 'm').
        """
        impact_score = self.impact_map.get(impact.lower(), 1.0)
        confidence_score = self.confidence_map.get(confidence.lower(), 50) / 100
        effort_score = self.effort_map.get(effort.lower(), 5)
        # Defensive guard: effort_map currently has no zeros, but a zero
        # would otherwise divide-by-zero here.
        if effort_score == 0:
            return 0
        rice_score = (reach * impact_score * confidence_score) / effort_score
        return round(rice_score, 2)

    def prioritize_features(self, features: List[Dict]) -> List[Dict]:
        """Attach a 'rice_score' to each feature (dicts mutated in place)
        and return the features sorted by score, highest first.

        Args:
            features: Feature dicts with reach/impact/confidence/effort keys;
                missing keys fall back to the defaults of calculate_rice().
        """
        for feature in features:
            feature['rice_score'] = self.calculate_rice(
                feature.get('reach', 0),
                feature.get('impact', 'medium'),
                feature.get('confidence', 'medium'),
                feature.get('effort', 'm')
            )
        return sorted(features, key=lambda x: x['rice_score'], reverse=True)

    def analyze_portfolio(self, features: List[Dict]) -> Dict:
        """Summarize the portfolio: totals, distributions, quick wins
        (high impact / low effort) and big bets (high impact / high effort).

        Expects features already scored by prioritize_features();
        returns {} for an empty list.
        """
        if not features:
            return {}
        total_effort = sum(
            self.effort_map.get(f.get('effort', 'm').lower(), 5)
            for f in features
        )
        total_reach = sum(f.get('reach', 0) for f in features)
        effort_distribution = {}
        impact_distribution = {}
        for feature in features:
            effort = feature.get('effort', 'm').lower()
            impact = feature.get('impact', 'medium').lower()
            effort_distribution[effort] = effort_distribution.get(effort, 0) + 1
            impact_distribution[impact] = impact_distribution.get(impact, 0) + 1
        # Quick wins: high impact, low effort.
        quick_wins = [
            f for f in features
            if f.get('impact', '').lower() in ['massive', 'high']
            and f.get('effort', '').lower() in ['xs', 's']
        ]
        # Big bets: high impact, high effort.
        big_bets = [
            f for f in features
            if f.get('impact', '').lower() in ['massive', 'high']
            and f.get('effort', '').lower() in ['l', 'xl']
        ]
        return {
            'total_features': len(features),
            'total_effort_months': total_effort,
            'total_reach': total_reach,
            'average_rice': round(sum(f['rice_score'] for f in features) / len(features), 2),
            'effort_distribution': effort_distribution,
            'impact_distribution': impact_distribution,
            'quick_wins': len(quick_wins),
            'big_bets': len(big_bets),
            'quick_wins_list': quick_wins[:3],  # top 3 (input order preserved)
            'big_bets_list': big_bets[:3]       # top 3
        }

    def generate_roadmap(self, features: List[Dict], team_capacity: int = 10) -> List[Dict]:
        """Pack features (in the given priority order) into quarters.

        Greedy first-fit: a feature joins the current quarter when its
        effort fits; otherwise a new quarter is started. A feature larger
        than team_capacity still gets its own over-booked quarter, but no
        empty quarter is emitted before it (that was a bug when the very
        first feature exceeded capacity).

        Args:
            features: Prioritized feature list.
            team_capacity: Person-months available per quarter.
        """
        quarters = []
        current_quarter = {
            'quarter': 1,
            'features': [],
            'capacity_used': 0,
            'capacity_available': team_capacity
        }
        for feature in features:
            effort = self.effort_map.get(feature.get('effort', 'm').lower(), 5)
            if current_quarter['capacity_used'] + effort <= team_capacity:
                current_quarter['features'].append(feature)
                current_quarter['capacity_used'] += effort
            else:
                # Close the current quarter only if it actually has work.
                if current_quarter['features']:
                    current_quarter['capacity_available'] = team_capacity - current_quarter['capacity_used']
                    quarters.append(current_quarter)
                current_quarter = {
                    'quarter': len(quarters) + 1,
                    'features': [feature],
                    'capacity_used': effort,
                    'capacity_available': team_capacity - effort
                }
        if current_quarter['features']:
            current_quarter['capacity_available'] = team_capacity - current_quarter['capacity_used']
            quarters.append(current_quarter)
        return quarters
def format_output(features: List[Dict], analysis: Dict, roadmap: List[Dict]) -> str:
    """Render prioritized features, portfolio analysis and roadmap as text."""
    banner = "=" * 60
    lines = [banner, "RICE PRIORITIZATION RESULTS", banner]
    emit = lines.append

    # Top 10 prioritized features.
    emit("\n📊 TOP PRIORITIZED FEATURES\n")
    for rank, feat in enumerate(features[:10], start=1):
        emit(f"{rank}. {feat.get('name', 'Unnamed')}")
        emit(f" RICE Score: {feat['rice_score']}")
        emit(f" Reach: {feat.get('reach', 0)} | Impact: {feat.get('impact', 'medium')} | "
             f"Confidence: {feat.get('confidence', 'medium')} | Effort: {feat.get('effort', 'm')}")
        emit("")

    # Portfolio-level summary.
    emit("\n📈 PORTFOLIO ANALYSIS\n")
    emit(f"Total Features: {analysis.get('total_features', 0)}")
    emit(f"Total Effort: {analysis.get('total_effort_months', 0)} person-months")
    emit(f"Total Reach: {analysis.get('total_reach', 0):,} users")
    emit(f"Average RICE Score: {analysis.get('average_rice', 0)}")

    emit(f"\n🎯 Quick Wins: {analysis.get('quick_wins', 0)} features")
    for item in analysis.get('quick_wins_list', []):
        emit(f"{item.get('name', 'Unnamed')} (RICE: {item['rice_score']})")

    emit(f"\n🚀 Big Bets: {analysis.get('big_bets', 0)} features")
    for item in analysis.get('big_bets_list', []):
        emit(f"{item.get('name', 'Unnamed')} (RICE: {item['rice_score']})")

    # Quarter-by-quarter roadmap.
    emit("\n\n📅 SUGGESTED ROADMAP\n")
    for q in roadmap:
        total_capacity = q['capacity_used'] + q['capacity_available']
        emit(f"\nQ{q['quarter']} - Capacity: {q['capacity_used']}/{total_capacity} person-months")
        for feat in q['features']:
            emit(f"{feat.get('name', 'Unnamed')} (RICE: {feat['rice_score']})")

    return "\n".join(lines)
def load_features_from_csv(filepath: str) -> List[Dict]:
    """Load feature rows from a CSV file.

    Expected columns: name, reach, impact, confidence, effort, description.
    Missing columns fall back to defaults; a blank or non-numeric 'reach'
    cell becomes 0 instead of aborting the whole load with ValueError.
    """
    features = []
    # newline='' is required by the csv module; explicit UTF-8 avoids
    # locale-dependent decoding.
    with open(filepath, 'r', newline='', encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for row in reader:
            try:
                reach = int(row.get('reach') or 0)
            except ValueError:
                reach = 0  # tolerate bad cells rather than crash mid-file
            features.append({
                'name': row.get('name', ''),
                'reach': reach,
                'impact': row.get('impact', 'medium'),
                'confidence': row.get('confidence', 'medium'),
                'effort': row.get('effort', 'm'),
                'description': row.get('description', '')
            })
    return features
def create_sample_csv(filepath: str):
    """Write a small demo feature CSV to *filepath* for trying the tool."""
    header = ['name', 'reach', 'impact', 'confidence', 'effort', 'description']
    rows = [
        ['User Dashboard Redesign', '5000', 'high', 'high', 'l', 'Complete redesign of user dashboard'],
        ['Mobile Push Notifications', '10000', 'massive', 'medium', 'm', 'Add push notification support'],
        ['Dark Mode', '8000', 'medium', 'high', 's', 'Implement dark mode theme'],
        ['API Rate Limiting', '2000', 'low', 'high', 'xs', 'Add rate limiting to API'],
        ['Social Login', '12000', 'high', 'medium', 'm', 'Add Google/Facebook login'],
        ['Export to PDF', '3000', 'medium', 'low', 's', 'Export reports as PDF'],
        ['Team Collaboration', '4000', 'massive', 'low', 'xl', 'Real-time collaboration features'],
        ['Search Improvements', '15000', 'high', 'high', 'm', 'Enhance search functionality'],
        ['Onboarding Flow', '20000', 'massive', 'high', 's', 'Improve new user onboarding'],
        ['Analytics Dashboard', '6000', 'high', 'medium', 'l', 'Advanced analytics for users'],
    ]
    with open(filepath, 'w', newline='') as f:
        csv.writer(f).writerows([header] + rows)
    print(f"Sample CSV created at: {filepath}")
def main():
    """CLI entry point: parse arguments, score features, emit results.

    Input is a feature CSV (or the literal 'sample' to generate one);
    with no input a small built-in demo set is used.
    """
    import sys

    parser = argparse.ArgumentParser(description='RICE Framework for Feature Prioritization')
    parser.add_argument('input', nargs='?', help='CSV file with features or "sample" to create sample')
    parser.add_argument('--capacity', type=int, default=10, help='Team capacity per quarter (person-months)')
    parser.add_argument('--output', choices=['text', 'json', 'csv'], default='text', help='Output format')
    args = parser.parse_args()

    # Create sample if requested
    if args.input == 'sample':
        create_sample_csv('sample_features.csv')
        return

    # Fall back to built-in demo data when no input file is given.
    if not args.input:
        features = [
            {'name': 'User Dashboard', 'reach': 5000, 'impact': 'high', 'confidence': 'high', 'effort': 'l'},
            {'name': 'Push Notifications', 'reach': 10000, 'impact': 'massive', 'confidence': 'medium', 'effort': 'm'},
            {'name': 'Dark Mode', 'reach': 8000, 'impact': 'medium', 'confidence': 'high', 'effort': 's'},
            {'name': 'API Rate Limiting', 'reach': 2000, 'impact': 'low', 'confidence': 'high', 'effort': 'xs'},
            {'name': 'Social Login', 'reach': 12000, 'impact': 'high', 'confidence': 'medium', 'effort': 'm'},
        ]
    else:
        features = load_features_from_csv(args.input)

    # Score, analyze and plan.
    calculator = RICECalculator()
    prioritized = calculator.prioritize_features(features)
    analysis = calculator.analyze_portfolio(prioritized)
    roadmap = calculator.generate_roadmap(prioritized, args.capacity)

    # Output results
    if args.output == 'json':
        result = {
            'features': prioritized,
            'analysis': analysis,
            'roadmap': roadmap
        }
        print(json.dumps(result, indent=2))
    elif args.output == 'csv':
        if prioritized:
            keys = list(prioritized[0].keys())
            # csv.writer quotes values containing commas/quotes; a plain
            # ','.join would emit broken rows for such descriptions.
            writer = csv.writer(sys.stdout)
            writer.writerow(keys)
            for feature in prioritized:
                writer.writerow([feature.get(k, '') for k in keys])
    else:
        print(format_output(prioritized, analysis, roadmap))


if __name__ == "__main__":
    main()