"):end_index].strip()
+                result = json.loads(json_str)
+                self.context.test_script = result.get("test_script", "")
+            else:
+                console.print("[red]Failed to extract test script from response[/red]")
+                return
+
+            # Judge the generated tests
+            judge_result = self.llm.judge(
+                self.context.test_script,
+                [
+                    "Tests cover all selected commits",
+                    "Tests are comprehensive and meaningful",
+                    "Test script is valid Python code",
+                    "Tests check both success and failure cases"
+                ],
+                context
+            )
+
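+            # Statuses handled below: "retry" triggers one regeneration pass with the
+            # judge's feedback attached; "human" flags the run for manual review; any
+            # other status keeps the generated script as-is.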
+            if judge_result.status == "retry":
+                console.print(f"[yellow]Regenerating tests: {judge_result.feedback}[/yellow]")
+                # Add feedback to context and retry
+                context["judge_feedback"] = judge_result.feedback
+                response = self.llm.call(task_prompt, context)
+                # Extract again, using the same <json> delimiters as the first extraction
+                json_match = re.search(r'<json>(.*?)</json>', response, re.DOTALL)
+                if json_match:
+                    json_str = json_match.group(1).strip()
+                    result = json.loads(json_str)
+                    self.context.test_script = result.get("test_script", "")
+            elif judge_result.status == "human":
+                console.print(f"[yellow]Human intervention needed: {judge_result.feedback}[/yellow]")
+                # TODO: Implement human feedback loop
+
+            progress.update(task, completed=True)
+
+        # Save test script
+        test_file = Path(f"test_release_{self.context.version}.py")
+        test_file.write_text(self.context.test_script)
+        console.print(f"[green]Test script saved to {test_file}[/green]")
+
+    def _run_tests(self) -> bool:
+        """Run the generated tests"""
+        console.print("\n[bold]Step 4: Run Tests[/bold]")
+
+        test_file = f"test_release_{self.context.version}.py"
+
+        with Progress(
+            SpinnerColumn(),
+            TextColumn("[progress.description]{task.description}"),
+            console=console
+        ) as progress:
+            task = progress.add_task("Running tests...", total=None)
+
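+            # Run the generated file in a child interpreter so a crash or import
+            # error inside the tests cannot take down the agent itself.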
+            result = subprocess.run(
+                ["python", test_file],
+                capture_output=True,
+                text=True
+            )
+
+            progress.update(task, completed=True)
+
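+        # A non-zero exit code from the test process blocks the release flow.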
+        if result.returncode == 0:
+            console.print("[green]✅ All tests passed![/green]")
+            self.context.test_results = {"status": "passed", "output": result.stdout}
+            return True
+        else:
+            console.print("[red]❌ Tests failed![/red]")
+            console.print(result.stdout)
+            console.print(result.stderr)
+            self.context.test_results = {
+                "status": "failed",
+                "output": result.stdout,
+                "error": result.stderr
+            }
+            return False
+
+    def _create_version_branch(self):
+        """Create version branch and cherry-pick commits"""
+        console.print(f"\n[bold]Step 5: Create Branch {self.context.branch_name}[/bold]")
+
+        # Checkout main first
+        subprocess.run(["git", "checkout", "main"], check=True)
+
+        # Create version branch
+        commit_hashes = [c["hash"] for c in self.context.selected_commits]
+
+        if GitOperations.cherry_pick_commits(commit_hashes, self.context.branch_name):
+            console.print(f"[green]Created branch {self.context.branch_name} with {len(commit_hashes)} commits[/green]")
+        else:
+            raise Exception("Failed to create version branch")
+
+    def _generate_release_notes(self):
+        """Generate release notes"""
+        console.print("\n[bold]Step 6: Generate Release Notes[/bold]")
+
+        # Implementation continues...
+        # (Keeping it minimal as requested)
+        pass
+
+    def _generate_demo(self):
+        """Generate demo script"""
+        console.print("\n[bold]Step 7: Generate Demo[/bold]")
+        pass
+
+    def _update_docs(self):
+        """Update documentation"""
+        console.print("\n[bold]Step 8: Update Documentation[/bold]")
+        pass
+
+    def _build_and_publish(self):
+        """Build and publish to PyPI"""
+        console.print("\n[bold]Step 9: Build and Publish[/bold]")
+
+        if not self.auto_mode:
+            if not Confirm.ask("Ready to publish to PyPI?"):
+                return False
+
+        # Run publish.sh
+        result = subprocess.run(["./publish.sh"], capture_output=True)
+
+        if result.returncode == 0:
+            console.print(f"[green]✅ Published v{self.context.version} to PyPI![/green]")
+
+            # Merge to main
+            subprocess.run(["git", "checkout", "main"], check=True)
+            subprocess.run(["git", "merge", "--squash", self.context.branch_name], check=True)
+            subprocess.run(["git", "commit", "-m", f"Release v{self.context.version}"], check=True)
+
+            return True
+        else:
+            console.print("[red]Publishing failed![/red]")
+            return False
+
+    # Helper methods
+    def _extract_changed_files(self, diff: str) -> List[str]:
+        """Extract changed file paths from diff"""
+        files = []
+        for line in diff.split('\n'):
+            if line.startswith('+++') or line.startswith('---'):
+                # '+++ b/path' / '--- a/path' -> 'b/path' / 'a/path'
+                path = line[4:].split('\t')[0]
+                if path != '/dev/null':
+                    # Drop the a/ or b/ prefix so callers get repo-relative paths
+                    if path[:2] in ('a/', 'b/'):
+                        path = path[2:]
+                    if path not in files:
+                        files.append(path)
+        return files
+
+    def _get_selected_diffs_summary(self) -> str:
+        """Get summary of diffs for selected commits"""
+        # Simplified for brevity
+        return f"{len(self.context.selected_commits)} commits selected"
+
+    def _load_test_patterns(self) -> str:
+        """Load existing test patterns"""
+        # Would load from existing test files
+        return "Follow pytest patterns"
+
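+    # Example output of the formatter below (hypothetical commit subject):
+    #   - 1a2b3c4d: Fix wait_for timeout handling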
+    def _format_commits_for_llm(self) -> str:
+        """Format commits for LLM consumption"""
+        lines = []
+        for commit in self.context.selected_commits:
+            lines.append(f"- {commit['hash'][:8]}: {commit['subject']}")
+        return '\n'.join(lines)
+
+@click.command()
+@click.option('--all', is_flag=True, help='Select all commits automatically')
+@click.option('-y', '--yes', is_flag=True, help='Auto-confirm version bump')
+@click.option('--dry-run', is_flag=True, help='Run without publishing')
+@click.option('--test', is_flag=True, help='Test mode - no git operations, no publishing')
+def main(all, yes, dry_run, test):
+    """Crawl4AI Release Agent - Automated release management"""
+    # Note: --dry-run is parsed but not yet wired into ReleaseAgent
+    agent = ReleaseAgent(auto_mode=yes, select_all=all, test_mode=test)
+    agent.run()
+
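+# Example invocations (assuming the script is saved as release_agent.py):
+#   python release_agent.py --all -y    # fully automated release
+#   python release_agent.py --test      # exercise the flow without git operations or publishing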
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/tests/releases/test_release_0.6.4.py b/tests/releases/test_release_0.6.4.py
new file mode 100644
index 00000000..06bd8f9e
--- /dev/null
+++ b/tests/releases/test_release_0.6.4.py
@@ -0,0 +1,151 @@
+import pytest
+import asyncio
+import time
+from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, BrowserConfig, CacheMode
+
+
+@pytest.mark.asyncio
+async def test_wait_for_timeout_separate_from_page_timeout():
+ """Test that wait_for has its own timeout separate from page_timeout"""
+ browser_config = BrowserConfig(headless=True)
+
+ # Test with short wait_for_timeout but longer page_timeout
+ config = CrawlerRunConfig(
+ wait_for="css:.nonexistent-element",
+ wait_for_timeout=2000, # 2 seconds
+ page_timeout=10000, # 10 seconds
+ cache_mode=CacheMode.BYPASS
+ )
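+    # wait_for_timeout should bound only the wait_for condition; page_timeout
+    # still governs the page load itself, so the wait gives up after ~2s.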
+
+    async with AsyncWebCrawler(config=browser_config) as crawler:
+        start_time = time.time()
+        result = await crawler.arun("https://example.com", config=config)
+        elapsed = time.time() - start_time
+
+    # Should timeout after ~2 seconds (wait_for_timeout), not 10 seconds
+    assert elapsed < 5, f"Expected timeout around 2s, but took {elapsed:.2f}s"
+    assert result.success, "Crawl should still succeed even if wait_for times out"
+
+
+@pytest.mark.asyncio
+async def test_wait_for_timeout_with_existing_element():
+ """Test that wait_for_timeout works correctly when element exists"""
+ browser_config = BrowserConfig(headless=True)
+
+ config = CrawlerRunConfig(
+ wait_for="css:body", # This should exist quickly
+ wait_for_timeout=5000,
+ cache_mode=CacheMode.BYPASS
+ )
+
+ async with AsyncWebCrawler(config=browser_config) as crawler:
+ start_time = time.time()
+ result = await crawler.arun("https://example.com", config=config)
+ elapsed = time.time() - start_time
+
+ # Should complete quickly since body element exists
+ assert elapsed < 3, f"Expected quick completion, but took {elapsed:.2f}s"
+ assert result.success
+ assert "
+
+
+ Test GA Integration
+
+
+
+
+
+ Test Page
+ Testing Google Analytics integration
+
+
+ """
+
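+    # The raw:// scheme feeds the HTML string directly to the crawler, so this
+    # test needs no network access.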
+    async with AsyncWebCrawler(config=browser_config) as crawler:
+        result = await crawler.arun(f"raw://{html_content}", config=config)
+
+    assert result.success
+    # Check that GA scripts are preserved in the HTML
+    assert "googletagmanager.com/gtag/js" in result.html
+    assert "dataLayer" in result.html
+    assert "gtag('config'" in result.html
+
+
+@pytest.mark.asyncio
+async def test_mkdocs_no_duplicate_gtag():
+ """Test that there are no duplicate gtag.js entries in documentation"""
+ browser_config = BrowserConfig(headless=True)
+ config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
+
+ # Simulate MkDocs-like HTML structure
+    html_content = """
+    <html>
+    <head>
+        <title>Crawl4AI Documentation</title>
+        <script async src="https://www.googletagmanager.com/gtag/js?id=G-XXXXXXXXXX"></script>
+        <script>
+            window.dataLayer = window.dataLayer || [];
+            function gtag(){dataLayer.push(arguments);}
+            gtag('config', 'G-XXXXXXXXXX');
+        </script>
+    </head>
+    <body>
+        <h1>Crawl4AI Documentation</h1>
+        <p>Welcome to the documentation</p>
+    </body>
+    </html>
+    """
+
+    async with AsyncWebCrawler(config=browser_config) as crawler:
+        result = await crawler.arun(f"raw://{html_content}", config=config)
+
+    assert result.success
+    # Count occurrences of gtag.js to ensure no duplicates
+    gtag_count = result.html.count("googletagmanager.com/gtag/js")
+    assert gtag_count <= 1, f"Found {gtag_count} gtag.js scripts, expected at most 1"
+
+    # Ensure the analytics functionality is still there
+    if gtag_count == 1:
+        assert "dataLayer" in result.html
+        assert "gtag('config'" in result.html
+
+
+if __name__ == "__main__":
+    # Propagate pytest's exit code; otherwise the process exits 0 even on failure
+    raise SystemExit(pytest.main([__file__, "-v"]))
\ No newline at end of file