diff --git a/.gitignore b/.gitignore index 4d73a83..945d47b 100644 --- a/.gitignore +++ b/.gitignore @@ -27,6 +27,10 @@ dist *.swp *.swo +# Python +__pycache__ +*.pyc + # OS .DS_Store Thumbs.db @@ -40,6 +44,9 @@ Thumbs.db !/public/**/*.mp4 !/public/**/*.webm +# Analyzer output (generated frames & results) +scripts/analyze-style/output/ + # Logs *.log npm-debug.log* diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 83c3322..12dacf5 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -46,7 +46,7 @@ importers: specifier: ^5.9.3 version: 5.9.3 - videos/cuabench: + videos/cua-cloud: dependencies: '@launchpad/assets': specifier: workspace:* @@ -131,180 +131,7 @@ importers: specifier: 5.9.3 version: 5.9.3 - videos/cuabench-launch: - dependencies: - '@launchpad/assets': - specifier: workspace:* - version: link:../../packages/assets - '@launchpad/shared': - specifier: workspace:* - version: link:../../packages/shared - '@remotion/bundler': - specifier: 4.0.407 - version: 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@remotion/cli': - specifier: 4.0.407 - version: 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@remotion/google-fonts': - specifier: 4.0.407 - version: 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@remotion/paths': - specifier: 4.0.407 - version: 4.0.407 - '@remotion/player': - specifier: 4.0.407 - version: 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@remotion/shapes': - specifier: 4.0.407 - version: 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@remotion/tailwind-v4': - specifier: 4.0.407 - version: 4.0.407(@remotion/bundler@4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(typescript@5.9.3)(webpack@5.96.1) - clsx: - specifier: 2.1.1 - version: 2.1.1 - next: - specifier: 16.0.10 - version: 16.0.10(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - react: - specifier: 19.2.3 - version: 19.2.3 - react-dom: - specifier: 19.2.3 - version: 19.2.3(react@19.2.3) - remotion: - specifier: 4.0.407 - version: 
4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - tailwind-merge: - specifier: 3.0.1 - version: 3.0.1 - zod: - specifier: 3.22.3 - version: 3.22.3 - devDependencies: - '@remotion/eslint-plugin': - specifier: 4.0.407 - version: 4.0.407(eslint@9.19.0(jiti@2.6.1))(typescript@5.9.3) - '@tailwindcss/postcss': - specifier: 4.1.1 - version: 4.1.1 - '@types/node': - specifier: 20.12.14 - version: 20.12.14 - '@types/react': - specifier: 19.2.3 - version: 19.2.3 - '@types/react-dom': - specifier: 19.2.3 - version: 19.2.3(@types/react@19.2.3) - autoprefixer: - specifier: 10.4.20 - version: 10.4.20(postcss@8.4.47) - eslint: - specifier: 9.19.0 - version: 9.19.0(jiti@2.6.1) - postcss: - specifier: 8.4.47 - version: 8.4.47 - prettier: - specifier: 3.6.0 - version: 3.6.0 - tailwindcss: - specifier: 4.0.3 - version: 4.0.3 - typescript: - specifier: 5.9.3 - version: 5.9.3 - - videos/cuabenchnew: - dependencies: - '@launchpad/assets': - specifier: workspace:* - version: link:../../packages/assets - '@launchpad/shared': - specifier: workspace:* - version: link:../../packages/shared - '@remotion/bundler': - specifier: 4.0.407 - version: 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@remotion/cli': - specifier: 4.0.407 - version: 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@remotion/google-fonts': - specifier: 4.0.407 - version: 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@remotion/paths': - specifier: 4.0.407 - version: 4.0.407 - '@remotion/player': - specifier: 4.0.407 - version: 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@remotion/shapes': - specifier: 4.0.407 - version: 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@remotion/tailwind-v4': - specifier: 4.0.407 - version: 4.0.407(@remotion/bundler@4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(typescript@5.9.3)(webpack@5.96.1) - '@remotion/transitions': - specifier: 4.0.407 - version: 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - clsx: - specifier: 
2.1.1 - version: 2.1.1 - next: - specifier: 16.0.10 - version: 16.0.10(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - react: - specifier: 19.2.3 - version: 19.2.3 - react-dom: - specifier: 19.2.3 - version: 19.2.3(react@19.2.3) - remotion: - specifier: 4.0.407 - version: 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - tailwind-merge: - specifier: 3.0.1 - version: 3.0.1 - zod: - specifier: 3.22.3 - version: 3.22.3 - devDependencies: - '@remotion/eslint-plugin': - specifier: 4.0.407 - version: 4.0.407(eslint@9.19.0(jiti@2.6.1))(typescript@5.9.3) - '@tailwindcss/postcss': - specifier: 4.1.1 - version: 4.1.1 - '@types/node': - specifier: 20.12.14 - version: 20.12.14 - '@types/react': - specifier: 19.2.7 - version: 19.2.7 - '@types/react-dom': - specifier: 19.2.3 - version: 19.2.3(@types/react@19.2.7) - autoprefixer: - specifier: 10.4.20 - version: 10.4.20(postcss@8.4.47) - eslint: - specifier: 9.19.0 - version: 9.19.0(jiti@2.6.1) - postcss: - specifier: 8.4.47 - version: 8.4.47 - prettier: - specifier: 3.6.0 - version: 3.6.0 - tailwindcss: - specifier: 4.0.3 - version: 4.0.3 - typescript: - specifier: 5.9.3 - version: 5.9.3 - - videos/skills-announcement: + videos/cuabench: dependencies: '@launchpad/assets': specifier: workspace:* @@ -1356,12 +1183,6 @@ packages: peerDependencies: '@remotion/bundler': 4.0.407 - '@remotion/transitions@4.0.407': - resolution: {integrity: sha512-XnRoBM/IEjQ3BHjTlJS/b3o8uxq9CH/s8qAC9n1PWkhoWx4I1/JaUqFbkunc6LQIAPC7286up2Edtx2zAOP1rA==} - peerDependencies: - react: '>=16.8.0' - react-dom: '>=16.8.0' - '@remotion/web-renderer@4.0.407': resolution: {integrity: sha512-wO3IE+opVoT6mdcrZUzvD9urdglOr91qXO4uJqbTgzcVzRCpRp2iOPKQCjPmI/RxWQ6+fWN4VjZJP1E0iQIgGw==} peerDependencies: @@ -1481,9 +1302,6 @@ packages: peerDependencies: '@types/react': ^19.2.0 - '@types/react@19.2.3': - resolution: {integrity: sha512-k5dJVszUiNr1DSe8Cs+knKR6IrqhqdhpUwzqhkS8ecQTSf3THNtbfIp/umqHMpX2bv+9dkx3fwDv/86LcSfvSg==} - '@types/react@19.2.7': resolution: 
{integrity: sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==} @@ -3508,14 +3326,14 @@ snapshots: '@remotion/media-parser': 4.0.407 '@remotion/studio': 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@remotion/studio-shared': 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - css-loader: 5.2.7(webpack@5.96.1) + css-loader: 5.2.7(webpack@5.96.1(esbuild@0.25.0)) esbuild: 0.25.0 react: 19.2.3 react-dom: 19.2.3(react@19.2.3) react-refresh: 0.9.0 remotion: 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) source-map: 0.7.3 - style-loader: 4.0.0(webpack@5.96.1) + style-loader: 4.0.0(webpack@5.96.1(esbuild@0.25.0)) webpack: 5.96.1(esbuild@0.25.0) transitivePeerDependencies: - '@swc/core' @@ -3692,25 +3510,17 @@ snapshots: dependencies: '@remotion/bundler': 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) '@tailwindcss/postcss': 4.1.1 - css-loader: 5.2.7(webpack@5.96.1) + css-loader: 5.2.7(webpack@5.96.1(esbuild@0.25.0)) postcss: 8.5.1 postcss-loader: 8.1.1(postcss@8.5.1)(typescript@5.9.3)(webpack@5.96.1) postcss-preset-env: 10.1.3(postcss@8.5.1) - style-loader: 4.0.0(webpack@5.96.1) + style-loader: 4.0.0(webpack@5.96.1(esbuild@0.25.0)) tailwindcss: 4.1.1 transitivePeerDependencies: - '@rspack/core' - typescript - webpack - '@remotion/transitions@4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': - dependencies: - '@remotion/paths': 4.0.407 - '@remotion/shapes': 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - react: 19.2.3 - react-dom: 19.2.3(react@19.2.3) - remotion: 4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3) - '@remotion/web-renderer@4.0.407(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': dependencies: '@remotion/licensing': 4.0.407 @@ -3821,18 +3631,10 @@ snapshots: dependencies: undici-types: 5.26.5 - '@types/react-dom@19.2.3(@types/react@19.2.3)': - dependencies: - '@types/react': 19.2.3 - '@types/react-dom@19.2.3(@types/react@19.2.7)': dependencies: '@types/react': 19.2.7 - 
'@types/react@19.2.3': - dependencies: - csstype: 3.2.3 - '@types/react@19.2.7': dependencies: csstype: 3.2.3 @@ -4105,7 +3907,7 @@ snapshots: postcss-selector-parser: 7.1.1 postcss-value-parser: 4.2.0 - css-loader@5.2.7(webpack@5.96.1): + css-loader@5.2.7(webpack@5.96.1(esbuild@0.25.0)): dependencies: icss-utils: 5.1.0(postcss@8.4.47) loader-utils: 2.0.4 @@ -5102,7 +4904,7 @@ snapshots: strip-json-comments@3.1.1: {} - style-loader@4.0.0(webpack@5.96.1): + style-loader@4.0.0(webpack@5.96.1(esbuild@0.25.0)): dependencies: webpack: 5.96.1(esbuild@0.25.0) @@ -5127,7 +4929,7 @@ snapshots: tapable@2.3.0: {} - terser-webpack-plugin@5.3.16(esbuild@0.25.0)(webpack@5.96.1(esbuild@0.25.0)): + terser-webpack-plugin@5.3.16(esbuild@0.25.0)(webpack@5.96.1): dependencies: '@jridgewell/trace-mapping': 0.3.31 jest-worker: 27.5.1 @@ -5249,7 +5051,7 @@ snapshots: neo-async: 2.6.2 schema-utils: 3.3.0 tapable: 2.3.0 - terser-webpack-plugin: 5.3.16(esbuild@0.25.0)(webpack@5.96.1(esbuild@0.25.0)) + terser-webpack-plugin: 5.3.16(esbuild@0.25.0)(webpack@5.96.1) watchpack: 2.5.1 webpack-sources: 3.3.3 transitivePeerDependencies: diff --git a/scripts/analyze-style/README.md b/scripts/analyze-style/README.md new file mode 100644 index 0000000..4cd8e19 --- /dev/null +++ b/scripts/analyze-style/README.md @@ -0,0 +1,108 @@ +# Video Style Analyzer + +Extracts visual style from launch videos — color palettes, motion intensity, audio pacing, scene structure — and optionally uses Gemini Vision to synthesize a detailed style guide that a motion designer could use to recreate the look and feel. 
+ +## Setup + +```bash +cd scripts/analyze-style +pip install -r requirements.txt +``` + +For Gemini Vision synthesis, set your API key: + +```bash +export GEMINI_API_KEY="your-key-here" +``` + +## Usage + +### Basic analysis (metrics only) + +```bash +python3 analyze.py /path/to/video.mp4 --output-dir ./output --output-json ./output/results.json +``` + +This runs the full metrics pipeline: +- **Scene detection** — PySceneDetect identifies scene boundaries and extracts keyframes (start/mid/end per scene) +- **Color analysis** — dominant colors, brightness, saturation, warmth per scene +- **Audio analysis** — tempo (BPM), beat count, energy curve, spectral features +- **Motion analysis** — frame-difference scoring per scene, editing pace classification + +### Full analysis with style synthesis + +```bash +GEMINI_API_KEY="your-key" python3 analyze.py /path/to/video.mp4 \ + --output-dir ./output \ + --output-json ./output/results.json \ + --synthesize +``` + +The `--synthesize` flag sends extracted keyframes to Gemini Vision and produces: +- **Per-scene descriptions** — layout composition, typography treatment, animation states, iconography style +- **Transition descriptions** — how scenes connect (hard cuts, dissolves, shared visual anchors) +- **Style guide** — a comprehensive breakdown covering visual identity, typography, motion language, layout, narrative structure, iconography, audio-visual sync, and overall feel + +### Flags + +| Flag | Description | +|------|-------------| +| `--output-dir DIR` | Where to save extracted keyframes (default: temp directory) | +| `--output-json PATH` | Write JSON results to file (default: prints to stdout) | +| `--skip-audio` | Skip librosa audio analysis | +| `--synthesize` | Run Gemini Vision synthesis (requires `GEMINI_API_KEY`) | + +### Run individual modules + +Each module can also be run standalone for debugging: + +```bash +python3 scene_detector.py /path/to/video.mp4 # Scene detection + keyframes +python3 
color_analyzer.py /path/to/image.jpg # Color palette from an image +python3 audio_analyzer.py /path/to/video.mp4 # Audio features +python3 motion_analyzer.py /path/to/video.mp4 # Motion scores +python3 synthesizer.py /path/to/results.json # Run synthesis on existing results +``` + +## Output format + +The JSON output follows this structure: + +```jsonc +{ + "video_path": "...", + "video_info": { "fps", "total_frames", "duration", "resolution" }, + "scenes": [ + { + "scene_number": 1, + "start_time": 0.0, + "end_time": 10.5, + "duration": 10.5, + "keyframe_path": "output/scene_0001_mid.jpg", + "keyframe_paths": ["..._start.jpg", "..._mid.jpg", "..._end.jpg"], + "colors": { "dominant_color", "avg_brightness", "avg_saturation", "warmth", "palette_hex" }, + "motion_score": 2.03 + } + ], + "audio": { "tempo_bpm", "beat_count", "duration", "avg_energy", "energy_description", "pacing_description" }, + "motion": { "avg_motion_score", "motion_variance", "motion_description", "editing_pace" }, + "style_summary": { "color_mood", "avg_brightness", "pacing", "motion_level", "scene_count", "avg_scene_duration" }, + // Only present with --synthesize: + "synthesis": { + "scene_descriptions": ["..."], + "transition_descriptions": ["..."], + "style_guide": "..." + } +} +``` + +## Architecture + +``` +analyze.py — CLI entry point, orchestrates all modules +scene_detector.py — PySceneDetect keyframe extraction (start/mid/end per scene) +color_analyzer.py — colorgram.py color palette extraction + warmth/brightness analysis +audio_analyzer.py — librosa tempo, energy, spectral feature extraction +motion_analyzer.py — OpenCV frame-difference motion scoring +synthesizer.py — Gemini Vision scene captioning + style guide synthesis +``` diff --git a/scripts/analyze-style/analyze.py b/scripts/analyze-style/analyze.py new file mode 100644 index 0000000..3c60c50 --- /dev/null +++ b/scripts/analyze-style/analyze.py @@ -0,0 +1,264 @@ +"""Main CLI entry point for video style analysis. 
def _log(msg: str) -> None:
    """Print a progress message to stderr so stdout stays clean for JSON."""
    print(msg, file=sys.stderr)


def analyze_video(video_path: str, output_dir: str, skip_audio: bool = False) -> dict:
    """Run the full analysis pipeline on a video file.

    Pipeline order: metadata -> scene detection -> per-keyframe color
    analysis -> (optional) audio analysis -> motion analysis -> summary.

    Args:
        video_path: Path to the input video file.
        output_dir: Directory to save keyframes and intermediate results.
        skip_audio: If True, skip audio analysis entirely.

    Returns:
        A dictionary containing the complete style analysis results
        (keys: video_path, video_info, scenes, audio, motion, style_summary).

    Raises:
        FileNotFoundError / RuntimeError: propagated from get_video_info,
            detect_scenes, or analyze_motion. Audio failures are caught and
            reported as a null "audio" section instead of raising.
    """
    video_path = os.path.abspath(video_path)

    # ── Video metadata ────────────────────────────────────────────────── 
    _log("Extracting video metadata...")
    video_info: dict = get_video_info(video_path)

    # ── Scene detection ───────────────────────────────────────────────── 
    _log("Detecting scenes and extracting keyframes...")
    scenes: list[dict] = detect_scenes(video_path, output_dir)
    _log(f"  Found {len(scenes)} scene(s).")

    # ── Color analysis per keyframe ───────────────────────────────────── 
    _log("Analyzing color palettes...")
    for scene in scenes:
        keyframe_path: str = scene.get("keyframe_path", "")
        if keyframe_path and os.path.isfile(keyframe_path):
            palette = extract_palette(keyframe_path)
            scene["colors"] = analyze_palette(palette)
        else:
            # No keyframe on disk: fall back to a neutral placeholder so
            # downstream consumers always see a "colors" dict.
            scene["colors"] = {
                "dominant_color": None,
                "avg_brightness": 0.0,
                "avg_saturation": 0.0,
                "warmth": "neutral",
                "palette_hex": [],
            }

    # ── Audio analysis ────────────────────────────────────────────────── 
    # Audio is best-effort: a video with no audio track (or a missing
    # ffmpeg) should not abort the whole analysis.
    audio_result: Optional[dict] = None
    if skip_audio:
        _log("Skipping audio analysis (--skip-audio).")
    else:
        _log("Analyzing audio track...")
        try:
            audio_result = analyze_audio(video_path)
            _log("  Audio analysis complete.")
        except (FileNotFoundError, RuntimeError) as exc:
            _log(f"  Audio analysis failed: {exc}")
            audio_result = None

    # ── Motion analysis ───────────────────────────────────────────────── 
    _log("Analyzing motion...")
    scene_boundaries: list[tuple[float, float]] = [
        (s["start_time"], s["end_time"]) for s in scenes
    ]
    motion_result: dict = analyze_motion(video_path, scene_boundaries)

    # Attach per-scene motion scores when available.
    per_scene_scores: list[float] = motion_result.get("scene_motion_scores", [])
    for i, scene in enumerate(scenes):
        if i < len(per_scene_scores):
            scene["motion_score"] = per_scene_scores[i]

    # ── Style summary ─────────────────────────────────────────────────── 
    _log("Computing style summary...")
    style_summary = _compute_style_summary(scenes, audio_result, motion_result)

    # ── Assemble final output ─────────────────────────────────────────── 
    # Build a clean audio dict for the output (drop energy_curve for brevity).
    audio_output: Optional[dict] = None
    if audio_result is not None:
        audio_output = {
            "tempo_bpm": audio_result["tempo_bpm"],
            "beat_count": audio_result["beat_count"],
            "duration": audio_result["duration_seconds"],
            "avg_energy": audio_result["avg_energy"],
            "energy_description": audio_result["energy_description"],
            "pacing_description": audio_result["pacing_description"],
            "spectral_centroid_mean": audio_result["spectral_centroid_mean"],
        }

    result: dict = {
        "video_path": video_path,
        "video_info": video_info,
        "scenes": [
            {
                "scene_number": s["scene_number"],
                "start_time": s["start_time"],
                "end_time": s["end_time"],
                "duration": s["duration"],
                "keyframe_path": s.get("keyframe_path", ""),
                "keyframe_paths": s.get("keyframe_paths", []),
                "colors": s.get("colors", {}),
                # Only include motion_score when one was attached above,
                # so the key's presence signals "score available".
                **({"motion_score": s["motion_score"]} if "motion_score" in s else {}),
            }
            for s in scenes
        ],
        "audio": audio_output,
        "motion": {
            "avg_motion_score": motion_result.get("avg_motion_score", 0.0),
            "motion_variance": motion_result.get("motion_variance", 0.0),
            "motion_description": motion_result.get("motion_description", "unknown"),
            "editing_pace": motion_result.get("editing_pace", "unknown"),
        },
        "style_summary": style_summary,
    }

    return result
analysis results.""" + # Color mood: majority vote of per-scene warmth values. + warmth_counts: Counter[str] = Counter() + brightness_total = 0.0 + brightness_count = 0 + + for scene in scenes: + colors = scene.get("colors", {}) + warmth_counts[colors.get("warmth", "neutral")] += 1 + bri = colors.get("avg_brightness", 0.0) + if bri > 0: + brightness_total += bri + brightness_count += 1 + + color_mood: str = warmth_counts.most_common(1)[0][0] if warmth_counts else "neutral" + avg_brightness: float = round(brightness_total / brightness_count, 2) if brightness_count else 0.0 + + # Pacing: prefer audio-derived pacing, fall back to editing pace. + if audio_result is not None: + pacing: str = audio_result.get("pacing_description", "unknown") + else: + pacing = motion_result.get("editing_pace", "unknown") + + motion_level: str = motion_result.get("motion_description", "unknown") + + scene_count: int = len(scenes) + total_duration = sum(s.get("duration", 0.0) for s in scenes) + avg_scene_duration: float = round(total_duration / scene_count, 3) if scene_count else 0.0 + + return { + "color_mood": color_mood, + "avg_brightness": avg_brightness, + "pacing": pacing, + "motion_level": motion_level, + "scene_count": scene_count, + "avg_scene_duration": avg_scene_duration, + } + + +def main() -> None: + parser = argparse.ArgumentParser( + description="Analyze the visual and auditory style of a video file.", + ) + parser.add_argument( + "video_path", + help="Path to the video file to analyze.", + ) + parser.add_argument( + "--output-dir", + default=None, + help="Directory to save keyframes and results. A temp dir is created if omitted.", + ) + parser.add_argument( + "--output-json", + default=None, + help="Path to write the JSON results file. 
Prints to stdout if omitted.", + ) + parser.add_argument( + "--skip-audio", + action="store_true", + default=False, + help="Skip audio analysis.", + ) + parser.add_argument( + "--synthesize", + action="store_true", + default=False, + help="Run Gemini Vision synthesis for rich style descriptions (requires GEMINI_API_KEY).", + ) + + args = parser.parse_args() + + # Resolve output directory. + if args.output_dir: + output_dir: str = os.path.abspath(args.output_dir) + else: + output_dir = tempfile.mkdtemp(prefix="video_style_") + _log(f"Using temporary output directory: {output_dir}") + + try: + result = analyze_video(args.video_path, output_dir, skip_audio=args.skip_audio) + except Exception as exc: + _log(f"Error: {exc}") + sys.exit(1) + + # ── Gemini Vision synthesis (optional) ──────────────────────────── + if args.synthesize: + _log("\n── Running Gemini Vision synthesis ──") + try: + synthesis = run_synthesis(result) + result["synthesis"] = synthesis + except RuntimeError as exc: + _log(f"Synthesis failed: {exc}") + result["synthesis"] = None + + # Write JSON output. + json_str = json.dumps(result, indent=2, ensure_ascii=False) + + if args.output_json: + output_json_path = os.path.abspath(args.output_json) + os.makedirs(os.path.dirname(output_json_path), exist_ok=True) + with open(output_json_path, "w", encoding="utf-8") as f: + f.write(json_str) + f.write("\n") + _log(f"Results written to {output_json_path}") + else: + print(json_str) + + # Final summary to stderr. + summary = result.get("style_summary", {}) + _log( + f"Analysis complete. {summary.get('scene_count', 0)} scenes detected. " + f"Style: {summary.get('color_mood', 'unknown')}, " + f"{summary.get('pacing', 'unknown')} pacing, " + f"{summary.get('motion_level', 'unknown')} motion." 
+ ) + + +if __name__ == "__main__": + main() diff --git a/scripts/analyze-style/audio_analyzer.py b/scripts/analyze-style/audio_analyzer.py new file mode 100644 index 0000000..31959ef --- /dev/null +++ b/scripts/analyze-style/audio_analyzer.py @@ -0,0 +1,145 @@ +"""Audio feature extraction from video files using librosa and ffmpeg.""" + +import subprocess +import sys +import tempfile +from pathlib import Path + +import librosa +import numpy as np + + +def analyze_audio(video_path: str) -> dict: + """Extract audio features from a video file. + + Args: + video_path: Path to the input video file. + + Returns: + Dictionary containing extracted audio features. + + Raises: + FileNotFoundError: If the video file or ffmpeg is not found. + RuntimeError: If audio extraction or analysis fails. + """ + video = Path(video_path) + if not video.is_file(): + raise FileNotFoundError(f"Video file not found: {video_path}") + + # Extract audio from video to a temporary wav file + with tempfile.NamedTemporaryFile(suffix=".wav", delete=True) as tmp: + tmp_path = tmp.name + + try: + _extract_audio(video_path, tmp_path) + features = _compute_features(tmp_path) + finally: + # Clean up temp file + Path(tmp_path).unlink(missing_ok=True) + + return features + + +def _extract_audio(video_path: str, output_path: str) -> None: + """Use ffmpeg to extract audio from a video file as mono 22050 Hz wav.""" + cmd = [ + "ffmpeg", + "-i", video_path, + "-vn", # no video + "-acodec", "pcm_s16le", + "-ar", "22050", # sample rate that librosa expects by default + "-ac", "1", # mono + "-y", # overwrite + output_path, + ] + try: + result = subprocess.run( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + timeout=120, + ) + except FileNotFoundError: + raise FileNotFoundError( + "ffmpeg not found. Please install ffmpeg and ensure it is on your PATH." 
+ ) + + if result.returncode != 0: + stderr = result.stderr.decode(errors="replace") + if "does not contain any stream" in stderr or "no audio" in stderr.lower(): + raise RuntimeError(f"Video has no audio track: {video_path}") + raise RuntimeError(f"ffmpeg failed (exit {result.returncode}): {stderr[:500]}") + + +def _compute_features(wav_path: str) -> dict: + """Compute audio features from a wav file using librosa.""" + y, sr = librosa.load(wav_path, sr=22050, mono=True) + + if len(y) == 0: + raise RuntimeError("Extracted audio is empty.") + + duration_seconds: float = float(librosa.get_duration(y=y, sr=sr)) + + # Tempo and beats + tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr) + tempo_bpm: float = float(np.atleast_1d(tempo)[0]) + beat_count: int = int(len(beat_frames)) + + # RMS energy (per frame) + rms = librosa.feature.rms(y=y)[0] + avg_energy: float = float(np.mean(rms)) + + # Energy curve sampled at ~1-second intervals + frames_per_second = sr / 512 # default hop_length for rms is 512 + step = max(1, int(round(frames_per_second))) + energy_curve: list[float] = [float(v) for v in rms[::step]] + + # Spectral centroid + centroid = librosa.feature.spectral_centroid(y=y, sr=sr)[0] + spectral_centroid_mean: float = float(np.mean(centroid)) + + # Descriptive labels + if avg_energy > 0.05: + energy_description = "high" + elif avg_energy > 0.01: + energy_description = "medium" + else: + energy_description = "low" + + if tempo_bpm > 140: + pacing_description = "fast" + elif tempo_bpm >= 90: + pacing_description = "moderate" + else: + pacing_description = "slow" + + return { + "tempo_bpm": tempo_bpm, + "beat_count": beat_count, + "duration_seconds": duration_seconds, + "avg_energy": avg_energy, + "energy_curve": energy_curve, + "spectral_centroid_mean": spectral_centroid_mean, + "energy_description": energy_description, + "pacing_description": pacing_description, + } + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print(f"Usage: python {sys.argv[0]} 
") + sys.exit(1) + + try: + result = analyze_audio(sys.argv[1]) + except (FileNotFoundError, RuntimeError) as exc: + print(f"Error: {exc}", file=sys.stderr) + sys.exit(1) + + for key, value in result.items(): + if isinstance(value, list): + print(f" {key}: [{len(value)} samples]") + elif isinstance(value, float): + print(f" {key}: {value:.4f}") + else: + print(f" {key}: {value}") diff --git a/scripts/analyze-style/color_analyzer.py b/scripts/analyze-style/color_analyzer.py new file mode 100644 index 0000000..9342384 --- /dev/null +++ b/scripts/analyze-style/color_analyzer.py @@ -0,0 +1,149 @@ +"""Extract and analyze dominant color palettes from keyframe images.""" + +import colorsys +import sys +from pathlib import Path + +import colorgram + + +def extract_palette(image_path: str, num_colors: int = 5) -> list[dict]: + """Extract the dominant color palette from an image. + + Args: + image_path: Path to the image file. + num_colors: Number of dominant colors to extract. + + Returns: + List of dicts sorted by proportion descending, each containing: + rgb: (r, g, b) tuple + hex: hex color string + proportion: float between 0 and 1 + """ + path = Path(image_path) + if not path.exists(): + raise FileNotFoundError(f"Image not found: {image_path}") + + try: + colors = colorgram.extract(str(path), num_colors) + except Exception as exc: + raise ValueError(f"Could not process image: {image_path}") from exc + + total = sum(c.proportion for c in colors) + palette: list[dict] = [] + for c in colors: + r, g, b = c.rgb.r, c.rgb.g, c.rgb.b + palette.append( + { + "rgb": (r, g, b), + "hex": f"#{r:02x}{g:02x}{b:02x}", + "proportion": c.proportion / total if total > 0 else 0.0, + } + ) + + palette.sort(key=lambda x: x["proportion"], reverse=True) + return palette + + +def analyze_palette(colors: list[dict]) -> dict: + """Analyze a color palette produced by extract_palette. + + Args: + colors: List of color dicts from extract_palette. 
+ + Returns: + Dict with: + dominant_color: hex string of the highest-proportion color + avg_brightness: weighted average brightness (0-255) + avg_saturation: weighted average saturation (0-1) + warmth: "warm", "cool", or "neutral" + palette_hex: list of hex strings + """ + if not colors: + return { + "dominant_color": None, + "avg_brightness": 0.0, + "avg_saturation": 0.0, + "warmth": "neutral", + "palette_hex": [], + } + + dominant_color = colors[0]["hex"] + + total_brightness = 0.0 + total_saturation = 0.0 + total_weight = 0.0 + weighted_red = 0.0 + weighted_blue = 0.0 + + for c in colors: + r, g, b = c["rgb"] + w = c["proportion"] + total_weight += w + + # Brightness: simple average of RGB channels + brightness = (r + g + b) / 3.0 + total_brightness += brightness * w + + # Saturation via colorsys (HSV) + _, s, _ = colorsys.rgb_to_hsv(r / 255.0, g / 255.0, b / 255.0) + total_saturation += s * w + + weighted_red += r * w + weighted_blue += b * w + + avg_brightness = total_brightness / total_weight if total_weight > 0 else 0.0 + avg_saturation = total_saturation / total_weight if total_weight > 0 else 0.0 + + # Warmth: compare weighted red vs blue channels + if total_weight > 0: + avg_red = weighted_red / total_weight + avg_blue = weighted_blue / total_weight + diff = avg_red - avg_blue + if diff > 10: + warmth = "warm" + elif diff < -10: + warmth = "cool" + else: + warmth = "neutral" + else: + warmth = "neutral" + + return { + "dominant_color": dominant_color, + "avg_brightness": round(avg_brightness, 2), + "avg_saturation": round(avg_saturation, 4), + "warmth": warmth, + "palette_hex": [c["hex"] for c in colors], + } + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print(f"Usage: {sys.argv[0]} [num_colors]") + sys.exit(1) + + img_path = sys.argv[1] + n_colors = int(sys.argv[2]) if len(sys.argv) > 2 else 5 + + try: + palette = extract_palette(img_path, n_colors) + except (FileNotFoundError, ValueError) as err: + print(f"Error: {err}") + sys.exit(1) + + 
"""Analyze motion intensity and editing pace in a video using frame differencing."""

from __future__ import annotations

import sys
from pathlib import Path

import cv2
import numpy as np


def get_video_info(video_path: str) -> dict:
    """Return basic metadata for a video file.

    Args:
        video_path: Path to the video file.

    Returns:
        Dict with fps, total_frames, duration, and resolution.

    Raises:
        FileNotFoundError: If the file does not exist.
        RuntimeError: If OpenCV cannot open the file.
    """
    path = Path(video_path)
    if not path.exists():
        raise FileNotFoundError(f"Video not found: {video_path}")

    cap = cv2.VideoCapture(str(path))
    if not cap.isOpened():
        raise RuntimeError(f"Failed to open video: {video_path}")

    fps = cap.get(cv2.CAP_PROP_FPS)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cap.release()

    # Some containers report fps == 0; avoid a ZeroDivisionError.
    duration = total_frames / fps if fps > 0 else 0.0

    return {
        "fps": fps,
        "total_frames": total_frames,
        "duration": round(duration, 3),
        "resolution": {"width": width, "height": height},
    }


def _motion_description(avg_score: float) -> str:
    """Map an average motion score (0-255) to a human-readable label."""
    if avg_score < 2:
        return "static"
    if avg_score < 8:
        return "gentle"
    if avg_score < 20:
        return "moderate"
    if avg_score < 40:
        return "dynamic"
    return "frenetic"


def _editing_pace(scene_boundaries: list[tuple[float, float]]) -> str:
    """Classify editing pace based on average scene duration."""
    if not scene_boundaries:
        return "unknown"
    durations = [end - start for start, end in scene_boundaries]
    avg_duration = sum(durations) / len(durations)
    if avg_duration > 5.0:
        return "slow"
    if avg_duration >= 2.0:
        return "moderate"
    return "fast"


def analyze_motion(
    video_path: str,
    scene_boundaries: list[tuple[float, float]] | None = None,
    sample_every: int = 5,
) -> dict:
    """Analyze motion intensity across a video by sampling frame differences.

    Args:
        video_path: Path to the video file.
        scene_boundaries: Optional list of (start_sec, end_sec) tuples
            defining scene boundaries. When provided, per-scene motion
            scores and editing pace are included in the result.
        sample_every: Process every Nth frame for performance (default 5).

    Returns:
        Dict with avg_motion_score, motion_variance, scene_motion_scores,
        total_frames, fps, resolution, motion_description, and
        editing_pace (when scene_boundaries is provided).

    Raises:
        FileNotFoundError: If the file does not exist.
        RuntimeError: If OpenCV cannot open the file.
    """
    path = Path(video_path)
    if not path.exists():
        raise FileNotFoundError(f"Video not found: {video_path}")

    cap = cv2.VideoCapture(str(path))
    if not cap.isOpened():
        raise RuntimeError(f"Failed to open video: {video_path}")

    fps = cap.get(cv2.CAP_PROP_FPS)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # -- Collect per-frame motion scores ------------------------------------
    # Mean absolute difference between consecutive *sampled* grayscale
    # frames; a value in [0, 255].
    motion_scores: list[float] = []
    prev_gray = None
    frame_idx = 0

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        if frame_idx % sample_every == 0:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            if prev_gray is not None:
                diff = cv2.absdiff(prev_gray, gray)
                motion_scores.append(float(np.mean(diff)))
            prev_gray = gray

        frame_idx += 1

    cap.release()

    avg_motion = float(np.mean(motion_scores)) if motion_scores else 0.0
    motion_var = float(np.var(motion_scores)) if motion_scores else 0.0

    # -- Per-scene motion scores --------------------------------------------
    scene_motion_scores: list[float] | None = None
    editing_pace: str | None = None

    if scene_boundaries is not None:
        scene_motion_scores = []
        for start_sec, end_sec in scene_boundaries:
            start_frame = int(start_sec * fps)
            end_frame = int(end_sec * fps)

            # Map sampled indices back: sampled index i corresponds to
            # original frame i * sample_every.
            scene_vals = [
                score
                for i, score in enumerate(motion_scores)
                # The diff at index i is between sampled frame i and i+1,
                # so the source frame is (i + 1) * sample_every.
                if start_frame <= (i + 1) * sample_every < end_frame
            ]
            scene_avg = float(np.mean(scene_vals)) if scene_vals else 0.0
            scene_motion_scores.append(round(scene_avg, 4))

        editing_pace = _editing_pace(scene_boundaries)

    result: dict = {
        "avg_motion_score": round(avg_motion, 4),
        "motion_variance": round(motion_var, 4),
        "scene_motion_scores": scene_motion_scores,
        "total_frames": total_frames,
        "fps": fps,
        "resolution": {"width": width, "height": height},
        "motion_description": _motion_description(avg_motion),
    }

    if editing_pace is not None:
        result["editing_pace"] = editing_pace

    return result


if __name__ == "__main__":
    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} <video_path>")
        sys.exit(1)

    video_file = sys.argv[1]

    try:
        info = get_video_info(video_file)
    except (FileNotFoundError, RuntimeError) as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)

    print("Video info:")
    print(f"  FPS          : {info['fps']}")
    print(f"  Total frames : {info['total_frames']}")
    print(f"  Duration     : {info['duration']}s")
    print(f"  Resolution   : {info['resolution']['width']}x{info['resolution']['height']}")

    try:
        result = analyze_motion(video_file)
    except (FileNotFoundError, RuntimeError) as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)

    print("\nMotion analysis:")
    print(f"  Avg motion score : {result['avg_motion_score']}")
    print(f"  Motion variance  : {result['motion_variance']}")
    print(f"  Description      : {result['motion_description']}")
    print(f"  Total frames     : {result['total_frames']}")
"""Scene detection and keyframe extraction using PySceneDetect."""

from __future__ import annotations

import os
import sys

import cv2
from scenedetect import open_video, SceneManager
from scenedetect.detectors import AdaptiveDetector


def detect_scenes(video_path: str, output_dir: str) -> list[dict]:
    """Detect scenes in a video and save representative keyframes as JPEGs.

    Args:
        video_path: Path to the input video file.
        output_dir: Directory for keyframe images (created if missing).

    Returns:
        One dict per scene with scene_number, start_time, end_time,
        duration, keyframe_paths (start/mid/end, "" on read failure),
        and keyframe_path (the mid keyframe, "" on read failure).

    Raises:
        FileNotFoundError: If the video file does not exist.
        RuntimeError: If OpenCV cannot open the video.
    """
    if not os.path.isfile(video_path):
        raise FileNotFoundError(f"Video not found: {video_path}")

    os.makedirs(output_dir, exist_ok=True)

    video = open_video(video_path)
    scene_manager = SceneManager()
    scene_manager.add_detector(AdaptiveDetector())
    scene_manager.detect_scenes(video)

    scene_list = scene_manager.get_scene_list()

    # If no scenes detected (e.g. very short video), treat the entire
    # video as one scene spanning [base_timecode, base_timecode + duration).
    if not scene_list:
        scene_list = [
            (video.base_timecode, video.base_timecode + video.duration),
        ]

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise RuntimeError(f"Failed to open video with OpenCV: {video_path}")

    fps = cap.get(cv2.CAP_PROP_FPS)
    results: list[dict] = []

    for idx, (start, end) in enumerate(scene_list, start=1):
        start_sec = start.get_seconds()
        end_sec = end.get_seconds()
        duration = end_sec - start_sec

        # For very short scenes (< 0.5s), extract a single midpoint frame.
        # Otherwise extract 3 keyframes: start (10%), middle (50%), end (90%).
        if duration < 0.5:
            sample_points = [("mid", 0.5)]
        else:
            sample_points = [("start", 0.1), ("mid", 0.5), ("end", 0.9)]

        keyframe_paths: list[str] = []
        mid_keyframe_path = ""

        for label, pct in sample_points:
            sec = start_sec + duration * pct
            frame_num = int(sec * fps)

            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_num)
            ret, frame = cap.read()

            keyframe_filename = f"scene_{idx:04d}_{label}.jpg"
            keyframe_path = os.path.join(output_dir, keyframe_filename)

            if ret:
                cv2.imwrite(keyframe_path, frame)
                keyframe_paths.append(keyframe_path)
            else:
                # Keep list positions stable even when a frame read fails.
                keyframe_paths.append("")

            if label == "mid":
                mid_keyframe_path = keyframe_path if ret else ""

        results.append(
            {
                "scene_number": idx,
                "start_time": round(start_sec, 3),
                "end_time": round(end_sec, 3),
                "duration": round(duration, 3),
                "keyframe_paths": keyframe_paths,
                "keyframe_path": mid_keyframe_path,
            }
        )

    cap.release()
    return results


if __name__ == "__main__":
    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} <video_path> [output_dir]")
        sys.exit(1)

    video_file = sys.argv[1]
    out_dir = sys.argv[2] if len(sys.argv) > 2 else "keyframes"

    try:
        scenes = detect_scenes(video_file, out_dir)
    except (FileNotFoundError, RuntimeError) as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)

    print(f"Detected {len(scenes)} scene(s):\n")
    for scene in scenes:
        paths = ", ".join(scene["keyframe_paths"])
        print(
            f"  Scene {scene['scene_number']:>3d}  "
            f"{scene['start_time']:>8.3f}s - {scene['end_time']:>8.3f}s  "
            f"(duration: {scene['duration']:.3f}s)  "
            f"keyframes: [{paths}]"
        )
"""Gemini Vision-powered style synthesis from video analysis data.

Uses the Gemini 2.5 Flash model to generate rich natural-language
descriptions of individual scenes, transitions between scenes, and an
overall style guide that a motion designer could use to recreate the
video's look and feel.
"""

from __future__ import annotations

import json
import os
import sys
import time
from typing import Any

import google.generativeai as genai

# ── Constants ────────────────────────────────────────────────────────────────

_MODEL_NAME = "gemini-2.5-flash"
_MAX_RETRIES = 2  # initial attempt + 1 retry
_RETRY_DELAY_S = 2

_SCENE_SYSTEM_PROMPT = (
    "You are a motion design analyst. Describe this video scene's visual style "
    "in detail.\n"
    "Focus on: layout composition, typography style and treatment, color palette "
    "and mood, animation states visible (blur, fade, scale, position), "
    "iconography/illustration style, spatial relationships between elements, and "
    "the overall design language.\n"
    "Be specific about what you see — name fonts if recognizable, describe exact "
    "positions, note any parallax or depth effects. Keep it to 3-5 sentences."
)

_TRANSITION_SYSTEM_PROMPT = (
    "You are a motion design analyst. You are given two keyframes: the last "
    "frame of one scene and the first frame of the next scene.\n"
    "Describe the transition style between them. Is it a hard cut, a cross "
    "dissolve, a fade to/from black, a morphing animation, a wipe, a zoom, "
    "or something else? Note any shared visual elements that carry across the "
    "cut, color shifts, and how the viewer's eye is guided between the two "
    "compositions.\n"
    "Keep it to 2-3 sentences."
)

_SYNTHESIS_PROMPT_TEMPLATE = """\
You are a creative director analyzing a launch video's style to create a reusable style guide.

Based on the following video analysis data, scene descriptions, and transition descriptions,
write a comprehensive style synthesis that could be used to recreate a video in this exact style.

Cover these dimensions:
1. VISUAL IDENTITY: Color palette, brightness, contrast approach
2. TYPOGRAPHY: Font choices, text animation style (reveals, fades, blurs), hierarchy
3. MOTION LANGUAGE: How elements enter/exit, easing style, speed, fluidity
4. LAYOUT & COMPOSITION: Spatial patterns, use of whitespace, element placement
5. NARRATIVE STRUCTURE: How the video flows between sections, pacing rhythm
6. ICONOGRAPHY & ILLUSTRATION: Style of visual elements, how they relate to content
7. AUDIO-VISUAL SYNC: How motion relates to the soundtrack tempo/energy
8. OVERALL FEEL: 2-3 adjectives that capture the video's personality

Be extremely specific and actionable — this should be detailed enough that a motion designer
could recreate the style without seeing the original video.

--- VIDEO ANALYSIS DATA ---
{analysis_json}

--- SCENE DESCRIPTIONS ---
{scene_descriptions}

--- TRANSITION DESCRIPTIONS ---
{transition_descriptions}
"""


# ── Helpers ──────────────────────────────────────────────────────────────────

def _log(msg: str) -> None:
    """Print a progress message to stderr so stdout stays clean."""
    print(msg, file=sys.stderr)


def _get_api_key() -> str:
    """Return the Gemini API key from the environment.

    Checks ``GEMINI_API_KEY`` first, then falls back to ``GOOGLE_API_KEY``.
    Raises :class:`RuntimeError` if neither is set.
    """
    key = os.environ.get("GEMINI_API_KEY") or os.environ.get("GOOGLE_API_KEY")
    if not key:
        raise RuntimeError(
            "Gemini API key not found. Set the GEMINI_API_KEY or GOOGLE_API_KEY "
            "environment variable before running this module."
        )
    return key


def _configure_client() -> genai.GenerativeModel:
    """Configure the genai SDK and return a model instance."""
    genai.configure(api_key=_get_api_key())
    return genai.GenerativeModel(_MODEL_NAME)


def _upload_image(path: str) -> Any:
    """Upload a local image file to Gemini and return the File handle."""
    return genai.upload_file(path)


def _call_with_retry(generate_fn, *args, **kwargs) -> str:
    """Call *generate_fn* with retry logic for transient API failures.

    Retries once after a 2-second delay. Returns the response text on
    success, or an error note string on final failure.
    """
    last_exc: Exception | None = None
    for attempt in range(_MAX_RETRIES):
        try:
            response = generate_fn(*args, **kwargs)
            return response.text
        except Exception as exc:
            last_exc = exc
            if attempt < _MAX_RETRIES - 1:
                _log(f"    API call failed ({exc}), retrying in {_RETRY_DELAY_S}s...")
                time.sleep(_RETRY_DELAY_S)
    return f"[ERROR: API call failed after {_MAX_RETRIES} attempts — {last_exc}]"


# ── Public API ───────────────────────────────────────────────────────────────

def describe_scene(keyframe_paths: list[str], scene_data: dict) -> str:
    """Describe a single scene's visual style using Gemini Vision.

    Args:
        keyframe_paths: 1-3 image file paths for this scene's keyframes.
        scene_data: Metadata dict for the scene (colors, motion_score, duration).

    Returns:
        A natural-language description of the scene's style.
    """
    model = _configure_client()

    # Upload images (skip missing / empty paths).
    uploaded_files: list[Any] = []
    for p in keyframe_paths[:3]:
        if p and os.path.isfile(p):
            uploaded_files.append(_upload_image(p))

    # Build the prompt parts: images first, then textual context.
    parts: list[Any] = list(uploaded_files)

    metadata_summary = (
        f"Scene metadata — duration: {scene_data.get('duration', 'N/A')}s, "
        f"motion score: {scene_data.get('motion_score', 'N/A')}, "
        f"colors: {json.dumps(scene_data.get('colors', {}), indent=2)}"
    )
    parts.append(f"{_SCENE_SYSTEM_PROMPT}\n\n{metadata_summary}")

    return _call_with_retry(model.generate_content, parts)


def describe_transitions(scene_keyframes: list[list[str]]) -> list[str]:
    """Describe the transition style between each pair of adjacent scenes.

    Args:
        scene_keyframes: A list of keyframe-path lists, one per scene.
            Each inner list contains the keyframe paths for that scene
            (typically start, mid, end).

    Returns:
        A list of transition descriptions. The list has length
        ``len(scene_keyframes) - 1``.
    """
    if len(scene_keyframes) < 2:
        return []

    model = _configure_client()
    descriptions: list[str] = []

    for i in range(len(scene_keyframes) - 1):
        # End frame of scene i, start frame of scene i+1.
        end_frame = _last_valid_path(scene_keyframes[i])
        start_frame = _first_valid_path(scene_keyframes[i + 1])

        parts: list[Any] = []
        if end_frame:
            parts.append(_upload_image(end_frame))
        if start_frame:
            parts.append(_upload_image(start_frame))

        if not parts:
            descriptions.append("[No keyframes available for this transition]")
            continue

        parts.append(_TRANSITION_SYSTEM_PROMPT)
        desc = _call_with_retry(model.generate_content, parts)
        descriptions.append(desc)

    return descriptions


def synthesize_style(
    analysis_json: dict,
    scene_descriptions: list[str],
    transition_descriptions: list[str],
) -> str:
    """Produce an overall style guide from analysis data and descriptions.

    This is a text-only call (no images).

    Args:
        analysis_json: The full analysis JSON dict.
        scene_descriptions: Per-scene style descriptions.
        transition_descriptions: Per-transition descriptions.

    Returns:
        A comprehensive style guide string.
    """
    model = _configure_client()

    scenes_text = "\n\n".join(
        f"Scene {i + 1}: {desc}" for i, desc in enumerate(scene_descriptions)
    )
    transitions_text = "\n\n".join(
        f"Transition {i + 1} -> {i + 2}: {desc}"
        for i, desc in enumerate(transition_descriptions)
    )

    prompt = _SYNTHESIS_PROMPT_TEMPLATE.format(
        analysis_json=json.dumps(analysis_json, indent=2, default=str),
        scene_descriptions=scenes_text,
        transition_descriptions=transitions_text or "(no transitions detected)",
    )

    return _call_with_retry(model.generate_content, prompt)


def run_synthesis(analysis_json: dict) -> dict:
    """Orchestrate the full synthesis pipeline.

    Args:
        analysis_json: The complete analysis JSON (as produced by
            ``analyze.py``). Must contain a ``scenes`` key whose entries
            have ``keyframe_paths``.

    Returns:
        A dict with keys ``scene_descriptions``, ``transition_descriptions``,
        and ``style_guide``.
    """
    scenes = analysis_json.get("scenes", [])
    total = len(scenes)

    # ── Scene descriptions ───────────────────────────────────────────────
    _log(f"Describing {total} scene(s) with Gemini Vision...")
    scene_descriptions: list[str] = []
    for i, scene in enumerate(scenes):
        _log(f"  Scene {i + 1}/{total}...")
        keyframe_paths = scene.get("keyframe_paths", [])
        # Fall back to the single mid-frame if keyframe_paths is absent.
        if not keyframe_paths:
            kf = scene.get("keyframe_path", "")
            keyframe_paths = [kf] if kf else []
        desc = describe_scene(keyframe_paths, scene)
        scene_descriptions.append(desc)

    # ── Transition descriptions ──────────────────────────────────────────
    _log("Describing transitions between scenes...")
    all_keyframe_lists: list[list[str]] = []
    for scene in scenes:
        kfp = scene.get("keyframe_paths", [])
        if not kfp:
            kf = scene.get("keyframe_path", "")
            kfp = [kf] if kf else []
        all_keyframe_lists.append(kfp)

    transition_descriptions = describe_transitions(all_keyframe_lists)
    _log(f"  Described {len(transition_descriptions)} transition(s).")

    # ── Style synthesis ──────────────────────────────────────────────────
    _log("Synthesizing overall style guide...")
    style_guide = synthesize_style(
        analysis_json, scene_descriptions, transition_descriptions
    )
    _log("Synthesis complete.")

    return {
        "scene_descriptions": scene_descriptions,
        "transition_descriptions": transition_descriptions,
        "style_guide": style_guide,
    }


# ── Internal helpers ─────────────────────────────────────────────────────────

def _last_valid_path(paths: list[str]) -> str | None:
    """Return the last non-empty path that exists on disk, or None."""
    for p in reversed(paths):
        if p and os.path.isfile(p):
            return p
    return None


def _first_valid_path(paths: list[str]) -> str | None:
    """Return the first non-empty path that exists on disk, or None."""
    for p in paths:
        if p and os.path.isfile(p):
            return p
    return None


# ── CLI entry point ──────────────────────────────────────────────────────────

if __name__ == "__main__":
    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} <analysis_json_path>", file=sys.stderr)
        sys.exit(1)

    results_path = sys.argv[1]

    if not os.path.isfile(results_path):
        print(f"Error: file not found: {results_path}", file=sys.stderr)
        sys.exit(1)

    with open(results_path, "r", encoding="utf-8") as f:
        analysis = json.load(f)

    try:
        output = run_synthesis(analysis)
    except RuntimeError as exc:
        print(f"Error: {exc}", file=sys.stderr)
        sys.exit(1)

    print(output["style_guide"])
0000000..d5c96e4 --- /dev/null +++ b/videos/cua-cloud/postcss.config.mjs @@ -0,0 +1,8 @@ +const config = { + plugins: { + "@tailwindcss/postcss": {}, + autoprefixer: {}, + }, +}; + +export default config; diff --git a/videos/cua-cloud/public/.gitkeep b/videos/cua-cloud/public/.gitkeep new file mode 100644 index 0000000..c9fdbed --- /dev/null +++ b/videos/cua-cloud/public/.gitkeep @@ -0,0 +1 @@ +# Place video-specific assets here (audio, images, etc.) diff --git a/videos/cua-cloud/public/cua-logo.svg b/videos/cua-cloud/public/cua-logo.svg new file mode 100644 index 0000000..1847443 --- /dev/null +++ b/videos/cua-cloud/public/cua-logo.svg @@ -0,0 +1,57 @@ + + + + + + + + + + + + + + + + diff --git a/videos/cua-cloud/public/desktop-screenshot.png b/videos/cua-cloud/public/desktop-screenshot.png new file mode 100644 index 0000000..c42d33a Binary files /dev/null and b/videos/cua-cloud/public/desktop-screenshot.png differ diff --git a/videos/cua-cloud/remotion.config.ts b/videos/cua-cloud/remotion.config.ts new file mode 100644 index 0000000..07a9aa5 --- /dev/null +++ b/videos/cua-cloud/remotion.config.ts @@ -0,0 +1,5 @@ +import { Config } from "@remotion/cli/config"; +import { webpackOverride } from "./src/remotion/webpack-override.mjs"; + +Config.setVideoImageFormat("jpeg"); +Config.overrideWebpackConfig(webpackOverride); diff --git a/videos/cua-cloud/src/app/layout.tsx b/videos/cua-cloud/src/app/layout.tsx new file mode 100644 index 0000000..8870fa5 --- /dev/null +++ b/videos/cua-cloud/src/app/layout.tsx @@ -0,0 +1,15 @@ +import type { Metadata } from "next"; +import "../styles/global.css"; + +export const metadata: Metadata = { + title: "Launchpad Video Preview", + description: "Preview and render Remotion videos", +}; + +export default function RootLayout({ children }: { children: React.ReactNode }) { + return ( + + {children} + + ); +} diff --git a/videos/cua-cloud/src/app/page.tsx b/videos/cua-cloud/src/app/page.tsx new file mode 100644 index 0000000..d4bd8b8 --- 
/dev/null +++ b/videos/cua-cloud/src/app/page.tsx @@ -0,0 +1,32 @@ +"use client"; + +import { Player } from "@remotion/player"; +import { FullVideo, FULL_VIDEO_DURATION } from "../remotion/scenes/FullVideo"; +import { VIDEO_WIDTH, VIDEO_HEIGHT, VIDEO_FPS } from "../../types/constants"; + +export default function Home() { + return ( +
+

CUA Cloud Launch

+ +
+ +
+ +
+

Use Remotion Studio for full editing: pnpm remotion

+

Render video: pnpm render

+
+
+ ); +} diff --git a/videos/cua-cloud/src/remotion/Root.tsx b/videos/cua-cloud/src/remotion/Root.tsx new file mode 100644 index 0000000..fbf7f51 --- /dev/null +++ b/videos/cua-cloud/src/remotion/Root.tsx @@ -0,0 +1,54 @@ +import { Composition } from "remotion"; +import { HookScene, HOOK_SCENE_DURATION } from "./scenes/HookScene"; +import { DemoScene, DEMO_SCENE_DURATION } from "./scenes/DemoScene"; +import { ShowcaseTaglineScene, SHOWCASE_TAGLINE_SCENE_DURATION } from "./scenes/ShowcaseTaglineScene"; +import { EndCardScene, ENDCARD_SCENE_DURATION } from "./scenes/EndCardScene"; +import { FullVideo, FULL_VIDEO_DURATION } from "./scenes/FullVideo"; +import { VIDEO_WIDTH, VIDEO_HEIGHT, VIDEO_FPS } from "../../types/constants"; + +export const RemotionRoot: React.FC = () => { + return ( + <> + + + + + + + ); +}; diff --git a/videos/cua-cloud/src/remotion/components/AnimatedCursor.tsx b/videos/cua-cloud/src/remotion/components/AnimatedCursor.tsx new file mode 100644 index 0000000..8f4fd2a --- /dev/null +++ b/videos/cua-cloud/src/remotion/components/AnimatedCursor.tsx @@ -0,0 +1,146 @@ +import React from "react"; +import { useCurrentFrame, interpolate, Easing } from "remotion"; + +interface CursorPosition { + x: number; + y: number; + frame: number; + click?: boolean; +} + +interface AnimatedCursorProps { + positions: CursorPosition[]; + style?: React.CSSProperties; +} + +export const AnimatedCursor: React.FC = ({ + positions, + style, +}) => { + const frame = useCurrentFrame(); + + if (positions.length === 0) { + return null; + } + + // Before the first position's frame, hide the cursor + if (frame < positions[0].frame) { + return null; + } + + // Find the current segment: which two positions are we interpolating between? 
+ let currentX = positions[0].x; + let currentY = positions[0].y; + + for (let i = 0; i < positions.length - 1; i++) { + const from = positions[i]; + const to = positions[i + 1]; + + if (frame >= from.frame && frame <= to.frame) { + currentX = interpolate(frame, [from.frame, to.frame], [from.x, to.x], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: Easing.inOut(Easing.cubic), + }); + currentY = interpolate(frame, [from.frame, to.frame], [from.y, to.y], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: Easing.inOut(Easing.cubic), + }); + break; + } + + if (frame > to.frame) { + currentX = to.x; + currentY = to.y; + } + } + + // If past the last position, stay at the last position + const lastPos = positions[positions.length - 1]; + if (frame > lastPos.frame) { + currentX = lastPos.x; + currentY = lastPos.y; + } + + // Click animation: check if we're within 8 frames after a click position + let clickRippleOpacity = 0; + let clickRippleScale = 0; + let cursorScale = 1; + + for (const pos of positions) { + if (pos.click && frame >= pos.frame && frame < pos.frame + 8) { + const clickProgress = frame - pos.frame; + + // Ripple: scales up from 0 to 1 and fades out + clickRippleScale = interpolate(clickProgress, [0, 8], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + }); + clickRippleOpacity = interpolate(clickProgress, [0, 2, 8], [0.6, 0.5, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + }); + + // Cursor scale down on click and back + cursorScale = interpolate(clickProgress, [0, 3, 8], [1, 0.9, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: Easing.out(Easing.ease), + }); + } + } + + return ( +
+ {/* Click ripple effect */} + {clickRippleOpacity > 0 && ( +
+ )} + + {/* Cursor */} +
+ + + +
+
+ ); +}; diff --git a/videos/cua-cloud/src/remotion/components/AnimatedKoala.tsx b/videos/cua-cloud/src/remotion/components/AnimatedKoala.tsx new file mode 100644 index 0000000..32621db --- /dev/null +++ b/videos/cua-cloud/src/remotion/components/AnimatedKoala.tsx @@ -0,0 +1,162 @@ +import React from "react"; +import { useCurrentFrame, interpolate, Easing } from "remotion"; + +interface AnimatedKoalaProps { + size?: number; + color?: string; + blinkAt?: number[]; + lookDirection?: "left" | "center" | "right"; + lookStartFrame?: number; + antennaWiggle?: boolean; + style?: React.CSSProperties; +} + +export const AnimatedKoala: React.FC = ({ + size = 120, + color = "white", + blinkAt = [30, 90], + lookDirection = "center", + lookStartFrame = 0, + antennaWiggle = true, + style, +}) => { + const frame = useCurrentFrame(); + + // --- Blink animation --- + // For each blinkAt frame, compute scaleY for eyes + let eyeScaleY = 1; + for (const blinkFrame of blinkAt) { + if (frame >= blinkFrame && frame < blinkFrame + 6) { + const blinkProgress = frame - blinkFrame; + if (blinkProgress < 3) { + // Closing: 1 -> 0 over 3 frames + eyeScaleY = interpolate(blinkProgress, [0, 3], [1, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + }); + } else { + // Opening: 0 -> 1 over 3 frames + eyeScaleY = interpolate(blinkProgress, [3, 6], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + }); + } + } + } + + // --- Look direction animation --- + const targetRotation = + lookDirection === "left" ? -8 : lookDirection === "right" ? 8 : 0; + const headRotation = interpolate( + frame, + [lookStartFrame, lookStartFrame + 15], + [0, targetRotation], + { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: Easing.inOut(Easing.ease), + } + ); + + // --- Antenna wiggle --- + const wiggleAmount = antennaWiggle ? 
Math.sin(frame * 0.12) : 0; + const centerAntennaRotation = wiggleAmount * 5; + const leftAntennaRotation = wiggleAmount * 8; + const rightAntennaRotation = wiggleAmount * -8; + + // Antenna pivot points (base of each antenna) + const centerAntennaOriginX = 556.48; + const centerAntennaOriginY = 280.01; + const leftAntennaOriginX = 475.52; + const leftAntennaOriginY = 305.74; + const rightAntennaOriginX = 637.94; + const rightAntennaOriginY = 305.73; + + // Eye centers for blink transform-origin + const leftEyeCenterX = (320.24 + 378.94) / 2; // 349.59 + const leftEyeCenterY = (624.88 + 765.85) / 2; // 695.365 + const rightEyeCenterX = (738 + 816.7) / 2; // 777.35 + const rightEyeCenterY = (624.52 + 765.49) / 2; // 695.005 + + // Head rotation origin (center of head) + const headOriginX = 558; + const headOriginY = 700; + + return ( + + {/* Head group (nose + body) - rotates for look direction */} + + {/* Path 1 - Nose */} + + + {/* Path 2 - Body */} + + + + {/* Antenna group - each antenna wiggles independently */} + {/* Path 3 - Center antenna */} + + + + + {/* Path 4 - Left antenna */} + + + + + {/* Path 5 - Right antenna */} + + + + + {/* Path 6 - Left eye (blinks) */} + + + + + {/* Path 7 - Right eye (blinks) */} + + + + + ); +}; diff --git a/videos/cua-cloud/src/remotion/components/BrowserFrame.tsx b/videos/cua-cloud/src/remotion/components/BrowserFrame.tsx new file mode 100644 index 0000000..01c9a65 --- /dev/null +++ b/videos/cua-cloud/src/remotion/components/BrowserFrame.tsx @@ -0,0 +1,117 @@ +import React, { ReactNode } from "react"; +import { loadFont as loadMono } from "@remotion/google-fonts/JetBrainsMono"; + +const { fontFamily: monoFamily } = loadMono(); + +interface BrowserFrameProps { + url?: string; + children: ReactNode; + width?: number; + height?: number; + scale?: number; + style?: React.CSSProperties; +} + +const TRAFFIC_LIGHTS = [ + { color: "#ff5f57" }, + { color: "#febc2e" }, + { color: "#28c840" }, +] as const; + +const TITLE_BAR_HEIGHT = 
40; + +export const BrowserFrame: React.FC = ({ + url = "cloud.cua.ai", + children, + width = 1400, + height = 820, + scale = 1, + style, +}) => { + return ( +
+ {/* Title Bar */} +
+ {/* Traffic Lights */} +
+ {TRAFFIC_LIGHTS.map((light, i) => ( +
+ ))} +
+ + {/* URL Text */} +
+ {url} +
+
+ + {/* Content Area */} +
+ {children} +
+
+ ); +}; diff --git a/videos/cua-cloud/src/remotion/components/FeatureCard.tsx b/videos/cua-cloud/src/remotion/components/FeatureCard.tsx new file mode 100644 index 0000000..b69fc38 --- /dev/null +++ b/videos/cua-cloud/src/remotion/components/FeatureCard.tsx @@ -0,0 +1,73 @@ +import { useCurrentFrame, interpolate, Easing } from "remotion"; +import { CUA } from "../theme"; +import { loadFont } from "@remotion/google-fonts/Urbanist"; + +const { fontFamily } = loadFont(); + +interface FeatureCardProps { + label: string; + accentColor: string; + delay?: number; +} + +export const FeatureCard: React.FC = ({ + label, + accentColor, + delay = 0, +}) => { + const frame = useCurrentFrame(); + const adjustedFrame = Math.max(0, frame - delay); + + const opacity = interpolate(adjustedFrame, [0, 12], [0, 1], { + extrapolateRight: "clamp", + easing: Easing.out(Easing.cubic), + }); + + const scale = interpolate(adjustedFrame, [0, 12], [0.92, 1], { + extrapolateRight: "clamp", + easing: Easing.out(Easing.back(1.5)), + }); + + const blur = interpolate(adjustedFrame, [0, 12], [4, 0], { + extrapolateRight: "clamp", + }); + + return ( +
+ {/* Accent left bar */} +
+
+ {label} +
+
+ ); +}; diff --git a/videos/cua-cloud/src/remotion/components/GlowPulse.tsx b/videos/cua-cloud/src/remotion/components/GlowPulse.tsx new file mode 100644 index 0000000..01d378c --- /dev/null +++ b/videos/cua-cloud/src/remotion/components/GlowPulse.tsx @@ -0,0 +1,47 @@ +import { useCurrentFrame, interpolate, Easing } from "remotion"; + +interface GlowPulseProps { + startFrame: number; + durationInFrames?: number; + color?: string; + size?: number; +} + +export const GlowPulse: React.FC = ({ + startFrame, + durationInFrames = 12, + color = "rgba(97, 188, 255, 0.34)", + size = 400, +}) => { + const frame = useCurrentFrame(); + const progress = Math.max(0, frame - startFrame); + + const scale = interpolate(progress, [0, durationInFrames], [0.8, 1.5], { + extrapolateRight: "clamp", + easing: Easing.out(Easing.cubic), + }); + + const opacity = interpolate(progress, [0, durationInFrames], [0.6, 0], { + extrapolateRight: "clamp", + easing: Easing.in(Easing.quad), + }); + + if (frame < startFrame || frame > startFrame + durationInFrames) return null; + + return ( +
+ ); +}; diff --git a/videos/cua-cloud/src/remotion/components/GradientText.tsx b/videos/cua-cloud/src/remotion/components/GradientText.tsx new file mode 100644 index 0000000..8f1e880 --- /dev/null +++ b/videos/cua-cloud/src/remotion/components/GradientText.tsx @@ -0,0 +1,28 @@ +import { CSSProperties, ReactNode } from "react"; +import { GRADIENTS } from "../theme"; + +interface GradientTextProps { + children: ReactNode; + gradient?: string; + style?: CSSProperties; +} + +export const GradientText: React.FC = ({ + children, + gradient = GRADIENTS.brand5, + style, +}) => { + return ( + + {children} + + ); +}; diff --git a/videos/cua-cloud/src/remotion/components/GridBackground.tsx b/videos/cua-cloud/src/remotion/components/GridBackground.tsx new file mode 100644 index 0000000..185c43f --- /dev/null +++ b/videos/cua-cloud/src/remotion/components/GridBackground.tsx @@ -0,0 +1,28 @@ +import { AbsoluteFill } from "remotion"; +import { ReactNode } from "react"; +import { CUA } from "../theme"; + +interface GridBackgroundProps { + children: ReactNode; + gridOpacity?: number; +} + +export const GridBackground: React.FC = ({ + children, + gridOpacity = CUA.grid.opacity, +}) => { + return ( + + {/* Grid overlay */} + + {/* Content */} + {children} + + ); +}; diff --git a/videos/cua-cloud/src/remotion/components/ProvisioningDemo.tsx b/videos/cua-cloud/src/remotion/components/ProvisioningDemo.tsx new file mode 100644 index 0000000..d474bfc --- /dev/null +++ b/videos/cua-cloud/src/remotion/components/ProvisioningDemo.tsx @@ -0,0 +1,947 @@ +import React from "react"; +import { useCurrentFrame, interpolate, Easing } from "remotion"; +import { loadFont } from "@remotion/google-fonts/Urbanist"; +import { loadFont as loadMono } from "@remotion/google-fonts/JetBrainsMono"; + +const { fontFamily } = loadFont(); +const { fontFamily: monoFamily } = loadMono(); + +/* ------------------------------------------------------------------ */ +/* Style Tokens */ +/* 
------------------------------------------------------------------ */ + +const DASH_BG = "#0b0d10"; +const ROW_BG = "#1a1d23"; + +const badge = ( + r: number, + g: number, + b: number, + opts?: { alphaRing?: number; alphaText?: number; alphaBg?: number }, +) => { + const aR = opts?.alphaRing ?? 0.44; + const aT = opts?.alphaText ?? 0.88; + const aB = opts?.alphaBg ?? 0.14; + return { + border: `1px solid rgba(${r}, ${g}, ${b}, ${aR})`, + color: `rgba(${r}, ${g}, ${b}, ${aT})`, + background: `rgba(${r}, ${g}, ${b}, ${aB})`, + borderRadius: 999, + padding: "2px 10px", + fontSize: 11, + fontWeight: 600 as const, + textTransform: "uppercase" as const, + letterSpacing: "0.08em", + fontFamily: monoFamily, + display: "inline-block", + }; +}; + +const BADGE_RUNNING = badge(82, 211, 127); +const BADGE_STOPPED = badge(196, 200, 210, { + alphaRing: 0.44, + alphaText: 0.64, + alphaBg: 0.14, +}); +const BADGE_PROVISIONING = badge(121, 199, 255, { + alphaRing: 0.44, + alphaText: 0.8, + alphaBg: 0.14, +}); + +const BTN_PRIMARY: React.CSSProperties = { + borderRadius: 999, + background: "#61bcff", + color: "white", + padding: "8px 20px", + fontSize: 14, + fontWeight: 600, + fontFamily, + border: "none", + cursor: "pointer", + whiteSpace: "nowrap", +}; + +const BTN_GREEN: React.CSSProperties = { + ...BTN_PRIMARY, + background: "#4fd37e", +}; + +const MODAL: React.CSSProperties = { + background: "#12161b", + border: "1px solid rgba(255, 255, 255, 0.1)", + borderRadius: 16, + padding: 32, + maxWidth: 560, + width: "100%", +}; + +const CARD_OPTION: React.CSSProperties = { + background: ROW_BG, + border: "1px solid rgba(255, 255, 255, 0.06)", + borderRadius: 8, + padding: "16px 20px", + cursor: "pointer", + fontFamily, +}; + +const CARD_OPTION_SELECTED: React.CSSProperties = { + ...CARD_OPTION, + border: "1px solid rgba(97, 188, 255, 0.5)", + background: "rgba(97, 188, 255, 0.08)", +}; + +const TABLE_HEADER: React.CSSProperties = { + fontFamily: monoFamily, + fontSize: 11, + 
textTransform: "uppercase", + letterSpacing: "0.08em", + color: "rgba(196, 200, 210, 0.64)", + padding: "10px 16px", + textAlign: "left", +}; + +const TABLE_CELL: React.CSSProperties = { + fontFamily, + fontSize: 14, + color: "#f8f9fa", + padding: "14px 16px", +}; + +/* ------------------------------------------------------------------ */ +/* Helpers */ +/* ------------------------------------------------------------------ */ + +/** Fade in over `dur` frames starting at `start`. */ +const fadeIn = (frame: number, start: number, dur: number) => + interpolate(frame, [start, start + dur], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: Easing.out(Easing.cubic), + }); + +/** Slide-up entrance: returns { opacity, translateY }. */ +const slideUp = ( + frame: number, + start: number, + dur: number, + distance = 30, +) => ({ + opacity: fadeIn(frame, start, dur), + translateY: interpolate(frame, [start, start + dur], [distance, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: Easing.out(Easing.cubic), + }), +}); + +/** Cross-fade helper: fade out `durOut` then fade in `durIn`. */ +const crossFade = ( + frame: number, + switchAt: number, + durOut: number, + durIn: number, +) => ({ + outOpacity: interpolate( + frame, + [switchAt - durOut, switchAt], + [1, 0], + { extrapolateLeft: "clamp", extrapolateRight: "clamp" }, + ), + inOpacity: interpolate( + frame, + [switchAt, switchAt + durIn], + [0, 1], + { extrapolateLeft: "clamp", extrapolateRight: "clamp" }, + ), +}); + +/* ------------------------------------------------------------------ */ +/* Sub-components */ +/* ------------------------------------------------------------------ */ + +const MetricCard: React.FC<{ label: string; value: string }> = ({ + label, + value, +}) => ( +
+
+ {label} +
+
+ {value} +
+
+); + +const StepIndicator: React.FC<{ current: number; total: number }> = ({ + current, + total, +}) => ( +
+ {Array.from({ length: total }, (_, i) => ( +
+ ))} + + {current} of {total} + +
+); + +/* ------------------------------------------------------------------ */ +/* Main Component */ +/* ------------------------------------------------------------------ */ + +export const ProvisioningDemo: React.FC = () => { + const frame = useCurrentFrame(); + + /* ---- global entrance fade ---- */ + const dashboardOpacity = fadeIn(frame, 0, 10); + + /* ---- button press effect (frames 40-55) ---- */ + const isButtonPressed = frame >= 40 && frame < 43; + const newBtnStyle: React.CSSProperties = { + ...BTN_PRIMARY, + background: isButtonPressed ? "#4da3e0" : "#61bcff", + transform: isButtonPressed ? "scale(0.96)" : "scale(1)", + }; + + /* ---- modal overlay (frames 40+) ---- */ + const showModal = frame >= 40; + const backdropOpacity = interpolate(frame, [40, 50], [0, 0.6], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + }); + + /* ---- modal slide-up entrance (frames 55-65) ---- */ + const modalEntrance = slideUp(frame, 50, 10); + + /* ---- determine current wizard step ---- */ + const wizardStep = + frame < 100 ? 1 : frame < 140 ? 2 : frame < 175 ? 3 : 0; + + /* ---- Phase 3: OS selection (55-100) ---- */ + const linuxSelected = frame >= 75; + const step1NextHighlight = frame >= 90 && frame < 100; + + /* ---- Phase 4: Configuration (100-140) ---- */ + const step1to2 = crossFade(frame, 100, 5, 5); + const mediumSelected = frame >= 115; + const step2NextHighlight = frame >= 130 && frame < 140; + + /* ---- Phase 5: Region (140-175) ---- */ + const step2to3 = crossFade(frame, 140, 5, 5); + const regionSelected = frame >= 155; + const createHighlight = frame >= 165 && frame < 175; + + /* ---- Phase 6: Provisioning (175-220) ---- */ + const showProvisioning = frame >= 175 && frame < 220; + const step3toProvision = crossFade(frame, 175, 5, 5); + + const provisionText = + frame >= 210 + ? "Starting services..." + : frame >= 195 + ? "Configuring network..." 
+ : "Allocating resources..."; + + const provisionDone = frame >= 218; + + /* ---- Phase 7: Success (220-240) ---- */ + const showSuccess = frame >= 220; + const successEntrance = slideUp(frame, 220, 10); + + const checkScale = interpolate(frame, [220, 230], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: Easing.out(Easing.back(2.5)), + }); + + /* ================================================================ */ + /* Wizard step content */ + /* ================================================================ */ + + const renderWizardContent = () => { + /* ---- Provisioning state ---- */ + if (showSuccess) { + return ( +
+ {/* Green checkmark */} +
+ + + +
+ +
+ Sandbox ready! +
+ +
+ + dev-sandbox-04 + + Running +
+ +
+ 0:58 +
+
+ Provisioning time +
+
+ ); + } + + if (showProvisioning) { + const provOpacity = step3toProvision.inOpacity; + return ( +
+ {/* Spinner */} +
+ +
+ Creating sandbox... +
+ + + {provisionDone ? "Running" : "Provisioning"} + + +
+ {provisionText} +
+
+ ); + } + + /* ---- Wizard steps ---- */ + if (wizardStep === 1) { + const stepOpacity = fadeIn(frame, 55, 10); + return ( +
+ {/* Header */} +
+
+ Create Sandbox +
+ +
+ + {/* OS Label */} +
+ Operating System +
+ + {/* OS Cards */} +
+ {(["Linux", "Windows", "macOS"] as const).map((os) => ( +
+
+ {os === "Linux" ? "\uD83D\uDC27" : os === "Windows" ? "\u2B1C" : "\uF8FF"} +
+
+ {os} +
+
+ ))} +
+ + {/* Next button */} +
+
+ Next +
+
+
+ ); + } + + if (wizardStep === 2) { + const inOp = + frame < 105 + ? interpolate(frame, [100, 105], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + }) + : 1; + + return ( +
+ {/* Header */} +
+
+ Create Sandbox +
+ +
+ + {/* Config Label */} +
+ Configuration +
+ + {/* Config Cards */} +
+ {( + [ + { name: "Small", cpu: "2 vCPU", ram: "4 GB" }, + { name: "Medium", cpu: "4 vCPU", ram: "8 GB" }, + { name: "Large", cpu: "8 vCPU", ram: "16 GB" }, + ] as const + ).map((cfg) => ( +
+
+ {cfg.name} +
+
+ {cfg.cpu} · {cfg.ram} +
+
+ ))} +
+ + {/* Next button */} +
+
+ Next +
+
+
+ ); + } + + if (wizardStep === 3) { + const inOp = + frame < 145 + ? interpolate(frame, [140, 145], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + }) + : 1; + + return ( +
+ {/* Header */} +
+
+ Create Sandbox +
+ +
+ + {/* Region Label */} +
+ Region +
+ + {/* Region options */} +
+ {["us-east-1", "eu-west-1", "ap-south-1"].map((region) => ( +
+ {/* Radio indicator */} +
+ {regionSelected && region === "us-east-1" && ( +
+ )} +
+ + {region} + +
+ ))} +
+ + {/* Summary */} +
+ Linux · Medium · us-east-1 +
+ + {/* Create Sandbox button */} +
+
+ Create Sandbox +
+
+
+ ); + } + + return null; + }; + + /* ================================================================ */ + /* Render */ + /* ================================================================ */ + + return ( +
+ {/* ---- Dashboard content ---- */} +
+ {/* Top bar */} +
+
+ Sandboxes +
+
+ New Sandbox
+
+ + {/* Metric cards */} +
+ + +
+ + {/* Table */} +
+ {/* Header row */} +
+
Name
+
Status
+
Region
+
OS
+
+ + {/* Row 1 */} +
+
+ dev-agent-01 +
+
+ Running +
+
+ us-east +
+
+ Linux +
+
+ + {/* Row 2 */} +
+
+ test-env-03 +
+
+ Stopped +
+
+ eu-west +
+
+ Linux +
+
+
+
+ + {/* ---- Modal overlay ---- */} + {showModal && ( +
+
+ {renderWizardContent()} +
+
+ )} +
+ ); +}; diff --git a/videos/cua-cloud/src/remotion/components/SmoothReveal.tsx b/videos/cua-cloud/src/remotion/components/SmoothReveal.tsx new file mode 100644 index 0000000..bee20e0 --- /dev/null +++ b/videos/cua-cloud/src/remotion/components/SmoothReveal.tsx @@ -0,0 +1,61 @@ +import { useCurrentFrame, interpolate, Easing } from "remotion"; +import { ReactNode } from "react"; + +interface SmoothRevealProps { + children: ReactNode; + durationInFrames?: number; + delay?: number; + direction?: "up" | "left"; + distance?: number; + blurAmount?: number; + easing?: (t: number) => number; + style?: React.CSSProperties; +} + +/** + * A smooth text reveal that uses blur + opacity + translate instead of clip-path. + * Text starts blurry and transparent, then clears up and slides into place. + * Replaces the choppy TextReveal component. + */ +export const SmoothReveal: React.FC = ({ + children, + durationInFrames = 20, + delay = 0, + direction = "up", + distance = 20, + blurAmount = 8, + easing = Easing.bezier(0.22, 1, 0.36, 1), + style, +}) => { + const frame = useCurrentFrame(); + + const progress = interpolate(frame - delay, [0, durationInFrames], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing, + }); + + const opacity = progress; + const blur = interpolate(progress, [0, 1], [blurAmount, 0]); + const translate = interpolate(progress, [0, 1], [distance, 0]); + + const transform = + direction === "up" + ? `translateY(${translate}px)` + : `translateX(${translate}px)`; + + return ( +
+ {children} +
+ ); +}; diff --git a/videos/cua-cloud/src/remotion/components/StatusDot.tsx b/videos/cua-cloud/src/remotion/components/StatusDot.tsx new file mode 100644 index 0000000..c99b9de --- /dev/null +++ b/videos/cua-cloud/src/remotion/components/StatusDot.tsx @@ -0,0 +1,45 @@ +import { useCurrentFrame, interpolate, Easing } from "remotion"; + +interface StatusDotProps { + delay?: number; + color?: string; + size?: number; +} + +export const StatusDot: React.FC = ({ + delay = 0, + color = "#4fd37e", + size = 8, +}) => { + const frame = useCurrentFrame(); + const adjustedFrame = Math.max(0, frame - delay); + + const opacity = interpolate(adjustedFrame, [0, 8], [0, 1], { + extrapolateRight: "clamp", + easing: Easing.out(Easing.cubic), + }); + + const scale = interpolate(adjustedFrame, [0, 8], [0.5, 1], { + extrapolateRight: "clamp", + easing: Easing.out(Easing.back(2)), + }); + + // Subtle pulse after appearing + const pulseFrame = Math.max(0, adjustedFrame - 8); + const pulse = 1 + Math.sin(pulseFrame * 0.15) * 0.15; + + return ( +
+ ); +}; diff --git a/videos/cua-cloud/src/remotion/index.ts b/videos/cua-cloud/src/remotion/index.ts new file mode 100644 index 0000000..a4e987f --- /dev/null +++ b/videos/cua-cloud/src/remotion/index.ts @@ -0,0 +1,5 @@ +import { registerRoot } from "remotion"; +import { RemotionRoot } from "./Root"; +import "../styles/global.css"; + +registerRoot(RemotionRoot); diff --git a/videos/cua-cloud/src/remotion/scenes/DemoScene.tsx b/videos/cua-cloud/src/remotion/scenes/DemoScene.tsx new file mode 100644 index 0000000..b4e4b2a --- /dev/null +++ b/videos/cua-cloud/src/remotion/scenes/DemoScene.tsx @@ -0,0 +1,593 @@ +import { AbsoluteFill, useCurrentFrame, interpolate, Easing } from "remotion"; +import { GridBackground } from "../components/GridBackground"; +import { SmoothReveal } from "../components/SmoothReveal"; +import { BrowserFrame } from "../components/BrowserFrame"; +import { AnimatedCursor } from "../components/AnimatedCursor"; +import { GradientText } from "../components/GradientText"; +import { CUA, cuaEasings, DEMO_DURATION } from "../theme"; +import { loadFont } from "@remotion/google-fonts/Urbanist"; + +const { fontFamily } = loadFont(); + +export const DEMO_SCENE_DURATION = DEMO_DURATION; + +// --------------------------------------------------------------------------- +// SpecItem — small spec display with icon +// --------------------------------------------------------------------------- +const SpecItem: React.FC<{ icon: string; label: string }> = ({ + icon, + label, +}) => { + const iconPath = + icon === "cpu" + ? "M4 4h8v8H4zM6 1v3m4-3v3M6 12v3m4-3v3M1 6h3m-3 4h3M12 6h3m-3 4h3" + : icon === "ram" + ? "M2 5h12v6H2zM4 5V3m4 2V3m4 2V3M4 11v2m4-2v2m4-2v2" + : "M3 3h10v10H3zM7 7h2v2H7z"; + + return ( +
+ + + + + {label} + +
+ ); +}; + +// --------------------------------------------------------------------------- +// SandboxCard — "just-falcon" style running sandbox card +// Purple/blue gradient header, green dot, specs, Open in Browser button +// --------------------------------------------------------------------------- +const SandboxCard: React.FC<{ + name: string; + statusDotColor: string; + statusLabel?: string; + showContent?: boolean; + contentOpacity?: number; +}> = ({ + name, + statusDotColor, + statusLabel, + showContent = true, + contentOpacity = 1, +}) => { + return ( +
+ {/* Gradient header */} +
+ + {/* Body */} +
+ {/* Name row */} +
+
+ + {name} + + {/* Copy icon */} + + + + + {/* Gear icon */} + + + + +
+ + {/* Status label (for provisioning card) */} + {statusLabel && ( +
+ {statusLabel} +
+ )} + + {/* "Never" muted text */} +
+ Never +
+ + {/* Specs row */} +
+ + + +
+ + {/* Open in Browser button */} +
+ Open in Browser +
+
+
+ ); +}; + +// --------------------------------------------------------------------------- +// AddNewSandboxCard — dashed border placeholder card +// --------------------------------------------------------------------------- +const AddNewSandboxCard: React.FC<{ + glowing?: boolean; +}> = ({ glowing = false }) => { + return ( +
+ {/* Plus icon */} +
+ + +
+ {/* Label */} +
+ Add New Sandbox +
+
+ ); +}; + +// --------------------------------------------------------------------------- +// NewCardMaterialize — the card that appears after clicking "Add New Sandbox" +// Scales from center, header first, then body content fades in +// --------------------------------------------------------------------------- +const NewCardMaterialize: React.FC<{ + frame: number; + startFrame: number; +}> = ({ frame, startFrame }) => { + const localFrame = frame - startFrame; + + // Card scales in from 0 to 1 over 15 frames + const cardScale = interpolate(localFrame, [0, 15], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.entrance, + }); + + // Body content fades in after header appears (starts at frame +10) + const bodyOpacity = interpolate(localFrame, [10, 22], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.reveal, + }); + + // Provisioning -> Running transition at frame 140 (absolute) + const isRunning = frame >= 140; + const dotColor = isRunning ? CUA.brand.green : CUA.brand.blue; + const statusLabel = isRunning ? "Running" : "Provisioning"; + + if (localFrame < 0) return null; + + return ( +
+ +
+ ); +}; + +// --------------------------------------------------------------------------- +// DashboardContent — the grid of cards inside the browser +// --------------------------------------------------------------------------- +const DashboardContent: React.FC<{ + frame: number; + showAddCard: boolean; + addCardGlowing: boolean; + showNewCard: boolean; +}> = ({ frame, showAddCard, addCardGlowing, showNewCard }) => { + return ( +
+
+ {/* Slot 1: Add New Sandbox card OR the materializing new card */} + {showAddCard && !showNewCard && ( + + )} + {showNewCard && ( + + )} + + {/* Slot 2: Existing running sandbox "just-falcon" */} + +
+
+ ); +}; + +// --------------------------------------------------------------------------- +// Cursor positions for Part 2 interaction +// --------------------------------------------------------------------------- +const CURSOR_POSITIONS = [ + { x: 1200, y: 700, frame: 95, click: false }, // Enter from bottom-right + { x: 820, y: 490, frame: 108, click: false }, // Glide to "Add New Sandbox" center + { x: 820, y: 490, frame: 110, click: true }, // Click + { x: 900, y: 550, frame: 130, click: false }, // Drift away +]; + +// --------------------------------------------------------------------------- +// DemoScene — 210 frames +// Part 1 (0-80): Word-by-word storytelling, text ONLY +// Part 2 (80-210): Dashboard UI demo, visuals ONLY +// --------------------------------------------------------------------------- +export const DemoScene: React.FC = () => { + const frame = useCurrentFrame(); + + // =================================================================== + // PART 1 (frames 0-80): Word-by-word text reveal + // =================================================================== + + // All three lines fade out together at frames 72-80 + const textGroupFade = interpolate(frame, [72, 80], [1, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.exit, + }); + + // =================================================================== + // PART 2 (frames 80-210): Browser + Dashboard + // =================================================================== + + // Browser scale: 0.85 -> 1.15 (entrance), zoom to 1.5 during click, hold 1.5 -> 1.55 + const browserEntryScale = interpolate(frame, [80, 100], [0.85, 1.15], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: Easing.out(Easing.cubic), + }); + const browserCinematicScale = interpolate(frame, [100, 170], [1.15, 1.5], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.dramatic, + }); + const browserHoldScale = interpolate(frame, [170, 
200], [1.5, 1.55], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.smooth, + }); + + const browserScale = + frame < 100 + ? browserEntryScale + : frame < 170 + ? browserCinematicScale + : browserHoldScale; + + // Browser opacity: fade in 80-90, hold, fade out 185-205 + const browserOpacity = interpolate( + frame, + [80, 90, 185, 205], + [0, 1, 1, 0], + { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.smooth, + }, + ); + + // Browser blur: starts blurry and clears up (matches SmoothReveal blur effect) + const browserBlur = interpolate(frame, [80, 100], [15, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.dramatic, + }); + + // Card interaction states + const addCardGlowing = frame >= 110 && frame < 118; + const showNewCard = frame >= 115; + + return ( + + {/* ============================================= */} + {/* PART 1: Word-by-word text (frames 0-80) */} + {/* ============================================= */} + {frame < 80 && ( + = 72 ? textGroupFade : 1, + }} + > +
+ {/* "Instantly" — frames 0-20 */} + +
+ Instantly +
+
+ + {/* "get a sandbox" — frames 25-45 */} + {frame >= 25 && ( + +
+ get a sandbox +
+
+ )} + + {/* "that's yours." — frames 50-70 */} + {frame >= 50 && ( + +
+ that's{" "} + yours. +
+
+ )} +
+
+ )} + + {/* ============================================= */} + {/* PART 2: Browser + Dashboard (frames 80-210) */} + {/* ============================================= */} + {frame >= 80 && frame < 210 && ( + +
= 80 && frame < 100 ? `blur(${browserBlur}px)` : undefined, + }} + > + + + + +
+
+ )} + + {/* Animated cursor */} + {frame >= 95 && frame <= 140 && ( + + )} +
+ ); +}; diff --git a/videos/cua-cloud/src/remotion/scenes/EndCardScene.tsx b/videos/cua-cloud/src/remotion/scenes/EndCardScene.tsx new file mode 100644 index 0000000..e0b5479 --- /dev/null +++ b/videos/cua-cloud/src/remotion/scenes/EndCardScene.tsx @@ -0,0 +1,293 @@ +import React from "react"; +import { + AbsoluteFill, + useCurrentFrame, + interpolate, + Easing, +} from "remotion"; +import { GridBackground } from "../components/GridBackground"; +import { SmoothReveal } from "../components/SmoothReveal"; +import { AnimatedKoala } from "../components/AnimatedKoala"; +import { CUA, cuaEasings, ENDCARD_DURATION } from "../theme"; +import { loadFont as loadMono } from "@remotion/google-fonts/JetBrainsMono"; +import { loadFont } from "@remotion/google-fonts/Urbanist"; + +const { fontFamily } = loadFont(); +const { fontFamily: monoFamily } = loadMono(); + +export const ENDCARD_SCENE_DURATION = ENDCARD_DURATION; + +// --------------------------------------------------------------------------- +// GradientWave — multi-layered SVG wave with heavy blur for hazy, dreamy feel +// --------------------------------------------------------------------------- +const GradientWave: React.FC<{ frame: number }> = ({ frame }) => { + const width = 1920; + const height = 180; + const shift = frame * 1.5; + + const makeWavePath = ( + speed: number, + amplitude: number, + yOffset: number, + phaseOffset: number, + ): string => { + const points: string[] = []; + for (let x = 0; x <= width; x += 4) { + const y = + yOffset + + Math.sin((x + shift * speed + phaseOffset) * 0.006) * amplitude + + Math.sin((x - shift * speed * 0.7 + phaseOffset) * 0.01) * + (amplitude * 0.6) + + Math.sin((x + shift * speed * 1.3 + phaseOffset) * 0.004) * + (amplitude * 0.8); + points.push(`${x},${y}`); + } + return `M0,${height} L${points.join(" L")} L${width},${height} Z`; + }; + + const layers = [ + { speed: 2.0, amplitude: 30, yOffset: height * 0.55, phaseOffset: 0, opacity: 0.25 }, + { speed: 1.4, amplitude: 
22, yOffset: height * 0.45, phaseOffset: 200, opacity: 0.18 }, + { speed: 2.5, amplitude: 18, yOffset: height * 0.6, phaseOffset: 500, opacity: 0.12 }, + ]; + + return ( +
+ + + + + + + + + {layers.map((layer, i) => ( + + ))} + +
+ ); +}; + +export const EndCardScene: React.FC = () => { + const frame = useCurrentFrame(); + + // --------------------------------------------------------------------------- + // Koala entrance: frames 0-25 + // Starts HUGE (scale 2.0), scales down to 1.0 with blur clearing + // --------------------------------------------------------------------------- + const koalaScaleDown = interpolate(frame, [0, 25], [2.0, 1.0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.smooth, + }); + + const koalaFadeIn = interpolate(frame, [0, 12], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + }); + + const koalaBlur = interpolate(frame, [0, 25], [15, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.smooth, + }); + + // --------------------------------------------------------------------------- + // CTA button: frames 35-50 bounce entrance with blur fade-in + // --------------------------------------------------------------------------- + const ctaScale = interpolate(frame, [35, 50], [0, 1.0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.entrance, + }); + + const ctaOpacity = interpolate(frame, [35, 43], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + }); + + const ctaBlur = interpolate(frame, [35, 50], [12, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: Easing.out(Easing.cubic), + }); + + // --------------------------------------------------------------------------- + // Koala look direction (adjusted +5 frames) + // Frame 40-55: Koala blinks at 45. Looks left at 50. + // Frame 55-65: Koala looks right at 60. + // Frame 65-70: Koala blinks at 65. Back to center. 
+ // --------------------------------------------------------------------------- + let koalaLookDirection: "left" | "center" | "right" = "center"; + let koalaLookStartFrame = 0; + + if (frame >= 50 && frame < 60) { + koalaLookDirection = "left"; + koalaLookStartFrame = 50; + } else if (frame >= 60 && frame < 70) { + koalaLookDirection = "right"; + koalaLookStartFrame = 60; + } else if (frame >= 70) { + koalaLookDirection = "center"; + koalaLookStartFrame = 70; + } + + // --------------------------------------------------------------------------- + // Breathing animation: frames 65-180 + // Subtle scale oscillation between 1.0 and 1.02 + // --------------------------------------------------------------------------- + const breatheScale = + frame >= 65 && frame < 180 + ? Math.sin(frame * 0.08) * 0.02 + 1.0 + : 1.0; + + // --------------------------------------------------------------------------- + // Exit: frames 200-250 + // Scale UP slightly (1.0 -> 1.15) while fading out — cinematic zoom-into-black + // --------------------------------------------------------------------------- + const exitScale = interpolate(frame, [200, 250], [1.0, 1.15], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.smooth, + }); + + const exitOpacity = interpolate(frame, [200, 250], [1, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.exit, + }); + + // Combine scales: entrance (first 25 frames), then breathe, then exit + let compositionScale: number; + if (frame < 25) { + compositionScale = koalaScaleDown; + } else if (frame >= 200) { + compositionScale = exitScale * breatheScale; + } else { + compositionScale = breatheScale; + } + + const masterOpacity = frame < 200 ? koalaFadeIn : koalaFadeIn * exitOpacity; + + // Frames 250+: black + if (frame >= 250) { + return ( + + + + ); + } + + return ( + + {/* Animated gradient wave at bottom */} + + + +
+ {/* Animated Koala — big and centered */} + + + {/* "cua.ai" text — SmoothReveal from frame 25 */} + +
+ cua.ai +
+
+ + {/* "Try now ->" CTA pill button — bounce entrance from frame 35 with blur */} +
= 35 && frame < 50 ? `blur(${ctaBlur}px)` : undefined, + marginTop: 4, + }} + > +
+ Try now + +
+
+
+
+
+ ); +}; diff --git a/videos/cua-cloud/src/remotion/scenes/FullVideo.tsx b/videos/cua-cloud/src/remotion/scenes/FullVideo.tsx new file mode 100644 index 0000000..f5053d0 --- /dev/null +++ b/videos/cua-cloud/src/remotion/scenes/FullVideo.tsx @@ -0,0 +1,35 @@ +import { AbsoluteFill, Series } from "remotion"; +import { HookScene, HOOK_SCENE_DURATION } from "./HookScene"; +import { DemoScene, DEMO_SCENE_DURATION } from "./DemoScene"; +import { ShowcaseTaglineScene, SHOWCASE_TAGLINE_SCENE_DURATION } from "./ShowcaseTaglineScene"; +import { EndCardScene, ENDCARD_SCENE_DURATION } from "./EndCardScene"; + +export const FULL_VIDEO_DURATION = + HOOK_SCENE_DURATION + + DEMO_SCENE_DURATION + + SHOWCASE_TAGLINE_SCENE_DURATION + + ENDCARD_SCENE_DURATION; + +export const FullVideo: React.FC = () => { + return ( + + + + + + + + + + + + + + + + + + + + ); +}; diff --git a/videos/cua-cloud/src/remotion/scenes/HookScene.tsx b/videos/cua-cloud/src/remotion/scenes/HookScene.tsx new file mode 100644 index 0000000..b720079 --- /dev/null +++ b/videos/cua-cloud/src/remotion/scenes/HookScene.tsx @@ -0,0 +1,379 @@ +import { AbsoluteFill, useCurrentFrame, interpolate } from "remotion"; +import { GridBackground } from "../components/GridBackground"; +import { SmoothReveal } from "../components/SmoothReveal"; +import { GradientText } from "../components/GradientText"; +import { AnimatedKoala } from "../components/AnimatedKoala"; +import { CUA, cuaEasings, GRADIENTS, HOOK_DURATION } from "../theme"; +import { loadFont } from "@remotion/google-fonts/Urbanist"; + +const { fontFamily } = loadFont(); + +export const HOOK_SCENE_DURATION = HOOK_DURATION; + +// --------------------------------------------------------------------------- +// GradientWave — multi-layered SVG wave with heavy blur for hazy, dreamy feel +// --------------------------------------------------------------------------- +const GradientWave: React.FC<{ frame: number }> = ({ frame }) => { + const width = 1920; + const height = 180; 
+ const shift = frame * 1.5; + + const makeWavePath = ( + speed: number, + amplitude: number, + yOffset: number, + phaseOffset: number, + ): string => { + const points: string[] = []; + for (let x = 0; x <= width; x += 4) { + const y = + yOffset + + Math.sin((x + shift * speed + phaseOffset) * 0.006) * amplitude + + Math.sin((x - shift * speed * 0.7 + phaseOffset) * 0.01) * + (amplitude * 0.6) + + Math.sin((x + shift * speed * 1.3 + phaseOffset) * 0.004) * + (amplitude * 0.8); + points.push(`${x},${y}`); + } + return `M0,${height} L${points.join(" L")} L${width},${height} Z`; + }; + + const layers = [ + { speed: 2.0, amplitude: 30, yOffset: height * 0.55, phaseOffset: 0, opacity: 0.25 }, + { speed: 1.4, amplitude: 22, yOffset: height * 0.45, phaseOffset: 200, opacity: 0.18 }, + { speed: 2.5, amplitude: 18, yOffset: height * 0.6, phaseOffset: 500, opacity: 0.12 }, + ]; + + return ( +
+ + + + + + + + + {layers.map((layer, i) => ( + + ))} + +
+ ); +}; + +// --------------------------------------------------------------------------- +// HookScene — "Meet CUA Cloud v2" (120 frames / 4s) +// +// Timeline: +// 0-3 Grid fades in +// 3-12 "Meet" appears HUGE at scale 3.0 with SmoothReveal blur +// 12-40 Camera pulls back 3.0 -> 1.0, "CUA Cloud" fades in +// 38-48 "v2" slides in from right (dramatic, no overshoot) +// 38-48 Koala fades in with blur +// 50-80 ZOOM IN on v2 — scale 1.0 -> 3.5, transformOrigin shifts right +// "Meet", "CUA Cloud", and koala fade out with blur (50-62) +// ONLY the gradient "v2" remains, filling the viewport +// 95-120 Fade out (opacity only, v2 is already zoomed huge) +// --------------------------------------------------------------------------- +export const HookScene: React.FC = () => { + const frame = useCurrentFrame(); + + // --- Grid fade-in: frames 0-3 --- + const gridOpacity = interpolate(frame, [0, 3], [0, CUA.grid.opacity], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + }); + + // --- Blur fade-in for the whole composition container: frames 0-10 --- + const containerBlur = interpolate(frame, [0, 10], [14, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.reveal, + }); + const containerEntryOpacity = interpolate(frame, [0, 10], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.reveal, + }); + + // --- Camera pull-back: frames 12-40, scale 3.0 -> 1.0 --- + const zoomScale = interpolate(frame, [12, 40], [3.0, 1.0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.dramatic, + }); + + // --- "CUA Cloud" reveal: frames 18-34 --- + const cloudOpacity = interpolate(frame, [18, 34], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.dramatic, + }); + const cloudTranslateY = interpolate(frame, [18, 34], [20, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.dramatic, + }); + // Blur fade-in for 
"CUA Cloud" text + const cloudBlur = interpolate(frame, [18, 34], [12, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.dramatic, + }); + + // --- "v2" slide-in from right: frames 38-48 (dramatic, no overshoot) --- + const v2TranslateX = interpolate(frame, [38, 48], [300, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.dramatic, + }); + const v2Opacity = interpolate(frame, [38, 45], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.reveal, + }); + // Blur fade-in for "v2" + const v2EntryBlur = interpolate(frame, [38, 48], [15, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.dramatic, + }); + + // --- AnimatedKoala fade-in with blur: frames 38-48 --- + const koalaEntryOpacity = interpolate(frame, [38, 48], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.reveal, + }); + const koalaEntryBlur = interpolate(frame, [38, 48], [14, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.reveal, + }); + + // --- ZOOM IN on v2: frames 50-80, scale 1.0 -> 3.5 --- + const v2ZoomScale = interpolate(frame, [50, 80], [1.0, 3.5], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.dramatic, + }); + + // transformOrigin X shifts from 50% -> 78% to center on v2 + const originX = interpolate(frame, [50, 80], [50, 78], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.dramatic, + }); + + // --- Fade out "Meet", "CUA Cloud", and koala during zoom: frames 50-62 --- + const discardOpacity = interpolate(frame, [50, 62], [1, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.exit, + }); + const discardBlur = interpolate(frame, [50, 62], [0, 15], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.exit, + }); + + // --- Exit: frames 95-120, fade out only (already 
zoomed) --- + const exitOpacity = interpolate(frame, [95, 120], [1, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.exit, + }); + + // Composite scale: pull-back phase, then v2 zoom-in phase + // Pull-back settles at 1.0 by frame 40. v2 zoom kicks in at frame 50. + const compositeScale = frame < 50 ? zoomScale : zoomScale * v2ZoomScale; + + // Master opacity: full until exit phase + const masterOpacity = frame < 95 ? 1 : exitOpacity; + + // transformOrigin: center during pull-back, shifts right during v2 zoom + const transformOrigin = `${originX}% 50%`; + + // Koala composite opacity: entry then discard + const koalaOpacity = frame < 50 + ? koalaEntryOpacity + : koalaEntryOpacity * discardOpacity; + const koalaBlur = frame < 50 + ? koalaEntryBlur + : discardBlur; + + // "Meet" discard filter (SmoothReveal handles entry; we layer discard on top) + const meetWrapOpacity = frame < 50 ? 1 : discardOpacity; + const meetWrapBlur = frame < 50 ? 0 : discardBlur; + + // "CUA Cloud" composite: entry blur then discard blur + const cloudCompositeOpacity = frame < 50 + ? cloudOpacity + : cloudOpacity * discardOpacity; + const cloudCompositeBlur = frame < 50 + ? cloudBlur + : discardBlur; + + // "v2" entry blur (no discard — v2 stays visible) + const v2Blur = v2EntryBlur; + + return ( + + {/* Animated gradient wave at bottom */} + + + + {/* Master container with blur fade-in, pulls back 3.0->1.0, then zooms into v2 1.0->3.5 */} +
+ {/* "Meet" — BLASTS in at frame 3, enormous at scale(3.0) */} + {/* Wrapper applies discard fade-out + blur during zoom phase */} +
+ +
+ Meet +
+
+
+ + {/* "CUA Cloud" + "v2" row — revealed as camera pulls back */} +
+ {/* Koala to the left of text — blur fade-in, then discard during zoom */} +
+ +
+ + {/* "CUA Cloud" text — blur fade-in, then discard during zoom */} +
+ CUA Cloud +
+ + {/* "v2" — slides in from right with blur, then becomes the hero via zoom */} +
+ + v2 + +
+
+
+
+
+ ); +}; diff --git a/videos/cua-cloud/src/remotion/scenes/ShowcaseTaglineScene.tsx b/videos/cua-cloud/src/remotion/scenes/ShowcaseTaglineScene.tsx new file mode 100644 index 0000000..40d4dc2 --- /dev/null +++ b/videos/cua-cloud/src/remotion/scenes/ShowcaseTaglineScene.tsx @@ -0,0 +1,890 @@ +import React from "react"; +import { + AbsoluteFill, + Img, + useCurrentFrame, + interpolate, + Easing, + staticFile, +} from "remotion"; +import { GridBackground } from "../components/GridBackground"; +import { SmoothReveal } from "../components/SmoothReveal"; +import { GradientText } from "../components/GradientText"; +import { AnimatedKoala } from "../components/AnimatedKoala"; +import { CUA, GRADIENTS, SHOWCASE_TAGLINE_DURATION, cuaEasings } from "../theme"; +import { loadFont } from "@remotion/google-fonts/Urbanist"; +import { loadFont as loadMono } from "@remotion/google-fonts/JetBrainsMono"; + +const { fontFamily } = loadFont(); +const { fontFamily: monoFamily } = loadMono(); + +export const SHOWCASE_TAGLINE_SCENE_DURATION = SHOWCASE_TAGLINE_DURATION; + +// --------------------------------------------------------------------------- +// Card definitions — each represents a different agent activity +// --------------------------------------------------------------------------- + +type ActivityType = "browsing" | "executing" | "navigating" | "typing" | "multitasking"; + +interface CardDef { + type: ActivityType; + label: string; + /** Angle (radians) from which the card enters */ + entryAngle: number; +} + +const CARDS: CardDef[] = [ + { type: "browsing", label: "Browsing", entryAngle: (-3 * Math.PI) / 4 }, + { type: "executing", label: "Executing", entryAngle: 0 }, + { type: "navigating", label: "Navigating", entryAngle: (3 * Math.PI) / 4 }, + { type: "typing", label: "Typing", entryAngle: -Math.PI / 4 }, + { type: "multitasking", label: "Multitasking", entryAngle: Math.PI / 2 }, +]; + +const CARD_WIDTH = 380; +const CARD_HEIGHT = 240; +const TITLE_BAR_HEIGHT = 28; + +// 
--------------------------------------------------------------------------- +// macOS-style title bar with traffic lights +// --------------------------------------------------------------------------- + +const TitleBar: React.FC<{ label: string }> = ({ label }) => ( +
+
+
+
+
+ {label} +
+
+); + +// --------------------------------------------------------------------------- +// Agent cursor overlay — animated pointer at a given position +// --------------------------------------------------------------------------- + +const AgentCursor: React.FC<{ x: number; y: number; visible: boolean }> = ({ + x, + y, + visible, +}) => { + if (!visible) return null; + return ( +
+ {/* Simple cursor arrow */} + + + +
+ ); +}; + +// --------------------------------------------------------------------------- +// Overlay window — a semi-transparent window panel overlaid on the desktop +// --------------------------------------------------------------------------- + +interface OverlayWindowProps { + top: number; + left: number; + width: number; + height: number; + title: string; + accentColor: string; + children?: React.ReactNode; +} + +const OverlayWindow: React.FC = ({ + top, + left, + width, + height, + title, + accentColor, + children, +}) => ( +
+ {/* Mini title bar */} +
+
+
+
+
+ {title} +
+
+ {/* Content area */} +
+ {children} +
+
+); + +// --------------------------------------------------------------------------- +// Activity-specific overlays rendered on top of the desktop screenshot +// --------------------------------------------------------------------------- + +const BrowsingOverlay: React.FC<{ frame: number; enterFrame: number }> = ({ frame, enterFrame }) => { + const elapsed = Math.max(0, frame - enterFrame); + const loadPct = interpolate(elapsed, [0, 60], [0, 100], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.entrance, + }); + const cursorX = interpolate(elapsed, [0, 80], [60, 200], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.smooth, + }); + const cursorY = interpolate(elapsed, [0, 80], [100, 60], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.smooth, + }); + return ( + <> + + {/* URL bar with loading progress */} +
+
+
+ {/* Page content skeleton */} +
+
+
+
+ + 10} /> + + ); +}; + +const ExecutingOverlay: React.FC<{ frame: number; enterFrame: number }> = ({ frame, enterFrame }) => { + const elapsed = Math.max(0, frame - enterFrame); + const visibleLines = Math.min(5, Math.floor(elapsed / 20)); + const cursorOn = Math.floor(frame / 15) % 2 === 0; + return ( + <> + + {Array.from({ length: visibleLines }).map((_, i) => ( +
+ ))} + {/* Blinking cursor line */} +
+
+
+
+ + 8} /> + + ); +}; + +const NavigatingOverlay: React.FC<{ frame: number; enterFrame: number }> = ({ frame, enterFrame }) => { + const elapsed = Math.max(0, frame - enterFrame); + const cursorX = interpolate(elapsed, [0, 90], [100, 180], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.smooth, + }); + const cursorY = interpolate(elapsed, [0, 90], [60, 120], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.smooth, + }); + const iconColors = ["#61bcff", "#4fd37e", "#a78bfa", "#ffbd2e", "#61bcff", "#ff5f56"]; + return ( + <> + +
+ {/* Sidebar */} +
+ {[35, 28, 32].map((w, i) => ( +
+ ))} +
+ {/* File grid */} +
+ {iconColors.map((color, i) => ( +
+ ))} +
+
+ + 12} /> + + ); +}; + +const TypingOverlay: React.FC<{ frame: number; enterFrame: number }> = ({ frame, enterFrame }) => { + const elapsed = Math.max(0, frame - enterFrame); + const visibleLines = Math.min(6, Math.floor(elapsed / 25) + 1); + const lineColors = ["#4fd37e", "#61bcff", "#f6f8fb", "#a78bfa", "#4fd37e", "#61bcff"]; + const lineWidths = [65, 50, 75, 38, 60, 45]; + return ( + <> + + {lineColors.slice(0, visibleLines).map((color, i) => ( +
+ ))} + + 6} /> + + ); +}; + +const MultitaskingOverlay: React.FC<{ frame: number; enterFrame: number }> = ({ frame, enterFrame }) => { + const elapsed = Math.max(0, frame - enterFrame); + const cursorX = interpolate(elapsed, [0, 40, 80], [80, 200, 140], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + }); + const cursorY = interpolate(elapsed, [0, 40, 80], [50, 100, 70], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + }); + return ( + <> + {/* Small browser window */} + +
+
+ + {/* Small terminal window */} + +
+
+ + {/* Small editor window */} + +
+
+ + 10} /> + + ); +}; + +// --------------------------------------------------------------------------- +// DesktopScreenshot — real screenshot with activity overlays +// --------------------------------------------------------------------------- + +interface DesktopScreenshotProps { + type: ActivityType; + label: string; + frame: number; + enterFrame: number; +} + +const DesktopScreenshot: React.FC = ({ + type, + label, + frame, + enterFrame, +}) => { + const renderOverlay = () => { + switch (type) { + case "browsing": + return ; + case "executing": + return ; + case "navigating": + return ; + case "typing": + return ; + case "multitasking": + return ; + } + }; + + return ( +
+ + {/* Desktop screenshot with activity overlay */} +
+ + {/* Activity overlay layer */} +
+ {renderOverlay()} +
+
+
+ ); +}; + +// --------------------------------------------------------------------------- +// Positioning helpers +// --------------------------------------------------------------------------- + +interface CardPosition { + x: number; + y: number; + scale: number; + rotateX: number; + rotateY: number; + opacity: number; + blur: number; +} + +/** + * Phase 1 (frames 0-120): Cards enter from edges then orbit. + * Phase 2 (frames 120-150): Cards converge inward and fade to zero opacity. + */ +const getCardPosition = ( + cardIndex: number, + frame: number, + totalCards: number, + entryAngle: number, +): CardPosition => { + const enterStart = 10 + cardIndex * 10; + const progress = Math.max(0, frame - enterStart); + + // --- Phase 1: Entry + orbit (frames 0-120) --- + + // Radius: starts at 800, converges to 260 (backloaded via Easing.in(cubic)) + const entryRadiusProgress = interpolate(progress, [0, 70], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: Easing.in(Easing.cubic), + }); + const orbitRadius = interpolate(entryRadiusProgress, [0, 1], [800, 260]); + + // Base angle from even distribution, blended from entryAngle + const baseAngle = (cardIndex / totalCards) * Math.PI * 2; + const currentAngle = + entryAngle * (1 - entryRadiusProgress) + (baseAngle + frame * 0.008) * entryRadiusProgress; + const orbitAngle = entryRadiusProgress >= 1 ? baseAngle + frame * 0.008 : currentAngle; + + // --- Phase 2: Convergence (frames 120-150) --- + const convergeProgress = interpolate(frame, [120, 150], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: Easing.in(Easing.cubic), + }); + + // Radius shrinks from orbit position (260) to 0 + const radius = frame < 120 + ? orbitRadius + : interpolate(convergeProgress, [0, 1], [260, 0]); + + // Scale: entry 0.6->0.85, then convergence shrinks 0.85->0.4 + const entryScale = interpolate(entryRadiusProgress, [0, 1], [0.6, 0.85]); + const scale = frame < 120 + ? 
entryScale + : interpolate(convergeProgress, [0, 1], [0.85, 0.4]); + + // Opacity: fade in during entry with smooth easing, fade to zero during convergence + const entryOpacity = interpolate(progress, [0, 15], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.entrance, + }); + const opacity = frame < 120 + ? entryOpacity + : interpolate(convergeProgress, [0, 1], [1, 0]); + + // Blur: starts at 12px and clears to 0 over the first 15 frames of entry + const entryBlur = interpolate(progress, [0, 15], [12, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.entrance, + }); + const blur = frame < 120 ? entryBlur : 0; + + // Compute X/Y from polar coordinates (Y squished 0.6x for elliptical orbit) + const x = Math.cos(orbitAngle) * radius; + const y = Math.sin(orbitAngle) * radius * 0.6; + + // 3D tilt + const rotateY = Math.cos(orbitAngle) * 12; + const rotateX = 5 + Math.sin(frame * 0.03 + cardIndex) * 3; + + return { x, y, scale, rotateX, rotateY, opacity, blur }; +}; + +// --------------------------------------------------------------------------- +// ShowcaseTaglineScene — 300 frames (10s) +// --------------------------------------------------------------------------- + +export const ShowcaseTaglineScene: React.FC = () => { + const frame = useCurrentFrame(); + + // --- Wobble for cards --- + const wobble = (index: number) => Math.sin(frame * 0.03 + index) * 8; + + // --- Eyebrow text: "BUILT FOR COMPUTER USE" --- + // Fade out frames 130-150 + const eyebrowOpacity = interpolate(frame, [130, 150], [1, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.exit, + }); + + // --- Grid pulse at frame 205 --- + const gridPulse = + frame >= 205 && frame <= 220 + ? interpolate(frame, [205, 212, 220], [0.16, 0.4, 0.16], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + }) + : 0.16; + + const gridOpacity = frame < 5 + ? 
interpolate(frame, [0, 5], [0, 0.16], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.entrance, + }) + : gridPulse; + + // --- Koala fade in: frames 208-215 --- + const koalaOpacity = interpolate(frame, [208, 215], [0, 1], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.entrance, + }); + const koalaY = interpolate(frame, [208, 215], [15, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.entrance, + }); + + // --- Master exit: frames 260-295 --- + const exitOpacity = interpolate(frame, [260, 295], [1, 0], { + extrapolateLeft: "clamp", + extrapolateRight: "clamp", + easing: cuaEasings.smooth, + }); + + const masterOpacity = frame < 260 ? 1 : exitOpacity; + + // --- Black hold: frames 295-300 --- + if (frame >= 295) { + return ( + + + + ); + } + + return ( + + + {/* Eyebrow text: "BUILT FOR COMPUTER USE" */} +
+ +
+ Built for computer use +
+
+
+ + {/* 3D perspective container for desktop cards */} +
+ {CARDS.map((card, i) => { + const pos = getCardPosition(i, frame, CARDS.length, card.entryAngle); + + // After convergence (frame 150+), cards are fully transparent — skip rendering + if (frame >= 150) return null; + + const rotation = wobble(i); + + return ( +
0 ? `blur(${pos.blur}px)` : undefined, + }} + > + +
+ ); + })} +
+ + {/* Tagline text block — centered */} +
+ {/* Line 1: "The infrastructure" — frames 145-175 */} + +
+ The infrastructure +
+
+ + {/* Line 2: "your computer use agents" — frames 165-190 */} + +
+ your{" "} + + computer use agents + +
+
+ + {/* Line 3: "deserve." — frames 185-205 */} + +
+ deserve. +
+
+ + {/* AnimatedKoala below text — fades in frames 208-215 */} +
+ +
+
+
+
+ ); +}; diff --git a/videos/cua-cloud/src/remotion/theme.ts b/videos/cua-cloud/src/remotion/theme.ts new file mode 100644 index 0000000..8e5469f --- /dev/null +++ b/videos/cua-cloud/src/remotion/theme.ts @@ -0,0 +1,46 @@ +import { Easing } from "remotion"; + +// Scene durations (frames at 30fps) +export const HOOK_DURATION = 120; // 4s (v2 emphasis needs room) +export const DEMO_DURATION = 210; // 7s +export const SHOWCASE_TAGLINE_DURATION = 300; // 10s (was 13s — tightened) +export const ENDCARD_DURATION = 270; // 9s (waves + CTA) +export const TOTAL_DURATION = 900; // 30s + +// CUA Brand Colors +export const CUA = { + bg: { base: "#07080a", elev1: "#0a0c10", elev2: "#12161b" }, + brand: { + blue: "#61bcff", + green: "#4fd37e", + deepBlue: "#2d84c6", + lightBlue: "#d8edff", + violet: "#a78bfa", + glow: "rgba(97, 188, 255, 0.34)", + }, + ink: { + strong: "#f6f8fb", + body: "rgba(224, 230, 238, 0.84)", + muted: "rgba(164, 173, 187, 0.82)", + }, + border: { + subtle: "rgba(255, 255, 255, 0.06)", + medium: "rgba(255, 255, 255, 0.12)", + }, + grid: { color: "rgba(255, 255, 255, 0.022)", spacing: 44, opacity: 0.16 }, +} as const; + +// Shared easings — no overshoot, smooth ease-in/out +export const cuaEasings = { + reveal: Easing.bezier(0.22, 1, 0.36, 1), // Expo ease-out (website reveal) + exit: Easing.in(Easing.quad), // Quick exit + smooth: Easing.inOut(Easing.cubic), // Gentle in-out + entrance: Easing.out(Easing.cubic), // Card/element entrance (no overshoot) + dramatic: Easing.bezier(0.16, 1, 0.3, 1), // Dramatic ease-out: slow start, fast finish +} as const; + +// Gradient definitions +export const GRADIENTS = { + brand3: "linear-gradient(135deg, #61bcff, #4fd37e, #2d84c6)", + brand5: "linear-gradient(135deg, #61bcff, #4fd37e, #2d84c6, #d8edff, #a78bfa)", +} as const; diff --git a/videos/cua-cloud/src/remotion/webpack-override.mjs b/videos/cua-cloud/src/remotion/webpack-override.mjs new file mode 100644 index 0000000..65121e1 --- /dev/null +++ 
b/videos/cua-cloud/src/remotion/webpack-override.mjs @@ -0,0 +1,5 @@ +import { enableTailwind } from "@remotion/tailwind-v4"; + +export const webpackOverride = (config) => { + return enableTailwind(config); +}; diff --git a/videos/cua-cloud/src/styles/global.css b/videos/cua-cloud/src/styles/global.css new file mode 100644 index 0000000..1038643 --- /dev/null +++ b/videos/cua-cloud/src/styles/global.css @@ -0,0 +1,10 @@ +@import "tailwindcss"; + +* { + box-sizing: border-box; +} + +body { + margin: 0; + padding: 0; +} diff --git a/videos/cua-cloud/tsconfig.json b/videos/cua-cloud/tsconfig.json new file mode 100644 index 0000000..007e8f5 --- /dev/null +++ b/videos/cua-cloud/tsconfig.json @@ -0,0 +1,15 @@ +{ + "extends": "../../tsconfig.json", + "compilerOptions": { + "plugins": [{ "name": "next" }], + "paths": { + "@/*": ["./src/*"], + "@launchpad/shared": ["../../packages/shared/src"], + "@launchpad/shared/*": ["../../packages/shared/src/*"], + "@launchpad/assets": ["../../packages/assets"], + "@launchpad/assets/*": ["../../packages/assets/*"] + } + }, + "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"], + "exclude": ["node_modules"] +} diff --git a/videos/cua-cloud/types/constants.ts b/videos/cua-cloud/types/constants.ts new file mode 100644 index 0000000..0420722 --- /dev/null +++ b/videos/cua-cloud/types/constants.ts @@ -0,0 +1,7 @@ +// Video configuration constants +export const VIDEO_WIDTH = 1920; +export const VIDEO_HEIGHT = 1080; +export const VIDEO_FPS = 30; + +// Composition name (used for rendering) +export const COMP_NAME = "CuaCloud";