Video Generation API
Generate AI videos using providers like MiniMax Hailuo, Luma, and Runway through a unified API with session-based generation and HTTP polling for status updates.
Quick Start
🎬 Generate Videos with AI
MCPHub's Video Generation API supports both direct generation and session-based workflows for creating multiple videos with shared parameters.
Production Endpoint
https://api.mcphub.com/api/v3/videos/generate
Session-Based
Support for continuous generation with HTTP polling
Generate a Video
Python
import requests
import time
# Get your API key from https://mcphub.com/settings/api-keys
api_key = "mcp_your_api_key_here"
api_base = "https://api.mcphub.com/api/v3"
# Step 1: Generate a video
response = requests.post(
    f"{api_base}/videos/generate",
    headers={
        "X-API-Key": api_key,
        "Content-Type": "application/json"
    },
    json={
        "prompt": "A cat playing with a ball of yarn",
        "model": "minimax-hailuo-02-6s-768p-t2v"
    }
)
result = response.json()
session_id = result['session_id']
print(f"Session ID: {session_id}")
print(f"Credits used: {result['credits_used']}")
# Step 2: Poll for completion (30-60 seconds)
max_wait = 120 # 2 minutes
poll_interval = 5 # 5 seconds
start_time = time.time()
while time.time() - start_time < max_wait:
    session_response = requests.get(
        f"{api_base}/videos/sessions/{session_id}",
        headers={"X-API-Key": api_key},
        params={"page": 1, "page_size": 50}
    )
    session_data = session_response.json()
    videos = session_data.get('videos', [])
    if videos and videos[0]['status'] == 'completed':
        video_url = videos[0]['video_url']
        print(f"✅ Video ready: {video_url}")
        # Step 3: Download video
        video_data = requests.get(video_url).content
        with open('video.mp4', 'wb') as f:
            f.write(video_data)
        print(f"Downloaded {len(video_data)/1024/1024:.2f} MB")
        break
    print(" Still processing...")
    time.sleep(poll_interval)
TypeScript/JavaScript
// Get your API key from https://mcphub.com/settings/api-keys
const apiKey = 'mcp_your_api_key_here';
const apiBase = 'https://api.mcphub.com/api/v3';
// Step 1: Generate a video
const response = await fetch(`${apiBase}/videos/generate`, {
  method: 'POST',
  headers: {
    'X-API-Key': apiKey,
    'Content-Type': 'application/json'
  },
  body: JSON.stringify({
    prompt: 'A cat playing with a ball of yarn',
    model: 'minimax-hailuo-02-6s-768p-t2v'
  })
});
const result = await response.json();
const sessionId = result.session_id;
console.log(`Session ID: ${sessionId}`);
console.log(`Credits used: ${result.credits_used}`);
// Step 2: Poll for completion (30-60 seconds)
const maxWait = 120000; // 2 minutes
const pollInterval = 5000; // 5 seconds
const startTime = Date.now();
while (Date.now() - startTime < maxWait) {
  const sessionResponse = await fetch(
    `${apiBase}/videos/sessions/${sessionId}?page=1&page_size=50`,
    { headers: { 'X-API-Key': apiKey } }
  );
  const sessionData = await sessionResponse.json();
  const videos = sessionData.videos || [];
  if (videos.length > 0 && videos[0].status === 'completed') {
    const videoUrl = videos[0].video_url;
    console.log(`✅ Video ready: ${videoUrl}`);
    // Step 3: Download video
    const videoBlob = await fetch(videoUrl).then(r => r.blob());
    console.log(`Downloaded ${(videoBlob.size / 1024 / 1024).toFixed(2)} MB`);
    break;
  }
  console.log(' Still processing...');
  await new Promise(resolve => setTimeout(resolve, pollInterval));
}
cURL
# Get your API key from https://mcphub.com/settings/api-keys
API_KEY="mcp_your_api_key_here"
API_BASE="https://api.mcphub.com/api/v3"
# Step 1: Generate video and save session_id
SESSION_ID=$(curl -s -X POST "$API_BASE/videos/generate" \
-H "X-API-Key: $API_KEY" \
-H "Content-Type: application/json" \
-d '{"prompt":"A cat playing with a ball of yarn","model":"minimax-hailuo-02-6s-768p-t2v"}' \
| jq -r '.session_id')
echo "Session ID: $SESSION_ID"
# Step 2: Wait for video to generate (30-60 seconds)
sleep 30
# Step 3: Check if video is ready
curl "$API_BASE/videos/sessions/$SESSION_ID?page=1&page_size=50" \
-H "X-API-Key: $API_KEY" \
  | jq '.videos[0]'
Session-Based Generation
For generating multiple videos with shared parameters, create a session. The session will generate videos at specified intervals, and you can poll the session endpoint to retrieve them:
Python Example
import requests
import time
api_key = "mcp_your_api_key_here"
api_base = "https://api.mcphub.com/api/v3"
# 1. Create a video generation session
session_response = requests.post(
    f"{api_base}/videos/sessions",
    headers={
        "X-API-Key": api_key,
        "Content-Type": "application/json"
    },
    json={
        "prompt": "Mountain landscapes at different times of day",
        "model": "minimax-hailuo-02-6s-768p-t2v",
        "duration_minutes": 5,
        "max_videos": 3,
        "interval_seconds": 60
    }
)
session_id = session_response.json()["session_id"]
print(f"Session ID: {session_id}")
# 2. Poll for videos (check every 30 seconds)
for i in range(12):  # Check for up to 6 minutes
    session_data = requests.get(
        f"{api_base}/videos/sessions/{session_id}?page=1&page_size=50",
        headers={"X-API-Key": api_key}
    ).json()
    videos = session_data.get('videos', [])
    print(f"Check {i+1}: {len(videos)} videos generated")
    # Download any new completed videos
    for video in videos:
        if video['status'] == 'completed':
            print(f" - {video['video_id']}: {video['video_url']}")
    # Check if session is complete
    if session_data['session']['status'] in ['completed', 'stopped']:
        print(f"Session {session_data['session']['status']}")
        break
    time.sleep(30)
Image-to-Video Generation
Animate static images into videos using AI. The API supports two modes: Image-to-Video (I2V) for animating a single image, and Keyframe-to-Video (KF2V) for creating transitions between two images.
I2V: Image-to-Video
Animate a single image with motion based on your text prompt. Perfect for bringing photos to life.
KF2V: Keyframe-to-Video
Create smooth transitions between two images (start and end frames). The AI generates the motion in between.
Image-to-Video (I2V)
Use the head_image parameter to provide a single image URL that will be animated.
import requests
import time
api_key = "mcp_your_api_key_here"
api_base = "https://a.mcphub.com/api/v3"
# Step 1: Generate I2V video
response = requests.post(
    f"{api_base}/videos/generate",
    headers={
        "X-API-Key": api_key,
        "Content-Type": "application/json"
    },
    json={
        "prompt": "A tiger walking through a lush jungle, camera slowly zooming in",
        "model": "minimax-hailuo-02-6s-768p-i2v",
        "head_image": "https://example.com/tiger_photo.jpg"
    }
)
result = response.json()
session_id = result['session_id']
print(f"Session ID: {session_id}")
# Step 2: Poll for completion (same as text-to-video)
while True:
    session_data = requests.get(
        f"{api_base}/videos/sessions/{session_id}?page=1&page_size=50",
        headers={"X-API-Key": api_key}
    ).json()
    videos = session_data.get('videos', [])
    if videos and videos[0]['status'] == 'completed':
        video_url = videos[0]['video_url']
        print(f"✅ Animated video ready: {video_url}")
        break
    time.sleep(5)
Keyframe-to-Video (KF2V)
Use both head_image (start frame) and tail_image (end frame) to create smooth transitions.
// TypeScript/JavaScript - Keyframe-to-Video
const apiKey = 'mcp_your_api_key_here';
const apiBase = 'https://api.mcphub.com/api/v3';
// Step 1: Generate KF2V transition
const response = await fetch(`${apiBase}/videos/generate`, {
  method: 'POST',
  headers: {
    'X-API-Key': apiKey,
    'Content-Type': 'application/json'
  },
  body: JSON.stringify({
    prompt: 'Smooth cinematic transition from sunrise to sunset',
    model: 'wan-wan2.1-kf2v-plus-5s-480p',
    head_image: 'https://example.com/sunrise.jpg', // Start frame
    tail_image: 'https://example.com/sunset.jpg'   // End frame
  })
});
const result = await response.json();
const sessionId = result.session_id;
console.log(`Session ID: ${sessionId}`);
// Step 2: Poll for completion
while (true) {
  const sessionData = await fetch(
    `${apiBase}/videos/sessions/${sessionId}?page=1&page_size=50`,
    { headers: { 'X-API-Key': apiKey } }
  ).then(r => r.json());
  const videos = sessionData.videos || [];
  if (videos.length > 0 && videos[0].status === 'completed') {
    console.log(`✅ Transition video ready: ${videos[0].video_url}`);
    break;
  }
  await new Promise(r => setTimeout(r, 5000));
}
Image-to-Video Models
| Model ID | Type | Provider | Duration | Resolution | Credits |
|---|---|---|---|---|---|
| minimax-hailuo-02-6s-768p-i2v | I2V | MiniMax | 6s | 768P | ~48.00 |
| minimax-hailuo-02-6s-480p-i2v | I2V | MiniMax | 6s | 480P | ~30.00 |
| wan-wan2.2-i2v-flash-5s-480p | I2V | WAN | 5s | 480P | ~25.00 |
| wan-wan2.1-kf2v-plus-5s-480p | KF2V | WAN | 5s | 480P | ~30.00 |
| luma-ray-2.0-i2v-5s | I2V | Luma | 5s | 720P | ~45.00 |
⚠️ Image Requirements
- Images must be publicly accessible URLs (HTTPS recommended)
- Supported formats: JPG, JPEG, PNG, WEBP
- Recommended size: 1024x576 to 1920x1080 pixels
- Maximum file size: 10MB per image
- For KF2V: Both images should have the same aspect ratio
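Before submitting an image URL, a quick pre-flight check can catch most rejections. The following is a minimal sketch based only on the requirements above; it uses a HEAD request, so servers that do not report Content-Type or Content-Length may need a full GET instead.
# Python - Pre-flight check for image URLs (illustrative sketch)
import requests

ALLOWED_TYPES = {"image/jpeg", "image/png", "image/webp"}
MAX_BYTES = 10 * 1024 * 1024  # 10MB limit from the requirements above

def check_image_url(url: str) -> bool:
    if not url.lower().startswith("https://"):
        print("Warning: HTTPS is recommended")
    head = requests.head(url, allow_redirects=True, timeout=10)
    content_type = head.headers.get("Content-Type", "").split(";")[0].strip()
    content_length = int(head.headers.get("Content-Length", 0))
    if content_type not in ALLOWED_TYPES:
        print(f"Unsupported format: {content_type}")
        return False
    if content_length > MAX_BYTES:
        print(f"Image too large: {content_length / 1024 / 1024:.1f} MB")
        return False
    return True

print(check_image_url("https://example.com/tiger_photo.jpg"))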
💡 Pro Tips
- I2V: Use clear, well-composed images with good lighting for best results
- KF2V: Images with similar composition but different states work best (e.g., day/night, before/after)
- Prompt: Describe the motion you want, not the image content (e.g., “camera panning left” rather than “a cat”)
- Resolution: Higher resolution = better quality but higher cost
Provider-Specific Parameters
Different providers support different parameters for image-to-video generation. Here's a complete reference for all supported providers.
| Provider | Image Field | Tail Image | Duration | Resolution | Aspect Ratio | Special Params |
|---|---|---|---|---|---|---|
| MiniMax | image | imageTail | length (max 6s) | resolution: 768P, 1080P | aspectRatio | promptOptimizer (bool) |
| Google Veo | image | imageTail | length (max 8s) | resolution: 720p, 1080p | aspectRatio: 16:9, 9:16 | generateAudio (bool) |
| Runway | image (required) | N/A | length (max 10s) | Auto | aspectRatio: 16:9, 9:16, 1:1, 4:3, 3:4, 21:9 | I2V only |
| Kling AI | image | N/A | length (max 10s) | Auto | aspectRatio: 16:9, 9:16, 1:1 | strength (0-100) |
| Sora | image | N/A | length (max 12s) | Auto | aspectRatio: 16:9, 9:16 | - |
| WAN/Wanx | image | N/A | length (max 10s) | resolution: 480P, 720P, 1080P | aspectRatio: 16:9, 9:16, 4:3, 3:4, 1:1 | wanAudio (bool) |
| Luma | image | imageTail | length (max 5s) | resolution: 540p, 720p, 1080p, 4k | aspectRatio: 16:9, 9:16, 4:3, 3:4, 1:1 | - |
| Pika | image | imageTail | length (max 10s) | Auto | aspectRatio: 16:9, 9:16, 1:1 | - |
MiniMax / Hailuo Example
{
  "prompt": "A tiger walking through jungle",
  "model": "minimax-hailuo-02-6s-768p-i2v",
  "image": "https://example.com/tiger.jpg",
  "length": 6,
  "resolution": "768P",
  "aspectRatio": "16:9",
  "promptOptimizer": true
}
Runway Gen-4 Example (Image Required)
{
  "prompt": "Camera slowly zooming in, cinematic lighting",
  "model": "runway-gen-4-turbo",
  "image": "https://example.com/scene.jpg",
  "length": 10,
  "aspectRatio": "16:9"
}
Sora Example
{
  "prompt": "Aerial view of ocean waves, cinematic 4K",
  "model": "sora-2-pro",
  "image": "https://example.com/ocean.jpg", // Optional
  "length": 12,
  "aspectRatio": "16:9"
}
Luma Ray 2.0 Example (with Keyframes)
{
  "prompt": "Smooth transition from day to night",
  "model": "luma-ray-2.0",
  "image": "https://example.com/day.jpg",
  "imageTail": "https://example.com/night.jpg",
  "length": 5,
  "resolution": "1080p",
  "aspectRatio": "16:9"
}
WAN/Wanx Example (with Audio)
{
  "prompt": "A peaceful forest scene with birds chirping",
  "model": "wan-v2-5-preview",
  "image": "https://example.com/forest.jpg",
  "length": 10,
  "resolution": "1080P",
  "aspectRatio": "16:9",
  "wanAudio": true
}
📝 Field Name Reference
- head_image = standard field for first image
- image = most providers use this (MiniMax, Veo, Runway, etc.)
- reference_image = alternative name (Sora format)
- tail_image = standard field for second image (KF2V)
- imageTail = MiniMax/Luma camelCase format
- duration_seconds = standard field
- length = MiniMax/most providers use this
Available Models
MiniMax Hailuo
- minimax-hailuo-02-6s-768p-t2v
- minimax-hailuo-2.3-6s-768p-t2v
- minimax-hailuo-2.3-fast-6s-768p-t2v
Fast generation with prompt optimizer
Other Providers
- luma-ray-2.0-flash
- runway-gen-4-turbo
- wan-wan2.2-i2v-flash
Various quality & price options
📋 Get Full Model List
Retrieve all available video generation models and their pricing:
GET https://api.mcphub.com/api/v3/media-pricing/models?media_type=video
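A minimal Python sketch of this call is shown below. The shape of the response payload is not documented on this page, so the example simply prints the parsed JSON rather than assuming specific field names.
# Python - List available video models and pricing (sketch)
import requests

api_key = "mcp_your_api_key_here"

response = requests.get(
    "https://api.mcphub.com/api/v3/media-pricing/models",
    headers={"X-API-Key": api_key},
    params={"media_type": "video"}
)
print(response.json())
API Endpoints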
/api/v3/videos/generate
Generate a video from a text prompt
Request Body
- prompt (required) - Text description of the video
- model (required) - Model ID to use
- duration (optional) - Video duration in seconds
- aspect_ratio (optional) - e.g., "16:9", "9:16", "1:1"
/api/v3/videos/sessions
Create a video generation session
Request Body
- prompt (required) - Base prompt for session
- model (required) - Model ID to use
- duration_minutes (optional) - Session duration (default: 30)
- max_videos (optional) - Max videos to generate (default: 50)
- interval_seconds (optional) - Seconds between generations (default: 60)
/api/v3/videos/sessions/:session_id
Get session details and generated videos
/api/v3/videos/sessions/:session_id/stop
Stop a running session
/api/v3/videos/history
Get your video generation history
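The stop and history endpoints are not demonstrated elsewhere on this page, so the following is a minimal Python sketch. It assumes that stopping a session is a POST with no request body and that history is a plain GET; the response fields are not documented here, so the example just prints the parsed JSON.
# Python - Stop a running session and review history (hedged sketch)
import requests

api_key = "mcp_your_api_key_here"
api_base = "https://api.mcphub.com/api/v3"
session_id = "your-session-id"

# Assumption: stopping a session is a POST with no request body
stop_response = requests.post(
    f"{api_base}/videos/sessions/{session_id}/stop",
    headers={"X-API-Key": api_key}
)
print(f"Stop status: {stop_response.status_code}")

# Assumption: history is a plain GET; print the raw payload
history_response = requests.get(
    f"{api_base}/videos/history",
    headers={"X-API-Key": api_key}
)
print(history_response.json())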
Advanced Features
List Video Sessions
Get all your video generation sessions with pagination and filtering support.
/api/v3/videos/sessions
List all sessions with filters
Query Parameters
- page (optional) - Page number (default: 1)
- page_size (optional) - Items per page (default: 20, max: 100)
- status (optional) - Filter by status: pending, running, completed, error, stopped
- search (optional) - Search in prompts (partial match)
# Python - List all completed sessions
import requests

api_key = "mcp_your_api_key_here"
response = requests.get(
    "https://api.mcphub.com/api/v3/videos/sessions",
    headers={"X-API-Key": api_key},
    params={
        "page": 1,
        "page_size": 20,
        "status": "completed"
    }
)
sessions = response.json()
print(f"Total sessions: {sessions['pagination']['total_count']}")
for session in sessions['sessions']:
    print(f"{session['session_id']}: {session['total_videos']} videos")
// TypeScript - Search sessions by prompt
const apiKey = 'mcp_your_api_key_here';
const response = await fetch(
  'https://api.mcphub.com/api/v3/videos/sessions?search=mountain&page=1&page_size=10',
  {
    headers: { 'X-API-Key': apiKey }
  }
);
const { sessions, pagination, summary } = await response.json();
console.log(`Found ${pagination.total_count} sessions`);
Delete Video Session
Delete a session and all its associated videos permanently.
/api/v3/videos/sessions/:session_id
Delete session and all videos (cannot be undone)
# Python - Delete a session
import requests

api_key = "mcp_your_api_key_here"
session_id = "your-session-id"
response = requests.delete(
    f"https://api.mcphub.com/api/v3/videos/sessions/{session_id}",
    headers={"X-API-Key": api_key}
)
result = response.json()
print(f"Deleted: {result['videos_deleted']} videos")
# Output: Deleted: 5 videos
// TypeScript - Delete a session
const apiKey = 'mcp_your_api_key_here';
const sessionId = 'your-session-id';
const response = await fetch(
  `https://api.mcphub.com/api/v3/videos/sessions/${sessionId}`,
  {
    method: 'DELETE',
    headers: { 'X-API-Key': apiKey }
  }
);
const result = await response.json();
console.log(`${result.message}`);
// Output: "Session and 5 videos deleted successfully"
Advanced Generation Parameters
Use additional parameters for more control over video generation.
Available Parameters
- head_image - URL for image-to-video (start frame)
- tail_image - URL for keyframe-to-video (end frame)
- width - Video width in pixels (e.g., 1280)
- height - Video height in pixels (e.g., 720)
- frame_rate - Frames per second (e.g., 24, 30)
- strategy - Generation style: cinematic, realistic, animated, artistic (a sketch using these sizing and style parameters follows the two examples below)
# Python - Image-to-video generation
import requests

api_key = "mcp_your_api_key_here"
response = requests.post(
    "https://api.mcphub.com/api/v3/videos/generate",
    headers={
        "X-API-Key": api_key,
        "Content-Type": "application/json"
    },
    json={
        "prompt": "A tiger walking through jungle",
        "model": "minimax-hailuo-02-6s-768p-i2v",
        "head_image": "https://example.com/tiger.jpg"  # Image to animate
    }
)
result = response.json()
print(f"Session ID: {result['session_id']}")
# Poll session endpoint to get video when ready
// TypeScript - Keyframe-to-video (start + end frames)
const apiKey = 'mcp_your_api_key_here';
const response = await fetch('https://api.mcphub.com/api/v3/videos/generate', {
  method: 'POST',
  headers: {
    'X-API-Key': apiKey,
    'Content-Type': 'application/json'
  },
  body: JSON.stringify({
    prompt: 'Smooth transition from day to night',
    model: 'wan-wan2.1-kf2v-plus-5s-480p',
    head_image: 'https://example.com/day.jpg',  // Start frame
    tail_image: 'https://example.com/night.jpg' // End frame
  })
});
const result = await response.json();
console.log(`Session ID: ${result.session_id}`);
// Poll session endpoint to get video when ready
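The width, height, frame_rate, and strategy parameters listed above are not used in the two examples, so here is a hedged sketch that adds them to a plain text-to-video request. Whether a given model honors every field depends on the provider, so treat the values as illustrative.
# Python - Text-to-video with sizing and style parameters (illustrative sketch)
import requests

api_key = "mcp_your_api_key_here"

response = requests.post(
    "https://api.mcphub.com/api/v3/videos/generate",
    headers={
        "X-API-Key": api_key,
        "Content-Type": "application/json"
    },
    json={
        "prompt": "A serene mountain landscape at sunset with clouds moving",
        "model": "minimax-hailuo-02-6s-768p-t2v",
        "width": 1280,          # example values from the parameter list above
        "height": 720,
        "frame_rate": 24,
        "strategy": "cinematic"
    }
)
print(f"Session ID: {response.json()['session_id']}")
Model ID Format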
Understanding the model ID structure helps you choose the right model for your needs.
Format Structure
{vendor}-{model}-{variant}-{duration}s-{resolution}-{type}
- t2v = Text-to-Video
- i2v = Image-to-Video
- kf2v = Keyframe-to-Video (start + end images)
Examples
# Minimax Hailuo 02 - Text-to-Video, 6 seconds, 768p
minimax-hailuo-02-6s-768p-t2v

# Minimax Hailuo 02 - Image-to-Video, 6 seconds, 768p
minimax-hailuo-02-6s-768p-i2v

# Luma AI - Text-to-Video, 5 seconds (Flash variant)
luma-ray-2.0-flash-5s-t2v

# Aliyun WAN - Image-to-Video, 5 seconds, 480p (Flash variant)
wan-wan2.2-i2v-flash-5s-480p
💡 Pro Tip
Use flash variants for faster generation at lower cost, or plus variants for higher quality results.
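If you need to branch on these components programmatically, the sketch below splits an ID into its labeled parts. It is purely illustrative and assumes nothing beyond the naming pattern above; some IDs omit tokens (for example, resolution), so missing pieces come back as None.
# Python - Best-effort model ID breakdown (illustrative sketch)
import re

def parse_model_id(model_id: str) -> dict:
    tokens = model_id.split("-")
    info = {"vendor": tokens[0], "type": None, "duration_s": None, "resolution": None}
    for token in tokens[1:]:
        if token in ("t2v", "i2v", "kf2v"):
            info["type"] = token
        elif re.fullmatch(r"\d+s", token):
            info["duration_s"] = int(token[:-1])
        elif re.fullmatch(r"\d+p", token, re.IGNORECASE):
            info["resolution"] = token
    return info

print(parse_model_id("minimax-hailuo-02-6s-768p-i2v"))
# {'vendor': 'minimax', 'type': 'i2v', 'duration_s': 6, 'resolution': '768p'}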
Response Format
{
  "success": true,
  "video_id": "vid_abc123",
  "video_url": "https://...",
  "thumbnail_url": "https://...",
  "model": "luma-ray-2.0-flash",
  "processing_time": 45.2,
  "credits_used": 100.0,
  "remaining_credits": 900.0,
  "duration": 5,
  "aspect_ratio": "16:9",
  "prompt": "A serene mountain landscape at sunset with clouds moving",
  "status": "completed",
  "created_at": "2025-11-12T10:30:00Z"
}
Authentication
The Video Generation API uses API key authentication for simple and secure access:
Create API Key
Go to Settings → API Keys and create a new API key
Copy Your Key
Copy the generated API key (starts with mcp_)
Use in Requests
Include your API key in the X-API-Key header for all requests
🔐 Keep Your API Key Secure
Never share your API key publicly or commit it to version control. Treat it like a password.
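One simple way to follow this advice is to load the key from an environment variable at runtime rather than hard-coding it; the variable name below (MCPHUB_API_KEY) is just an example, not an official convention.
# Python - Load the API key from the environment instead of hard-coding it
import os
import requests

api_key = os.environ["MCPHUB_API_KEY"]  # hypothetical variable name

# Any authenticated endpoint works as a smoke test; here we list one session page
response = requests.get(
    "https://api.mcphub.com/api/v3/videos/sessions",
    headers={"X-API-Key": api_key},
    params={"page": 1, "page_size": 1}
)
response.raise_for_status()
print("API key accepted")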
Next Steps
📚 Related Docs
- Image Generation API - Generate AI images
- API Reference - Complete API documentation
- Pricing - Video generation costs
🚀 Get Started
- Try in Dashboard - Generate videos in the UI
- Add Credits - Purchase credits
- View History - Track your generations