Experimental and Emerging APIs
1. WebGPU API for Graphics Computing
| WebGPU Concept | Description |
|---|---|
| GPU Adapter | Represents physical GPU device. Access via navigator.gpu.requestAdapter(). |
| GPU Device | Logical connection to GPU for command submission. Request from adapter. |
| Shader Module | Compiled WGSL (WebGPU Shading Language) code for GPU execution. |
| Pipeline | Complete GPU program including shaders, state, and resource bindings. |
| Command Encoder | Records GPU commands to submit to queue. |
| Buffer | GPU memory for vertex data, uniforms, or compute results. |
| Texture | Multi-dimensional data (images) for rendering or compute. |
Example: Initialize WebGPU
// Check WebGPU support
if (!navigator.gpu) {
console.error("WebGPU not supported");
showWebGLFallback();
} else {
console.log("WebGPU supported");
initWebGPU();
}
async function initWebGPU() {
// Request GPU adapter
const adapter = await navigator.gpu.requestAdapter();
if (!adapter) {
console.error("Failed to get GPU adapter");
return;
}
console.log("Adapter info:", {
vendor: adapter.info?.vendor || "unknown",
architecture: adapter.info?.architecture || "unknown",
features: Array.from(adapter.features)
});
// Request device
const device = await adapter.requestDevice();
console.log("Device limits:", device.limits);
console.log("Device features:", Array.from(device.features));
// Handle device loss
device.lost.then((info) => {
console.error("Device lost:", info.message, info.reason);
if (info.reason === "destroyed") {
console.log("Device intentionally destroyed");
} else {
console.error("Device lost unexpectedly, reinitializing...");
initWebGPU();
}
});
return { adapter, device };
}
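requestAdapter() also accepts options. A minimal sketch using the standard powerPreference hint to steer adapter selection (valid values are "low-power" and "high-performance"):

// Request a specific adapter class via GPURequestAdapterOptions
async function requestHighPerformanceAdapter() {
  const adapter = await navigator.gpu.requestAdapter({
    powerPreference: "high-performance" // or "low-power"
  });
  if (!adapter) {
    console.warn("No adapter matched the requested options");
  }
  return adapter;
}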
Example: Create a render pipeline
async function createRenderPipeline(device, canvas) {
// Configure canvas context
const context = canvas.getContext("webgpu");
const canvasFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device: device,
format: canvasFormat,
alphaMode: "premultiplied"
});
// Vertex shader (WGSL)
const vertexShaderCode = `
@vertex
fn main(@builtin(vertex_index) vertexIndex: u32) -> @builtin(position) vec4<f32> {
var pos = array<vec2<f32>, 3>(
vec2<f32>(0.0, 0.5),
vec2<f32>(-0.5, -0.5),
vec2<f32>(0.5, -0.5)
);
return vec4<f32>(pos[vertexIndex], 0.0, 1.0);
}
`;
// Fragment shader (WGSL)
const fragmentShaderCode = `
@fragment
fn main() -> @location(0) vec4<f32> {
return vec4<f32>(1.0, 0.0, 0.0, 1.0); // Red color
}
`;
// Create shader modules
const vertexModule = device.createShaderModule({
label: "Vertex Shader",
code: vertexShaderCode
});
const fragmentModule = device.createShaderModule({
label: "Fragment Shader",
code: fragmentShaderCode
});
// Create render pipeline
const pipeline = device.createRenderPipeline({
label: "Basic Pipeline",
layout: "auto",
vertex: {
module: vertexModule,
entryPoint: "main"
},
fragment: {
module: fragmentModule,
entryPoint: "main",
targets: [{
format: canvasFormat
}]
},
primitive: {
topology: "triangle-list"
}
});
return { context, pipeline, canvasFormat };
}
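createRenderPipeline() above only builds the pipeline; actually drawing the triangle takes one render pass. A minimal sketch, assuming the { context, pipeline } pair returned above:

// Encode a single render pass that clears the canvas and draws the triangle
function drawFrame(device, context, pipeline) {
  const commandEncoder = device.createCommandEncoder();
  const pass = commandEncoder.beginRenderPass({
    colorAttachments: [{
      view: context.getCurrentTexture().createView(),
      clearValue: { r: 0, g: 0, b: 0, a: 1 }, // clear to opaque black
      loadOp: "clear",
      storeOp: "store"
    }]
  });
  pass.setPipeline(pipeline);
  pass.draw(3); // 3 vertices -> one triangle (positions live in the shader)
  pass.end();
  device.queue.submit([commandEncoder.finish()]);
}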
Example: WebGPU compute shader
// Compute shader for parallel processing
async function runComputeShader(device) {
// Compute shader code (element-wise vector addition)
const computeShaderCode = `
@group(0) @binding(0) var<storage, read> inputA: array<f32>;
@group(0) @binding(1) var<storage, read> inputB: array<f32>;
@group(0) @binding(2) var<storage, read_write> output: array<f32>;
@compute @workgroup_size(64)
fn main(@builtin(global_invocation_id) global_id: vec3<u32>) {
let index = global_id.x;
output[index] = inputA[index] + inputB[index];
}
`;
// Create compute pipeline
const shaderModule = device.createShaderModule({
code: computeShaderCode
});
const computePipeline = device.createComputePipeline({
layout: "auto",
compute: {
module: shaderModule,
entryPoint: "main"
}
});
// Create buffers
const dataSize = 1024;
const bufferSize = dataSize * Float32Array.BYTES_PER_ELEMENT;
const inputABuffer = device.createBuffer({
size: bufferSize,
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
});
const inputBBuffer = device.createBuffer({
size: bufferSize,
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST
});
const outputBuffer = device.createBuffer({
size: bufferSize,
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC
});
const stagingBuffer = device.createBuffer({
size: bufferSize,
usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST
});
// Write input data
const inputDataA = new Float32Array(dataSize).fill(1.0);
const inputDataB = new Float32Array(dataSize).fill(2.0);
device.queue.writeBuffer(inputABuffer, 0, inputDataA);
device.queue.writeBuffer(inputBBuffer, 0, inputDataB);
// Create bind group
const bindGroup = device.createBindGroup({
layout: computePipeline.getBindGroupLayout(0),
entries: [
{ binding: 0, resource: { buffer: inputABuffer } },
{ binding: 1, resource: { buffer: inputBBuffer } },
{ binding: 2, resource: { buffer: outputBuffer } }
]
});
// Encode and submit commands
const commandEncoder = device.createCommandEncoder();
const passEncoder = commandEncoder.beginComputePass();
passEncoder.setPipeline(computePipeline);
passEncoder.setBindGroup(0, bindGroup);
passEncoder.dispatchWorkgroups(Math.ceil(dataSize / 64));
passEncoder.end();
// Copy output to staging buffer
commandEncoder.copyBufferToBuffer(
outputBuffer, 0,
stagingBuffer, 0,
bufferSize
);
device.queue.submit([commandEncoder.finish()]);
// Read results
await stagingBuffer.mapAsync(GPUMapMode.READ);
const resultData = new Float32Array(stagingBuffer.getMappedRange());
console.log("Compute result (first 10):", resultData.slice(0, 10));
stagingBuffer.unmap();
}
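WebGPU reports most errors asynchronously rather than throwing. A minimal sketch of the standard error-scope API for catching validation errors around resource creation:

// Wrap a buffer allocation in a validation error scope
async function createBufferChecked(device, descriptor) {
  device.pushErrorScope("validation");
  const buffer = device.createBuffer(descriptor);
  const error = await device.popErrorScope(); // null if no error occurred
  if (error) {
    console.error("Buffer creation failed:", error.message);
    return null;
  }
  return buffer;
}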
Note: WebGPU is the next-generation GPU API for the web, replacing WebGL. It provides compute shaders, better performance, and modern GPU features, and uses the WGSL shading language. Experimental - currently in Chrome and Edge. Check navigator.gpu before use.
Warning: WebGPU is experimental and not yet widely supported; the API may change. Always provide a WebGL fallback. Requires HTTPS. Shaders use WGSL, not GLSL. The GPU device can be lost at any time - handle the device.lost promise.
2. WebAssembly (WASM) Integration APIs
| WebAssembly API | Description |
|---|---|
| WebAssembly.compile() | Compile WASM bytes to module asynchronously. |
| WebAssembly.instantiate() | Compile and instantiate WASM module in one step. |
| WebAssembly.Module | Compiled WASM module (stateless, can be cached). |
| WebAssembly.Instance | Instantiated module with memory and exported functions. |
| WebAssembly.Memory | Resizable ArrayBuffer for WASM linear memory. |
| WebAssembly.Table | Resizable typed array of references (functions, objects). |
| WebAssembly.Global | Global variable accessible to WASM and JavaScript. |
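The Memory, Table, and Global types in the table can also be constructed directly from JavaScript. A minimal sketch (one memory page is 64 KiB):

// WebAssembly.Memory: linear memory measured in 64 KiB pages
const memory = new WebAssembly.Memory({ initial: 1, maximum: 4 });
memory.grow(1); // now 2 pages (131072 bytes)
console.log("Memory bytes:", memory.buffer.byteLength);
// WebAssembly.Table: resizable array of function references
const table = new WebAssembly.Table({ element: "anyfunc", initial: 2 });
console.log("Table length:", table.length);
// WebAssembly.Global: a value visible to both JS and WASM
const counter = new WebAssembly.Global({ value: "i32", mutable: true }, 0);
counter.value = 42;
console.log("Global value:", counter.value);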
Example: Load and use WebAssembly module
// Fetch and instantiate WASM module
async function loadWasm() {
try {
// Fetch WASM file
const response = await fetch("module.wasm");
const bytes = await response.arrayBuffer();
// Instantiate with imports
const imports = {
env: {
// Import JavaScript functions to WASM
consoleLog: (arg) => console.log("WASM says:", arg),
randomNumber: () => Math.random(),
// Import memory (optional)
memory: new WebAssembly.Memory({ initial: 256, maximum: 512 })
}
};
const { instance, module } = await WebAssembly.instantiate(bytes, imports);
console.log("WASM module loaded");
console.log("Exports:", Object.keys(instance.exports));
return instance;
} catch (error) {
console.error("Failed to load WASM:", error);
throw error;
}
}
// Use WASM module
async function useWasm() {
const wasm = await loadWasm();
// Call exported functions
if (wasm.exports.add) {
const result = wasm.exports.add(10, 32);
console.log("10 + 32 =", result);
}
if (wasm.exports.fibonacci) {
const fib10 = wasm.exports.fibonacci(10);
console.log("Fibonacci(10) =", fib10);
}
// Access memory
if (wasm.exports.memory) {
const memory = new Uint8Array(wasm.exports.memory.buffer);
console.log("Memory size:", memory.length);
}
}
// Streaming compilation (faster for large modules)
async function loadWasmStreaming() {
try {
const response = await fetch("module.wasm");
const { instance } = await WebAssembly.instantiateStreaming(response);
console.log("WASM loaded via streaming");
return instance;
} catch (error) {
console.error("Streaming failed, falling back:", error);
// Fallback to regular loading
return loadWasm();
}
}
Example: WASM with shared memory and threads
// Create shared memory for multi-threaded WASM
const sharedMemory = new WebAssembly.Memory({
initial: 1,
maximum: 10,
shared: true // Enable sharing between workers
});
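Shared WASM memory only works on cross-origin isolated pages (served with COOP/COEP headers). A minimal guard using the standard crossOriginIsolated global:

// Verify cross-origin isolation before relying on shared memory
if (!crossOriginIsolated) {
  console.warn(
    "Page is not cross-origin isolated; shared WebAssembly.Memory is unavailable. " +
    "Serve with Cross-Origin-Opener-Policy: same-origin and " +
    "Cross-Origin-Embedder-Policy: require-corp."
  );
}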
// Main thread: Load WASM with shared memory
async function loadThreadedWasm() {
const response = await fetch("threaded.wasm");
const { instance } = await WebAssembly.instantiateStreaming(response, {
env: {
memory: sharedMemory
},
wasi_snapshot_preview1: {
// WASI imports if needed
}
});
return instance;
}
// Worker thread code (worker.js)
/*
self.onmessage = async (event) => {
const { memory, wasmUrl } = event.data;
const response = await fetch(wasmUrl);
const { instance } = await WebAssembly.instantiateStreaming(response, {
env: { memory }
});
// Run compute-intensive work
const result = instance.exports.processData();
self.postMessage({ result });
};
*/
// Use from main thread
async function runParallelWasm() {
const wasmInstance = await loadThreadedWasm();
// Spawn workers
const workers = [];
const numWorkers = navigator.hardwareConcurrency || 4;
for (let i = 0; i < numWorkers; i++) {
const worker = new Worker("worker.js");
worker.postMessage({
memory: sharedMemory,
wasmUrl: "threaded.wasm"
});
workers.push(worker);
}
// Coordinate work via shared memory and Atomics
const sharedArray = new Int32Array(sharedMemory.buffer);
// Signal workers to start
Atomics.store(sharedArray, 0, 1);
Atomics.notify(sharedArray, 0);
console.log("Workers started");
}
// WASM Binary Interface example
class WasmBinaryInterface {
constructor(instance) {
  this.instance = instance;
  // Views into exports.memory.buffer go stale if memory grows,
  // so each method below creates a fresh view per call.
}
// Read string from WASM memory
readString(ptr, length) {
const bytes = new Uint8Array(this.instance.exports.memory.buffer, ptr, length);
return new TextDecoder().decode(bytes);
}
// Write string to WASM memory
writeString(str, ptr) {
const bytes = new TextEncoder().encode(str);
const memory = new Uint8Array(this.instance.exports.memory.buffer);
memory.set(bytes, ptr);
return bytes.length;
}
// Read array from memory
readArray(ptr, length, type = Float32Array) {
return new type(this.instance.exports.memory.buffer, ptr, length);
}
}
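A hypothetical usage sketch for the interface above, assuming the module exports memory plus an allocate(size) helper (both names are assumptions, not part of any standard):

// Write a string into WASM memory through the helper class
async function demoBinaryInterface() {
  const instance = await loadWasm();
  const abi = new WasmBinaryInterface(instance);
  const ptr = instance.exports.allocate(64); // assumed export
  const written = abi.writeString("hello", ptr);
  console.log(`Wrote ${written} bytes at offset ${ptr}`);
  console.log("Read back:", abi.readString(ptr, written));
}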
Note: WebAssembly enables near-native performance for compute-intensive tasks. Use it for video/audio processing, games, simulations, and cryptography. Functions can be imported and exported between JS and WASM. Use instantiateStreaming() for faster loading. Supports multithreading with SharedArrayBuffer (requires cross-origin isolation).
3. Web Neural Network API (WebNN)
| WebNN Concept | Description |
|---|---|
| ML Context | Hardware-accelerated context for neural network operations. |
| Graph Builder | Builds ML computation graph with layers and operations. |
| Operand | Tensor (multi-dimensional array) in the computation graph. |
| Operation | Neural network operation: conv2d, matmul, relu, softmax, etc. |
| Graph | Compiled computation graph ready for execution. |
| Backend | Hardware backend: CPU, GPU, or dedicated ML accelerator. |
Example: WebNN basic usage
// Check WebNN support
if (!("ml" in navigator)) {
console.error("WebNN not supported");
} else {
console.log("WebNN supported");
runInference();
}
async function runInference() {
// Create ML context
const context = await navigator.ml.createContext();
console.log("ML Context created");
// Build a simple neural network graph
const builder = new MLGraphBuilder(context);
// Input tensor: [batch, height, width, channels]
const input = builder.input("input", {
type: "float32",
dimensions: [1, 224, 224, 3]
});
// Convolutional layer
const conv1Weights = builder.constant({
type: "float32",
dimensions: [64, 3, 3, 3]
}, new Float32Array(64 * 3 * 3 * 3));
const conv1 = builder.conv2d(input, conv1Weights, {
padding: [1, 1, 1, 1],
strides: [1, 1],
activation: builder.relu()
});
// Max pooling
const pool1 = builder.maxPool2d(conv1, {
windowDimensions: [2, 2],
strides: [2, 2]
});
// Fully connected layer: weights shaped [inputSize, outputSize] so that
// [1, 112*112*64] x [112*112*64, 1000] -> [1, 1000]
const fc1Weights = builder.constant({
  type: "float32",
  dimensions: [112 * 112 * 64, 1000]
}, new Float32Array(112 * 112 * 64 * 1000));
const flatten = builder.reshape(pool1, [1, -1]);
const fc1 = builder.matmul(flatten, fc1Weights);
// Softmax output
const output = builder.softmax(fc1);
// Build graph
const graph = await builder.build({ output });
console.log("Graph built successfully");
// Prepare input data
const inputData = new Float32Array(1 * 224 * 224 * 3);
// ... fill with actual image data
// Execute graph
const inputs = { input: inputData };
const outputs = await context.compute(graph, inputs);
console.log("Inference complete");
console.log("Output shape:", outputs.output.shape);
console.log("Top prediction:", getTopPrediction(outputs.output));
}
function getTopPrediction(tensor) {
const data = tensor.data;
let maxIndex = 0;
let maxValue = data[0];
for (let i = 1; i < data.length; i++) {
if (data[i] > maxValue) {
maxValue = data[i];
maxIndex = i;
}
}
return { class: maxIndex, confidence: maxValue };
}
Example: Load and run ONNX model with WebNN
// Using ONNX Runtime Web with WebNN backend
async function runONNXModel() {
// Ensure ONNX Runtime Web (window.ort) is loaded
const ort = window.ort;
if (!ort) {
  console.error("ONNX Runtime Web not loaded");
  return;
}
try {
// Create session with WebNN backend
const session = await ort.InferenceSession.create("model.onnx", {
executionProviders: ["webnn", "wasm"]
});
console.log("Model loaded");
console.log("Input names:", session.inputNames);
console.log("Output names:", session.outputNames);
// Prepare input tensor
const inputData = Float32Array.from({ length: 224 * 224 * 3 }, () => Math.random());
const inputTensor = new ort.Tensor("float32", inputData, [1, 3, 224, 224]);
// Run inference
const feeds = { [session.inputNames[0]]: inputTensor };
const results = await session.run(feeds);
console.log("Inference complete");
// Get output
const output = results[session.outputNames[0]];
console.log("Output shape:", output.dims);
console.log("Output data (first 10):", output.data.slice(0, 10));
return output;
} catch (error) {
console.error("ONNX inference failed:", error);
}
}
// Image classification with preprocessing
async function classifyImage(imageElement) {
// Preprocess image
const canvas = document.createElement("canvas");
canvas.width = 224;
canvas.height = 224;
const ctx = canvas.getContext("2d");
ctx.drawImage(imageElement, 0, 0, 224, 224);
const imageData = ctx.getImageData(0, 0, 224, 224);
// Convert to tensor (normalize to [0, 1])
const inputData = new Float32Array(3 * 224 * 224);
for (let i = 0; i < imageData.data.length / 4; i++) {
inputData[i] = imageData.data[i * 4] / 255.0; // R
inputData[224 * 224 + i] = imageData.data[i * 4 + 1] / 255.0; // G
inputData[224 * 224 * 2 + i] = imageData.data[i * 4 + 2] / 255.0; // B
}
// Run model
const ort = window.ort;
const session = await ort.InferenceSession.create("mobilenet.onnx", {
executionProviders: ["webnn"]
});
const tensor = new ort.Tensor("float32", inputData, [1, 3, 224, 224]);
const results = await session.run({ input: tensor });
return results.output.data;
}
Note: WebNN provides hardware-accelerated machine learning inference. Uses GPU, NPU, or dedicated ML accelerators. Supports common operations: conv2d, matmul, relu, softmax. Can load ONNX models. Experimental - limited browser support.
Warning: WebNN is highly experimental, currently available only in some Chromium builds behind flags, and the API is subject to change. For production, use TensorFlow.js or ONNX Runtime Web with the WASM backend. Always check "ml" in navigator and provide a fallback.
4. WebTransport API for Low-Latency Communication
| WebTransport Feature | Description |
|---|---|
| WebTransport Connection | Low-latency, bidirectional communication over HTTP/3 (QUIC protocol). |
| Datagrams | Unreliable, unordered messages (like UDP) for real-time data. |
| Streams | Reliable, ordered streams (like TCP) with multiplexing. |
| Unidirectional Streams | One-way data flow from client or server. |
| Bidirectional Streams | Two-way data flow, independent read/write. |
| Connection Migration | Maintains connection across network changes (WiFi to cellular). |
Example: WebTransport connection and streams
// Check WebTransport support
if (!("WebTransport" in window)) {
console.error("WebTransport not supported");
} else {
console.log("WebTransport supported");
connectWebTransport();
}
async function connectWebTransport() {
try {
// Connect to server (requires HTTPS and HTTP/3)
const url = "https://example.com:4433/webtransport";
const transport = new WebTransport(url);
// Wait for connection
await transport.ready;
console.log("WebTransport connected");
// Handle incoming bidirectional streams
handleIncomingStreams(transport);
// Handle incoming unidirectional streams
handleIncomingUnidirectionalStreams(transport);
// Send data via bidirectional stream
await sendViaBidirectionalStream(transport);
// Send datagrams
sendDatagrams(transport);
// Handle connection close
transport.closed.then(() => {
console.log("Connection closed gracefully");
}).catch((error) => {
console.error("Connection closed with error:", error);
});
} catch (error) {
console.error("WebTransport connection failed:", error);
}
}
// Bidirectional streams (reliable, ordered)
async function sendViaBidirectionalStream(transport) {
// Create outgoing bidirectional stream
const stream = await transport.createBidirectionalStream();
// Get writer and reader
const writer = stream.writable.getWriter();
const reader = stream.readable.getReader();
// Send data
const encoder = new TextEncoder();
await writer.write(encoder.encode("Hello from client!"));
await writer.close();
console.log("Sent via bidirectional stream");
// Read response
const { value, done } = await reader.read();
if (!done) {
const decoder = new TextDecoder();
console.log("Received:", decoder.decode(value));
}
reader.releaseLock();
}
// Handle incoming bidirectional streams
async function handleIncomingStreams(transport) {
const reader = transport.incomingBidirectionalStreams.getReader();
while (true) {
const { value: stream, done } = await reader.read();
if (done) break;
console.log("Incoming bidirectional stream");
// Handle stream in background
handleStream(stream);
}
}
async function handleStream(stream) {
const reader = stream.readable.getReader();
const writer = stream.writable.getWriter();
try {
const { value, done } = await reader.read();
if (!done) {
const decoder = new TextDecoder();
const message = decoder.decode(value);
console.log("Stream received:", message);
// Send response
const encoder = new TextEncoder();
await writer.write(encoder.encode(`Echo: ${message}`));
}
} finally {
reader.releaseLock();
await writer.close();
}
}
// Unidirectional streams
async function handleIncomingUnidirectionalStreams(transport) {
const reader = transport.incomingUnidirectionalStreams.getReader();
while (true) {
const { value: stream, done } = await reader.read();
if (done) break;
console.log("Incoming unidirectional stream");
const streamReader = stream.getReader();
const { value, done: streamDone } = await streamReader.read();
if (!streamDone) {
const decoder = new TextDecoder();
console.log("Unidirectional data:", decoder.decode(value));
}
}
}
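Outgoing unidirectional streams mirror the incoming ones above. A minimal sketch; createUnidirectionalStream() resolves to a WritableStream:

// One-way client-to-server message over a unidirectional stream
async function sendViaUnidirectionalStream(transport) {
  const stream = await transport.createUnidirectionalStream();
  const writer = stream.getWriter();
  await writer.write(new TextEncoder().encode("One-way message from client"));
  await writer.close(); // signals end-of-stream to the server
}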
Example: WebTransport datagrams for real-time data
// Datagrams (unreliable, unordered - best for real-time)
async function sendDatagrams(transport) {
const writer = transport.datagrams.writable.getWriter();
const encoder = new TextEncoder();
// Send game state updates
setInterval(async () => {
const gameState = {
position: { x: Math.random(), y: Math.random() },
timestamp: Date.now()
};
const data = encoder.encode(JSON.stringify(gameState));
try {
await writer.write(data);
} catch (error) {
console.error("Failed to send datagram:", error);
}
}, 16); // ~60 FPS
}
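Datagrams have a per-connection size ceiling; payloads above it cannot be sent as a single datagram. A minimal guard using the standard maxDatagramSize property before writing:

// Check payload size against the connection's datagram limit
function canSendDatagram(transport, data) {
  const max = transport.datagrams.maxDatagramSize;
  if (data.byteLength > max) {
    console.warn(`Datagram too large: ${data.byteLength} > ${max} bytes`);
    return false;
  }
  return true;
}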
// Receive datagrams
async function receiveDatagrams(transport) {
const reader = transport.datagrams.readable.getReader();
const decoder = new TextDecoder();
while (true) {
try {
const { value, done } = await reader.read();
if (done) break;
const message = decoder.decode(value);
const data = JSON.parse(message);
// Update game state
updateGameState(data);
} catch (error) {
console.error("Datagram read error:", error);
}
}
}
// Real-time multiplayer game example
class WebTransportGameClient {
constructor(serverUrl) {
this.serverUrl = serverUrl;
this.transport = null;
this.datagramWriter = null;
}
async connect() {
this.transport = new WebTransport(this.serverUrl);
await this.transport.ready;
console.log("Game client connected");
this.datagramWriter = this.transport.datagrams.writable.getWriter();
// Start receiving updates
this.receiveUpdates();
}
async sendPlayerAction(action) {
if (!this.datagramWriter) return;
const data = new TextEncoder().encode(JSON.stringify({
type: "action",
action: action,
timestamp: performance.now()
}));
try {
await this.datagramWriter.write(data);
} catch (error) {
console.error("Failed to send action:", error);
}
}
async receiveUpdates() {
const reader = this.transport.datagrams.readable.getReader();
const decoder = new TextDecoder();
while (true) {
try {
const { value, done } = await reader.read();
if (done) break;
const update = JSON.parse(decoder.decode(value));
this.handleGameUpdate(update);
} catch (error) {
console.error("Failed to receive update:", error);
break;
}
}
}
handleGameUpdate(update) {
// Process game state update
console.log("Game update:", update);
}
async disconnect() {
if (this.transport) {
await this.transport.close();
}
}
}
Note: WebTransport offers lower latency than WebSocket using HTTP/3 (QUIC). Provides both reliable streams and unreliable datagrams. Ideal for: gaming, video conferencing, live streaming. Supports connection migration (network switching). Experimental - Chrome, Edge support.
Warning: WebTransport requires HTTP/3 server support and HTTPS. Experimental - limited browser support. Not available on iOS Safari. Server setup more complex than WebSocket. Consider WebSocket for broader compatibility.
5. WebCodecs API for Audio/Video Encoding
| WebCodecs Component | Description |
|---|---|
| VideoEncoder | Encode video frames to compressed format (H.264, VP9, AV1). |
| VideoDecoder | Decode compressed video to raw frames. |
| AudioEncoder | Encode audio data to compressed format (Opus, AAC). |
| AudioDecoder | Decode compressed audio to PCM samples. |
| VideoFrame | Represents a single video frame with pixel data. |
| EncodedVideoChunk | Encoded video data (can be keyframe or delta frame). |
| AudioData | Raw audio samples in various formats. |
Example: Encode video frames
// Check WebCodecs support
if (!("VideoEncoder" in window)) {
console.error("WebCodecs not supported");
} else {
console.log("WebCodecs supported");
}
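Codec availability varies by browser and hardware, so probe before calling configure(). A minimal sketch using the standard VideoEncoder.isConfigSupported() static method (the candidate codec strings are illustrative):

// Probe candidate codecs and return the first supported one
async function pickSupportedCodec() {
  const candidates = ["av01.0.04M.08", "vp09.00.10.08", "avc1.42001f"]; // AV1, VP9, H.264
  for (const codec of candidates) {
    const { supported } = await VideoEncoder.isConfigSupported({
      codec,
      width: 1920,
      height: 1080,
      bitrate: 2_000_000,
      framerate: 30
    });
    if (supported) return codec;
  }
  return null; // none supported
}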
// Initialize video encoder
const encodedChunks = [];
const encoder = new VideoEncoder({
output: (chunk, metadata) => {
// Receive encoded chunk
console.log("Encoded chunk:", {
type: chunk.type, // "key" or "delta"
timestamp: chunk.timestamp,
byteLength: chunk.byteLength
});
encodedChunks.push(chunk);
// If key frame, save decoder config
if (metadata?.decoderConfig) {
console.log("Decoder config:", metadata.decoderConfig);
}
},
error: (error) => {
console.error("Encoder error:", error);
}
});
// Configure encoder
encoder.configure({
codec: "vp09.00.10.08", // VP9
width: 1920,
height: 1080,
bitrate: 2_000_000, // 2 Mbps
framerate: 30,
latencyMode: "realtime" // or "quality"
});
// Encode frames from canvas
async function encodeCanvasFrames(canvas) {
const fps = 30;
const frameDuration = 1_000_000 / fps; // microseconds
let frameCount = 0;
const captureFrame = () => {
// Create VideoFrame from canvas
const frame = new VideoFrame(canvas, {
timestamp: frameCount * frameDuration
});
// Encode frame
encoder.encode(frame, { keyFrame: frameCount % 30 === 0 });
// Close frame to free resources
frame.close();
frameCount++;
if (frameCount < 300) { // Encode 10 seconds
requestAnimationFrame(captureFrame);
} else {
// Finish encoding
encoder.flush().then(() => {
console.log("Encoding complete");
encoder.close();
});
}
};
captureFrame();
}
// Encode from MediaStream (camera/screen)
async function encodeMediaStream() {
const stream = await navigator.mediaDevices.getUserMedia({
video: { width: 1280, height: 720 }
});
const track = stream.getVideoTracks()[0];
const processor = new MediaStreamTrackProcessor({ track });
const reader = processor.readable.getReader();
while (true) {
const { value: frame, done } = await reader.read();
if (done) break;
// Encode frame
encoder.encode(frame);
frame.close();
}
}
Example: Decode and display video
// Video decoder
const decoder = new VideoDecoder({
output: (frame) => {
// Display decoded frame
console.log("Decoded frame:", {
format: frame.format,
codedWidth: frame.codedWidth,
codedHeight: frame.codedHeight,
timestamp: frame.timestamp
});
// Draw to canvas
displayFrame(frame);
// Close frame
frame.close();
},
error: (error) => {
console.error("Decoder error:", error);
}
});
// Configure decoder
decoder.configure({
codec: "vp09.00.10.08",
codedWidth: 1920,
codedHeight: 1080
});
// Decode encoded chunks
function decodeVideo(encodedChunks) {
encodedChunks.forEach(chunk => {
decoder.decode(chunk);
});
decoder.flush().then(() => {
console.log("Decoding complete");
});
}
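When encoded data arrives over a network rather than straight from the encoder callback, it must be wrapped back into chunks for the decoder. A minimal sketch; the { type, timestamp, bytes } wire format here is an assumption:

// Rebuild an EncodedVideoChunk from received bytes plus metadata
function chunkFromNetwork({ type, timestamp, bytes }) {
  return new EncodedVideoChunk({
    type,       // "key" or "delta"
    timestamp,  // microseconds
    data: bytes // ArrayBuffer or typed array
  });
}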
// Display frame on canvas
function displayFrame(frame) {
const canvas = document.getElementById("output-canvas");
canvas.width = frame.displayWidth;
canvas.height = frame.displayHeight;
const ctx = canvas.getContext("2d");
ctx.drawImage(frame, 0, 0);
}
// Audio encoding example
const audioEncoder = new AudioEncoder({
output: (chunk, metadata) => {
console.log("Encoded audio chunk:", chunk.byteLength);
},
error: (error) => {
console.error("Audio encoder error:", error);
}
});
audioEncoder.configure({
codec: "opus",
sampleRate: 48000,
numberOfChannels: 2,
bitrate: 128000
});
// Encode audio from microphone
async function encodeAudio() {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const track = stream.getAudioTracks()[0];
const processor = new MediaStreamTrackProcessor({ track });
const reader = processor.readable.getReader();
while (true) {
const { value: audioData, done } = await reader.read();
if (done) break;
audioEncoder.encode(audioData);
audioData.close();
}
}
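The decode side mirrors the encoder. A minimal AudioDecoder sketch matching the Opus configuration above; some codecs also need the description field from the encoder's metadata:

// Decode Opus chunks back into raw AudioData samples
const audioDecoder = new AudioDecoder({
  output: (audioData) => {
    console.log("Decoded audio:", audioData.numberOfFrames, "frames");
    audioData.close(); // free the sample buffer
  },
  error: (error) => console.error("Audio decoder error:", error)
});
audioDecoder.configure({
  codec: "opus",
  sampleRate: 48000,
  numberOfChannels: 2
});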
Note: WebCodecs provides low-level access to media codecs. Encode/decode video and audio programmatically. Useful for: video editing, conferencing, streaming, transcoding. More control than MediaRecorder API. Supports H.264, VP8, VP9, AV1 for video; Opus, AAC for audio.
Warning: WebCodecs is low-level - you handle frame-by-frame encoding/decoding and must manage memory by calling frame.close(). Not all codecs are supported in all browsers; check support with VideoEncoder.isConfigSupported(). Supported in Chrome and Edge - not in Firefox or Safari yet.
6. Origin Trial APIs and Feature Flags
| Concept | Description |
|---|---|
| Origin Trial | Test experimental features in production with token from Chrome. |
| Feature Flag | Enable experimental features locally via browser flags. |
| Origin Trial Token | Signed token that enables feature for specific origin and duration. |
| chrome://flags | Browser page to enable experimental features for testing. |
| Meta Tag Registration | Activate origin trial via <meta> tag in HTML. |
| HTTP Header Registration | Activate origin trial via Origin-Trial HTTP response header. |
Example: Register for Origin Trial
<!-- Method 1: Meta tag -->
<meta http-equiv="origin-trial"
content="YOUR_ORIGIN_TRIAL_TOKEN_HERE">
<!-- Example with actual token format -->
<meta http-equiv="origin-trial"
content="A1b2C3d4E5f6G7h8I9j0K1l2M3n4O5p6Q7r8S9t0U1v2W3x4Y5z6A7b8C9d0E1f2G3h4I5j6K7l8M9n0O1p2Q3r4S5t6U7v8W9x0Y1z2A3b4C5d6E7f8G9h0I1j2K3l4M5n6O7p8Q9r0S1t2U3v4W5x6Y7z8A9b0C1d2E3f4==">
<script>
// Check if experimental feature is available
if ("experimentalFeature" in window) {
console.log("Experimental feature enabled via origin trial");
useExperimentalFeature();
} else {
console.log("Experimental feature not available");
useFallback();
}
</script>
Example: HTTP header registration and feature detection
# HTTP Response Header
Origin-Trial: YOUR_ORIGIN_TRIAL_TOKEN_HERE
# Multiple tokens (comma-separated)
Origin-Trial: TOKEN_1, TOKEN_2
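Tokens can also be injected from JavaScript, which is useful for third-party scripts that control neither the page HTML nor the response headers:

// Method 3: Inject the origin-trial token at runtime
const otMeta = document.createElement("meta");
otMeta.httpEquiv = "origin-trial";
otMeta.content = "YOUR_ORIGIN_TRIAL_TOKEN_HERE";
document.head.appendChild(otMeta);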
Example: Programmatic feature detection and flags
// Check for experimental features
const experimentalFeatures = {
// Web GPU
webGPU: "gpu" in navigator,
// WebNN
webNN: "ml" in navigator,
// Web Transport
webTransport: "WebTransport" in window,
// WebCodecs
webCodecs: "VideoEncoder" in window,
// File System Access
fileSystemAccess: "showOpenFilePicker" in window,
// Web Bluetooth
webBluetooth: "bluetooth" in navigator,
// Web USB
webUSB: "usb" in navigator,
// Web Serial
webSerial: "serial" in navigator,
// Web HID
webHID: "hid" in navigator,
// Compute Pressure (the observer is named PressureObserver in the current spec)
computePressure: "PressureObserver" in window,
// Eyedropper API
eyeDropper: "EyeDropper" in window,
// Window Controls Overlay
windowControlsOverlay: "windowControlsOverlay" in navigator
};
console.log("Experimental features:", experimentalFeatures);
// Progressive enhancement with feature detection
function useExperimentalFeatures() {
// WebGPU if available, else WebGL
if (experimentalFeatures.webGPU) {
initWebGPU();
} else {
initWebGL();
}
// Web Transport if available, else WebSocket
if (experimentalFeatures.webTransport) {
connectWebTransport();
} else {
connectWebSocket();
}
}
// Origin Trial status check
async function checkOriginTrialStatus() {
// Some APIs expose trial status
if (document.featurePolicy) {
const features = document.featurePolicy.features();
console.log("Available features:", features);
}
// Check specific trial features
try {
// Attempt to use feature
const feature = await tryExperimentalFeature();
console.log("Origin trial active for feature");
return true;
} catch (error) {
console.log("Origin trial not active:", error.message);
return false;
}
}
// Get Origin Trial token programmatically
function getOriginTrialToken() {
// For your actual implementation, get token from:
// https://developer.chrome.com/origintrials/
// Sign up for trial
// 1. Go to Chrome Origin Trials
// 2. Select feature to trial
// 3. Enter your origin (https://example.com)
// 4. Receive token
// 5. Add to meta tag or HTTP header
return "YOUR_TOKEN_HERE";
}
// Enable feature flags for local development
console.log(`
To enable experimental features locally:
1. Open chrome://flags
2. Search for feature name
3. Enable and restart browser
Common flags:
- #enable-experimental-web-platform-features
- #enable-webgpu-developer-features
- #enable-experimental-webassembly-features
`);
Note: Origin Trials let you test experimental features in production. Register at Chrome Origin Trials site, get token for your domain. Token enables feature for all users for trial duration (usually 6 months). Use for gathering feedback before feature ships.
Warning: Origin Trial features may change or be removed. Don't rely on them for critical functionality. Always provide fallback. Trial tokens expire - monitor trial status. Features may become available without trial when they ship. Test thoroughly - experimental APIs can have bugs.
Experimental and Emerging APIs Best Practices
- Always check feature availability before use: if ("feature" in window)
- Provide fallbacks for unsupported browsers - progressive enhancement
- WebGPU: Use for compute-heavy graphics, provide WebGL fallback
- WebAssembly: Great for performance-critical code, use instantiateStreaming()
- WebNN: Hardware ML acceleration, fallback to TensorFlow.js WASM backend
- WebTransport: Lower latency than WebSocket, fallback to WebSocket
- WebCodecs: Low-level media control, requires manual memory management
- Register for Origin Trials to test features in production safely
- Monitor experimental API specifications - they can change
- Use feature flags locally (chrome://flags) for development
- Document which experimental features your app uses
- Have migration plan when experimental features become stable
- Test on multiple browsers - experimental features may not be cross-browser