Clips and Frames in Videos
Videos can get pretty large, but sometimes the interesting parts are only portions of the video — called a Clip in ApertureDB — or individual frames, called a Frame.
Connect to ApertureDB
Option A: ApertureDB Cloud (recommended)
Sign up for a free 30-day trial. Get your key from Connect > Generate API Key, add it to a .env file in this directory:
APERTUREDB_KEY=your_key_here
Option B: Community Edition (local Docker)
Run this in a terminal before starting the notebook:
docker run -d --name aperturedb \
-p 55555:55555 -e ADB_MASTER_KEY=admin -e ADB_FORCE_SSL=false \
aperturedata/aperturedb-community
%pip install --upgrade --quiet aperturedb python-dotenv
# Option A: ApertureDB Cloud
from dotenv import load_dotenv
load_dotenv() # loads APERTUREDB_KEY from .env into the environment
# Option B: Community Edition (local Docker)
# !adb config create localdb --active \
#     --host localhost --port 55555 \
#     --username admin --password admin \
#     --no-use-ssl --no-interactive
# create_connector() picks up credentials from the environment (Option A)
# or from the active adb config (Option B).
from aperturedb.CommonLibrary import create_connector
client = create_connector()
# Sanity check: ask the server for its status and print the reply.
response, _ = client.query([{"GetStatus": {}}])
client.print_last_response()
Add a Video and Interesting Clip + Frame in it to ApertureDB
For bulk additions, we recommend using the Python SDK loaders.
# Download the sample file
! mkdir -p data; cd data; wget https://github.com/aperture-data/Cookbook/blob/e333f6c59070b9165033d9ddd5af852a6b9624ba/notebooks/simple/data/crepe_flambe.mp4; cd -
# Build a three-command transaction: add the video, then attach a clip
# and a key frame to it through the _ref back-reference.
add_video = {
    "AddVideo": {
        "_ref": 1,  # lets the later commands in this transaction refer to this video
        "properties": {
            "name": "crepe_flambe",
            "id": 45,
            "category": "dessert",
            "cuisine": "French",
            "location": "Brittany",
            "caption": "Special Brittany flambe crepe"
        },
        "if_not_found": {  # makes the insert idempotent: skip if id 45 already exists
            "id": ["==", 45]
        }
    }
}

add_clip = {
    # Clip defined by frame numbers here; time-fraction ranges are also supported.
    "AddClip": {
        "video_ref": 1,
        "frame_number_range": {"start": 10, "stop": 150},
        "properties": {"label": "active flames"}
    }
}

add_frame = {
    # A single key frame of interest; ExtractFrame can fetch one on the fly instead.
    "AddFrame": {
        "video_ref": 1,
        "frame_number": 65,
        "properties": {"scene": "tallest flame"}
    }
}

query = [add_video, add_clip, add_frame]
# Read the video file as a binary blob (the original comment said "image";
# this is the mp4 video consumed by the AddVideo command above).
# Use a context manager so the file handle is closed even on error.
with open("data/crepe_flambe.mp4", "rb") as fd:
    array = [fd.read()]  # one blob per blob-consuming command in the query
response, blobs = client.query(query, array)
client.print_last_response()
[
{
"AddVideo": {
"status": 0
}
},
{
"AddClip": {
"status": 0
}
},
{
"AddFrame": {
"status": 0
}
}
]
We can see that the clip was correctly added. Note that a Clip in ApertureDB stores only the position of the clip within the video as metadata properties. There is only one copy of the video in ApertureDB.
The same applies to a Frame.
Retrieve the Interesting Cooking Moments
# Retrieve the stored clip and frame by their metadata properties.
# NOTE: server-side operations such as resize are demonstrated in the
# "With Transformations" section below; this query deliberately returns
# the clip and frame unmodified (the duplicated resize block was removed).
query = [{
    "FindClip": {
        "blobs": True,  # return the clip's video data, not just metadata
        "constraints": {
            "label": ["==", "active flames"]
        },
        "fast_cut": True, # Optimized retrieval of clip
        "results": {
            "all_properties": True
        }
    }
},{
    "FindFrame": {
        "blobs": True,  # return the frame's image data as well
        "constraints": {
            "scene": ["==", "tallest flame"]
        },
        "results": {
            "all_properties": True
        }
    }
}]
# Execute the query; blobs[0] is the clip's video data, blobs[1] the frame image.
response, blobs = client.query(query)
client.print_last_response()
[
{
"FindClip": {
"blobs_start": 0,
"entities": [
{
"_frame_number_range": {
"start": 10,
"stop": 150
},
"_range_start": 10,
"_range_stop": 150,
"_range_type": 1,
"_uniqueid": "6.3.224060",
"label": "active flames"
}
],
"returned": 1,
"status": 0
}
},
{
"FindFrame": {
"blobs_start": 1,
"entities": [
{
"_blob_index": 1,
"_frame_number": 65,
"_label": "None",
"_uniqueid": "24.0.224060",
"scene": "tallest flame"
}
],
"returned": 1,
"status": 0
}
}
]
ApertureDB extracts the clip and frame from the original video on the fly.
from aperturedb import NotebookHelpers as nh
# blobs[0] is the extracted clip (mp4); render it inline in the notebook.
nh.display_video_mp4(blobs[0])
from IPython.display import display,Image
# blobs[1] is the extracted key frame, returned as a JPEG image.
image = Image(blobs[1], format="JPEG")
display(image)

With Transformations
# Same clip lookup as before, but this time ApertureDB resizes the clip
# server-side before returning it.
resize_to_qvga = {"type": "resize", "width": 320, "height": 240}

query = [{
    "FindClip": {
        "blobs": True,                                   # return the video data
        "constraints": {"label": ["==", "active flames"]},
        "fast_cut": True,                                # optimized clip extraction
        "operations": [resize_to_qvga],                  # applied on the fly, server-side
        "results": {"all_properties": True},
    }
}]
# Execute the query and render the resized clip inline.
response, blobs = client.query(query)
client.print_last_response()
nh.display_video_mp4(blobs[0])
[
{
"FindClip": {
"blobs_start": 0,
"entities": [
{
"_frame_number_range": {
"start": 10,
"stop": 150
},
"_range_start": 10,
"_range_stop": 150,
"_range_type": 1,
"_uniqueid": "6.3.224060",
"label": "active flames"
}
],
"returned": 1,
"status": 0
}
}
]
Cleanup
# Remove everything this notebook created: the frame and clip first,
# then the video they belong to.
delete_frame = {"DeleteFrame": {"constraints": {"scene": ["==", "tallest flame"]}}}
delete_clip = {"DeleteClip": {"constraints": {"label": ["==", "active flames"]}}}
delete_video = {"DeleteVideo": {"constraints": {"name": ["==", "crepe_flambe"]}}}

query = [delete_frame, delete_clip, delete_video]
# Execute the deletions; the counts in the response confirm what was removed.
res, blobs = client.query(query)
client.print_last_response()
[
{
"DeleteFrame": {
"count": 1,
"status": 0
}
},
{
"DeleteClip": {
"count": 1,
"status": 0
}
},
{
"DeleteVideo": {
"count": 1,
"status": 0
}
}
]
What's next?