Demo: use AmadeusGPT on your own data 🚀#

!pip install --pre amadeusgpt
!pip install --pre deeplabcut
  • Let’s test that your OpenAI API Key works:

# Configure the OpenAI API key and verify it works with a minimal request.
# Paste your key below, or export OPENAI_API_KEY in your environment beforehand.
mykey = "paste-your-key-here"
import os

# Prefer an already-exported OPENAI_API_KEY; fall back to the pasted key.
# (The original always passed `mykey` to the client, which failed whenever
# the real key was supplied via the environment instead of pasted above.)
api_key = os.environ.get('OPENAI_API_KEY', mykey)
os.environ['OPENAI_API_KEY'] = api_key

from openai import OpenAI

client = OpenAI(api_key=api_key)

# One-message chat completion: raises an authentication error if the key is
# invalid, otherwise prints the model's reply.
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Hello"}]
)
print(response.choices[0].message.content)
import amadeusgpt
import deeplabcut
from amadeusgpt import AMADEUS
from amadeusgpt.utils import parse_result
from amadeusgpt import create_project

Now, unlike other notebooks, we don’t have keypoint_file_path here (as it’s not provided)#

  • By default, we use gpt-4o to determine which SuperAnimal model to run, and SuperAnimal is run the first time a keypoint-related query is asked. Note that to use SuperAnimal, you will need to install the latest DeepLabCut.

  • Make sure you use a short video clip if you are not using GPUs on Linux (Apple silicon support to be added)!

# Where you store your video and (optionally) keypoint files.
# If you don't have keypoint files, SuperAnimal will be run on your video.
# If you have pairs of video and keypoint files, make sure they follow this naming convention:

# your_folder
#   - cat.mp4
#   - cat.h5 (DLC output)
#   - dog.mp4
#   - dog.h5 (DLC output)

# Input folder containing the video(s) and optional matching keypoint files.
data_folder = "../examples/Horse"
# Output folder where results are written.
result_folder = "temp_result_folder"
# Only files ending with this suffix are treated as videos.
video_suffix = ".mp4"

# If you want to overwrite the default config, you can do it here.
kwargs = {
    "data_info": {
        "data_folder": data_folder,
        "result_folder": result_folder,
        # Can only locate videos matching video_suffix. Reuse the variable
        # defined above instead of hard-coding ".mp4", so changing
        # `video_suffix` once actually takes effect here too.
        "video_suffix": video_suffix,
    },
    "llm_info": {
        "max_tokens": 4096,
        "temperature": 0.0,
        # One can switch this to gpt-4o-mini for cheaper inference at the
        # cost of worse performance.
        "gpt_model": "gpt-4o",
        # We only keep a conversation history of 2. You can make it longer
        # with more cost. We are switching to a different form of
        # long-term memory.
        "keep_last_n_messages": 2,
    },
    "keypoint_info": {
        # Only set True if you work with 3D keypoints.
        "use_3d": False,
    },
    # This is the frame index for gpt-4o to match the right SuperAnimal model.
    "video_info": {"scene_frame_number": 1},
}

# Create the project config from the folders and suffix defined above;
# kwargs overrides the defaults.
config = create_project(data_folder, result_folder, video_suffix = video_suffix, **kwargs)

# use_vlm=True enables the vision-language-model step — presumably used to
# pick the matching SuperAnimal model from a scene frame (NOTE(review):
# inferred from the notebook text above; confirm against the AMADEUS docs).
amadeus = AMADEUS(config, use_vlm = True)
# Videos discovered under data_folder with the configured suffix.
video_file_paths = amadeus.get_video_file_paths()
print (video_file_paths)
# Query 1: trajectory of the animal center, colored by time.
query = "Plot the trajectory of the animal using the animal center and color it by time"
qa_message = amadeus.step(query)
parse_result(amadeus, qa_message)
# Query 2: occupancy plot from the animal center.
query = "give me the occupancy plot using the animal center"
qa_message = amadeus.step(query)
qa_message = parse_result(amadeus, qa_message)