3D model from a single photo

This page walks you through a complete example so you can quickly see how our services work.

Prerequisites

  • Blender >= 3.5
  • Python 3 with the requests package (needed for the example script below)

An end-to-end example using all of our services

The full pipeline of our services turns a single photo of a person into a digital avatar.

  1. Create a model-config.json file via echo "{}" > model-config.json. We create an empty config because we do not want to use model preloading.

  2. Run the following Python script.

"""
    Full Pipeline Example.
"""

import json
import requests
import urllib.request

service_headers = {"accept": "application/json"}

name = "ENTER_YOUR_NAME"
mail = "ENTER_YOUR_MAIL"
comment = "ENTER_YOUR_COMMENT"
image_path = "ENTER IMAGE PATH"

class UserSimulate():
    def __init__(self):
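        # Service endpoints and placeholders for intermediate results (token, head data, hair GLB).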
        self.auth_url = "http://api.metahumansdk.io/auth"
        self.head_url = "https://api.metahumansdk.io/face_recon"
        self.hair_url = "https://api.metahumansdk.io/hair_recon"
        self.glb_url = "https://api.metahumansdk.io/glb_const"

        self.token = None
        self.head_json = None
        self.head_uv = None
        self.hair_glb = None

    def get_token(self):
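        # Request an API token for the provided name/mail/comment; it authorizes all later calls.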
        data = {
            "name": name,
            "mail": mail,
            "comment": comment
        }
        response_config = requests.post(self.auth_url + "/token", headers=service_headers, data=data)
        assert (response_config.status_code == 200)
        self.token = response_config.json()["result"]["token"]
    
    def reconstruct_head(self):
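        # Send the photo to the face reconstruction service and download the generated head texture (UV map).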
        files_base = {
            "token": (None, self.token),
            "image_bytes": ("image", open(image_path, "rb"), "image/png")
        }
        response = requests.post(self.head_url + "/run_pipeline", headers=service_headers, files=files_base)
        print(response.json())
        assert (response.status_code == 200)
        with urllib.request.urlopen(self.head_url + response.json()['result']['face']['textureUrl']) as f:
            self.head_uv = f.read()

        self.head_json = response.json()

    def reconstruct_hair(self):
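        # Send the same photo to the hair reconstruction service in "volume" mode and keep the returned GLB bytes.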
        files_base = {
            "token": (None, self.token),
            "image_bytes": ("test_image.png", open(image_path, "rb"), "image/png"),
            "mode": (None, "volume")
        }
        response_config = requests.post(self.hair_url + '/run_pipeline', headers=service_headers, files=files_base)
        assert (response_config.status_code == 200)
        self.hair_glb = response_config.content

    def put_schema(self):
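        # Upload the (empty) model-config.json schema to the GLB assembly service.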
        files = {
            "token": (None, self.token),
            "schema": ("data/schema.json", open("model-config.json", 'r'), "application/json")
        }
        response = requests.put(self.glb_url + "/schema", headers=service_headers, files=files)
        assert (response.status_code == 200)

    def assemble_model(self):
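        # Combine the head texture, head config, model config, and hair GLB into the final avatar and return its bytes.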

        with open("model-config.json", 'r') as out:
            models_config = json.load(out)

        files = {
            "token": (None, self.token),
            "head_uv": ("head_model", self.head_uv, "image/png"),
            "head_config": ("head_config", json.dumps(self.head_json), "application/json"),
            "models_config": ("models_config", json.dumps(models_config), "application/json"),
            "custom_models": ("custom_models", self.hair_glb, "application/json"),
            "add_facs": (None, True)
        }
        response = requests.post(self.glb_url + "/assemble", headers=service_headers, files=files)
        assert (response.status_code == 200)
        return response.content


if __name__ == "__main__":
    user = UserSimulate()
    print("Obtaining token...")
    user.get_token()
    print("Reconstructing head...")
    user.reconstruct_head()
    print("Reconstructing hair...")
    user.reconstruct_hair()
    print("Putting schema...")
    user.put_schema()
    print("Obtaining model...")
    model = user.assemble_model()

    with open("head.glb", "wb") as out:
        out.write(model)

Note: replace the placeholder values (name, mail, comment, and image path) with your own information.

If the script executes successfully, you should have a GLB model saved as head.glb.
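To verify the result, you can check that the file really is a binary glTF and then open it in Blender (the prerequisite above). The snippet below is a minimal sketch and assumes head.glb is in the current directory; the Blender lines are intended to be run from Blender's Python console, where the bpy module is available.

# Quick sanity check: a binary glTF (.glb) file starts with the 4-byte ASCII magic "glTF".
with open("head.glb", "rb") as f:
    assert f.read(4) == b"glTF", "head.glb does not look like a binary glTF file"

# From Blender's Python console (or via File > Import > glTF 2.0 in the UI):
# import bpy
# bpy.ops.import_scene.gltf(filepath="head.glb")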
