Bake Faceit / Audio2Face animations directly to a rig (for example Rigify) without having to bake shapekeys

Bake Audio2Face animations through Faceit poses directly to Rigify

Here's an attempt to bake the Faceit expression poses directly to normal rig f-curves, instead of having to express them as shape keys first.

It seems to work: for each baked frame we loop through the f-curves of the reference pose actions and add up scaled versions of the partial poses.
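For reference, the baking operator only needs two keys from the Audio2Face JSON export: facsNames, the list of pose names (each must match a reference-pose action in the .blend file), and weightMat, one row of per-pose weights for every exported frame. A made-up minimal example of that shape (real exports carry more fields, such as numFrames):

{
  "numFrames": 3,
  "facsNames": ["jawOpen", "mouthSmileLeft", "mouthSmileRight"],
  "weightMat": [
    [0.0, 0.0, 0.0],
    [0.35, 0.1, 0.12],
    [0.62, 0.05, 0.06]
  ]
}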

import bpy
import json
from bpy.props import StringProperty, FloatProperty
from bpy_extras.io_utils import ImportHelper
from os.path import splitext, basename


class ANIM_OT_separate_faceit_poses(bpy.types.Operator):
    """Separate the Faceit overwrite action into one action per expression"""
    bl_idname = "ranimation.separate_faceit_poses"
    bl_label = "Separate to discrete actions"

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        scene = context.scene
        expressions = scene.get('faceit_expression_list')
        if not expressions:
            self.report({'WARNING'}, 'No expression list found')
            return {'CANCELLED'}
        overwrite_action = bpy.data.actions.get('overwrite_shape_action')
        if not overwrite_action:
            self.report({'WARNING'}, 'No overwrite action found')
            return {'CANCELLED'}
        actions = bpy.data.actions
        for e in expressions:
            name = e.get('name')
            frame = e.get('frame')
            # Replace any previously generated action for this expression.
            prev_action = actions.get(name)
            if prev_action:
                actions.remove(prev_action)
            action = actions.new(name)
            for curve in overwrite_action.fcurves:
                # TMP: skip scale channels.
                if 'scale' in curve.data_path:
                    continue
                # Sample the overwrite action at the expression's frame and
                # store it as a single keyframe on frame 1 of the new action.
                new_curve = action.fcurves.new(
                    curve.data_path, index=curve.array_index)
                new_curve.keyframe_points.insert(1, curve.evaluate(frame))
            action.asset_mark()
        return {'FINISHED'}


class ANIM_OT_bake_poses_to_rigs(bpy.types.Operator, ImportHelper):
    """Bake an Audio2Face weight export onto the rig from the reference poses"""
    bl_idname = "ranimation.bake_animation_from_ref_poses"
    bl_label = "Bake Audio2Face animation to rig from reference poses"
    filename_ext = ".json"

    filter_glob: StringProperty(
        default="*.json", options={'HIDDEN'}, maxlen=255)
    original_framerate: FloatProperty(name='Original Framerate', default=10)

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        with open(self.properties.filepath, 'r') as file:
            spec = json.load(file)
        action_name, _ = splitext(basename(self.properties.filepath))
        scene = context.scene
        total_frames = len(spec['weightMat'])
        current_fps = scene.render.fps / scene.render.fps_base
        fps_ratio = self.original_framerate / current_fps
        max_frames = int(total_frames / fps_ratio)
        print('max frames', max_frames)
        new_action = bpy.data.actions.new(action_name)
        current_insert_auto = scene.tool_settings.use_keyframe_insert_auto
        scene.tool_settings.use_keyframe_insert_auto = True
        facs_names = spec['facsNames']
        weight_mat = spec['weightMat']
        wm = context.window_manager
        wm.progress_begin(0, max_frames)
        for f in range(max_frames):
            wm.progress_update(f)
            # Resample from the export's framerate to the scene framerate.
            projected_frame = int(f * fps_ratio)
            weights = weight_mat[projected_frame]
            for name, weight in zip(facs_names, weights):
                pose_ref = bpy.data.actions.get(name)
                if not pose_ref:
                    self.report({'WARNING'}, f"Couldn't find ref pose {name}")
                    continue
                for ref_curve in pose_ref.fcurves:
                    if 'scale' in ref_curve.data_path:
                        continue
                    ref_value = ref_curve.evaluate(1)
                    if ref_value == 0:
                        continue
                    scaled_value = ref_value * weight
                    target_curve = new_action.fcurves.find(
                        ref_curve.data_path, index=ref_curve.array_index)
                    if not target_curve:
                        target_curve = new_action.fcurves.new(
                            ref_curve.data_path, index=ref_curve.array_index)
                    keyframe_points = target_curve.keyframe_points
                    target_keyframe = next(
                        (p for p in keyframe_points if p.co.x == f), None)
                    if not target_keyframe:
                        keyframe_points.insert(f, scaled_value)
                    else:
                        # Blend this pose on top of the value accumulated so far.
                        current_value = target_keyframe.co.y
                        scaled_delta = (ref_value - current_value) * weight
                        target_keyframe.co.y = current_value + scaled_delta
        wm.progress_end()
        obj = context.active_object
        if obj.animation_data is None:
            obj.animation_data_create()
        obj.animation_data.action = new_action
        scene.tool_settings.use_keyframe_insert_auto = current_insert_auto
        return {'FINISHED'}


def register():
    bpy.utils.register_class(ANIM_OT_separate_faceit_poses)
    bpy.utils.register_class(ANIM_OT_bake_poses_to_rigs)


def unregister():
    bpy.utils.unregister_class(ANIM_OT_bake_poses_to_rigs)
    bpy.utils.unregister_class(ANIM_OT_separate_faceit_poses)


if __name__ == "__main__":
    register()
    bpy.ops.ranimation.separate_faceit_poses()
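Once the script has been run from Blender's text editor (which registers both operators and separates the reference poses), the bake can be triggered with the rig as the active object. A minimal sketch; the file path and framerate below are placeholders:

# Open a file browser to pick the Audio2Face JSON export.
bpy.ops.ranimation.bake_animation_from_ref_poses('INVOKE_DEFAULT')

# Or bake a known export directly, passing the framerate it was recorded at.
bpy.ops.ranimation.bake_animation_from_ref_poses(
    filepath='/tmp/a2f_export.json', original_framerate=60.0)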