"""Gradio demo: generate a short video from a single source image using
ModelScope's damo/Image-to-Video pipeline."""

import gradio as gr
from modelscope.pipelines import pipeline
from modelscope.outputs import OutputKeys

# Build the pipeline once at module load — model weights are large, so we
# must not reconstruct this per request.
pipe = pipeline(task='image-to-video', model='damo/Image-to-Video', model_revision='v1.1.0')


def infer(image_in):
    """Run the image-to-video pipeline on one source image.

    Args:
        image_in: Path to the source image (local file path or URL); the
            Gradio Image component supplies a local filepath.

    Returns:
        Filesystem path of the generated video (as reported by the pipeline).
    """
    # NOTE(review): renamed the parameter from `audio_in` — it receives an
    # image path, not audio; Gradio passes click inputs positionally, so
    # callers are unaffected.
    output_video_path = pipe(image_in, output_video='output.mp4')[OutputKeys.OUTPUT_VIDEO]
    print(output_video_path)
    return output_video_path


css = """ #col-container { max-width: 780px; margin-left: auto; margin-right: auto; } img[src*='#center'] { display: block; margin: auto; } .footer { margin-bottom: 45px; margin-top: 10px; text-align: center; border-bottom: 1px solid #e5e5e5; } .footer > p { font-size: .8rem; display: inline-block; padding: 0 10px; transform: translateY(10px); background: white; } .dark .footer { border-color: #303030; } .dark .footer > p { background: #0b0f19; } """

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""

MS Image2Video

[![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm.svg#center)](https://huggingface.co/spaces/fffiloni/MS-Image2Video-cloning?duplicate=true) """)

        image_in = gr.Image(
            label="Source Image",
            source="upload",
            type="filepath",
        )
        submit_btn = gr.Button("Submit")
        video_out = gr.Video(label="Video Result")
        gr.HTML(""" """)

        # BUG FIX: the original passed `inputs=[audio_in]`, but no variable
        # named `audio_in` exists at this scope (the component is `image_in`),
        # which raised NameError before the app could start.
        submit_btn.click(
            fn=infer,
            inputs=[image_in],
            outputs=[video_out],
        )

demo.queue(max_size=20).launch()