FFmpeg 8.0 adds Whisper support


The release adds a new audio filter that runs speech transcription with the Whisper model via whisper.cpp. Documentation and examples are included in the patch, and an excerpt of the new filter source appears below.
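
Based on the options the patch defines (model, language, queue, destination, format, plus the VAD settings), an invocation of the new filter might look roughly like the sketch below; the model path and output file name are placeholders, and the exact syntax should be checked against the documentation bundled with the patch.

ffmpeg -i input.wav \
       -af "whisper=model=ggml-base.en.bin:language=en:queue=3:destination=output.srt:format=srt" \
       -f null -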

@@ -0,0 +1,463 @@

/*
 * Copyright (c) 2025 Vittorio Palmisano
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <whisper.h>

#include "libavutil/avutil.h"
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"
#include "libavutil/samplefmt.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/audio.h"
#include "libavutil/mem.h"
#include "libavutil/avstring.h"
#include "libavutil/internal.h"
#include "libavformat/avio.h"
#include "libavutil/thread.h"
#include "formats.h"

typedef struct WhisperContext {
    const AVClass *class;

    char *model_path;
    char *language;
    bool use_gpu;
    int gpu_device;
    char *vad_model_path;
    float vad_threshold;
    int64_t vad_min_speech_duration;
    int64_t vad_min_silence_duration;
    int64_t queue;
    char *destination;
    char *format;

    struct whisper_context *ctx_wsp;
    struct whisper_vad_context *ctx_vad;
    struct whisper_vad_params vad_params;

    float *audio_buffer;
    int audio_buffer_queue_size;
    int audio_buffer_fill_size;
    int audio_buffer_vad_size;

    int64_t audio_buffer_start_ms;
    int eof;
    int64_t next_pts;

    AVIOContext *avio_context;
    int index;
} WhisperContext;

static void cb_log(enum ggml_log_level level, const char *text, void *user_data)
{
    AVFilterContext *ctx = user_data;
    int av_log_level = AV_LOG_DEBUG;

    switch (level) {
    case GGML_LOG_LEVEL_ERROR:
        av_log_level = AV_LOG_ERROR;
        break;
    case GGML_LOG_LEVEL_WARN:
        av_log_level = AV_LOG_WARNING;
        break;
    }

    av_log(ctx, av_log_level, "%s", text);
}

static int init(AVFilterContext *ctx)
{
    WhisperContext *wctx = ctx->priv;
    static AVOnce init_static_once = AV_ONCE_INIT;

    ff_thread_once(&init_static_once, ggml_backend_load_all);
    whisper_log_set(cb_log, ctx);

    // Init whisper context
    if (!wctx->model_path) {
        av_log(ctx, AV_LOG_ERROR, "No whisper model path specified. [...]

[... the excerpt is truncated here in the source; it resumes mid-function, inside the per-segment loop ...]

        if (segments_text) {
            char *new_text = av_asprintf("%s%s", segments_text, text_cleaned);
            av_freep(&segments_text);
            segments_text = new_text;
        } else
            segments_text = av_strdup(text_cleaned);

        if (wctx->avio_context) {
            const int64_t start_t = timestamp_ms + t0_ms;
            const int64_t end_t = timestamp_ms + t1_ms;
            char *buf = NULL;

            if (!av_strcasecmp(wctx->format, "srt")) {
                buf = av_asprintf("%d\n%02ld:%02ld:%02ld.%03ld --> %02ld:%02ld:%02ld.%03ld\n%s\n\n",
                                  wctx->index,
                                  start_t / 3600000, (start_t / 60000) % 60, (start_t / 1000) % 60, start_t % 1000,
                                  end_t / 3600000, (end_t / 60000) % 60, (end_t / 1000) % 60, end_t % 1000,
                                  text_cleaned);
            } else if (!av_strcasecmp(wctx->format, "json")) {
                buf = av_asprintf("{\"start\":%ld,\"end\":%ld,\"text\":\"%s\"}\n", start_t, end_t, text_cleaned);
            } else
                buf = av_strdup(text_cleaned);

            if (buf) {
                avio_write(wctx->avio_context, buf, strlen(buf));
                av_freep(&buf);
            }
        }

        av_freep(&text_cleaned);
    }

    wctx->index++;

    AVDictionary **metadata = &frame->metadata;
    if (metadata && segments_text) {
        av_dict_set(metadata, "lavfi.whisper.text", segments_text, 0);
        char *duration_text = av_asprintf("%f", duration);
        av_dict_set(metadata, "lavfi.whisper.duration", duration_text, AV_DICT_DONT_STRDUP_VAL);
    }
    av_freep(&segments_text);

    if (wctx->audio_buffer_fill_size > samples) {
        memcpy(wctx->audio_buffer, wctx->audio_buffer + samples,
               (wctx->audio_buffer_fill_size - samples) * sizeof(*wctx->audio_buffer));
        wctx->audio_buffer_start_ms += duration * 1000;
    }

    wctx->audio_buffer_fill_size -= samples;
    wctx->audio_buffer_vad_size = wctx->audio_buffer_fill_size;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    WhisperContext *wctx = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    const int samples = frame->nb_samples;
    const float *input_data = (const float *) frame->data[0];

    if (wctx->audio_buffer_fill_size + samples > wctx->audio_buffer_queue_size) {
        run_transcription(ctx, frame, wctx->audio_buffer_fill_size);
    }

    if (!wctx->audio_buffer_fill_size)
        wctx->audio_buffer_start_ms = av_rescale_q(frame->pts, (AVRational){1000, 1},
                                                   (AVRational){inlink->time_base.den, inlink->time_base.num});

    memcpy(wctx->audio_buffer + wctx->audio_buffer_fill_size, input_data,
           samples * sizeof(*wctx->audio_buffer));
    wctx->audio_buffer_fill_size += samples;

    if (wctx->ctx_vad && (wctx->audio_buffer_fill_size - wctx->audio_buffer_vad_size) >=
        av_rescale(wctx->vad_min_speech_duration + wctx->vad_min_silence_duration,
                   WHISPER_SAMPLE_RATE, AV_TIME_BASE)) {
        struct whisper_vad_segments *segments = whisper_vad_segments_from_samples(wctx->ctx_vad,
                                                                                  wctx->vad_params,
                                                                                  wctx->audio_buffer,
                                                                                  wctx->audio_buffer_fill_size);
        wctx->audio_buffer_vad_size = wctx->audio_buffer_fill_size;

        if (!segments) {
            av_log(ctx, AV_LOG_ERROR, "failed to detect VAD\n");
        } else {
            int n_segments = whisper_vad_segments_n_segments(segments);

            if (n_segments > 0) {
                const float start_ms = whisper_vad_segments_get_segment_t0(segments, 0) * 10.0;
                const float end_ms = whisper_vad_segments_get_segment_t1(segments, n_segments - 1) * 10.0;
                int end_pos = (int) (end_ms * WHISPER_SAMPLE_RATE / 1000);

                if (end_pos <= wctx->audio_buffer_fill_size -
                    av_rescale(wctx->vad_min_silence_duration, WHISPER_SAMPLE_RATE, AV_TIME_BASE)) {
                    av_log(ctx, AV_LOG_INFO,
                           "VAD detected %d segments, start: %.0f ms, end: %.0f ms (buffer: %d ms)\n",
                           n_segments, start_ms, end_ms,
                           1000 * wctx->audio_buffer_fill_size / WHISPER_SAMPLE_RATE);
                    run_transcription(ctx, frame, end_pos);
                }
            }
            whisper_vad_free_segments(segments);
        }
    } else if (wctx->audio_buffer_fill_size >= wctx->audio_buffer_queue_size)
        run_transcription(ctx, frame, wctx->audio_buffer_fill_size);

    wctx->next_pts = frame->pts + av_rescale_q(samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);

    return ff_filter_frame(outlink, frame);
}

static int push_last_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    WhisperContext *wctx = ctx->priv;
    AVFrame *frame;
    int n_out = 1;

    if (ctx->is_disabled || wctx->audio_buffer_fill_size == 0)
        return 0;

    frame = ff_get_audio_buffer(outlink, n_out);
    if (!frame)
        return AVERROR(ENOMEM);

    av_samples_set_silence(frame->extended_data, 0, n_out, frame->ch_layout.nb_channels, frame->format);

    frame->pts = wctx->next_pts;
    if (wctx->next_pts != AV_NOPTS_VALUE)
        wctx->next_pts += av_rescale_q(n_out, (AVRational){1, outlink->sample_rate}, outlink->time_base);

    run_transcription(ctx, frame, wctx->audio_buffer_fill_size);

    return ff_filter_frame(outlink, frame);
}

static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    WhisperContext *wctx = ctx->priv;
    int64_t pts;
    int status;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (!wctx->eof && ff_inlink_queued_frames(inlink)) {
        AVFrame *frame = NULL;
        int ret;

        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0)
            return filter_frame(inlink, frame);
    }

    if (!wctx->eof && ff_inlink_acknowledge_status(inlink, &status, &pts))
        wctx->eof = status == AVERROR_EOF;

    if (wctx->eof) {
        push_last_frame(outlink);
        ff_outlink_set_status(outlink, AVERROR_EOF, wctx->next_pts);
        return 0;
    }

    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

static int query_formats(const AVFilterContext *ctx,
                         AVFilterFormatsConfig **cfg_in,
                         AVFilterFormatsConfig **cfg_out)
{
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
    AVChannelLayout chlayouts[] = { FF_COUNT2LAYOUT(1), { 0 } };
    int sample_rates[] = { WHISPER_SAMPLE_RATE, -1 };
    int ret;

    ret = ff_set_common_formats_from_list2(ctx, cfg_in, cfg_out, sample_fmts);
    if (ret < 0)
        return ret;

    ret = ff_set_common_channel_layouts_from_list2(ctx, cfg_in, cfg_out, chlayouts);
    if (ret < 0)
        return ret;

    return ff_set_common_samplerates_from_list2(ctx, cfg_in, cfg_out, sample_rates);
}

#define OFFSET(x) offsetof(WhisperContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
#define HOURS 3600000000

static const AVOption whisper_options[] = {
    { "model", "Path to the whisper.cpp model file", OFFSET(model_path), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "language", "Language for transcription ('auto' for auto-detect)", OFFSET(language), AV_OPT_TYPE_STRING, { .str = "auto" }, .flags = FLAGS },
    { "queue", "Audio queue size", OFFSET(queue), AV_OPT_TYPE_DURATION, { .i64 = 3000000 }, 20000, HOURS, .flags = FLAGS },
    { "use_gpu", "Use GPU for processing", OFFSET(use_gpu), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, .flags = FLAGS },
    { "gpu_device", "GPU device to use", OFFSET(gpu_device), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, .flags = FLAGS },
    { "destination", "Output destination", OFFSET(destination), AV_OPT_TYPE_STRING, { .str = "" }, .flags = FLAGS },
    { "format", "Output format (text|srt|json)", OFFSET(format), AV_OPT_TYPE_STRING, { .str = "text" }, .flags = FLAGS },
    { "vad_model", "Path to the VAD model file", OFFSET(vad_model_path), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "vad_threshold", "VAD threshold", OFFSET(vad_threshold), AV_OPT_TYPE_FLOAT, { .dbl = 0.5 }, 0.0, 1.0, .flags = FLAGS },
    { "vad_min_speech_duration", "Minimum speech duration for VAD", OFFSET(vad_min_speech_duration), AV_OPT_TYPE_DURATION, { .i64 = 100000 }, 20000, HOURS, .flags = FLAGS },
    { "vad_min_silence_duration", "Minimum silence duration for VAD", OFFSET(vad_min_silence_duration), AV_OPT_TYPE_DURATION, { .i64 = 500000 }, 0, HOURS, .flags = FLAGS },
    { NULL }
};

static const AVClass whisper_class = {
    .class_name = "whisper",
    .item_name  = av_default_item_name,
    .option     = whisper_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

const FFFilter ff_af_whisper = {
    .p.name        = "whisper",
    .p.description = NULL_IF_CONFIG_SMALL("Transcribe audio using whisper.cpp. [...]

[... remainder of the 463-line patch is not included in the source ...]
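
The excerpt also shows the filter attaching each transcription to the processed frames as lavfi.whisper.text and lavfi.whisper.duration metadata, so instead of (or in addition to) writing to a destination file, the text can be inspected with FFmpeg's existing ametadata filter. A rough sketch, again with a placeholder model name:

ffmpeg -i input.wav \
       -af "whisper=model=ggml-base.en.bin:language=en,ametadata=mode=print" \
       -f null -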

Read more on:

ffmpeg

Whisper

Whisper support

Related news:

FFmpeg Develops Vulkan Hardware Acceleration For Apple ProRes RAW Codec

FFmpeg 8.0 Merges Vulkan AV1 Encoding & VP9 Decoding

FFmpeg Delivers Very Nice Performance Gains For Bwdif Deinterlacing With AVX-512