ffmpeg.c

  1. /*
  2. * Copyright (c) 2000-2003 Fabrice Bellard
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * multimedia converter based on the FFmpeg libraries
  23. */
  24. #include "config.h"
  25. #include <ctype.h>
  26. #include <string.h>
  27. #include <math.h>
  28. #include <stdlib.h>
  29. #include <errno.h>
  30. #include <limits.h>
  31. #include <stdatomic.h>
  32. #include <stdint.h>
  33. #if HAVE_IO_H
  34. #include <io.h>
  35. #endif
  36. #if HAVE_UNISTD_H
  37. #include <unistd.h>
  38. #endif
  39. #include "libavformat/avformat.h"
  40. #include "libavdevice/avdevice.h"
  41. #include "libswresample/swresample.h"
  42. #include "libavutil/opt.h"
  43. #include "libavutil/channel_layout.h"
  44. #include "libavutil/parseutils.h"
  45. #include "libavutil/samplefmt.h"
  46. #include "libavutil/fifo.h"
  47. #include "libavutil/hwcontext.h"
  48. #include "libavutil/internal.h"
  49. #include "libavutil/intreadwrite.h"
  50. #include "libavutil/dict.h"
  51. #include "libavutil/display.h"
  52. #include "libavutil/mathematics.h"
  53. #include "libavutil/pixdesc.h"
  54. #include "libavutil/avstring.h"
  55. #include "libavutil/libm.h"
  56. #include "libavutil/imgutils.h"
  57. #include "libavutil/timestamp.h"
  58. #include "libavutil/bprint.h"
  59. #include "libavutil/time.h"
  60. #include "libavutil/thread.h"
  61. #include "libavutil/threadmessage.h"
  62. #include "libavcodec/mathops.h"
  63. #include "libavformat/os_support.h"
  64. # include "libavfilter/avfilter.h"
  65. # include "libavfilter/buffersrc.h"
  66. # include "libavfilter/buffersink.h"
  67. #if HAVE_SYS_RESOURCE_H
  68. #include <sys/time.h>
  69. #include <sys/types.h>
  70. #include <sys/resource.h>
  71. #elif HAVE_GETPROCESSTIMES
  72. #include <windows.h>
  73. #endif
  74. #if HAVE_GETPROCESSMEMORYINFO
  75. #include <windows.h>
  76. #include <psapi.h>
  77. #endif
  78. #if HAVE_SETCONSOLECTRLHANDLER
  79. #include <windows.h>
  80. #endif
  81. #if HAVE_SYS_SELECT_H
  82. #include <sys/select.h>
  83. #endif
  84. #if HAVE_TERMIOS_H
  85. #include <fcntl.h>
  86. #include <sys/ioctl.h>
  87. #include <sys/time.h>
  88. #include <termios.h>
  89. #elif HAVE_KBHIT
  90. #include <conio.h>
  91. #endif
  92. #include <time.h>
  93. #include "ffmpeg.h"
  94. #include "cmdutils.h"
  95. #include "libavutil/avassert.h"
  96. const char program_name[] = "ffmpeg";
  97. const int program_birth_year = 2000;
  98. static FILE *vstats_file;
  99. const char *const forced_keyframes_const_names[] = {
  100. "n",
  101. "n_forced",
  102. "prev_forced_n",
  103. "prev_forced_t",
  104. "t",
  105. NULL
  106. };
  107. typedef struct BenchmarkTimeStamps {
  108. int64_t real_usec;
  109. int64_t user_usec;
  110. int64_t sys_usec;
  111. } BenchmarkTimeStamps;
  112. static void do_video_stats(OutputStream *ost, int frame_size);
  113. static BenchmarkTimeStamps get_benchmark_time_stamps(void);
  114. static int64_t getmaxrss(void);
  115. static int ifilter_has_all_input_formats(FilterGraph *fg);
  116. static int run_as_daemon = 0;
  117. static int nb_frames_dup = 0;
  118. static unsigned dup_warning = 1000;
  119. static int nb_frames_drop = 0;
  120. static int64_t decode_error_stat[2];
  121. static int want_sdp = 1;
  122. static BenchmarkTimeStamps current_time;
  123. AVIOContext *progress_avio = NULL;
  124. static uint8_t *subtitle_out;
  125. InputStream **input_streams = NULL;
  126. int nb_input_streams = 0;
  127. InputFile **input_files = NULL;
  128. int nb_input_files = 0;
  129. OutputStream **output_streams = NULL;
  130. int nb_output_streams = 0;
  131. OutputFile **output_files = NULL;
  132. int nb_output_files = 0;
  133. FilterGraph **filtergraphs;
  134. int nb_filtergraphs;
  135. #if HAVE_TERMIOS_H
  136. /* init terminal so that we can grab keys */
  137. static struct termios oldtty;
  138. static int restore_tty;
  139. #endif
  140. #if HAVE_THREADS
  141. static void free_input_threads(void);
  142. #endif
  143. /* sub2video hack:
  144. Convert subtitles to video with alpha to insert them in filter graphs.
  145. This is a temporary solution until libavfilter gets real subtitles support.
  146. */
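/* Reset the sub2video canvas: allocate a blank AV_PIX_FMT_RGB32 frame sized to
   the decoder dimensions (falling back to the configured sub2video size) and
   clear it to transparent black. */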
  147. static int sub2video_get_blank_frame(InputStream *ist)
  148. {
  149. int ret;
  150. AVFrame *frame = ist->sub2video.frame;
  151. av_frame_unref(frame);
  152. ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
  153. ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
  154. ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
  155. if ((ret = av_frame_get_buffer(frame, 32)) < 0)
  156. return ret;
  157. memset(frame->data[0], 0, frame->height * frame->linesize[0]);
  158. return 0;
  159. }
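/* Blit one paletted bitmap subtitle rectangle onto the RGB32 canvas, expanding
   palette indices to 32-bit pixels; non-bitmap rectangles and rectangles that
   do not fit inside the canvas are skipped with a warning. */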
  160. static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
  161. AVSubtitleRect *r)
  162. {
  163. uint32_t *pal, *dst2;
  164. uint8_t *src, *src2;
  165. int x, y;
  166. if (r->type != SUBTITLE_BITMAP) {
  167. av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
  168. return;
  169. }
  170. if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
  171. av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
  172. r->x, r->y, r->w, r->h, w, h
  173. );
  174. return;
  175. }
  176. dst += r->y * dst_linesize + r->x * 4;
  177. src = r->data[0];
  178. pal = (uint32_t *)r->data[1];
  179. for (y = 0; y < r->h; y++) {
  180. dst2 = (uint32_t *)dst;
  181. src2 = src;
  182. for (x = 0; x < r->w; x++)
  183. *(dst2++) = pal[*(src2++)];
  184. dst += dst_linesize;
  185. src += r->linesize[0];
  186. }
  187. }
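/* Stamp the current sub2video frame with the given pts and push a reference to
   it into every filtergraph input fed by this stream. */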
  188. static void sub2video_push_ref(InputStream *ist, int64_t pts)
  189. {
  190. AVFrame *frame = ist->sub2video.frame;
  191. int i;
  192. int ret;
  193. av_assert1(frame->data[0]);
  194. ist->sub2video.last_pts = frame->pts = pts;
  195. for (i = 0; i < ist->nb_filters; i++) {
  196. ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
  197. AV_BUFFERSRC_FLAG_KEEP_REF |
  198. AV_BUFFERSRC_FLAG_PUSH);
  199. if (ret != AVERROR_EOF && ret < 0)
  200. av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
  201. av_err2str(ret));
  202. }
  203. }
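/* Render the given subtitle (or a blank canvas if sub is NULL) onto the
   sub2video frame and push it to the filters, remembering the pts at which the
   rendering stops being valid. */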
  204. void sub2video_update(InputStream *ist, AVSubtitle *sub)
  205. {
  206. AVFrame *frame = ist->sub2video.frame;
207. uint8_t *dst;
  208. int dst_linesize;
  209. int num_rects, i;
  210. int64_t pts, end_pts;
  211. if (!frame)
  212. return;
  213. if (sub) {
  214. pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
  215. AV_TIME_BASE_Q, ist->st->time_base);
  216. end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
  217. AV_TIME_BASE_Q, ist->st->time_base);
  218. num_rects = sub->num_rects;
  219. } else {
  220. pts = ist->sub2video.end_pts;
  221. end_pts = INT64_MAX;
  222. num_rects = 0;
  223. }
  224. if (sub2video_get_blank_frame(ist) < 0) {
  225. av_log(ist->dec_ctx, AV_LOG_ERROR,
  226. "Impossible to get a blank canvas.\n");
  227. return;
  228. }
229. dst = frame->data[0];
  230. dst_linesize = frame->linesize[0];
  231. for (i = 0; i < num_rects; i++)
  232. sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
  233. sub2video_push_ref(ist, pts);
  234. ist->sub2video.end_pts = end_pts;
  235. }
  236. static void sub2video_heartbeat(InputStream *ist, int64_t pts)
  237. {
  238. InputFile *infile = input_files[ist->file_index];
  239. int i, j, nb_reqs;
  240. int64_t pts2;
  241. /* When a frame is read from a file, examine all sub2video streams in
  242. the same file and send the sub2video frame again. Otherwise, decoded
  243. video frames could be accumulating in the filter graph while a filter
  244. (possibly overlay) is desperately waiting for a subtitle frame. */
  245. for (i = 0; i < infile->nb_streams; i++) {
  246. InputStream *ist2 = input_streams[infile->ist_index + i];
  247. if (!ist2->sub2video.frame)
  248. continue;
  249. /* subtitles seem to be usually muxed ahead of other streams;
  250. if not, subtracting a larger time here is necessary */
  251. pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
  252. /* do not send the heartbeat frame if the subtitle is already ahead */
  253. if (pts2 <= ist2->sub2video.last_pts)
  254. continue;
  255. if (pts2 >= ist2->sub2video.end_pts ||
  256. (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
  257. sub2video_update(ist2, NULL);
  258. for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
  259. nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
  260. if (nb_reqs)
  261. sub2video_push_ref(ist2, pts2);
  262. }
  263. }
  264. static void sub2video_flush(InputStream *ist)
  265. {
  266. int i;
  267. int ret;
  268. if (ist->sub2video.end_pts < INT64_MAX)
  269. sub2video_update(ist, NULL);
  270. for (i = 0; i < ist->nb_filters; i++) {
  271. ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
  272. if (ret != AVERROR_EOF && ret < 0)
  273. av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
  274. }
  275. }
  276. /* end of sub2video hack */
  277. static void term_exit_sigsafe(void)
  278. {
  279. #if HAVE_TERMIOS_H
  280. if(restore_tty)
  281. tcsetattr (0, TCSANOW, &oldtty);
  282. #endif
  283. }
  284. void term_exit(void)
  285. {
  286. av_log(NULL, AV_LOG_QUIET, "%s", "");
  287. term_exit_sigsafe();
  288. }
  289. static volatile int received_sigterm = 0;
  290. static volatile int received_nb_signals = 0;
  291. static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
  292. static volatile int ffmpeg_exited = 0;
  293. static int main_return_code = 0;
  294. static void
  295. sigterm_handler(int sig)
  296. {
  297. int ret;
  298. received_sigterm = sig;
  299. received_nb_signals++;
  300. term_exit_sigsafe();
  301. if(received_nb_signals > 3) {
  302. ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
  303. strlen("Received > 3 system signals, hard exiting\n"));
  304. if (ret < 0) { /* Do nothing */ };
  305. exit(123);
  306. }
  307. }
  308. #if HAVE_SETCONSOLECTRLHANDLER
  309. static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
  310. {
  311. av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
  312. switch (fdwCtrlType)
  313. {
  314. case CTRL_C_EVENT:
  315. case CTRL_BREAK_EVENT:
  316. sigterm_handler(SIGINT);
  317. return TRUE;
  318. case CTRL_CLOSE_EVENT:
  319. case CTRL_LOGOFF_EVENT:
  320. case CTRL_SHUTDOWN_EVENT:
  321. sigterm_handler(SIGTERM);
  322. /* Basically, with these 3 events, when we return from this method the
323. process is hard terminated, so stall as long as we need to,
  324. to try and let the main thread(s) clean up and gracefully terminate
  325. (we have at most 5 seconds, but should be done far before that). */
  326. while (!ffmpeg_exited) {
  327. Sleep(0);
  328. }
  329. return TRUE;
  330. default:
  331. av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
  332. return FALSE;
  333. }
  334. }
  335. #endif
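/* Put the terminal into raw, non-echoing mode when stdin interaction is
   enabled and install the signal/console handlers used for clean shutdown. */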
  336. void term_init(void)
  337. {
  338. #if HAVE_TERMIOS_H
  339. if (!run_as_daemon && stdin_interaction) {
  340. struct termios tty;
  341. if (tcgetattr (0, &tty) == 0) {
  342. oldtty = tty;
  343. restore_tty = 1;
  344. tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
  345. |INLCR|IGNCR|ICRNL|IXON);
  346. tty.c_oflag |= OPOST;
  347. tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
  348. tty.c_cflag &= ~(CSIZE|PARENB);
  349. tty.c_cflag |= CS8;
  350. tty.c_cc[VMIN] = 1;
  351. tty.c_cc[VTIME] = 0;
  352. tcsetattr (0, TCSANOW, &tty);
  353. }
  354. signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
  355. }
  356. #endif
  357. signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
  358. signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
  359. #ifdef SIGXCPU
  360. signal(SIGXCPU, sigterm_handler);
  361. #endif
  362. #ifdef SIGPIPE
  363. signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
  364. #endif
  365. #if HAVE_SETCONSOLECTRLHANDLER
  366. SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
  367. #endif
  368. }
  369. /* read a key without blocking */
  370. static int read_key(void)
  371. {
  372. unsigned char ch;
  373. #if HAVE_TERMIOS_H
  374. int n = 1;
  375. struct timeval tv;
  376. fd_set rfds;
  377. FD_ZERO(&rfds);
  378. FD_SET(0, &rfds);
  379. tv.tv_sec = 0;
  380. tv.tv_usec = 0;
  381. n = select(1, &rfds, NULL, NULL, &tv);
  382. if (n > 0) {
  383. n = read(0, &ch, 1);
  384. if (n == 1)
  385. return ch;
  386. return n;
  387. }
  388. #elif HAVE_KBHIT
  389. # if HAVE_PEEKNAMEDPIPE
  390. static int is_pipe;
  391. static HANDLE input_handle;
  392. DWORD dw, nchars;
  393. if(!input_handle){
  394. input_handle = GetStdHandle(STD_INPUT_HANDLE);
  395. is_pipe = !GetConsoleMode(input_handle, &dw);
  396. }
  397. if (is_pipe) {
398. /* When running under a GUI, you will end up here. */
  399. if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
  400. // input pipe may have been closed by the program that ran ffmpeg
  401. return -1;
  402. }
  403. //Read it
  404. if(nchars != 0) {
  405. read(0, &ch, 1);
  406. return ch;
  407. }else{
  408. return -1;
  409. }
  410. }
  411. # endif
  412. if(kbhit())
  413. return(getch());
  414. #endif
  415. return -1;
  416. }
  417. static int decode_interrupt_cb(void *ctx)
  418. {
  419. return received_nb_signals > atomic_load(&transcode_init_done);
  420. }
  421. const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
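/* Free all global state (filtergraphs, output files/streams, input
   files/streams and miscellaneous buffers), close the vstats file and report
   how the run ended. */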
  422. static void ffmpeg_cleanup(int ret)
  423. {
  424. int i, j;
  425. if (do_benchmark) {
  426. int maxrss = getmaxrss() / 1024;
  427. av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
  428. }
  429. for (i = 0; i < nb_filtergraphs; i++) {
  430. FilterGraph *fg = filtergraphs[i];
  431. avfilter_graph_free(&fg->graph);
  432. for (j = 0; j < fg->nb_inputs; j++) {
  433. while (av_fifo_size(fg->inputs[j]->frame_queue)) {
  434. AVFrame *frame;
  435. av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
  436. sizeof(frame), NULL);
  437. av_frame_free(&frame);
  438. }
  439. av_fifo_freep(&fg->inputs[j]->frame_queue);
  440. if (fg->inputs[j]->ist->sub2video.sub_queue) {
  441. while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
  442. AVSubtitle sub;
  443. av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
  444. &sub, sizeof(sub), NULL);
  445. avsubtitle_free(&sub);
  446. }
  447. av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
  448. }
  449. av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
  450. av_freep(&fg->inputs[j]->name);
  451. av_freep(&fg->inputs[j]);
  452. }
  453. av_freep(&fg->inputs);
  454. for (j = 0; j < fg->nb_outputs; j++) {
  455. av_freep(&fg->outputs[j]->name);
  456. av_freep(&fg->outputs[j]->formats);
  457. av_freep(&fg->outputs[j]->channel_layouts);
  458. av_freep(&fg->outputs[j]->sample_rates);
  459. av_freep(&fg->outputs[j]);
  460. }
  461. av_freep(&fg->outputs);
  462. av_freep(&fg->graph_desc);
  463. av_freep(&filtergraphs[i]);
  464. }
  465. av_freep(&filtergraphs);
  466. av_freep(&subtitle_out);
  467. /* close files */
  468. for (i = 0; i < nb_output_files; i++) {
  469. OutputFile *of = output_files[i];
  470. AVFormatContext *s;
  471. if (!of)
  472. continue;
  473. s = of->ctx;
  474. if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
  475. avio_closep(&s->pb);
  476. avformat_free_context(s);
  477. av_dict_free(&of->opts);
  478. av_freep(&output_files[i]);
  479. }
  480. for (i = 0; i < nb_output_streams; i++) {
  481. OutputStream *ost = output_streams[i];
  482. if (!ost)
  483. continue;
  484. for (j = 0; j < ost->nb_bitstream_filters; j++)
  485. av_bsf_free(&ost->bsf_ctx[j]);
  486. av_freep(&ost->bsf_ctx);
  487. av_frame_free(&ost->filtered_frame);
  488. av_frame_free(&ost->last_frame);
  489. av_dict_free(&ost->encoder_opts);
  490. av_freep(&ost->forced_keyframes);
  491. av_expr_free(ost->forced_keyframes_pexpr);
  492. av_freep(&ost->avfilter);
  493. av_freep(&ost->logfile_prefix);
  494. av_freep(&ost->audio_channels_map);
  495. ost->audio_channels_mapped = 0;
  496. av_dict_free(&ost->sws_dict);
  497. av_dict_free(&ost->swr_opts);
  498. avcodec_free_context(&ost->enc_ctx);
  499. avcodec_parameters_free(&ost->ref_par);
  500. if (ost->muxing_queue) {
  501. while (av_fifo_size(ost->muxing_queue)) {
  502. AVPacket pkt;
  503. av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
  504. av_packet_unref(&pkt);
  505. }
  506. av_fifo_freep(&ost->muxing_queue);
  507. }
  508. av_freep(&output_streams[i]);
  509. }
  510. #if HAVE_THREADS
  511. free_input_threads();
  512. #endif
  513. for (i = 0; i < nb_input_files; i++) {
  514. avformat_close_input(&input_files[i]->ctx);
  515. av_freep(&input_files[i]);
  516. }
  517. for (i = 0; i < nb_input_streams; i++) {
  518. InputStream *ist = input_streams[i];
  519. av_frame_free(&ist->decoded_frame);
  520. av_frame_free(&ist->filter_frame);
  521. av_dict_free(&ist->decoder_opts);
  522. avsubtitle_free(&ist->prev_sub.subtitle);
  523. av_frame_free(&ist->sub2video.frame);
  524. av_freep(&ist->filters);
  525. av_freep(&ist->hwaccel_device);
  526. av_freep(&ist->dts_buffer);
  527. avcodec_free_context(&ist->dec_ctx);
  528. av_freep(&input_streams[i]);
  529. }
  530. if (vstats_file) {
  531. if (fclose(vstats_file))
  532. av_log(NULL, AV_LOG_ERROR,
  533. "Error closing vstats file, loss of information possible: %s\n",
  534. av_err2str(AVERROR(errno)));
  535. }
  536. av_freep(&vstats_filename);
  537. av_freep(&input_streams);
  538. av_freep(&input_files);
  539. av_freep(&output_streams);
  540. av_freep(&output_files);
  541. uninit_opts();
  542. avformat_network_deinit();
  543. if (received_sigterm) {
  544. av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
  545. (int) received_sigterm);
  546. } else if (ret && atomic_load(&transcode_init_done)) {
  547. av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
  548. }
  549. term_exit();
  550. ffmpeg_exited = 1;
  551. }
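/* Remove from *a every entry whose key also appears in b. */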
  552. void remove_avoptions(AVDictionary **a, AVDictionary *b)
  553. {
  554. AVDictionaryEntry *t = NULL;
  555. while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
  556. av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
  557. }
  558. }
  559. void assert_avoptions(AVDictionary *m)
  560. {
  561. AVDictionaryEntry *t;
  562. if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
  563. av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
  564. exit_program(1);
  565. }
  566. }
  567. static void abort_codec_experimental(AVCodec *c, int encoder)
  568. {
  569. exit_program(1);
  570. }
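/* With -benchmark_all, log the user/sys/real time spent since the previous
   call under the given label (when one is supplied) and reset the reference
   timestamps. */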
  571. static void update_benchmark(const char *fmt, ...)
  572. {
  573. if (do_benchmark_all) {
  574. BenchmarkTimeStamps t = get_benchmark_time_stamps();
  575. va_list va;
  576. char buf[1024];
  577. if (fmt) {
  578. va_start(va, fmt);
  579. vsnprintf(buf, sizeof(buf), fmt, va);
  580. va_end(va);
  581. av_log(NULL, AV_LOG_INFO,
  582. "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
  583. t.user_usec - current_time.user_usec,
  584. t.sys_usec - current_time.sys_usec,
  585. t.real_usec - current_time.real_usec, buf);
  586. }
  587. current_time = t;
  588. }
  589. }
  590. static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
  591. {
  592. int i;
  593. for (i = 0; i < nb_output_streams; i++) {
  594. OutputStream *ost2 = output_streams[i];
  595. ost2->finished |= ost == ost2 ? this_stream : others;
  596. }
  597. }
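/* Send one packet to the muxer: enforce the -frames limit, buffer the packet
   while the muxer header has not been written yet, rescale and sanitize its
   timestamps, then hand it to av_interleaved_write_frame(). */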
  598. static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
  599. {
  600. AVFormatContext *s = of->ctx;
  601. AVStream *st = ost->st;
  602. int ret;
  603. /*
  604. * Audio encoders may split the packets -- #frames in != #packets out.
  605. * But there is no reordering, so we can limit the number of output packets
  606. * by simply dropping them here.
  607. * Counting encoded video frames needs to be done separately because of
  608. * reordering, see do_video_out().
  609. * Do not count the packet when unqueued because it has been counted when queued.
  610. */
  611. if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
  612. if (ost->frame_number >= ost->max_frames) {
  613. av_packet_unref(pkt);
  614. return;
  615. }
  616. ost->frame_number++;
  617. }
  618. if (!of->header_written) {
  619. AVPacket tmp_pkt = {0};
  620. /* the muxer is not initialized yet, buffer the packet */
  621. if (!av_fifo_space(ost->muxing_queue)) {
  622. int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
  623. ost->max_muxing_queue_size);
  624. if (new_size <= av_fifo_size(ost->muxing_queue)) {
  625. av_log(NULL, AV_LOG_ERROR,
  626. "Too many packets buffered for output stream %d:%d.\n",
  627. ost->file_index, ost->st->index);
  628. exit_program(1);
  629. }
  630. ret = av_fifo_realloc2(ost->muxing_queue, new_size);
  631. if (ret < 0)
  632. exit_program(1);
  633. }
  634. ret = av_packet_make_refcounted(pkt);
  635. if (ret < 0)
  636. exit_program(1);
  637. av_packet_move_ref(&tmp_pkt, pkt);
  638. av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
  639. return;
  640. }
  641. if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
  642. (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
  643. pkt->pts = pkt->dts = AV_NOPTS_VALUE;
  644. if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
  645. int i;
  646. uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
  647. NULL);
  648. ost->quality = sd ? AV_RL32(sd) : -1;
  649. ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
  650. for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
  651. if (sd && i < sd[5])
  652. ost->error[i] = AV_RL64(sd + 8 + 8*i);
  653. else
  654. ost->error[i] = -1;
  655. }
  656. if (ost->frame_rate.num && ost->is_cfr) {
  657. if (pkt->duration > 0)
  658. av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
  659. pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
  660. ost->mux_timebase);
  661. }
  662. }
  663. av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
  664. if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
  665. if (pkt->dts != AV_NOPTS_VALUE &&
  666. pkt->pts != AV_NOPTS_VALUE &&
  667. pkt->dts > pkt->pts) {
  668. av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
  669. pkt->dts, pkt->pts,
  670. ost->file_index, ost->st->index);
  671. pkt->pts =
  672. pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
  673. - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
  674. - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
  675. }
  676. if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
  677. pkt->dts != AV_NOPTS_VALUE &&
  678. !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
  679. ost->last_mux_dts != AV_NOPTS_VALUE) {
  680. int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
  681. if (pkt->dts < max) {
  682. int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
  683. av_log(s, loglevel, "Non-monotonous DTS in output stream "
  684. "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
  685. ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
  686. if (exit_on_error) {
  687. av_log(NULL, AV_LOG_FATAL, "aborting.\n");
  688. exit_program(1);
  689. }
  690. av_log(s, loglevel, "changing to %"PRId64". This may result "
  691. "in incorrect timestamps in the output file.\n",
  692. max);
  693. if (pkt->pts >= pkt->dts)
  694. pkt->pts = FFMAX(pkt->pts, max);
  695. pkt->dts = max;
  696. }
  697. }
  698. }
  699. ost->last_mux_dts = pkt->dts;
  700. ost->data_size += pkt->size;
  701. ost->packets_written++;
  702. pkt->stream_index = ost->index;
  703. if (debug_ts) {
  704. av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
  705. "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
  706. av_get_media_type_string(ost->enc_ctx->codec_type),
  707. av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
  708. av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
  709. pkt->size
  710. );
  711. }
  712. ret = av_interleaved_write_frame(s, pkt);
  713. if (ret < 0) {
  714. print_error("av_interleaved_write_frame()", ret);
  715. main_return_code = 1;
  716. close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
  717. }
  718. av_packet_unref(pkt);
  719. }
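/* Mark the stream's encoder as finished; with -shortest, also clamp the file's
   recording time so the remaining streams stop at the same point. */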
  720. static void close_output_stream(OutputStream *ost)
  721. {
  722. OutputFile *of = output_files[ost->file_index];
  723. ost->finished |= ENCODER_FINISHED;
  724. if (of->shortest) {
  725. int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
  726. of->recording_time = FFMIN(of->recording_time, end);
  727. }
  728. }
  729. /*
  730. * Send a single packet to the output, applying any bitstream filters
  731. * associated with the output stream. This may result in any number
  732. * of packets actually being written, depending on what bitstream
  733. * filters are applied. The supplied packet is consumed and will be
  734. * blank (as if newly-allocated) when this function returns.
  735. *
  736. * If eof is set, instead indicate EOF to all bitstream filters and
  737. * therefore flush any delayed packets to the output. A blank packet
  738. * must be supplied in this case.
  739. */
  740. static void output_packet(OutputFile *of, AVPacket *pkt,
  741. OutputStream *ost, int eof)
  742. {
  743. int ret = 0;
  744. /* apply the output bitstream filters, if any */
  745. if (ost->nb_bitstream_filters) {
  746. int idx;
  747. ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
  748. if (ret < 0)
  749. goto finish;
  750. eof = 0;
  751. idx = 1;
  752. while (idx) {
  753. /* get a packet from the previous filter up the chain */
  754. ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
  755. if (ret == AVERROR(EAGAIN)) {
  756. ret = 0;
  757. idx--;
  758. continue;
  759. } else if (ret == AVERROR_EOF) {
  760. eof = 1;
  761. } else if (ret < 0)
  762. goto finish;
  763. /* send it to the next filter down the chain or to the muxer */
  764. if (idx < ost->nb_bitstream_filters) {
  765. ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
  766. if (ret < 0)
  767. goto finish;
  768. idx++;
  769. eof = 0;
  770. } else if (eof)
  771. goto finish;
  772. else
  773. write_packet(of, pkt, ost, 0);
  774. }
  775. } else if (!eof)
  776. write_packet(of, pkt, ost, 0);
  777. finish:
  778. if (ret < 0 && ret != AVERROR_EOF) {
  779. av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
  780. "packet for stream #%d:%d.\n", ost->file_index, ost->index);
  781. if(exit_on_error)
  782. exit_program(1);
  783. }
  784. }
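/* Return 0 and close the stream once its next timestamp passes the output
   file's recording time (-t), 1 otherwise. */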
  785. static int check_recording_time(OutputStream *ost)
  786. {
  787. OutputFile *of = output_files[ost->file_index];
  788. if (of->recording_time != INT64_MAX &&
  789. av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
  790. AV_TIME_BASE_Q) >= 0) {
  791. close_output_stream(ost);
  792. return 0;
  793. }
  794. return 1;
  795. }
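/* Encode one audio frame: derive its pts from the sync counter if needed, send
   it to the encoder and drain every packet produced into the muxer. */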
  796. static void do_audio_out(OutputFile *of, OutputStream *ost,
  797. AVFrame *frame)
  798. {
  799. AVCodecContext *enc = ost->enc_ctx;
  800. AVPacket pkt;
  801. int ret;
  802. av_init_packet(&pkt);
  803. pkt.data = NULL;
  804. pkt.size = 0;
  805. if (!check_recording_time(ost))
  806. return;
  807. if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
  808. frame->pts = ost->sync_opts;
  809. ost->sync_opts = frame->pts + frame->nb_samples;
  810. ost->samples_encoded += frame->nb_samples;
  811. ost->frames_encoded++;
  812. av_assert0(pkt.size || !pkt.data);
  813. update_benchmark(NULL);
  814. if (debug_ts) {
  815. av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
  816. "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
  817. av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
  818. enc->time_base.num, enc->time_base.den);
  819. }
  820. ret = avcodec_send_frame(enc, frame);
  821. if (ret < 0)
  822. goto error;
  823. while (1) {
  824. ret = avcodec_receive_packet(enc, &pkt);
  825. if (ret == AVERROR(EAGAIN))
  826. break;
  827. if (ret < 0)
  828. goto error;
  829. update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
  830. av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
  831. if (debug_ts) {
  832. av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
  833. "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
  834. av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
  835. av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
  836. }
  837. output_packet(of, &pkt, ost, 0);
  838. }
  839. return;
  840. error:
  841. av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
  842. exit_program(1);
  843. }
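/* Encode one subtitle: shift its pts to honor -ss and -t, then emit one packet
   (two for DVB subtitles, which need a second packet to clear the display). */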
  844. static void do_subtitle_out(OutputFile *of,
  845. OutputStream *ost,
  846. AVSubtitle *sub)
  847. {
  848. int subtitle_out_max_size = 1024 * 1024;
  849. int subtitle_out_size, nb, i;
  850. AVCodecContext *enc;
  851. AVPacket pkt;
  852. int64_t pts;
  853. if (sub->pts == AV_NOPTS_VALUE) {
  854. av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
  855. if (exit_on_error)
  856. exit_program(1);
  857. return;
  858. }
  859. enc = ost->enc_ctx;
  860. if (!subtitle_out) {
  861. subtitle_out = av_malloc(subtitle_out_max_size);
  862. if (!subtitle_out) {
  863. av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
  864. exit_program(1);
  865. }
  866. }
867. /* Note: DVB subtitles need one packet to draw them and another
868. packet to clear them. */
  869. /* XXX: signal it in the codec context ? */
  870. if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
  871. nb = 2;
  872. else
  873. nb = 1;
  874. /* shift timestamp to honor -ss and make check_recording_time() work with -t */
  875. pts = sub->pts;
  876. if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
  877. pts -= output_files[ost->file_index]->start_time;
  878. for (i = 0; i < nb; i++) {
  879. unsigned save_num_rects = sub->num_rects;
  880. ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
  881. if (!check_recording_time(ost))
  882. return;
  883. sub->pts = pts;
  884. // start_display_time is required to be 0
  885. sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
  886. sub->end_display_time -= sub->start_display_time;
  887. sub->start_display_time = 0;
  888. if (i == 1)
  889. sub->num_rects = 0;
  890. ost->frames_encoded++;
  891. subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
  892. subtitle_out_max_size, sub);
  893. if (i == 1)
  894. sub->num_rects = save_num_rects;
  895. if (subtitle_out_size < 0) {
  896. av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
  897. exit_program(1);
  898. }
  899. av_init_packet(&pkt);
  900. pkt.data = subtitle_out;
  901. pkt.size = subtitle_out_size;
  902. pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
  903. pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
  904. if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
  905. /* XXX: the pts correction is handled here. Maybe handling
  906. it in the codec would be better */
  907. if (i == 0)
  908. pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
  909. else
  910. pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
  911. }
  912. pkt.dts = pkt.pts;
  913. output_packet(of, &pkt, ost, 0);
  914. }
  915. }
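/* Encode one video frame: decide how many times it must be duplicated or
   dropped to satisfy the selected vsync method, apply forced keyframes and
   field order, then send the resulting frame(s) to the encoder. */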
  916. static void do_video_out(OutputFile *of,
  917. OutputStream *ost,
  918. AVFrame *next_picture,
  919. double sync_ipts)
  920. {
  921. int ret, format_video_sync;
  922. AVPacket pkt;
  923. AVCodecContext *enc = ost->enc_ctx;
  924. AVCodecParameters *mux_par = ost->st->codecpar;
  925. AVRational frame_rate;
  926. int nb_frames, nb0_frames, i;
  927. double delta, delta0;
  928. double duration = 0;
  929. int frame_size = 0;
  930. InputStream *ist = NULL;
  931. AVFilterContext *filter = ost->filter->filter;
  932. if (ost->source_index >= 0)
  933. ist = input_streams[ost->source_index];
  934. frame_rate = av_buffersink_get_frame_rate(filter);
  935. if (frame_rate.num > 0 && frame_rate.den > 0)
  936. duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
  937. if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
  938. duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
  939. if (!ost->filters_script &&
  940. !ost->filters &&
  941. (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
  942. next_picture &&
  943. ist &&
  944. lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
  945. duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
  946. }
  947. if (!next_picture) {
  948. //end, flushing
  949. nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
  950. ost->last_nb0_frames[1],
  951. ost->last_nb0_frames[2]);
  952. } else {
  953. delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
  954. delta = delta0 + duration;
  955. /* by default, we output a single frame */
  956. nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
  957. nb_frames = 1;
  958. format_video_sync = video_sync_method;
  959. if (format_video_sync == VSYNC_AUTO) {
  960. if(!strcmp(of->ctx->oformat->name, "avi")) {
  961. format_video_sync = VSYNC_VFR;
  962. } else
  963. format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
  964. if ( ist
  965. && format_video_sync == VSYNC_CFR
  966. && input_files[ist->file_index]->ctx->nb_streams == 1
  967. && input_files[ist->file_index]->input_ts_offset == 0) {
  968. format_video_sync = VSYNC_VSCFR;
  969. }
  970. if (format_video_sync == VSYNC_CFR && copy_ts) {
  971. format_video_sync = VSYNC_VSCFR;
  972. }
  973. }
  974. ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
  975. if (delta0 < 0 &&
  976. delta > 0 &&
  977. format_video_sync != VSYNC_PASSTHROUGH &&
  978. format_video_sync != VSYNC_DROP) {
  979. if (delta0 < -0.6) {
  980. av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
  981. } else
  982. av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
  983. sync_ipts = ost->sync_opts;
  984. duration += delta0;
  985. delta0 = 0;
  986. }
  987. switch (format_video_sync) {
  988. case VSYNC_VSCFR:
  989. if (ost->frame_number == 0 && delta0 >= 0.5) {
  990. av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
  991. delta = duration;
  992. delta0 = 0;
  993. ost->sync_opts = lrint(sync_ipts);
  994. }
  995. case VSYNC_CFR:
  996. // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
  997. if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
  998. nb_frames = 0;
  999. } else if (delta < -1.1)
  1000. nb_frames = 0;
  1001. else if (delta > 1.1) {
  1002. nb_frames = lrintf(delta);
  1003. if (delta0 > 1.1)
  1004. nb0_frames = lrintf(delta0 - 0.6);
  1005. }
  1006. break;
  1007. case VSYNC_VFR:
  1008. if (delta <= -0.6)
  1009. nb_frames = 0;
  1010. else if (delta > 0.6)
  1011. ost->sync_opts = lrint(sync_ipts);
  1012. break;
  1013. case VSYNC_DROP:
  1014. case VSYNC_PASSTHROUGH:
  1015. ost->sync_opts = lrint(sync_ipts);
  1016. break;
  1017. default:
  1018. av_assert0(0);
  1019. }
  1020. }
  1021. nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
  1022. nb0_frames = FFMIN(nb0_frames, nb_frames);
  1023. memmove(ost->last_nb0_frames + 1,
  1024. ost->last_nb0_frames,
  1025. sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
  1026. ost->last_nb0_frames[0] = nb0_frames;
  1027. if (nb0_frames == 0 && ost->last_dropped) {
  1028. nb_frames_drop++;
  1029. av_log(NULL, AV_LOG_VERBOSE,
  1030. "*** dropping frame %d from stream %d at ts %"PRId64"\n",
  1031. ost->frame_number, ost->st->index, ost->last_frame->pts);
  1032. }
  1033. if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
  1034. if (nb_frames > dts_error_threshold * 30) {
  1035. av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
  1036. nb_frames_drop++;
  1037. return;
  1038. }
  1039. nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
  1040. av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
  1041. if (nb_frames_dup > dup_warning) {
  1042. av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
  1043. dup_warning *= 10;
  1044. }
  1045. }
  1046. ost->last_dropped = nb_frames == nb0_frames && next_picture;
1047. /* duplicate frames if needed */
  1048. for (i = 0; i < nb_frames; i++) {
  1049. AVFrame *in_picture;
  1050. int forced_keyframe = 0;
  1051. double pts_time;
  1052. av_init_packet(&pkt);
  1053. pkt.data = NULL;
  1054. pkt.size = 0;
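// The first nb0_frames iterations re-send the previously emitted frame; the rest encode the new picture.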
  1055. if (i < nb0_frames && ost->last_frame) {
  1056. in_picture = ost->last_frame;
  1057. } else
  1058. in_picture = next_picture;
  1059. if (!in_picture)
  1060. return;
  1061. in_picture->pts = ost->sync_opts;
  1062. if (!check_recording_time(ost))
  1063. return;
  1064. if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
  1065. ost->top_field_first >= 0)
  1066. in_picture->top_field_first = !!ost->top_field_first;
  1067. if (in_picture->interlaced_frame) {
  1068. if (enc->codec->id == AV_CODEC_ID_MJPEG)
  1069. mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
  1070. else
  1071. mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
  1072. } else
  1073. mux_par->field_order = AV_FIELD_PROGRESSIVE;
  1074. in_picture->quality = enc->global_quality;
  1075. in_picture->pict_type = 0;
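// A keyframe can be forced in three ways: an explicit -force_key_frames timestamp list,
// a -force_key_frames expression evaluated per frame, or "source", which mirrors the
// keyframe flags of the input frames.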
  1076. if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
  1077. in_picture->pts != AV_NOPTS_VALUE)
  1078. ost->forced_kf_ref_pts = in_picture->pts;
  1079. pts_time = in_picture->pts != AV_NOPTS_VALUE ?
  1080. (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
  1081. if (ost->forced_kf_index < ost->forced_kf_count &&
  1082. in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
  1083. ost->forced_kf_index++;
  1084. forced_keyframe = 1;
  1085. } else if (ost->forced_keyframes_pexpr) {
  1086. double res;
  1087. ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
  1088. res = av_expr_eval(ost->forced_keyframes_pexpr,
  1089. ost->forced_keyframes_expr_const_values, NULL);
  1090. ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
  1091. ost->forced_keyframes_expr_const_values[FKF_N],
  1092. ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
  1093. ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
  1094. ost->forced_keyframes_expr_const_values[FKF_T],
  1095. ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
  1096. res);
  1097. if (res) {
  1098. forced_keyframe = 1;
  1099. ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
  1100. ost->forced_keyframes_expr_const_values[FKF_N];
  1101. ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
  1102. ost->forced_keyframes_expr_const_values[FKF_T];
  1103. ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
  1104. }
  1105. ost->forced_keyframes_expr_const_values[FKF_N] += 1;
  1106. } else if ( ost->forced_keyframes
  1107. && !strncmp(ost->forced_keyframes, "source", 6)
  1108. && in_picture->key_frame==1) {
  1109. forced_keyframe = 1;
  1110. }
  1111. if (forced_keyframe) {
  1112. in_picture->pict_type = AV_PICTURE_TYPE_I;
  1113. av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
  1114. }
  1115. update_benchmark(NULL);
  1116. if (debug_ts) {
  1117. av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
  1118. "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
  1119. av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
  1120. enc->time_base.num, enc->time_base.den);
  1121. }
  1122. ost->frames_encoded++;
  1123. ret = avcodec_send_frame(enc, in_picture);
  1124. if (ret < 0)
  1125. goto error;
  1126. // Make sure Closed Captions will not be duplicated
  1127. av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
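// Drain every packet the encoder has ready; AVERROR(EAGAIN) just means it needs more input frames.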
  1128. while (1) {
  1129. ret = avcodec_receive_packet(enc, &pkt);
  1130. update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
  1131. if (ret == AVERROR(EAGAIN))
  1132. break;
  1133. if (ret < 0)
  1134. goto error;
  1135. if (debug_ts) {
  1136. av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
  1137. "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
  1138. av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
  1139. av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
  1140. }
  1141. if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
  1142. pkt.pts = ost->sync_opts;
  1143. av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
  1144. if (debug_ts) {
  1145. av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
  1146. "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
  1147. av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
  1148. av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
  1149. }
  1150. frame_size = pkt.size;
  1151. output_packet(of, &pkt, ost, 0);
  1152. /* if two pass, output log */
  1153. if (ost->logfile && enc->stats_out) {
  1154. fprintf(ost->logfile, "%s", enc->stats_out);
  1155. }
  1156. }
  1157. ost->sync_opts++;
  1158. /*
  1159. * For video, number of frames in == number of packets out.
  1160. * But there may be reordering, so we can't throw away frames on encoder
1161. * flush; we need to limit them here, before they go into the encoder.
  1162. */
  1163. ost->frame_number++;
  1164. if (vstats_filename && frame_size)
  1165. do_video_stats(ost, frame_size);
  1166. }
  1167. if (!ost->last_frame)
  1168. ost->last_frame = av_frame_alloc();
  1169. av_frame_unref(ost->last_frame);
  1170. if (next_picture && ost->last_frame)
  1171. av_frame_ref(ost->last_frame, next_picture);
  1172. else
  1173. av_frame_free(&ost->last_frame);
  1174. return;
  1175. error:
  1176. av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
  1177. exit_program(1);
  1178. }
  1179. static double psnr(double d)
  1180. {
  1181. return -10.0 * log10(d);
  1182. }
  1183. static void do_video_stats(OutputStream *ost, int frame_size)
  1184. {
  1185. AVCodecContext *enc;
  1186. int frame_number;
  1187. double ti1, bitrate, avg_bitrate;
  1188. /* this is executed just the first time do_video_stats is called */
  1189. if (!vstats_file) {
  1190. vstats_file = fopen(vstats_filename, "w");
  1191. if (!vstats_file) {
  1192. perror("fopen");
  1193. exit_program(1);
  1194. }
  1195. }
  1196. enc = ost->enc_ctx;
  1197. if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
  1198. frame_number = ost->st->nb_frames;
  1199. if (vstats_version <= 1) {
  1200. fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
  1201. ost->quality / (float)FF_QP2LAMBDA);
  1202. } else {
  1203. fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
  1204. ost->quality / (float)FF_QP2LAMBDA);
  1205. }
  1206. if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
  1207. fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
  1208. fprintf(vstats_file,"f_size= %6d ", frame_size);
  1209. /* compute pts value */
  1210. ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
  1211. if (ti1 < 0.01)
  1212. ti1 = 0.01;
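// bitrate is a rough per-frame rate (frame bits over one encoder time-base tick);
// avg_bitrate divides all bytes written so far by the stream's end timestamp.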
  1213. bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
  1214. avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
  1215. fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
  1216. (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
  1217. fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
  1218. }
  1219. }
  1220. static int init_output_stream(OutputStream *ost, char *error, int error_len);
  1221. static void finish_output_stream(OutputStream *ost)
  1222. {
  1223. OutputFile *of = output_files[ost->file_index];
  1224. int i;
  1225. ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
  1226. if (of->shortest) {
  1227. for (i = 0; i < of->ctx->nb_streams; i++)
  1228. output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
  1229. }
  1230. }
  1231. /**
  1232. * Get and encode new output from any of the filtergraphs, without causing
  1233. * activity.
  1234. *
  1235. * @return 0 for success, <0 for severe errors
  1236. */
  1237. static int reap_filters(int flush)
  1238. {
  1239. AVFrame *filtered_frame = NULL;
  1240. int i;
  1241. /* Reap all buffers present in the buffer sinks */
  1242. for (i = 0; i < nb_output_streams; i++) {
  1243. OutputStream *ost = output_streams[i];
  1244. OutputFile *of = output_files[ost->file_index];
  1245. AVFilterContext *filter;
  1246. AVCodecContext *enc = ost->enc_ctx;
  1247. int ret = 0;
  1248. if (!ost->filter || !ost->filter->graph->graph)
  1249. continue;
  1250. filter = ost->filter->filter;
  1251. if (!ost->initialized) {
  1252. char error[1024] = "";
  1253. ret = init_output_stream(ost, error, sizeof(error));
  1254. if (ret < 0) {
  1255. av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
  1256. ost->file_index, ost->index, error);
  1257. exit_program(1);
  1258. }
  1259. }
  1260. if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
  1261. return AVERROR(ENOMEM);
  1262. }
  1263. filtered_frame = ost->filtered_frame;
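// Pull every frame already buffered in this sink without requesting new filtering
// (AV_BUFFERSINK_FLAG_NO_REQUEST); EAGAIN or EOF ends the loop.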
  1264. while (1) {
  1265. double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
  1266. ret = av_buffersink_get_frame_flags(filter, filtered_frame,
  1267. AV_BUFFERSINK_FLAG_NO_REQUEST);
  1268. if (ret < 0) {
  1269. if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
  1270. av_log(NULL, AV_LOG_WARNING,
  1271. "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
  1272. } else if (flush && ret == AVERROR_EOF) {
  1273. if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
  1274. do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
  1275. }
  1276. break;
  1277. }
  1278. if (ost->finished) {
  1279. av_frame_unref(filtered_frame);
  1280. continue;
  1281. }
  1282. if (filtered_frame->pts != AV_NOPTS_VALUE) {
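// Rescale with a temporarily enlarged denominator so fractional precision survives,
// then divide it back out to obtain a floating-point pts in the encoder time base.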
  1283. int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
  1284. AVRational filter_tb = av_buffersink_get_time_base(filter);
  1285. AVRational tb = enc->time_base;
  1286. int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
  1287. tb.den <<= extra_bits;
  1288. float_pts =
  1289. av_rescale_q(filtered_frame->pts, filter_tb, tb) -
  1290. av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
  1291. float_pts /= 1 << extra_bits;
1292. // avoid exact midpoints to reduce the chance of rounding differences; this can be removed once the fps code is changed to work with integers
  1293. float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
  1294. filtered_frame->pts =
  1295. av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
  1296. av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
  1297. }
  1298. switch (av_buffersink_get_type(filter)) {
  1299. case AVMEDIA_TYPE_VIDEO:
  1300. if (!ost->frame_aspect_ratio.num)
  1301. enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
  1302. if (debug_ts) {
  1303. av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
  1304. av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
  1305. float_pts,
  1306. enc->time_base.num, enc->time_base.den);
  1307. }
  1308. do_video_out(of, ost, filtered_frame, float_pts);
  1309. break;
  1310. case AVMEDIA_TYPE_AUDIO:
  1311. if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
  1312. enc->channels != filtered_frame->channels) {
  1313. av_log(NULL, AV_LOG_ERROR,
  1314. "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
  1315. break;
  1316. }
  1317. do_audio_out(of, ost, filtered_frame);
  1318. break;
  1319. default:
  1320. // TODO support subtitle filters
  1321. av_assert0(0);
  1322. }
  1323. av_frame_unref(filtered_frame);
  1324. }
  1325. }
  1326. return 0;
  1327. }
  1328. static void print_final_stats(int64_t total_size)
  1329. {
  1330. uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
  1331. uint64_t subtitle_size = 0;
  1332. uint64_t data_size = 0;
  1333. float percent = -1.0;
  1334. int i, j;
  1335. int pass1_used = 1;
  1336. for (i = 0; i < nb_output_streams; i++) {
  1337. OutputStream *ost = output_streams[i];
  1338. switch (ost->enc_ctx->codec_type) {
  1339. case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
  1340. case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
  1341. case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
  1342. default: other_size += ost->data_size; break;
  1343. }
  1344. extra_size += ost->enc_ctx->extradata_size;
  1345. data_size += ost->data_size;
  1346. if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
  1347. != AV_CODEC_FLAG_PASS1)
  1348. pass1_used = 0;
  1349. }
  1350. if (data_size && total_size>0 && total_size >= data_size)
  1351. percent = 100.0 * (total_size - data_size) / data_size;
  1352. av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
  1353. video_size / 1024.0,
  1354. audio_size / 1024.0,
  1355. subtitle_size / 1024.0,
  1356. other_size / 1024.0,
  1357. extra_size / 1024.0);
  1358. if (percent >= 0.0)
  1359. av_log(NULL, AV_LOG_INFO, "%f%%", percent);
  1360. else
  1361. av_log(NULL, AV_LOG_INFO, "unknown");
  1362. av_log(NULL, AV_LOG_INFO, "\n");
  1363. /* print verbose per-stream stats */
  1364. for (i = 0; i < nb_input_files; i++) {
  1365. InputFile *f = input_files[i];
  1366. uint64_t total_packets = 0, total_size = 0;
  1367. av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
  1368. i, f->ctx->url);
  1369. for (j = 0; j < f->nb_streams; j++) {
  1370. InputStream *ist = input_streams[f->ist_index + j];
  1371. enum AVMediaType type = ist->dec_ctx->codec_type;
  1372. total_size += ist->data_size;
  1373. total_packets += ist->nb_packets;
  1374. av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
  1375. i, j, media_type_string(type));
  1376. av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
  1377. ist->nb_packets, ist->data_size);
  1378. if (ist->decoding_needed) {
  1379. av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
  1380. ist->frames_decoded);
  1381. if (type == AVMEDIA_TYPE_AUDIO)
  1382. av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
  1383. av_log(NULL, AV_LOG_VERBOSE, "; ");
  1384. }
  1385. av_log(NULL, AV_LOG_VERBOSE, "\n");
  1386. }
  1387. av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
  1388. total_packets, total_size);
  1389. }
  1390. for (i = 0; i < nb_output_files; i++) {
  1391. OutputFile *of = output_files[i];
  1392. uint64_t total_packets = 0, total_size = 0;
  1393. av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
  1394. i, of->ctx->url);
  1395. for (j = 0; j < of->ctx->nb_streams; j++) {
  1396. OutputStream *ost = output_streams[of->ost_index + j];
  1397. enum AVMediaType type = ost->enc_ctx->codec_type;
  1398. total_size += ost->data_size;
  1399. total_packets += ost->packets_written;
  1400. av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
  1401. i, j, media_type_string(type));
  1402. if (ost->encoding_needed) {
  1403. av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
  1404. ost->frames_encoded);
  1405. if (type == AVMEDIA_TYPE_AUDIO)
  1406. av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
  1407. av_log(NULL, AV_LOG_VERBOSE, "; ");
  1408. }
  1409. av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
  1410. ost->packets_written, ost->data_size);
  1411. av_log(NULL, AV_LOG_VERBOSE, "\n");
  1412. }
  1413. av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
  1414. total_packets, total_size);
  1415. }
  1416. if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
  1417. av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
  1418. if (pass1_used) {
  1419. av_log(NULL, AV_LOG_WARNING, "\n");
  1420. } else {
  1421. av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
  1422. }
  1423. }
  1424. }
  1425. static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
  1426. {
  1427. AVBPrint buf, buf_script;
  1428. OutputStream *ost;
  1429. AVFormatContext *oc;
  1430. int64_t total_size;
  1431. AVCodecContext *enc;
  1432. int frame_number, vid, i;
  1433. double bitrate;
  1434. double speed;
  1435. int64_t pts = INT64_MIN + 1;
  1436. static int64_t last_time = -1;
  1437. static int qp_histogram[52];
  1438. int hours, mins, secs, us;
  1439. const char *hours_sign;
  1440. int ret;
  1441. float t;
  1442. if (!print_stats && !is_last_report && !progress_avio)
  1443. return;
  1444. if (!is_last_report) {
  1445. if (last_time == -1) {
  1446. last_time = cur_time;
  1447. return;
  1448. }
  1449. if ((cur_time - last_time) < 500000)
  1450. return;
  1451. last_time = cur_time;
  1452. }
  1453. t = (cur_time-timer_start) / 1000000.0;
  1454. oc = output_files[0]->ctx;
  1455. total_size = avio_size(oc->pb);
  1456. if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
  1457. total_size = avio_tell(oc->pb);
  1458. vid = 0;
  1459. av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
  1460. av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
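// buf builds the human-readable status line; buf_script collects the key=value pairs
// written to the -progress output.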
  1461. for (i = 0; i < nb_output_streams; i++) {
  1462. float q = -1;
  1463. ost = output_streams[i];
  1464. enc = ost->enc_ctx;
  1465. if (!ost->stream_copy)
  1466. q = ost->quality / (float) FF_QP2LAMBDA;
  1467. if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
  1468. av_bprintf(&buf, "q=%2.1f ", q);
  1469. av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
  1470. ost->file_index, ost->index, q);
  1471. }
  1472. if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
  1473. float fps;
  1474. frame_number = ost->frame_number;
  1475. fps = t > 1 ? frame_number / t : 0;
  1476. av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
  1477. frame_number, fps < 9.95, fps, q);
  1478. av_bprintf(&buf_script, "frame=%d\n", frame_number);
  1479. av_bprintf(&buf_script, "fps=%.2f\n", fps);
  1480. av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
  1481. ost->file_index, ost->index, q);
  1482. if (is_last_report)
  1483. av_bprintf(&buf, "L");
  1484. if (qp_hist) {
  1485. int j;
  1486. int qp = lrintf(q);
  1487. if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
  1488. qp_histogram[qp]++;
  1489. for (j = 0; j < 32; j++)
  1490. av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
  1491. }
  1492. if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
  1493. int j;
  1494. double error, error_sum = 0;
  1495. double scale, scale_sum = 0;
  1496. double p;
  1497. char type[3] = { 'Y','U','V' };
  1498. av_bprintf(&buf, "PSNR=");
  1499. for (j = 0; j < 3; j++) {
  1500. if (is_last_report) {
  1501. error = enc->error[j];
  1502. scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
  1503. } else {
  1504. error = ost->error[j];
  1505. scale = enc->width * enc->height * 255.0 * 255.0;
  1506. }
  1507. if (j)
  1508. scale /= 4;
  1509. error_sum += error;
  1510. scale_sum += scale;
  1511. p = psnr(error / scale);
  1512. av_bprintf(&buf, "%c:%2.2f ", type[j], p);
  1513. av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
  1514. ost->file_index, ost->index, type[j] | 32, p);
  1515. }
  1516. p = psnr(error_sum / scale_sum);
  1517. av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
  1518. av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
  1519. ost->file_index, ost->index, p);
  1520. }
  1521. vid = 1;
  1522. }
1523. /* track the largest end timestamp seen across the output streams (used for the time=/bitrate report) */
  1524. if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
  1525. pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
  1526. ost->st->time_base, AV_TIME_BASE_Q));
  1527. if (is_last_report)
  1528. nb_frames_drop += ost->last_dropped;
  1529. }
  1530. secs = FFABS(pts) / AV_TIME_BASE;
  1531. us = FFABS(pts) % AV_TIME_BASE;
  1532. mins = secs / 60;
  1533. secs %= 60;
  1534. hours = mins / 60;
  1535. mins %= 60;
  1536. hours_sign = (pts < 0) ? "-" : "";
  1537. bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
  1538. speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
  1539. if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
  1540. else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
  1541. if (pts == AV_NOPTS_VALUE) {
  1542. av_bprintf(&buf, "N/A ");
  1543. } else {
  1544. av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
  1545. hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
  1546. }
  1547. if (bitrate < 0) {
  1548. av_bprintf(&buf, "bitrate=N/A");
  1549. av_bprintf(&buf_script, "bitrate=N/A\n");
  1550. }else{
  1551. av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
  1552. av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
  1553. }
  1554. if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
  1555. else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
  1556. if (pts == AV_NOPTS_VALUE) {
  1557. av_bprintf(&buf_script, "out_time_us=N/A\n");
  1558. av_bprintf(&buf_script, "out_time_ms=N/A\n");
  1559. av_bprintf(&buf_script, "out_time=N/A\n");
  1560. } else {
  1561. av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
  1562. av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
  1563. av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
  1564. hours_sign, hours, mins, secs, us);
  1565. }
  1566. if (nb_frames_dup || nb_frames_drop)
  1567. av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
  1568. av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
  1569. av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
  1570. if (speed < 0) {
  1571. av_bprintf(&buf, " speed=N/A");
  1572. av_bprintf(&buf_script, "speed=N/A\n");
  1573. } else {
  1574. av_bprintf(&buf, " speed=%4.3gx", speed);
  1575. av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
  1576. }
  1577. if (print_stats || is_last_report) {
  1578. const char end = is_last_report ? '\n' : '\r';
  1579. if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
  1580. fprintf(stderr, "%s %c", buf.str, end);
  1581. } else
  1582. av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
  1583. fflush(stderr);
  1584. }
  1585. av_bprint_finalize(&buf, NULL);
  1586. if (progress_avio) {
  1587. av_bprintf(&buf_script, "progress=%s\n",
  1588. is_last_report ? "end" : "continue");
  1589. avio_write(progress_avio, buf_script.str,
  1590. FFMIN(buf_script.len, buf_script.size - 1));
  1591. avio_flush(progress_avio);
  1592. av_bprint_finalize(&buf_script, NULL);
  1593. if (is_last_report) {
  1594. if ((ret = avio_closep(&progress_avio)) < 0)
  1595. av_log(NULL, AV_LOG_ERROR,
  1596. "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
  1597. }
  1598. }
  1599. if (is_last_report)
  1600. print_final_stats(total_size);
  1601. }
  1602. static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
  1603. {
  1604. // We never got any input. Set a fake format, which will
  1605. // come from libavformat.
  1606. ifilter->format = par->format;
  1607. ifilter->sample_rate = par->sample_rate;
  1608. ifilter->channels = par->channels;
  1609. ifilter->channel_layout = par->channel_layout;
  1610. ifilter->width = par->width;
  1611. ifilter->height = par->height;
  1612. ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
  1613. }
  1614. static void flush_encoders(void)
  1615. {
  1616. int i, ret;
  1617. for (i = 0; i < nb_output_streams; i++) {
  1618. OutputStream *ost = output_streams[i];
  1619. AVCodecContext *enc = ost->enc_ctx;
  1620. OutputFile *of = output_files[ost->file_index];
  1621. if (!ost->encoding_needed)
  1622. continue;
  1623. // Try to enable encoding with no input frames.
  1624. // Maybe we should just let encoding fail instead.
  1625. if (!ost->initialized) {
  1626. FilterGraph *fg = ost->filter->graph;
  1627. char error[1024] = "";
  1628. av_log(NULL, AV_LOG_WARNING,
  1629. "Finishing stream %d:%d without any data written to it.\n",
  1630. ost->file_index, ost->st->index);
  1631. if (ost->filter && !fg->graph) {
  1632. int x;
  1633. for (x = 0; x < fg->nb_inputs; x++) {
  1634. InputFilter *ifilter = fg->inputs[x];
  1635. if (ifilter->format < 0)
  1636. ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
  1637. }
  1638. if (!ifilter_has_all_input_formats(fg))
  1639. continue;
  1640. ret = configure_filtergraph(fg);
  1641. if (ret < 0) {
  1642. av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
  1643. exit_program(1);
  1644. }
  1645. finish_output_stream(ost);
  1646. }
  1647. ret = init_output_stream(ost, error, sizeof(error));
  1648. if (ret < 0) {
  1649. av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
  1650. ost->file_index, ost->index, error);
  1651. exit_program(1);
  1652. }
  1653. }
  1654. if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
  1655. continue;
  1656. if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
  1657. continue;
  1658. for (;;) {
  1659. const char *desc = NULL;
  1660. AVPacket pkt;
  1661. int pkt_size;
  1662. switch (enc->codec_type) {
  1663. case AVMEDIA_TYPE_AUDIO:
  1664. desc = "audio";
  1665. break;
  1666. case AVMEDIA_TYPE_VIDEO:
  1667. desc = "video";
  1668. break;
  1669. default:
  1670. av_assert0(0);
  1671. }
  1672. av_init_packet(&pkt);
  1673. pkt.data = NULL;
  1674. pkt.size = 0;
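// Receive until the encoder stops returning EAGAIN; the first EAGAIN switches the
// encoder into drain mode by sending it a NULL frame.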
  1675. update_benchmark(NULL);
  1676. while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
  1677. ret = avcodec_send_frame(enc, NULL);
  1678. if (ret < 0) {
  1679. av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
  1680. desc,
  1681. av_err2str(ret));
  1682. exit_program(1);
  1683. }
  1684. }
  1685. update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
  1686. if (ret < 0 && ret != AVERROR_EOF) {
  1687. av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
  1688. desc,
  1689. av_err2str(ret));
  1690. exit_program(1);
  1691. }
  1692. if (ost->logfile && enc->stats_out) {
  1693. fprintf(ost->logfile, "%s", enc->stats_out);
  1694. }
  1695. if (ret == AVERROR_EOF) {
  1696. output_packet(of, &pkt, ost, 1);
  1697. break;
  1698. }
  1699. if (ost->finished & MUXER_FINISHED) {
  1700. av_packet_unref(&pkt);
  1701. continue;
  1702. }
  1703. av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
  1704. pkt_size = pkt.size;
  1705. output_packet(of, &pkt, ost, 0);
  1706. if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
  1707. do_video_stats(ost, pkt_size);
  1708. }
  1709. }
  1710. }
  1711. }
  1712. /*
  1713. * Check whether a packet from ist should be written into ost at this time
  1714. */
  1715. static int check_output_constraints(InputStream *ist, OutputStream *ost)
  1716. {
  1717. OutputFile *of = output_files[ost->file_index];
  1718. int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
  1719. if (ost->source_index != ist_index)
  1720. return 0;
  1721. if (ost->finished)
  1722. return 0;
  1723. if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
  1724. return 0;
  1725. return 1;
  1726. }
  1727. static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
  1728. {
  1729. OutputFile *of = output_files[ost->file_index];
  1730. InputFile *f = input_files [ist->file_index];
  1731. int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
  1732. int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
  1733. AVPacket opkt = { 0 };
  1734. av_init_packet(&opkt);
  1735. // EOF: flush output bitstream filters.
  1736. if (!pkt) {
  1737. output_packet(of, &opkt, ost, 1);
  1738. return;
  1739. }
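// Skip packets that precede the first keyframe (unless -copy_initial_nonkeyframes)
// or that fall before the requested output start time (unless -copy_prior_start).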
  1740. if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
  1741. !ost->copy_initial_nonkeyframes)
  1742. return;
  1743. if (!ost->frame_number && !ost->copy_prior_start) {
  1744. int64_t comp_start = start_time;
  1745. if (copy_ts && f->start_time != AV_NOPTS_VALUE)
  1746. comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
  1747. if (pkt->pts == AV_NOPTS_VALUE ?
  1748. ist->pts < comp_start :
  1749. pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
  1750. return;
  1751. }
  1752. if (of->recording_time != INT64_MAX &&
  1753. ist->pts >= of->recording_time + start_time) {
  1754. close_output_stream(ost);
  1755. return;
  1756. }
  1757. if (f->recording_time != INT64_MAX) {
  1758. start_time = f->ctx->start_time;
  1759. if (f->start_time != AV_NOPTS_VALUE && copy_ts)
  1760. start_time += f->start_time;
  1761. if (ist->pts >= f->recording_time + start_time) {
  1762. close_output_stream(ost);
  1763. return;
  1764. }
  1765. }
  1766. /* force the input stream PTS */
  1767. if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
  1768. ost->sync_opts++;
  1769. if (pkt->pts != AV_NOPTS_VALUE)
  1770. opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
  1771. else
  1772. opkt.pts = AV_NOPTS_VALUE;
  1773. if (pkt->dts == AV_NOPTS_VALUE)
  1774. opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
  1775. else
  1776. opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
  1777. opkt.dts -= ost_tb_start_time;
  1778. if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
  1779. int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
  1780. if(!duration)
  1781. duration = ist->dec_ctx->frame_size;
  1782. opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
  1783. (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
  1784. ost->mux_timebase) - ost_tb_start_time;
  1785. }
  1786. opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
  1787. opkt.flags = pkt->flags;
  1788. if (pkt->buf) {
  1789. opkt.buf = av_buffer_ref(pkt->buf);
  1790. if (!opkt.buf)
  1791. exit_program(1);
  1792. }
  1793. opkt.data = pkt->data;
  1794. opkt.size = pkt->size;
  1795. av_copy_packet_side_data(&opkt, pkt);
  1796. output_packet(of, &opkt, ost, 0);
  1797. }
  1798. int guess_input_channel_layout(InputStream *ist)
  1799. {
  1800. AVCodecContext *dec = ist->dec_ctx;
  1801. if (!dec->channel_layout) {
  1802. char layout_name[256];
  1803. if (dec->channels > ist->guess_layout_max)
  1804. return 0;
  1805. dec->channel_layout = av_get_default_channel_layout(dec->channels);
  1806. if (!dec->channel_layout)
  1807. return 0;
  1808. av_get_channel_layout_string(layout_name, sizeof(layout_name),
  1809. dec->channels, dec->channel_layout);
  1810. av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
  1811. "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
  1812. }
  1813. return 1;
  1814. }
  1815. static void check_decode_result(InputStream *ist, int *got_output, int ret)
  1816. {
  1817. if (*got_output || ret<0)
  1818. decode_error_stat[ret<0] ++;
  1819. if (ret < 0 && exit_on_error)
  1820. exit_program(1);
  1821. if (*got_output && ist) {
  1822. if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
  1823. av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
  1824. "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
  1825. if (exit_on_error)
  1826. exit_program(1);
  1827. }
  1828. }
  1829. }
  1830. // Filters can be configured only if the formats of all inputs are known.
  1831. static int ifilter_has_all_input_formats(FilterGraph *fg)
  1832. {
  1833. int i;
  1834. for (i = 0; i < fg->nb_inputs; i++) {
  1835. if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
  1836. fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
  1837. return 0;
  1838. }
  1839. return 1;
  1840. }
  1841. static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
  1842. {
  1843. FilterGraph *fg = ifilter->graph;
  1844. int need_reinit, ret, i;
  1845. /* determine if the parameters for this input changed */
  1846. need_reinit = ifilter->format != frame->format;
  1847. switch (ifilter->ist->st->codecpar->codec_type) {
  1848. case AVMEDIA_TYPE_AUDIO:
  1849. need_reinit |= ifilter->sample_rate != frame->sample_rate ||
  1850. ifilter->channels != frame->channels ||
  1851. ifilter->channel_layout != frame->channel_layout;
  1852. break;
  1853. case AVMEDIA_TYPE_VIDEO:
  1854. need_reinit |= ifilter->width != frame->width ||
  1855. ifilter->height != frame->height;
  1856. break;
  1857. }
  1858. if (!ifilter->ist->reinit_filters && fg->graph)
  1859. need_reinit = 0;
  1860. if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
  1861. (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
  1862. need_reinit = 1;
  1863. if (need_reinit) {
  1864. ret = ifilter_parameters_from_frame(ifilter, frame);
  1865. if (ret < 0)
  1866. return ret;
  1867. }
  1868. /* (re)init the graph if possible, otherwise buffer the frame and return */
  1869. if (need_reinit || !fg->graph) {
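// If some inputs still have unknown formats, queue the frame in the input FIFO and
// configure the graph once every input format is known.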
  1870. for (i = 0; i < fg->nb_inputs; i++) {
  1871. if (!ifilter_has_all_input_formats(fg)) {
  1872. AVFrame *tmp = av_frame_clone(frame);
  1873. if (!tmp)
  1874. return AVERROR(ENOMEM);
  1875. av_frame_unref(frame);
  1876. if (!av_fifo_space(ifilter->frame_queue)) {
  1877. ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
  1878. if (ret < 0) {
  1879. av_frame_free(&tmp);
  1880. return ret;
  1881. }
  1882. }
  1883. av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
  1884. return 0;
  1885. }
  1886. }
  1887. ret = reap_filters(1);
  1888. if (ret < 0 && ret != AVERROR_EOF) {
  1889. av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
  1890. return ret;
  1891. }
  1892. ret = configure_filtergraph(fg);
  1893. if (ret < 0) {
  1894. av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
  1895. return ret;
  1896. }
  1897. }
  1898. ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
  1899. if (ret < 0) {
  1900. if (ret != AVERROR_EOF)
  1901. av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
  1902. return ret;
  1903. }
  1904. return 0;
  1905. }
  1906. static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
  1907. {
  1908. int ret;
  1909. ifilter->eof = 1;
  1910. if (ifilter->filter) {
  1911. ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
  1912. if (ret < 0)
  1913. return ret;
  1914. } else {
  1915. // the filtergraph was never configured
  1916. if (ifilter->format < 0)
  1917. ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
  1918. if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
  1919. av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
  1920. return AVERROR_INVALIDDATA;
  1921. }
  1922. }
  1923. return 0;
  1924. }
  1925. // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
  1926. // There is the following difference: if you got a frame, you must call
1927. // this function again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
  1928. // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
  1929. static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
  1930. {
  1931. int ret;
  1932. *got_frame = 0;
  1933. if (pkt) {
  1934. ret = avcodec_send_packet(avctx, pkt);
  1935. // In particular, we don't expect AVERROR(EAGAIN), because we read all
  1936. // decoded frames with avcodec_receive_frame() until done.
  1937. if (ret < 0 && ret != AVERROR_EOF)
  1938. return ret;
  1939. }
  1940. ret = avcodec_receive_frame(avctx, frame);
  1941. if (ret < 0 && ret != AVERROR(EAGAIN))
  1942. return ret;
  1943. if (ret >= 0)
  1944. *got_frame = 1;
  1945. return 0;
  1946. }
  1947. static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
  1948. {
  1949. int i, ret;
  1950. AVFrame *f;
  1951. av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
  1952. for (i = 0; i < ist->nb_filters; i++) {
  1953. if (i < ist->nb_filters - 1) {
  1954. f = ist->filter_frame;
  1955. ret = av_frame_ref(f, decoded_frame);
  1956. if (ret < 0)
  1957. break;
  1958. } else
  1959. f = decoded_frame;
  1960. ret = ifilter_send_frame(ist->filters[i], f);
  1961. if (ret == AVERROR_EOF)
  1962. ret = 0; /* ignore */
  1963. if (ret < 0) {
  1964. av_log(NULL, AV_LOG_ERROR,
  1965. "Failed to inject frame into filter network: %s\n", av_err2str(ret));
  1966. break;
  1967. }
  1968. }
  1969. return ret;
  1970. }
  1971. static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
  1972. int *decode_failed)
  1973. {
  1974. AVFrame *decoded_frame;
  1975. AVCodecContext *avctx = ist->dec_ctx;
  1976. int ret, err = 0;
  1977. AVRational decoded_frame_tb;
  1978. if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
  1979. return AVERROR(ENOMEM);
  1980. if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
  1981. return AVERROR(ENOMEM);
  1982. decoded_frame = ist->decoded_frame;
  1983. update_benchmark(NULL);
  1984. ret = decode(avctx, decoded_frame, got_output, pkt);
  1985. update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
  1986. if (ret < 0)
  1987. *decode_failed = 1;
  1988. if (ret >= 0 && avctx->sample_rate <= 0) {
  1989. av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
  1990. ret = AVERROR_INVALIDDATA;
  1991. }
  1992. if (ret != AVERROR_EOF)
  1993. check_decode_result(ist, got_output, ret);
  1994. if (!*got_output || ret < 0)
  1995. return ret;
  1996. ist->samples_decoded += decoded_frame->nb_samples;
  1997. ist->frames_decoded++;
  1998. /* increment next_dts to use for the case where the input stream does not
  1999. have timestamps or there are multiple frames in the packet */
  2000. ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
  2001. avctx->sample_rate;
  2002. ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
  2003. avctx->sample_rate;
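// Pick a timestamp for the decoded frame: prefer the frame's own pts, then the packet
// pts (both in the stream time base), falling back to the predicted dts in AV_TIME_BASE units.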
  2004. if (decoded_frame->pts != AV_NOPTS_VALUE) {
  2005. decoded_frame_tb = ist->st->time_base;
  2006. } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
  2007. decoded_frame->pts = pkt->pts;
  2008. decoded_frame_tb = ist->st->time_base;
  2009. }else {
  2010. decoded_frame->pts = ist->dts;
  2011. decoded_frame_tb = AV_TIME_BASE_Q;
  2012. }
  2013. if (decoded_frame->pts != AV_NOPTS_VALUE)
  2014. decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
  2015. (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
  2016. (AVRational){1, avctx->sample_rate});
  2017. ist->nb_samples = decoded_frame->nb_samples;
  2018. err = send_frame_to_filters(ist, decoded_frame);
  2019. av_frame_unref(ist->filter_frame);
  2020. av_frame_unref(decoded_frame);
  2021. return err < 0 ? err : ret;
  2022. }
  2023. static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
  2024. int *decode_failed)
  2025. {
  2026. AVFrame *decoded_frame;
  2027. int i, ret = 0, err = 0;
  2028. int64_t best_effort_timestamp;
  2029. int64_t dts = AV_NOPTS_VALUE;
  2030. AVPacket avpkt;
  2031. // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
  2032. // reason. This seems like a semi-critical bug. Don't trigger EOF, and
  2033. // skip the packet.
  2034. if (!eof && pkt && pkt->size == 0)
  2035. return 0;
  2036. if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
  2037. return AVERROR(ENOMEM);
  2038. if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
  2039. return AVERROR(ENOMEM);
  2040. decoded_frame = ist->decoded_frame;
  2041. if (ist->dts != AV_NOPTS_VALUE)
  2042. dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
  2043. if (pkt) {
  2044. avpkt = *pkt;
  2045. avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
  2046. }
  2047. // The old code used to set dts on the drain packet, which does not work
  2048. // with the new API anymore.
  2049. if (eof) {
  2050. void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
  2051. if (!new)
  2052. return AVERROR(ENOMEM);
  2053. ist->dts_buffer = new;
  2054. ist->dts_buffer[ist->nb_dts_buffer++] = dts;
  2055. }
  2056. update_benchmark(NULL);
  2057. ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
  2058. update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
  2059. if (ret < 0)
  2060. *decode_failed = 1;
  2061. // The following line may be required in some cases where there is no parser
2062. // or the parser does not set has_b_frames correctly
  2063. if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
  2064. if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
  2065. ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
  2066. } else
  2067. av_log(ist->dec_ctx, AV_LOG_WARNING,
  2068. "video_delay is larger in decoder than demuxer %d > %d.\n"
  2069. "If you want to help, upload a sample "
  2070. "of this file to ftp://upload.ffmpeg.org/incoming/ "
  2071. "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
  2072. ist->dec_ctx->has_b_frames,
  2073. ist->st->codecpar->video_delay);
  2074. }
  2075. if (ret != AVERROR_EOF)
  2076. check_decode_result(ist, got_output, ret);
  2077. if (*got_output && ret >= 0) {
  2078. if (ist->dec_ctx->width != decoded_frame->width ||
  2079. ist->dec_ctx->height != decoded_frame->height ||
  2080. ist->dec_ctx->pix_fmt != decoded_frame->format) {
  2081. av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
  2082. decoded_frame->width,
  2083. decoded_frame->height,
  2084. decoded_frame->format,
  2085. ist->dec_ctx->width,
  2086. ist->dec_ctx->height,
  2087. ist->dec_ctx->pix_fmt);
  2088. }
  2089. }
  2090. if (!*got_output || ret < 0)
  2091. return ret;
  2092. if(ist->top_field_first>=0)
  2093. decoded_frame->top_field_first = ist->top_field_first;
  2094. ist->frames_decoded++;
  2095. if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
  2096. err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
  2097. if (err < 0)
  2098. goto fail;
  2099. }
  2100. ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
  2101. best_effort_timestamp= decoded_frame->best_effort_timestamp;
  2102. *duration_pts = decoded_frame->pkt_duration;
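// With a user-forced input frame rate, replace the decoder's best-effort timestamp
// with a simple constant-frame-rate counter.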
  2103. if (ist->framerate.num)
  2104. best_effort_timestamp = ist->cfr_next_pts++;
  2105. if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
  2106. best_effort_timestamp = ist->dts_buffer[0];
  2107. for (i = 0; i < ist->nb_dts_buffer - 1; i++)
  2108. ist->dts_buffer[i] = ist->dts_buffer[i + 1];
  2109. ist->nb_dts_buffer--;
  2110. }
  2111. if(best_effort_timestamp != AV_NOPTS_VALUE) {
  2112. int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
  2113. if (ts != AV_NOPTS_VALUE)
  2114. ist->next_pts = ist->pts = ts;
  2115. }
  2116. if (debug_ts) {
  2117. av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
  2118. "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
  2119. ist->st->index, av_ts2str(decoded_frame->pts),
  2120. av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
  2121. best_effort_timestamp,
  2122. av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
  2123. decoded_frame->key_frame, decoded_frame->pict_type,
  2124. ist->st->time_base.num, ist->st->time_base.den);
  2125. }
  2126. if (ist->st->sample_aspect_ratio.num)
  2127. decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
  2128. err = send_frame_to_filters(ist, decoded_frame);
  2129. fail:
  2130. av_frame_unref(ist->filter_frame);
  2131. av_frame_unref(decoded_frame);
  2132. return err < 0 ? err : ret;
  2133. }
  2134. static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
  2135. int *decode_failed)
  2136. {
  2137. AVSubtitle subtitle;
  2138. int free_sub = 1;
  2139. int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
  2140. &subtitle, got_output, pkt);
  2141. check_decode_result(NULL, got_output, ret);
  2142. if (ret < 0 || !*got_output) {
  2143. *decode_failed = 1;
  2144. if (!pkt->size)
  2145. sub2video_flush(ist);
  2146. return ret;
  2147. }
  2148. if (ist->fix_sub_duration) {
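// With -fix_sub_duration, each subtitle is held back by one event so its display time
// can be clipped to the start of the following subtitle.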
  2149. int end = 1;
  2150. if (ist->prev_sub.got_output) {
  2151. end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
  2152. 1000, AV_TIME_BASE);
  2153. if (end < ist->prev_sub.subtitle.end_display_time) {
  2154. av_log(ist->dec_ctx, AV_LOG_DEBUG,
  2155. "Subtitle duration reduced from %"PRId32" to %d%s\n",
  2156. ist->prev_sub.subtitle.end_display_time, end,
  2157. end <= 0 ? ", dropping it" : "");
  2158. ist->prev_sub.subtitle.end_display_time = end;
  2159. }
  2160. }
  2161. FFSWAP(int, *got_output, ist->prev_sub.got_output);
  2162. FFSWAP(int, ret, ist->prev_sub.ret);
  2163. FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
  2164. if (end <= 0)
  2165. goto out;
  2166. }
  2167. if (!*got_output)
  2168. return ret;
  2169. if (ist->sub2video.frame) {
  2170. sub2video_update(ist, &subtitle);
  2171. } else if (ist->nb_filters) {
  2172. if (!ist->sub2video.sub_queue)
  2173. ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
  2174. if (!ist->sub2video.sub_queue)
  2175. exit_program(1);
  2176. if (!av_fifo_space(ist->sub2video.sub_queue)) {
  2177. ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
  2178. if (ret < 0)
  2179. exit_program(1);
  2180. }
  2181. av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
  2182. free_sub = 0;
  2183. }
  2184. if (!subtitle.num_rects)
  2185. goto out;
  2186. ist->frames_decoded++;
  2187. for (i = 0; i < nb_output_streams; i++) {
  2188. OutputStream *ost = output_streams[i];
  2189. if (!check_output_constraints(ist, ost) || !ost->encoding_needed
  2190. || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
  2191. continue;
  2192. do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
  2193. }
  2194. out:
  2195. if (free_sub)
  2196. avsubtitle_free(&subtitle);
  2197. return ret;
  2198. }
  2199. static int send_filter_eof(InputStream *ist)
  2200. {
  2201. int i, ret;
  2202. /* TODO keep pts also in stream time base to avoid converting back */
  2203. int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
  2204. AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
  2205. for (i = 0; i < ist->nb_filters; i++) {
  2206. ret = ifilter_send_eof(ist->filters[i], pts);
  2207. if (ret < 0)
  2208. return ret;
  2209. }
  2210. return 0;
  2211. }
  2212. /* pkt = NULL means EOF (needed to flush decoder buffers) */
  2213. static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
  2214. {
  2215. int ret = 0, i;
  2216. int repeating = 0;
  2217. int eof_reached = 0;
  2218. AVPacket avpkt;
  2219. if (!ist->saw_first_ts) {
  2220. ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
  2221. ist->pts = 0;
  2222. if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
  2223. ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2224. ist->pts = ist->dts; // unused, but better to set it to a value that's not totally wrong
  2225. }
  2226. ist->saw_first_ts = 1;
  2227. }
  2228. if (ist->next_dts == AV_NOPTS_VALUE)
  2229. ist->next_dts = ist->dts;
  2230. if (ist->next_pts == AV_NOPTS_VALUE)
  2231. ist->next_pts = ist->pts;
  2232. if (!pkt) {
  2233. /* EOF handling */
  2234. av_init_packet(&avpkt);
  2235. avpkt.data = NULL;
  2236. avpkt.size = 0;
  2237. } else {
  2238. avpkt = *pkt;
  2239. }
  2240. if (pkt && pkt->dts != AV_NOPTS_VALUE) {
  2241. ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
  2242. if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
  2243. ist->next_pts = ist->pts = ist->dts;
  2244. }
  2245. // while we have more to decode or while the decoder did output something on EOF
  2246. while (ist->decoding_needed) {
  2247. int64_t duration_dts = 0;
  2248. int64_t duration_pts = 0;
  2249. int got_output = 0;
  2250. int decode_failed = 0;
  2251. ist->pts = ist->next_pts;
  2252. ist->dts = ist->next_dts;
  2253. switch (ist->dec_ctx->codec_type) {
  2254. case AVMEDIA_TYPE_AUDIO:
  2255. ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
  2256. &decode_failed);
  2257. break;
  2258. case AVMEDIA_TYPE_VIDEO:
  2259. ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
  2260. &decode_failed);
  2261. if (!repeating || !pkt || got_output) {
  2262. if (pkt && pkt->duration) {
  2263. duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
  2264. } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
  2265. int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
  2266. duration_dts = ((int64_t)AV_TIME_BASE *
  2267. ist->dec_ctx->framerate.den * ticks) /
  2268. ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
  2269. }
  2270. if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
  2271. ist->next_dts += duration_dts;
  2272. }else
  2273. ist->next_dts = AV_NOPTS_VALUE;
  2274. }
  2275. if (got_output) {
  2276. if (duration_pts > 0) {
  2277. ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
  2278. } else {
  2279. ist->next_pts += duration_dts;
  2280. }
  2281. }
  2282. break;
  2283. case AVMEDIA_TYPE_SUBTITLE:
  2284. if (repeating)
  2285. break;
  2286. ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
  2287. if (!pkt && ret >= 0)
  2288. ret = AVERROR_EOF;
  2289. break;
  2290. default:
  2291. return -1;
  2292. }
  2293. if (ret == AVERROR_EOF) {
  2294. eof_reached = 1;
  2295. break;
  2296. }
  2297. if (ret < 0) {
  2298. if (decode_failed) {
  2299. av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
  2300. ist->file_index, ist->st->index, av_err2str(ret));
  2301. } else {
  2302. av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
  2303. "data for stream #%d:%d\n", ist->file_index, ist->st->index);
  2304. }
  2305. if (!decode_failed || exit_on_error)
  2306. exit_program(1);
  2307. break;
  2308. }
  2309. if (got_output)
  2310. ist->got_output = 1;
  2311. if (!got_output)
  2312. break;
  2313. // During draining, we might get multiple output frames in this loop.
  2314. // ffmpeg.c does not drain the filter chain on configuration changes,
  2315. // which means if we send multiple frames at once to the filters, and
  2316. // one of those frames changes configuration, the buffered frames will
  2317. // be lost. This can upset certain FATE tests.
  2318. // Decode only 1 frame per call on EOF to appease these FATE tests.
  2319. // The ideal solution would be to rewrite decoding to use the new
  2320. // decoding API in a better way.
  2321. if (!pkt)
  2322. break;
  2323. repeating = 1;
  2324. }
  2325. /* after flushing, send an EOF on all the filter inputs attached to the stream */
2326. /* except when looping: then we need to flush but must not send an EOF */
  2327. if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
  2328. int ret = send_filter_eof(ist);
  2329. if (ret < 0) {
  2330. av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
  2331. exit_program(1);
  2332. }
  2333. }
  2334. /* handle stream copy */
  2335. if (!ist->decoding_needed && pkt) {
  2336. ist->dts = ist->next_dts;
  2337. switch (ist->dec_ctx->codec_type) {
  2338. case AVMEDIA_TYPE_AUDIO:
  2339. av_assert1(pkt->duration >= 0);
  2340. if (ist->dec_ctx->sample_rate) {
  2341. ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
  2342. ist->dec_ctx->sample_rate;
  2343. } else {
  2344. ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
  2345. }
  2346. break;
  2347. case AVMEDIA_TYPE_VIDEO:
  2348. if (ist->framerate.num) {
  2349. // TODO: Remove work-around for c99-to-c89 issue 7
  2350. AVRational time_base_q = AV_TIME_BASE_Q;
  2351. int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
  2352. ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
  2353. } else if (pkt->duration) {
  2354. ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
  2355. } else if(ist->dec_ctx->framerate.num != 0) {
  2356. int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
  2357. ist->next_dts += ((int64_t)AV_TIME_BASE *
  2358. ist->dec_ctx->framerate.den * ticks) /
  2359. ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
  2360. }
  2361. break;
  2362. }
  2363. ist->pts = ist->dts;
  2364. ist->next_pts = ist->next_dts;
  2365. }
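// Finally, forward the packet to every output stream that stream-copies from this input.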
  2366. for (i = 0; i < nb_output_streams; i++) {
  2367. OutputStream *ost = output_streams[i];
  2368. if (!check_output_constraints(ist, ost) || ost->encoding_needed)
  2369. continue;
  2370. do_streamcopy(ist, ost, pkt);
  2371. }
  2372. return !eof_reached;
  2373. }
  2374. static void print_sdp(void)
  2375. {
  2376. char sdp[16384];
  2377. int i;
  2378. int j;
  2379. AVIOContext *sdp_pb;
  2380. AVFormatContext **avc;
  2381. for (i = 0; i < nb_output_files; i++) {
  2382. if (!output_files[i]->header_written)
  2383. return;
  2384. }
  2385. avc = av_malloc_array(nb_output_files, sizeof(*avc));
  2386. if (!avc)
  2387. exit_program(1);
  2388. for (i = 0, j = 0; i < nb_output_files; i++) {
  2389. if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
  2390. avc[j] = output_files[i]->ctx;
  2391. j++;
  2392. }
  2393. }
  2394. if (!j)
  2395. goto fail;
  2396. av_sdp_create(avc, j, sdp, sizeof(sdp));
  2397. if (!sdp_filename) {
  2398. printf("SDP:\n%s\n", sdp);
  2399. fflush(stdout);
  2400. } else {
  2401. if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
  2402. av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
  2403. } else {
  2404. avio_printf(sdp_pb, "SDP:\n%s", sdp);
  2405. avio_closep(&sdp_pb);
  2406. av_freep(&sdp_filename);
  2407. }
  2408. }
  2409. fail:
  2410. av_freep(&avc);
  2411. }
  2412. static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
  2413. {
  2414. InputStream *ist = s->opaque;
  2415. const enum AVPixelFormat *p;
  2416. int ret;
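// Walk the decoder's proposed pixel formats in order of preference: stop at the first
// software format, otherwise try to initialize a matching hwaccel (hw-device configs
// first, then the legacy hwaccels table).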
  2417. for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
  2418. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
  2419. const AVCodecHWConfig *config = NULL;
  2420. int i;
  2421. if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
  2422. break;
  2423. if (ist->hwaccel_id == HWACCEL_GENERIC ||
  2424. ist->hwaccel_id == HWACCEL_AUTO) {
  2425. for (i = 0;; i++) {
  2426. config = avcodec_get_hw_config(s->codec, i);
  2427. if (!config)
  2428. break;
  2429. if (!(config->methods &
  2430. AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
  2431. continue;
  2432. if (config->pix_fmt == *p)
  2433. break;
  2434. }
  2435. }
  2436. if (config) {
  2437. if (config->device_type != ist->hwaccel_device_type) {
  2438. // Different hwaccel offered, ignore.
  2439. continue;
  2440. }
  2441. ret = hwaccel_decode_init(s);
  2442. if (ret < 0) {
  2443. if (ist->hwaccel_id == HWACCEL_GENERIC) {
  2444. av_log(NULL, AV_LOG_FATAL,
  2445. "%s hwaccel requested for input stream #%d:%d, "
  2446. "but cannot be initialized.\n",
  2447. av_hwdevice_get_type_name(config->device_type),
  2448. ist->file_index, ist->st->index);
  2449. return AV_PIX_FMT_NONE;
  2450. }
  2451. continue;
  2452. }
  2453. } else {
  2454. const HWAccel *hwaccel = NULL;
  2455. int i;
  2456. for (i = 0; hwaccels[i].name; i++) {
  2457. if (hwaccels[i].pix_fmt == *p) {
  2458. hwaccel = &hwaccels[i];
  2459. break;
  2460. }
  2461. }
  2462. if (!hwaccel) {
  2463. // No hwaccel supporting this pixfmt.
  2464. continue;
  2465. }
  2466. if (hwaccel->id != ist->hwaccel_id) {
  2467. // Does not match requested hwaccel.
  2468. continue;
  2469. }
  2470. ret = hwaccel->init(s);
  2471. if (ret < 0) {
  2472. av_log(NULL, AV_LOG_FATAL,
  2473. "%s hwaccel requested for input stream #%d:%d, "
  2474. "but cannot be initialized.\n", hwaccel->name,
  2475. ist->file_index, ist->st->index);
  2476. return AV_PIX_FMT_NONE;
  2477. }
  2478. }
  2479. if (ist->hw_frames_ctx) {
  2480. s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
  2481. if (!s->hw_frames_ctx)
  2482. return AV_PIX_FMT_NONE;
  2483. }
  2484. ist->hwaccel_pix_fmt = *p;
  2485. break;
  2486. }
  2487. return *p;
  2488. }
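/* AVCodecContext.get_buffer2() callback: delegate to the hwaccel's buffer
 * allocator for hardware frames, otherwise fall back to the default one. */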
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
    InputStream *ist = s->opaque;
    if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
        return ist->hwaccel_get_buffer(s, frame, flags);
    return avcodec_default_get_buffer2(s, frame, flags);
}
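/* Open the decoder for an input stream (when decoding is needed) and set up
 * its callbacks, decoder options and hardware device. */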
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];
    if (ist->decoding_needed) {
        AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                     avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }
        ist->dec_ctx->opaque = ist;
        ist->dec_ctx->get_format = get_format;
        ist->dec_ctx->get_buffer2 = get_buffer;
        ist->dec_ctx->thread_safe_callbacks = 1;
        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
            (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            if (ist->decoding_needed & DECODING_FOR_FILTER)
                av_log(NULL, AV_LOG_WARNING, "Warning: using DVB subtitles for filtering and output at the same time is not fully supported; also see -compute_edt [0|1]\n");
        }
        av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
        /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
         * audio, and video decoders such as cuvid or mediacodec */
        ist->dec_ctx->pkt_timebase = ist->st->time_base;
        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
        if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
            av_dict_set(&ist->decoder_opts, "threads", "1", 0);
        ret = hw_device_setup_for_decode(ist);
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "decoder on input stream #%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);
            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
        assert_avoptions(ist->decoder_opts);
    }
    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;
    return 0;
}
static InputStream *get_input_stream(OutputStream *ost)
{
    if (ost->source_index >= 0)
        return input_streams[ost->source_index];
    return NULL;
}
static int compare_int64(const void *a, const void *b)
{
    return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
}
/* open the muxer when all the streams are initialized */
static int check_init_output_file(OutputFile *of, int file_index)
{
    int ret, i;
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];
        if (!ost->initialized)
            return 0;
    }
    of->ctx->interrupt_callback = int_cb;
    ret = avformat_write_header(of->ctx, &of->opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR,
               "Could not write header for output file #%d "
               "(incorrect codec parameters ?): %s\n",
               file_index, av_err2str(ret));
        return ret;
    }
    //assert_avoptions(of->opts);
    of->header_written = 1;
    av_dump_format(of->ctx, file_index, of->ctx->url, 1);
    if (sdp_filename || want_sdp)
        print_sdp();
    /* flush the muxing queues */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];
        /* try to improve muxing time_base (only possible if nothing has been written yet) */
        if (!av_fifo_size(ost->muxing_queue))
            ost->mux_timebase = ost->st->time_base;
        while (av_fifo_size(ost->muxing_queue)) {
            AVPacket pkt;
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            write_packet(of, &pkt, ost, 1);
        }
    }
    return 0;
}
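/* Initialize the chain of bitstream filters attached to an output stream and
 * propagate codec parameters and time base through it into the stream. */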
static int init_output_bsfs(OutputStream *ost)
{
    AVBSFContext *ctx;
    int i, ret;
    if (!ost->nb_bitstream_filters)
        return 0;
    for (i = 0; i < ost->nb_bitstream_filters; i++) {
        ctx = ost->bsf_ctx[i];
        ret = avcodec_parameters_copy(ctx->par_in,
                                      i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
        if (ret < 0)
            return ret;
        ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
        ret = av_bsf_init(ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
                   ost->bsf_ctx[i]->filter->name);
            return ret;
        }
    }
    ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
    ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
    if (ret < 0)
        return ret;
    ost->st->time_base = ctx->time_base_out;
    return 0;
}
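/* Set up an output stream that is stream-copied from its input: copy codec
 * parameters, timing information, disposition and stream side data. */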
static int init_output_stream_streamcopy(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    InputStream *ist = get_input_stream(ost);
    AVCodecParameters *par_dst = ost->st->codecpar;
    AVCodecParameters *par_src = ost->ref_par;
    AVRational sar;
    int i, ret;
    uint32_t codec_tag = par_dst->codec_tag;
    av_assert0(ist && !ost->filter);
    ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
    if (ret >= 0)
        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error setting up codec context options.\n");
        return ret;
    }
    ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error getting reference codec parameters.\n");
        return ret;
    }
    if (!codec_tag) {
        unsigned int codec_tag_tmp;
        if (!of->ctx->oformat->codec_tag ||
            av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
            !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
            codec_tag = par_src->codec_tag;
    }
    ret = avcodec_parameters_copy(par_dst, par_src);
    if (ret < 0)
        return ret;
    par_dst->codec_tag = codec_tag;
    if (!ost->frame_rate.num)
        ost->frame_rate = ist->framerate;
    ost->st->avg_frame_rate = ost->frame_rate;
    ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
    if (ret < 0)
        return ret;
    // copy timebase while removing common factors
    if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
        ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
    // copy estimated duration as a hint to the muxer
    if (ost->st->duration <= 0 && ist->st->duration > 0)
        ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
    // copy disposition
    ost->st->disposition = ist->st->disposition;
    if (ist->st->nb_side_data) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            const AVPacketSideData *sd_src = &ist->st->side_data[i];
            uint8_t *dst_data;
            dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
            if (!dst_data)
                return AVERROR(ENOMEM);
            memcpy(dst_data, sd_src->data, sd_src->size);
        }
    }
    if (ost->rotate_overridden) {
        uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
                                              sizeof(int32_t) * 9);
        if (sd)
            av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
    }
    switch (par_dst->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (audio_volume != 256) {
            av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
            exit_program(1);
        }
        if ((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
            par_dst->block_align = 0;
        if (par_dst->codec_id == AV_CODEC_ID_AC3)
            par_dst->block_align = 0;
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
            sar =
                av_mul_q(ost->frame_aspect_ratio,
                         (AVRational){ par_dst->height, par_dst->width });
            av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
                   "with stream copy may produce invalid files\n");
        }
        else if (ist->st->sample_aspect_ratio.num)
            sar = ist->st->sample_aspect_ratio;
        else
            sar = par_src->sample_aspect_ratio;
        ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
        ost->st->avg_frame_rate = ist->st->avg_frame_rate;
        ost->st->r_frame_rate = ist->st->r_frame_rate;
        break;
    }
    ost->mux_timebase = ist->st->time_base;
    return 0;
}
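/* Set the "encoder" metadata tag on the output stream, unless it is already
 * present or bitexact output was requested for the muxer or the codec. */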
static void set_encoder_id(OutputFile *of, OutputStream *ost)
{
    AVDictionaryEntry *e;
    uint8_t *encoder_string;
    int encoder_string_len;
    int format_flags = 0;
    int codec_flags = ost->enc_ctx->flags;
    if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
        return;
    e = av_dict_get(of->opts, "fflags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
    }
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
    }
    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    encoder_string = av_mallocz(encoder_string_len);
    if (!encoder_string)
        exit_program(1);
    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
    else
        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
    av_dict_set(&ost->st->metadata, "encoder", encoder_string,
                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
}
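/* Parse the -force_key_frames time list (including the "chapters[+offset]"
 * syntax) into a sorted array of timestamps in the given codec context's
 * time base. */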
static void parse_forced_key_frames(char *kf, OutputStream *ost,
                                    AVCodecContext *avctx)
{
    char *p;
    int n = 1, i, size, index = 0;
    int64_t t, *pts;
    for (p = kf; *p; p++)
        if (*p == ',')
            n++;
    size = n;
    pts = av_malloc_array(size, sizeof(*pts));
    if (!pts) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
        exit_program(1);
    }
    p = kf;
    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');
        if (next)
            *next++ = 0;
        if (!memcmp(p, "chapters", 8)) {
            AVFormatContext *avf = output_files[ost->file_index]->ctx;
            int j;
            if (avf->nb_chapters > INT_MAX - size ||
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                                     sizeof(*pts)))) {
                av_log(NULL, AV_LOG_FATAL,
                       "Could not allocate forced key frames array.\n");
                exit_program(1);
            }
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
            for (j = 0; j < avf->nb_chapters; j++) {
                AVChapter *c = avf->chapters[j];
                av_assert1(index < size);
                pts[index++] = av_rescale_q(c->start, c->time_base,
                                            avctx->time_base) + t;
            }
        } else {
            t = parse_time_or_die("force_key_frames", p, 1);
            av_assert1(index < size);
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
        }
        p = next;
    }
    av_assert0(index == size);
    qsort(pts, size, sizeof(*pts), compare_int64);
    ost->forced_kf_count = size;
    ost->forced_kf_pts = pts;
}
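/* Choose the encoder time base: an explicitly requested one, the input
 * stream's time base, or the supplied default. */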
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
{
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVFormatContext *oc;
    if (ost->enc_timebase.num > 0) {
        enc_ctx->time_base = ost->enc_timebase;
        return;
    }
    if (ost->enc_timebase.num < 0) {
        if (ist) {
            enc_ctx->time_base = ist->st->time_base;
            return;
        }
        oc = output_files[ost->file_index]->ctx;
        av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
    }
    enc_ctx->time_base = default_time_base;
}
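/* Configure the encoder context of an output stream (frame rate, sample or
 * pixel format, dimensions, time base, forced key frames, ...) from the
 * filter graph output and, where available, the corresponding input stream. */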
static int init_output_stream_encode(OutputStream *ost)
{
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVCodecContext *dec_ctx = NULL;
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;
    set_encoder_id(output_files[ost->file_index], ost);
    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
    if (ist) {
        ost->st->disposition = ist->st->disposition;
        dec_ctx = ist->dec_ctx;
        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
                ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
                ost->st->disposition = AV_DISPOSITION_DEFAULT;
    }
    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        if (!ost->frame_rate.num)
            ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            av_log(NULL, AV_LOG_WARNING,
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                   "if you want a different framerate.\n",
                   ost->file_index, ost->index);
        }
        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }
    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
        enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
        enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
        enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;
    case AVMEDIA_TYPE_VIDEO:
        init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
        if (av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
            && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR ||
                (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))) {
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }
        for (j = 0; j < ost->forced_kf_count; j++)
            ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
                                                 AV_TIME_BASE_Q,
                                                 enc_ctx->time_base);
        enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
        enc_ctx->framerate = ost->frame_rate;
        ost->st->avg_frame_rate = ost->frame_rate;
        if (!dec_ctx ||
            enc_ctx->width != dec_ctx->width ||
            enc_ctx->height != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
            enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
        }
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }
        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                                    forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }
                ost->forced_keyframes_expr_const_values[FKF_N] = 0;
                ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
                // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
                // parse it only for static kf timings
            } else if (strncmp(ost->forced_keyframes, "source", 6)) {
                parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        if (!enc_ctx->width) {
            enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }
    ost->mux_timebase = enc_ctx->time_base;
    return 0;
}
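/* Fully initialize one output stream: open the encoder or set up stream copy,
 * apply user-supplied disposition flags, initialize bitstream filters, and
 * write the output file header once all of its streams are ready. */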
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    int ret = 0;
    if (ost->encoding_needed) {
        AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;
        ret = init_output_stream_encode(ost);
        if (ret < 0)
            return ret;
        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);
        if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
            ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
            av_buffersink_get_format(ost->filter->filter)) {
            ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
            if (!ost->enc_ctx->hw_frames_ctx)
                return AVERROR(ENOMEM);
        } else {
            ret = hw_device_setup_for_encode(ost);
            if (ret < 0) {
                snprintf(error, error_len, "Device setup failed for "
                         "encoder on output stream #%d:%d : %s",
                         ost->file_index, ost->index, av_err2str(ret));
                return ret;
            }
        }
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            AVCodecDescriptor const *input_descriptor =
                avcodec_descriptor_get(dec->codec_id);
            AVCodecDescriptor const *output_descriptor =
                avcodec_descriptor_get(ost->enc_ctx->codec_id);
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }
        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);
        assert_avoptions(ost->encoder_opts);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");
        ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }
        /*
         * FIXME: ost->st->codec shouldn't be needed here anymore.
         */
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0)
            return ret;
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;
            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;
                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }
        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                if (!dst)
                    return AVERROR(ENOMEM);
                memcpy(dst, sd->data, sd->size);
                if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                    av_display_rotation_set((uint32_t *)dst, 0);
            }
        }
        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
        ost->st->codec->codec = ost->enc_ctx->codec;
    } else if (ost->stream_copy) {
        ret = init_output_stream_streamcopy(ost);
        if (ret < 0)
            return ret;
    }
    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        static const AVOption opts[] = {
            { "disposition",      NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default",          NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT },          .unit = "flags" },
            { "dub",              NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB },              .unit = "flags" },
            { "original",         NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL },         .unit = "flags" },
            { "comment",          NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT },          .unit = "flags" },
            { "lyrics",           NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS },           .unit = "flags" },
            { "karaoke",          NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE },          .unit = "flags" },
            { "forced",           NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED },           .unit = "flags" },
            { "hearing_impaired", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
            { "visual_impaired",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED },  .unit = "flags" },
            { "clean_effects",    NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS },    .unit = "flags" },
            { "attached_pic",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC },     .unit = "flags" },
            { "captions",         NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS },         .unit = "flags" },
            { "descriptions",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS },     .unit = "flags" },
            { "dependent",        NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT },        .unit = "flags" },
            { "metadata",         NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA },         .unit = "flags" },
            { NULL },
        };
        static const AVClass class = {
            .class_name = "",
            .item_name  = av_default_item_name,
            .option     = opts,
            .version    = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;
        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }
    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;
    ost->initialized = 1;
    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;
    return ret;
}
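/* Warn (once per stream) about packets from streams that appeared after the
 * initial stream probing; such streams are ignored. */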
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *file = input_files[input_index];
    AVStream *st = file->ctx->streams[pkt->stream_index];
    if (pkt->stream_index < file->nb_streams_warn)
        return;
    av_log(file->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           av_get_media_type_string(st->codecpar->codec_type),
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
    file->nb_streams_warn = pkt->stream_index + 1;
}
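/* Global initialization before the main transcode loop: open decoders and
 * encoders, discard unused programs, write headers where already possible,
 * and dump the stream mapping. */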
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }
    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }
    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }
    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        // skip streams fed from filtergraphs until we have a frame for them
        if (output_streams[i]->filter)
            continue;
        ret = init_output_stream(output_streams[i], error, sizeof(error));
        if (ret < 0)
            goto dump_format;
    }
    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard = AVDISCARD_ALL;
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }
    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            ret = check_init_output_file(output_files[i], i);
            if (ret < 0)
                goto dump_format;
        }
    }
 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }
        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }
        av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec = ost->enc;
            const char *decoder_name = "?";
            const char *in_codec_name = "?";
            const char *encoder_name = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;
            if (in_codec) {
                decoder_name = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }
            if (out_codec) {
                encoder_name = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }
            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }
    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }
    atomic_store(&transcode_init_done, 1);
    return 0;
}
/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
static int need_output(void)
{
    int i;
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        AVFormatContext *os = output_files[ost->file_index]->ctx;
        if (ost->finished ||
            (os->pb && avio_tell(os->pb) >= of->limit_filesize))
            continue;
        if (ost->frame_number >= ost->max_frames) {
            int j;
            for (j = 0; j < of->ctx->nb_streams; j++)
                close_output_stream(output_streams[of->ost_index + j]);
            continue;
        }
        return 1;
    }
    return 0;
}
/**
 * Select the output stream to process.
 *
 * @return selected output stream, or NULL if none available
 */
static OutputStream *choose_output(void)
{
    int i;
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
        if (ost->st->cur_dts == AV_NOPTS_VALUE)
            av_log(NULL, AV_LOG_DEBUG,
                   "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
                   ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
        if (!ost->initialized && !ost->inputs_done)
            return ost;
        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            ost_min = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;
    if (tcgetattr(0, &tty) == 0) {
        if (on) tty.c_lflag |= ECHO;
        else    tty.c_lflag &= ~ECHO;
        tcsetattr(0, TCSANOW, &tty);
    }
#endif
}
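/* Poll for interactive keyboard commands (quit, verbosity, debug modes,
 * filter commands, help) while transcoding. */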
static int check_keyboard_interaction(int64_t cur_time)
{
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if (cur_time - last_time >= 100000 && !run_as_daemon) {
        key = read_key();
        last_time = cur_time;
    } else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
    if (key == 'h') {
        if (do_hex_dump) {
            do_hex_dump = do_pkt_dump = 0;
        } else if (do_pkt_dump) {
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        av_log_set_level(AV_LOG_DEBUG);
    }
    if (key == 'c' || key == 'C') {
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            av_log(NULL, AV_LOG_ERROR,
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
    if (key == 'd' || key == 'D') {
        int debug = 0;
        if (key == 'D') {
            debug = input_streams[0]->st->codec->debug << 1;
            if (!debug) debug = 1;
            while (debug & (FF_DEBUG_DCT_COEFF
#if FF_API_DEBUG_MV
                            |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
#endif
                   )) //unsupported, would just crash
                debug += debug;
        } else {
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug) != 1)
                fprintf(stderr, "error parsing debug value\n");
        }
        for (i = 0; i < nb_input_streams; i++) {
            input_streams[i]->st->codec->debug = debug;
        }
        for (i = 0; i < nb_output_streams; i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if (debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr, "debug=%d\n", debug);
    }
    if (key == '?') {
        fprintf(stderr, "key function\n"
                        "? show this help\n"
                        "+ increase verbosity\n"
                        "- decrease verbosity\n"
                        "c Send command to first matching filter supporting it\n"
                        "C Send/Queue command to all matching filters\n"
                        "D cycle through available debug modes\n"
                        "h dump packets/hex press to cycle through the 3 states\n"
                        "q quit\n"
                        "s Show QP histogram\n"
        );
    }
    return 0;
}
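/* Demuxer thread support: when several input files are used, each input runs
 * in its own thread and feeds packets to the main thread through a message
 * queue. */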
#if HAVE_THREADS
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;
    while (1) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
            av_log(f->ctx, AV_LOG_WARNING,
                   "Thread message queue blocking; consider raising the "
                   "thread_queue_size option (current value: %d)\n",
                   f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            av_packet_unref(&pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }
    return NULL;
}
static void free_input_thread(int i)
{
    InputFile *f = input_files[i];
    AVPacket pkt;
    if (!f || !f->in_thread_queue)
        return;
    av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
    while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
        av_packet_unref(&pkt);
    pthread_join(f->thread, NULL);
    f->joined = 1;
    av_thread_message_queue_free(&f->in_thread_queue);
}
static void free_input_threads(void)
{
    int i;
    for (i = 0; i < nb_input_files; i++)
        free_input_thread(i);
}
static int init_input_thread(int i)
{
    int ret;
    InputFile *f = input_files[i];
    if (nb_input_files == 1)
        return 0;
    if (f->ctx->pb ? !f->ctx->pb->seekable :
        strcmp(f->ctx->iformat->name, "lavfi"))
        f->non_blocking = 1;
    ret = av_thread_message_queue_alloc(&f->in_thread_queue,
                                        f->thread_queue_size, sizeof(AVPacket));
    if (ret < 0)
        return ret;
    if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
        av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
        av_thread_message_queue_free(&f->in_thread_queue);
        return AVERROR(ret);
    }
    return 0;
}
static int init_input_threads(void)
{
    int i, ret;
    for (i = 0; i < nb_input_files; i++) {
        ret = init_input_thread(i);
        if (ret < 0)
            return ret;
    }
    return 0;
}
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                        f->non_blocking ?
                                        AV_THREAD_MESSAGE_NONBLOCK : 0);
}
#endif
static int get_input_packet(InputFile *f, AVPacket *pkt)
{
    if (f->rate_emu) {
        int i;
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            int64_t now = av_gettime_relative() - ist->start;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }
#if HAVE_THREADS
    if (nb_input_files > 1)
        return get_input_packet_mt(f, pkt);
#endif
    return av_read_frame(f->ctx, pkt);
}
static int got_eagain(void)
{
    int i;
    for (i = 0; i < nb_output_streams; i++)
        if (output_streams[i]->unavailable)
            return 1;
    return 0;
}
static void reset_eagain(void)
{
    int i;
    for (i = 0; i < nb_input_files; i++)
        input_files[i]->eagain = 0;
    for (i = 0; i < nb_output_streams; i++)
        output_streams[i]->unavailable = 0;
}
// set duration to max(tmp, duration) in a proper time base and return duration's time_base
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
                               AVRational time_base)
{
    int ret;
    if (!*duration) {
        *duration = tmp;
        return tmp_time_base;
    }
    ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
    if (ret < 0) {
        *duration = tmp;
        return tmp_time_base;
    }
    return time_base;
}
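/* Seek an input file back to its start for -stream_loop, and update the
 * accumulated file duration used to offset timestamps of the next loop
 * iteration. */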
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;
    ret = av_seek_frame(is, -1, is->start_time, 0);
    if (ret < 0)
        return ret;
    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;
        /* duration is the length of the last frame in a stream;
         * when an audio stream is present we do not care about the
         * last video frame's length because it is not exactly defined */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }
    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;
        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};
                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else {
                continue;
            }
        } else {
            if (ist->framerate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
            } else {
                duration = 1;
            }
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
            duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }
    if (ifile->loop > 0)
        ifile->loop--;
    return ret;
}
/*
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket pkt;
    int ret, thread_ret, i, j;
    int64_t duration;
    int64_t pkt_dts;
    is = ifile->ctx;
    ret = get_input_packet(ifile, &pkt);
    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    if (ret < 0 && ifile->loop) {
        AVCodecContext *avctx;
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            avctx = ist->dec_ctx;
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 1);
                if (ret > 0)
                    return 0;
                avcodec_flush_buffers(avctx);
            }
        }
#if HAVE_THREADS
        free_input_thread(file_index);
#endif
        ret = seek_to_start(ifile, is);
#if HAVE_THREADS
        thread_ret = init_input_thread(file_index);
        if (thread_ret < 0)
            return thread_ret;
#endif
        if (ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
        else
            ret = get_input_packet(ifile, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            ifile->eagain = 1;
            return ret;
        }
    }
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->url, ret);
            if (exit_on_error)
                exit_program(1);
        }
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 0);
                if (ret > 0)
                    return 0;
            }
            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];
                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    finish_output_stream(ost);
            }
        }
        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }
    reset_eagain();
    if (do_pkt_dump) {
        av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
                         is->streams[pkt.stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in the stream: we ignore them */
  3750. if (pkt.stream_index >= ifile->nb_streams) {
  3751. report_new_stream(file_index, &pkt);
  3752. goto discard_packet;
  3753. }
  3754. ist = input_streams[ifile->ist_index + pkt.stream_index];
  3755. ist->data_size += pkt.size;
  3756. ist->nb_packets++;
  3757. if (ist->discard)
  3758. goto discard_packet;
  3759. if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
  3760. av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
  3761. "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
  3762. if (exit_on_error)
  3763. exit_program(1);
  3764. }
  3765. if (debug_ts) {
  3766. av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
  3767. "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
  3768. ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
  3769. av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
  3770. av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
  3771. av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
  3772. av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
  3773. av_ts2str(input_files[ist->file_index]->ts_offset),
  3774. av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
  3775. }
  3776. if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
  3777. int64_t stime, stime2;
  3778. // Correcting starttime based on the enabled streams
  3779. // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
  3780. // so we instead do it here as part of discontinuity handling
  3781. if ( ist->next_dts == AV_NOPTS_VALUE
  3782. && ifile->ts_offset == -is->start_time
  3783. && (is->iformat->flags & AVFMT_TS_DISCONT)) {
  3784. int64_t new_start_time = INT64_MAX;
  3785. for (i=0; i<is->nb_streams; i++) {
  3786. AVStream *st = is->streams[i];
  3787. if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
  3788. continue;
  3789. new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
  3790. }
  3791. if (new_start_time > is->start_time) {
  3792. av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
  3793. ifile->ts_offset = -new_start_time;
  3794. }
  3795. }
  3796. stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
  3797. stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
  3798. ist->wrap_correction_done = 1;
  3799. if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
  3800. pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
  3801. ist->wrap_correction_done = 0;
  3802. }
  3803. if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
  3804. pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
  3805. ist->wrap_correction_done = 0;
  3806. }
  3807. }
  3808. /* add the stream-global side data to the first packet */
  3809. if (ist->nb_packets == 1) {
  3810. for (i = 0; i < ist->st->nb_side_data; i++) {
  3811. AVPacketSideData *src_sd = &ist->st->side_data[i];
  3812. uint8_t *dst_data;
  3813. if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
  3814. continue;
  3815. if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
  3816. continue;
  3817. dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
  3818. if (!dst_data)
  3819. exit_program(1);
  3820. memcpy(dst_data, src_sd->data, src_sd->size);
  3821. }
  3822. }
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts *= ist->ts_scale;
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts *= ist->ts_scale;
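
    /* First discontinuity check: only for formats flagged AVFMT_TS_DISCONT and
     * before any dts prediction exists for this stream, compare the packet dts
     * against the last timestamp seen anywhere in this file and fold large
     * jumps (beyond -dts_delta_threshold) into ifile->ts_offset. */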
    pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts &&
        (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t delta = pkt_dts - ifile->last_ts;
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
            delta >  1LL*dts_delta_threshold*AV_TIME_BASE) {
            ifile->ts_offset -= delta;
            av_log(NULL, AV_LOG_DEBUG,
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }
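
    /* Offset the timestamps by the duration already consumed from this input,
     * so that they keep increasing across -stream_loop iterations. */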
    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE) {
        pkt.pts += duration;
        ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
        ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += duration;
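
    /* Second discontinuity check: compare against the dts predicted from the
     * previous packet of this stream. For AVFMT_TS_DISCONT formats the jump is
     * absorbed into ts_offset; otherwise timestamps more than
     * -dts_error_threshold away from the prediction are dropped as invalid. */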
    pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        !copy_ts) {
        int64_t delta = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity for stream #%d:%d "
                       "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
                       ist->file_index, ist->st->index, ist->st->id,
                       av_get_media_type_string(ist->dec_ctx->codec_type),
                       delta, ifile->ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            if (delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                pkt.dts = AV_NOPTS_VALUE;
            }
            if (pkt.pts != AV_NOPTS_VALUE) {
                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta = pkt_pts - ist->next_dts;
                if (delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                    delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                    pkt.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }
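
    /* Let the sub2video filter inputs know that time is advancing, then hand
     * the packet to the decoder or to the streamcopy path. */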
    sub2video_heartbeat(ist, pkt.pts);

    process_input_packet(ist, &pkt, 0);

discard_packet:
    av_packet_unref(&pkt);

    return 0;
}

/**
 * Perform a step of transcoding for the specified filter graph.
 *
 * @param[in]  graph     filter graph to consider
 * @param[out] best_ist  input stream from which a frame would allow transcoding to continue
 * @return  0 for success, <0 for error
 */
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
{
    int i, ret;
    int nb_requests, nb_requests_max = 0;
    InputFilter *ifilter;
    InputStream *ist;

    *best_ist = NULL;
    ret = avfilter_graph_request_oldest(graph->graph);
    if (ret >= 0)
        return reap_filters(0);

    if (ret == AVERROR_EOF) {
        ret = reap_filters(1);
        for (i = 0; i < graph->nb_outputs; i++)
            close_output_stream(graph->outputs[i]->ost);
        return ret;
    }
    if (ret != AVERROR(EAGAIN))
        return ret;
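
    /* The graph needs more input (EAGAIN): pick the input stream whose buffer
     * source has rejected the most frame requests, i.e. the one the graph is
     * currently most starved for, and let the caller feed it. */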
    for (i = 0; i < graph->nb_inputs; i++) {
        ifilter = graph->inputs[i];
        ist = ifilter->ist;
        if (input_files[ist->file_index]->eagain ||
            input_files[ist->file_index]->eof_reached)
            continue;
        nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
        if (nb_requests > nb_requests_max) {
            nb_requests_max = nb_requests;
            *best_ist = ist;
        }
    }

    if (!*best_ist)
        for (i = 0; i < graph->nb_outputs; i++)
            graph->outputs[i]->ost->unavailable = 1;

    return 0;
}

/**
 * Run a single step of transcoding.
 *
 * @return  0 for success, <0 for error
 */
static int transcode_step(void)
{
    OutputStream *ost;
    InputStream *ist = NULL;
    int ret;

    ost = choose_output();
    if (!ost) {
        if (got_eagain()) {
            reset_eagain();
            av_usleep(10000);
            return 0;
        }
        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
        return AVERROR_EOF;
    }
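
    /* The chosen output feeds a filtergraph that has not been configured yet;
     * once every input of that graph has seen a frame and knows its format,
     * the graph can be initialized. */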
    if (ost->filter && !ost->filter->graph->graph) {
        if (ifilter_has_all_input_formats(ost->filter->graph)) {
            ret = configure_filtergraph(ost->filter->graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
                return ret;
            }
        }
    }

    if (ost->filter && ost->filter->graph->graph) {
        if (!ost->initialized) {
            char error[1024] = {0};
            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }
        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
            return ret;
        if (!ist)
            return 0;
    } else if (ost->filter) {
        int i;
        for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
            InputFilter *ifilter = ost->filter->graph->inputs[i];
            if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
                ist = ifilter->ist;
                break;
            }
        }
        if (!ist) {
            ost->inputs_done = 1;
            return 0;
        }
    } else {
        av_assert0(ost->source_index >= 0);
        ist = input_streams[ost->source_index];
    }

    ret = process_input(ist->file_index);
    if (ret == AVERROR(EAGAIN)) {
        if (input_files[ist->file_index]->eagain)
            ost->unavailable = 1;
        return 0;
    }

    if (ret < 0)
        return ret == AVERROR_EOF ? 0 : ret;

    return reap_filters(0);
}

/*
 * The following code is the main loop of the file converter
 */
static int transcode(void)
{
    int ret, i;
    AVFormatContext *os;
    OutputStream *ost;
    InputStream *ist;
    int64_t timer_start;
    int64_t total_packets_written = 0;

    ret = transcode_init();
    if (ret < 0)
        goto fail;

    if (stdin_interaction) {
        av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
    }

    timer_start = av_gettime_relative();
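
    /* With thread support, every input file gets its own reader thread and
     * packets are handed over through a per-file queue. */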
#if HAVE_THREADS
    if ((ret = init_input_threads()) < 0)
        goto fail;
#endif

    while (!received_sigterm) {
        int64_t cur_time = av_gettime_relative();

        /* if 'q' pressed, exits */
        if (stdin_interaction)
            if (check_keyboard_interaction(cur_time) < 0)
                break;

        /* check if there's any stream where output is still needed */
        if (!need_output()) {
            av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
            break;
        }

        ret = transcode_step();
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            break;
        }

        /* dump report using the first video and audio output streams */
        print_report(0, timer_start, cur_time);
    }
#if HAVE_THREADS
    free_input_threads();
#endif

    /* at the end of stream, we must flush the decoder buffers */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (!input_files[ist->file_index]->eof_reached) {
            process_input_packet(ist, NULL, 0);
        }
    }
    flush_encoders();

    term_exit();

    /* write the trailer if needed and close file */
    for (i = 0; i < nb_output_files; i++) {
        os = output_files[i]->ctx;
        if (!output_files[i]->header_written) {
            av_log(NULL, AV_LOG_ERROR,
                   "Nothing was written into output file %d (%s), because "
                   "at least one of its streams received no packets.\n",
                   i, os->url);
            continue;
        }
        if ((ret = av_write_trailer(os)) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
            if (exit_on_error)
                exit_program(1);
        }
    }

    /* dump report using the first video and audio output streams */
    print_report(1, timer_start, av_gettime_relative());

    /* close each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (ost->encoding_needed) {
            av_freep(&ost->enc_ctx->stats_in);
        }
        total_packets_written += ost->packets_written;
    }

    if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
        av_log(NULL, AV_LOG_FATAL, "Empty output\n");
        exit_program(1);
    }

    /* close each decoder */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (ist->decoding_needed) {
            avcodec_close(ist->dec_ctx);
            if (ist->hwaccel_uninit)
                ist->hwaccel_uninit(ist->dec_ctx);
        }
    }

    av_buffer_unref(&hw_device_ctx);
    hw_device_free_all();

    /* finished ! */
    ret = 0;

 fail:
#if HAVE_THREADS
    free_input_threads();
#endif

    if (output_streams) {
        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];
            if (ost) {
                if (ost->logfile) {
                    if (fclose(ost->logfile))
                        av_log(NULL, AV_LOG_ERROR,
                               "Error closing logfile, loss of information possible: %s\n",
                               av_err2str(AVERROR(errno)));
                    ost->logfile = NULL;
                }
                av_freep(&ost->forced_kf_pts);
                av_freep(&ost->apad);
                av_freep(&ost->disposition);
                av_dict_free(&ost->encoder_opts);
                av_dict_free(&ost->sws_dict);
                av_dict_free(&ost->swr_opts);
                av_dict_free(&ost->resample_opts);
            }
        }
    }

    return ret;
}
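
/* Sample wall-clock, user and system CPU time in microseconds: getrusage()
 * where available, GetProcessTimes() on Windows (FILETIME counts 100 ns
 * intervals, hence the division by 10). */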
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
{
    BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
#if HAVE_GETRUSAGE
    struct rusage rusage;

    getrusage(RUSAGE_SELF, &rusage);
    time_stamps.user_usec =
        (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
    time_stamps.sys_usec =
        (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    time_stamps.user_usec =
        ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
    time_stamps.sys_usec =
        ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
#else
    time_stamps.user_usec = time_stamps.sys_usec = 0;
#endif
    return time_stamps;
}
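
/* Peak memory use of the process in bytes; ru_maxrss is assumed to be in
 * kilobytes (as on Linux), hence the multiplication by 1024, while Windows
 * reports the peak pagefile usage directly. */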
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}

static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}

int main(int argc, char **argv)
{
    int i, ret;
    BenchmarkTimeStamps ti;

    init_dynload();

    register_exit(ffmpeg_cleanup);

    setvbuf(stderr, NULL, _IONBF, 0); /* win32 runtime needs this */

    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    parse_loglevel(argc, argv, options);

    if (argc > 1 && !strcmp(argv[1], "-d")) {
        run_as_daemon = 1;
        av_log_set_callback(log_callback_null);
        argc--;
        argv++;
    }

#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
    avformat_network_init();

    show_banner(argc, argv, options);

    /* parse options and open all input/output files */
    ret = ffmpeg_parse_options(argc, argv);
    if (ret < 0)
        exit_program(1);

    if (nb_output_files <= 0 && nb_input_files == 0) {
        show_usage();
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit_program(1);
    }

    /* file converter / grab */
    if (nb_output_files <= 0) {
        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
        exit_program(1);
    }
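
    /* An SDP description is only wanted when every output file uses the RTP
     * muxer; a single non-RTP output disables it. */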
    for (i = 0; i < nb_output_files; i++) {
        if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
            want_sdp = 0;
    }

    current_time = ti = get_benchmark_time_stamps();
    if (transcode() < 0)
        exit_program(1);
    if (do_benchmark) {
        int64_t utime, stime, rtime;
        current_time = get_benchmark_time_stamps();
        utime = current_time.user_usec - ti.user_usec;
        stime = current_time.sys_usec - ti.sys_usec;
        rtime = current_time.real_usec - ti.real_usec;
        av_log(NULL, AV_LOG_INFO,
               "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
               utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
    }
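
    /* Report decode statistics and fail with a distinctive exit code if the
     * fraction of frames that failed to decode exceeds -max_error_rate. */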
    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
           decode_error_stat[0], decode_error_stat[1]);
    if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
        exit_program(69);

    exit_program(received_nb_signals ? 255 : main_return_code);

    return main_return_code;
}