vp9_encodeframe.c

/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <limits.h>
#include <math.h>
#include <stdio.h>

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "./vpx_config.h"

#include "vpx_ports/mem.h"
#include "vpx_ports/vpx_timer.h"

#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"

#include "vp9/encoder/vp9_aq_complexity.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_ethread.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"

static void encode_superblock(VP9_COMP *cpi, ThreadData *td,
                              TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx);

// This is used as a reference when computing the source variance for the
// purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
// which will be faster.
static const uint8_t VP9_VAR_OFFS[64] = {
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128
};

#if CONFIG_VP9_HIGHBITDEPTH
static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128,
  128, 128, 128, 128, 128, 128, 128, 128
};

static const uint16_t VP9_HIGH_VAR_OFFS_10[64] = {
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4,
  128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4, 128*4
};

static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16,
  128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16, 128*16
};
#endif  // CONFIG_VP9_HIGHBITDEPTH
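
// Note: the vf() call below measures the source block against the flat
// VP9_VAR_OFFS buffer (all 128), so the variance it returns is the source
// block's own variance (subtracting a constant does not change variance).
// ROUND_POWER_OF_TWO() by num_pels_log2_lookup[bs] then normalizes the
// result to a per-pixel value, e.g. a 64x64 block's total is divided by
// 4096.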
unsigned int vp9_get_sby_perpixel_variance(VP9_COMP *cpi,
                                           const struct buf_2d *ref,
                                           BLOCK_SIZE bs) {
  unsigned int sse;
  const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                                              VP9_VAR_OFFS, 0, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}

#if CONFIG_VP9_HIGHBITDEPTH
unsigned int vp9_high_get_sby_perpixel_variance(
    VP9_COMP *cpi, const struct buf_2d *ref, BLOCK_SIZE bs, int bd) {
  unsigned int var, sse;
  switch (bd) {
    case 10:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10),
                               0, &sse);
      break;
    case 12:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12),
                               0, &sse);
      break;
    case 8:
    default:
      var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
                               CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8),
                               0, &sse);
      break;
  }
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH

static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
                                                   const struct buf_2d *ref,
                                                   int mi_row, int mi_col,
                                                   BLOCK_SIZE bs) {
  unsigned int sse, var;
  uint8_t *last_y;
  const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME);

  assert(last != NULL);
  last_y =
      &last->y_buffer[mi_row * MI_SIZE * last->y_stride + mi_col * MI_SIZE];
  var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride, last_y, last->y_stride, &sse);
  return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}

static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x,
                                                   int mi_row,
                                                   int mi_col) {
  unsigned int var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
                                                    mi_row, mi_col,
                                                    BLOCK_64X64);
  if (var < 8)
    return BLOCK_64X64;
  else if (var < 128)
    return BLOCK_32X32;
  else if (var < 2048)
    return BLOCK_16X16;
  else
    return BLOCK_8X8;
}

// Lighter version of set_offsets that only sets the mode info
// pointers.
static INLINE void set_mode_info_offsets(VP9_COMMON *const cm,
                                         MACROBLOCKD *const xd,
                                         int mi_row,
                                         int mi_col) {
  const int idx_str = xd->mi_stride * mi_row + mi_col;
  xd->mi = cm->mi_grid_visible + idx_str;
  xd->mi[0] = cm->mi + idx_str;
}
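
// set_offsets() prepares all per-block encoder state for the block at
// (mi_row, mi_col): mode-info and skip-context pointers, destination and
// source plane pointers, motion-vector search limits, the rate-distortion
// multipliers, and, when segmentation is enabled, the block's segment id,
// quantizers and encode breakout.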
static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
                        MACROBLOCK *const x, int mi_row, int mi_col,
                        BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  const struct segmentation *const seg = &cm->seg;

  set_skip_context(xd, mi_row, mi_col);

  set_mode_info_offsets(cm, xd, mi_row, mi_col);

  mbmi = &xd->mi[0]->mbmi;

  // Set up destination pointers.
  vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);

  // Set up limit values for MV components.
  // MVs beyond this range do not produce new/different prediction blocks.
  x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
  x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
  x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;

  // Set up distance of MB to edge of frame in 1/8th pel units.
  assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
  set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
                 cm->mi_rows, cm->mi_cols);

  // Set up source buffers.
  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);

  // R/D setup.
  x->rddiv = cpi->rd.RDDIV;
  x->rdmult = cpi->rd.RDMULT;

  // Set up segment ID.
  if (seg->enabled) {
    if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    vp9_init_plane_quantizers(cpi, x);

    x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
  } else {
    mbmi->segment_id = 0;
    x->encode_breakout = cpi->encode_breakout;
  }
}

static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
                                      int mi_row, int mi_col,
                                      BLOCK_SIZE bsize) {
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
  int i, j;
  for (j = 0; j < block_height; ++j)
    for (i = 0; i < block_width; ++i) {
      if (mi_row + j < cm->mi_rows && mi_col + i < cm->mi_cols)
        xd->mi[j * xd->mi_stride + i] = xd->mi[0];
    }
}

static void set_block_size(VP9_COMP *const cpi,
                           MACROBLOCKD *const xd,
                           int mi_row, int mi_col,
                           BLOCK_SIZE bsize) {
  if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
    set_mode_info_offsets(&cpi->common, xd, mi_row, mi_col);
    xd->mi[0]->mbmi.sb_type = bsize;
  }
}
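
// The types below form a fixed-depth "variance tree" that mirrors the
// partition tree of a 64x64 superblock: each vNxN node carries the
// accumulated variances for that square block (the none/horz/vert
// partition candidates) plus four children of the next size down, ending
// at v4x4 whose children are plain var leaves.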
typedef struct {
  int64_t sum_square_error;
  int64_t sum_error;
  int log2_count;
  int variance;
} var;

typedef struct {
  var none;
  var horz[2];
  var vert[2];
} partition_variance;

typedef struct {
  partition_variance part_variances;
  var split[4];
} v4x4;

typedef struct {
  partition_variance part_variances;
  v4x4 split[4];
} v8x8;

typedef struct {
  partition_variance part_variances;
  v8x8 split[4];
} v16x16;

typedef struct {
  partition_variance part_variances;
  v16x16 split[4];
} v32x32;

typedef struct {
  partition_variance part_variances;
  v32x32 split[4];
} v64x64;

typedef struct {
  partition_variance *part_variances;
  var *split[4];
} variance_node;

typedef enum {
  V16X16,
  V32X32,
  V64X64,
} TREE_LEVEL;

static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
  int i;
  node->part_variances = NULL;
  switch (bsize) {
    case BLOCK_64X64: {
      v64x64 *vt = (v64x64 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_32X32: {
      v32x32 *vt = (v32x32 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_16X16: {
      v16x16 *vt = (v16x16 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_8X8: {
      v8x8 *vt = (v8x8 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i].part_variances.none;
      break;
    }
    case BLOCK_4X4: {
      v4x4 *vt = (v4x4 *) data;
      node->part_variances = &vt->part_variances;
      for (i = 0; i < 4; i++)
        node->split[i] = &vt->split[i];
      break;
    }
    default: {
      assert(0);
      break;
    }
  }
}

// Set variance values given sum square error, sum error, count.
static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
  v->sum_square_error = s2;
  v->sum_error = s;
  v->log2_count = c;
}
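
// get_variance() finalizes a var node with a fixed-point population
// variance: with n = 2^log2_count samples,
//   variance = 256 * (sum_square_error - sum_error^2 / n) / n,
// i.e. the per-sample variance scaled by 256. For example, with
// log2_count = 2 (n = 4), sum_error = 8 and sum_square_error = 32:
//   (256 * (32 - (64 >> 2))) >> 2 = (256 * 16) >> 2 = 1024,
// which is 4.0 scaled by 256.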
static void get_variance(var *v) {
  v->variance = (int)(256 * (v->sum_square_error -
      ((v->sum_error * v->sum_error) >> v->log2_count)) >> v->log2_count);
}

static void sum_2_variances(const var *a, const var *b, var *r) {
  assert(a->log2_count == b->log2_count);
  fill_variance(a->sum_square_error + b->sum_square_error,
                a->sum_error + b->sum_error, a->log2_count + 1, r);
}
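
// The four children of a node are laid out in raster order: split[0] is
// top-left, split[1] top-right, split[2] bottom-left, split[3]
// bottom-right. fill_variance_tree() therefore pairs 0+1 and 2+3 for the
// two halves of a horizontal split, 0+2 and 1+3 for a vertical split, and
// combines the two vertical halves for the un-split ("none") variance.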
static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
  variance_node node;
  tree_to_node(data, bsize, &node);
  sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
  sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
  sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
  sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
  sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
                  &node.part_variances->none);
}
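
// set_vt_partitioning() returns 1 if it assigned a partitioning at this
// level (the whole square block, or a horizontal/vertical split into two
// rectangles), and 0 if the caller must recurse into the four square
// children. force_split == 1 short-circuits straight to 0.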
static int set_vt_partitioning(VP9_COMP *cpi,
                               MACROBLOCKD *const xd,
                               void *data,
                               BLOCK_SIZE bsize,
                               int mi_row,
                               int mi_col,
                               int64_t threshold,
                               BLOCK_SIZE bsize_min,
                               int force_split) {
  VP9_COMMON *const cm = &cpi->common;
  variance_node vt;
  const int block_width = num_8x8_blocks_wide_lookup[bsize];
  const int block_height = num_8x8_blocks_high_lookup[bsize];
  const int low_res = (cm->width <= 352 && cm->height <= 288);

  assert(block_height == block_width);
  tree_to_node(data, bsize, &vt);

  if (force_split == 1)
    return 0;

  // For bsize == bsize_min (16x16/8x8 for 8x8/4x4 downsampling), select this
  // block size if the variance is below threshold; otherwise split is
  // selected. No check for a vert/horiz split, as there are too few samples
  // for variance.
  if (bsize == bsize_min) {
    // Variance already computed to set the force_split.
    if (low_res || cm->frame_type == KEY_FRAME)
      get_variance(&vt.part_variances->none);
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, xd, mi_row, mi_col, bsize);
      return 1;
    }
    return 0;
  } else if (bsize > bsize_min) {
    // Variance already computed to set the force_split.
    if (low_res || cm->frame_type == KEY_FRAME)
      get_variance(&vt.part_variances->none);
    // For key frames: take split for bsize above 32X32 or very high variance.
    if (cm->frame_type == KEY_FRAME &&
        (bsize > BLOCK_32X32 ||
         vt.part_variances->none.variance > (threshold << 4))) {
      return 0;
    }
    // If variance is low, take the bsize (no split).
    if (mi_col + block_width / 2 < cm->mi_cols &&
        mi_row + block_height / 2 < cm->mi_rows &&
        vt.part_variances->none.variance < threshold) {
      set_block_size(cpi, xd, mi_row, mi_col, bsize);
      return 1;
    }

    // Check vertical split.
    if (mi_row + block_height / 2 < cm->mi_rows) {
      BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
      get_variance(&vt.part_variances->vert[0]);
      get_variance(&vt.part_variances->vert[1]);
      if (vt.part_variances->vert[0].variance < threshold &&
          vt.part_variances->vert[1].variance < threshold &&
          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
        set_block_size(cpi, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, xd, mi_row, mi_col + block_width / 2, subsize);
        return 1;
      }
    }

    // Check horizontal split.
    if (mi_col + block_width / 2 < cm->mi_cols) {
      BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
      get_variance(&vt.part_variances->horz[0]);
      get_variance(&vt.part_variances->horz[1]);
      if (vt.part_variances->horz[0].variance < threshold &&
          vt.part_variances->horz[1].variance < threshold &&
          get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
        set_block_size(cpi, xd, mi_row, mi_col, subsize);
        set_block_size(cpi, xd, mi_row + block_height / 2, mi_col, subsize);
        return 1;
      }
    }

    return 0;
  }
  return 0;
}

// Set the variance split thresholds for the following block sizes:
// 0 - threshold_64x64, 1 - threshold_32x32, 2 - threshold_16x16,
// 3 - vbp_threshold_8x8. vbp_threshold_8x8 (to split to 4x4 partition) is
// currently only used on key frames.
static void set_vbp_thresholds(VP9_COMP *cpi, int64_t thresholds[], int q) {
  VP9_COMMON *const cm = &cpi->common;
  const int is_key_frame = (cm->frame_type == KEY_FRAME);
  const int threshold_multiplier = is_key_frame ? 20 : 1;
  const int64_t threshold_base = (int64_t)(threshold_multiplier *
      cpi->y_dequant[q][1]);
  if (is_key_frame) {
    thresholds[0] = threshold_base;
    thresholds[1] = threshold_base >> 2;
    thresholds[2] = threshold_base >> 2;
    thresholds[3] = threshold_base << 2;
  } else {
    thresholds[1] = threshold_base;
    if (cm->width <= 352 && cm->height <= 288) {
      thresholds[0] = threshold_base >> 2;
      thresholds[2] = threshold_base << 3;
    } else {
      thresholds[0] = threshold_base;
      thresholds[1] = (5 * threshold_base) >> 2;
      if (cm->width >= 1920 && cm->height >= 1080)
        thresholds[1] = (7 * threshold_base) >> 2;
      thresholds[2] = threshold_base << cpi->oxcf.speed;
    }
  }
}
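
// For illustration (numbers not from the source): on a key frame with
// y_dequant[q][1] == 100, threshold_base = 20 * 100 = 2000, giving
// thresholds = {2000, 500, 500, 8000}; on a non-key 1080p frame the base
// is 100, so thresholds[1] = (7 * 100) >> 2 = 175 and thresholds[2]
// scales with the speed setting. vp9_set_variance_partition_thresholds()
// below seeds the per-frame cpi->vbp_* values from the frame-level q;
// choose_partitioning() may later override them per superblock for
// boosted cyclic-refresh segments.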
void vp9_set_variance_partition_thresholds(VP9_COMP *cpi, int q) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;
  const int is_key_frame = (cm->frame_type == KEY_FRAME);
  if (sf->partition_search_type != VAR_BASED_PARTITION &&
      sf->partition_search_type != REFERENCE_PARTITION) {
    return;
  } else {
    set_vbp_thresholds(cpi, cpi->vbp_thresholds, q);
    // The thresholds below are not changed locally.
    if (is_key_frame) {
      cpi->vbp_threshold_sad = 0;
      cpi->vbp_bsize_min = BLOCK_8X8;
    } else {
      if (cm->width <= 352 && cm->height <= 288)
        cpi->vbp_threshold_sad = 100;
      else
        cpi->vbp_threshold_sad = (cpi->y_dequant[q][1] << 1) > 1000 ?
            (cpi->y_dequant[q][1] << 1) : 1000;
      cpi->vbp_bsize_min = BLOCK_16X16;
    }
    cpi->vbp_threshold_minmax = 15 + (q >> 3);
  }
}
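
// For each of the four 8x8 subblocks of a 16x16 block, vp9_minmax_8x8()
// reports the min and max source/prediction difference; the helper below
// returns the spread between the largest and smallest per-subblock ranges.
// A large spread suggests the 16x16 block is inhomogeneous (e.g. a moving
// edge), which choose_partitioning() uses to force an 8x8 split.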
// Compute the minmax over the 8x8 subblocks.
static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
                              int dp, int x16_idx, int y16_idx,
#if CONFIG_VP9_HIGHBITDEPTH
                              int highbd_flag,
#endif
                              int pixels_wide,
                              int pixels_high) {
  int k;
  int minmax_max = 0;
  int minmax_min = 255;
  // Loop over the 4 8x8 subblocks.
  for (k = 0; k < 4; k++) {
    int x8_idx = x16_idx + ((k & 1) << 3);
    int y8_idx = y16_idx + ((k >> 1) << 3);
    int min = 0;
    int max = 0;
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        vp9_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                              d + y8_idx * dp + x8_idx, dp,
                              &min, &max);
      } else {
        vp9_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                       d + y8_idx * dp + x8_idx, dp,
                       &min, &max);
      }
#else
      vp9_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                     d + y8_idx * dp + x8_idx, dp,
                     &min, &max);
#endif
      if ((max - min) > minmax_max)
        minmax_max = (max - min);
      if ((max - min) < minmax_min)
        minmax_min = (max - min);
    }
  }
  return (minmax_max - minmax_min);
}
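
// The two fill_variance_*avg() helpers below build the leaves of the
// variance tree from down-sampled data: each 4x4 (or 8x8) subblock is
// reduced to its average pixel value, and the leaf gets
// sum = s_avg - d_avg and sse = sum * sum with log2_count = 0. The leaf
// "variance" therefore measures how much the block averages differ
// between source and prediction, not the full per-pixel variance. On key
// frames d_avg stays at the flat 128, matching the VP9_VAR_OFFS reference.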
static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x8_idx, int y8_idx, v8x8 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide,
                                 int pixels_high,
                                 int is_key_frame) {
  int k;
  for (k = 0; k < 4; k++) {
    int x4_idx = x8_idx + ((k & 1) << 2);
    int y4_idx = y8_idx + ((k >> 1) << 2);
    unsigned int sse = 0;
    int sum = 0;
    if (x4_idx < pixels_wide && y4_idx < pixels_high) {
      int s_avg;
      int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vp9_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame)
          d_avg = vp9_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      } else {
        s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
        if (!is_key_frame)
          d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp);
      }
#else
      s_avg = vp9_avg_4x4(s + y4_idx * sp + x4_idx, sp);
      if (!is_key_frame)
        d_avg = vp9_avg_4x4(d + y4_idx * dp + x4_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
  }
}

static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
                                 int dp, int x16_idx, int y16_idx, v16x16 *vst,
#if CONFIG_VP9_HIGHBITDEPTH
                                 int highbd_flag,
#endif
                                 int pixels_wide,
                                 int pixels_high,
                                 int is_key_frame) {
  int k;
  for (k = 0; k < 4; k++) {
    int x8_idx = x16_idx + ((k & 1) << 3);
    int y8_idx = y16_idx + ((k >> 1) << 3);
    unsigned int sse = 0;
    int sum = 0;
    if (x8_idx < pixels_wide && y8_idx < pixels_high) {
      int s_avg;
      int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
      if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
        s_avg = vp9_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame)
          d_avg = vp9_highbd_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      } else {
        s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
        if (!is_key_frame)
          d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
      }
#else
      s_avg = vp9_avg_8x8(s + y8_idx * sp + x8_idx, sp);
      if (!is_key_frame)
        d_avg = vp9_avg_8x8(d + y8_idx * dp + x8_idx, dp);
#endif
      sum = s_avg - d_avg;
      sse = sum * sum;
    }
    fill_variance(sse, sum, 0, &vst->split[k].part_variances.none);
  }
}
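
// The overall flow of choose_partitioning() is:
//   1. set_offsets() for the 64x64 superblock; on inter frames pick the
//      better of LAST/GOLDEN by coarse SAD and build a 64x64 prediction,
//      otherwise compare the source against the flat 128 reference.
//   2. Fill the variance tree bottom-up from 8x8 (or 4x4) averages,
//      setting force_split flags wherever a level's variance or the 8x8
//      minmax exceeds its threshold.
//   3. Walk the tree top-down with set_vt_partitioning(), assigning the
//      largest block size whose variance clears the threshold at each
//      level.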
// This function chooses partitioning based on the variance between the source
// and the reconstructed last frame, where variance is computed for
// down-sampled inputs.
static int choose_partitioning(VP9_COMP *cpi,
                               const TileInfo *const tile,
                               MACROBLOCK *x,
                               int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *xd = &x->e_mbd;
  int i, j, k, m;
  v64x64 vt;
  v16x16 vt2[16];
  int force_split[21];
  uint8_t *s;
  const uint8_t *d;
  int sp;
  int dp;
  int pixels_wide = 64, pixels_high = 64;
  int64_t thresholds[4] = {cpi->vbp_thresholds[0], cpi->vbp_thresholds[1],
      cpi->vbp_thresholds[2], cpi->vbp_thresholds[3]};

  // Always use 4x4 partition for key frames.
  const int is_key_frame = (cm->frame_type == KEY_FRAME);
  const int use_4x4_partition = is_key_frame;
  const int low_res = (cm->width <= 352 && cm->height <= 288);
  int variance4x4downsample[16];

  int segment_id = CR_SEGMENT_ID_BASE;
  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
    const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map :
                                                    cm->last_frame_seg_map;
    segment_id = vp9_get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
    if (cyclic_refresh_segment_id_boosted(segment_id)) {
      int q = vp9_get_qindex(&cm->seg, segment_id, cm->base_qindex);
      set_vbp_thresholds(cpi, thresholds, q);
    }
  }

  set_offsets(cpi, tile, x, mi_row, mi_col, BLOCK_64X64);

  if (xd->mb_to_right_edge < 0)
    pixels_wide += (xd->mb_to_right_edge >> 3);
  if (xd->mb_to_bottom_edge < 0)
    pixels_high += (xd->mb_to_bottom_edge >> 3);

  s = x->plane[0].src.buf;
  sp = x->plane[0].src.stride;

  if (!is_key_frame && !(is_one_pass_cbr_svc(cpi) &&
      cpi->svc.layer_context[cpi->svc.temporal_layer_id].is_key_frame)) {
    // In the case of spatial/temporal scalable coding, the assumption here is
    // that the temporal reference frame will always be of type LAST_FRAME.
    // TODO(marpan): If that assumption is broken, we need to revisit this
    // code.
    MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
    unsigned int uv_sad;
    const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);

    const YV12_BUFFER_CONFIG *yv12_g = NULL;
    unsigned int y_sad, y_sad_g;
    const BLOCK_SIZE bsize = BLOCK_32X32
        + (mi_col + 4 < cm->mi_cols) * 2 + (mi_row + 4 < cm->mi_rows);

    assert(yv12 != NULL);

    if (!(is_one_pass_cbr_svc(cpi) && cpi->svc.spatial_layer_id)) {
      // For now, GOLDEN will not be used for non-zero spatial layers, since
      // it may not be a temporal reference.
      yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
    }

    if (yv12_g && yv12_g != yv12) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      y_sad_g = cpi->fn_ptr[bsize].sdf(x->plane[0].src.buf,
                                       x->plane[0].src.stride,
                                       xd->plane[0].pre[0].buf,
                                       xd->plane[0].pre[0].stride);
    } else {
      y_sad_g = UINT_MAX;
    }

    vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
                         &cm->frame_refs[LAST_FRAME - 1].sf);
    mbmi->ref_frame[0] = LAST_FRAME;
    mbmi->ref_frame[1] = NONE;
    mbmi->sb_type = BLOCK_64X64;
    mbmi->mv[0].as_int = 0;
    mbmi->interp_filter = BILINEAR;

    y_sad = vp9_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
    if (y_sad_g < y_sad) {
      vp9_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                           &cm->frame_refs[GOLDEN_FRAME - 1].sf);
      mbmi->ref_frame[0] = GOLDEN_FRAME;
      mbmi->mv[0].as_int = 0;
      y_sad = y_sad_g;
    } else {
      x->pred_mv[LAST_FRAME] = mbmi->mv[0].as_mv;
    }

    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);

    for (i = 1; i <= 2; ++i) {
      struct macroblock_plane *p = &x->plane[i];
      struct macroblockd_plane *pd = &xd->plane[i];
      const BLOCK_SIZE bs = get_plane_block_size(bsize, pd);

      if (bs == BLOCK_INVALID)
        uv_sad = UINT_MAX;
      else
        uv_sad = cpi->fn_ptr[bs].sdf(p->src.buf, p->src.stride,
                                     pd->dst.buf, pd->dst.stride);

      x->color_sensitivity[i - 1] = uv_sad > (y_sad >> 2);
    }

    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;

    // If the y_sad is very small, take 64x64 as the partition and exit.
    // Don't check on boosted segments for now, as 64x64 is suppressed there.
    if (segment_id == CR_SEGMENT_ID_BASE &&
        y_sad < cpi->vbp_threshold_sad) {
      const int block_width = num_8x8_blocks_wide_lookup[BLOCK_64X64];
      const int block_height = num_8x8_blocks_high_lookup[BLOCK_64X64];
      if (mi_col + block_width / 2 < cm->mi_cols &&
          mi_row + block_height / 2 < cm->mi_rows) {
        set_block_size(cpi, xd, mi_row, mi_col, BLOCK_64X64);
        return 0;
      }
    }
  } else {
    d = VP9_VAR_OFFS;
    dp = 0;
#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      switch (xd->bd) {
        case 10:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10);
          break;
        case 12:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_12);
          break;
        case 8:
        default:
          d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8);
          break;
      }
    }
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }

  // Index for force_split: 0 for 64x64, 1-4 for 32x32 blocks,
  // 5-20 for the 16x16 blocks.
  force_split[0] = 0;
  // Fill in the entire tree of 8x8 (or 4x4 under some conditions) variances
  // for splits.
  for (i = 0; i < 4; i++) {
    const int x32_idx = ((i & 1) << 5);
    const int y32_idx = ((i >> 1) << 5);
    const int i2 = i << 2;
    force_split[i + 1] = 0;
    for (j = 0; j < 4; j++) {
      const int x16_idx = x32_idx + ((j & 1) << 4);
      const int y16_idx = y32_idx + ((j >> 1) << 4);
      const int split_index = 5 + i2 + j;
      v16x16 *vst = &vt.split[i].split[j];
      force_split[split_index] = 0;
      variance4x4downsample[i2 + j] = 0;
      if (!is_key_frame) {
        fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst,
#if CONFIG_VP9_HIGHBITDEPTH
                             xd->cur_buf->flags,
#endif
                             pixels_wide,
                             pixels_high,
                             is_key_frame);
        fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
        get_variance(&vt.split[i].split[j].part_variances.none);
        if (vt.split[i].split[j].part_variances.none.variance >
            thresholds[2]) {
          // 16X16 variance is above threshold for split, so force split to
          // 8x8 for this 16x16 block (this also forces splits for upper
          // levels).
          force_split[split_index] = 1;
          force_split[i + 1] = 1;
          force_split[0] = 1;
        } else if (vt.split[i].split[j].part_variances.none.variance >
                   thresholds[1] &&
                   !cyclic_refresh_segment_id_boosted(segment_id)) {
          // We have some nominal amount of 16x16 variance (based on average);
          // compute the minmax over the 8x8 sub-blocks, and if above
          // threshold, force split to 8x8 for this 16x16 block.
          int minmax = compute_minmax_8x8(s, sp, d, dp, x16_idx, y16_idx,
#if CONFIG_VP9_HIGHBITDEPTH
                                          xd->cur_buf->flags,
#endif
                                          pixels_wide, pixels_high);
          if (minmax > cpi->vbp_threshold_minmax) {
            force_split[split_index] = 1;
            force_split[i + 1] = 1;
            force_split[0] = 1;
          }
        }
      }
      // TODO(marpan): There is an issue with variance based on 4x4 average in
      // svc mode, don't allow it for now.
      if (is_key_frame || (low_res && !cpi->use_svc &&
          vt.split[i].split[j].part_variances.none.variance >
          (thresholds[1] << 1))) {
        force_split[split_index] = 0;
        // Go down to 4x4 down-sampling for variance.
        variance4x4downsample[i2 + j] = 1;
        for (k = 0; k < 4; k++) {
          int x8_idx = x16_idx + ((k & 1) << 3);
          int y8_idx = y16_idx + ((k >> 1) << 3);
          v8x8 *vst2 = is_key_frame ? &vst->split[k] :
              &vt2[i2 + j].split[k];
          fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2,
#if CONFIG_VP9_HIGHBITDEPTH
                               xd->cur_buf->flags,
#endif
                               pixels_wide,
                               pixels_high,
                               is_key_frame);
        }
      }
    }
  }

  // Fill the rest of the variance tree by summing split partition values.
  for (i = 0; i < 4; i++) {
    const int i2 = i << 2;
    for (j = 0; j < 4; j++) {
      if (variance4x4downsample[i2 + j] == 1) {
        v16x16 *vtemp = (!is_key_frame) ? &vt2[i2 + j] :
            &vt.split[i].split[j];
        for (m = 0; m < 4; m++)
          fill_variance_tree(&vtemp->split[m], BLOCK_8X8);
        fill_variance_tree(vtemp, BLOCK_16X16);
      }
    }
    fill_variance_tree(&vt.split[i], BLOCK_32X32);
    // If variance of this 32x32 block is above the threshold, force the block
    // to split. This also forces a split on the upper (64x64) level.
    if (!force_split[i + 1]) {
      get_variance(&vt.split[i].part_variances.none);
      if (vt.split[i].part_variances.none.variance > thresholds[1]) {
        force_split[i + 1] = 1;
        force_split[0] = 1;
      }
    }
  }
  if (!force_split[0]) {
    fill_variance_tree(&vt, BLOCK_64X64);
    get_variance(&vt.part_variances.none);
  }

  // Now go through the entire structure, splitting every block size until
  // we get to one whose variance is lower than our threshold.
  if (mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
      !set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col,
                           thresholds[0], BLOCK_16X16, force_split[0])) {
    for (i = 0; i < 4; ++i) {
      const int x32_idx = ((i & 1) << 2);
      const int y32_idx = ((i >> 1) << 2);
      const int i2 = i << 2;
      if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32,
                               (mi_row + y32_idx), (mi_col + x32_idx),
                               thresholds[1], BLOCK_16X16,
                               force_split[i + 1])) {
        for (j = 0; j < 4; ++j) {
          const int x16_idx = ((j & 1) << 1);
          const int y16_idx = ((j >> 1) << 1);
          // For inter frames: if variance4x4downsample[] == 1 for this 16x16
          // block, then the variance is based on 4x4 down-sampling, so use
          // vt2 in set_vt_partitioning(), otherwise use vt.
          v16x16 *vtemp = (!is_key_frame &&
                           variance4x4downsample[i2 + j] == 1) ?
                           &vt2[i2 + j] : &vt.split[i].split[j];
          if (!set_vt_partitioning(cpi, xd, vtemp, BLOCK_16X16,
                                   mi_row + y32_idx + y16_idx,
                                   mi_col + x32_idx + x16_idx,
                                   thresholds[2],
                                   cpi->vbp_bsize_min,
                                   force_split[5 + i2 + j])) {
            for (k = 0; k < 4; ++k) {
              const int x8_idx = (k & 1);
              const int y8_idx = (k >> 1);
              if (use_4x4_partition) {
                if (!set_vt_partitioning(cpi, xd, &vtemp->split[k],
                                         BLOCK_8X8,
                                         mi_row + y32_idx + y16_idx + y8_idx,
                                         mi_col + x32_idx + x16_idx + x8_idx,
                                         thresholds[3], BLOCK_8X8, 0)) {
                  set_block_size(cpi, xd,
                                 (mi_row + y32_idx + y16_idx + y8_idx),
                                 (mi_col + x32_idx + x16_idx + x8_idx),
                                 BLOCK_4X4);
                }
              } else {
                set_block_size(cpi, xd,
                               (mi_row + y32_idx + y16_idx + y8_idx),
                               (mi_col + x32_idx + x16_idx + x8_idx),
                               BLOCK_8X8);
              }
            }
          }
        }
      }
    }
  }
  return 0;
}
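
// update_state() commits the mode decision held in ctx into the common
// mode-info grid: it copies the chosen MODE_INFO to every mi unit covered
// by the block, swaps in the coefficient buffers the winning mode used,
// refreshes segment ids for the active AQ mode and, when output_enabled
// is set, accumulates the rate-distortion diffs, mode counts and per-mi
// motion vectors that later frames read back for prediction.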
static void update_state(VP9_COMP *cpi, ThreadData *td,
                         PICK_MODE_CONTEXT *ctx,
                         int mi_row, int mi_col, BLOCK_SIZE bsize,
                         int output_enabled) {
  int i, x_idx, y;
  VP9_COMMON *const cm = &cpi->common;
  RD_COUNTS *const rdc = &td->rd_counts;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblock_plane *const p = x->plane;
  struct macroblockd_plane *const pd = xd->plane;
  MODE_INFO *mi = &ctx->mic;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  MODE_INFO *mi_addr = xd->mi[0];
  const struct segmentation *const seg = &cm->seg;
  const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
  const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
  const int x_mis = MIN(bw, cm->mi_cols - mi_col);
  const int y_mis = MIN(bh, cm->mi_rows - mi_row);
  MV_REF *const frame_mvs =
      cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
  int w, h;

  const int mis = cm->mi_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
  int max_plane;

  assert(mi->mbmi.sb_type == bsize);

  *mi_addr = *mi;

  // If segmentation is in use:
  if (seg->enabled) {
    // For in-frame complexity AQ, copy the segment id from the segment map.
    if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      mi_addr->mbmi.segment_id =
          vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }
    // Else, for cyclic refresh mode, update the segment map, set the segment
    // id and then update the quantizer.
    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
      vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row,
                                        mi_col, bsize, ctx->rate, ctx->dist,
                                        x->skip);
    }
  }

  max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1;
  for (i = 0; i < max_plane; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][1];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
    p[i].eobs = ctx->eobs_pbuf[i][1];
  }

  for (i = max_plane; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][2];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
    p[i].eobs = ctx->eobs_pbuf[i][2];
  }

  // Restore the coding context of the MB to that which was in place
  // when the mode was picked for it.
  for (y = 0; y < mi_height; y++)
    for (x_idx = 0; x_idx < mi_width; x_idx++)
      if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
          && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
        xd->mi[x_idx + y * mis] = mi_addr;
      }

  if (cpi->oxcf.aq_mode)
    vp9_init_plane_quantizers(cpi, x);

  // FIXME(rbultje) I'm pretty sure this should go to the end of this block
  // (i.e. after the output_enabled)
  if (bsize < BLOCK_32X32) {
    if (bsize < BLOCK_16X16)
      ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8];
    ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16];
  }

  if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
    mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
    mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
  }

  x->skip = ctx->skip;
  memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
         sizeof(uint8_t) * ctx->num_4x4_blk);

  if (!output_enabled)
    return;

  if (!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
    for (i = 0; i < TX_MODES; i++)
      rdc->tx_select_diff[i] += ctx->tx_rd_diff[i];
  }

#if CONFIG_INTERNAL_STATS
  if (frame_is_intra_only(cm)) {
    static const int kf_mode_index[] = {
      THR_DC        /*DC_PRED*/,
      THR_V_PRED    /*V_PRED*/,
      THR_H_PRED    /*H_PRED*/,
      THR_D45_PRED  /*D45_PRED*/,
      THR_D135_PRED /*D135_PRED*/,
      THR_D117_PRED /*D117_PRED*/,
      THR_D153_PRED /*D153_PRED*/,
      THR_D207_PRED /*D207_PRED*/,
      THR_D63_PRED  /*D63_PRED*/,
      THR_TM        /*TM_PRED*/,
    };
    ++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
  } else {
    // Note how often each mode is chosen as best.
    ++cpi->mode_chosen_counts[ctx->best_mode_index];
  }
#endif
  if (!frame_is_intra_only(cm)) {
    if (is_inter_block(mbmi)) {
      vp9_update_mv_count(td);

      if (cm->interp_filter == SWITCHABLE) {
        const int ctx = vp9_get_pred_context_switchable_interp(xd);
        ++td->counts->switchable_interp[ctx][mbmi->interp_filter];
      }
    }

    rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
    rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
    rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;

    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
      rdc->filter_diff[i] += ctx->best_filter_diff[i];
  }

  for (h = 0; h < y_mis; ++h) {
    MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
    for (w = 0; w < x_mis; ++w) {
      MV_REF *const mv = frame_mv + w;
      mv->ref_frame[0] = mi->mbmi.ref_frame[0];
      mv->ref_frame[1] = mi->mbmi.ref_frame[1];
      mv->mv[0].as_int = mi->mbmi.mv[0].as_int;
      mv->mv[1].as_int = mi->mbmi.mv[1].as_int;
    }
  }
}

void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
                          int mi_row, int mi_col) {
  uint8_t *const buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer};
  const int strides[3] = {src->y_stride, src->uv_stride, src->uv_stride};
  int i;

  // Set current frame pointer.
  x->e_mbd.cur_buf = src;

  for (i = 0; i < MAX_MB_PLANE; i++)
    setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col,
                     NULL, x->e_mbd.plane[i].subsampling_x,
                     x->e_mbd.plane[i].subsampling_y);
}
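
// set_mode_info_seg_skip() fills the current mode info with a skipped,
// zero-motion inter block (ZEROMV on LAST_FRAME, the largest transform
// size allowed by tx_mode), inheriting the interpolation filter from the
// above or left neighbor when available. It is used for blocks in segments
// with the SEG_LVL_SKIP feature active, where no residual is coded.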
static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode,
                                   RD_COST *rd_cost, BLOCK_SIZE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  INTERP_FILTER filter_ref;

  if (xd->up_available)
    filter_ref = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
  else if (xd->left_available)
    filter_ref = xd->mi[-1]->mbmi.interp_filter;
  else
    filter_ref = EIGHTTAP;

  mbmi->sb_type = bsize;
  mbmi->mode = ZEROMV;
  mbmi->tx_size = MIN(max_txsize_lookup[bsize],
                      tx_mode_to_biggest_tx_size[tx_mode]);
  mbmi->skip = 1;
  mbmi->uv_mode = DC_PRED;
  mbmi->ref_frame[0] = LAST_FRAME;
  mbmi->ref_frame[1] = NONE;
  mbmi->mv[0].as_int = 0;
  mbmi->interp_filter = filter_ref;

  xd->mi[0]->bmi[0].as_mv[0].as_int = 0;
  x->skip = 1;

  vp9_rd_cost_init(rd_cost);
}
  1048. static int set_segment_rdmult(VP9_COMP *const cpi,
  1049. MACROBLOCK *const x,
  1050. int8_t segment_id) {
  1051. int segment_qindex;
  1052. VP9_COMMON *const cm = &cpi->common;
  1053. vp9_init_plane_quantizers(cpi, x);
  1054. vp9_clear_system_state();
  1055. segment_qindex = vp9_get_qindex(&cm->seg, segment_id,
  1056. cm->base_qindex);
  1057. return vp9_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
  1058. }
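// The value computed above becomes x->rdmult, the Lagrangian multiplier fed
// to the RDCOST() macro (see vp9_rd.h). Conceptually the comparison metric is
//   rd_cost ~= rate * rdmult / 256 + (dist << rddiv)
// so a boosted segment with a lower qindex gets a smaller rdmult and will
// spend more rate for the same reduction in distortion.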
  1059. static void rd_pick_sb_modes(VP9_COMP *cpi,
  1060. TileDataEnc *tile_data,
  1061. MACROBLOCK *const x,
  1062. int mi_row, int mi_col, RD_COST *rd_cost,
  1063. BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
  1064. int64_t best_rd) {
  1065. VP9_COMMON *const cm = &cpi->common;
  1066. TileInfo *const tile_info = &tile_data->tile_info;
  1067. MACROBLOCKD *const xd = &x->e_mbd;
  1068. MB_MODE_INFO *mbmi;
  1069. struct macroblock_plane *const p = x->plane;
  1070. struct macroblockd_plane *const pd = xd->plane;
  1071. const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
  1072. int i, orig_rdmult;
  1073. vp9_clear_system_state();
  1074. // Use the lower precision, but faster, 32x32 fdct for mode selection.
  1075. x->use_lp32x32fdct = 1;
  1076. set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
  1077. mbmi = &xd->mi[0]->mbmi;
  1078. mbmi->sb_type = bsize;
  1079. for (i = 0; i < MAX_MB_PLANE; ++i) {
  1080. p[i].coeff = ctx->coeff_pbuf[i][0];
  1081. p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
  1082. pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
  1083. p[i].eobs = ctx->eobs_pbuf[i][0];
  1084. }
  1085. ctx->is_coded = 0;
  1086. ctx->skippable = 0;
  1087. ctx->pred_pixel_ready = 0;
  1088. x->skip_recode = 0;
1089. // Set to zero to make sure we do not use the previously encoded frame stats.
  1090. mbmi->skip = 0;
  1091. #if CONFIG_VP9_HIGHBITDEPTH
  1092. if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
  1093. x->source_variance =
  1094. vp9_high_get_sby_perpixel_variance(cpi, &x->plane[0].src,
  1095. bsize, xd->bd);
  1096. } else {
  1097. x->source_variance =
  1098. vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
  1099. }
  1100. #else
  1101. x->source_variance =
  1102. vp9_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
  1103. #endif // CONFIG_VP9_HIGHBITDEPTH
  1104. // Save rdmult before it might be changed, so it can be restored later.
  1105. orig_rdmult = x->rdmult;
  1106. if (aq_mode == VARIANCE_AQ) {
  1107. const int energy = bsize <= BLOCK_16X16 ? x->mb_energy
  1108. : vp9_block_energy(cpi, x, bsize);
  1109. if (cm->frame_type == KEY_FRAME ||
  1110. cpi->refresh_alt_ref_frame ||
  1111. (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
  1112. mbmi->segment_id = vp9_vaq_segment_id(energy);
  1113. } else {
  1114. const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
  1115. : cm->last_frame_seg_map;
  1116. mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
  1117. }
  1118. x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
  1119. } else if (aq_mode == COMPLEXITY_AQ) {
  1120. x->rdmult = set_segment_rdmult(cpi, x, mbmi->segment_id);
  1121. } else if (aq_mode == CYCLIC_REFRESH_AQ) {
  1122. const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
  1123. : cm->last_frame_seg_map;
  1124. // If segment is boosted, use rdmult for that segment.
  1125. if (cyclic_refresh_segment_id_boosted(
  1126. vp9_get_segment_id(cm, map, bsize, mi_row, mi_col)))
  1127. x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
  1128. }
  1129. // Find best coding mode & reconstruct the MB so it is available
  1130. // as a predictor for MBs that follow in the SB
  1131. if (frame_is_intra_only(cm)) {
  1132. vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
  1133. } else {
  1134. if (bsize >= BLOCK_8X8) {
  1135. if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
  1136. vp9_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
  1137. ctx, best_rd);
  1138. else
  1139. vp9_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col,
  1140. rd_cost, bsize, ctx, best_rd);
  1141. } else {
  1142. vp9_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col,
  1143. rd_cost, bsize, ctx, best_rd);
  1144. }
  1145. }
  1146. // Examine the resulting rate and for AQ mode 2 make a segment choice.
  1147. if ((rd_cost->rate != INT_MAX) &&
  1148. (aq_mode == COMPLEXITY_AQ) && (bsize >= BLOCK_16X16) &&
  1149. (cm->frame_type == KEY_FRAME ||
  1150. cpi->refresh_alt_ref_frame ||
  1151. (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
  1152. vp9_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
  1153. }
  1154. x->rdmult = orig_rdmult;
  1155. // TODO(jingning) The rate-distortion optimization flow needs to be
  1156. // refactored to provide proper exit/return handle.
  1157. if (rd_cost->rate == INT_MAX)
  1158. rd_cost->rdcost = INT64_MAX;
  1159. ctx->rate = rd_cost->rate;
  1160. ctx->dist = rd_cost->dist;
  1161. }
  1162. static void update_stats(VP9_COMMON *cm, ThreadData *td) {
  1163. const MACROBLOCK *x = &td->mb;
  1164. const MACROBLOCKD *const xd = &x->e_mbd;
  1165. const MODE_INFO *const mi = xd->mi[0];
  1166. const MB_MODE_INFO *const mbmi = &mi->mbmi;
  1167. const BLOCK_SIZE bsize = mbmi->sb_type;
  1168. if (!frame_is_intra_only(cm)) {
  1169. FRAME_COUNTS *const counts = td->counts;
  1170. const int inter_block = is_inter_block(mbmi);
  1171. const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
  1172. SEG_LVL_REF_FRAME);
  1173. if (!seg_ref_active) {
  1174. counts->intra_inter[vp9_get_intra_inter_context(xd)][inter_block]++;
1175. // If the segment reference feature is enabled, we have only a single
1176. // reference frame allowed for the segment, so exclude it from
1177. // the reference frame counts used to work out probabilities.
  1178. if (inter_block) {
  1179. const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0];
  1180. if (cm->reference_mode == REFERENCE_MODE_SELECT)
  1181. counts->comp_inter[vp9_get_reference_mode_context(cm, xd)]
  1182. [has_second_ref(mbmi)]++;
  1183. if (has_second_ref(mbmi)) {
  1184. counts->comp_ref[vp9_get_pred_context_comp_ref_p(cm, xd)]
  1185. [ref0 == GOLDEN_FRAME]++;
  1186. } else {
  1187. counts->single_ref[vp9_get_pred_context_single_ref_p1(xd)][0]
  1188. [ref0 != LAST_FRAME]++;
  1189. if (ref0 != LAST_FRAME)
  1190. counts->single_ref[vp9_get_pred_context_single_ref_p2(xd)][1]
  1191. [ref0 != GOLDEN_FRAME]++;
  1192. }
  1193. }
  1194. }
  1195. if (inter_block &&
  1196. !vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
  1197. const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
  1198. if (bsize >= BLOCK_8X8) {
  1199. const PREDICTION_MODE mode = mbmi->mode;
  1200. ++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
  1201. } else {
  1202. const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
  1203. const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
  1204. int idx, idy;
  1205. for (idy = 0; idy < 2; idy += num_4x4_h) {
  1206. for (idx = 0; idx < 2; idx += num_4x4_w) {
  1207. const int j = idy * 2 + idx;
  1208. const PREDICTION_MODE b_mode = mi->bmi[j].as_mode;
  1209. ++counts->inter_mode[mode_ctx][INTER_OFFSET(b_mode)];
  1210. }
  1211. }
  1212. }
  1213. }
  1214. }
  1215. }
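// The counts accumulated here (intra/inter, reference frame and inter mode
// histograms) are not coded into the bitstream directly; at the end of the
// frame they drive backward probability adaptation, moving each binary
// context's probability toward roughly count[0] / (count[0] + count[1]).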
  1216. static void restore_context(MACROBLOCK *const x, int mi_row, int mi_col,
  1217. ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
  1218. ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
  1219. PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
  1220. BLOCK_SIZE bsize) {
  1221. MACROBLOCKD *const xd = &x->e_mbd;
  1222. int p;
  1223. const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  1224. const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  1225. int mi_width = num_8x8_blocks_wide_lookup[bsize];
  1226. int mi_height = num_8x8_blocks_high_lookup[bsize];
  1227. for (p = 0; p < MAX_MB_PLANE; p++) {
  1228. memcpy(
  1229. xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
  1230. a + num_4x4_blocks_wide * p,
  1231. (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
  1232. xd->plane[p].subsampling_x);
  1233. memcpy(
  1234. xd->left_context[p]
  1235. + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
  1236. l + num_4x4_blocks_high * p,
  1237. (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
  1238. xd->plane[p].subsampling_y);
  1239. }
  1240. memcpy(xd->above_seg_context + mi_col, sa,
  1241. sizeof(*xd->above_seg_context) * mi_width);
  1242. memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
  1243. sizeof(xd->left_seg_context[0]) * mi_height);
  1244. }
  1245. static void save_context(MACROBLOCK *const x, int mi_row, int mi_col,
  1246. ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
  1247. ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
  1248. PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
  1249. BLOCK_SIZE bsize) {
  1250. const MACROBLOCKD *const xd = &x->e_mbd;
  1251. int p;
  1252. const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  1253. const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  1254. int mi_width = num_8x8_blocks_wide_lookup[bsize];
  1255. int mi_height = num_8x8_blocks_high_lookup[bsize];
1256. // Buffer the above/left context information of the block being searched.
  1257. for (p = 0; p < MAX_MB_PLANE; ++p) {
  1258. memcpy(
  1259. a + num_4x4_blocks_wide * p,
  1260. xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
  1261. (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
  1262. xd->plane[p].subsampling_x);
  1263. memcpy(
  1264. l + num_4x4_blocks_high * p,
  1265. xd->left_context[p]
  1266. + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
  1267. (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
  1268. xd->plane[p].subsampling_y);
  1269. }
  1270. memcpy(sa, xd->above_seg_context + mi_col,
  1271. sizeof(*xd->above_seg_context) * mi_width);
  1272. memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
  1273. sizeof(xd->left_seg_context[0]) * mi_height);
  1274. }
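// save_context()/restore_context() bracket every speculative mode search in
// this file so entropy state mutated by one candidate partitioning cannot
// leak into the next. The recurring idiom is:
//   save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
//   rd_pick_sb_modes(...);  // trial encode; updates above/left contexts
//   restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);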
  1275. static void encode_b(VP9_COMP *cpi, const TileInfo *const tile,
  1276. ThreadData *td,
  1277. TOKENEXTRA **tp, int mi_row, int mi_col,
  1278. int output_enabled, BLOCK_SIZE bsize,
  1279. PICK_MODE_CONTEXT *ctx) {
  1280. MACROBLOCK *const x = &td->mb;
  1281. set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
  1282. update_state(cpi, td, ctx, mi_row, mi_col, bsize, output_enabled);
  1283. encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);
  1284. if (output_enabled) {
  1285. update_stats(&cpi->common, td);
  1286. (*tp)->token = EOSB_TOKEN;
  1287. (*tp)++;
  1288. }
  1289. }
  1290. static void encode_sb(VP9_COMP *cpi, ThreadData *td,
  1291. const TileInfo *const tile,
  1292. TOKENEXTRA **tp, int mi_row, int mi_col,
  1293. int output_enabled, BLOCK_SIZE bsize,
  1294. PC_TREE *pc_tree) {
  1295. VP9_COMMON *const cm = &cpi->common;
  1296. MACROBLOCK *const x = &td->mb;
  1297. MACROBLOCKD *const xd = &x->e_mbd;
  1298. const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  1299. int ctx;
  1300. PARTITION_TYPE partition;
  1301. BLOCK_SIZE subsize = bsize;
  1302. if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
  1303. return;
  1304. if (bsize >= BLOCK_8X8) {
  1305. ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
  1306. subsize = get_subsize(bsize, pc_tree->partitioning);
  1307. } else {
  1308. ctx = 0;
  1309. subsize = BLOCK_4X4;
  1310. }
  1311. partition = partition_lookup[bsl][subsize];
  1312. if (output_enabled && bsize != BLOCK_4X4)
  1313. td->counts->partition[ctx][partition]++;
  1314. switch (partition) {
  1315. case PARTITION_NONE:
  1316. encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
  1317. &pc_tree->none);
  1318. break;
  1319. case PARTITION_VERT:
  1320. encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
  1321. &pc_tree->vertical[0]);
  1322. if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
  1323. encode_b(cpi, tile, td, tp, mi_row, mi_col + hbs, output_enabled,
  1324. subsize, &pc_tree->vertical[1]);
  1325. }
  1326. break;
  1327. case PARTITION_HORZ:
  1328. encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
  1329. &pc_tree->horizontal[0]);
  1330. if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
  1331. encode_b(cpi, tile, td, tp, mi_row + hbs, mi_col, output_enabled,
  1332. subsize, &pc_tree->horizontal[1]);
  1333. }
  1334. break;
  1335. case PARTITION_SPLIT:
  1336. if (bsize == BLOCK_8X8) {
  1337. encode_b(cpi, tile, td, tp, mi_row, mi_col, output_enabled, subsize,
  1338. pc_tree->leaf_split[0]);
  1339. } else {
  1340. encode_sb(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
  1341. pc_tree->split[0]);
  1342. encode_sb(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
  1343. subsize, pc_tree->split[1]);
  1344. encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
  1345. subsize, pc_tree->split[2]);
  1346. encode_sb(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
  1347. subsize, pc_tree->split[3]);
  1348. }
  1349. break;
  1350. default:
  1351. assert(0 && "Invalid partition type.");
  1352. break;
  1353. }
  1354. if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
  1355. update_partition_context(xd, mi_row, mi_col, subsize, bsize);
  1356. }
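// encode_sb() re-walks the PC_TREE chosen by the partition search: interior
// nodes recurse into the four split[] children while leaves call encode_b()
// with the none/vertical[]/horizontal[] contexts. When output_enabled is
// set, the walk also updates partition counts and emits an EOSB token after
// every coded block.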
  1357. // Check to see if the given partition size is allowed for a specified number
  1358. // of 8x8 block rows and columns remaining in the image.
1359. // If not, return the largest allowed partition size.
  1360. static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
  1361. int rows_left, int cols_left,
  1362. int *bh, int *bw) {
  1363. if (rows_left <= 0 || cols_left <= 0) {
  1364. return MIN(bsize, BLOCK_8X8);
  1365. } else {
  1366. for (; bsize > 0; bsize -= 3) {
  1367. *bh = num_8x8_blocks_high_lookup[bsize];
  1368. *bw = num_8x8_blocks_wide_lookup[bsize];
  1369. if ((*bh <= rows_left) && (*bw <= cols_left)) {
  1370. break;
  1371. }
  1372. }
  1373. }
  1374. return bsize;
  1375. }
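// The "bsize -= 3" step works because BLOCK_SIZE values are grouped in
// threes (4x4,4x8,8x4 | 8x8,8x16,16x8 | ...), so subtracting 3 lands on the
// next smaller square size. Example: find_partition_size(BLOCK_64X64, 5, 7,
// &bh, &bw) fails the fit test at 64x64 (*bh == 8 > 5 rows left), steps down
// to BLOCK_32X32 (*bh == *bw == 4), and returns BLOCK_32X32.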
  1376. static void set_partial_b64x64_partition(MODE_INFO *mi, int mis,
  1377. int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining,
  1378. BLOCK_SIZE bsize, MODE_INFO **mi_8x8) {
  1379. int bh = bh_in;
  1380. int r, c;
  1381. for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
  1382. int bw = bw_in;
  1383. for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
  1384. const int index = r * mis + c;
  1385. mi_8x8[index] = mi + index;
  1386. mi_8x8[index]->mbmi.sb_type = find_partition_size(bsize,
  1387. row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
  1388. }
  1389. }
  1390. }
  1391. // This function attempts to set all mode info entries in a given SB64
  1392. // to the same block partition size.
  1393. // However, at the bottom and right borders of the image the requested size
1394. // may not be allowed, in which case this code attempts to choose the largest
  1395. // allowable partition.
  1396. static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
  1397. MODE_INFO **mi_8x8, int mi_row, int mi_col,
  1398. BLOCK_SIZE bsize) {
  1399. VP9_COMMON *const cm = &cpi->common;
  1400. const int mis = cm->mi_stride;
  1401. const int row8x8_remaining = tile->mi_row_end - mi_row;
  1402. const int col8x8_remaining = tile->mi_col_end - mi_col;
  1403. int block_row, block_col;
  1404. MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
  1405. int bh = num_8x8_blocks_high_lookup[bsize];
  1406. int bw = num_8x8_blocks_wide_lookup[bsize];
  1407. assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
  1408. // Apply the requested partition size to the SB64 if it is all "in image"
  1409. if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
  1410. (row8x8_remaining >= MI_BLOCK_SIZE)) {
  1411. for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
  1412. for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
  1413. int index = block_row * mis + block_col;
  1414. mi_8x8[index] = mi_upper_left + index;
  1415. mi_8x8[index]->mbmi.sb_type = bsize;
  1416. }
  1417. }
  1418. } else {
  1419. // Else this is a partial SB64.
  1420. set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
  1421. col8x8_remaining, bsize, mi_8x8);
  1422. }
  1423. }
  1424. const struct {
  1425. int row;
  1426. int col;
  1427. } coord_lookup[16] = {
  1428. // 32x32 index = 0
  1429. {0, 0}, {0, 2}, {2, 0}, {2, 2},
  1430. // 32x32 index = 1
  1431. {0, 4}, {0, 6}, {2, 4}, {2, 6},
  1432. // 32x32 index = 2
  1433. {4, 0}, {4, 2}, {6, 0}, {6, 2},
  1434. // 32x32 index = 3
  1435. {4, 4}, {4, 6}, {6, 4}, {6, 6},
  1436. };
  1437. static void set_source_var_based_partition(VP9_COMP *cpi,
  1438. const TileInfo *const tile,
  1439. MACROBLOCK *const x,
  1440. MODE_INFO **mi_8x8,
  1441. int mi_row, int mi_col) {
  1442. VP9_COMMON *const cm = &cpi->common;
  1443. const int mis = cm->mi_stride;
  1444. const int row8x8_remaining = tile->mi_row_end - mi_row;
  1445. const int col8x8_remaining = tile->mi_col_end - mi_col;
  1446. MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
  1447. vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
  1448. assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
  1449. // In-image SB64
  1450. if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
  1451. (row8x8_remaining >= MI_BLOCK_SIZE)) {
  1452. int i, j;
  1453. int index;
  1454. diff d32[4];
  1455. const int offset = (mi_row >> 1) * cm->mb_cols + (mi_col >> 1);
  1456. int is_larger_better = 0;
  1457. int use32x32 = 0;
  1458. unsigned int thr = cpi->source_var_thresh;
  1459. memset(d32, 0, 4 * sizeof(diff));
  1460. for (i = 0; i < 4; i++) {
  1461. diff *d16[4];
  1462. for (j = 0; j < 4; j++) {
  1463. int b_mi_row = coord_lookup[i * 4 + j].row;
  1464. int b_mi_col = coord_lookup[i * 4 + j].col;
  1465. int boffset = b_mi_row / 2 * cm->mb_cols +
  1466. b_mi_col / 2;
  1467. d16[j] = cpi->source_diff_var + offset + boffset;
  1468. index = b_mi_row * mis + b_mi_col;
  1469. mi_8x8[index] = mi_upper_left + index;
  1470. mi_8x8[index]->mbmi.sb_type = BLOCK_16X16;
  1471. // TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition
  1472. // size to further improve quality.
  1473. }
  1474. is_larger_better = (d16[0]->var < thr) && (d16[1]->var < thr) &&
  1475. (d16[2]->var < thr) && (d16[3]->var < thr);
  1476. // Use 32x32 partition
  1477. if (is_larger_better) {
  1478. use32x32 += 1;
  1479. for (j = 0; j < 4; j++) {
  1480. d32[i].sse += d16[j]->sse;
  1481. d32[i].sum += d16[j]->sum;
  1482. }
  1483. d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10);
  1484. index = coord_lookup[i*4].row * mis + coord_lookup[i*4].col;
  1485. mi_8x8[index] = mi_upper_left + index;
  1486. mi_8x8[index]->mbmi.sb_type = BLOCK_32X32;
  1487. }
  1488. }
  1489. if (use32x32 == 4) {
  1490. thr <<= 1;
  1491. is_larger_better = (d32[0].var < thr) && (d32[1].var < thr) &&
  1492. (d32[2].var < thr) && (d32[3].var < thr);
  1493. // Use 64x64 partition
  1494. if (is_larger_better) {
  1495. mi_8x8[0] = mi_upper_left;
  1496. mi_8x8[0]->mbmi.sb_type = BLOCK_64X64;
  1497. }
  1498. }
  1499. } else { // partial in-image SB64
  1500. int bh = num_8x8_blocks_high_lookup[BLOCK_16X16];
  1501. int bw = num_8x8_blocks_wide_lookup[BLOCK_16X16];
  1502. set_partial_b64x64_partition(mi_upper_left, mis, bh, bw,
  1503. row8x8_remaining, col8x8_remaining, BLOCK_16X16, mi_8x8);
  1504. }
  1505. }
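// The d32[i].var computation above is the usual variance identity
//   var = sse - sum * sum / N
// with N = 1024 pixels in a 32x32 block (hence the ">> 10"); the 16x16
// diffs being aggregated were computed analogously with N = 256.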
  1506. static void update_state_rt(VP9_COMP *cpi, ThreadData *td,
  1507. PICK_MODE_CONTEXT *ctx,
  1508. int mi_row, int mi_col, int bsize) {
  1509. VP9_COMMON *const cm = &cpi->common;
  1510. MACROBLOCK *const x = &td->mb;
  1511. MACROBLOCKD *const xd = &x->e_mbd;
  1512. MODE_INFO *const mi = xd->mi[0];
  1513. MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
  1514. const struct segmentation *const seg = &cm->seg;
  1515. const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type];
  1516. const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type];
  1517. const int x_mis = MIN(bw, cm->mi_cols - mi_col);
  1518. const int y_mis = MIN(bh, cm->mi_rows - mi_row);
  1519. *(xd->mi[0]) = ctx->mic;
  1520. if (seg->enabled && cpi->oxcf.aq_mode) {
1521. // For in-frame complexity AQ or variance AQ, copy the segment_id from
1522. // the segmentation map.
1523. if (cpi->oxcf.aq_mode == COMPLEXITY_AQ ||
1524. cpi->oxcf.aq_mode == VARIANCE_AQ) {
  1525. const uint8_t *const map = seg->update_map ? cpi->segmentation_map
  1526. : cm->last_frame_seg_map;
  1527. mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
  1528. } else {
1529. // Set the segmentation map for cyclic refresh.
  1530. vp9_cyclic_refresh_update_segment(cpi, mbmi, mi_row, mi_col, bsize,
  1531. ctx->rate, ctx->dist, x->skip);
  1532. }
  1533. vp9_init_plane_quantizers(cpi, x);
  1534. }
  1535. if (is_inter_block(mbmi)) {
  1536. vp9_update_mv_count(td);
  1537. if (cm->interp_filter == SWITCHABLE) {
  1538. const int pred_ctx = vp9_get_pred_context_switchable_interp(xd);
  1539. ++td->counts->switchable_interp[pred_ctx][mbmi->interp_filter];
  1540. }
  1541. if (mbmi->sb_type < BLOCK_8X8) {
  1542. mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
  1543. mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
  1544. }
  1545. }
  1546. if (cm->use_prev_frame_mvs) {
  1547. MV_REF *const frame_mvs =
  1548. cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
  1549. int w, h;
  1550. for (h = 0; h < y_mis; ++h) {
  1551. MV_REF *const frame_mv = frame_mvs + h * cm->mi_cols;
  1552. for (w = 0; w < x_mis; ++w) {
  1553. MV_REF *const mv = frame_mv + w;
  1554. mv->ref_frame[0] = mi->mbmi.ref_frame[0];
  1555. mv->ref_frame[1] = mi->mbmi.ref_frame[1];
  1556. mv->mv[0].as_int = mi->mbmi.mv[0].as_int;
  1557. mv->mv[1].as_int = mi->mbmi.mv[1].as_int;
  1558. }
  1559. }
  1560. }
  1561. x->skip = ctx->skip;
  1562. x->skip_txfm[0] = mbmi->segment_id ? 0 : ctx->skip_txfm[0];
  1563. }
  1564. static void encode_b_rt(VP9_COMP *cpi, ThreadData *td,
  1565. const TileInfo *const tile,
  1566. TOKENEXTRA **tp, int mi_row, int mi_col,
  1567. int output_enabled, BLOCK_SIZE bsize,
  1568. PICK_MODE_CONTEXT *ctx) {
  1569. MACROBLOCK *const x = &td->mb;
  1570. set_offsets(cpi, tile, x, mi_row, mi_col, bsize);
  1571. update_state_rt(cpi, td, ctx, mi_row, mi_col, bsize);
  1572. #if CONFIG_VP9_TEMPORAL_DENOISING
  1573. if (cpi->oxcf.noise_sensitivity > 0 && output_enabled &&
  1574. cpi->common.frame_type != KEY_FRAME) {
  1575. vp9_denoiser_denoise(&cpi->denoiser, x, mi_row, mi_col,
  1576. MAX(BLOCK_8X8, bsize), ctx);
  1577. }
  1578. #endif
  1579. encode_superblock(cpi, td, tp, output_enabled, mi_row, mi_col, bsize, ctx);
  1580. update_stats(&cpi->common, td);
  1581. (*tp)->token = EOSB_TOKEN;
  1582. (*tp)++;
  1583. }
  1584. static void encode_sb_rt(VP9_COMP *cpi, ThreadData *td,
  1585. const TileInfo *const tile,
  1586. TOKENEXTRA **tp, int mi_row, int mi_col,
  1587. int output_enabled, BLOCK_SIZE bsize,
  1588. PC_TREE *pc_tree) {
  1589. VP9_COMMON *const cm = &cpi->common;
  1590. MACROBLOCK *const x = &td->mb;
  1591. MACROBLOCKD *const xd = &x->e_mbd;
  1592. const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  1593. int ctx;
  1594. PARTITION_TYPE partition;
  1595. BLOCK_SIZE subsize;
  1596. if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
  1597. return;
  1598. if (bsize >= BLOCK_8X8) {
  1599. const int idx_str = xd->mi_stride * mi_row + mi_col;
  1600. MODE_INFO ** mi_8x8 = cm->mi_grid_visible + idx_str;
  1601. ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
  1602. subsize = mi_8x8[0]->mbmi.sb_type;
  1603. } else {
  1604. ctx = 0;
  1605. subsize = BLOCK_4X4;
  1606. }
  1607. partition = partition_lookup[bsl][subsize];
  1608. if (output_enabled && bsize != BLOCK_4X4)
  1609. td->counts->partition[ctx][partition]++;
  1610. switch (partition) {
  1611. case PARTITION_NONE:
  1612. encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
  1613. &pc_tree->none);
  1614. break;
  1615. case PARTITION_VERT:
  1616. encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
  1617. &pc_tree->vertical[0]);
  1618. if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
  1619. encode_b_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
  1620. subsize, &pc_tree->vertical[1]);
  1621. }
  1622. break;
  1623. case PARTITION_HORZ:
  1624. encode_b_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
  1625. &pc_tree->horizontal[0]);
  1626. if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
  1627. encode_b_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
  1628. subsize, &pc_tree->horizontal[1]);
  1629. }
  1630. break;
  1631. case PARTITION_SPLIT:
  1632. subsize = get_subsize(bsize, PARTITION_SPLIT);
  1633. encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col, output_enabled, subsize,
  1634. pc_tree->split[0]);
  1635. encode_sb_rt(cpi, td, tile, tp, mi_row, mi_col + hbs, output_enabled,
  1636. subsize, pc_tree->split[1]);
  1637. encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col, output_enabled,
  1638. subsize, pc_tree->split[2]);
  1639. encode_sb_rt(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs,
  1640. output_enabled, subsize, pc_tree->split[3]);
  1641. break;
  1642. default:
  1643. assert(0 && "Invalid partition type.");
  1644. break;
  1645. }
  1646. if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
  1647. update_partition_context(xd, mi_row, mi_col, subsize, bsize);
  1648. }
  1649. static void rd_use_partition(VP9_COMP *cpi,
  1650. ThreadData *td,
  1651. TileDataEnc *tile_data,
  1652. MODE_INFO **mi_8x8, TOKENEXTRA **tp,
  1653. int mi_row, int mi_col,
  1654. BLOCK_SIZE bsize,
  1655. int *rate, int64_t *dist,
  1656. int do_recon, PC_TREE *pc_tree) {
  1657. VP9_COMMON *const cm = &cpi->common;
  1658. TileInfo *const tile_info = &tile_data->tile_info;
  1659. MACROBLOCK *const x = &td->mb;
  1660. MACROBLOCKD *const xd = &x->e_mbd;
  1661. const int mis = cm->mi_stride;
  1662. const int bsl = b_width_log2_lookup[bsize];
  1663. const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2;
  1664. const int bss = (1 << bsl) / 4;
  1665. int i, pl;
  1666. PARTITION_TYPE partition = PARTITION_NONE;
  1667. BLOCK_SIZE subsize;
  1668. ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  1669. PARTITION_CONTEXT sl[8], sa[8];
  1670. RD_COST last_part_rdc, none_rdc, chosen_rdc;
  1671. BLOCK_SIZE sub_subsize = BLOCK_4X4;
  1672. int splits_below = 0;
  1673. BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type;
  1674. int do_partition_search = 1;
  1675. PICK_MODE_CONTEXT *ctx = &pc_tree->none;
  1676. if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
  1677. return;
  1678. assert(num_4x4_blocks_wide_lookup[bsize] ==
  1679. num_4x4_blocks_high_lookup[bsize]);
  1680. vp9_rd_cost_reset(&last_part_rdc);
  1681. vp9_rd_cost_reset(&none_rdc);
  1682. vp9_rd_cost_reset(&chosen_rdc);
  1683. partition = partition_lookup[bsl][bs_type];
  1684. subsize = get_subsize(bsize, partition);
  1685. pc_tree->partitioning = partition;
  1686. save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  1687. if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) {
  1688. set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
  1689. x->mb_energy = vp9_block_energy(cpi, x, bsize);
  1690. }
  1691. if (do_partition_search &&
  1692. cpi->sf.partition_search_type == SEARCH_PARTITION &&
  1693. cpi->sf.adjust_partitioning_from_last_frame) {
  1694. // Check if any of the sub blocks are further split.
  1695. if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
  1696. sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
  1697. splits_below = 1;
  1698. for (i = 0; i < 4; i++) {
  1699. int jj = i >> 1, ii = i & 0x01;
  1700. MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss];
  1701. if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
  1702. splits_below = 0;
  1703. }
  1704. }
  1705. }
1706. // If the partition is not none, try none, unless each of the 4 splits is
1707. // split even further.
  1708. if (partition != PARTITION_NONE && !splits_below &&
  1709. mi_row + (mi_step >> 1) < cm->mi_rows &&
  1710. mi_col + (mi_step >> 1) < cm->mi_cols) {
  1711. pc_tree->partitioning = PARTITION_NONE;
  1712. rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, bsize,
  1713. ctx, INT64_MAX);
  1714. pl = partition_plane_context(xd, mi_row, mi_col, bsize);
  1715. if (none_rdc.rate < INT_MAX) {
  1716. none_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
  1717. none_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, none_rdc.rate,
  1718. none_rdc.dist);
  1719. }
  1720. restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  1721. mi_8x8[0]->mbmi.sb_type = bs_type;
  1722. pc_tree->partitioning = partition;
  1723. }
  1724. }
  1725. switch (partition) {
  1726. case PARTITION_NONE:
  1727. rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
  1728. bsize, ctx, INT64_MAX);
  1729. break;
  1730. case PARTITION_HORZ:
  1731. rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
  1732. subsize, &pc_tree->horizontal[0],
  1733. INT64_MAX);
  1734. if (last_part_rdc.rate != INT_MAX &&
  1735. bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
  1736. RD_COST tmp_rdc;
  1737. PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
  1738. vp9_rd_cost_init(&tmp_rdc);
  1739. update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
  1740. encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
  1741. rd_pick_sb_modes(cpi, tile_data, x,
  1742. mi_row + (mi_step >> 1), mi_col, &tmp_rdc,
  1743. subsize, &pc_tree->horizontal[1], INT64_MAX);
  1744. if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
  1745. vp9_rd_cost_reset(&last_part_rdc);
  1746. break;
  1747. }
  1748. last_part_rdc.rate += tmp_rdc.rate;
  1749. last_part_rdc.dist += tmp_rdc.dist;
  1750. last_part_rdc.rdcost += tmp_rdc.rdcost;
  1751. }
  1752. break;
  1753. case PARTITION_VERT:
  1754. rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
  1755. subsize, &pc_tree->vertical[0], INT64_MAX);
  1756. if (last_part_rdc.rate != INT_MAX &&
  1757. bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
  1758. RD_COST tmp_rdc;
  1759. PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
  1760. vp9_rd_cost_init(&tmp_rdc);
  1761. update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
  1762. encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
  1763. rd_pick_sb_modes(cpi, tile_data, x,
  1764. mi_row, mi_col + (mi_step >> 1), &tmp_rdc,
  1765. subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
  1766. INT64_MAX);
  1767. if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
  1768. vp9_rd_cost_reset(&last_part_rdc);
  1769. break;
  1770. }
  1771. last_part_rdc.rate += tmp_rdc.rate;
  1772. last_part_rdc.dist += tmp_rdc.dist;
  1773. last_part_rdc.rdcost += tmp_rdc.rdcost;
  1774. }
  1775. break;
  1776. case PARTITION_SPLIT:
  1777. if (bsize == BLOCK_8X8) {
  1778. rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc,
  1779. subsize, pc_tree->leaf_split[0], INT64_MAX);
  1780. break;
  1781. }
  1782. last_part_rdc.rate = 0;
  1783. last_part_rdc.dist = 0;
  1784. last_part_rdc.rdcost = 0;
  1785. for (i = 0; i < 4; i++) {
  1786. int x_idx = (i & 1) * (mi_step >> 1);
  1787. int y_idx = (i >> 1) * (mi_step >> 1);
  1788. int jj = i >> 1, ii = i & 0x01;
  1789. RD_COST tmp_rdc;
  1790. if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
  1791. continue;
  1792. vp9_rd_cost_init(&tmp_rdc);
  1793. rd_use_partition(cpi, td, tile_data,
  1794. mi_8x8 + jj * bss * mis + ii * bss, tp,
  1795. mi_row + y_idx, mi_col + x_idx, subsize,
  1796. &tmp_rdc.rate, &tmp_rdc.dist,
  1797. i != 3, pc_tree->split[i]);
  1798. if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
  1799. vp9_rd_cost_reset(&last_part_rdc);
  1800. break;
  1801. }
  1802. last_part_rdc.rate += tmp_rdc.rate;
  1803. last_part_rdc.dist += tmp_rdc.dist;
  1804. }
  1805. break;
  1806. default:
  1807. assert(0);
  1808. break;
  1809. }
  1810. pl = partition_plane_context(xd, mi_row, mi_col, bsize);
  1811. if (last_part_rdc.rate < INT_MAX) {
  1812. last_part_rdc.rate += cpi->partition_cost[pl][partition];
  1813. last_part_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
  1814. last_part_rdc.rate, last_part_rdc.dist);
  1815. }
  1816. if (do_partition_search
  1817. && cpi->sf.adjust_partitioning_from_last_frame
  1818. && cpi->sf.partition_search_type == SEARCH_PARTITION
  1819. && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
  1820. && (mi_row + mi_step < cm->mi_rows ||
  1821. mi_row + (mi_step >> 1) == cm->mi_rows)
  1822. && (mi_col + mi_step < cm->mi_cols ||
  1823. mi_col + (mi_step >> 1) == cm->mi_cols)) {
  1824. BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
  1825. chosen_rdc.rate = 0;
  1826. chosen_rdc.dist = 0;
  1827. restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  1828. pc_tree->partitioning = PARTITION_SPLIT;
  1829. // Split partition.
  1830. for (i = 0; i < 4; i++) {
  1831. int x_idx = (i & 1) * (mi_step >> 1);
  1832. int y_idx = (i >> 1) * (mi_step >> 1);
  1833. RD_COST tmp_rdc;
  1834. ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  1835. PARTITION_CONTEXT sl[8], sa[8];
  1836. if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
  1837. continue;
  1838. save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  1839. pc_tree->split[i]->partitioning = PARTITION_NONE;
  1840. rd_pick_sb_modes(cpi, tile_data, x,
  1841. mi_row + y_idx, mi_col + x_idx, &tmp_rdc,
  1842. split_subsize, &pc_tree->split[i]->none, INT64_MAX);
  1843. restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  1844. if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
  1845. vp9_rd_cost_reset(&chosen_rdc);
  1846. break;
  1847. }
  1848. chosen_rdc.rate += tmp_rdc.rate;
  1849. chosen_rdc.dist += tmp_rdc.dist;
  1850. if (i != 3)
  1851. encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, 0,
  1852. split_subsize, pc_tree->split[i]);
  1853. pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
  1854. split_subsize);
  1855. chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
  1856. }
  1857. pl = partition_plane_context(xd, mi_row, mi_col, bsize);
  1858. if (chosen_rdc.rate < INT_MAX) {
  1859. chosen_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
  1860. chosen_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
  1861. chosen_rdc.rate, chosen_rdc.dist);
  1862. }
  1863. }
1864. // If last_part is better, set the partitioning to that.
  1865. if (last_part_rdc.rdcost < chosen_rdc.rdcost) {
  1866. mi_8x8[0]->mbmi.sb_type = bsize;
  1867. if (bsize >= BLOCK_8X8)
  1868. pc_tree->partitioning = partition;
  1869. chosen_rdc = last_part_rdc;
  1870. }
1871. // If none was better, set the partitioning to that.
  1872. if (none_rdc.rdcost < chosen_rdc.rdcost) {
  1873. if (bsize >= BLOCK_8X8)
  1874. pc_tree->partitioning = PARTITION_NONE;
  1875. chosen_rdc = none_rdc;
  1876. }
  1877. restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
1878. // We must have chosen a partitioning and encoding, or we'll fail later on.
1879. // There are no other opportunities for success.
  1880. if (bsize == BLOCK_64X64)
  1881. assert(chosen_rdc.rate < INT_MAX && chosen_rdc.dist < INT64_MAX);
  1882. if (do_recon) {
  1883. int output_enabled = (bsize == BLOCK_64X64);
  1884. encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled, bsize,
  1885. pc_tree);
  1886. }
  1887. *rate = chosen_rdc.rate;
  1888. *dist = chosen_rdc.dist;
  1889. }
  1890. static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
  1891. BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
  1892. BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
  1893. BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
  1894. BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
  1895. BLOCK_16X16
  1896. };
  1897. static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
  1898. BLOCK_8X8, BLOCK_16X16, BLOCK_16X16,
  1899. BLOCK_16X16, BLOCK_32X32, BLOCK_32X32,
  1900. BLOCK_32X32, BLOCK_64X64, BLOCK_64X64,
  1901. BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
  1902. BLOCK_64X64
  1903. };
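// These two tables implement the one-step "relaxed" range used by
// rd_auto_partition_range(): widen the observed min/max by one square size
// in each direction. Example: if neighboring blocks used only BLOCK_16X16,
// the allowed search range becomes [BLOCK_8X8, BLOCK_32X32].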
  1904. // Look at all the mode_info entries for blocks that are part of this
  1905. // partition and find the min and max values for sb_type.
  1906. // At the moment this is designed to work on a 64x64 SB but could be
  1907. // adjusted to use a size parameter.
  1908. //
  1909. // The min and max are assumed to have been initialized prior to calling this
  1910. // function so repeat calls can accumulate a min and max of more than one sb64.
  1911. static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO **mi_8x8,
  1912. BLOCK_SIZE *min_block_size,
  1913. BLOCK_SIZE *max_block_size,
  1914. int bs_hist[BLOCK_SIZES]) {
  1915. int sb_width_in_blocks = MI_BLOCK_SIZE;
  1916. int sb_height_in_blocks = MI_BLOCK_SIZE;
  1917. int i, j;
  1918. int index = 0;
  1919. // Check the sb_type for each block that belongs to this region.
  1920. for (i = 0; i < sb_height_in_blocks; ++i) {
  1921. for (j = 0; j < sb_width_in_blocks; ++j) {
  1922. MODE_INFO *mi = mi_8x8[index+j];
  1923. BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
  1924. bs_hist[sb_type]++;
  1925. *min_block_size = MIN(*min_block_size, sb_type);
  1926. *max_block_size = MAX(*max_block_size, sb_type);
  1927. }
  1928. index += xd->mi_stride;
  1929. }
  1930. }
1931. // Next square block size less than or equal to the current block size.
  1932. static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
  1933. BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
  1934. BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
  1935. BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
  1936. BLOCK_32X32, BLOCK_32X32, BLOCK_32X32,
  1937. BLOCK_64X64
  1938. };
  1939. // Look at neighboring blocks and set a min and max partition size based on
  1940. // what they chose.
  1941. static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
  1942. MACROBLOCKD *const xd,
  1943. int mi_row, int mi_col,
  1944. BLOCK_SIZE *min_block_size,
  1945. BLOCK_SIZE *max_block_size) {
  1946. VP9_COMMON *const cm = &cpi->common;
  1947. MODE_INFO **mi = xd->mi;
  1948. const int left_in_image = xd->left_available && mi[-1];
  1949. const int above_in_image = xd->up_available && mi[-xd->mi_stride];
  1950. const int row8x8_remaining = tile->mi_row_end - mi_row;
  1951. const int col8x8_remaining = tile->mi_col_end - mi_col;
  1952. int bh, bw;
  1953. BLOCK_SIZE min_size = BLOCK_4X4;
  1954. BLOCK_SIZE max_size = BLOCK_64X64;
  1955. int bs_hist[BLOCK_SIZES] = {0};
  1956. // Trap case where we do not have a prediction.
  1957. if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
1958. // Default "min" to the largest size and "max" to the smallest; the scans below tighten them.
  1959. min_size = BLOCK_64X64;
  1960. max_size = BLOCK_4X4;
  1961. // NOTE: each call to get_sb_partition_size_range() uses the previous
  1962. // passed in values for min and max as a starting point.
  1963. // Find the min and max partition used in previous frame at this location
  1964. if (cm->frame_type != KEY_FRAME) {
  1965. MODE_INFO **prev_mi =
  1966. &cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col];
  1967. get_sb_partition_size_range(xd, prev_mi, &min_size, &max_size, bs_hist);
  1968. }
  1969. // Find the min and max partition sizes used in the left SB64
  1970. if (left_in_image) {
  1971. MODE_INFO **left_sb64_mi = &mi[-MI_BLOCK_SIZE];
  1972. get_sb_partition_size_range(xd, left_sb64_mi, &min_size, &max_size,
  1973. bs_hist);
  1974. }
  1975. // Find the min and max partition sizes used in the above SB64.
  1976. if (above_in_image) {
  1977. MODE_INFO **above_sb64_mi = &mi[-xd->mi_stride * MI_BLOCK_SIZE];
  1978. get_sb_partition_size_range(xd, above_sb64_mi, &min_size, &max_size,
  1979. bs_hist);
  1980. }
  1981. // Adjust observed min and max for "relaxed" auto partition case.
  1982. if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
  1983. min_size = min_partition_size[min_size];
  1984. max_size = max_partition_size[max_size];
  1985. }
  1986. }
  1987. // Check border cases where max and min from neighbors may not be legal.
  1988. max_size = find_partition_size(max_size,
  1989. row8x8_remaining, col8x8_remaining,
  1990. &bh, &bw);
  1991. min_size = MIN(cpi->sf.rd_auto_partition_min_limit, MIN(min_size, max_size));
  1992. // When use_square_partition_only is true, make sure at least one square
  1993. // partition is allowed by selecting the next smaller square size as
  1994. // *min_block_size.
  1995. if (cpi->sf.use_square_partition_only &&
  1996. next_square_size[max_size] < min_size) {
  1997. min_size = next_square_size[max_size];
  1998. }
  1999. *min_block_size = min_size;
  2000. *max_block_size = max_size;
  2001. }
  2002. static void auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
  2003. MACROBLOCKD *const xd,
  2004. int mi_row, int mi_col,
  2005. BLOCK_SIZE *min_block_size,
  2006. BLOCK_SIZE *max_block_size) {
  2007. VP9_COMMON *const cm = &cpi->common;
  2008. MODE_INFO **mi_8x8 = xd->mi;
  2009. const int left_in_image = xd->left_available && mi_8x8[-1];
  2010. const int above_in_image = xd->up_available && mi_8x8[-xd->mi_stride];
  2011. int row8x8_remaining = tile->mi_row_end - mi_row;
  2012. int col8x8_remaining = tile->mi_col_end - mi_col;
  2013. int bh, bw;
  2014. BLOCK_SIZE min_size = BLOCK_32X32;
  2015. BLOCK_SIZE max_size = BLOCK_8X8;
  2016. int bsl = mi_width_log2_lookup[BLOCK_64X64];
  2017. const int search_range_ctrl = (((mi_row + mi_col) >> bsl) +
  2018. get_chessboard_index(cm->current_video_frame)) & 0x1;
  2019. // Trap case where we do not have a prediction.
  2020. if (search_range_ctrl &&
  2021. (left_in_image || above_in_image || cm->frame_type != KEY_FRAME)) {
  2022. int block;
  2023. MODE_INFO **mi;
  2024. BLOCK_SIZE sb_type;
  2025. // Find the min and max partition sizes used in the left SB64.
  2026. if (left_in_image) {
  2027. MODE_INFO *cur_mi;
  2028. mi = &mi_8x8[-1];
  2029. for (block = 0; block < MI_BLOCK_SIZE; ++block) {
  2030. cur_mi = mi[block * xd->mi_stride];
  2031. sb_type = cur_mi ? cur_mi->mbmi.sb_type : 0;
  2032. min_size = MIN(min_size, sb_type);
  2033. max_size = MAX(max_size, sb_type);
  2034. }
  2035. }
  2036. // Find the min and max partition sizes used in the above SB64.
  2037. if (above_in_image) {
  2038. mi = &mi_8x8[-xd->mi_stride * MI_BLOCK_SIZE];
  2039. for (block = 0; block < MI_BLOCK_SIZE; ++block) {
  2040. sb_type = mi[block] ? mi[block]->mbmi.sb_type : 0;
  2041. min_size = MIN(min_size, sb_type);
  2042. max_size = MAX(max_size, sb_type);
  2043. }
  2044. }
  2045. min_size = min_partition_size[min_size];
  2046. max_size = find_partition_size(max_size, row8x8_remaining, col8x8_remaining,
  2047. &bh, &bw);
  2048. min_size = MIN(min_size, max_size);
  2049. min_size = MAX(min_size, BLOCK_8X8);
  2050. max_size = MIN(max_size, BLOCK_32X32);
  2051. } else {
  2052. min_size = BLOCK_8X8;
  2053. max_size = BLOCK_32X32;
  2054. }
  2055. *min_block_size = min_size;
  2056. *max_block_size = max_size;
  2057. }
  2058. // TODO(jingning) refactor functions setting partition search range
  2059. static void set_partition_range(VP9_COMMON *cm, MACROBLOCKD *xd,
  2060. int mi_row, int mi_col, BLOCK_SIZE bsize,
  2061. BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
  2062. int mi_width = num_8x8_blocks_wide_lookup[bsize];
  2063. int mi_height = num_8x8_blocks_high_lookup[bsize];
  2064. int idx, idy;
  2065. MODE_INFO *mi;
  2066. const int idx_str = cm->mi_stride * mi_row + mi_col;
  2067. MODE_INFO **prev_mi = &cm->prev_mi_grid_visible[idx_str];
  2068. BLOCK_SIZE bs, min_size, max_size;
  2069. min_size = BLOCK_64X64;
  2070. max_size = BLOCK_4X4;
  2071. if (prev_mi) {
  2072. for (idy = 0; idy < mi_height; ++idy) {
  2073. for (idx = 0; idx < mi_width; ++idx) {
  2074. mi = prev_mi[idy * cm->mi_stride + idx];
  2075. bs = mi ? mi->mbmi.sb_type : bsize;
  2076. min_size = MIN(min_size, bs);
  2077. max_size = MAX(max_size, bs);
  2078. }
  2079. }
  2080. }
  2081. if (xd->left_available) {
  2082. for (idy = 0; idy < mi_height; ++idy) {
  2083. mi = xd->mi[idy * cm->mi_stride - 1];
  2084. bs = mi ? mi->mbmi.sb_type : bsize;
  2085. min_size = MIN(min_size, bs);
  2086. max_size = MAX(max_size, bs);
  2087. }
  2088. }
  2089. if (xd->up_available) {
  2090. for (idx = 0; idx < mi_width; ++idx) {
  2091. mi = xd->mi[idx - cm->mi_stride];
  2092. bs = mi ? mi->mbmi.sb_type : bsize;
  2093. min_size = MIN(min_size, bs);
  2094. max_size = MAX(max_size, bs);
  2095. }
  2096. }
  2097. if (min_size == max_size) {
  2098. min_size = min_partition_size[min_size];
  2099. max_size = max_partition_size[max_size];
  2100. }
  2101. *min_bs = min_size;
  2102. *max_bs = max_size;
  2103. }
  2104. static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
  2105. memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
  2106. }
  2107. static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
  2108. memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
  2109. }
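// store_pred_mv()/load_pred_mv() let the adaptive motion search seed each
// sub-partition search with the best motion vectors found for the enclosing
// block; see the store/load pairs around the partition searches in
// rd_pick_partition() below.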
  2110. #if CONFIG_FP_MB_STATS
  2111. const int num_16x16_blocks_wide_lookup[BLOCK_SIZES] =
  2112. {1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 4, 4};
  2113. const int num_16x16_blocks_high_lookup[BLOCK_SIZES] =
  2114. {1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 4, 2, 4};
  2115. const int qindex_skip_threshold_lookup[BLOCK_SIZES] =
  2116. {0, 10, 10, 30, 40, 40, 60, 80, 80, 90, 100, 100, 120};
  2117. const int qindex_split_threshold_lookup[BLOCK_SIZES] =
  2118. {0, 3, 3, 7, 15, 15, 30, 40, 40, 60, 80, 80, 120};
  2119. const int complexity_16x16_blocks_threshold[BLOCK_SIZES] =
  2120. {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 6};
  2121. typedef enum {
  2122. MV_ZERO = 0,
  2123. MV_LEFT = 1,
  2124. MV_UP = 2,
  2125. MV_RIGHT = 3,
  2126. MV_DOWN = 4,
  2127. MV_INVALID
  2128. } MOTION_DIRECTION;
  2129. static INLINE MOTION_DIRECTION get_motion_direction_fp(uint8_t fp_byte) {
  2130. if (fp_byte & FPMB_MOTION_ZERO_MASK) {
  2131. return MV_ZERO;
  2132. } else if (fp_byte & FPMB_MOTION_LEFT_MASK) {
  2133. return MV_LEFT;
  2134. } else if (fp_byte & FPMB_MOTION_RIGHT_MASK) {
  2135. return MV_RIGHT;
  2136. } else if (fp_byte & FPMB_MOTION_UP_MASK) {
  2137. return MV_UP;
  2138. } else {
  2139. return MV_DOWN;
  2140. }
  2141. }
  2142. static INLINE int get_motion_inconsistency(MOTION_DIRECTION this_mv,
  2143. MOTION_DIRECTION that_mv) {
  2144. if (this_mv == that_mv) {
  2145. return 0;
  2146. } else {
  2147. return abs(this_mv - that_mv) == 2 ? 2 : 1;
  2148. }
  2149. }
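// With the encoding above, opposite directions differ by exactly 2 and are
// penalized twice as heavily as merely different ones:
//   get_motion_inconsistency(MV_LEFT, MV_RIGHT) == 2  (|1 - 3| == 2)
//   get_motion_inconsistency(MV_LEFT, MV_UP)    == 1  (|1 - 2| == 1)
//   get_motion_inconsistency(MV_UP, MV_UP)      == 0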
  2150. #endif
  2151. // TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
  2152. // unlikely to be selected depending on previous rate-distortion optimization
  2153. // results, for encoding speed-up.
  2154. static void rd_pick_partition(VP9_COMP *cpi, ThreadData *td,
  2155. TileDataEnc *tile_data,
  2156. TOKENEXTRA **tp, int mi_row, int mi_col,
  2157. BLOCK_SIZE bsize, RD_COST *rd_cost,
  2158. int64_t best_rd, PC_TREE *pc_tree) {
  2159. VP9_COMMON *const cm = &cpi->common;
  2160. TileInfo *const tile_info = &tile_data->tile_info;
  2161. MACROBLOCK *const x = &td->mb;
  2162. MACROBLOCKD *const xd = &x->e_mbd;
  2163. const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
  2164. ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  2165. PARTITION_CONTEXT sl[8], sa[8];
  2166. TOKENEXTRA *tp_orig = *tp;
  2167. PICK_MODE_CONTEXT *ctx = &pc_tree->none;
  2168. int i, pl;
  2169. BLOCK_SIZE subsize;
  2170. RD_COST this_rdc, sum_rdc, best_rdc;
  2171. int do_split = bsize >= BLOCK_8X8;
  2172. int do_rect = 1;
  2173. // Override skipping rectangular partition operations for edge blocks
  2174. const int force_horz_split = (mi_row + mi_step >= cm->mi_rows);
  2175. const int force_vert_split = (mi_col + mi_step >= cm->mi_cols);
  2176. const int xss = x->e_mbd.plane[1].subsampling_x;
  2177. const int yss = x->e_mbd.plane[1].subsampling_y;
  2178. BLOCK_SIZE min_size = x->min_partition_size;
  2179. BLOCK_SIZE max_size = x->max_partition_size;
  2180. #if CONFIG_FP_MB_STATS
  2181. unsigned int src_diff_var = UINT_MAX;
  2182. int none_complexity = 0;
  2183. #endif
  2184. int partition_none_allowed = !force_horz_split && !force_vert_split;
  2185. int partition_horz_allowed = !force_vert_split && yss <= xss &&
  2186. bsize >= BLOCK_8X8;
  2187. int partition_vert_allowed = !force_horz_split && xss <= yss &&
  2188. bsize >= BLOCK_8X8;
  2189. (void) *tp_orig;
  2190. assert(num_8x8_blocks_wide_lookup[bsize] ==
  2191. num_8x8_blocks_high_lookup[bsize]);
  2192. vp9_rd_cost_init(&this_rdc);
  2193. vp9_rd_cost_init(&sum_rdc);
  2194. vp9_rd_cost_reset(&best_rdc);
  2195. best_rdc.rdcost = best_rd;
  2196. set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
  2197. if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode)
  2198. x->mb_energy = vp9_block_energy(cpi, x, bsize);
  2199. if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
  2200. int cb_partition_search_ctrl = ((pc_tree->index == 0 || pc_tree->index == 3)
  2201. + get_chessboard_index(cm->current_video_frame)) & 0x1;
  2202. if (cb_partition_search_ctrl && bsize > min_size && bsize < max_size)
  2203. set_partition_range(cm, xd, mi_row, mi_col, bsize, &min_size, &max_size);
  2204. }
2205. // Determine which partition types to search, according to the speed features.
2206. // The thresholds set here must be square block sizes.
  2207. if (cpi->sf.auto_min_max_partition_size) {
  2208. partition_none_allowed &= (bsize <= max_size && bsize >= min_size);
  2209. partition_horz_allowed &= ((bsize <= max_size && bsize > min_size) ||
  2210. force_horz_split);
  2211. partition_vert_allowed &= ((bsize <= max_size && bsize > min_size) ||
  2212. force_vert_split);
  2213. do_split &= bsize > min_size;
  2214. }
  2215. if (cpi->sf.use_square_partition_only) {
  2216. partition_horz_allowed &= force_horz_split;
  2217. partition_vert_allowed &= force_vert_split;
  2218. }
  2219. save_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  2220. #if CONFIG_FP_MB_STATS
  2221. if (cpi->use_fp_mb_stats) {
  2222. set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
  2223. src_diff_var = get_sby_perpixel_diff_variance(cpi, &x->plane[0].src,
  2224. mi_row, mi_col, bsize);
  2225. }
  2226. #endif
  2227. #if CONFIG_FP_MB_STATS
2228. // Decide whether to split directly and skip searching NONE, based on
2229. // the first pass block statistics.
  2230. if (cpi->use_fp_mb_stats && bsize >= BLOCK_32X32 && do_split &&
  2231. partition_none_allowed && src_diff_var > 4 &&
  2232. cm->base_qindex < qindex_split_threshold_lookup[bsize]) {
  2233. int mb_row = mi_row >> 1;
  2234. int mb_col = mi_col >> 1;
  2235. int mb_row_end =
  2236. MIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
  2237. int mb_col_end =
  2238. MIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
  2239. int r, c;
2240. // Compute a complexity measure, basically measuring the inconsistency of
2241. // the motion vectors obtained from the first pass in the current block.
  2242. for (r = mb_row; r < mb_row_end ; r++) {
  2243. for (c = mb_col; c < mb_col_end; c++) {
  2244. const int mb_index = r * cm->mb_cols + c;
  2245. MOTION_DIRECTION this_mv;
  2246. MOTION_DIRECTION right_mv;
  2247. MOTION_DIRECTION bottom_mv;
  2248. this_mv =
  2249. get_motion_direction_fp(cpi->twopass.this_frame_mb_stats[mb_index]);
  2250. // to its right
  2251. if (c != mb_col_end - 1) {
  2252. right_mv = get_motion_direction_fp(
  2253. cpi->twopass.this_frame_mb_stats[mb_index + 1]);
  2254. none_complexity += get_motion_inconsistency(this_mv, right_mv);
  2255. }
  2256. // to its bottom
  2257. if (r != mb_row_end - 1) {
  2258. bottom_mv = get_motion_direction_fp(
  2259. cpi->twopass.this_frame_mb_stats[mb_index + cm->mb_cols]);
  2260. none_complexity += get_motion_inconsistency(this_mv, bottom_mv);
  2261. }
2262. // Do not count its left and top neighbors, to avoid double counting.
  2263. }
  2264. }
  2265. if (none_complexity > complexity_16x16_blocks_threshold[bsize]) {
  2266. partition_none_allowed = 0;
  2267. }
  2268. }
  2269. #endif
  2270. // PARTITION_NONE
  2271. if (partition_none_allowed) {
  2272. rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
  2273. &this_rdc, bsize, ctx, best_rdc.rdcost);
  2274. if (this_rdc.rate != INT_MAX) {
  2275. if (bsize >= BLOCK_8X8) {
  2276. pl = partition_plane_context(xd, mi_row, mi_col, bsize);
  2277. this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
  2278. this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
  2279. this_rdc.rate, this_rdc.dist);
  2280. }
  2281. if (this_rdc.rdcost < best_rdc.rdcost) {
  2282. int64_t dist_breakout_thr = cpi->sf.partition_search_breakout_dist_thr;
  2283. int rate_breakout_thr = cpi->sf.partition_search_breakout_rate_thr;
  2284. best_rdc = this_rdc;
  2285. if (bsize >= BLOCK_8X8)
  2286. pc_tree->partitioning = PARTITION_NONE;
  2287. // Adjust dist breakout threshold according to the partition size.
  2288. dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
  2289. b_height_log2_lookup[bsize]);
  2290. rate_breakout_thr *= num_pels_log2_lookup[bsize];
  2291. // If all y, u, v transform blocks in this partition are skippable, and
  2292. // the dist & rate are within the thresholds, the partition search is
  2293. // terminated for current branch of the partition search tree.
  2294. // The dist & rate thresholds are set to 0 at speed 0 to disable the
  2295. // early termination at that speed.
  2296. if (!x->e_mbd.lossless &&
  2297. (ctx->skippable && best_rdc.dist < dist_breakout_thr &&
  2298. best_rdc.rate < rate_breakout_thr)) {
  2299. do_split = 0;
  2300. do_rect = 0;
  2301. }
  2302. #if CONFIG_FP_MB_STATS
2303. // Check if every 16x16 first pass block has zero motion and the
2304. // corresponding first pass residue is small enough.
  2305. // If that is the case, check the difference variance between the
  2306. // current frame and the last frame. If the variance is small enough,
  2307. // stop further splitting in RD optimization
  2308. if (cpi->use_fp_mb_stats && do_split != 0 &&
  2309. cm->base_qindex > qindex_skip_threshold_lookup[bsize]) {
  2310. int mb_row = mi_row >> 1;
  2311. int mb_col = mi_col >> 1;
  2312. int mb_row_end =
  2313. MIN(mb_row + num_16x16_blocks_high_lookup[bsize], cm->mb_rows);
  2314. int mb_col_end =
  2315. MIN(mb_col + num_16x16_blocks_wide_lookup[bsize], cm->mb_cols);
  2316. int r, c;
  2317. int skip = 1;
  2318. for (r = mb_row; r < mb_row_end; r++) {
  2319. for (c = mb_col; c < mb_col_end; c++) {
  2320. const int mb_index = r * cm->mb_cols + c;
  2321. if (!(cpi->twopass.this_frame_mb_stats[mb_index] &
  2322. FPMB_MOTION_ZERO_MASK) ||
  2323. !(cpi->twopass.this_frame_mb_stats[mb_index] &
  2324. FPMB_ERROR_SMALL_MASK)) {
  2325. skip = 0;
  2326. break;
  2327. }
  2328. }
  2329. if (skip == 0) {
  2330. break;
  2331. }
  2332. }
  2333. if (skip) {
  2334. if (src_diff_var == UINT_MAX) {
  2335. set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
  2336. src_diff_var = get_sby_perpixel_diff_variance(
  2337. cpi, &x->plane[0].src, mi_row, mi_col, bsize);
  2338. }
  2339. if (src_diff_var < 8) {
  2340. do_split = 0;
  2341. do_rect = 0;
  2342. }
  2343. }
  2344. }
  2345. #endif
  2346. }
  2347. }
  2348. restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  2349. }
2350. // Store the estimated motion vector.
  2351. if (cpi->sf.adaptive_motion_search)
  2352. store_pred_mv(x, ctx);
  // PARTITION_SPLIT
  // TODO(jingning): use the motion vectors given by the above search as
  // the starting point of motion search in the following partition type check.
  if (do_split) {
    subsize = get_subsize(bsize, PARTITION_SPLIT);
    if (bsize == BLOCK_8X8) {
      i = 4;
      if (cpi->sf.adaptive_pred_interp_filter && partition_none_allowed)
        pc_tree->leaf_split[0]->pred_interp_filter =
            ctx->mic.mbmi.interp_filter;
      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                       pc_tree->leaf_split[0], best_rdc.rdcost);
      if (sum_rdc.rate == INT_MAX)
        sum_rdc.rdcost = INT64_MAX;
    } else {
      for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
        const int x_idx = (i & 1) * mi_step;
        const int y_idx = (i >> 1) * mi_step;

        if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
          continue;

        if (cpi->sf.adaptive_motion_search)
          load_pred_mv(x, ctx);

        pc_tree->split[i]->index = i;
        rd_pick_partition(cpi, td, tile_data, tp,
                          mi_row + y_idx, mi_col + x_idx,
                          subsize, &this_rdc,
                          best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);

        if (this_rdc.rate == INT_MAX) {
          sum_rdc.rdcost = INT64_MAX;
          break;
        } else {
          sum_rdc.rate += this_rdc.rate;
          sum_rdc.dist += this_rdc.dist;
          sum_rdc.rdcost += this_rdc.rdcost;
        }
      }
    }

    if (sum_rdc.rdcost < best_rdc.rdcost && i == 4) {
      pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
      sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
                              sum_rdc.rate, sum_rdc.dist);
      if (sum_rdc.rdcost < best_rdc.rdcost) {
        best_rdc = sum_rdc;
        pc_tree->partitioning = PARTITION_SPLIT;
      }
    } else {
      // skip rectangular partition test when larger block size
      // gives better rd cost
      if (cpi->sf.less_rectangular_check)
        do_rect &= !partition_none_allowed;
    }
    restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  // PARTITION_HORZ
  if (partition_horz_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_HORZ);
    if (cpi->sf.adaptive_motion_search)
      load_pred_mv(x, ctx);
    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
        partition_none_allowed)
      pc_tree->horizontal[0].pred_interp_filter =
          ctx->mic.mbmi.interp_filter;
    rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                     &pc_tree->horizontal[0], best_rdc.rdcost);

    if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + mi_step < cm->mi_rows &&
        bsize > BLOCK_8X8) {
      PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
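      // Fully encode the top half here so that its reconstructed pixels and
      // entropy context are in place before the bottom half is searched.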
      update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
      encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);

      if (cpi->sf.adaptive_motion_search)
        load_pred_mv(x, ctx);
      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
          partition_none_allowed)
        pc_tree->horizontal[1].pred_interp_filter =
            ctx->mic.mbmi.interp_filter;
      rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col,
                       &this_rdc, subsize, &pc_tree->horizontal[1],
                       best_rdc.rdcost - sum_rdc.rdcost);
      if (this_rdc.rate == INT_MAX) {
        sum_rdc.rdcost = INT64_MAX;
      } else {
        sum_rdc.rate += this_rdc.rate;
        sum_rdc.dist += this_rdc.dist;
        sum_rdc.rdcost += this_rdc.rdcost;
      }
    }

    if (sum_rdc.rdcost < best_rdc.rdcost) {
      pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      sum_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
      sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
      if (sum_rdc.rdcost < best_rdc.rdcost) {
        best_rdc = sum_rdc;
        pc_tree->partitioning = PARTITION_HORZ;
      }
    }
    restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  // PARTITION_VERT
  if (partition_vert_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_VERT);
    if (cpi->sf.adaptive_motion_search)
      load_pred_mv(x, ctx);
    if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
        partition_none_allowed)
      pc_tree->vertical[0].pred_interp_filter =
          ctx->mic.mbmi.interp_filter;
    rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                     &pc_tree->vertical[0], best_rdc.rdcost);
    if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + mi_step < cm->mi_cols &&
        bsize > BLOCK_8X8) {
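      // As in the horizontal case, encode the left half before searching
      // the right half so the latter sees up-to-date reconstruction and
      // context.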
      update_state(cpi, td, &pc_tree->vertical[0], mi_row, mi_col, subsize, 0);
      encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize,
                        &pc_tree->vertical[0]);

      if (cpi->sf.adaptive_motion_search)
        load_pred_mv(x, ctx);
      if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
          partition_none_allowed)
        pc_tree->vertical[1].pred_interp_filter =
            ctx->mic.mbmi.interp_filter;
      rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step,
                       &this_rdc, subsize,
                       &pc_tree->vertical[1], best_rdc.rdcost - sum_rdc.rdcost);
      if (this_rdc.rate == INT_MAX) {
        sum_rdc.rdcost = INT64_MAX;
      } else {
        sum_rdc.rate += this_rdc.rate;
        sum_rdc.dist += this_rdc.dist;
        sum_rdc.rdcost += this_rdc.rdcost;
      }
    }

    if (sum_rdc.rdcost < best_rdc.rdcost) {
      pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
      sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
                              sum_rdc.rate, sum_rdc.dist);
      if (sum_rdc.rdcost < best_rdc.rdcost) {
        best_rdc = sum_rdc;
        pc_tree->partitioning = PARTITION_VERT;
      }
    }
    restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
  }

  // TODO(jbb): This code was added to silence a static analysis warning
  // about best_rd being unused after this point. It should be refactored so
  // that the duplicate checks occur in a sub-function and are therefore
  // actually used.
  (void) best_rd;
  *rd_cost = best_rdc;

  if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX &&
      pc_tree->index != 3) {
    int output_enabled = (bsize == BLOCK_64X64);
    encode_sb(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
              bsize, pc_tree);
  }

  if (bsize == BLOCK_64X64) {
    assert(tp_orig < *tp);
    assert(best_rdc.rate < INT_MAX);
    assert(best_rdc.dist < INT64_MAX);
  } else {
    assert(tp_orig == *tp);
  }
}

static void encode_rd_sb_row(VP9_COMP *cpi,
                             ThreadData *td,
                             TileDataEnc *tile_data,
                             int mi_row,
                             TOKENEXTRA **tp) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  SPEED_FEATURES *const sf = &cpi->sf;
  int mi_col;

  // Initialize the left context for the new SB row
  memset(&xd->left_context, 0, sizeof(xd->left_context));
  memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));

  // Code each SB in the row
  for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
       mi_col += MI_BLOCK_SIZE) {
    const struct segmentation *const seg = &cm->seg;
    int dummy_rate;
    int64_t dummy_dist;
    RD_COST dummy_rdc;
    int i;
    int seg_skip = 0;

    const int idx_str = cm->mi_stride * mi_row + mi_col;
    MODE_INFO **mi = cm->mi_grid_visible + idx_str;

    if (sf->adaptive_pred_interp_filter) {
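      // Reset the cached interpolation filter guesses to SWITCHABLE
      // (i.e. no guess) before searching this superblock.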
      for (i = 0; i < 64; ++i)
        td->leaf_tree[i].pred_interp_filter = SWITCHABLE;

      for (i = 0; i < 64; ++i) {
        td->pc_tree[i].vertical[0].pred_interp_filter = SWITCHABLE;
        td->pc_tree[i].vertical[1].pred_interp_filter = SWITCHABLE;
        td->pc_tree[i].horizontal[0].pred_interp_filter = SWITCHABLE;
        td->pc_tree[i].horizontal[1].pred_interp_filter = SWITCHABLE;
      }
    }

    vp9_zero(x->pred_mv);
    td->pc_root->index = 0;

    if (seg->enabled) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      int segment_id = vp9_get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
      seg_skip = vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP);
    }

    x->source_variance = UINT_MAX;
    if (sf->partition_search_type == FIXED_PARTITION || seg_skip) {
      const BLOCK_SIZE bsize =
          seg_skip ? BLOCK_64X64 : sf->always_this_block_size;
      set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
      set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                       BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
    } else if (cpi->partition_search_skippable_frame) {
      BLOCK_SIZE bsize;
      set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
      bsize = get_rd_var_based_fixed_partition(cpi, x, mi_row, mi_col);
      set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                       BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
    } else if (sf->partition_search_type == VAR_BASED_PARTITION &&
               cm->frame_type != KEY_FRAME) {
      choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
      rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                       BLOCK_64X64, &dummy_rate, &dummy_dist, 1, td->pc_root);
    } else {
      // If required set upper and lower partition size limits
      if (sf->auto_min_max_partition_size) {
        set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
        rd_auto_partition_range(cpi, tile_info, xd, mi_row, mi_col,
                                &x->min_partition_size,
                                &x->max_partition_size);
      }
      rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, BLOCK_64X64,
                        &dummy_rdc, INT64_MAX, td->pc_root);
    }
  }
}

static void init_encode_frame_mb_context(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->td.mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);

  // Copy data over into macro block data structures.
  vp9_setup_src_planes(x, cpi->Source, 0, 0);

  vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  memset(xd->above_context[0], 0,
         sizeof(*xd->above_context[0]) *
         2 * aligned_mi_cols * MAX_MB_PLANE);
  memset(xd->above_seg_context, 0,
         sizeof(*xd->above_seg_context) * aligned_mi_cols);
}

static int check_dual_ref_flags(VP9_COMP *cpi) {
  const int ref_flags = cpi->ref_frame_flags;

  if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
    return 0;
  } else {
    return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
        + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
  }
}

static void reset_skip_tx_size(VP9_COMMON *cm, TX_SIZE max_tx_size) {
  int mi_row, mi_col;
  const int mis = cm->mi_stride;
  MODE_INFO **mi_ptr = cm->mi_grid_visible;

  for (mi_row = 0; mi_row < cm->mi_rows; ++mi_row, mi_ptr += mis) {
    for (mi_col = 0; mi_col < cm->mi_cols; ++mi_col) {
      if (mi_ptr[mi_col]->mbmi.tx_size > max_tx_size)
        mi_ptr[mi_col]->mbmi.tx_size = max_tx_size;
    }
  }
}

static MV_REFERENCE_FRAME get_frame_type(const VP9_COMP *cpi) {
  if (frame_is_intra_only(&cpi->common))
    return INTRA_FRAME;
  else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
    return ALTREF_FRAME;
  else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
    return GOLDEN_FRAME;
  else
    return LAST_FRAME;
}

static TX_MODE select_tx_mode(const VP9_COMP *cpi, MACROBLOCKD *const xd) {
  if (xd->lossless)
    return ONLY_4X4;
  if (cpi->common.frame_type == KEY_FRAME &&
      cpi->sf.use_nonrd_pick_mode &&
      cpi->sf.partition_search_type == VAR_BASED_PARTITION)
    return ALLOW_16X16;
  if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
    return ALLOW_32X32;
  else if (cpi->sf.tx_size_search_method == USE_FULL_RD ||
           cpi->sf.tx_size_search_method == USE_TX_8X8)
    return TX_MODE_SELECT;
  else
    return cpi->common.tx_mode;
}

static void hybrid_intra_mode_search(VP9_COMP *cpi, MACROBLOCK *const x,
                                     RD_COST *rd_cost, BLOCK_SIZE bsize,
                                     PICK_MODE_CONTEXT *ctx) {
  if (bsize < BLOCK_16X16)
    vp9_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, INT64_MAX);
  else
    vp9_pick_intra_mode(cpi, x, rd_cost, bsize, ctx);
}

static void nonrd_pick_sb_modes(VP9_COMP *cpi,
                                TileDataEnc *tile_data, MACROBLOCK *const x,
                                int mi_row, int mi_col, RD_COST *rd_cost,
                                BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi;
  set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
  mbmi = &xd->mi[0]->mbmi;
  mbmi->sb_type = bsize;

  if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
    if (cyclic_refresh_segment_id_boosted(mbmi->segment_id))
      x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);

  if (cm->frame_type == KEY_FRAME)
    hybrid_intra_mode_search(cpi, x, rd_cost, bsize, ctx);
  else if (vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
    set_mode_info_seg_skip(x, cm->tx_mode, rd_cost, bsize);
  else if (bsize >= BLOCK_8X8)
    vp9_pick_inter_mode(cpi, x, tile_data, mi_row, mi_col,
                        rd_cost, bsize, ctx);
  else
    vp9_pick_inter_mode_sub8x8(cpi, x, tile_data, mi_row, mi_col,
                               rd_cost, bsize, ctx);

  duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);

  if (rd_cost->rate == INT_MAX)
    vp9_rd_cost_reset(rd_cost);

  ctx->rate = rd_cost->rate;
  ctx->dist = rd_cost->dist;
}

static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x,
                              int mi_row, int mi_col,
                              BLOCK_SIZE bsize,
                              PC_TREE *pc_tree) {
  MACROBLOCKD *xd = &x->e_mbd;
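  // hbs is half the block width/height in mode-info (8-pel) units, i.e. the
  // offset from this block's origin to its lower/right sub-blocks.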
  int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  PARTITION_TYPE partition = pc_tree->partitioning;
  BLOCK_SIZE subsize = get_subsize(bsize, partition);

  assert(bsize >= BLOCK_8X8);

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  switch (partition) {
    case PARTITION_NONE:
      set_mode_info_offsets(cm, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->none.mic;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
      break;
    case PARTITION_VERT:
      set_mode_info_offsets(cm, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->vertical[0].mic;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);

      if (mi_col + hbs < cm->mi_cols) {
        set_mode_info_offsets(cm, xd, mi_row, mi_col + hbs);
        *(xd->mi[0]) = pc_tree->vertical[1].mic;
        duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, subsize);
      }
      break;
    case PARTITION_HORZ:
      set_mode_info_offsets(cm, xd, mi_row, mi_col);
      *(xd->mi[0]) = pc_tree->horizontal[0].mic;
      duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
      if (mi_row + hbs < cm->mi_rows) {
        set_mode_info_offsets(cm, xd, mi_row + hbs, mi_col);
        *(xd->mi[0]) = pc_tree->horizontal[1].mic;
        duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, subsize);
      }
      break;
    case PARTITION_SPLIT: {
      fill_mode_info_sb(cm, x, mi_row, mi_col, subsize, pc_tree->split[0]);
      fill_mode_info_sb(cm, x, mi_row, mi_col + hbs, subsize,
                        pc_tree->split[1]);
      fill_mode_info_sb(cm, x, mi_row + hbs, mi_col, subsize,
                        pc_tree->split[2]);
      fill_mode_info_sb(cm, x, mi_row + hbs, mi_col + hbs, subsize,
                        pc_tree->split[3]);
      break;
    }
    default:
      break;
  }
}

// Reset the prediction pixel ready flag recursively.
static void pred_pixel_ready_reset(PC_TREE *pc_tree, BLOCK_SIZE bsize) {
  pc_tree->none.pred_pixel_ready = 0;
  pc_tree->horizontal[0].pred_pixel_ready = 0;
  pc_tree->horizontal[1].pred_pixel_ready = 0;
  pc_tree->vertical[0].pred_pixel_ready = 0;
  pc_tree->vertical[1].pred_pixel_ready = 0;

  if (bsize > BLOCK_8X8) {
    BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_SPLIT);
    int i;
    for (i = 0; i < 4; ++i)
      pred_pixel_ready_reset(pc_tree->split[i], subsize);
  }
}

static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
                                 TileDataEnc *tile_data,
                                 TOKENEXTRA **tp, int mi_row,
                                 int mi_col, BLOCK_SIZE bsize, RD_COST *rd_cost,
                                 int do_recon, int64_t best_rd,
                                 PC_TREE *pc_tree) {
  const SPEED_FEATURES *const sf = &cpi->sf;
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int ms = num_8x8_blocks_wide_lookup[bsize] / 2;
  TOKENEXTRA *tp_orig = *tp;
  PICK_MODE_CONTEXT *ctx = &pc_tree->none;
  int i;
  BLOCK_SIZE subsize = bsize;
  RD_COST this_rdc, sum_rdc, best_rdc;
  int do_split = bsize >= BLOCK_8X8;
  int do_rect = 1;
  // Override skipping rectangular partition operations for edge blocks
  const int force_horz_split = (mi_row + ms >= cm->mi_rows);
  const int force_vert_split = (mi_col + ms >= cm->mi_cols);
  const int xss = x->e_mbd.plane[1].subsampling_x;
  const int yss = x->e_mbd.plane[1].subsampling_y;

  int partition_none_allowed = !force_horz_split && !force_vert_split;
  int partition_horz_allowed = !force_vert_split && yss <= xss &&
                               bsize >= BLOCK_8X8;
  int partition_vert_allowed = !force_horz_split && xss <= yss &&
                               bsize >= BLOCK_8X8;
  (void) tp_orig;
  assert(num_8x8_blocks_wide_lookup[bsize] ==
         num_8x8_blocks_high_lookup[bsize]);

  vp9_rd_cost_init(&sum_rdc);
  vp9_rd_cost_reset(&best_rdc);
  best_rdc.rdcost = best_rd;

  // Determine which partition types to search, according to the speed
  // features. The size thresholds compared against here are square block
  // sizes.
  if (sf->auto_min_max_partition_size) {
    partition_none_allowed &= (bsize <= x->max_partition_size &&
                               bsize >= x->min_partition_size);
    partition_horz_allowed &= ((bsize <= x->max_partition_size &&
                                bsize > x->min_partition_size) ||
                               force_horz_split);
    partition_vert_allowed &= ((bsize <= x->max_partition_size &&
                                bsize > x->min_partition_size) ||
                               force_vert_split);
    do_split &= bsize > x->min_partition_size;
  }
  if (sf->use_square_partition_only) {
    partition_horz_allowed &= force_horz_split;
    partition_vert_allowed &= force_vert_split;
  }
  ctx->pred_pixel_ready = !(partition_vert_allowed ||
                            partition_horz_allowed ||
                            do_split);

  // PARTITION_NONE
  if (partition_none_allowed) {
    nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
                        &this_rdc, bsize, ctx);
    ctx->mic.mbmi = xd->mi[0]->mbmi;
    ctx->skip_txfm[0] = x->skip_txfm[0];
    ctx->skip = x->skip;

    if (this_rdc.rate != INT_MAX) {
      int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
      this_rdc.rate += cpi->partition_cost[pl][PARTITION_NONE];
      this_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
                               this_rdc.rate, this_rdc.dist);
      if (this_rdc.rdcost < best_rdc.rdcost) {
        int64_t dist_breakout_thr = sf->partition_search_breakout_dist_thr;
        int64_t rate_breakout_thr = sf->partition_search_breakout_rate_thr;

        dist_breakout_thr >>= 8 - (b_width_log2_lookup[bsize] +
                                   b_height_log2_lookup[bsize]);

        rate_breakout_thr *= num_pels_log2_lookup[bsize];

        best_rdc = this_rdc;
        if (bsize >= BLOCK_8X8)
          pc_tree->partitioning = PARTITION_NONE;

        if (!x->e_mbd.lossless &&
            this_rdc.rate < rate_breakout_thr &&
            this_rdc.dist < dist_breakout_thr) {
          do_split = 0;
          do_rect = 0;
        }
      }
    }
  }

  // store estimated motion vector
  store_pred_mv(x, ctx);

  // PARTITION_SPLIT
  if (do_split) {
    int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
    sum_rdc.rate += cpi->partition_cost[pl][PARTITION_SPLIT];
    sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv, sum_rdc.rate, sum_rdc.dist);
    subsize = get_subsize(bsize, PARTITION_SPLIT);
    for (i = 0; i < 4 && sum_rdc.rdcost < best_rdc.rdcost; ++i) {
      const int x_idx = (i & 1) * ms;
      const int y_idx = (i >> 1) * ms;

      if (mi_row + y_idx >= cm->mi_rows || mi_col + x_idx >= cm->mi_cols)
        continue;
      load_pred_mv(x, ctx);
      nonrd_pick_partition(cpi, td, tile_data, tp,
                           mi_row + y_idx, mi_col + x_idx,
                           subsize, &this_rdc, 0,
                           best_rdc.rdcost - sum_rdc.rdcost, pc_tree->split[i]);

      if (this_rdc.rate == INT_MAX) {
        vp9_rd_cost_reset(&sum_rdc);
      } else {
        sum_rdc.rate += this_rdc.rate;
        sum_rdc.dist += this_rdc.dist;
        sum_rdc.rdcost += this_rdc.rdcost;
      }
    }

    if (sum_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = sum_rdc;
      pc_tree->partitioning = PARTITION_SPLIT;
    } else {
      // skip rectangular partition test when larger block size
      // gives better rd cost
      if (sf->less_rectangular_check)
        do_rect &= !partition_none_allowed;
    }
  }

  // PARTITION_HORZ
  if (partition_horz_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_HORZ);
    if (sf->adaptive_motion_search)
      load_pred_mv(x, ctx);
    pc_tree->horizontal[0].pred_pixel_ready = 1;
    nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                        &pc_tree->horizontal[0]);

    pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
    pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
    pc_tree->horizontal[0].skip = x->skip;

    if (sum_rdc.rdcost < best_rdc.rdcost && mi_row + ms < cm->mi_rows) {
      load_pred_mv(x, ctx);
      pc_tree->horizontal[1].pred_pixel_ready = 1;
      nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + ms, mi_col,
                          &this_rdc, subsize,
                          &pc_tree->horizontal[1]);

      pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
      pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
      pc_tree->horizontal[1].skip = x->skip;

      if (this_rdc.rate == INT_MAX) {
        vp9_rd_cost_reset(&sum_rdc);
      } else {
        int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
        this_rdc.rate += cpi->partition_cost[pl][PARTITION_HORZ];
        sum_rdc.rate += this_rdc.rate;
        sum_rdc.dist += this_rdc.dist;
        sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
                                sum_rdc.rate, sum_rdc.dist);
      }
    }

    if (sum_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = sum_rdc;
      pc_tree->partitioning = PARTITION_HORZ;
    } else {
      pred_pixel_ready_reset(pc_tree, bsize);
    }
  }

  // PARTITION_VERT
  if (partition_vert_allowed && do_rect) {
    subsize = get_subsize(bsize, PARTITION_VERT);
    if (sf->adaptive_motion_search)
      load_pred_mv(x, ctx);
    pc_tree->vertical[0].pred_pixel_ready = 1;
    nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
                        &pc_tree->vertical[0]);
    pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
    pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
    pc_tree->vertical[0].skip = x->skip;

    if (sum_rdc.rdcost < best_rdc.rdcost && mi_col + ms < cm->mi_cols) {
      load_pred_mv(x, ctx);
      pc_tree->vertical[1].pred_pixel_ready = 1;
      nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + ms,
                          &this_rdc, subsize,
                          &pc_tree->vertical[1]);
      pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
      pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
      pc_tree->vertical[1].skip = x->skip;

      if (this_rdc.rate == INT_MAX) {
        vp9_rd_cost_reset(&sum_rdc);
      } else {
        int pl = partition_plane_context(xd, mi_row, mi_col, bsize);
        sum_rdc.rate += cpi->partition_cost[pl][PARTITION_VERT];
        sum_rdc.rate += this_rdc.rate;
        sum_rdc.dist += this_rdc.dist;
        sum_rdc.rdcost = RDCOST(x->rdmult, x->rddiv,
                                sum_rdc.rate, sum_rdc.dist);
      }
    }

    if (sum_rdc.rdcost < best_rdc.rdcost) {
      best_rdc = sum_rdc;
      pc_tree->partitioning = PARTITION_VERT;
    } else {
      pred_pixel_ready_reset(pc_tree, bsize);
    }
  }

  *rd_cost = best_rdc;

  if (best_rdc.rate == INT_MAX) {
    vp9_rd_cost_reset(rd_cost);
    return;
  }

  // update mode info array
  fill_mode_info_sb(cm, x, mi_row, mi_col, bsize, pc_tree);

  if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX && do_recon) {
    int output_enabled = (bsize == BLOCK_64X64);
    encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
                 bsize, pc_tree);
  }

  if (bsize == BLOCK_64X64 && do_recon) {
    assert(tp_orig < *tp);
    assert(best_rdc.rate < INT_MAX);
    assert(best_rdc.dist < INT64_MAX);
  } else {
    assert(tp_orig == *tp);
  }
}

static void nonrd_select_partition(VP9_COMP *cpi,
                                   ThreadData *td,
                                   TileDataEnc *tile_data,
                                   MODE_INFO **mi,
                                   TOKENEXTRA **tp,
                                   int mi_row, int mi_col,
                                   BLOCK_SIZE bsize, int output_enabled,
                                   RD_COST *rd_cost, PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  const int mis = cm->mi_stride;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
  RD_COST this_rdc;

  vp9_rd_cost_reset(&this_rdc);
  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  subsize = (bsize >= BLOCK_8X8) ? mi[0]->mbmi.sb_type : BLOCK_4X4;
  partition = partition_lookup[bsl][subsize];

  if (bsize == BLOCK_32X32 && partition != PARTITION_NONE &&
      subsize >= BLOCK_16X16) {
    x->max_partition_size = BLOCK_32X32;
    x->min_partition_size = BLOCK_8X8;
    nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
                         rd_cost, 0, INT64_MAX, pc_tree);
  } else if (bsize == BLOCK_16X16 && partition != PARTITION_NONE) {
    x->max_partition_size = BLOCK_16X16;
    x->min_partition_size = BLOCK_8X8;
    nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, bsize,
                         rd_cost, 0, INT64_MAX, pc_tree);
  } else {
    switch (partition) {
      case PARTITION_NONE:
        pc_tree->none.pred_pixel_ready = 1;
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
                            subsize, &pc_tree->none);
        pc_tree->none.mic.mbmi = xd->mi[0]->mbmi;
        pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
        pc_tree->none.skip = x->skip;
        break;
      case PARTITION_VERT:
        pc_tree->vertical[0].pred_pixel_ready = 1;
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
                            subsize, &pc_tree->vertical[0]);
        pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
        pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
        pc_tree->vertical[0].skip = x->skip;
        if (mi_col + hbs < cm->mi_cols) {
          pc_tree->vertical[1].pred_pixel_ready = 1;
          nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
                              &this_rdc, subsize, &pc_tree->vertical[1]);
          pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
          pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
          pc_tree->vertical[1].skip = x->skip;
          if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
              rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
            rd_cost->rate += this_rdc.rate;
            rd_cost->dist += this_rdc.dist;
          }
        }
        break;
      case PARTITION_HORZ:
        pc_tree->horizontal[0].pred_pixel_ready = 1;
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
                            subsize, &pc_tree->horizontal[0]);
        pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
        pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
        pc_tree->horizontal[0].skip = x->skip;
        if (mi_row + hbs < cm->mi_rows) {
          pc_tree->horizontal[1].pred_pixel_ready = 1;
          nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
                              &this_rdc, subsize, &pc_tree->horizontal[1]);
          pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
          pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
          pc_tree->horizontal[1].skip = x->skip;
          if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
              rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
            rd_cost->rate += this_rdc.rate;
            rd_cost->dist += this_rdc.dist;
          }
        }
        break;
      case PARTITION_SPLIT:
        subsize = get_subsize(bsize, PARTITION_SPLIT);
        nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                               subsize, output_enabled, rd_cost,
                               pc_tree->split[0]);
        nonrd_select_partition(cpi, td, tile_data, mi + hbs, tp,
                               mi_row, mi_col + hbs, subsize, output_enabled,
                               &this_rdc, pc_tree->split[1]);
        if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
            rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
          rd_cost->rate += this_rdc.rate;
          rd_cost->dist += this_rdc.dist;
        }
        nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis, tp,
                               mi_row + hbs, mi_col, subsize, output_enabled,
                               &this_rdc, pc_tree->split[2]);
        if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
            rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
          rd_cost->rate += this_rdc.rate;
          rd_cost->dist += this_rdc.dist;
        }
        nonrd_select_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
                               mi_row + hbs, mi_col + hbs, subsize,
                               output_enabled, &this_rdc, pc_tree->split[3]);
        if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
            rd_cost->rate != INT_MAX && rd_cost->dist != INT64_MAX) {
          rd_cost->rate += this_rdc.rate;
          rd_cost->dist += this_rdc.dist;
        }
        break;
      default:
        assert(0 && "Invalid partition type.");
        break;
    }
  }

  if (bsize == BLOCK_64X64 && output_enabled)
    encode_sb_rt(cpi, td, tile_info, tp, mi_row, mi_col, 1, bsize, pc_tree);
}

static void nonrd_use_partition(VP9_COMP *cpi,
                                ThreadData *td,
                                TileDataEnc *tile_data,
                                MODE_INFO **mi,
                                TOKENEXTRA **tp,
                                int mi_row, int mi_col,
                                BLOCK_SIZE bsize, int output_enabled,
                                RD_COST *dummy_cost, PC_TREE *pc_tree) {
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *tile_info = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  const int mis = cm->mi_stride;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;
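
  // The partitioning is not searched here; it is read back from the sb_type
  // already stored in the mode-info grid (e.g. by choose_partitioning or
  // set_fixed_partitioning) via partition_lookup.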
  subsize = (bsize >= BLOCK_8X8) ? mi[0]->mbmi.sb_type : BLOCK_4X4;
  partition = partition_lookup[bsl][subsize];

  if (output_enabled && bsize != BLOCK_4X4) {
    int ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
    td->counts->partition[ctx][partition]++;
  }

  switch (partition) {
    case PARTITION_NONE:
      pc_tree->none.pred_pixel_ready = 1;
      nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
                          subsize, &pc_tree->none);
      pc_tree->none.mic.mbmi = xd->mi[0]->mbmi;
      pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
      pc_tree->none.skip = x->skip;
      encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
                  subsize, &pc_tree->none);
      break;
    case PARTITION_VERT:
      pc_tree->vertical[0].pred_pixel_ready = 1;
      nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
                          subsize, &pc_tree->vertical[0]);
      pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
      pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
      pc_tree->vertical[0].skip = x->skip;
      encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
                  subsize, &pc_tree->vertical[0]);
      if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
        pc_tree->vertical[1].pred_pixel_ready = 1;
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
                            dummy_cost, subsize, &pc_tree->vertical[1]);
        pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
        pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
        pc_tree->vertical[1].skip = x->skip;
        encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col + hbs,
                    output_enabled, subsize, &pc_tree->vertical[1]);
      }
      break;
    case PARTITION_HORZ:
      pc_tree->horizontal[0].pred_pixel_ready = 1;
      nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
                          subsize, &pc_tree->horizontal[0]);
      pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
      pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
      pc_tree->horizontal[0].skip = x->skip;
      encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
                  subsize, &pc_tree->horizontal[0]);

      if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
        pc_tree->horizontal[1].pred_pixel_ready = 1;
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
                            dummy_cost, subsize, &pc_tree->horizontal[1]);
        pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
        pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
        pc_tree->horizontal[1].skip = x->skip;
        encode_b_rt(cpi, td, tile_info, tp, mi_row + hbs, mi_col,
                    output_enabled, subsize, &pc_tree->horizontal[1]);
      }
      break;
    case PARTITION_SPLIT:
      subsize = get_subsize(bsize, PARTITION_SPLIT);
      if (bsize == BLOCK_8X8) {
        nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
                            subsize, pc_tree->leaf_split[0]);
        encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col,
                    output_enabled, subsize, pc_tree->leaf_split[0]);
      } else {
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                            subsize, output_enabled, dummy_cost,
                            pc_tree->split[0]);
        nonrd_use_partition(cpi, td, tile_data, mi + hbs, tp,
                            mi_row, mi_col + hbs, subsize, output_enabled,
                            dummy_cost, pc_tree->split[1]);
        nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis, tp,
                            mi_row + hbs, mi_col, subsize, output_enabled,
                            dummy_cost, pc_tree->split[2]);
        nonrd_use_partition(cpi, td, tile_data, mi + hbs * mis + hbs, tp,
                            mi_row + hbs, mi_col + hbs, subsize,
                            output_enabled, dummy_cost, pc_tree->split[3]);
      }
      break;
    default:
      assert(0 && "Invalid partition type.");
      break;
  }

  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}

static void encode_nonrd_sb_row(VP9_COMP *cpi,
                                ThreadData *td,
                                TileDataEnc *tile_data,
                                int mi_row,
                                TOKENEXTRA **tp) {
  SPEED_FEATURES *const sf = &cpi->sf;
  VP9_COMMON *const cm = &cpi->common;
  TileInfo *const tile_info = &tile_data->tile_info;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  int mi_col;

  // Initialize the left context for the new SB row
  memset(&xd->left_context, 0, sizeof(xd->left_context));
  memset(xd->left_seg_context, 0, sizeof(xd->left_seg_context));

  // Code each SB in the row
  for (mi_col = tile_info->mi_col_start; mi_col < tile_info->mi_col_end;
       mi_col += MI_BLOCK_SIZE) {
    const struct segmentation *const seg = &cm->seg;
    RD_COST dummy_rdc;
    const int idx_str = cm->mi_stride * mi_row + mi_col;
    MODE_INFO **mi = cm->mi_grid_visible + idx_str;
    PARTITION_SEARCH_TYPE partition_search_type = sf->partition_search_type;
    BLOCK_SIZE bsize = BLOCK_64X64;
    int seg_skip = 0;
    x->source_variance = UINT_MAX;
    vp9_zero(x->pred_mv);
    vp9_rd_cost_init(&dummy_rdc);
    x->color_sensitivity[0] = 0;
    x->color_sensitivity[1] = 0;

    if (seg->enabled) {
      const uint8_t *const map = seg->update_map ? cpi->segmentation_map
                                                 : cm->last_frame_seg_map;
      int segment_id = vp9_get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
      seg_skip = vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP);
      if (seg_skip) {
        partition_search_type = FIXED_PARTITION;
      }
    }

    // Set the partition type of the 64X64 block
    switch (partition_search_type) {
      case VAR_BASED_PARTITION:
        // TODO(jingning, marpan): The mode decision and encoding process
        // support both intra and inter sub8x8 block coding for RTC mode.
        // Tune the thresholds accordingly so that sub8x8 block coding is
        // used where it improves coding performance.
        choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                            BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        break;
      case SOURCE_VAR_BASED_PARTITION:
        set_source_var_based_partition(cpi, tile_info, x, mi, mi_row, mi_col);
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                            BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        break;
      case FIXED_PARTITION:
        if (!seg_skip)
          bsize = sf->always_this_block_size;
        set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize);
        nonrd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                            BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        break;
      case REFERENCE_PARTITION:
        set_offsets(cpi, tile_info, x, mi_row, mi_col, BLOCK_64X64);
        if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled &&
            xd->mi[0]->mbmi.segment_id) {
          x->max_partition_size = BLOCK_64X64;
          x->min_partition_size = BLOCK_8X8;
          nonrd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col,
                               BLOCK_64X64, &dummy_rdc, 1,
                               INT64_MAX, td->pc_root);
        } else {
          choose_partitioning(cpi, tile_info, x, mi_row, mi_col);
          nonrd_select_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col,
                                 BLOCK_64X64, 1, &dummy_rdc, td->pc_root);
        }
        break;
      default:
        assert(0);
        break;
    }
  }
}
// end RTC play code

static int set_var_thresh_from_histogram(VP9_COMP *cpi) {
  const SPEED_FEATURES *const sf = &cpi->sf;
  const VP9_COMMON *const cm = &cpi->common;

  const uint8_t *src = cpi->Source->y_buffer;
  const uint8_t *last_src = cpi->Last_Source->y_buffer;
  const int src_stride = cpi->Source->y_stride;
  const int last_stride = cpi->Last_Source->y_stride;

  // Pick cutoff threshold
  const int cutoff = (MIN(cm->width, cm->height) >= 720) ?
      (cm->MBs * VAR_HIST_LARGE_CUT_OFF / 100) :
      (cm->MBs * VAR_HIST_SMALL_CUT_OFF / 100);
  DECLARE_ALIGNED(16, int, hist[VAR_HIST_BINS]);
  diff *var16 = cpi->source_diff_var;

  int sum = 0;
  int i, j;

  memset(hist, 0, VAR_HIST_BINS * sizeof(hist[0]));

  for (i = 0; i < cm->mb_rows; i++) {
    for (j = 0; j < cm->mb_cols; j++) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (cm->use_highbitdepth) {
        switch (cm->bit_depth) {
          case VPX_BITS_8:
            vpx_highbd_8_get16x16var(src, src_stride, last_src, last_stride,
                                     &var16->sse, &var16->sum);
            break;
          case VPX_BITS_10:
            vpx_highbd_10_get16x16var(src, src_stride, last_src, last_stride,
                                      &var16->sse, &var16->sum);
            break;
          case VPX_BITS_12:
            vpx_highbd_12_get16x16var(src, src_stride, last_src, last_stride,
                                      &var16->sse, &var16->sum);
            break;
          default:
            assert(0 && "cm->bit_depth should be VPX_BITS_8, VPX_BITS_10"
                        " or VPX_BITS_12");
            return -1;
        }
      } else {
        vpx_get16x16var(src, src_stride, last_src, last_stride,
                        &var16->sse, &var16->sum);
      }
#else
      vpx_get16x16var(src, src_stride, last_src, last_stride,
                      &var16->sse, &var16->sum);
#endif  // CONFIG_VP9_HIGHBITDEPTH
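      // Per-block variance: var = sse - sum^2 / N with N = 256 samples in a
      // 16x16 block, hence the >> 8.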
      var16->var = var16->sse -
          (((uint32_t)var16->sum * var16->sum) >> 8);

      if (var16->var >= VAR_HIST_MAX_BG_VAR)
        hist[VAR_HIST_BINS - 1]++;
      else
        hist[var16->var / VAR_HIST_FACTOR]++;

      src += 16;
      last_src += 16;
      var16++;
    }

    src = src - cm->mb_cols * 16 + 16 * src_stride;
    last_src = last_src - cm->mb_cols * 16 + 16 * last_stride;
  }

  cpi->source_var_thresh = 0;

  if (hist[VAR_HIST_BINS - 1] < cutoff) {
    for (i = 0; i < VAR_HIST_BINS - 1; i++) {
      sum += hist[i];

      if (sum > cutoff) {
        cpi->source_var_thresh = (i + 1) * VAR_HIST_FACTOR;
        return 0;
      }
    }
  }

  return sf->search_type_check_frequency;
}

static void source_var_based_partition_search_method(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  SPEED_FEATURES *const sf = &cpi->sf;

  if (cm->frame_type == KEY_FRAME) {
    // For key frame, use SEARCH_PARTITION.
    sf->partition_search_type = SEARCH_PARTITION;
  } else if (cm->intra_only) {
    sf->partition_search_type = FIXED_PARTITION;
  } else {
    if (cm->last_width != cm->width || cm->last_height != cm->height) {
      if (cpi->source_diff_var)
        vpx_free(cpi->source_diff_var);

      CHECK_MEM_ERROR(cm, cpi->source_diff_var,
                      vpx_calloc(cm->MBs, sizeof(diff)));
    }

    if (!cpi->frames_till_next_var_check)
      cpi->frames_till_next_var_check = set_var_thresh_from_histogram(cpi);

    if (cpi->frames_till_next_var_check > 0) {
      sf->partition_search_type = FIXED_PARTITION;
      cpi->frames_till_next_var_check--;
    }
  }
}

static int get_skip_encode_frame(const VP9_COMMON *cm, ThreadData *const td) {
  unsigned int intra_count = 0, inter_count = 0;
  int j;

  for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
    intra_count += td->counts->intra_inter[j][0];
    inter_count += td->counts->intra_inter[j][1];
  }

  return (intra_count << 2) < inter_count &&
         cm->frame_type != KEY_FRAME &&
         cm->show_frame;
}

void vp9_init_tile_data(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  int tile_col, tile_row;
  TOKENEXTRA *pre_tok = cpi->tile_tok[0][0];
  int tile_tok = 0;

  if (cpi->tile_data == NULL) {
    CHECK_MEM_ERROR(cm, cpi->tile_data,
        vpx_malloc(tile_cols * tile_rows * sizeof(*cpi->tile_data)));
    for (tile_row = 0; tile_row < tile_rows; ++tile_row)
      for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
        TileDataEnc *tile_data =
            &cpi->tile_data[tile_row * tile_cols + tile_col];
        int i, j;
        for (i = 0; i < BLOCK_SIZES; ++i) {
          for (j = 0; j < MAX_MODES; ++j) {
            tile_data->thresh_freq_fact[i][j] = 32;
            tile_data->mode_map[i][j] = j;
          }
        }
      }
  }

  for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
    for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
      TileInfo *tile_info =
          &cpi->tile_data[tile_row * tile_cols + tile_col].tile_info;
      vp9_tile_init(tile_info, cm, tile_row, tile_col);

      cpi->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
      pre_tok = cpi->tile_tok[tile_row][tile_col];
      tile_tok = allocated_tokens(*tile_info);
    }
  }
}

void vp9_encode_tile(VP9_COMP *cpi, ThreadData *td,
                     int tile_row, int tile_col) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  TileDataEnc *this_tile =
      &cpi->tile_data[tile_row * tile_cols + tile_col];
  const TileInfo * const tile_info = &this_tile->tile_info;
  TOKENEXTRA *tok = cpi->tile_tok[tile_row][tile_col];
  int mi_row;

  for (mi_row = tile_info->mi_row_start; mi_row < tile_info->mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    if (cpi->sf.use_nonrd_pick_mode)
      encode_nonrd_sb_row(cpi, td, this_tile, mi_row, &tok);
    else
      encode_rd_sb_row(cpi, td, this_tile, mi_row, &tok);
  }
  cpi->tok_count[tile_row][tile_col] =
      (unsigned int)(tok - cpi->tile_tok[tile_row][tile_col]);
  assert(tok - cpi->tile_tok[tile_row][tile_col] <=
         allocated_tokens(*tile_info));
}

static void encode_tiles(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  const int tile_cols = 1 << cm->log2_tile_cols;
  const int tile_rows = 1 << cm->log2_tile_rows;
  int tile_col, tile_row;

  vp9_init_tile_data(cpi);

  for (tile_row = 0; tile_row < tile_rows; ++tile_row)
    for (tile_col = 0; tile_col < tile_cols; ++tile_col)
      vp9_encode_tile(cpi, &cpi->td, tile_row, tile_col);
}

#if CONFIG_FP_MB_STATS
static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
                            VP9_COMMON *cm, uint8_t **this_frame_mb_stats) {
  uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
      cm->current_video_frame * cm->MBs * sizeof(uint8_t);

  if (mb_stats_in > firstpass_mb_stats->mb_stats_end)
    return EOF;

  *this_frame_mb_stats = mb_stats_in;

  return 1;
}
#endif

static void encode_frame_internal(VP9_COMP *cpi) {
  SPEED_FEATURES *const sf = &cpi->sf;
  RD_OPT *const rd_opt = &cpi->rd;
  ThreadData *const td = &cpi->td;
  MACROBLOCK *const x = &td->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  RD_COUNTS *const rdc = &cpi->td.rd_counts;

  xd->mi = cm->mi_grid_visible;
  xd->mi[0] = cm->mi;

  vp9_zero(*td->counts);
  vp9_zero(rdc->coef_counts);
  vp9_zero(rdc->comp_pred_diff);
  vp9_zero(rdc->filter_diff);
  vp9_zero(rdc->tx_select_diff);
  vp9_zero(rd_opt->tx_select_threshes);
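
  // Lossless coding is only possible when the base qindex and all delta-Qs
  // are zero; in that case the 4x4 Walsh-Hadamard transform pair
  // (fwht4x4/iwht4x4) is selected below instead of the DCT.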
  xd->lossless = cm->base_qindex == 0 &&
                 cm->y_dc_delta_q == 0 &&
                 cm->uv_dc_delta_q == 0 &&
                 cm->uv_ac_delta_q == 0;

#if CONFIG_VP9_HIGHBITDEPTH
  if (cm->use_highbitdepth)
    x->fwd_txm4x4 = xd->lossless ? vp9_highbd_fwht4x4 : vp9_highbd_fdct4x4;
  else
    x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vp9_fdct4x4;
  x->highbd_itxm_add = xd->lossless ? vp9_highbd_iwht4x4_add :
                                      vp9_highbd_idct4x4_add;
#else
  x->fwd_txm4x4 = xd->lossless ? vp9_fwht4x4 : vp9_fdct4x4;
#endif  // CONFIG_VP9_HIGHBITDEPTH
  x->itxm_add = xd->lossless ? vp9_iwht4x4_add : vp9_idct4x4_add;

  if (xd->lossless)
    x->optimize = 0;

  cm->tx_mode = select_tx_mode(cpi, xd);

  vp9_frame_init_quantizer(cpi);

  vp9_initialize_rd_consts(cpi);
  vp9_initialize_me_consts(cpi, x, cm->base_qindex);
  init_encode_frame_mb_context(cpi);
  cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
                           cm->width == cm->last_width &&
                           cm->height == cm->last_height &&
                           !cm->intra_only &&
                           cm->last_show_frame;
  // Special case: set prev_mi to NULL when the previous mode info
  // context cannot be used.
  cm->prev_mi = cm->use_prev_frame_mvs ?
                cm->prev_mip + cm->mi_stride + 1 : NULL;

  x->quant_fp = cpi->sf.use_quant_fp;
  vp9_zero(x->skip_txfm);
  if (sf->use_nonrd_pick_mode) {
    // Initialize internal buffer pointers for rtc coding, where non-RD
    // mode decision is used and hence no buffer pointer swap is needed.
    int i;
    struct macroblock_plane *const p = x->plane;
    struct macroblockd_plane *const pd = xd->plane;
    PICK_MODE_CONTEXT *ctx = &cpi->td.pc_root->none;

    for (i = 0; i < MAX_MB_PLANE; ++i) {
      p[i].coeff = ctx->coeff_pbuf[i][0];
      p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
      pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
      p[i].eobs = ctx->eobs_pbuf[i][0];
    }
    vp9_zero(x->zcoeff_blk);

    if (cm->frame_type != KEY_FRAME && cpi->rc.frames_since_golden == 0)
      cpi->ref_frame_flags &= (~VP9_GOLD_FLAG);

    if (sf->partition_search_type == SOURCE_VAR_BASED_PARTITION)
      source_var_based_partition_search_method(cpi);
  }

  {
    struct vpx_usec_timer emr_timer;
    vpx_usec_timer_start(&emr_timer);

#if CONFIG_FP_MB_STATS
    if (cpi->use_fp_mb_stats) {
      input_fpmb_stats(&cpi->twopass.firstpass_mb_stats, cm,
                       &cpi->twopass.this_frame_mb_stats);
    }
#endif

    // If allowed, encode tiles in parallel, with one thread handling one
    // tile.
    if (MIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1)
      vp9_encode_tiles_mt(cpi);
    else
      encode_tiles(cpi);

    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
  }

  sf->skip_encode_frame = sf->skip_encode_sb ?
      get_skip_encode_frame(cm, td) : 0;

#if 0
  // Keep record of the total distortion this time around for future use
  cpi->last_frame_distortion = cpi->frame_distortion;
#endif
}

static INTERP_FILTER get_interp_filter(
    const int64_t threshes[SWITCHABLE_FILTER_CONTEXTS], int is_alt_ref) {
  if (!is_alt_ref &&
      threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP] &&
      threshes[EIGHTTAP_SMOOTH] > threshes[EIGHTTAP_SHARP] &&
      threshes[EIGHTTAP_SMOOTH] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP_SMOOTH;
  } else if (threshes[EIGHTTAP_SHARP] > threshes[EIGHTTAP] &&
             threshes[EIGHTTAP_SHARP] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP_SHARP;
  } else if (threshes[EIGHTTAP] > threshes[SWITCHABLE - 1]) {
    return EIGHTTAP;
  } else {
    return SWITCHABLE;
  }
}

void vp9_encode_frame(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;

  // In the longer term the encoder should be generalized to match the
  // decoder such that we allow compound where one of the 3 buffers has a
  // different sign bias and that buffer is then the fixed ref. However, this
  // requires further work in the rd loop. For now the only supported encoder
  // side behavior is where the ALT ref buffer has opposite sign bias to
  // the other two.
  if (!frame_is_intra_only(cm)) {
    if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
             cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
        (cm->ref_frame_sign_bias[ALTREF_FRAME] ==
             cm->ref_frame_sign_bias[LAST_FRAME])) {
      cpi->allow_comp_inter_inter = 0;
    } else {
      cpi->allow_comp_inter_inter = 1;
      cm->comp_fixed_ref = ALTREF_FRAME;
      cm->comp_var_ref[0] = LAST_FRAME;
      cm->comp_var_ref[1] = GOLDEN_FRAME;
    }
  }

  if (cpi->sf.frame_parameter_update) {
    int i;
    RD_OPT *const rd_opt = &cpi->rd;
    FRAME_COUNTS *counts = cpi->td.counts;
    RD_COUNTS *const rdc = &cpi->td.rd_counts;

    // This code does a single RD pass over the whole frame assuming
    // either compound, single or hybrid prediction as per whatever has
    // worked best for that type of frame in the past.
    // It also predicts whether another coding mode would have worked
    // better than this coding mode. If that is the case, it remembers
    // that for subsequent frames.
    // It does the same analysis for transform size selection also.
    const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
    int64_t *const mode_thrs = rd_opt->prediction_type_threshes[frame_type];
    int64_t *const filter_thrs = rd_opt->filter_threshes[frame_type];
    int *const tx_thrs = rd_opt->tx_select_threshes[frame_type];
    const int is_alt_ref = frame_type == ALTREF_FRAME;

    /* prediction (compound, single or hybrid) mode selection */
    if (is_alt_ref || !cpi->allow_comp_inter_inter)
      cm->reference_mode = SINGLE_REFERENCE;
    else if (mode_thrs[COMPOUND_REFERENCE] > mode_thrs[SINGLE_REFERENCE] &&
             mode_thrs[COMPOUND_REFERENCE] >
                 mode_thrs[REFERENCE_MODE_SELECT] &&
             check_dual_ref_flags(cpi) &&
             cpi->static_mb_pct == 100)
      cm->reference_mode = COMPOUND_REFERENCE;
    else if (mode_thrs[SINGLE_REFERENCE] > mode_thrs[REFERENCE_MODE_SELECT])
      cm->reference_mode = SINGLE_REFERENCE;
    else
      cm->reference_mode = REFERENCE_MODE_SELECT;

    if (cm->interp_filter == SWITCHABLE)
      cm->interp_filter = get_interp_filter(filter_thrs, is_alt_ref);

    encode_frame_internal(cpi);
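
    // Each threshold is updated as a running average: the new value is the
    // mean of the old threshold and this frame's per-macroblock rd
    // difference, so the influence of past frames decays geometrically.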
    for (i = 0; i < REFERENCE_MODES; ++i)
      mode_thrs[i] = (mode_thrs[i] + rdc->comp_pred_diff[i] / cm->MBs) / 2;

    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
      filter_thrs[i] = (filter_thrs[i] + rdc->filter_diff[i] / cm->MBs) / 2;

    for (i = 0; i < TX_MODES; ++i) {
      int64_t pd = rdc->tx_select_diff[i];
      if (i == TX_MODE_SELECT)
        pd -= RDCOST(cpi->td.mb.rdmult, cpi->td.mb.rddiv,
                     2048 * (TX_SIZES - 1), 0);
      tx_thrs[i] = (tx_thrs[i] + (int)(pd / cm->MBs)) / 2;
    }

    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      int single_count_zero = 0;
      int comp_count_zero = 0;

      for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
        single_count_zero += counts->comp_inter[i][0];
        comp_count_zero += counts->comp_inter[i][1];
      }

      if (comp_count_zero == 0) {
        cm->reference_mode = SINGLE_REFERENCE;
        vp9_zero(counts->comp_inter);
      } else if (single_count_zero == 0) {
        cm->reference_mode = COMPOUND_REFERENCE;
        vp9_zero(counts->comp_inter);
      }
    }
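
    // Inspect the tx-size usage counts gathered during the pass; if some
    // transform sizes were never chosen, narrow tx_mode for the bitstream
    // and clamp any stale larger tx sizes left in the mode-info grid.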
    if (cm->tx_mode == TX_MODE_SELECT) {
      int count4x4 = 0;
      int count8x8_lp = 0, count8x8_8x8p = 0;
      int count16x16_16x16p = 0, count16x16_lp = 0;
      int count32x32 = 0;

      for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
        count4x4 += counts->tx.p32x32[i][TX_4X4];
        count4x4 += counts->tx.p16x16[i][TX_4X4];
        count4x4 += counts->tx.p8x8[i][TX_4X4];

        count8x8_lp += counts->tx.p32x32[i][TX_8X8];
        count8x8_lp += counts->tx.p16x16[i][TX_8X8];
        count8x8_8x8p += counts->tx.p8x8[i][TX_8X8];

        count16x16_16x16p += counts->tx.p16x16[i][TX_16X16];
        count16x16_lp += counts->tx.p32x32[i][TX_16X16];
        count32x32 += counts->tx.p32x32[i][TX_32X32];
      }
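
      // The "_NxNp" tallies count size N chosen in blocks whose maximum
      // transform is N; the "_lp" tallies count size N chosen in blocks
      // that could have used something larger. A fixed tx_mode is safe
      // only when every size it would forbid went unused; in that case
      // reset_skip_tx_size() caps any stored per-block tx_size at the
      // new maximum.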
      if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
          count32x32 == 0) {
        cm->tx_mode = ALLOW_8X8;
        reset_skip_tx_size(cm, TX_8X8);
      } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
                 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
        cm->tx_mode = ONLY_4X4;
        reset_skip_tx_size(cm, TX_4X4);
      } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
        cm->tx_mode = ALLOW_32X32;
      } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
        cm->tx_mode = ALLOW_16X16;
        reset_skip_tx_size(cm, TX_16X16);
      }
    }
  } else {
    cm->reference_mode = SINGLE_REFERENCE;
    encode_frame_internal(cpi);
  }
}
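
// Accumulate intra prediction mode usage statistics for backward
// probability adaptation. Sub-8x8 blocks carry one prediction mode per
// 4x4 sub-block, so each of those is counted individually against the
// sub-8x8 (group 0) context.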
static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) {
  const PREDICTION_MODE y_mode = mi->mbmi.mode;
  const PREDICTION_MODE uv_mode = mi->mbmi.uv_mode;
  const BLOCK_SIZE bsize = mi->mbmi.sb_type;

  if (bsize < BLOCK_8X8) {
    int idx, idy;
    const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];
    for (idy = 0; idy < 2; idy += num_4x4_h)
      for (idx = 0; idx < 2; idx += num_4x4_w)
        ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode];
  } else {
    ++counts->y_mode[size_group_lookup[bsize]][y_mode];
  }

  ++counts->uv_mode[y_mode][uv_mode];
}
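
// Final encode of one block at the size chosen by the partition search:
// build the prediction, transform/quantize the residual, and tokenize the
// coefficients, reusing the decisions cached in ctx by the mode search.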
static void encode_superblock(VP9_COMP *cpi, ThreadData *td,
                              TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &td->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO **mi_8x8 = xd->mi;
  MODE_INFO *mi = mi_8x8[0];
  MB_MODE_INFO *mbmi = &mi->mbmi;
  const int seg_skip = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
                                             SEG_LVL_SKIP);
  const int mis = cm->mi_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
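
  // skip_recode lets this pass reuse coefficients produced during the mode
  // search instead of re-running transform and quantization; the conditions
  // below restrict it to cases where the transform size was fixed during
  // the search and no AQ mode may have changed the quantizer since.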
  x->skip_recode = !x->select_tx_size && mbmi->sb_type >= BLOCK_8X8 &&
                   cpi->oxcf.aq_mode != COMPLEXITY_AQ &&
                   cpi->oxcf.aq_mode != CYCLIC_REFRESH_AQ &&
                   cpi->sf.allow_skip_recode;

  if (!x->skip_recode && !cpi->sf.use_nonrd_pick_mode)
    memset(x->skip_txfm, 0, sizeof(x->skip_txfm));

  x->skip_optimize = ctx->is_coded;
  ctx->is_coded = 1;
  x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
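  // With the skip_encode_frame speed feature, non-output encodes of
  // low-Q blocks can be skipped entirely, on the assumption that at such
  // quantizer levels the prediction is close enough to the source that
  // the reconstruction is not worth recomputing.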
  x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
                    x->q_index < QIDX_SKIP_THRESH);

  if (x->skip_encode)
    return;

  set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);

  if (!is_inter_block(mbmi)) {
    int plane;
    mbmi->skip = 1;
    for (plane = 0; plane < MAX_MB_PLANE; ++plane)
      vp9_encode_intra_block_plane(x, MAX(bsize, BLOCK_8X8), plane);
    if (output_enabled)
      sum_intra_stats(td->counts, mi);
    vp9_tokenize_sb(cpi, td, t, !output_enabled, MAX(bsize, BLOCK_8X8));
  } else {
    int ref;
    const int is_compound = has_second_ref(mbmi);
    for (ref = 0; ref < 1 + is_compound; ++ref) {
      YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi,
                                                     mbmi->ref_frame[ref]);
      assert(cfg != NULL);
      vp9_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
                           &xd->block_refs[ref]->sf);
    }
    if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip)
      vp9_build_inter_predictors_sby(xd, mi_row, mi_col,
                                     MAX(bsize, BLOCK_8X8));

    vp9_build_inter_predictors_sbuv(xd, mi_row, mi_col,
                                    MAX(bsize, BLOCK_8X8));

    vp9_encode_sb(x, MAX(bsize, BLOCK_8X8));
    vp9_tokenize_sb(cpi, td, t, !output_enabled, MAX(bsize, BLOCK_8X8));
  }
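
  // For output encodes, update the transform size statistics: when the
  // size is signaled per block (TX_MODE_SELECT, non-skipped 8x8+ blocks),
  // bump the context-conditioned count; otherwise stamp the implied size
  // into every mi unit the block covers.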
  if (output_enabled) {
    if (cm->tx_mode == TX_MODE_SELECT &&
        mbmi->sb_type >= BLOCK_8X8 &&
        !(is_inter_block(mbmi) && (mbmi->skip || seg_skip))) {
      ++get_tx_counts(max_txsize_lookup[bsize], vp9_get_tx_size_context(xd),
                      &td->counts->tx)[mbmi->tx_size];
    } else {
      int x, y;
      TX_SIZE tx_size;
      // The new intra coding scheme requires no change of transform size
      if (is_inter_block(&mi->mbmi)) {
        tx_size = MIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
                      max_txsize_lookup[bsize]);
      } else {
        tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4;
      }
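      // Propagate the resolved transform size to every 8x8 mi unit this
      // block covers, clipping the writes at the frame boundary.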
      for (y = 0; y < mi_height; y++)
        for (x = 0; x < mi_width; x++)
          if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
            mi_8x8[mis * y + x]->mbmi.tx_size = tx_size;
    }
    ++td->counts->tx.tx_totals[mbmi->tx_size];
    ++td->counts->tx.tx_totals[get_uv_tx_size(mbmi, &xd->plane[1])];
  }
}