drm_atomic.c

/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Daniel Vetter <daniel.vetter@ffwll.ch>
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>

#include "drm_crtc_internal.h"

static void crtc_commit_free(struct kref *kref)
{
	struct drm_crtc_commit *commit =
		container_of(kref, struct drm_crtc_commit, ref);

	kfree(commit);
}

void drm_crtc_commit_put(struct drm_crtc_commit *commit)
{
	kref_put(&commit->ref, crtc_commit_free);
}
EXPORT_SYMBOL(drm_crtc_commit_put);

/**
 * drm_atomic_state_default_release -
 * release memory initialized by drm_atomic_state_init
 * @state: atomic state
 *
 * Free all the memory allocated by drm_atomic_state_init.
 * This is useful for drivers that subclass the atomic state.
 */
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
	kfree(state->connectors);
	kfree(state->crtcs);
	kfree(state->planes);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);

/**
 * drm_atomic_state_init - init new atomic state
 * @dev: DRM device
 * @state: atomic state
 *
 * Default implementation for filling in a new atomic state.
 * This is useful for drivers that subclass the atomic state.
 */
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
	/* TODO legacy paths should maybe do a better job about
	 * setting this appropriately?
	 */
	state->allow_modeset = true;

	state->crtcs = kcalloc(dev->mode_config.num_crtc,
			       sizeof(*state->crtcs), GFP_KERNEL);
	if (!state->crtcs)
		goto fail;
	state->planes = kcalloc(dev->mode_config.num_total_plane,
				sizeof(*state->planes), GFP_KERNEL);
	if (!state->planes)
		goto fail;

	state->dev = dev;

	DRM_DEBUG_ATOMIC("Allocated atomic state %p\n", state);

	return 0;
fail:
	drm_atomic_state_default_release(state);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_atomic_state_init);

/**
 * drm_atomic_state_alloc - allocate atomic state
 * @dev: DRM device
 *
 * This allocates an empty atomic state to track updates.
 */
struct drm_atomic_state *
drm_atomic_state_alloc(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state;

	if (!config->funcs->atomic_state_alloc) {
		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;
		if (drm_atomic_state_init(dev, state) < 0) {
			kfree(state);
			return NULL;
		}
		return state;
	}

	return config->funcs->atomic_state_alloc(dev);
}
EXPORT_SYMBOL(drm_atomic_state_alloc);
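
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * allocates a state and attaches an acquire context before adding any
 * objects to it. "example_alloc_state" is a made-up helper name.
 */
static __maybe_unused struct drm_atomic_state *
example_alloc_state(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return NULL;

	/* Every state needs an acquire context before objects are added. */
	state->acquire_ctx = ctx;

	return state;
}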
/**
 * drm_atomic_state_default_clear - clear base atomic state
 * @state: atomic state
 *
 * Default implementation for clearing atomic state.
 * This is useful for drivers that subclass the atomic state.
 */
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	int i;

	DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);

	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector *connector = state->connectors[i].ptr;

		if (!connector)
			continue;

		connector->funcs->atomic_destroy_state(connector,
						       state->connectors[i].state);
		state->connectors[i].ptr = NULL;
		state->connectors[i].state = NULL;
		drm_connector_unreference(connector);
	}

	for (i = 0; i < config->num_crtc; i++) {
		struct drm_crtc *crtc = state->crtcs[i].ptr;

		if (!crtc)
			continue;

		crtc->funcs->atomic_destroy_state(crtc,
						  state->crtcs[i].state);

		if (state->crtcs[i].commit) {
			kfree(state->crtcs[i].commit->event);
			state->crtcs[i].commit->event = NULL;
			drm_crtc_commit_put(state->crtcs[i].commit);
		}

		state->crtcs[i].commit = NULL;
		state->crtcs[i].ptr = NULL;
		state->crtcs[i].state = NULL;
	}

	for (i = 0; i < config->num_total_plane; i++) {
		struct drm_plane *plane = state->planes[i].ptr;

		if (!plane)
			continue;

		plane->funcs->atomic_destroy_state(plane,
						   state->planes[i].state);
		state->planes[i].ptr = NULL;
		state->planes[i].state = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);

/**
 * drm_atomic_state_clear - clear state object
 * @state: atomic state
 *
 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
 * all locks. So someone else could sneak in and change the current modeset
 * configuration. Which means that all the state assembled in @state is no
 * longer an atomic update to the current state, but to some arbitrary earlier
 * state. Which could break assumptions the driver's ->atomic_check likely
 * relies on.
 *
 * Hence we must clear all cached state and completely start over, using this
 * function.
 */
void drm_atomic_state_clear(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (config->funcs->atomic_state_clear)
		config->funcs->atomic_state_clear(state);
	else
		drm_atomic_state_default_clear(state);
}
EXPORT_SYMBOL(drm_atomic_state_clear);

/**
 * drm_atomic_state_free - free all memory for an atomic state
 * @state: atomic state to deallocate
 *
 * This frees all memory associated with an atomic state, including all the
 * per-object state for planes, crtcs and connectors.
 */
void drm_atomic_state_free(struct drm_atomic_state *state)
{
	struct drm_device *dev;
	struct drm_mode_config *config;

	if (!state)
		return;

	dev = state->dev;
	config = &dev->mode_config;

	drm_atomic_state_clear(state);

	DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);

	if (config->funcs->atomic_state_free) {
		config->funcs->atomic_state_free(state);
	} else {
		drm_atomic_state_default_release(state);
		kfree(state);
	}
}
EXPORT_SYMBOL(drm_atomic_state_free);

/**
 * drm_atomic_get_crtc_state - get crtc state
 * @state: global atomic state object
 * @crtc: crtc to get state object for
 *
 * This function returns the crtc state for the given crtc, allocating it if
 * needed. It will also grab the relevant crtc lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_crtc_state *
drm_atomic_get_crtc_state(struct drm_atomic_state *state,
			  struct drm_crtc *crtc)
{
	int ret, index = drm_crtc_index(crtc);
	struct drm_crtc_state *crtc_state;

	WARN_ON(!state->acquire_ctx);

	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
	if (crtc_state)
		return crtc_state;

	ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
	if (!crtc_state)
		return ERR_PTR(-ENOMEM);

	state->crtcs[index].state = crtc_state;
	state->crtcs[index].ptr = crtc;
	crtc_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
			 crtc->base.id, crtc->name, crtc_state, state);

	return crtc_state;
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
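
/*
 * Illustrative sketch (not part of the original file): fetching a CRTC state
 * from driver code. The ERR_PTR return must be checked and -EDEADLK
 * propagated so that the caller can back off and retry.
 * "example_toggle_active" is a made-up helper name.
 */
static __maybe_unused int example_toggle_active(struct drm_atomic_state *state,
						struct drm_crtc *crtc,
						bool active)
{
	struct drm_crtc_state *crtc_state;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state); /* may be -EDEADLK, caller retries */

	crtc_state->active = active;

	return 0;
}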
/**
 * drm_atomic_set_mode_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @mode: kernel-internal mode to use for the CRTC, or NULL to disable
 *
 * Set a mode (originating from the kernel) on the desired CRTC state. The
 * enable field is updated to match (true when @mode is non-NULL, false
 * otherwise); no other state properties, such as active or mode_changed,
 * are touched.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
				 struct drm_display_mode *mode)
{
	struct drm_mode_modeinfo umode;

	/* Early return for no change. */
	if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0)
		return 0;

	drm_property_unreference_blob(state->mode_blob);
	state->mode_blob = NULL;

	if (mode) {
		drm_mode_convert_to_umode(&umode, mode);
		state->mode_blob =
			drm_property_create_blob(state->crtc->dev,
						 sizeof(umode),
						 &umode);
		if (IS_ERR(state->mode_blob))
			return PTR_ERR(state->mode_blob);

		drm_mode_copy(&state->mode, mode);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
				 mode->name, state);
	} else {
		memset(&state->mode, 0, sizeof(state->mode));
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
				 state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
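
/*
 * Illustrative sketch (not part of the original file): enabling a CRTC with a
 * given mode from kernel code. "example_enable_crtc" and the passed-in mode
 * are assumptions made for the sake of the example.
 */
static __maybe_unused int example_enable_crtc(struct drm_atomic_state *state,
					      struct drm_crtc *crtc,
					      struct drm_display_mode *mode)
{
	struct drm_crtc_state *crtc_state;
	int ret;

	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/* Duplicates @mode into the state and creates the mode blob. */
	ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
	if (ret)
		return ret;

	crtc_state->active = true;

	return 0;
}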
/**
 * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC
 * @state: the CRTC whose incoming state to update
 * @blob: pointer to blob property to use for mode
 *
 * Set a mode (originating from a blob property) on the desired CRTC state.
 * This function will take a reference on the blob property for the CRTC state,
 * and release the reference held on the state's existing mode property, if any
 * was set.
 *
 * RETURNS:
 * Zero on success, error code on failure. Cannot return -EDEADLK.
 */
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
				      struct drm_property_blob *blob)
{
	if (blob == state->mode_blob)
		return 0;

	drm_property_unreference_blob(state->mode_blob);
	state->mode_blob = NULL;

	memset(&state->mode, 0, sizeof(state->mode));

	if (blob) {
		if (blob->length != sizeof(struct drm_mode_modeinfo) ||
		    drm_mode_convert_umode(&state->mode,
					   (const struct drm_mode_modeinfo *)
					   blob->data))
			return -EINVAL;

		state->mode_blob = drm_property_reference_blob(blob);
		state->enable = true;
		DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
				 state->mode.name, state);
	} else {
		state->enable = false;
		DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
				 state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);

/**
 * drm_atomic_replace_property_blob - replace a blob property
 * @blob: a pointer to the member blob to be replaced
 * @new_blob: the new blob to replace with
 * @replaced: whether the blob has been replaced
 *
 * Replaces *@blob with @new_blob, adjusting the blob references, and sets
 * *@replaced to true when the pointer actually changed.
 */
static void
drm_atomic_replace_property_blob(struct drm_property_blob **blob,
				 struct drm_property_blob *new_blob,
				 bool *replaced)
{
	struct drm_property_blob *old_blob = *blob;

	if (old_blob == new_blob)
		return;

	drm_property_unreference_blob(old_blob);
	if (new_blob)
		drm_property_reference_blob(new_blob);
	*blob = new_blob;
	*replaced = true;
}

static int
drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
					 struct drm_property_blob **blob,
					 uint64_t blob_id,
					 ssize_t expected_size,
					 bool *replaced)
{
	struct drm_property_blob *new_blob = NULL;

	if (blob_id != 0) {
		new_blob = drm_property_lookup_blob(crtc->dev, blob_id);
		if (new_blob == NULL)
			return -EINVAL;

		if (expected_size > 0 && expected_size != new_blob->length) {
			drm_property_unreference_blob(new_blob);
			return -EINVAL;
		}
	}

	drm_atomic_replace_property_blob(blob, new_blob, replaced);
	drm_property_unreference_blob(new_blob);

	return 0;
}

/**
 * drm_atomic_crtc_set_property - set property on CRTC
 * @crtc: the drm CRTC to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * Use this instead of calling crtc->atomic_set_property directly.
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_set_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
		struct drm_crtc_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;
	bool replaced = false;
	int ret;

	if (property == config->prop_active)
		state->active = val;
	else if (property == config->prop_mode_id) {
		struct drm_property_blob *mode =
			drm_property_lookup_blob(dev, val);
		ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
		drm_property_unreference_blob(mode);
		return ret;
	} else if (property == config->degamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(crtc,
					&state->degamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->ctm_property) {
		ret = drm_atomic_replace_property_blob_from_id(crtc,
					&state->ctm,
					val,
					sizeof(struct drm_color_ctm),
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->gamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(crtc,
					&state->gamma_lut,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (crtc->funcs->atomic_set_property)
		return crtc->funcs->atomic_set_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);
/**
 * drm_atomic_crtc_get_property - get property value from CRTC state
 * @crtc: the drm CRTC to get a property value from
 * @state: the state object to get the property value from
 * @property: the property to get
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_get_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_crtc_get_property(struct drm_crtc *crtc,
		const struct drm_crtc_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_active)
		*val = state->active;
	else if (property == config->prop_mode_id)
		*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
	else if (property == config->degamma_lut_property)
		*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
	else if (property == config->ctm_property)
		*val = (state->ctm) ? state->ctm->base.id : 0;
	else if (property == config->gamma_lut_property)
		*val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
	else if (crtc->funcs->atomic_get_property)
		return crtc->funcs->atomic_get_property(crtc, state, property, val);
	else
		return -EINVAL;

	return 0;
}

/**
 * drm_atomic_crtc_check - check crtc state
 * @crtc: crtc to check
 * @state: crtc state to check
 *
 * Provides core sanity checks for crtc state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	/* NOTE: we explicitly don't enforce constraints such as primary
	 * layer covering entire screen, since that is something we want
	 * to allow (on hw that supports it). For hw that does not, it
	 * should be checked in driver's crtc->atomic_check() vfunc.
	 *
	 * TODO: Add generic modeset state checks once we support those.
	 */

	if (state->active && !state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/* The state->enable vs. state->mode_blob checks can be WARN_ON,
	 * as this is a kernel-internal detail that userspace should never
	 * be able to trigger. */
	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(state->enable && !state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
	    WARN_ON(!state->enable && state->mode_blob)) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}

	/*
	 * Reject event generation for when a CRTC is off and stays off.
	 * It wouldn't be hard to implement this, but userspace has a track
	 * record of happily burning through 100% cpu (or worse, crash) when the
	 * display pipe is suspended. To avoid all that fun just reject updates
	 * that ask for events since likely that indicates a bug in the
	 * compositor's drawing loop. This is consistent with the vblank IOCTL
	 * and legacy page_flip IOCTL which also reject service on a disabled
	 * pipe.
	 */
	if (state->event && !state->active && !crtc->state->active) {
		DRM_DEBUG_ATOMIC("[CRTC:%d] requesting event but off\n",
				 crtc->base.id);
		return -EINVAL;
	}

	return 0;
}

/**
 * drm_atomic_get_plane_state - get plane state
 * @state: global atomic state object
 * @plane: plane to get state object for
 *
 * This function returns the plane state for the given plane, allocating it if
 * needed. It will also grab the relevant plane lock to make sure that the state
 * is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_plane_state *
drm_atomic_get_plane_state(struct drm_atomic_state *state,
			   struct drm_plane *plane)
{
	int ret, index = drm_plane_index(plane);
	struct drm_plane_state *plane_state;

	WARN_ON(!state->acquire_ctx);

	plane_state = drm_atomic_get_existing_plane_state(state, plane);
	if (plane_state)
		return plane_state;

	ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	plane_state = plane->funcs->atomic_duplicate_state(plane);
	if (!plane_state)
		return ERR_PTR(-ENOMEM);

	state->planes[index].state = plane_state;
	state->planes[index].ptr = plane;
	plane_state->state = state;

	DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
			 plane->base.id, plane->name, plane_state, state);

	if (plane_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       plane_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return plane_state;
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);
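
/*
 * Illustrative sketch (not part of the original file): acquiring a plane
 * state also pulls in the state of the CRTC the plane is currently on, so the
 * single ERR_PTR check below covers both locks. "example_set_zpos" is a
 * made-up helper name.
 */
static __maybe_unused int example_set_zpos(struct drm_atomic_state *state,
					   struct drm_plane *plane,
					   unsigned int zpos)
{
	struct drm_plane_state *plane_state;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	plane_state->zpos = zpos;

	return 0;
}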
/**
 * drm_atomic_plane_set_property - set property on plane
 * @plane: the drm plane to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * Use this instead of calling plane->atomic_set_property directly.
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_set_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_plane_set_property(struct drm_plane *plane,
		struct drm_plane_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val);
		drm_atomic_set_fb_for_plane(state, fb);
		if (fb)
			drm_framebuffer_unreference(fb);
	} else if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, val);
		return drm_atomic_set_crtc_for_plane(state, crtc);
	} else if (property == config->prop_crtc_x) {
		state->crtc_x = U642I64(val);
	} else if (property == config->prop_crtc_y) {
		state->crtc_y = U642I64(val);
	} else if (property == config->prop_crtc_w) {
		state->crtc_w = val;
	} else if (property == config->prop_crtc_h) {
		state->crtc_h = val;
	} else if (property == config->prop_src_x) {
		state->src_x = val;
	} else if (property == config->prop_src_y) {
		state->src_y = val;
	} else if (property == config->prop_src_w) {
		state->src_w = val;
	} else if (property == config->prop_src_h) {
		state->src_h = val;
	} else if (property == config->rotation_property) {
		state->rotation = val;
	} else if (property == plane->zpos_property) {
		state->zpos = val;
	} else if (plane->funcs->atomic_set_property) {
		return plane->funcs->atomic_set_property(plane, state,
				property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_plane_set_property);

/**
 * drm_atomic_plane_get_property - get property value from plane state
 * @plane: the drm plane to get a property value from
 * @state: the state object to get the property value from
 * @property: the property to get
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_get_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
		const struct drm_plane_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = plane->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_fb_id) {
		*val = (state->fb) ? state->fb->base.id : 0;
	} else if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->prop_crtc_x) {
		*val = I642U64(state->crtc_x);
	} else if (property == config->prop_crtc_y) {
		*val = I642U64(state->crtc_y);
	} else if (property == config->prop_crtc_w) {
		*val = state->crtc_w;
	} else if (property == config->prop_crtc_h) {
		*val = state->crtc_h;
	} else if (property == config->prop_src_x) {
		*val = state->src_x;
	} else if (property == config->prop_src_y) {
		*val = state->src_y;
	} else if (property == config->prop_src_w) {
		*val = state->src_w;
	} else if (property == config->prop_src_h) {
		*val = state->src_h;
	} else if (property == config->rotation_property) {
		*val = state->rotation;
	} else if (property == plane->zpos_property) {
		*val = state->zpos;
	} else if (plane->funcs->atomic_get_property) {
		return plane->funcs->atomic_get_property(plane, state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}
static bool
plane_switching_crtc(struct drm_atomic_state *state,
		     struct drm_plane *plane,
		     struct drm_plane_state *plane_state)
{
	if (!plane->state->crtc || !plane_state->crtc)
		return false;

	if (plane->state->crtc == plane_state->crtc)
		return false;

	/* This could be refined, but currently there's no helper or driver code
	 * to implement direct switching of active planes nor userspace to take
	 * advantage of more direct plane switching without the intermediate
	 * full OFF state.
	 */
	return true;
}

/**
 * drm_atomic_plane_check - check plane state
 * @plane: plane to check
 * @state: plane state to check
 *
 * Provides core sanity checks for plane state.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int drm_atomic_plane_check(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	unsigned int fb_width, fb_height;
	int ret;

	/* either *both* CRTC and FB must be set, or neither */
	if (WARN_ON(state->crtc && !state->fb)) {
		DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
		return -EINVAL;
	} else if (WARN_ON(state->fb && !state->crtc)) {
		DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
		return -EINVAL;
	}

	/* if disabled, we don't care about the rest of the state: */
	if (!state->crtc)
		return 0;

	/* Check whether this plane is usable on this CRTC */
	if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
		DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
		return -EINVAL;
	}

	/* Check whether this plane supports the fb pixel format. */
	ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
	if (ret) {
		char *format_name = drm_get_format_name(state->fb->pixel_format);
		DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", format_name);
		kfree(format_name);
		return ret;
	}

	/* Give drivers some help against integer overflows */
	if (state->crtc_w > INT_MAX ||
	    state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
	    state->crtc_h > INT_MAX ||
	    state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
		DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
				 state->crtc_w, state->crtc_h,
				 state->crtc_x, state->crtc_y);
		return -ERANGE;
	}

	fb_width = state->fb->width << 16;
	fb_height = state->fb->height << 16;

	/* Make sure source coordinates are inside the fb. */
	if (state->src_w > fb_width ||
	    state->src_x > fb_width - state->src_w ||
	    state->src_h > fb_height ||
	    state->src_y > fb_height - state->src_h) {
		DRM_DEBUG_ATOMIC("Invalid source coordinates "
				 "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
				 state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
				 state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
				 state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
				 state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
		return -ENOSPC;
	}

	if (plane_switching_crtc(state->state, plane, state)) {
		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
				 plane->base.id, plane->name);
		return -EINVAL;
	}

	return 0;
}
/**
 * drm_atomic_get_connector_state - get connector state
 * @state: global atomic state object
 * @connector: connector to get state object for
 *
 * This function returns the connector state for the given connector,
 * allocating it if needed. It will also grab the relevant connector lock to
 * make sure that the state is consistent.
 *
 * Returns:
 *
 * Either the allocated state or the error code encoded into the pointer. When
 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
 * entire atomic sequence must be restarted. All other errors are fatal.
 */
struct drm_connector_state *
drm_atomic_get_connector_state(struct drm_atomic_state *state,
			       struct drm_connector *connector)
{
	int ret, index;
	struct drm_mode_config *config = &connector->dev->mode_config;
	struct drm_connector_state *connector_state;

	WARN_ON(!state->acquire_ctx);

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	index = drm_connector_index(connector);

	if (index >= state->num_connector) {
		struct __drm_connnectors_state *c;
		int alloc = max(index + 1, config->num_connector);

		c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
		if (!c)
			return ERR_PTR(-ENOMEM);

		state->connectors = c;
		memset(&state->connectors[state->num_connector], 0,
		       sizeof(*state->connectors) * (alloc - state->num_connector));

		state->num_connector = alloc;
	}

	if (state->connectors[index].state)
		return state->connectors[index].state;

	connector_state = connector->funcs->atomic_duplicate_state(connector);
	if (!connector_state)
		return ERR_PTR(-ENOMEM);

	drm_connector_reference(connector);
	state->connectors[index].state = connector_state;
	state->connectors[index].ptr = connector;
	connector_state->state = state;

	DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
			 connector->base.id, connector_state, state);

	if (connector_state->crtc) {
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state,
						       connector_state->crtc);
		if (IS_ERR(crtc_state))
			return ERR_CAST(crtc_state);
	}

	return connector_state;
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);
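
/*
 * Illustrative sketch (not part of the original file): pulling a connector
 * into an atomic update and routing it to a CRTC. Both calls below can return
 * -EDEADLK, which the caller must handle with the usual backoff.
 * "example_route_connector" is a made-up helper name.
 */
static __maybe_unused int
example_route_connector(struct drm_atomic_state *state,
			struct drm_connector *connector,
			struct drm_crtc *crtc)
{
	struct drm_connector_state *conn_state;

	conn_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(conn_state))
		return PTR_ERR(conn_state);

	return drm_atomic_set_crtc_for_connector(conn_state, crtc);
}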
/**
 * drm_atomic_connector_set_property - set property on connector.
 * @connector: the drm connector to set a property on
 * @state: the state object to update with the new property value
 * @property: the property to set
 * @val: the new property value
 *
 * Use this instead of calling connector->atomic_set_property directly.
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_set_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
int drm_atomic_connector_set_property(struct drm_connector *connector,
		struct drm_connector_state *state, struct drm_property *property,
		uint64_t val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		struct drm_crtc *crtc = drm_crtc_find(dev, val);
		return drm_atomic_set_crtc_for_connector(state, crtc);
	} else if (property == config->dpms_property) {
		/* setting DPMS property requires special handling, which
		 * is done in legacy setprop path for us. Disallow (for
		 * now?) atomic writes to DPMS property:
		 */
		return -EINVAL;
	} else if (connector->funcs->atomic_set_property) {
		return connector->funcs->atomic_set_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}
}
EXPORT_SYMBOL(drm_atomic_connector_set_property);

/**
 * drm_atomic_connector_get_property - get property value from connector state
 * @connector: the drm connector to get a property value from
 * @state: the state object to get the property value from
 * @property: the property to get
 * @val: return location for the property value
 *
 * This function handles generic/core properties and calls out to
 * driver's ->atomic_get_property() for driver properties. To ensure
 * consistent behavior you must call this function rather than the
 * driver hook directly.
 *
 * RETURNS:
 * Zero on success, error code on failure
 */
static int
drm_atomic_connector_get_property(struct drm_connector *connector,
		const struct drm_connector_state *state,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (property == config->prop_crtc_id) {
		*val = (state->crtc) ? state->crtc->base.id : 0;
	} else if (property == config->dpms_property) {
		*val = connector->dpms;
	} else if (connector->funcs->atomic_get_property) {
		return connector->funcs->atomic_get_property(connector,
				state, property, val);
	} else {
		return -EINVAL;
	}

	return 0;
}

int drm_atomic_get_property(struct drm_mode_object *obj,
		struct drm_property *property, uint64_t *val)
{
	struct drm_device *dev = property->dev;
	int ret;

	switch (obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR: {
		struct drm_connector *connector = obj_to_connector(obj);
		WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
		ret = drm_atomic_connector_get_property(connector,
				connector->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);
		WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
		ret = drm_atomic_crtc_get_property(crtc,
				crtc->state, property, val);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);
		WARN_ON(!drm_modeset_is_locked(&plane->mutex));
		ret = drm_atomic_plane_get_property(plane,
				plane->state, property, val);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
/**
 * drm_atomic_set_crtc_for_plane - set crtc for plane
 * @plane_state: the plane whose incoming state to update
 * @crtc: crtc to use for the plane
 *
 * Changing the assigned crtc for a plane requires us to grab the lock and state
 * for the new crtc, as needed. This function takes care of all these details
 * besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
			      struct drm_crtc *crtc)
{
	struct drm_plane *plane = plane_state->plane;
	struct drm_crtc_state *crtc_state;

	/* Nothing to do for same crtc */
	if (plane_state->crtc == crtc)
		return 0;

	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       plane_state->crtc);
		if (WARN_ON(IS_ERR(crtc_state)))
			return PTR_ERR(crtc_state);

		crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
	}

	plane_state->crtc = crtc;

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(plane_state->state,
						       crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
		crtc_state->plane_mask |= (1 << drm_plane_index(plane));
	}

	if (crtc)
		DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d:%s]\n",
				 plane_state, crtc->base.id, crtc->name);
	else
		DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
				 plane_state);

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);

/**
 * drm_atomic_set_fb_for_plane - set framebuffer for plane
 * @plane_state: atomic state object for the plane
 * @fb: fb to use for the plane
 *
 * Changing the assigned framebuffer for a plane requires us to grab a reference
 * to the new fb and drop the reference to the old fb, if there is one. This
 * function takes care of all these details besides updating the pointer in the
 * state object itself.
 */
void
drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
			    struct drm_framebuffer *fb)
{
	if (plane_state->fb)
		drm_framebuffer_unreference(plane_state->fb);
	if (fb)
		drm_framebuffer_reference(fb);
	plane_state->fb = fb;

	if (fb)
		DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
				 fb->base.id, plane_state);
	else
		DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
				 plane_state);
}
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
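
/*
 * Illustrative sketch (not part of the original file): a full-screen plane
 * update built from the two helpers above. The source coordinates are in
 * 16.16 fixed point, which is why src_w/src_h are shifted left by 16.
 * "example_show_fb" is a made-up helper name.
 */
static __maybe_unused int example_show_fb(struct drm_atomic_state *state,
					  struct drm_plane *plane,
					  struct drm_crtc *crtc,
					  struct drm_framebuffer *fb,
					  const struct drm_display_mode *mode)
{
	struct drm_plane_state *plane_state;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
	if (ret)
		return ret;

	drm_atomic_set_fb_for_plane(plane_state, fb);

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = mode->hdisplay;
	plane_state->crtc_h = mode->vdisplay;
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	return 0;
}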
/**
 * drm_atomic_set_crtc_for_connector - set crtc for connector
 * @conn_state: atomic state object for the connector
 * @crtc: crtc to use for the connector
 *
 * Changing the assigned crtc for a connector requires us to grab the lock and
 * state for the new crtc, as needed. This function takes care of all these
 * details besides updating the pointer in the state object itself.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
				  struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;

	if (conn_state->crtc == crtc)
		return 0;

	if (conn_state->crtc) {
		crtc_state = drm_atomic_get_existing_crtc_state(conn_state->state,
								conn_state->crtc);

		crtc_state->connector_mask &=
			~(1 << drm_connector_index(conn_state->connector));

		drm_connector_unreference(conn_state->connector);
		conn_state->crtc = NULL;
	}

	if (crtc) {
		crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		crtc_state->connector_mask |=
			1 << drm_connector_index(conn_state->connector);

		drm_connector_reference(conn_state->connector);
		conn_state->crtc = crtc;

		DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
				 conn_state, crtc->base.id, crtc->name);
	} else {
		DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
				 conn_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
/**
 * drm_atomic_add_affected_connectors - add connectors for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all connectors
 * currently using @crtc to the atomic configuration @state. Note that this
 * function must acquire the connection mutex. This can potentially cause
 * unneeded serialization if the update is just for the planes on one crtc.
 * Hence drivers and helpers should only call this when really needed (e.g.
 * when a full modeset needs to happen due to some change).
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
				   struct drm_crtc *crtc)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	int ret;

	ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d:%s] to %p\n",
			 crtc->base.id, crtc->name, state);

	/*
	 * Changed connectors are already in @state, so only need to look at the
	 * current configuration.
	 */
	drm_for_each_connector(connector, state->dev) {
		if (connector->state->crtc != crtc)
			continue;

		conn_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(conn_state))
			return PTR_ERR(conn_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_connectors);

/**
 * drm_atomic_add_affected_planes - add planes for crtc
 * @state: atomic state
 * @crtc: DRM crtc
 *
 * This function walks the current configuration and adds all planes
 * currently used by @crtc to the atomic configuration @state. This is useful
 * when an atomic commit also needs to check all currently enabled planes on
 * @crtc, e.g. when changing the mode. It's also useful when re-enabling a CRTC
 * to avoid special code to force-enable all planes.
 *
 * Since acquiring a plane state will always also acquire the w/w mutex of the
 * current CRTC for that plane (if there is any) adding all the plane states for
 * a CRTC will not reduce parallelism of atomic updates.
 *
 * Returns:
 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
 * then the w/w mutex code has detected a deadlock and the entire atomic
 * sequence must be restarted. All other errors are fatal.
 */
int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
			       struct drm_crtc *crtc)
{
	struct drm_plane *plane;

	WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));

	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);

		if (IS_ERR(plane_state))
			return PTR_ERR(plane_state);
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_add_affected_planes);
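
/*
 * Illustrative sketch (not part of the original file): when a change forces a
 * full modeset on a CRTC, pulling in every connector and plane currently on
 * that CRTC keeps the check and commit phases consistent.
 * "example_prepare_modeset" is a made-up helper name.
 */
static __maybe_unused int
example_prepare_modeset(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state;
	int ret;

	/* Adds the CRTC itself so add_affected_planes() has a state to key off. */
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_add_affected_connectors(state, crtc);
	if (ret)
		return ret;

	return drm_atomic_add_affected_planes(state, crtc);
}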
/**
 * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
 * @state: atomic state
 *
 * This function should be used by legacy entry points which don't understand
 * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
 * the slowpath completed.
 */
void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	unsigned crtc_mask = 0;
	struct drm_crtc *crtc;
	int ret;
	bool global = false;

	drm_for_each_crtc(crtc, dev) {
		if (crtc->acquire_ctx != state->acquire_ctx)
			continue;

		crtc_mask |= drm_crtc_mask(crtc);
		crtc->acquire_ctx = NULL;
	}

	if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
		global = true;

		dev->mode_config.acquire_ctx = NULL;
	}

retry:
	drm_modeset_backoff(state->acquire_ctx);

	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		goto retry;

	drm_for_each_crtc(crtc, dev)
		if (drm_crtc_mask(crtc) & crtc_mask)
			crtc->acquire_ctx = state->acquire_ctx;

	if (global)
		dev->mode_config.acquire_ctx = state->acquire_ctx;
}
EXPORT_SYMBOL(drm_atomic_legacy_backoff);

/**
 * drm_atomic_check_only - check whether a given config would work
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_check_only(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret = 0;

	DRM_DEBUG_ATOMIC("checking %p\n", state);

	for_each_plane_in_state(state, plane, plane_state, i) {
		ret = drm_atomic_plane_check(plane, plane_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
					 plane->base.id, plane->name);
			return ret;
		}
	}

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		ret = drm_atomic_crtc_check(crtc, crtc_state);
		if (ret) {
			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
					 crtc->base.id, crtc->name);
			return ret;
		}
	}

	if (config->funcs->atomic_check)
		ret = config->funcs->atomic_check(state->dev, state);

	if (ret)
		return ret;

	if (!state->allow_modeset) {
		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			if (drm_atomic_crtc_needs_modeset(crtc_state)) {
				DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
						 crtc->base.id, crtc->name);
				return -EINVAL;
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);

/**
 * drm_atomic_commit - commit configuration atomically
 * @state: atomic configuration to check
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Also note that on successful execution ownership of @state is transferred
 * from the caller of this function to the function itself. The caller must not
 * free or in any other way access @state. If the function fails then the caller
 * must clean up @state itself.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p\n", state);

	return config->funcs->atomic_commit(state->dev, state, false);
}
EXPORT_SYMBOL(drm_atomic_commit);
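
/*
 * Illustrative sketch (not part of the original file): the build/commit loop
 * with -EDEADLK backoff that the kernel-doc above refers to. On success
 * drm_atomic_commit() takes ownership of the state, so it is only freed on
 * the failure path. "example_commit_update" is a made-up name and the state
 * building step is left as a comment.
 */
static __maybe_unused int example_commit_update(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto fini;
	}
	state->acquire_ctx = &ctx;

retry:
	/*
	 * Build the new state here with the helpers above; any of them can
	 * also return -EDEADLK, which should take the same backoff path.
	 */
	ret = drm_atomic_commit(state);
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	/* On success drm_atomic_commit() took ownership of @state. */
	if (ret)
		drm_atomic_state_free(state);

	drm_modeset_drop_locks(&ctx);
fini:
	drm_modeset_acquire_fini(&ctx);

	return ret;
}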
/**
 * drm_atomic_nonblocking_commit - atomic nonblocking configuration commit
 * @state: atomic configuration to commit
 *
 * Note that this function can return -EDEADLK if the driver needed to acquire
 * more locks but encountered a deadlock. The caller must then do the usual w/w
 * backoff dance and restart. All other errors are fatal.
 *
 * Also note that on successful execution ownership of @state is transferred
 * from the caller of this function to the function itself. The caller must not
 * free or in any other way access @state. If the function fails then the caller
 * must clean up @state itself.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
{
	struct drm_mode_config *config = &state->dev->mode_config;
	int ret;

	ret = drm_atomic_check_only(state);
	if (ret)
		return ret;

	DRM_DEBUG_ATOMIC("committing %p nonblocking\n", state);

	return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
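
/*
 * Editor's note (sketch, not part of the original file): a kernel caller of
 * drm_atomic_nonblocking_commit() follows exactly the same -EDEADLK backoff
 * pattern as the drm_atomic_commit() sketch above; only the final call
 * differs, and completion is typically observed through the
 * DRM_MODE_PAGE_FLIP_EVENT/vblank event machinery instead of by blocking:
 *
 *	ret = drm_atomic_nonblocking_commit(state);
 *	if (ret == -EDEADLK)
 *		goto backoff;
 */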
/*
 * The big monster ioctl
 */

static struct drm_pending_vblank_event *create_vblank_event(
		struct drm_device *dev, struct drm_file *file_priv,
		struct fence *fence, uint64_t user_data)
{
	struct drm_pending_vblank_event *e = NULL;
	int ret;

	e = kzalloc(sizeof *e, GFP_KERNEL);
	if (!e)
		return NULL;

	e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = user_data;

	if (file_priv) {
		ret = drm_event_reserve_init(dev, file_priv, &e->base,
					     &e->event.base);
		if (ret) {
			kfree(e);
			return NULL;
		}
	}

	e->base.fence = fence;

	return e;
}
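
/*
 * Example (editor's sketch, not part of the original file): how a driver's
 * flip-done/vblank path typically completes an event allocated by
 * create_vblank_event() once the new scanout configuration is live.  The
 * event must be sent exactly once, under dev->event_lock.
 */
static void example_send_flip_event(struct drm_crtc *crtc,
				    struct drm_pending_vblank_event *e)
{
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	drm_crtc_send_vblank_event(crtc, e);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}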
static int atomic_set_prop(struct drm_atomic_state *state,
		struct drm_mode_object *obj, struct drm_property *prop,
		uint64_t prop_value)
{
	struct drm_mode_object *ref;
	int ret;

	if (!drm_property_change_valid_get(prop, prop_value, &ref))
		return -EINVAL;

	switch (obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR: {
		struct drm_connector *connector = obj_to_connector(obj);
		struct drm_connector_state *connector_state;

		connector_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(connector_state)) {
			ret = PTR_ERR(connector_state);
			break;
		}

		ret = drm_atomic_connector_set_property(connector,
				connector_state, prop, prop_value);
		break;
	}
	case DRM_MODE_OBJECT_CRTC: {
		struct drm_crtc *crtc = obj_to_crtc(obj);
		struct drm_crtc_state *crtc_state;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			break;
		}

		ret = drm_atomic_crtc_set_property(crtc,
				crtc_state, prop, prop_value);
		break;
	}
	case DRM_MODE_OBJECT_PLANE: {
		struct drm_plane *plane = obj_to_plane(obj);
		struct drm_plane_state *plane_state;

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			break;
		}

		ret = drm_atomic_plane_set_property(plane,
				plane_state, prop, prop_value);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	drm_property_change_valid_put(prop, ref);
	return ret;
}
/**
 * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
 *
 * @dev: drm device to check.
 * @plane_mask: plane mask for planes that were updated.
 * @ret: return value, can be -EDEADLK for a retry.
 *
 * Before doing an update plane->old_fb is set to plane->fb,
 * but before dropping the locks old_fb needs to be set to NULL
 * and plane->fb updated. This is a common operation for each
 * atomic update, so this call is split off as a helper.
 */
void drm_atomic_clean_old_fb(struct drm_device *dev,
			     unsigned plane_mask,
			     int ret)
{
	struct drm_plane *plane;

	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
	 * locks (ie. while it is still safe to deref plane->state).  We
	 * need to do this here because the driver entry points cannot
	 * distinguish between legacy and atomic ioctls.
	 */
	drm_for_each_plane_mask(plane, dev, plane_mask) {
		if (ret == 0) {
			struct drm_framebuffer *new_fb = plane->state->fb;
			if (new_fb)
				drm_framebuffer_reference(new_fb);
			plane->fb = new_fb;
			plane->crtc = plane->state->crtc;

			if (plane->old_fb)
				drm_framebuffer_unreference(plane->old_fb);
		}
		plane->old_fb = NULL;
	}
}
EXPORT_SYMBOL(drm_atomic_clean_old_fb);
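
/*
 * Example (editor's sketch, not part of the original file): the pattern that
 * drm_mode_atomic_ioctl() below (and the legacy plane/flip entry points)
 * follows around drm_atomic_clean_old_fb().  Before committing, each touched
 * plane records its current fb in plane->old_fb; afterwards the helper is
 * called unconditionally so old_fb is dropped on both success and failure.
 */
static int example_commit_one_plane(struct drm_device *dev,
				    struct drm_atomic_state *state,
				    struct drm_plane *plane)
{
	unsigned plane_mask = 0;
	int ret;

	plane->old_fb = plane->fb;
	plane_mask |= 1 << drm_plane_index(plane);

	ret = drm_atomic_commit(state);

	drm_atomic_clean_old_fb(dev, plane_mask, ret);
	return ret;
}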
int drm_mode_atomic_ioctl(struct drm_device *dev,
			  void *data, struct drm_file *file_priv)
{
	struct drm_mode_atomic *arg = data;
	uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
	uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
	uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
	uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
	unsigned int copied_objs, copied_props;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	unsigned plane_mask;
	int ret = 0;
	unsigned int i, j;

	/* disallow for drivers not supporting atomic: */
	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
		return -EINVAL;

	/* disallow for userspace that has not enabled atomic cap (even
	 * though this may be a bit overkill, since legacy userspace
	 * wouldn't know how to call this ioctl)
	 */
	if (!file_priv->atomic)
		return -EINVAL;

	if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
		return -EINVAL;

	if (arg->reserved)
		return -EINVAL;

	if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
			!dev->mode_config.async_page_flip)
		return -EINVAL;

	/* can't test and expect an event at the same time. */
	if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
			(arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
		return -EINVAL;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	state->acquire_ctx = &ctx;
	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
retry:
	plane_mask = 0;
	copied_objs = 0;
	copied_props = 0;

	for (i = 0; i < arg->count_objs; i++) {
		uint32_t obj_id, count_props;
		struct drm_mode_object *obj;

		if (get_user(obj_id, objs_ptr + copied_objs)) {
			ret = -EFAULT;
			goto out;
		}

		obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
		if (!obj) {
			ret = -ENOENT;
			goto out;
		}

		if (!obj->properties) {
			drm_mode_object_unreference(obj);
			ret = -ENOENT;
			goto out;
		}

		if (get_user(count_props, count_props_ptr + copied_objs)) {
			drm_mode_object_unreference(obj);
			ret = -EFAULT;
			goto out;
		}

		copied_objs++;

		for (j = 0; j < count_props; j++) {
			uint32_t prop_id;
			uint64_t prop_value;
			struct drm_property *prop;

			if (get_user(prop_id, props_ptr + copied_props)) {
				drm_mode_object_unreference(obj);
				ret = -EFAULT;
				goto out;
			}

			prop = drm_mode_obj_find_prop_id(obj, prop_id);
			if (!prop) {
				drm_mode_object_unreference(obj);
				ret = -ENOENT;
				goto out;
			}

			if (copy_from_user(&prop_value,
					   prop_values_ptr + copied_props,
					   sizeof(prop_value))) {
				drm_mode_object_unreference(obj);
				ret = -EFAULT;
				goto out;
			}

			ret = atomic_set_prop(state, obj, prop, prop_value);
			if (ret) {
				drm_mode_object_unreference(obj);
				goto out;
			}

			copied_props++;
		}

		if (obj->type == DRM_MODE_OBJECT_PLANE && count_props &&
		    !(arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)) {
			plane = obj_to_plane(obj);
			plane_mask |= (1 << drm_plane_index(plane));
			plane->old_fb = plane->fb;
		}
		drm_mode_object_unreference(obj);
	}
	if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			struct drm_pending_vblank_event *e;

			e = create_vblank_event(dev, file_priv, NULL,
						arg->user_data);
			if (!e) {
				ret = -ENOMEM;
				goto out;
			}

			crtc_state->event = e;
		}
	}

	if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
		/*
		 * Unlike commit, check_only does not clean up state.
		 * Below we call drm_atomic_state_free for it.
		 */
		ret = drm_atomic_check_only(state);
	} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
		ret = drm_atomic_nonblocking_commit(state);
	} else {
		ret = drm_atomic_commit(state);
	}

out:
	drm_atomic_clean_old_fb(dev, plane_mask, ret);

	if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
		/*
		 * Free the allocated event. drm_atomic_helper_setup_commit
		 * can allocate an event too, so only free it if it's ours
		 * to prevent a double free in drm_atomic_state_clear.
		 */
		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			struct drm_pending_vblank_event *event = crtc_state->event;

			if (event && (event->base.fence || event->base.file_priv)) {
				drm_event_cancel_free(dev, &event->base);
				crtc_state->event = NULL;
			}
		}
	}

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	if (ret || arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
		drm_atomic_state_free(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
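
/*
 * Example (editor's sketch, not part of the original file): the userspace
 * side of this ioctl using the libdrm wrappers.  plane_id, fb_id_prop and
 * fb_id are hypothetical placeholders that a real client would look up via
 * drmModeObjectGetProperties() and friends.
 *
 *	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
 *	int ret;
 *
 *	drmModeAtomicAddProperty(req, plane_id, fb_id_prop, fb_id);
 *	ret = drmModeAtomicCommit(fd, req,
 *				  DRM_MODE_ATOMIC_NONBLOCK |
 *				  DRM_MODE_PAGE_FLIP_EVENT,
 *				  user_data);
 *	drmModeAtomicFree(req);
 */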