vmwgfx_scrn.c

/**************************************************************************
 *
 * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#define vmw_crtc_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.crtc)
#define vmw_encoder_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.encoder)
#define vmw_connector_to_sou(x) \
	container_of(x, struct vmw_screen_object_unit, base.connector)
/**
 * struct vmw_kms_sou_surface_dirty - Closure structure for
 * blit surface to screen command.
 * @base: The base type we derive from. Used by vmw_kms_helper_dirty().
 * @left: Left side of bounding box.
 * @right: Right side of bounding box.
 * @top: Top side of bounding box.
 * @bottom: Bottom side of bounding box.
 * @dst_x: Difference between source clip rects and framebuffer coordinates.
 * @dst_y: Difference between source clip rects and framebuffer coordinates.
 * @sid: Surface id of surface to copy from.
 */
struct vmw_kms_sou_surface_dirty {
	struct vmw_kms_dirty base;
	s32 left, right, top, bottom;
	s32 dst_x, dst_y;
	u32 sid;
};
/*
 * SVGA commands that are used by this code. Please see the device headers
 * for explanation.
 */
struct vmw_kms_sou_readback_blit {
	uint32 header;
	SVGAFifoCmdBlitScreenToGMRFB body;
};

struct vmw_kms_sou_dmabuf_blit {
	uint32 header;
	SVGAFifoCmdBlitGMRFBToScreen body;
};

struct vmw_kms_sou_dirty_cmd {
	SVGA3dCmdHeader header;
	SVGA3dCmdBlitSurfaceToScreen body;
};
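
/*
 * Note: in the FIFO, a vmw_kms_sou_dirty_cmd is followed directly by an
 * array of SVGASignedRect clip rectangles, one per dirty clip. The reserve
 * and commit sizes used below account for both the command and that array.
 */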
/**
 * struct vmw_screen_object_unit - Display unit using screen objects.
 */
struct vmw_screen_object_unit {
	struct vmw_display_unit base;

	unsigned long buffer_size; /**< Size of allocated buffer */
	struct vmw_dma_buffer *buffer; /**< Backing store buffer */

	bool defined; /**< True while the screen object is defined in the device */
};
static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
	vmw_du_cleanup(&sou->base);
	kfree(sou);
}

/*
 * Screen Object Display Unit CRTC functions
 */
static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
{
	vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}

/**
 * Send the fifo command to create a screen.
 */
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
			       struct vmw_screen_object_unit *sou,
			       uint32_t x, uint32_t y,
			       struct drm_display_mode *mode)
{
	size_t fifo_size;

	struct {
		struct {
			uint32_t cmdType;
		} header;
		SVGAScreenObject obj;
	} *cmd;

	BUG_ON(!sou->buffer);

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	/* The hardware has hung, nothing we can do about it here. */
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
	cmd->obj.structSize = sizeof(SVGAScreenObject);
	cmd->obj.id = sou->base.unit;
	cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
		(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
	cmd->obj.size.width = mode->hdisplay;
	cmd->obj.size.height = mode->vdisplay;
	if (sou->base.is_implicit) {
		cmd->obj.root.x = x;
		cmd->obj.root.y = y;
	} else {
		cmd->obj.root.x = sou->base.gui_x;
		cmd->obj.root.y = sou->base.gui_y;
	}
	sou->base.set_gui_x = cmd->obj.root.x;
	sou->base.set_gui_y = cmd->obj.root.y;

	/* Ok to assume that buffer is pinned in vram */
	vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
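	/* The backing store is laid out at 4 bytes per pixel, hence pitch = width * 4. */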
	cmd->obj.backingStore.pitch = mode->hdisplay * 4;

	vmw_fifo_commit(dev_priv, fifo_size);

	sou->defined = true;

	return 0;
}
/**
 * Send the fifo command to destroy a screen.
 */
static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
				struct vmw_screen_object_unit *sou)
{
	size_t fifo_size;
	int ret;

	struct {
		struct {
			uint32_t cmdType;
		} header;
		SVGAFifoCmdDestroyScreen body;
	} *cmd;

	/* no need to do anything */
	if (unlikely(!sou->defined))
		return 0;

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	/* the hardware has hung, nothing we can do about it here */
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
	cmd->body.screenId = sou->base.unit;

	vmw_fifo_commit(dev_priv, fifo_size);

	/* Force sync */
	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
	if (unlikely(ret != 0))
		DRM_ERROR("Failed to sync with HW\n");
	else
		sou->defined = false;

	return ret;
}
/**
 * vmw_sou_crtc_mode_set_nofb - Create new screen
 *
 * @crtc: CRTC associated with the new screen
 *
 * This function creates/destroys a screen. Since it cannot return an error,
 * if we somehow run into a failure we just log it and do the best we can to
 * get out.
 */
static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct vmw_private *dev_priv;
	struct vmw_screen_object_unit *sou;
	struct vmw_framebuffer *vfb;
	struct drm_framebuffer *fb;
	struct drm_plane_state *ps;
	struct vmw_plane_state *vps;
	int ret;

	sou = vmw_crtc_to_sou(crtc);
	dev_priv = vmw_priv(crtc->dev);
	ps = crtc->primary->state;
	fb = ps->fb;
	vps = vmw_plane_state_to_vps(ps);

	vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;

	if (sou->defined) {
		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		if (ret) {
			DRM_ERROR("Failed to destroy Screen Object\n");
			return;
		}
	}

	if (vfb) {
		sou->buffer = vps->dmabuf;
		sou->buffer_size = vps->dmabuf_size;

		ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y,
					  &crtc->mode);
		if (ret)
			DRM_ERROR("Failed to define Screen Object %dx%d\n",
				  crtc->x, crtc->y);

		vmw_kms_add_active(dev_priv, &sou->base, vfb);
	} else {
		sou->buffer = NULL;
		sou->buffer_size = 0;

		vmw_kms_del_active(dev_priv, &sou->base);
	}
}
/**
 * vmw_sou_crtc_helper_prepare - Noop
 *
 * @crtc: CRTC associated with the new screen
 *
 * Prepares the CRTC for a mode set, but we don't need to do anything here.
 */
static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
{
}

/**
 * vmw_sou_crtc_atomic_enable - Noop
 *
 * @crtc: CRTC associated with the new screen
 * @old_state: Unused
 *
 * This is called after a mode set has been completed.
 */
static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_crtc_state *old_state)
{
}

/**
 * vmw_sou_crtc_atomic_disable - Turns off CRTC
 *
 * @crtc: CRTC to be turned off
 * @old_state: Unused
 */
static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_crtc_state *old_state)
{
	struct vmw_private *dev_priv;
	struct vmw_screen_object_unit *sou;
	int ret;

	if (!crtc) {
		DRM_ERROR("CRTC is NULL\n");
		return;
	}

	sou = vmw_crtc_to_sou(crtc);
	dev_priv = vmw_priv(crtc->dev);

	if (sou->defined) {
		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		if (ret)
			DRM_ERROR("Failed to destroy Screen Object\n");
	}
}
static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
				  struct drm_framebuffer *new_fb,
				  struct drm_pending_vblank_event *event,
				  uint32_t flags,
				  struct drm_modeset_acquire_ctx *ctx)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
	struct vmw_fence_obj *fence = NULL;
	struct drm_vmw_rect vclips;
	int ret;

	if (!vmw_kms_crtc_flippable(dev_priv, crtc))
		return -EINVAL;

	flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
	ret = drm_atomic_helper_page_flip(crtc, new_fb, NULL, flags, ctx);
	if (ret) {
		DRM_ERROR("Page flip error %d.\n", ret);
		return ret;
	}

	/* do a full screen dirty update */
	vclips.x = crtc->x;
	vclips.y = crtc->y;
	vclips.w = crtc->mode.hdisplay;
	vclips.h = crtc->mode.vdisplay;

	if (vfb->dmabuf)
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
						  NULL, &vclips, 1, 1,
						  true, &fence);
	else
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
						   NULL, &vclips, NULL,
						   0, 0, 1, 1, &fence);

	if (ret != 0)
		goto out_no_fence;
	if (!fence) {
		ret = -EINVAL;
		goto out_no_fence;
	}

	if (event) {
		struct drm_file *file_priv = event->base.file_priv;

		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   true);
	}

	/*
	 * No need to hold on to this now. The only cleanup
	 * we need to do if we fail is unref the fence.
	 */
	vmw_fence_obj_unreference(&fence);

	if (vmw_crtc_to_du(crtc)->is_implicit)
		vmw_kms_update_implicit_fb(dev_priv, crtc);

	return ret;

out_no_fence:
	drm_atomic_set_fb_for_plane(crtc->primary->state, old_fb);
	return ret;
}

static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
	.gamma_set = vmw_du_crtc_gamma_set,
	.destroy = vmw_sou_crtc_destroy,
	.reset = vmw_du_crtc_reset,
	.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
	.atomic_destroy_state = vmw_du_crtc_destroy_state,
	.set_config = vmw_kms_set_config,
	.page_flip = vmw_sou_crtc_page_flip,
};
/*
 * Screen Object Display Unit encoder functions
 */
static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
{
	vmw_sou_destroy(vmw_encoder_to_sou(encoder));
}

static const struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
	.destroy = vmw_sou_encoder_destroy,
};

/*
 * Screen Object Display Unit connector functions
 */
static void vmw_sou_connector_destroy(struct drm_connector *connector)
{
	vmw_sou_destroy(vmw_connector_to_sou(connector));
}

static const struct drm_connector_funcs vmw_sou_connector_funcs = {
	.dpms = vmw_du_connector_dpms,
	.detect = vmw_du_connector_detect,
	.fill_modes = vmw_du_connector_fill_modes,
	.set_property = vmw_du_connector_set_property,
	.destroy = vmw_sou_connector_destroy,
	.reset = vmw_du_connector_reset,
	.atomic_duplicate_state = vmw_du_connector_duplicate_state,
	.atomic_destroy_state = vmw_du_connector_destroy_state,
	.atomic_set_property = vmw_du_connector_atomic_set_property,
	.atomic_get_property = vmw_du_connector_atomic_get_property,
};

static const struct
drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
	.best_encoder = drm_atomic_helper_best_encoder,
};
/*
 * Screen Object Display Plane Functions
 */

/**
 * vmw_sou_primary_plane_cleanup_fb - Frees sou backing buffer
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the display surface and drops the reference to the backing buffer.
 */
static void
vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
				 struct drm_plane_state *old_state)
{
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
	struct drm_crtc *crtc = plane->state->crtc ?
		plane->state->crtc : old_state->crtc;

	if (vps->dmabuf)
		vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false);
	vmw_dmabuf_unreference(&vps->dmabuf);
	vps->dmabuf_size = 0;

	vmw_du_plane_cleanup_fb(plane, old_state);
}
/**
 * vmw_sou_primary_plane_prepare_fb - allocate backing buffer
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * The SOU backing buffer is our equivalent of the display plane.
 *
 * Returns 0 on success
 */
static int
vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
				 struct drm_plane_state *new_state)
{
	struct drm_framebuffer *new_fb = new_state->fb;
	struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc;
	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
	struct vmw_private *dev_priv;
	size_t size;
	int ret;

	if (!new_fb) {
		vmw_dmabuf_unreference(&vps->dmabuf);
		vps->dmabuf_size = 0;

		return 0;
	}
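
	/* The backing buffer covers the full plane at 4 bytes per pixel. */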
	size = new_state->crtc_w * new_state->crtc_h * 4;
	dev_priv = vmw_priv(crtc->dev);

	if (vps->dmabuf) {
		if (vps->dmabuf_size == size) {
			/*
			 * Note that this might temporarily up the pin-count
			 * to 2, until cleanup_fb() is called.
			 */
			return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf,
						      true);
		}

		vmw_dmabuf_unreference(&vps->dmabuf);
		vps->dmabuf_size = 0;
	}

	vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
	if (!vps->dmabuf)
		return -ENOMEM;

	vmw_svga_enable(dev_priv);

	/* After we have allocated the backing store, we might not be able
	 * to resume the overlays; this is preferred to failing the allocation.
	 */
	vmw_overlay_pause_all(dev_priv);
	ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
			      &vmw_vram_ne_placement,
			      false, &vmw_dmabuf_bo_free);
	vmw_overlay_resume_all(dev_priv);
	if (ret) {
		vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
		return ret;
	}

	vps->dmabuf_size = size;

	/*
	 * TTM already thinks the buffer is pinned, but make sure the
	 * pin_count is upped.
	 */
	return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true);
}
static void
vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_crtc *crtc = plane->state->crtc;

	if (crtc)
		crtc->primary->fb = plane->state->fb;
}

static const struct drm_plane_funcs vmw_sou_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vmw_du_primary_plane_destroy,
	.reset = vmw_du_plane_reset,
	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
	.atomic_destroy_state = vmw_du_plane_destroy_state,
};

static const struct drm_plane_funcs vmw_sou_cursor_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = vmw_du_cursor_plane_destroy,
	.reset = vmw_du_plane_reset,
	.atomic_duplicate_state = vmw_du_plane_duplicate_state,
	.atomic_destroy_state = vmw_du_plane_destroy_state,
};

/*
 * Atomic Helpers
 */
static const struct
drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = {
	.atomic_check = vmw_du_cursor_plane_atomic_check,
	.atomic_update = vmw_du_cursor_plane_atomic_update,
	.prepare_fb = vmw_du_cursor_plane_prepare_fb,
	.cleanup_fb = vmw_du_plane_cleanup_fb,
};

static const struct
drm_plane_helper_funcs vmw_sou_primary_plane_helper_funcs = {
	.atomic_check = vmw_du_primary_plane_atomic_check,
	.atomic_update = vmw_sou_primary_plane_atomic_update,
	.prepare_fb = vmw_sou_primary_plane_prepare_fb,
	.cleanup_fb = vmw_sou_primary_plane_cleanup_fb,
};

static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = {
	.prepare = vmw_sou_crtc_helper_prepare,
	.mode_set_nofb = vmw_sou_crtc_mode_set_nofb,
	.atomic_check = vmw_du_crtc_atomic_check,
	.atomic_begin = vmw_du_crtc_atomic_begin,
	.atomic_flush = vmw_du_crtc_atomic_flush,
	.atomic_enable = vmw_sou_crtc_atomic_enable,
	.atomic_disable = vmw_sou_crtc_atomic_disable,
};
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
	struct vmw_screen_object_unit *sou;
	struct drm_device *dev = dev_priv->dev;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_plane *primary, *cursor;
	struct drm_crtc *crtc;
	int ret;

	sou = kzalloc(sizeof(*sou), GFP_KERNEL);
	if (!sou)
		return -ENOMEM;

	sou->base.unit = unit;
	crtc = &sou->base.crtc;
	encoder = &sou->base.encoder;
	connector = &sou->base.connector;
	primary = &sou->base.primary;
	cursor = &sou->base.cursor;

	sou->base.active_implicit = false;
	sou->base.pref_active = (unit == 0);
	sou->base.pref_width = dev_priv->initial_width;
	sou->base.pref_height = dev_priv->initial_height;
	sou->base.pref_mode = NULL;

	/*
	 * Remove this after enabling atomic because property values can
	 * only exist in a state object
	 */
	sou->base.is_implicit = false;

	/* Initialize primary plane */
	vmw_du_plane_reset(primary);

	ret = drm_universal_plane_init(dev, &sou->base.primary,
				       0, &vmw_sou_plane_funcs,
				       vmw_primary_plane_formats,
				       ARRAY_SIZE(vmw_primary_plane_formats),
				       NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize primary plane\n");
		goto err_free;
	}

	drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs);

	/* Initialize cursor plane */
	vmw_du_plane_reset(cursor);

	ret = drm_universal_plane_init(dev, &sou->base.cursor,
				       0, &vmw_sou_cursor_funcs,
				       vmw_cursor_plane_formats,
				       ARRAY_SIZE(vmw_cursor_plane_formats),
				       NULL, DRM_PLANE_TYPE_CURSOR, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize cursor plane\n");
		drm_plane_cleanup(&sou->base.primary);
		goto err_free;
	}

	drm_plane_helper_add(cursor, &vmw_sou_cursor_plane_helper_funcs);

	vmw_du_connector_reset(connector);
	ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to initialize connector\n");
		goto err_free;
	}

	drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
	connector->status = vmw_du_connector_detect(connector, true);
	vmw_connector_state_to_vcs(connector->state)->is_implicit = false;

	ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
			       DRM_MODE_ENCODER_VIRTUAL, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize encoder\n");
		goto err_free_connector;
	}

	(void) drm_mode_connector_attach_encoder(connector, encoder);
	encoder->possible_crtcs = (1 << unit);
	encoder->possible_clones = 0;

	ret = drm_connector_register(connector);
	if (ret) {
		DRM_ERROR("Failed to register connector\n");
		goto err_free_encoder;
	}

	vmw_du_crtc_reset(crtc);
	ret = drm_crtc_init_with_planes(dev, crtc, &sou->base.primary,
					&sou->base.cursor,
					&vmw_screen_object_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to initialize CRTC\n");
		goto err_free_unregister;
	}

	drm_crtc_helper_add(crtc, &vmw_sou_crtc_helper_funcs);
	drm_mode_crtc_set_gamma_size(crtc, 256);

	drm_object_attach_property(&connector->base,
				   dev_priv->hotplug_mode_update_property, 1);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_x_property, 0);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_y_property, 0);
	if (dev_priv->implicit_placement_property)
		drm_object_attach_property
			(&connector->base,
			 dev_priv->implicit_placement_property,
			 sou->base.is_implicit);

	return 0;

err_free_unregister:
	drm_connector_unregister(connector);
err_free_encoder:
	drm_encoder_cleanup(encoder);
err_free_connector:
	drm_connector_cleanup(connector);
err_free:
	kfree(sou);
	return ret;
}
int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int i, ret;

	if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
		DRM_INFO("Not using screen objects,"
			 " missing cap SCREEN_OBJECT_2\n");
		return -ENOSYS;
	}

	ret = -ENOMEM;
	dev_priv->num_implicit = 0;
	dev_priv->implicit_fb = NULL;

	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
	if (unlikely(ret != 0))
		return ret;

	vmw_kms_create_implicit_placement_property(dev_priv, false);

	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
		vmw_sou_init(dev_priv, i);

	dev_priv->active_display_unit = vmw_du_screen_object;

	DRM_INFO("Screen Objects Display Unit initialized\n");

	return 0;
}
static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
				  struct vmw_framebuffer *framebuffer)
{
	struct vmw_dma_buffer *buf =
		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
			     base)->buffer;
	int depth = framebuffer->base.format->depth;
	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;
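
	/*
	 * The GMRFB (guest memory region framebuffer) tells the device which
	 * guest buffer and pixel layout to use as the source/destination for
	 * subsequent screen blit and readback commands.
	 */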
	/* Emulate RGBA support: contrary to svga_reg.h, RGBA (a color depth
	 * of 32) is not supported by hosts, so report a depth of 24. This is
	 * only a problem if we later read this value back and expect what we
	 * uploaded.
	 */
	if (depth == 32)
		depth = 24;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (!cmd) {
		DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
		return -ENOMEM;
	}

	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = framebuffer->base.format->cpp[0] * 8;
	cmd->body.format.colorDepth = depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
	/* Buffer is reserved in vram or GMR */
	vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
/**
 * vmw_sou_surface_fifo_commit - Callback to fill in and submit a
 * blit surface to screen command.
 *
 * @dirty: The closure structure.
 *
 * Fills in the missing fields in the command, and translates the cliprects
 * to match the encoded destination bounding box.
 */
static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_surface_dirty *sdirty =
		container_of(dirty, typeof(*sdirty), base);
	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
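	/*
	 * Translation that maps the destination (screen) bounding box back
	 * into source surface coordinates: add the display unit offset and
	 * subtract the surface-to-framebuffer offset.
	 */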
	s32 trans_x = dirty->unit->crtc.x - sdirty->dst_x;
	s32 trans_y = dirty->unit->crtc.y - sdirty->dst_y;
	size_t region_size = dirty->num_hits * sizeof(SVGASignedRect);
	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];
	int i;

	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}

	cmd->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
	cmd->header.size = sizeof(cmd->body) + region_size;

	/*
	 * Use the destination bounding box to specify destination - and
	 * source bounding regions.
	 */
	cmd->body.destRect.left = sdirty->left;
	cmd->body.destRect.right = sdirty->right;
	cmd->body.destRect.top = sdirty->top;
	cmd->body.destRect.bottom = sdirty->bottom;

	cmd->body.srcRect.left = sdirty->left + trans_x;
	cmd->body.srcRect.right = sdirty->right + trans_x;
	cmd->body.srcRect.top = sdirty->top + trans_y;
	cmd->body.srcRect.bottom = sdirty->bottom + trans_y;

	cmd->body.srcImage.sid = sdirty->sid;
	cmd->body.destScreenId = dirty->unit->unit;

	/* Blits are relative to the destination rect. Translate. */
	for (i = 0; i < dirty->num_hits; ++i, ++blit) {
		blit->left -= sdirty->left;
		blit->right -= sdirty->left;
		blit->top -= sdirty->top;
		blit->bottom -= sdirty->top;
	}

	vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd));

	sdirty->left = sdirty->top = S32_MAX;
	sdirty->right = sdirty->bottom = S32_MIN;
}
/**
 * vmw_sou_surface_clip - Callback to encode a blit surface to screen cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a SVGASignedRect cliprect and updates the bounding box of the
 * BLIT_SURFACE_TO_SCREEN command.
 */
static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_surface_dirty *sdirty =
		container_of(dirty, typeof(*sdirty), base);
	struct vmw_kms_sou_dirty_cmd *cmd = dirty->cmd;
	SVGASignedRect *blit = (SVGASignedRect *) &cmd[1];

	/* Destination rect. */
	blit += dirty->num_hits;
	blit->left = dirty->unit_x1;
	blit->top = dirty->unit_y1;
	blit->right = dirty->unit_x2;
	blit->bottom = dirty->unit_y2;

	/* Destination bounding box */
	sdirty->left = min_t(s32, sdirty->left, dirty->unit_x1);
	sdirty->top = min_t(s32, sdirty->top, dirty->unit_y1);
	sdirty->right = max_t(s32, sdirty->right, dirty->unit_x2);
	sdirty->bottom = max_t(s32, sdirty->bottom, dirty->unit_y2);

	dirty->num_hits++;
}
/**
 * vmw_kms_sou_do_surface_dirty - Dirty part of a surface backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the surface-buffer backed framebuffer.
 * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @srf: Pointer to surface to blit from. If NULL, the surface attached
 * to @framebuffer will be used.
 * @dest_x: X coordinate offset to align @srf with framebuffer coordinates.
 * @dest_y: Y coordinate offset to align @srf with framebuffer coordinates.
 * @num_clips: Number of clip rects in @clips.
 * @inc: Increment to use when looping over @clips.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
				 struct vmw_framebuffer *framebuffer,
				 struct drm_clip_rect *clips,
				 struct drm_vmw_rect *vclips,
				 struct vmw_resource *srf,
				 s32 dest_x,
				 s32 dest_y,
				 unsigned num_clips, int inc,
				 struct vmw_fence_obj **out_fence)
{
	struct vmw_framebuffer_surface *vfbs =
		container_of(framebuffer, typeof(*vfbs), base);
	struct vmw_kms_sou_surface_dirty sdirty;
	struct vmw_validation_ctx ctx;
	int ret;

	if (!srf)
		srf = &vfbs->surface->res;

	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
	if (ret)
		return ret;

	sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
	sdirty.base.clip = vmw_sou_surface_clip;
	sdirty.base.dev_priv = dev_priv;
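	/* Reserve room for one blit command plus one SVGASignedRect per clip. */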
	sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
		sizeof(SVGASignedRect) * num_clips;
	sdirty.sid = srf->id;
	sdirty.left = sdirty.top = S32_MAX;
	sdirty.right = sdirty.bottom = S32_MIN;
	sdirty.dst_x = dest_x;
	sdirty.dst_y = dest_y;

	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
				   dest_x, dest_y, num_clips, inc,
				   &sdirty.base);
	vmw_kms_helper_resource_finish(&ctx, out_fence);

	return ret;
}
/**
 * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of dma-buffer blit clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of GMRFB-to-screen blit clips.
 */
static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
{
	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}

	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct vmw_kms_sou_dmabuf_blit) *
			dirty->num_hits);
}

/**
 * vmw_sou_dmabuf_clip - Callback to encode a dma-buffer blit cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
 */
static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;

	blit += dirty->num_hits;
	blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
	blit->body.destScreenId = dirty->unit->unit;
	blit->body.srcOrigin.x = dirty->fb_x;
	blit->body.srcOrigin.y = dirty->fb_y;
	blit->body.destRect.left = dirty->unit_x1;
	blit->body.destRect.top = dirty->unit_y1;
	blit->body.destRect.right = dirty->unit_x2;
	blit->body.destRect.bottom = dirty->unit_y2;

	dirty->num_hits++;
}
/**
 * vmw_kms_sou_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
 *
 * @dev_priv: Pointer to the device private structure.
 * @framebuffer: Pointer to the dma-buffer backed framebuffer.
 * @clips: Array of clip rects.
 * @vclips: Alternate array of clip rects. Either @clips or @vclips must
 * be NULL.
 * @num_clips: Number of clip rects in @clips.
 * @increment: Increment to use when looping over @clips.
 * @interruptible: Whether to perform waits interruptible if possible.
 * @out_fence: If non-NULL, will return a ref-counted pointer to a
 * struct vmw_fence_obj. The returned fence pointer may be NULL in which
 * case the device has already synchronized.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
				struct vmw_framebuffer *framebuffer,
				struct drm_clip_rect *clips,
				struct drm_vmw_rect *vclips,
				unsigned num_clips, int increment,
				bool interruptible,
				struct vmw_fence_obj **out_fence)
{
	struct vmw_dma_buffer *buf =
		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
			     base)->buffer;
	struct vmw_kms_dirty dirty;
	int ret;

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
					    false);
	if (ret)
		return ret;

	ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
	if (unlikely(ret != 0))
		goto out_revert;

	dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
	dirty.clip = vmw_sou_dmabuf_clip;
	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
		num_clips;
	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
				   0, 0, num_clips, increment, &dirty);
	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);

	return ret;

out_revert:
	vmw_kms_helper_buffer_revert(buf);

	return ret;
}
/**
 * vmw_sou_readback_fifo_commit - Callback to submit a set of readback clips.
 *
 * @dirty: The closure structure.
 *
 * Commits a previously built command buffer of readback clips.
 */
static void vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty)
{
	if (!dirty->num_hits) {
		vmw_fifo_commit(dirty->dev_priv, 0);
		return;
	}

	vmw_fifo_commit(dirty->dev_priv,
			sizeof(struct vmw_kms_sou_readback_blit) *
			dirty->num_hits);
}

/**
 * vmw_sou_readback_clip - Callback to encode a readback cliprect.
 *
 * @dirty: The closure structure
 *
 * Encodes a BLIT_SCREEN_TO_GMRFB cliprect.
 */
static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
{
	struct vmw_kms_sou_readback_blit *blit = dirty->cmd;

	blit += dirty->num_hits;
	blit->header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
	blit->body.srcScreenId = dirty->unit->unit;
	blit->body.destOrigin.x = dirty->fb_x;
	blit->body.destOrigin.y = dirty->fb_y;
	blit->body.srcRect.left = dirty->unit_x1;
	blit->body.srcRect.top = dirty->unit_y1;
	blit->body.srcRect.right = dirty->unit_x2;
	blit->body.srcRect.bottom = dirty->unit_y2;

	dirty->num_hits++;
}
/**
 * vmw_kms_sou_readback - Perform a readback from the screen object system to
 * a dma-buffer backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the dma-buffer backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
			 struct drm_file *file_priv,
			 struct vmw_framebuffer *vfb,
			 struct drm_vmw_fence_rep __user *user_fence_rep,
			 struct drm_vmw_rect *vclips,
			 uint32_t num_clips)
{
	struct vmw_dma_buffer *buf =
		container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
	struct vmw_kms_dirty dirty;
	int ret;

	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
	if (ret)
		return ret;

	ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
	if (unlikely(ret != 0))
		goto out_revert;

	dirty.fifo_commit = vmw_sou_readback_fifo_commit;
	dirty.clip = vmw_sou_readback_clip;
	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
		num_clips;
	ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
				   0, 0, num_clips, 1, &dirty);
	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
				     user_fence_rep);

	return ret;

out_revert:
	vmw_kms_helper_buffer_revert(buf);

	return ret;
}