vmwgfx_fb.c

/**************************************************************************
 *
 * Copyright © 2007 David Airlie
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/export.h>

#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#define VMW_DIRTY_DELAY (HZ / 30)
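
/*
 * Per-fb_info state: the vmalloc'ed shadow framebuffer, the buffer object
 * backing the real framebuffer in VRAM together with its kmap, and the
 * dirty-rectangle bookkeeping used by the deferred-IO flush path.
 */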
struct vmw_fb_par {
	struct vmw_private *vmw_priv;

	void *vmalloc;

	struct vmw_dma_buffer *vmw_bo;
	struct ttm_bo_kmap_obj map;

	u32 pseudo_palette[17];

	unsigned depth;
	unsigned bpp;

	unsigned max_width;
	unsigned max_height;

	void *bo_ptr;
	unsigned bo_size;
	bool bo_iowrite;

	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
			    unsigned blue, unsigned transp,
			    struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	u32 *pal = par->pseudo_palette;

	if (regno > 15) {
		DRM_ERROR("Bad regno %u.\n", regno);
		return 1;
	}

	switch (par->depth) {
	case 24:
	case 32:
		pal[regno] = ((red & 0xff00) << 8) |
			      (green & 0xff00) |
			     ((blue & 0xff00) >> 8);
		break;
	default:
		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
		return 1;
	}

	return 0;
}
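
/*
 * Validate a requested mode. Only 32 bpp is accepted (mapped to 24- or
 * 32-bit depth depending on the alpha channel), panning requires
 * SVGA_CAP_DISPLAY_TOPOLOGY, and the resulting geometry must fit both the
 * framebuffer and the available VRAM.
 */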
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    (var->xoffset != 0 || var->yoffset != 0)) {
		DRM_ERROR("Can not handle panning without display topology\n");
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					var->xres * var->bits_per_pixel/8,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}
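
/*
 * Apply the current mode: program width, height, pitch and depth through
 * the SVGA registers and, when the device supports display topology,
 * describe a single primary display at the requested offset.
 */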
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	int ret;

	info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;

	ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
				 info->fix.line_length,
				 par->bpp, par->depth);
	if (ret)
		return ret;

	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
		/* TODO check if pitch and offset change */
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	/* This warning is really helpful, because if it triggers the user
	 * can probably not see anything on the screen.
	 */
	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

	return 0;
}
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}

static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}

/*
 * Dirty code
 */
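
/*
 * Flush the pending dirty area: copy the affected region from the shadow
 * (vmalloc) framebuffer into the VRAM buffer object and emit an
 * SVGA_CMD_UPDATE for the dirty rectangle through the FIFO so the host
 * redraws it.
 */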
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	if (vmw_priv->suspended)
		return;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}
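
/*
 * Grow the pending dirty rectangle to cover the given area. If the
 * rectangle was previously empty and dirty tracking is active, also kick
 * the deferred work that will eventually flush it.
 */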
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
			      unsigned x1, unsigned y1,
			      unsigned width, unsigned height)
{
	struct fb_info *info = par->vmw_priv->fb_info;
	unsigned long flags;
	unsigned x2 = x1 + width;
	unsigned y2 = y1 + height;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (par->dirty.x1 == par->dirty.x2) {
		par->dirty.x1 = x1;
		par->dirty.y1 = y1;
		par->dirty.x2 = x2;
		par->dirty.y2 = y2;
		/* if we are active start the dirty work
		 * we share the work with the defio system */
		if (par->dirty.active)
			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
	} else {
		if (x1 < par->dirty.x1)
			par->dirty.x1 = x1;
		if (y1 < par->dirty.y1)
			par->dirty.y1 = y1;
		if (x2 > par->dirty.x2)
			par->dirty.x2 = x2;
		if (y2 > par->dirty.y2)
			par->dirty.y2 = y2;
	}
	spin_unlock_irqrestore(&par->dirty.lock, flags);
}
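
/*
 * Deferred-IO callback: derive the span of touched scanlines from the
 * page list, record it as a full-width dirty rectangle and flush it
 * right away.
 */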
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
}

struct fb_deferred_io vmw_defio = {
	.delay		= VMW_DIRTY_DELAY,
	.deferred_io	= vmw_deferred_io,
};
/*
 * Draw code
 */

static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}

static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}

static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}
/*
 * Bring up code
 */

static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};
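
/*
 * Allocate and initialize the DMA buffer that backs the fbdev
 * framebuffer. The placement limit (lpfn) keeps the buffer within the
 * first size bytes of VRAM; the fbdev master lock is held across the
 * allocation.
 */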
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
			    size_t size, struct vmw_dma_buffer **out)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_placement ne_placement = vmw_vram_ne_placement;
	int ret;

	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* interruptible? */
	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
	if (unlikely(ret != 0))
		return ret;

	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (!vmw_bo) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
			      &ne_placement,
			      false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto err_unlock; /* init frees the buffer on failure */

	*out = vmw_bo;

	ttm_write_unlock(&vmw_priv->fbdev_master.lock);

	return 0;

err_unlock:
	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
	return ret;
}
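
/*
 * Create the fbdev emulation for the device: allocate the shadow buffer
 * and the VRAM buffer object, fill in the fixed and variable screen info,
 * wire up deferred IO and register the framebuffer.
 */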
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	fb_bpp = 32;
	fb_depth = 24;

	/* XXX As shouldn't these be as well. */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	initial_width = min(vmw_priv->initial_width, fb_width);
	initial_height = min(vmw_priv->initial_height, fb_height);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;
	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bpp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;
	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24-bit depth by default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;
	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;
	return ret;
}
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	/* ??? order */
	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}
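
/*
 * Temporarily disable fbdev output: stop dirty tracking, flush any
 * pending deferred work, then unmap and unpin the framebuffer buffer
 * object so the VRAM can be used for other purposes.
 */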
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work_sync(&info->deferred_work);

	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);

	return 0;
}
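
/*
 * Re-enable fbdev output after vmw_fb_off(): move the buffer object back
 * to the start of VRAM, remap it, restart dirty tracking, reprogram the
 * mode and mark the whole screen dirty so it gets redrawn.
 */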
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* we are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If there already was stuff dirty we won't
	 * schedule a new work, so let's do it now */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}