#! /usr/bin/env python
#
# A report generator for gcov 3.4
#
# This routine generates a format that is similar to the format generated
# by the Python coverage.py module. This code is similar to the
# data processing performed by lcov's geninfo command. However, we
# don't worry about parsing the *.gcna files, and backwards compatibility for
# older versions of gcov is not supported.
#
# Outstanding issues
#   - verify that gcov 3.4 or newer is being used
#   - verify support for symbolic links
#
# gcovr is a FAST project. For documentation, bug reporting, and
# updates, see https://software.sandia.gov/trac/fast/wiki/gcovr
#
# _________________________________________________________________________
#
# FAST: Utilities for Agile Software Development
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the BSD License.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
# For more information, see the FAST README.txt file.
#
# $Revision: 2839 $
# $Date: 2013-05-27 11:13:17 -0700 (Mon, 27 May 2013) $
# _________________________________________________________________________
#

import copy
import glob
import os
import re
import subprocess
import sys
import time
import xml.dom.minidom

from optparse import OptionParser
from string import Template
from os.path import normpath

__version__ = "2.5-prerelease"
src_revision = "$Revision: 2839 $"
gcov_cmd = "gcov"

output_re = re.compile("[Cc]reating [`'](.*)'$")
source_re = re.compile("cannot open (source|graph) file")

starting_dir = os.getcwd()


def version_str():
    ans = __version__
    m = re.match(r'\$Revision:\s*(\S+)\s*\$', src_revision)
    if m:
        ans = ans + " (r%s)" % (m.group(1))
    return ans


#
# Container object for coverage statistics
#
class CoverageData(object):

    def __init__(self, fname, uncovered, uncovered_exceptional, covered,
                 branches, noncode):
        self.fname = fname
        # Shallow copies are cheap & "safe" because the caller will
        # throw away their copies of covered & uncovered after calling
        # us exactly *once*
        self.uncovered = copy.copy(uncovered)
        self.uncovered_exceptional = copy.copy(uncovered_exceptional)
        self.covered = copy.copy(covered)
        self.noncode = copy.copy(noncode)
        # But, a deep copy is required here
        self.all_lines = copy.deepcopy(uncovered)
        self.all_lines.update(uncovered_exceptional)
        self.all_lines.update(covered.keys())
        self.branches = copy.deepcopy(branches)

    def update(self, uncovered, uncovered_exceptional, covered, branches,
               noncode):
        self.all_lines.update(uncovered)
        self.all_lines.update(uncovered_exceptional)
        self.all_lines.update(covered.keys())
        self.uncovered.update(uncovered)
        self.uncovered_exceptional.update(uncovered_exceptional)
        self.noncode.intersection_update(noncode)
        for k in covered.keys():
            self.covered[k] = self.covered.get(k, 0) + covered[k]
        for k in branches.keys():
            for b in branches[k]:
                d = self.branches.setdefault(k, {})
                d[b] = d.get(b, 0) + branches[k][b]
        self.uncovered.difference_update(self.covered.keys())
        self.uncovered_exceptional.difference_update(self.covered.keys())

    def uncovered_str(self, exceptional):
        if options.show_branch:
            # Don't do any aggregation on branch results
            tmp = []
            for line in self.branches.keys():
                for branch in self.branches[line]:
                    if self.branches[line][branch] == 0:
                        tmp.append(line)
                        break
            tmp.sort()
            return ",".join([str(x) for x in tmp]) or ""

        if exceptional:
            tmp = list(self.uncovered_exceptional)
        else:
            tmp = list(self.uncovered)
        if len(tmp) == 0:
            return ""
        tmp.sort()

        first = None
        last = None
        ranges = []
        for item in tmp:
            if last is None:
                first = item
                last = item
            elif item == (last + 1):
                last = item
            else:
                if len(self.noncode.intersection(range(last + 1, item))) \
                        == item - last - 1:
                    last = item
                    continue
                if first == last:
                    ranges.append(str(first))
                else:
                    ranges.append(str(first) + "-" + str(last))
                first = item
                last = item
        if first == last:
            ranges.append(str(first))
        else:
            ranges.append(str(first) + "-" + str(last))
        return ",".join(ranges)

    def coverage(self):
        if options.show_branch:
            total = 0
            cover = 0
            for line in self.branches.keys():
                for branch in self.branches[line].keys():
                    total += 1
                    cover += self.branches[line][branch] > 0 and 1 or 0
        else:
            total = len(self.all_lines)
            cover = len(self.covered)

        percent = total and str(int(100.0 * cover / total)) or "--"
        return (total, cover, percent)

    def summary(self):
        tmp = options.filter.sub('', self.fname)
        if not self.fname.endswith(tmp):
            # Do no truncation if the filter does not start matching at
            # the beginning of the string
            tmp = self.fname
        tmp = tmp.ljust(40)
        if len(tmp) > 40:
            tmp = tmp + "\n" + " " * 40

        (total, cover, percent) = self.coverage()
        uncovered_lines = self.uncovered_str(False)
        if not options.show_branch:
            t = self.uncovered_str(True)
            if len(t):
                uncovered_lines += " [* " + t + "]"
        return (total, cover,
                tmp + str(total).rjust(8) + str(cover).rjust(8) +
                percent.rjust(6) + "% " + uncovered_lines)

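#
# Note on the "Missing" notation produced by uncovered_str() and summary():
# consecutive uncovered lines are collapsed into "first-last" ranges joined
# by commas, and two runs separated only by non-code lines are merged. For
# example, uncovered lines {3, 4, 5, 9} print as "3-5,9", or as "3-9" when
# lines 6-8 were recorded as non-code; lines missed only on exceptional
# paths are appended in brackets, e.g. "[* 12]".
#

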
def resolve_symlinks(orig_path):
    """
    Return the normalized absolute path name with all symbolic links resolved
    """
    drive, tmp = os.path.splitdrive(os.path.abspath(orig_path))
    if not drive:
        drive = os.path.sep
    parts = tmp.split(os.path.sep)
    actual_path = [drive]
    while parts:
        actual_path.append(parts.pop(0))
        if not os.path.islink(os.path.join(*actual_path)):
            continue
        actual_path[-1] = os.readlink(os.path.join(*actual_path))
        tmp_drive, tmp_path = os.path.splitdrive(
            resolve_symlinks(os.path.join(*actual_path)))
        if tmp_drive:
            drive = tmp_drive
        actual_path = [drive] + tmp_path.split(os.path.sep)
    return os.path.join(*actual_path)


def path_startswith(path, base):
    return path.startswith(base) and (
        len(base) == len(path) or path[len(base)] == os.path.sep)


class PathAliaser(object):

    def __init__(self):
        self.aliases = {}
        self.master_targets = set()
        self.preferred_name = {}

    def master_path(self, path):
        match_found = False
        while True:
            for base, alias in self.aliases.items():
                if path_startswith(path, base):
                    path = alias + path[len(base):]
                    match_found = True
                    break
            for master_base in self.master_targets:
                if path_startswith(path, master_base):
                    return path, master_base, True
            if match_found:
                sys.stderr.write(
                    "(ERROR) violating fundamental assumption while walking "
                    "directory tree.\n\tPlease report this to the gcovr "
                    "developers.\n")
            return path, None, match_found

    def unalias_path(self, path):
        path = resolve_symlinks(path)
        path, master_base, known_path = self.master_path(path)
        if not known_path:
            return path
        # Try and resolve the preferred name for this location
        if master_base in self.preferred_name:
            return self.preferred_name[master_base] + path[len(master_base):]
        return path

    def add_master_target(self, master):
        self.master_targets.add(master)

    def add_alias(self, target, master):
        self.aliases[target] = master

    def set_preferred(self, master, preferred):
        self.preferred_name[master] = preferred


aliases = PathAliaser()

# This is UGLY. Here's why: UNIX resolves symbolic links by walking the
# entire directory structure. What that means is that relative links
# are always relative to the actual directory inode, and not the
# "virtual" path that the user might have traversed (over symlinks) on
# the way to that directory. Here's the canonical example:
#
#   a / b / c / testfile
#   a / d / e --> ../../a/b
#   m / n --> /a
#   x / y / z --> /m/n/d
#
# If we start in "y", we will see the following directory structure:
#   y
#   |-- z
#       |-- e
#           |-- c
#               |-- testfile
#
# The problem is that using a simple traversal based on the Python
# documentation:
#
#    (os.path.join(os.path.dirname(path), os.readlink(result)))
#
# will not work: we will see a link to /m/n/d from /x/y, but completely
# miss the fact that n is itself a link. If we then naively attempt to
# apply the "c" relative link, we get an intermediate path that looks
# like "/m/n/d/e/../../a/b", which would get normalized to "/m/n/a/b"; a
# nonexistent path. The solution is that we need to walk the original
# path, along with the full path of all links, one directory at a time,
# and check for embedded symlinks.
#
def link_walker(path):
    targets = [os.path.abspath(path)]
    while targets:
        target_dir = targets.pop(0)
        actual_dir = resolve_symlinks(target_dir)
        #print "target dir: %s (%s)" % (target_dir, actual_dir)
        master_name, master_base, visited = aliases.master_path(actual_dir)
        if visited:
            #print "  ...root already visited as %s" % master_name
            aliases.add_alias(target_dir, master_name)
            continue
        if master_name != target_dir:
            aliases.set_preferred(master_name, target_dir)
            aliases.add_alias(target_dir, master_name)
        aliases.add_master_target(master_name)
        #print "  ...master name = %s" % master_name
        #print "  ...walking %s" % target_dir
        for root, dirs, files in os.walk(target_dir, topdown=True):
            #print "  ...reading %s" % root
            for d in dirs:
                tmp = os.path.abspath(os.path.join(root, d))
                #print "  ...checking %s" % tmp
                if os.path.islink(tmp):
                    #print "  ...buffering link %s" % tmp
                    targets.append(tmp)
            yield root, dirs, files


def search_file(expr, path):
    """
    Given a search path, recursively descend to find files that match a
    regular expression.
    """
    ans = []
    pattern = re.compile(expr)
    if path is None or path == ".":
        path = os.getcwd()
    elif not os.path.exists(path):
        raise IOError("Unknown directory '" + path + "'")
    for root, dirs, files in link_walker(path):
        for name in files:
            if pattern.match(name):
                name = os.path.join(root, name)
                if os.path.islink(name):
                    ans.append(os.path.abspath(os.readlink(name)))
                else:
                    ans.append(os.path.abspath(name))
    return ans


#
# Get the list of datafiles in the directories specified by the user
#
def get_datafiles(flist, options):
    allfiles = []
    for dir in flist:
        if options.verbose:
            sys.stdout.write("Scanning directory %s for gcda/gcno files...\n"
                             % (dir, ))
        files = search_file(r".*\.gc(da|no)$", dir)
        # gcno files will *only* produce uncovered results; however,
        # that is useful information for the case where a compilation
        # unit is never actually exercised by the test code. So, we
        # will process gcno files, but ONLY if there is no corresponding
        # gcda file.
        gcda_files = [file for file in files if file.endswith('gcda')]
        tmp = set(gcda_files)
        gcno_files = [file for file in files if
                      file.endswith('gcno') and file[:-2] + 'da' not in tmp]
        if options.verbose:
            sys.stdout.write(
                "Found %d files (and will process %d)\n" %
                (len(files), len(gcda_files) + len(gcno_files)))
        allfiles.extend(gcda_files)
        allfiles.extend(gcno_files)
    return allfiles


def process_gcov_data(file, covdata, options):
    INPUT = open(file, "r")
    #
    # Get the filename
    #
    line = INPUT.readline()
    segments = line.split(':', 3)
    if len(segments) != 4 or not segments[2].lower().strip().endswith('source'):
        raise RuntimeError('Fatal error parsing gcov file, line 1: \n\t"%s"' % line.rstrip())
    currdir = os.getcwd()
    os.chdir(starting_dir)
    fname = aliases.unalias_path(os.path.abspath((segments[-1]).strip()))
    os.chdir(currdir)
    if options.verbose:
        sys.stdout.write("Parsing coverage data for file %s\n" % fname)
    #
    # Return if the filename does not match the filter
    #
    if not options.filter.match(fname):
        if options.verbose:
            sys.stdout.write(" Filtering coverage data for file %s\n" % fname)
        return
    #
    # Return if the filename matches the exclude pattern
    #
    for i in range(0, len(options.exclude)):
        if options.exclude[i].match(options.filter.sub('', fname)) or \
                options.exclude[i].match(fname) or \
                options.exclude[i].match(os.path.abspath(fname)):
            if options.verbose:
                sys.stdout.write(" Excluding coverage data for file %s\n" % fname)
            return
    #
    # Parse each line, and record the lines
    # that are uncovered
    #
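    # Each remaining record has the form "count:lineno:source"; gcov output
    # looks roughly like this (the counts are illustrative):
    #
    #           6:   10:    foo();    <- executed 6 times
    #       #####:   12:    bar();    <- never executed
    #       =====:   13:    baz();    <- reached only on exceptional paths
    #           -:   14:    }         <- non-executable line
    #   branch  0 taken 4             <- produced by --branch-counts
    #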
    noncode = set()
    uncovered = set()
    uncovered_exceptional = set()
    covered = {}
    branches = {}
    #first_record=True
    lineno = 0
    for line in INPUT:
        segments = line.split(":", 2)
        #print "HERE", segments
        tmp = segments[0].strip()
        if len(segments) > 1:
            try:
                lineno = int(segments[1].strip())
            except:
                pass  # keep previous line number!
        if tmp[0] == '#':
            uncovered.add(lineno)
        elif tmp[0] == '=':
            uncovered_exceptional.add(lineno)
        elif tmp[0] in "0123456789":
            covered[lineno] = int(segments[0].strip())
        elif tmp[0] == '-':
            # remember certain non-executed lines
            code = segments[2].strip()
            if len(code) == 0 or code == "{" or code == "}" or \
                    code.startswith("//") or code == 'else':
                noncode.add(lineno)
        elif tmp.startswith('branch'):
            fields = line.split()
            try:
                count = int(fields[3])
                branches.setdefault(lineno, {})[int(fields[1])] = count
            except:
                # We ignore branches that were "never executed"
                pass
        elif tmp.startswith('call'):
            pass
        elif tmp.startswith('function'):
            pass
        elif tmp[0] == 'f':
            pass
            #if first_record:
            #    first_record=False
            #    uncovered.add(prev)
            #if prev in uncovered:
            #    tokens=re.split('[ \t]+',tmp)
            #    if tokens[3] != "0":
            #        uncovered.remove(prev)
            #prev = int(segments[1].strip())
            #first_record=True
        else:
            sys.stderr.write(
                "(WARNING) Unrecognized GCOV output: '%s'\n"
                "\tThis is indicative of a gcov output parse error.\n"
                "\tPlease report this to the gcovr developers." % tmp)
    ##print 'uncovered', uncovered
    ##print 'covered', covered
    ##print 'branches', branches
    ##print 'noncode', noncode
    #
    # If the file is already in covdata, then we
    # remove lines that are covered here. Otherwise,
    # initialize covdata
    #
    if not fname in covdata:
        covdata[fname] = CoverageData(fname, uncovered, uncovered_exceptional,
                                      covered, branches, noncode)
    else:
        covdata[fname].update(uncovered, uncovered_exceptional, covered,
                              branches, noncode)
    INPUT.close()


#
# Process a datafile (generated by running the instrumented application)
# and run gcov with the corresponding arguments
#
# This is trickier than it sounds: The gcda/gcno files are stored in the
# same directory as the object files; however, gcov must be run from the
# same directory where gcc/g++ was run. Normally, the user would know
# where gcc/g++ was invoked from and could tell gcov the path to the
# object (and gcda) files with the --object-directory option.
# Unfortunately, we do everything backwards: gcovr looks for the gcda
# files and then has to infer the original gcc working directory.
#
# In general (but not always), we can assume that the gcda file is in a
# subdirectory of the original gcc working directory, so we will first
# try ".", and on error, move up the directory tree looking for the
# correct working directory (letting gcov's own error codes dictate when
# we hit the right directory). This covers 90+% of the "normal" cases.
# The exception to this is if gcc was invoked with "-o ../[...]" (i.e.,
# the object directory was a peer, not a parent/child, of the cwd). In
# this case, things are really tough. We accept an argument
# (--object-directory) that SHOULD BE THE SAME as the one provided to
# gcc. We will then walk that path (backwards) in the hopes of
# identifying the original gcc working directory (there is a bit of
# trial-and-error here).
#
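# As an illustration (the paths here are hypothetical): if gcc was run from
# /home/user/project with "-o obj/foo.o", the data file lands in
# /home/user/project/obj/foo.gcda. Walking upward from the gcda directory
# (obj/, then project/, ...), gcov stops producing "cannot open source/graph
# file" errors (the condition source_re checks for) once we chdir into
# /home/user/project, which is the upward search implemented below.
#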
def process_datafile(filename, covdata, options):
    #
    # Launch gcov
    #
    abs_filename = os.path.abspath(filename)
    (dirname, fname) = os.path.split(abs_filename)
    #(name, ext) = os.path.splitext(base)

    potential_wd = []
    errors = []
    Done = False

    if options.objdir:
        src_components = abs_filename.split(os.sep)
        components = normpath(options.objdir).split(os.sep)
        idx = 1
        while idx <= len(components):
            if idx > len(src_components):
                break
            if components[-1 * idx] != src_components[-1 * idx]:
                break
            idx += 1
        if idx > len(components):
            pass  # a parent dir; the normal process will find it
        elif components[-1 * idx] == '..':
            # rejoin the leading components of the path that was split on
            # os.sep (os.sep.join, since os.path.join cannot take a list)
            dirs = [os.sep.join(src_components[:len(src_components) - idx + 1])]
            while idx <= len(components) and components[-1 * idx] == '..':
                tmp = []
                for d in dirs:
                    for f in os.listdir(d):
                        x = os.path.join(d, f)
                        if os.path.isdir(x):
                            tmp.append(x)
                dirs = tmp
                idx += 1
            potential_wd = dirs
        else:
            if components[0] == '':
                # absolute path
                tmp = [options.objdir]
            else:
                # relative path: check relative to both the cwd and the
                # gcda file
                tmp = [os.path.join(x, options.objdir) for x in
                       [os.path.dirname(abs_filename), os.getcwd()]]
            potential_wd = [testdir for testdir in tmp
                            if os.path.isdir(testdir)]
            if len(potential_wd) == 0:
                errors.append("ERROR: cannot identify the location where GCC "
                              "was run using --object-directory=%s\n" %
                              options.objdir)
            # Revert to the normal
            #sys.exit(1)

    # no objdir was specified (or it was a parent dir); walk up the dir tree
    if len(potential_wd) == 0:
        wd = os.path.split(abs_filename)[0]
        while True:
            potential_wd.append(wd)
            wd = os.path.split(wd)[0]
            if wd == potential_wd[-1]:
                break

    cmd = [gcov_cmd, abs_filename,
           "--branch-counts", "--branch-probabilities", "--preserve-paths",
           '--object-directory', dirname]

    # NB: We are lazy English speakers, so we will only parse English output
    env = dict(os.environ)
    env['LC_ALL'] = 'en_US'

    while len(potential_wd) > 0 and not Done:
        # NB: either len(potential_wd) == 1, or all entries are absolute
        # paths, so we don't have to chdir(starting_dir) at every
        # iteration.
        os.chdir(potential_wd.pop(0))

        #if options.objdir:
        #    cmd.extend(["--object-directory", Template(options.objdir).substitute(filename=filename, head=dirname, tail=base, root=name, ext=ext)])

        if options.verbose:
            sys.stdout.write("Running gcov: '%s' in '%s'\n"
                             % (' '.join(cmd), os.getcwd()))
        (out, err) = subprocess.Popen(cmd, env=env,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE).communicate()
        out = out.decode('utf-8')
        err = err.decode('utf-8')

        # find the files that gcov created
        gcov_files = {'active': [], 'filter': [], 'exclude': []}
        for line in out.splitlines():
            found = output_re.search(line.strip())
            if found is not None:
                fname = found.group(1)
                if not options.gcov_filter.match(fname):
                    if options.verbose:
                        sys.stdout.write("Filtering gcov file %s\n" % fname)
                    gcov_files['filter'].append(fname)
                    continue
                exclude = False
                for i in range(0, len(options.gcov_exclude)):
                    if options.gcov_exclude[i].match(options.gcov_filter.sub('', fname)) or \
                            options.gcov_exclude[i].match(fname) or \
                            options.gcov_exclude[i].match(os.path.abspath(fname)):
                        exclude = True
                        break
                if not exclude:
                    gcov_files['active'].append(fname)
                elif options.verbose:
                    sys.stdout.write("Excluding gcov file %s\n" % fname)
                    gcov_files['exclude'].append(fname)

        if source_re.search(err):
            # gcov tossed errors: try the next potential_wd
            errors.append(err)
        else:
            # Process *.gcov files
            for fname in gcov_files['active']:
                process_gcov_data(fname, covdata, options)
            Done = True

        if not options.keep:
            for group in gcov_files.values():
                for fname in group:
                    if os.path.exists(fname):
                        # Only remove files that actually exist.
                        os.remove(fname)

    os.chdir(starting_dir)
    if options.delete:
        if not abs_filename.endswith('gcno'):
            os.remove(abs_filename)

    if not Done:
        sys.stderr.write(
            "(WARNING) GCOV produced the following errors processing %s:\n"
            "\t %s"
            "\t(gcovr could not infer a working directory that resolved it.)\n"
            % (filename, "\t ".join(errors)))


#
# Produce the classic gcovr text report
#
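# The report is a fixed-width table: a 40-character file column, the number
# of lines (or, with --branches, branches), the number executed, the
# coverage percentage, and the "Missing" ranges. A data row looks roughly
# like this (values are illustrative):
#
#   src/foo.cpp                                   80      60    75% 10-14,22
#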
def print_text_report(covdata):
    def _num_uncovered(key):
        (total, covered, percent) = covdata[key].coverage()
        return total - covered

    def _percent_uncovered(key):
        (total, covered, percent) = covdata[key].coverage()
        if covered:
            return -1.0 * covered / total
        else:
            return total or 1e6

    def _alpha(key):
        return key

    if options.output:
        OUTPUT = open(options.output, 'w')
    else:
        OUTPUT = sys.stdout
    total_lines = 0
    total_covered = 0

    # Header
    OUTPUT.write("-" * 78 + '\n')
    a = options.show_branch and "Branches" or "Lines"
    b = options.show_branch and "Taken" or "Exec"
    c = "Missing"
    OUTPUT.write("File".ljust(40) + a.rjust(8) + b.rjust(8) + " Cover " + c + "\n")
    OUTPUT.write("-" * 78 + '\n')

    # Data
    keys = list(covdata.keys())
    keys.sort(key=options.sort_uncovered and _num_uncovered or
              options.sort_percent and _percent_uncovered or _alpha)
    for key in keys:
        (t, n, txt) = covdata[key].summary()
        total_lines += t
        total_covered += n
        OUTPUT.write(txt + '\n')

    # Footer & summary
    OUTPUT.write("-" * 78 + '\n')
    percent = total_lines and str(int(100.0 * total_covered / total_lines)) or "--"
    OUTPUT.write("TOTAL".ljust(40) + str(total_lines).rjust(8) +
                 str(total_covered).rjust(8) + str(percent).rjust(6) + "%" + '\n')
    OUTPUT.write("-" * 78 + '\n')

    # Close logfile
    if options.output:
        OUTPUT.close()


#
# Produce an XML report in the Cobertura format
#
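# The emitted document nests roughly as follows (attributes abbreviated;
# one <class> per source file, grouped into a <package> per directory):
#
#   <coverage line-rate="..." branch-rate="..." timestamp="..." version="...">
#     <sources><source>...</source></sources>
#     <packages>
#       <package name="..." line-rate="..." branch-rate="..." complexity="0.0">
#         <classes>
#           <class name="..." filename="..." line-rate="..." branch-rate="...">
#             <lines>
#               <line number="..." hits="..." branch="true|false" ...>
#                 <conditions><condition .../></conditions>  (branch lines only)
#               </line>
#             </lines>
#           </class>
#         </classes>
#       </package>
#     </packages>
#   </coverage>
#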
def print_xml_report(covdata):
    branchTotal = 0
    branchCovered = 0
    lineTotal = 0
    lineCovered = 0

    options.show_branch = True
    for key in covdata.keys():
        (total, covered, percent) = covdata[key].coverage()
        branchTotal += total
        branchCovered += covered

    options.show_branch = False
    for key in covdata.keys():
        (total, covered, percent) = covdata[key].coverage()
        lineTotal += total
        lineCovered += covered

    impl = xml.dom.minidom.getDOMImplementation()
    docType = impl.createDocumentType(
        "coverage", None,
        "http://cobertura.sourceforge.net/xml/coverage-03.dtd")
    doc = impl.createDocument(None, "coverage", docType)
    root = doc.documentElement
    root.setAttribute("line-rate", lineTotal == 0 and '0.0' or
                      str(float(lineCovered) / lineTotal))
    root.setAttribute("branch-rate", branchTotal == 0 and '0.0' or
                      str(float(branchCovered) / branchTotal))
    root.setAttribute("timestamp", str(int(time.time())))
    root.setAttribute("version", "gcovr %s" % (version_str(),))

    # Generate the <sources> element: this is either the root directory
    # (specified by --root), or the CWD.
    sources = doc.createElement("sources")
    root.appendChild(sources)

    # Generate the coverage output (on a per-package basis)
    packageXml = doc.createElement("packages")
    root.appendChild(packageXml)
    packages = {}
    source_dirs = set()

    keys = list(covdata.keys())
    keys.sort()
    for f in keys:
        data = covdata[f]
        dir = options.filter.sub('', f)
        if f.endswith(dir):
            src_path = f[:-1 * len(dir)]
            if len(src_path) > 0:
                while dir.startswith(os.path.sep):
                    src_path += os.path.sep
                    dir = dir[len(os.path.sep):]
                source_dirs.add(src_path)
        else:
            # Do no truncation if the filter does not start matching at
            # the beginning of the string
            dir = f
        (dir, fname) = os.path.split(dir)

        package = packages.setdefault(
            dir, [doc.createElement("package"), {},
                  0, 0, 0, 0])
        c = doc.createElement("class")
        lines = doc.createElement("lines")
        c.appendChild(lines)

        class_lines = 0
        class_hits = 0
        class_branches = 0
        class_branch_hits = 0
        for line in data.all_lines:
            hits = data.covered.get(line, 0)
            class_lines += 1
            if hits > 0:
                class_hits += 1
            l = doc.createElement("line")
            l.setAttribute("number", str(line))
            l.setAttribute("hits", str(hits))
            branches = data.branches.get(line)
            if branches is None:
                l.setAttribute("branch", "false")
            else:
                b_hits = 0
                for v in branches.values():
                    if v > 0:
                        b_hits += 1
                coverage = 100 * b_hits / len(branches)
                l.setAttribute("branch", "true")
                l.setAttribute("condition-coverage",
                               "%i%% (%i/%i)" %
                               (coverage, b_hits, len(branches)))
                cond = doc.createElement('condition')
                cond.setAttribute("number", "0")
                cond.setAttribute("type", "jump")
                cond.setAttribute("coverage", "%i%%" % (coverage))
                class_branch_hits += b_hits
                class_branches += float(len(branches))
                conditions = doc.createElement("conditions")
                conditions.appendChild(cond)
                l.appendChild(conditions)
            lines.appendChild(l)

        className = fname.replace('.', '_')
        c.setAttribute("name", className)
        c.setAttribute("filename", os.path.join(dir, fname))
        c.setAttribute("line-rate", str(class_hits / (1.0 * class_lines or 1.0)))
        c.setAttribute("branch-rate",
                       str(class_branch_hits / (1.0 * class_branches or 1.0)))
        c.setAttribute("complexity", "0.0")
        package[1][className] = c
        package[2] += class_hits
        package[3] += class_lines
        package[4] += class_branch_hits
        package[5] += class_branches

    for packageName, packageData in packages.items():
        package = packageData[0]
        packageXml.appendChild(package)
        classes = doc.createElement("classes")
        package.appendChild(classes)
        classNames = list(packageData[1].keys())
        classNames.sort()
        for className in classNames:
            classes.appendChild(packageData[1][className])
        package.setAttribute("name", packageName.replace(os.sep, '.'))
        package.setAttribute("line-rate",
                             str(packageData[2] / (1.0 * packageData[3] or 1.0)))
        package.setAttribute("branch-rate",
                             str(packageData[4] / (1.0 * packageData[5] or 1.0)))
        package.setAttribute("complexity", "0.0")

    # Populate the <sources> element: this is either the root directory
    # (specified by --root), or relative directories based
    # on the filter, or the CWD
    if options.root is not None:
        source = doc.createElement("source")
        source.appendChild(doc.createTextNode(options.root.strip()))
        sources.appendChild(source)
    elif len(source_dirs) > 0:
        cwd = os.getcwd()
        for d in source_dirs:
            source = doc.createElement("source")
            if d.startswith(cwd):
                reldir = d[len(cwd):].lstrip(os.path.sep)
            elif cwd.startswith(d):
                i = 1
                while normpath(d) != normpath(os.path.join(*tuple([cwd] + ['..'] * i))):
                    i += 1
                reldir = os.path.join(*tuple(['..'] * i))
            else:
                reldir = d
            source.appendChild(doc.createTextNode(reldir.strip()))
            sources.appendChild(source)
    else:
        source = doc.createElement("source")
        source.appendChild(doc.createTextNode('.'))
        sources.appendChild(source)

    if options.prettyxml:
        import textwrap
        lines = doc.toprettyxml(" ").split('\n')
        for i in range(len(lines)):
            n = 0
            while n < len(lines[i]) and lines[i][n] == " ":
                n += 1
            lines[i] = "\n".join(textwrap.wrap(
                lines[i], 78, break_long_words=False, break_on_hyphens=False,
                subsequent_indent=" " + n * " "))
        xmlString = "\n".join(lines)
        #print textwrap.wrap(doc.toprettyxml(" "), 80)
    else:
        xmlString = doc.toprettyxml(indent="")
    if options.output is None:
        sys.stdout.write(xmlString + '\n')
    else:
        OUTPUT = open(options.output, 'w')
        OUTPUT.write(xmlString + '\n')
        OUTPUT.close()


##
## MAIN
##

#
# Create option parser
#
parser = OptionParser()
parser.add_option("--version",
                  help="Print the version number, then exit",
                  action="store_true",
                  dest="version",
                  default=False)
parser.add_option("-v", "--verbose",
                  help="Print progress messages",
                  action="store_true",
                  dest="verbose",
                  default=False)
parser.add_option('--object-directory',
                  help="Specify the directory that contains the gcov data files. gcovr must be able to identify the path between the *.gcda files and the directory where gcc was originally run. Normally, gcovr can guess correctly. This option overrides gcovr's normal path detection and can specify either the path from gcc to the gcda file (i.e. what was passed to gcc's '-o' option), or the path from the gcda file to gcc's original working directory.",
                  action="store",
                  dest="objdir",
                  default=None)
parser.add_option("-o", "--output",
                  help="Print output to this filename",
                  action="store",
                  dest="output",
                  default=None)
parser.add_option("-k", "--keep",
                  help="Keep the temporary *.gcov files generated by gcov. By default, these are deleted.",
                  action="store_true",
                  dest="keep",
                  default=False)
parser.add_option("-d", "--delete",
                  help="Delete the coverage files after they are processed. These are generated by the user's program, and by default gcovr does not remove these files.",
                  action="store_true",
                  dest="delete",
                  default=False)
parser.add_option("-f", "--filter",
                  help="Keep only the data files that match this regular expression",
                  action="store",
                  dest="filter",
                  default=None)
parser.add_option("-e", "--exclude",
                  help="Exclude data files that match this regular expression",
                  action="append",
                  dest="exclude",
                  default=[])
parser.add_option("--gcov-filter",
                  help="Keep only gcov data files that match this regular expression",
                  action="store",
                  dest="gcov_filter",
                  default=None)
parser.add_option("--gcov-exclude",
                  help="Exclude gcov data files that match this regular expression",
                  action="append",
                  dest="gcov_exclude",
                  default=[])
parser.add_option("-r", "--root",
                  help="Defines the root directory. This is used to filter the files, and to standardize the output.",
                  action="store",
                  dest="root",
                  default=None)
parser.add_option("-x", "--xml",
                  help="Generate XML instead of the normal tabular output.",
                  action="store_true",
                  dest="xml",
                  default=False)
parser.add_option("--xml-pretty",
                  help="Generate pretty XML instead of the normal dense format.",
                  action="store_true",
                  dest="prettyxml",
                  default=False)
parser.add_option("-b", "--branches",
                  help="Tabulate the branch coverage instead of the line coverage.",
                  action="store_true",
                  dest="show_branch",
                  default=None)
parser.add_option("-u", "--sort-uncovered",
                  help="Sort entries by increasing number of uncovered lines.",
                  action="store_true",
                  dest="sort_uncovered",
                  default=None)
parser.add_option("-p", "--sort-percentage",
                  help="Sort entries by decreasing percentage of covered lines.",
                  action="store_true",
                  dest="sort_percent",
                  default=None)
parser.usage = "gcovr [options]"
parser.description = "A utility to run gcov and generate a simple report that summarizes the coverage"

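# Typical invocations (option names as registered above; the script is
# assumed to be installed on the PATH as "gcovr"):
#
#   gcovr -r .                                # text summary for a project
#   gcovr -r . --xml-pretty -o coverage.xml   # Cobertura-style XML report
#
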
#
# Process options
#
(options, args) = parser.parse_args(args=sys.argv)
if options.version:
    sys.stdout.write(
        "gcovr %s\n"
        "\n"
        "Copyright (2008) Sandia Corporation. Under the terms of Contract\n"
        "DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government\n"
        "retains certain rights in this software.\n"
        % (version_str(),))
    sys.exit(0)

if options.objdir:
    tmp = options.objdir.replace('/', os.sep).replace('\\', os.sep)
    while os.sep + os.sep in tmp:
        tmp = tmp.replace(os.sep + os.sep, os.sep)
    if normpath(options.objdir) != tmp:
        sys.stderr.write(
            "(WARNING) relative referencing in --object-directory.\n"
            "\tthis could cause strange errors when gcovr attempts to\n"
            "\tidentify the original gcc working directory.\n")

#
# Setup filters
#
for i in range(0, len(options.exclude)):
    options.exclude[i] = re.compile(options.exclude[i])

if options.filter is not None:
    options.filter = re.compile(options.filter)
elif options.root is not None:
    if not options.root:
        sys.stderr.write(
            "(ERROR) empty --root option.\n"
            "\tRoot specifies the path to the root directory of your project.\n"
            "\tThis option cannot be an empty string.\n")
        sys.exit(1)
    options.filter = re.compile(re.escape(os.path.abspath(options.root) + os.sep))
if options.filter is None:
    options.filter = re.compile('')
#
for i in range(0, len(options.gcov_exclude)):
    options.gcov_exclude[i] = re.compile(options.gcov_exclude[i])
if options.gcov_filter is not None:
    options.gcov_filter = re.compile(options.gcov_filter)
else:
    options.gcov_filter = re.compile('')

#
# Get data files
#
if len(args) == 1:
    datafiles = get_datafiles(["."], options)
else:
    datafiles = get_datafiles(args[1:], options)

#
# Get coverage data
#
covdata = {}
for file in datafiles:
    process_datafile(file, covdata, options)
if options.verbose:
    sys.stdout.write("Gathered coverage data for " + str(len(covdata)) + " files\n")

#
# Print report
#
if options.xml or options.prettyxml:
    print_xml_report(covdata)
else:
    print_text_report(covdata)