From 110184e54276a4a89107348e45a473cde3f99fe4 Mon Sep 17 00:00:00 2001
From: Jan van Nunen
Date: Thu, 15 Jan 2015 15:32:19 +0100
Subject: [PATCH] Made exceptions of the Scala tokenizer non-fatal when CPD is
 executed with the '--skipLexicalErrors' command line option.

---
 .../java/net/sourceforge/pmd/cpd/CPD.java     |  2 +-
 .../plugins/scala/cpd/ScalaTokenizer.java     | 33 ++++++++++++-------
 2 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/pmd-core/src/main/java/net/sourceforge/pmd/cpd/CPD.java b/pmd-core/src/main/java/net/sourceforge/pmd/cpd/CPD.java
index a467037055..21624a8bb0 100644
--- a/pmd-core/src/main/java/net/sourceforge/pmd/cpd/CPD.java
+++ b/pmd-core/src/main/java/net/sourceforge/pmd/cpd/CPD.java
@@ -151,7 +151,7 @@ public class CPD {
         try {
             addAndThrowLexicalError(sourceCode);
         } catch (TokenMgrError e) {
-            System.err.println("Skipping " + e.getMessage());
+            System.err.println("Skipping " + sourceCode.getFileName() + ". Reason: " + e.getMessage());
             tokens.getTokens().clear();
             tokens.getTokens().addAll(savedTokenEntry.restore());
         }
diff --git a/pmd-scala/src/main/java/org/sonar/plugins/scala/cpd/ScalaTokenizer.java b/pmd-scala/src/main/java/org/sonar/plugins/scala/cpd/ScalaTokenizer.java
index c18bfe305d..a28f1bc942 100644
--- a/pmd-scala/src/main/java/org/sonar/plugins/scala/cpd/ScalaTokenizer.java
+++ b/pmd-scala/src/main/java/org/sonar/plugins/scala/cpd/ScalaTokenizer.java
@@ -25,6 +25,7 @@ import net.sourceforge.pmd.cpd.SourceCode;
 import net.sourceforge.pmd.cpd.TokenEntry;
 import net.sourceforge.pmd.cpd.Tokenizer;
 import net.sourceforge.pmd.cpd.Tokens;
+import net.sourceforge.pmd.lang.ast.TokenMgrError;
 
 import org.sonar.plugins.scala.compiler.Lexer;
 import org.sonar.plugins.scala.compiler.Token;
@@ -36,20 +37,28 @@ import org.sonar.plugins.scala.compiler.Token;
  */
 public final class ScalaTokenizer implements Tokenizer {
 
-  public void tokenize(SourceCode source, Tokens cpdTokens) {
-    String filename = source.getFileName();
+  public void tokenize(SourceCode source, Tokens cpdTokens) {
+    String filename = source.getFileName();
 
-    Lexer lexer = new Lexer();
-    List<Token> tokens = lexer.getTokensOfFile(filename);
-    for (Token token : tokens) {
-      String tokenVal =
-          token.tokenVal() != null ? token.tokenVal() : Integer.toString(token.tokenType());
+    try {
+      Lexer lexer = new Lexer();
+      List<Token> tokens = lexer.getTokensOfFile(filename);
+      for (Token token : tokens) {
+        String tokenVal =
+            token.tokenVal() != null ? token.tokenVal() : Integer.toString(token.tokenType());
 
-      TokenEntry cpdToken = new TokenEntry(tokenVal, filename, token.line());
-      cpdTokens.add(cpdToken);
+        TokenEntry cpdToken = new TokenEntry(tokenVal, filename, token.line());
+        cpdTokens.add(cpdToken);
+      }
+      cpdTokens.add(TokenEntry.getEOF());
+    } catch (RuntimeException e) {
+      e.printStackTrace();
+      // Wrap exceptions of the Scala tokenizer in a TokenMgrError, so they are correctly handled
+      // when CPD is executed with the '--skipLexicalErrors' command line option
+      throw new TokenMgrError(
+          "Lexical error in file " + filename + ". The Scala tokenizer exited with error: " + e.getMessage(),
+          TokenMgrError.LEXICAL_ERROR);
+    }
   }
-    cpdTokens.add(TokenEntry.getEOF());
-  }
-
 }
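A minimal sketch of how the wrapped error surfaces when the tokenizer is
driven directly (the demo class, the file name, and the FileCodeLoader
construction are illustrative assumptions; the Tokenizer, SourceCode, Tokens,
and TokenMgrError APIs are the ones used in the patch above):

    import net.sourceforge.pmd.cpd.SourceCode;
    import net.sourceforge.pmd.cpd.Tokens;
    import net.sourceforge.pmd.lang.ast.TokenMgrError;

    import org.sonar.plugins.scala.cpd.ScalaTokenizer;

    public class ScalaTokenizerDemo {
        public static void main(String[] args) {
            // Load a Scala source file the way CPD does (assumed path and encoding).
            SourceCode source = new SourceCode(
                    new SourceCode.FileCodeLoader(new java.io.File("Broken.scala"), "UTF-8"));
            Tokens cpdTokens = new Tokens();
            try {
                new ScalaTokenizer().tokenize(source, cpdTokens);
            } catch (TokenMgrError e) {
                // With this patch, any RuntimeException from the Scala lexer arrives
                // here as a TokenMgrError; CPD run with --skipLexicalErrors catches it
                // and reports the file as skipped instead of aborting the whole run.
                System.err.println("Skipping " + source.getFileName()
                        + ". Reason: " + e.getMessage());
            }
        }
    }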