scala update, part 2

* Update to the new Scala API
* Remove comments support, which was not used anyway
* Use the scala-maven-plugin to generate Javadoc
This commit is contained in:
Andreas Dangel
2018-02-18 20:23:32 +01:00
parent 74ae8d0d6e
commit 9392239c45
2 changed files with 26 additions and 69 deletions

View File

@ -41,9 +41,7 @@
<groupId>net.alchim31.maven</groupId>
<artifactId>scala-maven-plugin</artifactId>
<configuration>
<jvmArgs>
<jvmArg>-Dscalac.patmat.analysisBudget=off</jvmArg>
</jvmArgs>
<addScalacArgs>-deprecation</addScalacArgs>
<scalaVersion>${scala.version}</scalaVersion>
</configuration>
<executions>
@ -55,6 +53,12 @@
<goal>compile</goal>
</goals>
</execution>
<execution>
<id>attach-javadocs</id>
<goals>
<goal>doc-jar</goal>
</goals>
</execution>
</executions>
</plugin>
@ -65,6 +69,21 @@
<suppressionsLocation>pmd-scala-checkstyle-suppressions.xml</suppressionsLocation>
</configuration>
</plugin>
<!-- Disabling the default javadoc plugin - we use scala-maven-plugin instead -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
<executions>
<execution>
<id>attach-javadocs</id>
<phase>none</phase>
<goals>
<goal>jar</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>

View File

@ -19,7 +19,7 @@
*/
package org.sonar.plugins.scala.compiler
import collection.mutable.ListBuffer
import scala.collection.mutable.Buffer
import org.sonar.plugins.scala.language.{Comment, CommentType}
import scala.reflect.io.AbstractFile
@ -34,7 +34,7 @@ import scala.reflect.internal.util.BatchSourceFile
*/
class Lexer {
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import Compiler._
def getTokens(code: String): java.util.List[Token] = {
@ -49,7 +49,7 @@ class Lexer {
private def tokenize(unit: CompilationUnit): java.util.List[Token] = {
val scanner = new syntaxAnalyzer.UnitScanner(unit)
val tokens = ListBuffer[Token]()
val tokens = Buffer[Token]()
scanner.init()
while (scanner.token != scala.tools.nsc.ast.parser.Tokens.EOF) {
@ -60,69 +60,7 @@ class Lexer {
tokens += Token(scanner.token, linenr, tokenVal)
scanner.nextToken()
}
tokens
tokens.asJava
}
def getComments(code: String): java.util.List[Comment] = {
val unit = new CompilationUnit(new BatchSourceFile("", code.toCharArray))
tokenizeComments(unit)
}
def getCommentsOfFile(path: String): java.util.List[Comment] = {
val unit = new CompilationUnit(new BatchSourceFile(AbstractFile.getFile(path)))
tokenizeComments(unit)
}
private def tokenizeComments(unit: CompilationUnit): java.util.List[Comment] = {
val comments = ListBuffer[Comment]()
val scanner = new syntaxAnalyzer.UnitScanner(unit) {
private var lastDocCommentRange: Option[Range] = None
private var foundToken = false
override def nextToken() {
super.nextToken()
foundToken = token != 0
}
override def foundComment(value: String, start: Int, end: Int) = {
super.foundComment(value, start, end)
def isHeaderComment(value: String) = {
!foundToken && comments.isEmpty && value.trim().startsWith("/*")
}
lastDocCommentRange match {
case Some(r: Range) => {
if (r.start != start || r.end != end) {
comments += new Comment(value, CommentType.NORMAL)
}
}
case None => {
if (isHeaderComment(value)) {
comments += new Comment(value, CommentType.HEADER)
} else {
comments += new Comment(value, CommentType.NORMAL)
}
}
}
}
override def foundDocComment(value: String, start: Int, end: Int) = {
super.foundDocComment(value, start, end)
comments += new Comment(value, CommentType.DOC)
lastDocCommentRange = Some(Range(start, end))
}
}
scanner.init()
while (scanner.token != scala.tools.nsc.ast.parser.Tokens.EOF) {
scanner.nextToken()
}
comments
}
}