diff --git a/docs/pages/release_notes.md b/docs/pages/release_notes.md index d94d0e0f65..7985ac14f4 100644 --- a/docs/pages/release_notes.md +++ b/docs/pages/release_notes.md @@ -154,9 +154,10 @@ in the Migration Guide. #### API Changes -**Internalized classes and interfaces** +**Internalized classes, interfaces and methods** -The following classes have been marked as @InternalApi before and are now moved into a `internal` package: +The following classes/methods have been marked as @InternalApi before and are now moved into an `internal` +package or made package private and are not accessible anymore. * pmd-core * {%jdoc core::cache.internal.AbstractAnalysisCache %} (now package private) @@ -169,6 +170,7 @@ The following classes have been marked as @InternalApi before and are no * {%jdoc core::cache.internal.FileAnalysisCache %} * {%jdoc core::cache.internal.NoopAnalysisCache %} * {%jdoc core::util.internal.ResourceLoader %} + * {%jdoc core::cpd.Tokens#Tokens() %} (the constructor is now package private) **Removed classes and methods (previously deprecated)** diff --git a/pmd-core/src/main/java/net/sourceforge/pmd/cpd/Tokenizer.java b/pmd-core/src/main/java/net/sourceforge/pmd/cpd/Tokenizer.java index e1bd1e84e2..5c37848c0c 100644 --- a/pmd-core/src/main/java/net/sourceforge/pmd/cpd/Tokenizer.java +++ b/pmd-core/src/main/java/net/sourceforge/pmd/cpd/Tokenizer.java @@ -27,4 +27,10 @@ public interface Tokenizer { tokenizer.tokenize(textDocument, tf); } } + + static Tokens tokenize(Tokenizer tokenizer, TextDocument textDocument) throws IOException { + Tokens tokens = new Tokens(); + tokenize(tokenizer, textDocument, tokens); + return tokens; + } } diff --git a/pmd-core/src/main/java/net/sourceforge/pmd/cpd/Tokens.java b/pmd-core/src/main/java/net/sourceforge/pmd/cpd/Tokens.java index 3f314121e5..f87877f95a 100644 --- a/pmd-core/src/main/java/net/sourceforge/pmd/cpd/Tokens.java +++ b/pmd-core/src/main/java/net/sourceforge/pmd/cpd/Tokens.java @@ -13,7 +13,6 @@ import 
java.util.Map.Entry; import org.checkerframework.checker.nullness.qual.NonNull; import org.checkerframework.checker.nullness.qual.Nullable; -import net.sourceforge.pmd.annotation.InternalApi; import net.sourceforge.pmd.lang.ast.TokenMgrError; import net.sourceforge.pmd.lang.document.FileId; import net.sourceforge.pmd.lang.document.TextDocument; @@ -22,7 +21,6 @@ import net.sourceforge.pmd.lang.document.TextDocument; * Global token collector for CPD. This is populated by lexing all files, * after which the match algorithm proceeds. */ -@InternalApi public class Tokens { // This stores all the token entries recorded during the run. @@ -32,12 +30,12 @@ public class Tokens { private int curImageId = 1; /** - * Create a new instance, is internal. + * Create a new instance. + * + * @apiNote Internal API */ - @InternalApi - @Deprecated // just to get a warning - public Tokens() { // NOPMD: UnnecessaryConstructor - constructor is needed to place the annotations - // constructor is needed to place the annotations + Tokens() { + // constructor is package private } private void add(TokenEntry tokenEntry) { diff --git a/pmd-java/src/test/java/net/sourceforge/pmd/lang/java/cpd/JavaTokenizerTest.java b/pmd-java/src/test/java/net/sourceforge/pmd/lang/java/cpd/JavaTokenizerTest.java index 17a873fc1c..fd7e56f9f4 100644 --- a/pmd-java/src/test/java/net/sourceforge/pmd/lang/java/cpd/JavaTokenizerTest.java +++ b/pmd-java/src/test/java/net/sourceforge/pmd/lang/java/cpd/JavaTokenizerTest.java @@ -13,7 +13,6 @@ import org.junit.jupiter.api.Test; import net.sourceforge.pmd.cpd.CpdLanguageProperties; import net.sourceforge.pmd.cpd.Tokenizer; -import net.sourceforge.pmd.cpd.Tokens; import net.sourceforge.pmd.cpd.test.CpdTextComparisonTest; import net.sourceforge.pmd.cpd.test.LanguagePropertyConfig; import net.sourceforge.pmd.lang.ast.TokenMgrError; @@ -41,12 +40,10 @@ class JavaTokenizerTest extends CpdTextComparisonTest { @Test void testLexExceptionLocation() { Tokenizer tokenizer = 
newTokenizer(defaultProperties()); - Tokens tokens = new Tokens(); TokenMgrError lexException = assertThrows(TokenMgrError.class, () -> Tokenizer.tokenize(tokenizer, // note: the source deliberately contains an unbalanced quote, unterminated string literal - TextDocument.readOnlyString("class F {\n String s=\"abc\";\"\n}\n", FileId.UNKNOWN, getLanguage().getDefaultVersion()), - tokens) + TextDocument.readOnlyString("class F {\n String s=\"abc\";\"\n}\n", FileId.UNKNOWN, getLanguage().getDefaultVersion())) ); // this shouldn't throw a IllegalArgumentException assertThat(lexException.getMessage(), containsString("at line 3, column 1")); diff --git a/pmd-lang-test/src/main/kotlin/net/sourceforge/pmd/cpd/test/CpdTextComparisonTest.kt b/pmd-lang-test/src/main/kotlin/net/sourceforge/pmd/cpd/test/CpdTextComparisonTest.kt index 5c1b1f1c12..6b53b3ef60 100644 --- a/pmd-lang-test/src/main/kotlin/net/sourceforge/pmd/cpd/test/CpdTextComparisonTest.kt +++ b/pmd-lang-test/src/main/kotlin/net/sourceforge/pmd/cpd/test/CpdTextComparisonTest.kt @@ -5,17 +5,17 @@ package net.sourceforge.pmd.cpd.test import io.kotest.assertions.throwables.shouldThrow -import net.sourceforge.pmd.cpd.* -import net.sourceforge.pmd.lang.Language +import net.sourceforge.pmd.cpd.CpdCapableLanguage +import net.sourceforge.pmd.cpd.TokenEntry +import net.sourceforge.pmd.cpd.Tokenizer +import net.sourceforge.pmd.cpd.Tokens import net.sourceforge.pmd.lang.LanguagePropertyBundle import net.sourceforge.pmd.lang.LanguageRegistry import net.sourceforge.pmd.lang.ast.TokenMgrError -import net.sourceforge.pmd.lang.document.TextDocument -import net.sourceforge.pmd.lang.document.TextFile import net.sourceforge.pmd.lang.document.FileId +import net.sourceforge.pmd.lang.document.TextDocument import net.sourceforge.pmd.test.BaseTextComparisonTest import org.apache.commons.lang3.StringUtils -import java.util.* /** * CPD test comparing a dump of a file against a saved baseline. 
@@ -173,10 +173,7 @@ abstract class CpdTextComparisonTest( FileData(fileName = fileName, fileText = text) fun tokenize(tokenizer: Tokenizer, fileData: FileData): Tokens = - Tokens().also { tokens -> - val source = sourceCodeOf(fileData) - Tokenizer.tokenize(tokenizer, source, tokens) - } + Tokenizer.tokenize(tokenizer, sourceCodeOf(fileData)) private companion object { const val Indent = " "