diff --git a/buildenv/jenkins/JenkinsfileBase b/buildenv/jenkins/JenkinsfileBase index fdc1e18a40..050082d371 100644 --- a/buildenv/jenkins/JenkinsfileBase +++ b/buildenv/jenkins/JenkinsfileBase @@ -199,7 +199,7 @@ def setupParallelEnv() { echo 'Exception: ' + e.toString() echo 'Cannot run copyArtifacts from test.getDependency. Skipping copyArtifacts...' } - + String unsetLLP = "" //unset LD_LIBRARY_PATH workaround for issue https://github.com/adoptium/infrastructure/issues/2934 if (JDK_IMPL == 'hotspot' && JDK_VERSION == '8' && PLATFORM.contains('alpine-linux')) { @@ -387,9 +387,9 @@ def setup() { if (!env.SPEC.startsWith('aix') && !env.SPEC.startsWith('zos')) { riURL = "${artifactoryUrl}/${repoForRi}/Latest/${PLATFORM}/${JDK_VERSION}" } - } + } CUSTOMIZED_SDK_URL_OPTION = "-c '${params.CUSTOMIZED_SDK_URL} ${riURL}'" - } + } } else { CUSTOMIZED_SDK_URL_OPTION = "" } @@ -677,7 +677,7 @@ def runTest( ) { sh "/usr/bin/X11/X -force -vfb -x abx -x dbe -x GLX -secIP 000 :0 &" env.DISPLAY = "unix:0" echo "env.DISPLAY is ${env.DISPLAY}" - + } else if (env.SPEC.startsWith('sunos')) { sh "/usr/X11/bin/Xvfb :2 -screen 0 1024x768x24 &" env.DISPLAY = ":2" @@ -745,7 +745,7 @@ def checkTestResults(results) { summary.appendText("NO TEST FOUND!", false) return } - + if (results["FAILED"] != 0) { currentBuild.result = 'UNSTABLE' echo 'There were test failures, set build result to UNSTABLE.' @@ -932,7 +932,7 @@ def terminateTestProcesses() { def statusCode=sh(script:"aqa-tests/terminateTestProcesses.sh ${env.USER} 2>&1", returnStatus:true) if (statusCode != 0) { echo "rc = ${statusCode}, Unable to terminate all processes." - } + } } def getJDKImpl(jvm_version) { @@ -987,7 +987,7 @@ def archiveFile(filename, forceStoreOnJenkins) { if (!params.ARTIFACTORY_SERVER || forceStoreOnJenkins) { echo "Saving ${filename} file on jenkins." archiveArtifacts artifacts: filename, fingerprint: true, allowEmptyArchive: true - } + } if (params.ARTIFACTORY_SERVER) { def pattern = "${env.WORKSPACE}/${filename}" uploadToArtifactory(pattern) @@ -998,12 +998,12 @@ def uploadToArtifactory(pattern, artifactoryUploadDir="") { if (params.ARTIFACTORY_SERVER) { def server = Artifactory.server params.ARTIFACTORY_SERVER def artifactoryRepo = params.ARTIFACTORY_REPO ? params.ARTIFACTORY_REPO : "sys-rt-generic-local" - + if (artifactoryRepo.contains(',')) { String[] repos = artifactoryRepo.split(",") artifactoryRepo = repos[0].trim() } - + def artifactoryRoorDir = params.ARTIFACTORY_ROOT_DIR ? 
params.ARTIFACTORY_ROOT_DIR : getJenkinsDomain() if (artifactoryRoorDir.endsWith("/")) { artifactoryRoorDir = artifactoryRoorDir.substring(0, artifactoryRoorDir.length() - 1); @@ -1164,11 +1164,11 @@ def addFailedTestsGrinderLink(paths=""){ def customizedTestCases = [:] if (jdkFailedTestCaseList) { customizedTestCases['jdk'] = "${jdkFailedTestCaseList}" - } + } if (hotspotFailedTestCaseList) { customizedTestCases['hotspot'] = "${hotspotFailedTestCaseList}" } - customizedTestCases.each { target, testcases -> + customizedTestCases.each { target, testcases -> def tempTestCases = testcases.substring(0, testcases.length() - 1) tempTestCases = tempTestCases.split(' ').toUnique().join('+') def customURL = url.replace(env.FAILED_TEST_TARGET, "TARGET=${target}_custom") diff --git a/buildenv/jenkins/aqaTestPipeline.groovy b/buildenv/jenkins/aqaTestPipeline.groovy index 6ef607e178..1e641b3295 100644 --- a/buildenv/jenkins/aqaTestPipeline.groovy +++ b/buildenv/jenkins/aqaTestPipeline.groovy @@ -57,7 +57,7 @@ JDK_VERSIONS.each { JDK_VERSION -> if (SDK_RESOURCE == "customized" ) { if (params.TOP_LEVEL_SDK_URL) { - // example: /job/build-scripts/job/openjdk11-pipeline/123/artifact/target/linux/aarch64/openj9/*_aarch64_linux_*.tar.gz/*zip*/openj9.zip + // example: /job/build-scripts/job/openjdk11-pipeline/123/artifact/target/linux/aarch64/openj9/*_aarch64_linux_*.tar.gz/*zip*/openj9.zip download_url = params.TOP_LEVEL_SDK_URL + "artifact/target/${os}/${arch}/${params.VARIANT}/${filter}/*zip*/${params.VARIANT}.zip" } } else if (SDK_RESOURCE == "releases") { diff --git a/buildenv/jenkins/openjdk_tests b/buildenv/jenkins/openjdk_tests index 3e58c48047..aa070a1a28 100644 --- a/buildenv/jenkins/openjdk_tests +++ b/buildenv/jenkins/openjdk_tests @@ -166,7 +166,7 @@ timestamps{ // trigger xxx_imageUpload test job, then xxx_imagePull test job def commonLabel = "sw.tool.podman&&sw.tool.container.criu" if (params.LABEL_ADDITION) { - commonLabel += "&&${params.LABEL_ADDITION}" + commonLabel += "&&${params.LABEL_ADDITION}" } def imageUploadMap = [ 'x86-64_linux' : [ @@ -315,7 +315,7 @@ timestamps{ } else { // IF no nodes are idle we will check if there is supported virtual agent // When Parallel the race condition could happen. Say the number of multiply jobs is larger than the available nodes the query's result may be delayed and wrong - // In this case jobs will be fooled to fall back to wait local busy nodes. + // In this case jobs will be fooled to fall back to wait local busy nodes. dynamicAgents = PLATFORM_MAP[params.PLATFORM]["DynamicAgents"] ? PLATFORM_MAP[params.PLATFORM]["DynamicAgents"] : [] println "dynamicAgents: ${dynamicAgents}" @@ -371,7 +371,7 @@ timestamps{ echo "Done with main node" } echo "Done with RELATED_NODES: ${params.RELATED_NODES}" - } + } } } else { assert false : "Cannot find RELATED_NODES: ${params.RELATED_NODES}." @@ -396,7 +396,7 @@ def runTest() { try { def retry_count = 0 def sleep_time = 180 - + if (params.PLATFORM.contains('zos')) { /* Ensure correct CC env */ env._CC_CCMODE = '1' @@ -448,7 +448,7 @@ def runTest() { jenkinsfile = load "${WORKSPACE}/aqa-tests/buildenv/jenkins/JenkinsfileBase" if (LABEL.contains('ci.agent.dynamic') && CLOUD_PROVIDER.equals('azure')) { //Set dockerimage for azure agent. 
Fyre has stencil to setup the right environment - docker.image('adoptopenjdk/centos6_build_image').pull() + docker.image('adoptopenjdk/centos6_build_image').pull() docker.image('adoptopenjdk/centos6_build_image').inside { jenkinsfile.testBuild() } @@ -526,7 +526,7 @@ def areNodesWithLabelOnline(labelToCheck) { String[] onlineNodes = nodesByLabel(labelToCheck) if (onlineNodes.size() < 1) { return false - } + } return true } @@ -547,7 +547,7 @@ def checkErrors(errorList) { println("${message}") currentNode.setTemporarilyOffline(true, new hudson.slaves.OfflineCause.UserCause(User.current(), "${message}")) } - // if SLACK_CHANNEL is provided, send the message in slack + // if SLACK_CHANNEL is provided, send the message in slack if (params.SLACK_CHANNEL) { slackSend channel: SLACK_CHANNEL, message: "${message}" } @@ -574,7 +574,7 @@ def changeParam(paramsList, newParams) { paramsList.each { param -> def value = param.value.toString() newParams.each { newParam -> - + if (param.key == newParam.key) { value = newParam.value.toString() } @@ -617,11 +617,11 @@ def generateJobViaAutoGen(testJobName) { jobParams << string(name: 'GROUPS', value: group) } } - } + } } jobParams << string(name: 'JDK_IMPL', value: jdk_impl) jobParams << string(name: 'JDK_VERSIONS', value: jdk_version) build job: 'Test_Job_Auto_Gen', parameters: jobParams, propagate: true } -} \ No newline at end of file +} diff --git a/buildenv/jenkins/testJobTemplate b/buildenv/jenkins/testJobTemplate index b22179c234..f423dd9022 100644 --- a/buildenv/jenkins/testJobTemplate +++ b/buildenv/jenkins/testJobTemplate @@ -350,7 +350,7 @@ ARCH_OS_LIST.each { ARCH_OS -> When DYNAMIC_COMPILE is on:
- if there is no BUILD_LIST input, DYNAMIC_COMPILE will automatically figure out which test material to compile based on the test target.
- if there is BUILD_LIST input, the BUILD_LIST will be further refined based on the test target.''') - stringParam('TARGET', "${ACTUAL_TARGET}", + stringParam('TARGET', "${ACTUAL_TARGET}", '''Test TARGET to execute, you can also select sub-targets such as:
different level: sanity|extended|special
different group: openjdk|system|jck|functional|perf|external
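As a sketch of how these sub-targets compose (assuming the usual AQA convention of a level, a dot, then a group, plus direct playlist test-case names — the specific values below are only illustrations), TARGET values would look like:
```
TARGET=sanity.openjdk        # sanity-level openjdk regression tests
TARGET=extended.functional   # extended-level functional tests
TARGET=_jacoco_test          # a single testCaseName taken from a playlist.xml
```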
diff --git a/buildenv/jenkins/triggerGrinder.groovy b/buildenv/jenkins/triggerGrinder.groovy index 3d27ee31a5..d0ad732747 100644 --- a/buildenv/jenkins/triggerGrinder.groovy +++ b/buildenv/jenkins/triggerGrinder.groovy @@ -24,8 +24,8 @@ def launch_grinders(List> json) { /** Get the parameters specified as a string list. Get each of the key-value pairs for each json value. - If the issue for a job is closed, and JDK_VERSION and - JDK_IMPL match the parameters specified, + If the issue for a job is closed, and JDK_VERSION and + JDK_IMPL match the parameters specified, run grinder on it. Otherwise ignore it */ diff --git a/doc/Manifesto.md b/doc/Manifesto.md index 8f291753c1..b0d50152f9 100644 --- a/doc/Manifesto.md +++ b/doc/Manifesto.md @@ -34,7 +34,7 @@ See the License for the specific language governing permissions and -## 1 Introduction +## 1 Introduction At the Adoptium project, we are committed to producing high-quality OpenJDK binaries, which we call Eclipse Temurin, for consumption by the community. After speaking with different implementers, and listening to the needs of our community, developers and enterprise consumers alike, we have heard very clearly the desire to guarantee a certain level of quality. We organise this quality assurance work within the Eclipse AQAvit project, which is named by the Adoptium Quality Assurance (AQA) acronym, in combination with 'vit' and its dual meaning for speed and vitality. @@ -50,10 +50,10 @@ Quality Assurance means, “Make quality certain to happen”. Our goal is to m -### 2.1 Open & Transparent -We believe open languages deserve open tests. This means test source should be open and tests should be executed in the open with their results openly available. +### 2.1 Open & Transparent +We believe open languages deserve open tests. This means test source should be open and tests should be executed in the open with their results openly available. -Transparent processes strengthen confidence. Consumers get to see test results directly and know a certain quality bar was achieved, rather than just be told some testing was done. Open tests get scrutinized, get fixed, get loved. +Transparent processes strengthen confidence. Consumers get to see test results directly and know a certain quality bar was achieved, rather than just be told some testing was done. Open tests get scrutinized, get fixed, get loved. Open testing engages more people and helps to drive innovation and build community. Being able to see and compare test results across implementations also creates a healthy and competitive ecosystem that ultimately benefits all. @@ -66,44 +66,44 @@ These tests should cover different categories including functional/regression, s #### 2.2.1 Functional and Regression Tests -We currently utilize both the OpenJDK regression test suite and the Eclipse OpenJ9 functional test suite. While there remains some effort to segregate the portions of these test suites that are VM-implementation specific (OpenJDK functional originally written to target Hotspot VM, Openj9 functional originally written to target Openj9 VM), we see there is a great number of tests that are implementation agnostic and can be used to verify different implementations. +We currently utilize both the OpenJDK regression test suite and the Eclipse OpenJ9 functional test suite. 
While there remains some effort to segregate the portions of these test suites that are VM-implementation specific (OpenJDK functional originally written to target Hotspot VM, Openj9 functional originally written to target Openj9 VM), we see there is a great number of tests that are implementation agnostic and can be used to verify different implementations. -Both of these test groups, which we call “openjdk” and “functional” are typically unit tests designed to verify the APIs and features of the JDK. By thorough coverage of the APIs and coverage of the JEPs/JSRs, we identify interoperability issues and consistency results. Both test suites are continuously augmented at their source projects/repositories, OpenJDK and Eclipse OpenJ9 respectively. For this reason, we have chosen to include portions of these as part of AQA, since the tests are being kept current and relevant to the changes to the binaries we test. +Both of these test groups, which we call “openjdk” and “functional” are typically unit tests designed to verify the APIs and features of the JDK. By thorough coverage of the APIs and coverage of the JEPs/JSRs, we identify interoperability issues and consistency results. Both test suites are continuously augmented at their source projects/repositories, OpenJDK and Eclipse OpenJ9 respectively. For this reason, we have chosen to include portions of these as part of AQA, since the tests are being kept current and relevant to the changes to the binaries we test. -While there are different JDK implementations (ranging from different garbage collection policies to different code generation options to different JVM technologies), they all draw from OpenJDK. Functional correctness and consistency can be measured through a common set of quality metrics. +While there are different JDK implementations (ranging from different garbage collection policies to different code generation options to different JVM technologies), they all draw from OpenJDK. Functional correctness and consistency can be measured through a common set of quality metrics. 2.2.2 System and Load Tests -This category of testing includes tests designed and written from a system-wide perspective. Quality engineers have written these tests from a consumer perspective, designing common customer scenarios based on feedback and observation from service professionals and consumer feedback. Importantly, this test group also includes load and stress testing which is important to enterprise consumers. These tests ask the question, “what level of load can this binary take before it breaks?”. +This category of testing includes tests designed and written from a system-wide perspective. Quality engineers have written these tests from a consumer perspective, designing common customer scenarios based on feedback and observation from service professionals and consumer feedback. Importantly, this test group also includes load and stress testing which is important to enterprise consumers. These tests ask the question, “what level of load can this binary take before it breaks?”. Some of these load and stress tests fire up thousands of threads and iterate 10’s of thousands of times. The tests can also be tuned, so that as binaries become more resilient, we can increase the load to further stress and push the load bar higher. 
#### 2.2.3 External Application Tests -This test category includes various suites, at present, functional and smoke tests from a set of third-party applications and Microprofile TCKs (run on different implementations of application server). +This test category includes various suites, at present, functional and smoke tests from a set of third-party applications and Microprofile TCKs (run on different implementations of application server). Current External Applications Suites being run at AQAvit can be found within adoptium/aqa-tests/external and include third-party applications / running Functional and Smoke tests, and application servers / running Microprofile TCKs. These tests are run in containers based from Temurin and optionally alternate vendor docker images in order to both exercise those images from docker hub, and to verify these third-party applications work well against them. Application suites are selected for both their real-world popularity and because they offer interesting workloads that best exercise an OpenJDK binary. This suite is expandable and there are more applications in the plan captured in adoptium/aqa-tests issue #172. -We are interested in ensuring these suites run well. We want to engage and share with application communities, but more importantly, we aim to demonstrate to enterprise consumers the correct execution of key applications. +We are interested in ensuring these suites run well. We want to engage and share with application communities, but more importantly, we aim to demonstrate to enterprise consumers the correct execution of key applications. #### 2.2.4 Performance Tests Performance benchmarks are important verification tools to compare binaries against an acceptable baseline. Consumers of our binaries require them to be performant. This category of tests includes microbenchmarks and large open-source benchmark suites. -In preparing AQA, we ask how can we run performance benchmarks against Temurin and other OpenJDK binaries and analyze and compare results? What information is coming out of some common open-source benchmarks and why might it be interesting? +In preparing AQA, we ask how can we run performance benchmarks against Temurin and other OpenJDK binaries and analyze and compare results? What information is coming out of some common open-source benchmarks and why might it be interesting? The challenge of selecting a set of benchmarks is the varied performance metrics are often in opposition with each other, throughput versus footprint being the classic example. When adding benchmarks, it will be important to clarify what the benchmark measures and how we run the benchmark (how many repetitions, with what command line options was it run, what gc policies were used, etc). We will favour benchmarks that are not implementation-specific, but rather gather a broad set of metrics that enterprise customers may expect in various deployment scenarios. -Along with the output and ‘score’ from the benchmark itself, the metrics gathered from a performance test run should also include the calculated confidence interval along with information about the machine upon which the test was run. We need to be confident that we have a repeatable measure, or at least understand in what ways a particular benchmark score can vary. +Along with the output and ‘score’ from the benchmark itself, the metrics gathered from a performance test run should also include the calculated confidence interval along with information about the machine upon which the test was run. 
We need to be confident that we have a repeatable measure, or at least understand in what ways a particular benchmark score can vary. We will set baseline scores for the performance benchmarks included in AQA that binaries must meet or exceed in order to pass the performance bar. #### 2.2.5 Security Tests -While the regression and functional suites contain many security tests, we intend to increase the level of security tests run against the Temurin and other OpenJDK binaries. We will include open security test suites that test for known vulnerabilities. We also intend to use fuzzers to search for security vulnerabilities. +While the regression and functional suites contain many security tests, we intend to increase the level of security tests run against the Temurin and other OpenJDK binaries. We will include open security test suites that test for known vulnerabilities. We also intend to use fuzzers to search for security vulnerabilities. @@ -112,9 +112,9 @@ While the regression and functional suites contain many security tests, we inten #### 2.3.1 Continual Investment -Tests (like the products they verify) need to continuously evolve & change. This is not a small effort, so is best achieved when we coordinate our efforts with many like-minded quality professionals and developers on a common goal of quality assurance. +Tests (like the products they verify) need to continuously evolve & change. This is not a small effort, so is best achieved when we coordinate our efforts with many like-minded quality professionals and developers on a common goal of quality assurance. Tests require maintenance and care. We want to continuously improve and become more effective and efficient. This includes: -Refining automation and tools +Refining automation and tools Automate re-inclusions upon fixes Remove friction, make testing easier Reduce process, make tools simpler @@ -133,18 +133,18 @@ To summarize this section on the process to modify, the selection criteria would #### 2.3.3 Codecov & Other Metrics -We should continually review the value of the tests and processes that we employ. The project should gather data to measure the effectiveness of tests. This data helps inform our process of improvement and change. +We should continually review the value of the tests and processes that we employ. The project should gather data to measure the effectiveness of tests. This data helps inform our process of improvement and change. Metrics of interest are: - Heat maps - List of most used APIs to evaluate gaps and risk and prioritize testing -- Code coverage +- Code coverage - Set bar to stay above a certain score (especially for priority areas in the heat map) - Shows gaps, limited, code coverage does not equate to functional coverage - Bug prediction - List of most changed files, bugs often to occur in more changed areas - Bug injection, mutation testing (in the plan, to measure the quality of our testing) -- Comparative analysis +- Comparative analysis - Test-the-tests (Section 2.3.4) In summary, we will gather metrics for the purpose of improving test material and our ability to assure quality. @@ -155,7 +155,7 @@ In summary, we will gather metrics for the purpose of improving test material an We can employ a “test-the-tests” mechanism, running tests and seeing how they perform across implementations/versions etc. 
This allows for a repeatable pattern to follow when triaging test results: look first at the failure, then check whether it fails across versions, platforms and implementations to home in on the root cause. We can also employ tools for a ‘diff’ of test results, to compare across the variations that we encounter at the project. One of the greatest benefits that the work encapsulated by AQAvit offers is that we test many different implementations, versions and platforms in one spot, making it easy to compare. This comparison informs stakeholders, enterprises, open-source communities, and developers on the qualities of a particular binary as it compares to others. Stakeholders have answers to questions like: -- how did each implementation fare against particular test criteria? - how stable/fast/secure is the new release?
This is easier the more 'in tune' you are with issues lists at various repos. +- Check for any obvious or known explanation of the problem. This is easier the more 'in tune' you are with issues lists at various repos. If there is no obvious cause, you next need to find out if the failure happens on different: - implementations (does it fail against both hotspot and openj9?) diff --git a/doc/userGuide.md b/doc/userGuide.md index a8a758f757..e44c4f5e97 100644 --- a/doc/userGuide.md +++ b/doc/userGuide.md @@ -12,14 +12,14 @@ Set up your test machine with this [set of prerequisites](https://github.com/ecl While you can [run all the tests manually](#local-testing-via-make-targets-on-the-commandline) via the make targets on the command line, you may also run the tests in Jenkins. As part of the AdoptOpenJDK continuous integration (CI), AdoptOpenJDK runs test builds against the release and nightly SDK builds. You can set up your own Jenkins-based test builds using the adoptium aqa-tests Jenkinsfiles by: - + - Configure a [Jenkins job with a Customized URL](#jenkins-configuration-with-customized-url) - Ensure your Jenkins machines are configured properly (see the [openjdk-infrastructure playbooks](https://github.com/AdoptOpenJDK/openjdk-infrastructure/blob/master/ansible/README.md) for details) - Ensure machines are labeled following the [AdoptOpenJDK labeling scheme](https://github.com/smlambert/openjdk-infrastructure/blob/labels/docs/jenkinslabels.md). Minimally, your Jenkins nodes should have hw.arch.xxxx and sw.os.xxxx labels (for example, hw.arch.x86 and sw.os.linux for an x86_linux machine). ### Jenkins Configuration with Customized URL -1. Create Pipeline test build job using Pipeline script from SCM +1. Create Pipeline test build job using Pipeline script from SCM - Repository url - :https://github.com/adoptium/aqa-tests.git - Branches to build - */master - Script path - buildenv/jenkins/fileToMatchVersionAndPlatformToTest, example openjdk8_x86-64_linux @@ -29,7 +29,7 @@ You can set up your own Jenkins-based test builds using the adoptium aqa-tests J * TARGET - relates to the test target you wish to run (system, openjdk, perf, external, jck, functional are the top-level targets, but you can also add any of the sub-targets, including those defined in playlist.xml files in test directories) * JVM_VERSION - depending on what SDK you are testing against (some possible values are: openjdk8, openjdk8-openj9, openjdk9, openjdk9-openj9, openjdk10, openjdk10-openj9, openjdk10-sap) -* CUSTOMIZED_SDK_URL - the URL for where to pick up the SDK to test (if you are picking up builds from AdoptOpenJDK, please refer to the [openjdk-api README](https://github.com/AdoptOpenJDK/openjdk-api/blob/master/README.md) for more details) +* CUSTOMIZED_SDK_URL - the URL for where to pick up the SDK to test (if you are picking up builds from AdoptOpenJDK, please refer to the [openjdk-api README](https://github.com/AdoptOpenJDK/openjdk-api/blob/master/README.md) for more details) ![jenkins parameters](/doc/diagrams/jenkinsParameters.jpg) @@ -39,24 +39,24 @@ This is the guide on how to pass in environment variables when making builds wit #### Method 1: Write it in as part of the playlist file -This method is typically used if that environment variable is to be used in that specific test target. +This method is typically used if that environment variable is to be used in that specific test target. 1. Find the folder that your test is in ![test_folder](/doc/diagrams/testFolder.jpg) - + 2. 
Open the playlist.xml file ![playlist_file](/doc/diagrams/playListFile.jpg) - + 3. Find the testCaseName matching with the test you want to run ![test_case_name](/doc/diagrams/testCaseName.jpg) - + 4. In the corresponding command section, at the beginning, add the key word `export`, your environment variable, followed by a semicolon, just as you might do if you were running this set of commands locally ![export](/doc/diagrams/commandSection.jpg) - + 5. Save it, git add, commit, push ``` bash @@ -64,50 +64,50 @@ git add --all git commit -m "Added TR_Options as an environment variable in the playlist" git push origin env_var ``` - + 6. Go to the Jenkins page, and open up the Grinders ![open_grinders](/doc/diagrams/openGrinders.jpg) - + 7. Click “Build with Parameters” on the left side of the page, third down from the top 8. In the ADOPTOPENJDK_REPO section, put in the repository you were working from when you made those changes ![repo](/doc/diagrams/repo.jpg) - + 9. In the ADOPTOPENJDK_BRANCH section, put in the branch you were on ![branch](/doc/diagrams/branch.jpg) - + 10. In the BUILD_LIST and TARGET sections, put in the corresponding information ![build_list_target](/doc/diagrams/buildListTarget.jpg) - + 11. Scroll to the bottom and hit the Build button ![build](/doc/diagrams/build.jpg) -#### Method 2: Put it in the .mk file of the test that you want to run +#### Method 2: Put it in the .mk file of the test that you want to run -This method is to be used when the objective is to set that environment variable for all test targets in the group being run. For this example, we will be looking at the system.mk file. +This method is to be used when the objective is to set that environment variable for all test targets in the group being run. For this example, we will be looking at the system.mk file. 1. Open the aqa-tests/system folder ![system_folder](/doc/diagrams/systemFolder.jpg) - + 2. Open the .mk file corresponding to your test ![system_test](/doc/diagrams/systemtest.jpg) - -3. Find the last line of the file with the RESROOT name, the line that says SYSTEMTEST_RESROOT=$(TEST_RESROOT)/../ in this example + +3. Find the last line of the file with the RESROOT name, the line that says SYSTEMTEST_RESROOT=$(TEST_RESROOT)/../ in this example ![resroot_line](/doc/diagrams/resrootLine.jpg) - + 4. Insert the key word `export`, followed by your environment variable, without any single or double quotation marks, in the line above it ![export](/doc/diagrams/export.jpg) - + 5. Save it, git add, commit, push ``` bash @@ -115,46 +115,46 @@ git add --all git commit -m "Added TR_Options as an environment variable in the playlist" git push origin env_var ``` - + 6. Go to the Jenkins page, and open up the Grinders ![open_grinders](/doc/diagrams/openGrinders.jpg) - + 7. Click “Build with Parameters” on the left side of the page, third down from the top 8. In the ADOPTOPENJDK_REPO section, put in the repository you were working from when you made those changes ![repo](/doc/diagrams/repo.jpg) - + 9. In the ADOPTOPENJDK_BRANCH section, put in the branch you were on ![branch](/doc/diagrams/branch.jpg) - + 10. In the BUILD_LIST and TARGET sections, put in the corresponding information ![build_list_target](/doc/diagrams/buildListTarget.jpg) - + 11. 
Scroll to the bottom and hit the Build button ![build](/doc/diagrams/build.jpg) - - -#### Method 3: Put it in the testEnv.mk file + + +#### Method 3: Put it in the testEnv.mk file This method is to be used when the objective is to set that environment variable for a more generic case. -1. Fork https://github.com/AdoptOpenJDK/TKG +1. Fork https://github.com/AdoptOpenJDK/TKG ![test_config](/doc/diagrams/testConfig.jpg) - + 2. Edit the [testEnv.mk](https://github.com/AdoptOpenJDK/TKG/blob/master/testEnv.mk) file - + ![test_env](/doc/diagrams/testEnv.jpg) - + 3. Insert the key word export, followed by your environment variable, without any single or double quotation marks, or spaces - + ![export](/doc/diagrams/otherExport.jpg) - + 5. Save it, git add, commit, push ``` git add --all @@ -164,9 +164,9 @@ git push origin env_var 6. Go to the Jenkins page, and open up the Grinder_TKG job ![open_grinders](/doc/diagrams/openGrinders.jpg) - + 7. Click “Build with Parameters” on the left side of the page, third down from the top - + 8. Use your TKG_REPO and TKG_BRANCH where you have made your changes for those parameters instead of the default values 9. Scroll to the bottom and hit the Build button @@ -199,7 +199,7 @@ Usage : get.sh --testdir|-t openjdktestdir [--sdk_resource|-r ] : optional. Indicate where to download an sdk from - releases, nightly, upstream or customized - [--customizedURL|-c ] : optional. If downloading an sdk and if sdk source is set as customized, indicates sdk url + [--customizedURL|-c ] : optional. If downloading an sdk and if sdk source is set as customized, indicates sdk url [--clone_openj9 ] : optional. ture or false. Clone openj9 if this flag is set to true. Default to true [--openj9_repo ] : optional. OpenJ9 git repo. Default value https://github.com/eclipse-openj9/openj9.git is used if not provided [--openj9_sha ] : optional. OpenJ9 pull request sha @@ -215,10 +215,10 @@ Usage : get.sh --testdir|-t openjdktestdir #### Set environment variables, configure, build and run tests -You can use the same approach as described in the [OpenJ9 functional tests README file]( https://github.com/eclipse-openj9/openj9/blob/master/test/README.md). In the case of the tests run at AdoptOpenJDK, instead of using a make target called _sanity.functional, you can provide the appropriate make target to run the tests of interest to you. +You can use the same approach as described in the [OpenJ9 functional tests README file]( https://github.com/eclipse-openj9/openj9/blob/master/test/README.md). In the case of the tests run at AdoptOpenJDK, instead of using a make target called _sanity.functional, you can provide the appropriate make target to run the tests of interest to you. ##### Top-level test targets: -- openjdk +- openjdk - system - external - perf @@ -237,7 +237,7 @@ In each playlist.xml file in each test directory, there are tests defined. Test For example, for this excerpt from a playlist: ``` - scala_test + scala_test ... ``` you will be able to run 'make scala_test' to execute the test. @@ -314,22 +314,22 @@ The JTREG report HTML summary file is then located at `aqa-tests/test-results/op ### Use live_monitor feature -TKG has a script that monitors the test progress live. It can be your only friend in darkest nights. When you run tests locally, you may want to use that feature. It shows how many tests are passed currently etc. Now let's look how to use it. You need Python3 installed on your machine in order to use it. Note that this feature currently works with openjdk tests only. 
This means you need to use: +TKG has a script that monitors the test progress live. It can be your only friend in darkest nights. When you run tests locally, you may want to use that feature. It shows how many tests are passed currently etc. Now let's look how to use it. You need Python3 installed on your machine in order to use it. Note that this feature currently works with openjdk tests only. This means you need to use: `export BUILD_LIST=openjdk` before compiling or you need to run just openjdk tests and pipe them. Otherwise it won't work. -1. You need to change the verbose option of jtreg. In order to do that, you need to change 1 line of code in [/openjdk/openjdk.mk](https://github.com/adoptium/aqa-tests/blob/master/openjdk/openjdk.mk) file. +1. You need to change the verbose option of jtreg. In order to do that, you need to change 1 line of code in [/openjdk/openjdk.mk](https://github.com/adoptium/aqa-tests/blob/master/openjdk/openjdk.mk) file. - You need to change + You need to change - `JTREG_BASIC_OPTIONS += -v:fail,error,time,nopass` + `JTREG_BASIC_OPTIONS += -v:fail,error,time,nopass` line to `JTREG_BASIC_OPTIONS += -v:all` -2. After that, you are ready to run the scripts. Here is the example of how you can do it : +2. After that, you are ready to run the scripts. Here is the example of how you can do it : `make _sanity.openjdk | python3 -u scripts/liveMonitor_countTests/jtreg-monitor.py` @@ -339,28 +339,28 @@ TKG has a script that counts how many test exists in a specified folder. This sc `aqa-tests/TKG# python3 -u scripts/liveMonitor_countTests/count-java-tests.py ../openjdk/openjdk-jdk/test/langtools/tools/javac/warnings/` -The output of the code above is : +The output of the code above is : + - Counting tests in 'aqa-tests/openjdk/openjdk-jdk/test/langtools/tools/javac/warnings/' ... Found 41 java files - . ................ 11 - 6594914 ........... 2 - 6747671 ........... 1 - 6885255 ........... 1 + . ................ 11 + 6594914 ........... 2 + 6747671 ........... 1 + 6885255 ........... 1 - 7090499 ........... 1 - AuxiliaryClass .... 3 - NestedDeprecation . 1 - suppress ......... 10 + 7090499 ........... 1 + AuxiliaryClass .... 3 + NestedDeprecation . 1 + suppress ......... 10 Found 30 java files containing @test - - + + ## Exclude a test target @@ -369,13 +369,13 @@ Instead of having to manually create a PR to disable test targets, they can now ```auto exclude test ``` -If the testName matches the testCaseName defined in `````` element of playlist.xml, the entire test suite will be excluded. If the testName is testCaseName followed by _n, only the (n+1)th variation will be excluded. +If the testName matches the testCaseName defined in `````` element of playlist.xml, the entire test suite will be excluded. If the testName is testCaseName followed by _n, only the (n+1)th variation will be excluded. For example: ``` - jdk_test + jdk_test NoOptions -Xmx1024m diff --git a/external/README.md b/external/README.md index 3a15bc64b5..fcdddd6e01 100644 --- a/external/README.md +++ b/external/README.md @@ -1,6 +1,6 @@ # External (Third Party Container) Tests -Third Party container tests help verify that the adoptium binaries are *good* by running a variety of Java applications inside of Docker containers. adoptium/aqa-tests/[Issue #172](https://github.com/adoptium/aqa-tests/issues/172) lists the applications that we have initially targeted to best exercise the adoptium binaries. For each application, we choose to run a selection of their functional tests. 
+Third Party container tests help verify that the adoptium binaries are *good* by running a variety of Java applications inside of Docker containers. adoptium/aqa-tests/[Issue #172](https://github.com/adoptium/aqa-tests/issues/172) lists the applications that we have initially targeted to best exercise the adoptium binaries. For each application, we choose to run a selection of their functional tests. ## Running External tests locally To run any AQA tests locally, you follow the same pattern: @@ -8,8 +8,8 @@ To run any AQA tests locally, you follow the same pattern: 0. Ensure your test machine is set up with [test prereqs](https://github.com/adoptium/aqa-tests/blob/master/doc/Prerequisites.md). For external tests, you do need Docker installed. 1. Download/unpack the SDK that you want to test to your test machine -1. `export TEST_JDK_HOME=` -1. `git clone https://github.com/adoptium/aqa-tests.git` +1. `export TEST_JDK_HOME=` +1. `git clone https://github.com/adoptium/aqa-tests.git` 1. `cd aqa-tests` 1. `./get.sh` 1. `cd TKG` @@ -18,7 +18,7 @@ To run any AQA tests locally, you follow the same pattern: 1. `make _jacoco_test` (When you defined BUILD_LIST to point to a directory in [aqa-tests/external](https://github.com/adoptium/aqa-tests/tree/master/external), then this is a testCaseName from the playlist.xml file within the directory you chose) -When [running these from the command-line](https://github.com/adoptium/aqa-tests/blob/master/doc/userGuide.md#local-testing-via-make-targets-on-the-commandline), these tests are grouped under a make target called 'external', so 'make external' would run the entire set of tests found in the aqa-tests/external directory. This is unadvisable! Limit what you compile and run, BUILD_LIST=external/``, and TARGET=`` +When [running these from the command-line](https://github.com/adoptium/aqa-tests/blob/master/doc/userGuide.md#local-testing-via-make-targets-on-the-commandline), these tests are grouped under a make target called 'external', so 'make external' would run the entire set of tests found in the aqa-tests/external directory. This is unadvisable! Limit what you compile and run, BUILD_LIST=external/``, and TARGET=`` These tests run regularly and results can be found in [TRSS Third Party Application view](https://trss.adoptopenjdk.net/ThirdPartyAppView). 
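A minimal sketch of the limited run recommended above, consolidating the numbered steps from this README (the jacoco directory is only an example suite and the JDK path is a placeholder), would look roughly like:
```
export TEST_JDK_HOME=/path/to/unpacked/jdk   # placeholder path to the SDK under test
git clone https://github.com/adoptium/aqa-tests.git
cd aqa-tests
./get.sh
cd TKG
export BUILD_LIST=external/jacoco            # compile and run only this suite
make compile
make _jacoco_test                            # a testCaseName from that suite's playlist.xml
```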
@@ -28,12 +28,12 @@ See the [roadmap](https://github.com/adoptium/aqa-tests/tree/master/external#roa Our next steps to improve and expand this set of external tests is divided into 2 categories: #### Technical Goals - Verify the docker images that the project produces -- Copy results from Docker container for easier viewing and triage in Jenkins +- Copy results from Docker container for easier viewing and triage in Jenkins - Quick compare view, easy comparison of how different implementations stack up - Parallel testing (to improve execution time) - Startup-only testing (application startup, but not full runs of app functional testing) -- Add high-value tests that exercise the adoptium binaries, including but not limited to functional test suites and Microprofile compliance tests (plan to start with [Fault Tolerance TCK](https://github.com/eclipse-openj9/microprofile-fault-tolerance/blob/master/tck/running_the_tck.asciidoc) and [Metrics API TCKs](https://github.com/eclipse-openj9/microprofile-metrics/blob/master/tck/running_the_tck.asciidoc) against [GlassFish](https://javaee.github.io/glassfish/) EE reference implementation) - +- Add high-value tests that exercise the adoptium binaries, including but not limited to functional test suites and Microprofile compliance tests (plan to start with [Fault Tolerance TCK](https://github.com/eclipse-openj9/microprofile-fault-tolerance/blob/master/tck/running_the_tck.asciidoc) and [Metrics API TCKs](https://github.com/eclipse-openj9/microprofile-metrics/blob/master/tck/running_the_tck.asciidoc) against [GlassFish](https://javaee.github.io/glassfish/) EE reference implementation) + #### Strategic Goals - Engage with application communities, including the Eclipse Jakarta EE project, to: - report and resolve application test failures @@ -47,23 +47,23 @@ There are 4 common triage scenarios, with associated appropriate actions to take ### How to Add New Tests - Learn how to run the application tests that you intend to automate in the build manually first, and find out any special dependencies the application testing may have. - Clone https://github.com/adoptium/aqa-tests.git and look at external directory. -- Copy the 'example-test' subdirectory and rename it after the application you are adding. -- Modify the files in your new sub-directory according to your needs. -- Check in the changes into https://github.com/[YOUR-BRANCH]/aqa-tests and test it using a personal build. +- Copy the 'example-test' subdirectory and rename it after the application you are adding. +- Modify the files in your new sub-directory according to your needs. +- Check in the changes into https://github.com/[YOUR-BRANCH]/aqa-tests and test it using a personal build. #### Which files do I need to modify after making a copy of example-test? **Dockerfile** -- The example Dockerfile contains a default list of dependent executable files. Please read the documentation of the third party application you are enabling to find out if you need any executable files other than the default set, if yes, add them to the list. +- The example Dockerfile contains a default list of dependent executable files. Please read the documentation of the third party application you are enabling to find out if you need any executable files other than the default set, if yes, add them to the list. - Update the clone command based on your third party application's source repository. 
- + **Shell script** - Replace the example command line at the bottom of this script with the initial command lines that trigger execution of your test. -**build.xml** -- Update the distribution folder paths, docker image name etc according to the name of your application. +**build.xml** +- Update the distribution folder paths, docker image name etc according to the name of your application. -**playlist.xml** -- Update the name of the example test case to the actual test case of the third party application that you intend to run. +**playlist.xml** +- Update the name of the example test case to the actual test case of the third party application that you intend to run. Please direct questions to the [#testing Slack channel](https://adoptium.slack.com/archives/C5219G28G). diff --git a/external/build.xml b/external/build.xml index 3a9950d348..88da6e00f7 100644 --- a/external/build.xml +++ b/external/build.xml @@ -24,7 +24,7 @@ - + @@ -43,7 +43,7 @@ - + @@ -60,7 +60,7 @@ - + @@ -68,7 +68,7 @@ - + diff --git a/external/camel/playlist.xml b/external/camel/playlist.xml index 9b34cb3d17..de16e2419d 100644 --- a/external/camel/playlist.xml +++ b/external/camel/playlist.xml @@ -17,7 +17,7 @@ camel_test $(TEST_ROOT)$(D)external$(D)external.sh --run --tag "${DOCKERIMAGE_TAG}" --dir camel --reportdst $(REPORTDIR) --docker_args "$(EXTRA_DOCKER_ARGS)"; \ $(TEST_STATUS); \ - $(TEST_ROOT)$(D)external$(D)external.sh --clean --tag "${DOCKERIMAGE_TAG}" --dir camel + $(TEST_ROOT)$(D)external$(D)external.sh --clean --tag "${DOCKERIMAGE_TAG}" --dir camel extended diff --git a/external/criu/pingPerf.sh b/external/criu/pingPerf.sh index 3184f59a0a..c92d61536d 100755 --- a/external/criu/pingPerf.sh +++ b/external/criu/pingPerf.sh @@ -56,7 +56,7 @@ getSemeruDockerfile() { sed -i 's:mkdir -p \/opt\/java\/java-ea; \\:mkdir -p \/opt\/java\/java-ea;:' $semeruDockerfile sed -i 's:cd \/opt\/java\/java-ea; \\:COPY NEWJDK\/ \/opt\/java\/java-ea:' $semeruDockerfile sed -i 's:tar -xf \/tmp\/openjdk.tar.gz --strip-components=1;:RUN \/opt\/java\/java-ea\/bin\/java --version:' $semeruDockerfile - + mkdir NEWJDK cp -r $testJDKPath/. NEWJDK/ else @@ -72,7 +72,7 @@ prepare() { rm -f PingperfFiles.zip cp "$pingPerfZipPath" . unzip PingperfFiles.zip - else + else echo "${pingPerfZipPath} does not exist." exit 1 fi @@ -143,7 +143,7 @@ checkLog() { echo "check log ..." if [ -f ./containerId.log ]; then cat ./containerId.log - else + else echo "./containerId.log does not exist." 
exit 1 fi @@ -291,7 +291,7 @@ setup() { echo "NODE_LABELS: $NODE_LABELS" echo "PLATFORM: $PLATFORM" echo "uname -a: $(uname -a)" - + if [ -n "$(cat /etc/redhat-release | grep 'Red Hat')" ]; then cat /etc/redhat-release fi @@ -308,7 +308,7 @@ setup() { node_label_micro_architecture="" node_label_current_os="" for label in $NODE_LABELS - do + do if [[ -z "$node_label_micro_architecture" && "$label" == "hw.arch."*"."* ]]; then #hw.arch.x86.skylake node_label_micro_architecture=$label echo "node_label_micro_architecture is $node_label_micro_architecture" diff --git a/external/dockerfile_functions.sh b/external/dockerfile_functions.sh index 12d481e4c2..384ba63306 100755 --- a/external/dockerfile_functions.sh +++ b/external/dockerfile_functions.sh @@ -86,7 +86,7 @@ print_image_args() { tag="" if [[ "${package}" == "jre" ]]; then tag="${version}-jre" - else + else tag="${version}-jdk" fi if [[ "${vm}" == "openj9" ]]; then @@ -340,7 +340,7 @@ print_python_install() { "\nENV PYTHON_VERSION=\$PYTHON_VERSION" \ "\n\n# Install python" \ "\nRUN wget --progress=dot:mega -O python.tar.xz https://www.python.org/ftp/python/\${PYTHON_VERSION}/Python-\${PYTHON_VERSION}.tar.xz \\" >> ${file} - + echo -e "\t&& tar -xJf python.tar.xz \\" \ "\n\t&& cd Python-\${PYTHON_VERSION} \\" \ "\n\t&& ./configure --prefix=/usr/local \\" \ @@ -416,7 +416,7 @@ print_maven_install() { "\nENV MAVEN_HOME /opt/maven" \ "\n\n# Install Maven" \ "\nRUN wget --no-verbose --no-check-certificate --no-cookies https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/\${MAVEN_VERSION}/apache-maven-\${MAVEN_VERSION}-bin.tar.gz \\" >> ${file} - + echo -e "\t&& tar -zvxf apache-maven-\${MAVEN_VERSION}-bin.tar.gz -C /opt/ \\" \ "\n\t&& ln -s /opt/apache-maven-\${MAVEN_VERSION} /opt/maven \\" \ "\n\t&& rm -f apache-maven-\${MAVEN_VERSION}-bin.tar.gz" \ @@ -624,7 +624,7 @@ generate_dockerfile() { if [[ ! -z ${jdk_install} ]]; then print_jdk_install ${file} ${os} ${platform}; fi - + if [[ ! 
-z ${maven_version} ]]; then print_maven_install ${file} ${maven_version}; fi diff --git a/external/external.sh b/external/external.sh index 3fd5c8fb6b..1c3587006c 100755 --- a/external/external.sh +++ b/external/external.sh @@ -79,7 +79,7 @@ parseCommandLineArgs() { check_external_custom=0 while [[ $# -gt 0 ]] && [[ ."$1" = .-* ]] ; do opt="$1"; - shift; + shift; case "$opt" in "--dir" | "-d" ) @@ -111,25 +111,25 @@ parseCommandLineArgs() { container_rmi="sudo podman rmi" fi shift;; - + "--version" | "-v" ) version="$1"; shift;; - + "--impl" | "-i" ) impl="$1"; shift;; "--docker_args" ) - if [ -z ${1+x} ]; then - echo "No EXTRA_DOCKER_ARGS set"; - else + if [ -z ${1+x} ]; then + echo "No EXTRA_DOCKER_ARGS set"; + else docker_args="$1"; shift; parse_docker_args $docker_args; fi;; - + "--tag" | "-t" ) - if [ -z "$1" ]; then - echo "No DOCKERIMAGE_TAG set, tag as default 'nightly'"; - else + if [ -z "$1" ]; then + echo "No DOCKERIMAGE_TAG set, tag as default 'nightly'"; + else tag="$1"; fi shift; @@ -150,7 +150,7 @@ parseCommandLineArgs() { "--node_labels" ) node_labels="$1"; shift; for label in $node_labels - do + do if [[ -z "$node_label_micro_architecture" && "$label" == "hw.arch."*"."* ]]; then #hw.arch.x86.skylake node_label_micro_architecture=$label echo "node_label_micro_architecture is $node_label_micro_architecture" @@ -199,7 +199,7 @@ parseCommandLineArgs() { testtarget="$1"; shift;; "--build" | "-b" ) - command_type=build;; + command_type=build;; "--run" | "-r" ) command_type=run;; @@ -215,35 +215,35 @@ parseCommandLineArgs() { *) echo >&2 "Invalid option: ${opt}"; echo "This option was unrecognized."; usage; exit 1; esac - done + done } # Parse environment variable DOCKERIMAGE_TAG # to set docker_os, build_type, package -function parse_tag() { +function parse_tag() { # set PACKAGE case $tag in - *jre*) + *jre*) package=jre ;; esac - + # set DOCKER_OS case $tag in - *ubuntu*|*latest*|*nightly*) + *ubuntu*|*latest*|*nightly*) echo "DOCKERIMAGE_TAG $tag has been recognized.";; *) echo "Unable to recognize DOCKER_OS from DOCKERIMAGE_TAG = $tag!";; esac - + } function parse_docker_args() { # parse docker_args to two variable: mountV and imageArg - mountV=""; + mountV=""; while [[ $# -gt 0 ]] && [[ ."$1" = .-* ]] ; do opt="$1"; - shift; + shift; case "$opt" in "--volume" | "-v" ) @@ -253,7 +253,7 @@ function parse_docker_args() { mountV="${mountV} --tmpfs $1 "; shift;; "--image" | "-i" ) - imageArg="$1"; + imageArg="$1"; shift;; *) echo >&2 "Invalid docker args option: ${opt}"; exit 1; esac @@ -267,7 +267,7 @@ function docker-ip() { parseCommandLineArgs "$@" -# set DOCKER_HOST env variables +# set DOCKER_HOST env variables # DOCKER_HOST=$(docker-ip $test-test) if [ $command_type == "build" ]; then @@ -300,7 +300,7 @@ if [ $command_type == "run" ]; then if [ $reportsrc != "false" ]; then $container_cp $test-test:$reportsrc $reportdst/external_test_reports; fi - + if [ $portable != "false" ]; then if [[ $docker_registry_url ]]; then echo "Private Docker Registry login starts:" @@ -335,7 +335,7 @@ if [ $command_type == "load" ]; then if [[ $docker_registry_url ]]; then echo "Private Docker Registry login starts:" echo $DOCKER_REGISTRY_CREDENTIALS_PSW | $container_login --username=$DOCKER_REGISTRY_CREDENTIALS_USR --password-stdin $docker_registry_url - + mount_options="$mountV" if [[ $mount_jdk == "false" ]]; then echo "JDK inside the docker image is used for testing" @@ -373,7 +373,7 @@ if [ $command_type == "load" ]; then echo "$container_run --privileged $mount_options --name 
restore-test --rm $restore_docker_image_name" $container_run --privileged $mount_options --name restore-test --rm $restore_docker_image_name done - + $container_logout $docker_registry_url else echo "Docker Registry is not available on this Jenkins" diff --git a/openjdk/README.md b/openjdk/README.md index 95352b3e03..e0539ff190 100644 --- a/openjdk/README.md +++ b/openjdk/README.md @@ -12,9 +12,9 @@ See the License for the specific language governing permissions and --> # OpenJDK regression tests -This group of tests are the set that comes from the openjdk project, often referred to as jtreg tests, as the jtreg framework is the underlying executable used to execute them. The entire set of openjdk regression tests is quite large. For our nightly builds, we run only a subset of these tests (typically, those suites of tests that we 'tag' as sanity in the [playlist.xml](https://github.com/adoptium/aqa-tests/blob/master/openjdk/playlist.xml) file). For release builds, we intend to run the suites tagged as sanity and extended in the playlist. +This group of tests are the set that comes from the openjdk project, often referred to as jtreg tests, as the jtreg framework is the underlying executable used to execute them. The entire set of openjdk regression tests is quite large. For our nightly builds, we run only a subset of these tests (typically, those suites of tests that we 'tag' as sanity in the [playlist.xml](https://github.com/adoptium/aqa-tests/blob/master/openjdk/playlist.xml) file). For release builds, we intend to run the suites tagged as sanity and extended in the playlist. -For more details on how the underlying jtreg harness works, you can refer to the ["How to Use the JTreg harness" guide](https://adoptopenjdk.gitbooks.io/adoptopenjdk-getting-started-kit/en/intermediate-steps/how_to_use_jtreg_-_java_regression_test_harness.html). For advanced users, you can refer to the [jtreg doc](https://openjdk.org/jtreg/command-help.html). +For more details on how the underlying jtreg harness works, you can refer to the ["How to Use the JTreg harness" guide](https://adoptopenjdk.gitbooks.io/adoptopenjdk-getting-started-kit/en/intermediate-steps/how_to_use_jtreg_-_java_regression_test_harness.html). For advanced users, you can refer to the [jtreg doc](https://openjdk.org/jtreg/command-help.html). ## Running OpenJDK tests locally While you can directly use the jtreg test harness to run these tests locally, we have also integrated them into our AQA test suite with TKG (TestKitGen) so that they can be run following the same pattern as any other AQA test: @@ -22,8 +22,8 @@ While you can directly use the jtreg test harness to run these tests locally, we 0. Ensure your test machine is set up with [test prereqs](https://github.com/eclipse-openj9/openj9/blob/master/test/docs/Prerequisites.md). For openjdk tests, you do not need Docker installed. 1. Download/unpack the SDK you want to your test machine (you can download them from our website: [adoptium.net](adoptium.net). -1. `export TEST_JDK_HOME=` -1. `git clone https://github.com/adoptium/aqa-tests.git` +1. `export TEST_JDK_HOME=` +1. `git clone https://github.com/adoptium/aqa-tests.git` 1. `cd aqa-tests` 1. `./get.sh` 1. 
`cd TKG` @@ -64,7 +64,7 @@ If you need to exclude more than one testcase, put an indent after the reason an ## Fixing the tests: Some tests just may need to be run with "othervm", and that can easily be done by adding a @run line (or modifying any existing @run): - + * @run main/othervm NameOfMainClass Make sure this @run follows any use of @library. Otherwise, if the test is a samevm possibility, make sure the test is diff --git a/openjdk/build.xml b/openjdk/build.xml index 2910254138..ed3de5c6d8 100755 --- a/openjdk/build.xml +++ b/openjdk/build.xml @@ -38,7 +38,7 @@ - + @@ -50,7 +50,7 @@ - + @@ -89,11 +89,11 @@ - + - + - + @@ -101,7 +101,7 @@ - + @@ -133,7 +133,7 @@ @@ -156,7 +156,7 @@ - - + @@ -187,7 +187,7 @@ - + @@ -195,7 +195,7 @@ - + @@ -208,7 +208,7 @@ - - - - + + + @@ -329,13 +329,13 @@ - + - + diff --git a/scripts/disabled_tests/README.md b/scripts/disabled_tests/README.md index 9073af7ca6..022b31136b 100644 --- a/scripts/disabled_tests/README.md +++ b/scripts/disabled_tests/README.md @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and # Disabled Tests -- Tools and Scripts -Scripts which parse various formats of lists, containing disabled JDK tests, into a uniform JSON format. In addition, a script which augments, within the generated JSON files, each disabled test with the status of their associated issue. +Scripts which parse various formats of lists, containing disabled JDK tests, into a uniform JSON format. In addition, a script which augments, within the generated JSON files, each disabled test with the status of their associated issue. ### Prerequisites