Files
rust_browser/tests/js262_harness.rs
Zachary D. Rowitsch b70a6bdf73 Update Test262 full suite: promote 602 tests, demote 126 regressions, add per-test timeout
- Promote 602 newly passing tests from RegExp implementation
- Demote 126 regex literal early-error tests (parse-time pattern validation not yet implemented)
- Add 30-second per-test timeout to prevent infinite loops from hanging the suite
- Add JS262_VERBOSE env var for per-test diagnostic output
- Full-suite pass rate: 3706/15431 (24%, up from 3230)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-25 10:57:38 -05:00

282 lines
9.7 KiB
Rust

//! JS262 conformance test harness — Test262-inspired tests for the JS engine.
//!
//! Mirrors the WPT harness pattern: manifest-driven, parallelized, policy-enforced.
#[path = "js262_harness/manifest.rs"]
mod manifest;
#[path = "js262_harness/runner.rs"]
mod runner;
#[path = "js262_harness/types.rs"]
mod types;
use manifest::{load_full_manifest, load_manifest};
use runner::{enforce_case_policy, run_case};
#[allow(unused_imports)]
use types::{Js262Outcome, Js262Status};
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
/// Root directory of the vendored external JS262 corpus:
/// `<crate>/tests/external/js262`, resolved from the crate manifest dir.
fn external_js262_root() -> PathBuf {
    let mut root = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    root.extend(["tests", "external", "js262"]);
    root
}
#[test]
fn js262_suite_matches_manifest_expectations() {
    // Runs every curated manifest case in parallel and asserts that each
    // observed outcome matches its declared status (pass / known-fail / skip).
    let root = external_js262_root();
    let cases = load_manifest(&root).expect("manifest must parse");
    let total = cases.len();
    let completed = AtomicUsize::new(0);
    let passed = AtomicUsize::new(0);
    let known_fail = AtomicUsize::new(0);
    let skipped = AtomicUsize::new(0);
    let failures = Mutex::new(Vec::new());
    eprintln!("JS262: running {total} tests across threads...");
    // Use 8 MiB stacks for worker threads — debug-mode stack frames are large
    // and deeply recursive JS tests (e.g. error-statement-limit.js) need room.
    const STACK_SIZE: usize = 8 * 1024 * 1024;
    let parallelism = std::thread::available_parallelism()
        .map(|n| n.get())
        .unwrap_or(4);
    // `.max(1)` guards the empty-manifest case: `slice::chunks(0)` panics.
    let chunk_size = cases.len().div_ceil(parallelism).max(1);
    // Scoped threads borrow the counters and case slices directly — no Arc
    // plumbing, no per-chunk clone — and every worker is joined (with panic
    // propagation) before the scope returns.
    std::thread::scope(|scope| {
        for chunk in cases.chunks(chunk_size) {
            let (completed, passed, known_fail, skipped, failures) =
                (&completed, &passed, &known_fail, &skipped, &failures);
            std::thread::Builder::new()
                .stack_size(STACK_SIZE)
                .spawn_scoped(scope, move || {
                    for case in chunk {
                        let outcome = run_case(case);
                        // Tally expected (status, outcome) pairs; mismatches are
                        // surfaced by `enforce_case_policy` below, not here.
                        match (&case.status, &outcome) {
                            (Js262Status::Pass, Js262Outcome::Pass) => {
                                passed.fetch_add(1, Ordering::Relaxed);
                            }
                            (Js262Status::KnownFail { .. }, Js262Outcome::Fail { .. }) => {
                                known_fail.fetch_add(1, Ordering::Relaxed);
                            }
                            (Js262Status::Skip { .. }, Js262Outcome::Skipped { .. }) => {
                                skipped.fetch_add(1, Ordering::Relaxed);
                            }
                            _ => {}
                        }
                        if let Err(err) = enforce_case_policy(case, outcome) {
                            failures.lock().unwrap().push(format!("{}: {err}", case.id));
                        }
                        // Progress line every 50 completions and at the very end.
                        let done = completed.fetch_add(1, Ordering::Relaxed) + 1;
                        if done == total || done.is_multiple_of(50) {
                            eprintln!(
                                "JS262: [{done}/{total}] pass={} known_fail={} skip={}",
                                passed.load(Ordering::Relaxed),
                                known_fail.load(Ordering::Relaxed),
                                skipped.load(Ordering::Relaxed),
                            );
                        }
                    }
                })
                .expect("failed to spawn JS262 worker thread");
        }
    });
    let failures = failures.into_inner().unwrap();
    eprintln!(
        "JS262: complete — pass={} known_fail={} skip={} failures={}",
        passed.load(Ordering::Relaxed),
        known_fail.load(Ordering::Relaxed),
        skipped.load(Ordering::Relaxed),
        failures.len(),
    );
    if !failures.is_empty() {
        panic!(
            "JS262 harness failures ({}):\n{}",
            failures.len(),
            failures.join("\n")
        );
    }
}
#[test]
fn js262_manifest_has_nonempty_reasons_for_nonpassing_entries() {
    // Policy check: every KnownFail/Skip manifest entry must carry a
    // human-readable justification so regressions stay explainable.
    let cases = load_manifest(&external_js262_root()).expect("manifest must parse");
    for case in cases {
        let reason = match case.status {
            Js262Status::KnownFail { reason } | Js262Status::Skip { reason } => reason,
            Js262Status::Pass => continue,
        };
        assert!(
            !reason.trim().is_empty(),
            "case '{}' must include a non-empty reason",
            case.id
        );
    }
}
#[test]
fn js262_runner_is_deterministic() {
    // Running the same case twice must yield identical outcomes; use the
    // first expected-pass case that also has captured output.
    let cases = load_manifest(&external_js262_root()).expect("manifest must parse");
    let case = cases
        .iter()
        .find(|c| c.expected_output.is_some() && matches!(c.status, Js262Status::Pass))
        .expect("at least one pass+output case must exist for determinism check");
    let (first, second) = (run_case(case), run_case(case));
    assert_eq!(first, second, "same case must be deterministic");
    assert!(matches!(first, Js262Outcome::Pass));
}
#[test]
fn js262_full_suite_matches_manifest_expectations() {
    // Full-suite variant: runs the complete Test262 scan manifest (if present)
    // and enforces the same per-case policy as the curated suite.
    let root = external_js262_root();
    let cases = match load_full_manifest(&root) {
        Ok(Some(cases)) => cases,
        // The full manifest is optional — absence means the scan hasn't run.
        Ok(None) => {
            eprintln!("JS262-FULL: manifest not found, skipping (run `just test262-scan` first)");
            return;
        }
        Err(e) => panic!("full manifest parse error: {e}"),
    };
    let total = cases.len();
    eprintln!("JS262-FULL: running {total} tests across threads...");
    let completed = AtomicUsize::new(0);
    let passed = AtomicUsize::new(0);
    let known_fail = AtomicUsize::new(0);
    let skipped = AtomicUsize::new(0);
    let failures = Mutex::new(Vec::new());
    // 8 MiB stacks: debug-mode frames are large and some tests recurse deeply.
    const STACK_SIZE: usize = 8 * 1024 * 1024;
    let parallelism = std::thread::available_parallelism()
        .map(|n| n.get())
        .unwrap_or(4);
    // `.max(1)` guards the empty-manifest case: `slice::chunks(0)` panics.
    let chunk_size = cases.len().div_ceil(parallelism).max(1);
    // Read the env flag once; workers capture the bool instead of each
    // thread re-querying the environment.
    let verbose = std::env::var("JS262_VERBOSE").is_ok();
    // Scoped threads borrow the counters and case slices directly — no Arc
    // plumbing, no per-chunk clone — and every worker is joined (with panic
    // propagation) before the scope returns.
    std::thread::scope(|scope| {
        for chunk in cases.chunks(chunk_size) {
            let (completed, passed, known_fail, skipped, failures) =
                (&completed, &passed, &known_fail, &skipped, &failures);
            std::thread::Builder::new()
                .stack_size(STACK_SIZE)
                .spawn_scoped(scope, move || {
                    for case in chunk {
                        if verbose {
                            eprintln!("JS262-FULL: running {}", case.id);
                        }
                        let outcome = run_case(case);
                        // Tally expected (status, outcome) pairs; mismatches are
                        // surfaced by `enforce_case_policy` below, not here.
                        match (&case.status, &outcome) {
                            (Js262Status::Pass, Js262Outcome::Pass) => {
                                passed.fetch_add(1, Ordering::Relaxed);
                            }
                            (Js262Status::KnownFail { .. }, Js262Outcome::Fail { .. }) => {
                                known_fail.fetch_add(1, Ordering::Relaxed);
                            }
                            (Js262Status::Skip { .. }, Js262Outcome::Skipped { .. }) => {
                                skipped.fetch_add(1, Ordering::Relaxed);
                            }
                            _ => {}
                        }
                        if let Err(err) = enforce_case_policy(case, outcome) {
                            failures.lock().unwrap().push(format!("{}: {err}", case.id));
                        }
                        // Progress line every 200 completions and at the very end.
                        let done = completed.fetch_add(1, Ordering::Relaxed) + 1;
                        if done == total || done.is_multiple_of(200) {
                            eprintln!(
                                "JS262-FULL: [{done}/{total}] pass={} known_fail={} skip={}",
                                passed.load(Ordering::Relaxed),
                                known_fail.load(Ordering::Relaxed),
                                skipped.load(Ordering::Relaxed),
                            );
                        }
                    }
                })
                .expect("failed to spawn JS262-FULL worker thread");
        }
    });
    let failures = failures.into_inner().unwrap();
    eprintln!(
        "JS262-FULL: complete — pass={} known_fail={} skip={} failures={}",
        passed.load(Ordering::Relaxed),
        known_fail.load(Ordering::Relaxed),
        skipped.load(Ordering::Relaxed),
        failures.len(),
    );
    if !failures.is_empty() {
        panic!(
            "JS262-FULL harness failures ({}):\n{}",
            failures.len(),
            failures.join("\n")
        );
    }
}