
[SYCL][NATIVECPU] use ur_memory_scope_capability_flags_t in NativeCPU adapter #18462


Closed · wants to merge 10 commits
Note: the diff is too large to display in full; only the first 3000 changed files are loaded.
187 changes: 0 additions & 187 deletions .ci/all_requirements.txt

This file was deleted.

10 changes: 4 additions & 6 deletions .ci/compute_projects.py
@@ -52,9 +52,6 @@
     "clang": {"clang-tools-extra", "compiler-rt", "cross-project-tests"},
     "clang-tools-extra": {"libc"},
     "mlir": {"flang"},
-    # Test everything if ci scripts are changed.
-    # FIXME: Figure out what is missing and add here.
-    ".ci": {"llvm", "clang", "lld", "lldb"},
 }

 DEPENDENT_RUNTIMES_TO_TEST = {"clang": {"libcxx", "libcxxabi", "libunwind"}}
@@ -133,11 +130,12 @@ def _add_dependencies(projects: Set[str]) -> Set[str]:
 def _compute_projects_to_test(modified_projects: Set[str], platform: str) -> Set[str]:
     projects_to_test = set()
     for modified_project in modified_projects:
+        # Skip all projects where we cannot run tests.
+        if modified_project not in PROJECT_CHECK_TARGETS:
+            continue
         if modified_project in RUNTIMES:
             continue
-        # Skip all projects where we cannot run tests.
-        if modified_project in PROJECT_CHECK_TARGETS:
-            projects_to_test.add(modified_project)
+        projects_to_test.add(modified_project)
         if modified_project not in DEPENDENTS_TO_TEST:
             continue
         for dependent_project in DEPENDENTS_TO_TEST[modified_project]:
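
The reordering in `_compute_projects_to_test` is a behavior change, not just a cleanup: under the old order, a modified project without a check target could still pull its dependents into the test set, while the new early `continue` skips such projects entirely. A minimal, self-contained sketch of the two behaviors (toy project names and maps, not the real tables from `compute_projects.py`):

```python
# Toy reproduction of the old vs. new skip logic; names and maps below are
# illustrative only, not the real tables from compute_projects.py.
PROJECT_CHECK_TARGETS = {"clang": "check-clang", "lld": "check-lld"}
RUNTIMES = {"libcxx"}
DEPENDENTS_TO_TEST = {"untestable-project": {"clang"}}


def old_logic(modified_projects):
    projects_to_test = set()
    for p in modified_projects:
        if p in RUNTIMES:
            continue
        if p in PROJECT_CHECK_TARGETS:
            projects_to_test.add(p)
        # Dependents were still processed even when p had no check target.
        for dep in DEPENDENTS_TO_TEST.get(p, ()):
            projects_to_test.add(dep)
    return projects_to_test


def new_logic(modified_projects):
    projects_to_test = set()
    for p in modified_projects:
        # Skip all projects where we cannot run tests -- dependents included.
        if p not in PROJECT_CHECK_TARGETS:
            continue
        if p in RUNTIMES:
            continue
        projects_to_test.add(p)
        for dep in DEPENDENTS_TO_TEST.get(p, ()):
            projects_to_test.add(dep)
    return projects_to_test


print(old_logic({"untestable-project"}))  # {'clang'}
print(new_logic({"untestable-project"}))  # set()
```

Together with the removal of the `.ci` entry from the dependents map above, changes to projects that cannot be tested no longer fan out into dependent test runs.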
17 changes: 0 additions & 17 deletions .ci/compute_projects_test.py
@@ -188,23 +188,6 @@ def test_exclude_gn(self):
         self.assertEqual(env_variables["runtimes_to_build"], "")
         self.assertEqual(env_variables["runtimes_check_targets"], "")

-    def test_ci(self):
-        env_variables = compute_projects.get_env_variables(
-            [".ci/compute_projects.py"], "Linux"
-        )
-        self.assertEqual(env_variables["projects_to_build"], "clang;lld;llvm;lldb")
-        self.assertEqual(
-            env_variables["project_check_targets"],
-            "check-clang check-lld check-llvm check-lldb",
-        )
-        self.assertEqual(
-            env_variables["runtimes_to_build"], "libcxx;libcxxabi;libunwind"
-        )
-        self.assertEqual(
-            env_variables["runtimes_check_targets"],
-            "check-cxx check-cxxabi check-unwind",
-        )
-

 if __name__ == "__main__":
     unittest.main()
2 changes: 1 addition & 1 deletion .ci/generate_test_report_lib.py
@@ -92,7 +92,7 @@ def plural(num_tests):
             ]
         )
     elif failures:
-        report.extend(["", "## Failed Tests", "(click on a test name to see its output)"])
+        report.extend(["", "## Failed Tests", "(click to see output)"])

     for testsuite_name, failures in failures.items():
         report.extend(["", f"### {testsuite_name}"])
7 changes: 2 additions & 5 deletions .ci/generate_test_report_lib_test.py
@@ -2,9 +2,6 @@
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

-# To run these tests:
-# python -m unittest generate_test_report_lib_test.py
-
 import unittest
 from io import StringIO
 from textwrap import dedent
@@ -153,7 +150,7 @@ def test_report_single_file_single_testsuite(self):
             * 2 tests failed

             ## Failed Tests
-            (click on a test name to see its output)
+            (click to see output)

             ### Bar
             <details>
@@ -185,7 +182,7 @@ def test_report_single_file_single_testsuite(self):
             * 2 tests failed

             ## Failed Tests
-            (click on a test name to see its output)
+            (click to see output)

             ### ABC
             <details>
13 changes: 3 additions & 10 deletions .ci/metrics/metrics.py
@@ -21,16 +21,16 @@
 # Lists the Github workflows we want to track. Maps the Github job name to
 # the metric name prefix in grafana.
 # This metric name is also used as a key in the job->name map.
-GITHUB_WORKFLOW_TO_TRACK = {"CI Checks": "github_llvm_premerge_checks"}
+GITHUB_WORKFLOW_TO_TRACK = {"LLVM Premerge Checks": "github_llvm_premerge_checks"}

 # Lists the Github jobs to track for a given workflow. The key is the stable
 # name (metric name) of the workflow (see GITHUB_WORKFLOW_TO_TRACK).
 # Each value is a map to link the github job name to the corresponding metric
 # name.
 GITHUB_JOB_TO_TRACK = {
     "github_llvm_premerge_checks": {
-        "Build and Test Linux (Test Only - Please Ignore Results)": "premerge_linux",
-        "Build and Test Windows (Test Only - Please Ignore Results)": "premerge_windows",
+        "Linux Premerge Checks (Test Only - Please Ignore Results)": "premerge_linux",
+        "Windows Premerge Checks (Test Only - Please Ignore Results)": "premerge_windows",
     }
 }

@@ -282,13 +282,6 @@ def github_get_metrics(
     queued_count = collections.Counter()
     running_count = collections.Counter()

-    # Initialize all the counters to 0 so we report 0 when no job is queued
-    # or running.
-    for wf_name, wf_metric_name in GITHUB_WORKFLOW_TO_TRACK.items():
-        for job_name, job_metric_name in GITHUB_JOB_TO_TRACK[wf_metric_name].items():
-            queued_count[wf_metric_name + "_" + job_metric_name] = 0
-            running_count[wf_metric_name + "_" + job_metric_name] = 0
-
     # The list of workflows this iteration will process.
     # MaxSize = GITHUB_WORKFLOWS_MAX_PROCESS_COUNT
     workflow_seen_as_completed = set()
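
One note on the deleted initialization loop: `collections.Counter` returns 0 when a missing key is looked up, but the key only appears in iteration once it has been set or incremented. So after this change, a job with nothing queued or running is simply absent from the reported counters rather than exported as 0. A standalone sketch of that behavior (the key name is taken from the diff for illustration):

```python
# collections.Counter returns 0 for a missing key, but the key only shows
# up in iteration (i.e., in what gets reported) once it is set or bumped.
import collections

queued_count = collections.Counter()
print(queued_count["premerge_linux"])  # 0 -- lookup alone does not add the key
print(list(queued_count.items()))      # [] -- so nothing would be reported

queued_count["premerge_linux"] += 1
print(list(queued_count.items()))      # [('premerge_linux', 1)]
```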