Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .cursor-server/data/User/History/3d3c3208/9HrO.py +124 -0
- .cursor-server/data/User/History/3d3c3208/DQ6P.py +132 -0
- .cursor-server/data/User/History/3d3c3208/GjuK.py +120 -0
- .cursor-server/data/User/History/3d3c3208/WH6U.py +132 -0
- .cursor-server/data/User/History/3d3c3208/ehn2.py +118 -0
- .cursor-server/data/User/History/3d3c3208/hGSb.py +117 -0
- .cursor-server/data/User/History/3d3c3208/zeaM.py +117 -0
- .cursor-server/data/User/globalStorage/github.vscode-pull-request-github/assignableUsers/yichuan520030910320/SPANN.json +1 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__clang_hip_stdlib.h +43 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__stdarg_header_macro.h +12 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__stdarg_va_copy.h +12 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__stdarg_va_list.h +13 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__stddef_max_align_t.h +27 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__stddef_null.h +29 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__stddef_unreachable.h +21 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__stddef_wchar_t.h +28 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__wmmintrin_pclmul.h +48 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/adcintrin.h +160 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/amxcomplexintrin.h +169 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/amxintrin.h +524 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/arm_cde.h +410 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/arm_mve.h +0 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/arm_sve.h +0 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx2intrin.h +0 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512bf16intrin.h +283 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512dqintrin.h +1379 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512vbmivlintrin.h +193 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512vlbf16intrin.h +517 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512vlcdintrin.h +230 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512vldqintrin.h +1173 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512vp2intersectintrin.h +78 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512vpopcntdqvlintrin.h +95 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/builtins.h +19 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/cmpccxaddintrin.h +70 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/cpuid.h +351 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/cuda_wrappers/algorithm +116 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/cuda_wrappers/complex +90 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/f16cintrin.h +162 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/hexagon_circ_brev_intrinsics.h +298 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/hexagon_protos.h +0 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/hvx_hexagon_protos.h +0 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/intrin0.h +247 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/keylockerintrin.h +527 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/lsxintrin.h +0 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/lzcntintrin.h +104 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/movdirintrin.h +49 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/nmmintrin.h +20 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/opencl-c-base.h +829 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/openmp_wrappers/cmath +132 -0
- .cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/pkuintrin.h +34 -0
.cursor-server/data/User/History/3d3c3208/9HrO.py
ADDED
@@ -0,0 +1,124 @@
import re
import collections
import logging
from typing import List, Union, Dict, Optional
from dataclasses import dataclass, field, asdict
from datasets import load_dataset
import evaluate as hf_evaluate

logger = logging.getLogger(__name__)


class Filter:
    def remove_whitespace(self, text: str) -> str:
        """
        Remove whitespace from the text.
        To be aligned with the lm_eval/extraction.py:WhitespaceFilter
        """
        return text[1:] if text.startswith(" ") else text

    def take_first(self, text: str) -> str:
        """
        Take the first answer from the text.
        To be aligned with lm_eval/filters/selection.py:TakeFirstFilter
        """
        return text.split("\n")[0]

    def __call__(self, text: str) -> str:
        """
        To be aligned with nq_open.yaml:filter_list:
        - name: remove_whitespace
          filter:
          - function: remove_whitespace
          - function: take_first
        """
        return self.take_first(self.remove_whitespace(text))


@dataclass
class EvaluationConfig:
    ignore_case: bool = True
    ignore_punctuation: bool = True
    regexes_to_ignore: List[str] = field(
        default_factory=lambda: ["\\b(?:The |the |An |A |The |a |an )"]
    )


class Evaluator:
    def __init__(
        self,
        dataset_name: str = "nq_open",
        metric_name: str = "exact_match",
        aggreg_name: str = "mean",
        filter: Optional[Filter] = None,
        config: Optional[EvaluationConfig] = None,
    ):
        """
        Initialize evaluator with dataset and config.

        Args:
            dataset_name: Name of the dataset to load
            metric_name: Metric to use
            aggreg_name: Aggregation method to use
            filter: Filter to apply
            config: Evaluation configuration
        """
        self.dataset = load_dataset(dataset_name, split="validation")
        self.config = config or EvaluationConfig()
        self.evaluator = hf_evaluate.load(metric_name).compute
        self.metric_name = metric_name
        self.aggreg_name = aggreg_name

        self.filter = filter if filter is not None else Filter()

    def _get_gold(self, query: str) -> List[str]:
        matches = [item for item in self.dataset if item["question"] == query]
        if not matches:
            logger.warning(f"Query not found in dataset: {query}")
            return []
        return matches[0]["answer"]

    def evaluate_single(self, query: str, prediction: str) -> float:
        gold = self._get_gold(query)
        result = [prediction for _ in range(len(gold))]
        scores: dict[str, float] = self.evaluator(
            references=gold,
            predictions=result,
            **asdict(self.config),
        )
        return float(scores[self.metric_name] > 0.0)

    def evaluate_batch(self, predictions: Dict[str, str]) -> Dict[str, float]:
        """
        Evaluate a batch of predictions. Aggregate scores by self.aggreg_name.
        """
        assert self.aggreg_name == "mean"  # ! Currently only support mean aggregation

        all_scores = {self.metric_name: []}

        for query, prediction in predictions.items():
            score = self.evaluate_single(query, prediction)
            all_scores[self.metric_name].append(score)

        # Calculate averages
        return {
            self.metric_name: sum(all_scores[self.metric_name])
            / len(all_scores[self.metric_name])
            if all_scores[self.metric_name]
            else 0.0
        }


# Example usage:
if __name__ == "__main__":
    # Initialize evaluator with a dataset
    evaluator = Evaluator("nq_open")  # or whatever dataset you're using

    # Example evaluation
    query = "when was the last time anyone was on the moon"
    prediction = "December 1972"

    result = evaluator.evaluate_single(query, prediction)
    print(f"Query: {query}")
    print(f"Prediction: {prediction}")
    print(f"Result: {result}")
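A note on the evaluate_single trick in this snapshot: duplicating the prediction once per gold answer turns the corpus-level exact_match metric from the Hugging Face evaluate library into an any-match check, since the metric scores each (reference, prediction) pair and any positive mean means at least one gold answer matched. A minimal standalone sketch of that reduction (the gold answers below are illustrative, not taken from the dataset):

import evaluate as hf_evaluate

exact_match = hf_evaluate.load("exact_match")

gold = ["December 1972", "Dec 1972"]  # hypothetical gold answers
prediction = "december 1972."

# Duplicate the prediction once per reference, then score pairwise.
scores = exact_match.compute(
    references=gold,
    predictions=[prediction] * len(gold),
    ignore_case=True,
    ignore_punctuation=True,
)
# scores["exact_match"] is the fraction of references matched;
# any positive value means at least one gold answer was hit.
print(float(scores["exact_match"] > 0.0))  # 1.0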
.cursor-server/data/User/History/3d3c3208/DQ6P.py
ADDED
@@ -0,0 +1,132 @@
import math
import logging
from typing import List, Dict, Optional, Sequence
from dataclasses import dataclass, field, asdict
from datasets import load_dataset
import evaluate as hf_evaluate

logger = logging.getLogger(__name__)


class Filter:
    def remove_whitespace(self, text: str) -> str:
        """
        Remove whitespace from the text.
        To be aligned with the lm_eval/extraction.py:WhitespaceFilter
        """
        return text[1:] if text.startswith(" ") else text

    def __call__(self, text: str) -> str:
        """
        To be aligned with nq_open.yaml:filter_list:
        - name: remove_whitespace
          filter:
          - function: remove_whitespace
          - function: take_first
        """
        # ! Here, we simply ignore the take_first filter
        # ! because we assume the prediction is just a single string
        return self.remove_whitespace(text)


@dataclass
class EvaluationConfig:
    ignore_case: bool = True
    ignore_punctuation: bool = True
    regexes_to_ignore: List[str] = field(
        default_factory=lambda: ["\\b(?:The |the |An |A |The |a |an )"]
    )


def mean(arr: Sequence[float]) -> float:
    return sum(arr) / len(arr)


def sample_stddev(arr: Sequence[float]) -> float:
    mu = mean(arr)
    return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / (len(arr) - 1))


def mean_stderr(arr):
    return sample_stddev(arr) / math.sqrt(len(arr))


class Evaluator:
    def __init__(
        self,
        dataset_name: str = "nq_open",
        metric_name: str = "exact_match",
        aggreg_name: str = "mean",
        filter: Optional[Filter] = None,
        config: Optional[EvaluationConfig] = None,
    ):
        """
        Initialize evaluator with dataset and config.

        Args:
            dataset_name: Name of the dataset to load
            metric_name: Metric to use
            aggreg_name: Aggregation method to use
            filter: Filter to apply
            config: Evaluation configuration
        """
        self.dataset = load_dataset(dataset_name, split="validation")
        self.config = config or EvaluationConfig()
        self.evaluator = hf_evaluate.load(metric_name).compute
        self.metric_name = metric_name
        self.aggreg_name = aggreg_name

        self.filter = filter if filter is not None else Filter()

        # ! Could become very large for a lot of questions
        # ! Maybe we should pass gold value along with the query.
        self._question_to_answer = {
            item["question"]: item["answer"] for item in self.dataset
        }

    def _get_gold(self, query: str) -> List[str]:
        return self._question_to_answer[query]

    def evaluate_single(self, query: str, prediction: str) -> float:
        # ! This is a hack to get the original query from the prompt, see the comment on self._question_to_answer
        query = query.split("\n\nQ: ")[-1].split("?\nA:")[0]
        gold = self._get_gold(query)
        result = [self.filter(prediction) for _ in range(len(gold))]
        scores = self.evaluator(
            references=gold,
            predictions=result,
            **asdict(self.config),
        )
        return float(scores[self.metric_name] > 0.0)

    def evaluate_batch(self, predictions: Dict[str, str]) -> Dict[str, float]:
        """
        Evaluate a batch of predictions. Aggregate scores by self.aggreg_name.
        """
        assert self.aggreg_name == "mean"  # ! Currently only support mean aggregation

        all_scores = {self.metric_name: []}

        for query, prediction in predictions.items():
            score = self.evaluate_single(query, prediction)
            all_scores[self.metric_name].append(score)

        # Calculate averages
        return {
            self.metric_name: mean(all_scores[self.metric_name]),
            f"{self.metric_name}_stderr": mean_stderr(all_scores[self.metric_name]),
        }


# Example usage:
if __name__ == "__main__":
    # Initialize evaluator with a dataset
    evaluator = Evaluator("nq_open")  # or whatever dataset you're using

    # Example evaluation
    query = "when was the last time anyone was on the moon"
    prediction = " December 1972"
    result = evaluator.evaluate_single(query, prediction)
    print(f"Query: {query}")
    print(f"Prediction: {prediction}")
    print(f"Result: {result}")
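This revision adds a standard error next to the mean, lm-eval-harness-style. A quick worked check of the mean / sample_stddev / mean_stderr helpers above on a toy score vector (the scores are made up):

import math

scores = [1.0, 0.0, 1.0, 1.0]  # per-question 0/1 exact-match scores (made up)

mu = sum(scores) / len(scores)  # mean = 0.75
var = sum((x - mu) ** 2 for x in scores) / (len(scores) - 1)  # sample variance = 0.25
stderr = math.sqrt(var) / math.sqrt(len(scores))  # 0.5 / 2 = 0.25

print(mu, stderr)  # 0.75 0.25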
.cursor-server/data/User/History/3d3c3208/GjuK.py
ADDED
@@ -0,0 +1,120 @@
import re
import collections
import logging
from typing import List, Union, Dict, Optional
from dataclasses import dataclass, field, asdict
from datasets import load_dataset
import evaluate as hf_evaluate

logger = logging.getLogger(__name__)


class Filter:
    def remove_whitespace(self, text: str) -> str:
        """
        Remove whitespace from the text.
        To be aligned with the lm_eval/extraction.py:WhitespaceFilter
        """
        return text[1:] if text.startswith(" ") else text

    def __call__(self, text: str) -> str:
        """
        To be aligned with nq_open.yaml:filter_list:
        - name: remove_whitespace
          filter:
          - function: remove_whitespace
          - function: take_first
        """
        # ! Here, we simply ignore the take_first filter
        # ! because we assume the prediction is just a single string
        return self.remove_whitespace(text)


@dataclass
class EvaluationConfig:
    ignore_case: bool = True
    ignore_punctuation: bool = True
    regexes_to_ignore: List[str] = field(
        default_factory=lambda: ["\\b(?:The |the |An |A |The |a |an )"]
    )


class Evaluator:
    def __init__(
        self,
        dataset_name: str = "nq_open",
        metric_name: str = "exact_match",
        aggreg_name: str = "mean",
        filter: Optional[Filter] = None,
        config: Optional[EvaluationConfig] = None,
    ):
        """
        Initialize evaluator with dataset and config.

        Args:
            dataset_name: Name of the dataset to load
            metric_name: Metric to use
            aggreg_name: Aggregation method to use
            filter: Filter to apply
            config: Evaluation configuration
        """
        self.dataset = load_dataset(dataset_name, split="validation")
        self.config = config or EvaluationConfig()
        self.evaluator = hf_evaluate.load(metric_name).compute
        self.metric_name = metric_name
        self.aggreg_name = aggreg_name

        self.filter = filter if filter is not None else Filter()

        # ! Could become very large for a lot of questions
        # ! Maybe we should pass gold value along with the query.
        self.question_to_answer = {
            item["question"]: item["answer"] for item in self.dataset
        }

    def _get_gold(self, query: str) -> List[str]:
        return self.question_to_answer[query]

    def evaluate_single(self, query: str, prediction: str) -> float:
        gold = self._get_gold(query)
        result = [self.filter(prediction) for _ in range(len(gold))]
        scores = self.evaluator(
            references=gold,
            predictions=result,
            **asdict(self.config),
        )
        return float(scores[self.metric_name] > 0.0)

    def evaluate_batch(self, predictions: Dict[str, str]) -> Dict[str, float]:
        """
        Evaluate a batch of predictions. Aggregate scores by self.aggreg_name.
        """
        assert self.aggreg_name == "mean"  # ! Currently only support mean aggregation

        all_scores = {self.metric_name: []}

        for query, prediction in predictions.items():
            score = self.evaluate_single(query, prediction)
            all_scores[self.metric_name].append(score)

        # Calculate averages
        return {
            self.metric_name: sum(all_scores[self.metric_name])
            / len(all_scores[self.metric_name])
            if all_scores[self.metric_name]
            else 0.0
        }


# Example usage:
if __name__ == "__main__":
    # Initialize evaluator with a dataset
    evaluator = Evaluator("nq_open")  # or whatever dataset you're using

    # Example evaluation
    query = "when was the last time anyone was on the moon"
    prediction = " December 1972"
    result = evaluator.evaluate_single(query, prediction)
    print(f"Query: {query}")
    print(f"Prediction: {prediction}")
    print(f"Result: {result}")
.cursor-server/data/User/History/3d3c3208/WH6U.py
ADDED
@@ -0,0 +1,132 @@
import re
import collections
import logging
from typing import List, Union, Dict, Optional
from dataclasses import dataclass, field, asdict
from datasets import load_dataset
import evaluate as hf_evaluate

logger = logging.getLogger(__name__)


class Filter:
    def remove_whitespace(self, text: str) -> str:
        """
        Remove whitespace from the text.
        To be aligned with the lm_eval/extraction.py:WhitespaceFilter
        """
        return text[1:] if text.startswith(" ") else text

    def take_first(self, text: str) -> str:
        """
        Take the first answer from the text.
        To be aligned with lm_eval/filters/selection.py:TakeFirstFilter
        """
        return text.split("\n")[0]

    def __call__(self, text: str) -> str:
        """
        To be aligned with nq_open.yaml:filter_list:
        - name: remove_whitespace
          filter:
          - function: remove_whitespace
          - function: take_first
        """
        return self.take_first(self.remove_whitespace(text))


@dataclass
class EvaluationConfig:
    ignore_case: bool = True
    ignore_punctuation: bool = True
    regexes_to_ignore: List[str] = field(
        default_factory=lambda: ["\\b(?:The |the |An |A |The |a |an )"]
    )


class Evaluator:
    def __init__(
        self,
        dataset_name: str = "nq_open",
        metric_name: str = "exact_match",
        aggreg_name: str = "mean",
        filter: Optional[Filter] = None,
        config: Optional[EvaluationConfig] = None,
    ):
        """
        Initialize evaluator with dataset and config.

        Args:
            dataset_name: Name of the dataset to load
            metric_name: Metric to use
            aggreg_name: Aggregation method to use
            filter: Filter to apply
            config: Evaluation configuration
        """
        self.dataset = load_dataset(dataset_name, split="validation")
        self.config = config or EvaluationConfig()
        self.evaluator = hf_evaluate.load(metric_name).compute
        self.metric_name = metric_name

        self.filter = filter if filter is not None else Filter()

    def _get_gold(self, query: str) -> List[str]:
        matches = [item for item in self.dataset if item["question"] == query]
        if not matches:
            logger.warning(f"Query not found in dataset: {query}")
            return []
        return matches[0]["answer"]

    def evaluate_single(self, query: str, prediction: str) -> float:
        """
        Evaluate a single prediction against the dataset answers.

        Args:
            query: The question being answered
            prediction: The model's predicted answer

        Returns:
            1.0 if the prediction matches any gold answer, else 0.0
        """
        gold = self._get_gold(query)
        result = [prediction for _ in range(len(gold))]
        scores = self.evaluator(
            references=gold,
            predictions=result,
            **asdict(self.config),
        )
        return float(scores[self.metric_name] > 0.0)

    def evaluate_batch(self, predictions: Dict[str, str]) -> Dict[str, float]:
        """
        Evaluate a batch of predictions.

        Args:
            predictions: Dictionary mapping queries to predicted answers

        Returns:
            Dictionary mapping metric names to average scores
        """
        all_scores = {self.metric_name: []}

        for query, prediction in predictions.items():
            score = self.evaluate_single(query, prediction)
            all_scores[self.metric_name].append(score)

        # Calculate averages
        scores = all_scores[self.metric_name]
        return {self.metric_name: sum(scores) / len(scores) if scores else 0.0}


# Example usage:
if __name__ == "__main__":
    # Initialize evaluator with a dataset
    evaluator = Evaluator("nq_open")  # or whatever dataset you're using

    # Example evaluation
    query = "when was the last time anyone was on the moon"
    prediction = "December 1972"

    result = evaluator.evaluate_single(query, prediction)
    print(f"Query: {query}")
    print(f"Prediction: {prediction}")
    print(f"Result: {result}")
.cursor-server/data/User/History/3d3c3208/ehn2.py
ADDED
@@ -0,0 +1,118 @@
import re
import collections
import logging
from typing import List, Union, Dict, Optional
from dataclasses import dataclass, field, asdict
from datasets import load_dataset
import evaluate as hf_evaluate

logger = logging.getLogger(__name__)


class Filter:
    def remove_whitespace(self, text: str) -> str:
        """
        Remove whitespace from the text.
        To be aligned with the lm_eval/extraction.py:WhitespaceFilter
        """
        return text[1:] if text.startswith(" ") else text

    def __call__(self, text: str) -> str:
        """
        To be aligned with nq_open.yaml:filter_list:
        - name: remove_whitespace
          filter:
          - function: remove_whitespace
          - function: take_first
        """
        # ! Here, we simply ignore the take_first filter
        # ! because we assume the prediction is just a single string
        return self.remove_whitespace(text)


@dataclass
class EvaluationConfig:
    ignore_case: bool = True
    ignore_punctuation: bool = True
    regexes_to_ignore: List[str] = field(
        default_factory=lambda: ["\\b(?:The |the |An |A |The |a |an )"]
    )


class Evaluator:
    def __init__(
        self,
        dataset_name: str = "nq_open",
        metric_name: str = "exact_match",
        aggreg_name: str = "mean",
        filter: Optional[Filter] = None,
        config: Optional[EvaluationConfig] = None,
    ):
        """
        Initialize evaluator with dataset and config.

        Args:
            dataset_name: Name of the dataset to load
            metric_name: Metric to use
            aggreg_name: Aggregation method to use
            filter: Filter to apply
            config: Evaluation configuration
        """
        self.dataset = load_dataset(dataset_name, split="validation")
        self.config = config or EvaluationConfig()
        self.evaluator = hf_evaluate.load(metric_name).compute
        self.metric_name = metric_name
        self.aggreg_name = aggreg_name

        self.filter = filter if filter is not None else Filter()

    def _get_gold(self, query: str) -> List[str]:
        matches = [item for item in self.dataset if item["question"] == query]
        if not matches:
            logger.warning(f"Query not found in dataset: {query}")
            return []
        return matches[0]["answer"]

    def evaluate_single(self, query: str, prediction: str) -> float:
        gold = self._get_gold(query)
        result = [self.filter(prediction) for _ in range(len(gold))]
        scores = self.evaluator(
            references=gold,
            predictions=result,
            **asdict(self.config),
        )
        return float(scores[self.metric_name] > 0.0)

    def evaluate_batch(self, predictions: Dict[str, str]) -> Dict[str, float]:
        """
        Evaluate a batch of predictions. Aggregate scores by self.aggreg_name.
        """
        assert self.aggreg_name == "mean"  # ! Currently only support mean aggregation

        all_scores = {self.metric_name: []}

        for query, prediction in predictions.items():
            score = self.evaluate_single(query, prediction)
            all_scores[self.metric_name].append(score)

        # Calculate averages
        return {
            self.metric_name: sum(all_scores[self.metric_name])
            / len(all_scores[self.metric_name])
            if all_scores[self.metric_name]
            else 0.0
        }


# Example usage:
if __name__ == "__main__":
    # Initialize evaluator with a dataset
    evaluator = Evaluator("nq_open")  # or whatever dataset you're using

    # Example evaluation
    query = "when was the last time anyone was on the moon"
    prediction = " December 1972"
    result = evaluator.evaluate_single(query, prediction)
    print(f"Query: {query}")
    print(f"Prediction: {prediction}")
    print(f"Result: {result}")
.cursor-server/data/User/History/3d3c3208/hGSb.py
ADDED
@@ -0,0 +1,117 @@
import re
import collections
import logging
from typing import List, Union, Dict, Optional
from dataclasses import dataclass, field, asdict
from datasets import load_dataset
import evaluate as hf_evaluate

logger = logging.getLogger(__name__)


class Filter:
    def remove_whitespace(self, text: str) -> str:
        """
        Remove whitespace from the text.
        To be aligned with the lm_eval/extraction.py:WhitespaceFilter
        """
        return text[1:] if text.startswith(" ") else text

    def __call__(self, text: str) -> str:
        """
        To be aligned with nq_open.yaml:filter_list:
        - name: remove_whitespace
          filter:
          - function: remove_whitespace
          - function: take_first
        """
        return self.remove_whitespace(text)


@dataclass
class EvaluationConfig:
    ignore_case: bool = True
    ignore_punctuation: bool = True
    regexes_to_ignore: List[str] = field(
        default_factory=lambda: ["\\b(?:The |the |An |A |The |a |an )"]
    )


class Evaluator:
    def __init__(
        self,
        dataset_name: str = "nq_open",
        metric_name: str = "exact_match",
        aggreg_name: str = "mean",
        filter: Optional[Filter] = None,
        config: Optional[EvaluationConfig] = None,
    ):
        """
        Initialize evaluator with dataset and config.

        Args:
            dataset_name: Name of the dataset to load
            metric_name: Metric to use
            aggreg_name: Aggregation method to use
            filter: Filter to apply
            config: Evaluation configuration
        """
        self.dataset = load_dataset(dataset_name, split="validation")
        self.config = config or EvaluationConfig()
        self.evaluator = hf_evaluate.load(metric_name).compute
        self.metric_name = metric_name
        self.aggreg_name = aggreg_name

        self.filter = filter if filter is not None else Filter()

    def _get_gold(self, query: str) -> List[str]:
        matches = [item for item in self.dataset if item["question"] == query]
        if not matches:
            logger.warning(f"Query not found in dataset: {query}")
            return []
        return matches[0]["answer"]

    def evaluate_single(self, query: str, prediction: str) -> float:
        gold = self._get_gold(query)
        result = [self.filter(prediction) for _ in range(len(gold))]
        scores = self.evaluator(
            references=gold,
            predictions=result,
            **asdict(self.config),
        )
        return float(scores[self.metric_name] > 0.0)

    def evaluate_batch(self, predictions: Dict[str, str]) -> Dict[str, float]:
        """
        Evaluate a batch of predictions. Aggregate scores by self.aggreg_name.
        """
        assert self.aggreg_name == "mean"  # ! Currently only support mean aggregation

        all_scores = {self.metric_name: []}

        for query, prediction in predictions.items():
            score = self.evaluate_single(query, prediction)
            all_scores[self.metric_name].append(score)

        # Calculate averages
        return {
            self.metric_name: sum(all_scores[self.metric_name])
            / len(all_scores[self.metric_name])
            if all_scores[self.metric_name]
            else 0.0
        }


# Example usage:
if __name__ == "__main__":
    # Initialize evaluator with a dataset
    evaluator = Evaluator("nq_open")  # or whatever dataset you're using

    # Example evaluation
    query = "when was the last time anyone was on the moon"
    prediction = " December 1972"

    result = evaluator.evaluate_single(query, prediction)
    print(f"Query: {query}")
    print(f"Prediction: {prediction}")
    print(f"Result: {result}")
.cursor-server/data/User/History/3d3c3208/zeaM.py
ADDED
@@ -0,0 +1,117 @@
import re
import collections
import logging
from typing import List, Union, Dict, Optional
from dataclasses import dataclass, field, asdict
from datasets import load_dataset
import evaluate as hf_evaluate

logger = logging.getLogger(__name__)


class Filter:
    def remove_whitespace(self, text: str) -> str:
        """
        Remove whitespace from the text.
        To be aligned with the lm_eval/extraction.py:WhitespaceFilter
        """
        return text[1:] if text.startswith(" ") else text

    def take_first(self, text: str) -> str:
        """
        Take the first answer from the text.
        To be aligned with lm_eval/filters/selection.py:TakeFirstFilter
        """
        return text.split("\n")[0]

    def __call__(self, text: str) -> str:
        """
        To be aligned with nq_open.yaml:filter_list:
        - name: remove_whitespace
          filter:
          - function: remove_whitespace
          - function: take_first
        """
        return self.take_first(self.remove_whitespace(text))


@dataclass
class EvaluationConfig:
    ignore_case: bool = True
    ignore_punctuation: bool = True
    regexes_to_ignore: List[str] = field(
        default_factory=lambda: ["\\b(?:The |the |An |A |The |a |an )"]
    )


class Evaluator:
    def __init__(
        self,
        dataset_name: str = "nq_open",
        metric_name: str = "exact_match",
        aggreg_name: str = "mean",
        filter: Optional[Filter] = None,
        config: Optional[EvaluationConfig] = None,
    ):
        """
        Initialize evaluator with dataset and config.

        Args:
            dataset_name: Name of the dataset to load
            metric_name: Metric to use
            aggreg_name: Aggregation method to use
            filter: Filter to apply
            config: Evaluation configuration
        """
        self.dataset = load_dataset(dataset_name, split="validation")
        self.config = config or EvaluationConfig()
        self.evaluator = hf_evaluate.load(metric_name).compute
        self.metric_name = metric_name
        self.aggreg_name = aggreg_name

        self.filter = filter if filter is not None else Filter()

    def _get_gold(self, query: str) -> List[str]:
        matches = [item for item in self.dataset if item["question"] == query]
        if not matches:
            logger.warning(f"Query not found in dataset: {query}")
            return []
        return matches[0]["answer"]

    def evaluate_single(self, query: str, prediction: str) -> float:
        gold = self._get_gold(query)
        result = [prediction for _ in range(len(gold))]
        scores = self.evaluator(
            references=gold,
            predictions=result,
            **asdict(self.config),
        )
        return float(scores[self.metric_name] > 0.0)

    def evaluate_batch(self, predictions: Dict[str, str]) -> Dict[str, float]:
        """
        Evaluate a batch of predictions. Aggregate scores by self.aggreg_name.
        """
        all_scores = {self.metric_name: []}

        for query, prediction in predictions.items():
            score = self.evaluate_single(query, prediction)
            all_scores[self.metric_name].append(score)

        # Calculate averages
        scores = all_scores[self.metric_name]
        return {self.metric_name: sum(scores) / len(scores) if scores else 0.0}


# Example usage:
if __name__ == "__main__":
    # Initialize evaluator with a dataset
    evaluator = Evaluator("nq_open")  # or whatever dataset you're using

    # Example evaluation
    query = "when was the last time anyone was on the moon"
    prediction = "December 1972"

    result = evaluator.evaluate_single(query, prediction)
    print(f"Query: {query}")
    print(f"Prediction: {prediction}")
    print(f"Result: {result}")
.cursor-server/data/User/globalStorage/github.vscode-pull-request-github/assignableUsers/yichuan520030910320/SPANN.json
ADDED
@@ -0,0 +1 @@
[{"login":"andylizf","avatarUrl":"https://avatars.githubusercontent.com/u/28052536?u=ac7b36b3d24d1cabfc56d1de85dbb44bf6511af1&v=4","name":"Andy Lee","url":"https://github.com/andylizf","email":"[email protected]","id":"MDQ6VXNlcjI4MDUyNTM2"},{"login":"yichuan520030910320","avatarUrl":"https://avatars.githubusercontent.com/u/73766326?u=f41e0289b78050f03c52b4a1127697eac8cdef9b&v=4","name":"yichuan","url":"https://github.com/yichuan520030910320","email":"","id":"MDQ6VXNlcjczNzY2MzI2"}]
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__clang_hip_stdlib.h
ADDED
@@ -0,0 +1,43 @@
/*===---- __clang_hip_stdlib.h - Device-side HIP math support --------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __CLANG_HIP_STDLIB_H__

#if !defined(__HIP__) && !defined(__OPENMP_AMDGCN__)
#error "This file is for HIP and OpenMP AMDGCN device compilation only."
#endif

#if !defined(__cplusplus)

#include <limits.h>

#ifdef __OPENMP_AMDGCN__
#define __DEVICE__ static inline __attribute__((always_inline, nothrow))
#else
#define __DEVICE__ static __device__ inline __attribute__((always_inline))
#endif

__DEVICE__
int abs(int __x) {
  int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
  return (__x ^ __sgn) - __sgn;
}
__DEVICE__
long labs(long __x) {
  long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
  return (__x ^ __sgn) - __sgn;
}
__DEVICE__
long long llabs(long long __x) {
  long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
  return (__x ^ __sgn) - __sgn;
}

#endif // !defined(__cplusplus)

#endif // #define __CLANG_HIP_STDLIB_H__
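The branchless abs implementations in this header rely on an arithmetic right shift replicating the sign bit: __sgn is 0 for non-negative inputs and all-ones (-1) for negative ones, so (x ^ sgn) - sgn evaluates to x unchanged, or to ~x + 1 = -x. A quick check of the identity (a Python sketch; Python's >> on negative ints is also arithmetic):

def branchless_abs32(x: int) -> int:
    sgn = x >> 31  # 0 for x >= 0, -1 (all ones) for x < 0
    return (x ^ sgn) - sgn  # x unchanged, or ~x + 1 == -x

for v in (5, -5, 0, -(2**31) + 1):
    assert branchless_abs32(v) == abs(v)
print("ok")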
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__stdarg_header_macro.h
ADDED
@@ -0,0 +1,12 @@
/*===---- __stdarg_header_macro.h ------------------------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __STDARG_H
#define __STDARG_H
#endif
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__stdarg_va_copy.h
ADDED
@@ -0,0 +1,12 @@
/*===---- __stdarg_va_copy.h - Definition of va_copy------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef va_copy
#define va_copy(dest, src) __builtin_va_copy(dest, src)
#endif
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__stdarg_va_list.h
ADDED
@@ -0,0 +1,13 @@
/*===---- __stdarg_va_list.h - Definition of va_list -----------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef _VA_LIST
#define _VA_LIST
typedef __builtin_va_list va_list;
#endif
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__stddef_max_align_t.h
ADDED
@@ -0,0 +1,27 @@
/*===---- __stddef_max_align_t.h - Definition of max_align_t ---------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __CLANG_MAX_ALIGN_T_DEFINED
#define __CLANG_MAX_ALIGN_T_DEFINED

#if defined(_MSC_VER)
typedef double max_align_t;
#elif defined(__APPLE__)
typedef long double max_align_t;
#else
// Define 'max_align_t' to match the GCC definition.
typedef struct {
  long long __clang_max_align_nonce1
      __attribute__((__aligned__(__alignof__(long long))));
  long double __clang_max_align_nonce2
      __attribute__((__aligned__(__alignof__(long double))));
} max_align_t;
#endif

#endif
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__stddef_null.h
ADDED
@@ -0,0 +1,29 @@
/*===---- __stddef_null.h - Definition of NULL -----------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#if !defined(NULL) || !__building_module(_Builtin_stddef)

/* linux/stddef.h will define NULL to 0. glibc (and other) headers then define
 * __need_NULL and rely on stddef.h to redefine NULL to the correct value again.
 * Modules don't support redefining macros like that, but support that pattern
 * in the non-modules case.
 */
#undef NULL

#ifdef __cplusplus
#if !defined(__MINGW32__) && !defined(_MSC_VER)
#define NULL __null
#else
#define NULL 0
#endif
#else
#define NULL ((void*)0)
#endif

#endif
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__stddef_unreachable.h
ADDED
@@ -0,0 +1,21 @@
/*===---- __stddef_unreachable.h - Definition of unreachable ---------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __cplusplus

/*
 * When -fbuiltin-headers-in-system-modules is set this is a non-modular header
 * and needs to behave as if it was textual.
 */
#if !defined(unreachable) || \
    (__has_feature(modules) && !__building_module(_Builtin_stddef))
#define unreachable() __builtin_unreachable()
#endif

#endif
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__stddef_wchar_t.h
ADDED
@@ -0,0 +1,28 @@
/*===---- __stddef_wchar.h - Definition of wchar_t -------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#if !defined(__cplusplus) || (defined(_MSC_VER) && !_NATIVE_WCHAR_T_DEFINED)

/*
 * When -fbuiltin-headers-in-system-modules is set this is a non-modular header
 * and needs to behave as if it was textual.
 */
#if !defined(_WCHAR_T) || \
    (__has_feature(modules) && !__building_module(_Builtin_stddef))
#define _WCHAR_T

#ifdef _MSC_EXTENSIONS
#define _WCHAR_T_DEFINED
#endif

typedef __WCHAR_TYPE__ wchar_t;

#endif

#endif
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/__wmmintrin_pclmul.h
ADDED
@@ -0,0 +1,48 @@
/*===---- __wmmintrin_pclmul.h - PCMUL intrinsics ---------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __WMMINTRIN_H
#error "Never use <__wmmintrin_pclmul.h> directly; include <wmmintrin.h> instead."
#endif

#ifndef __WMMINTRIN_PCLMUL_H
#define __WMMINTRIN_PCLMUL_H

/// Multiplies two 64-bit integer values, which are selected from source
/// operands using the immediate-value operand. The multiplication is a
/// carry-less multiplication, and the 128-bit integer product is stored in
/// the destination.
///
/// \headerfile <x86intrin.h>
///
/// \code
/// __m128i _mm_clmulepi64_si128(__m128i X, __m128i Y, const int I);
/// \endcode
///
/// This intrinsic corresponds to the <c> VPCLMULQDQ </c> instruction.
///
/// \param X
///    A 128-bit vector of [2 x i64] containing one of the source operands.
/// \param Y
///    A 128-bit vector of [2 x i64] containing one of the source operands.
/// \param I
///    An immediate value specifying which 64-bit values to select from the
///    operands. Bit 0 is used to select a value from operand \a X, and bit
///    4 is used to select a value from operand \a Y: \n
///    Bit[0]=0 indicates that bits[63:0] of operand \a X are used. \n
///    Bit[0]=1 indicates that bits[127:64] of operand \a X are used. \n
///    Bit[4]=0 indicates that bits[63:0] of operand \a Y are used. \n
///    Bit[4]=1 indicates that bits[127:64] of operand \a Y are used.
/// \returns The 128-bit integer vector containing the result of the carry-less
///    multiplication of the selected 64-bit values.
#define _mm_clmulepi64_si128(X, Y, I) \
  ((__m128i)__builtin_ia32_pclmulqdq128((__v2di)(__m128i)(X), \
                                        (__v2di)(__m128i)(Y), (char)(I)))

#endif /* __WMMINTRIN_PCLMUL_H */
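The doc comment above defines carry-less multiplication; the 128-bit product that _mm_clmulepi64_si128 computes over the selected 64-bit lanes can be reproduced bit by bit, XOR-ing shifted copies instead of adding them (a Python sketch for reference, not the intrinsic itself):

def clmul64(x: int, y: int) -> int:
    """Carry-less (GF(2)) multiply of two 64-bit values -> 128-bit result."""
    acc = 0
    for i in range(64):
        if (y >> i) & 1:
            acc ^= x << i  # XOR in place of ADD: no carries propagate
    return acc

# (x^2 + 1) * (x + 1) over GF(2) = x^3 + x^2 + x + 1
print(hex(clmul64(0b101, 0b11)))  # 0xf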
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/adcintrin.h
ADDED
@@ -0,0 +1,160 @@
/*===---- adcintrin.h - ADC intrinsics -------------------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __ADCINTRIN_H
#define __ADCINTRIN_H

#if !defined(__i386__) && !defined(__x86_64__)
#error "This header is only meant to be used on x86 and x64 architecture"
#endif

/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))

/* Use C++ inline semantics in C++, GNU inline for C mode. */
#if defined(__cplusplus)
#define __INLINE __inline
#else
#define __INLINE static __inline
#endif

#if defined(__cplusplus)
extern "C" {
#endif

/// Adds unsigned 32-bit integers \a __x and \a __y, plus 0 or 1 as indicated
///    by the carry flag \a __cf. Stores the unsigned 32-bit sum in the memory
///    at \a __p, and returns the 8-bit carry-out (carry flag).
///
/// \code{.operation}
/// temp := (__cf == 0) ? 0 : 1
/// Store32(__p, __x + __y + temp)
/// result := CF
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c ADC instruction.
///
/// \param __cf
///    The 8-bit unsigned carry flag; any non-zero value indicates carry.
/// \param __x
///    A 32-bit unsigned addend.
/// \param __y
///    A 32-bit unsigned addend.
/// \param __p
///    Pointer to memory for storing the sum.
/// \returns The 8-bit unsigned carry-out value.
__INLINE unsigned char __DEFAULT_FN_ATTRS _addcarry_u32(unsigned char __cf,
                                                        unsigned int __x,
                                                        unsigned int __y,
                                                        unsigned int *__p) {
  return __builtin_ia32_addcarryx_u32(__cf, __x, __y, __p);
}

/// Adds unsigned 32-bit integer \a __y to 0 or 1 as indicated by the carry
///    flag \a __cf, and subtracts the result from unsigned 32-bit integer
///    \a __x. Stores the unsigned 32-bit difference in the memory at \a __p,
///    and returns the 8-bit carry-out (carry or overflow flag).
///
/// \code{.operation}
/// temp := (__cf == 0) ? 0 : 1
/// Store32(__p, __x - (__y + temp))
/// result := CF
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c SBB instruction.
///
/// \param __cf
///    The 8-bit unsigned carry flag; any non-zero value indicates carry.
/// \param __x
///    The 32-bit unsigned minuend.
/// \param __y
///    The 32-bit unsigned subtrahend.
/// \param __p
///    Pointer to memory for storing the difference.
/// \returns The 8-bit unsigned carry-out value.
__INLINE unsigned char __DEFAULT_FN_ATTRS _subborrow_u32(unsigned char __cf,
                                                         unsigned int __x,
                                                         unsigned int __y,
                                                         unsigned int *__p) {
  return __builtin_ia32_subborrow_u32(__cf, __x, __y, __p);
}

#ifdef __x86_64__
/// Adds unsigned 64-bit integers \a __x and \a __y, plus 0 or 1 as indicated
///    by the carry flag \a __cf. Stores the unsigned 64-bit sum in the memory
///    at \a __p, and returns the 8-bit carry-out (carry flag).
///
/// \code{.operation}
/// temp := (__cf == 0) ? 0 : 1
/// Store64(__p, __x + __y + temp)
/// result := CF
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c ADC instruction.
///
/// \param __cf
///    The 8-bit unsigned carry flag; any non-zero value indicates carry.
/// \param __x
///    A 64-bit unsigned addend.
/// \param __y
///    A 64-bit unsigned addend.
/// \param __p
///    Pointer to memory for storing the sum.
/// \returns The 8-bit unsigned carry-out value.
__INLINE unsigned char __DEFAULT_FN_ATTRS
_addcarry_u64(unsigned char __cf, unsigned long long __x,
              unsigned long long __y, unsigned long long *__p) {
  return __builtin_ia32_addcarryx_u64(__cf, __x, __y, __p);
}

/// Adds unsigned 64-bit integer \a __y to 0 or 1 as indicated by the carry
///    flag \a __cf, and subtracts the result from unsigned 64-bit integer
///    \a __x. Stores the unsigned 64-bit difference in the memory at \a __p,
///    and returns the 8-bit carry-out (carry or overflow flag).
///
/// \code{.operation}
/// temp := (__cf == 0) ? 0 : 1
/// Store64(__p, __x - (__y + temp))
/// result := CF
/// \endcode
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c SBB instruction.
///
/// \param __cf
///    The 8-bit unsigned carry flag; any non-zero value indicates carry.
/// \param __x
///    The 64-bit unsigned minuend.
/// \param __y
///    The 64-bit unsigned subtrahend.
/// \param __p
///    Pointer to memory for storing the difference.
/// \returns The 8-bit unsigned carry-out value.
__INLINE unsigned char __DEFAULT_FN_ATTRS
_subborrow_u64(unsigned char __cf, unsigned long long __x,
               unsigned long long __y, unsigned long long *__p) {
  return __builtin_ia32_subborrow_u64(__cf, __x, __y, __p);
}
#endif

#if defined(__cplusplus)
}
#endif

#undef __INLINE
#undef __DEFAULT_FN_ATTRS

#endif /* __ADCINTRIN_H */
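The natural use of these intrinsics is multi-precision arithmetic: each limb's carry-out feeds the next limb's carry-in. A minimal sketch on x86-64 (operand values are illustrative):

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  /* Add two 128-bit numbers given as (hi, lo) 64-bit halves. */
  unsigned long long lo, hi;
  unsigned char c = _addcarry_u64(0, 0xFFFFFFFFFFFFFFFFULL, 1ULL, &lo);
  c = _addcarry_u64(c, 2ULL, 3ULL, &hi); /* carry from the low half */
  printf("hi=%llu lo=%llu carry=%u\n", hi, lo, c); /* hi=6 lo=0 carry=0 */
  return 0;
}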
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/amxcomplexintrin.h
ADDED
@@ -0,0 +1,169 @@
/*===--------- amxcomplexintrin.h - AMXCOMPLEX intrinsics -*- C++ -*---------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===------------------------------------------------------------------------===
 */

#ifndef __IMMINTRIN_H
#error "Never use <amxcomplexintrin.h> directly; include <immintrin.h> instead."
#endif // __IMMINTRIN_H

#ifndef __AMX_COMPLEXINTRIN_H
#define __AMX_COMPLEXINTRIN_H
#ifdef __x86_64__

#define __DEFAULT_FN_ATTRS_COMPLEX                                             \
  __attribute__((__always_inline__, __nodebug__, __target__("amx-complex")))

/// Perform matrix multiplication of two tiles containing complex elements and
/// accumulate the results into a packed single precision tile. Each dword
/// element in input tiles \a a and \a b is interpreted as a complex number
/// with FP16 real part and FP16 imaginary part.
/// Calculates the imaginary part of the result. For each possible combination
/// of (row of \a a, column of \a b), it performs a set of multiplication
/// and accumulations on all corresponding complex numbers (one from \a a
/// and one from \a b). The imaginary part of the \a a element is multiplied
/// with the real part of the corresponding \a b element, and the real part
/// of the \a a element is multiplied with the imaginary part of the
/// corresponding \a b elements. The two accumulated results are added, and
/// then accumulated into the corresponding row and column of \a dst.
///
/// \headerfile <x86intrin.h>
///
/// \code
/// void _tile_cmmimfp16ps(__tile dst, __tile a, __tile b);
/// \endcode
///
/// \code{.operation}
/// FOR m := 0 TO dst.rows - 1
///   tmp := dst.row[m]
///   FOR k := 0 TO (a.colsb / 4) - 1
///     FOR n := 0 TO (dst.colsb / 4) - 1
///       tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+1])
///       tmp.fp32[n] += FP32(a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+0])
///     ENDFOR
///   ENDFOR
///   write_row_and_zero(dst, m, tmp, dst.colsb)
/// ENDFOR
/// zero_upper_rows(dst, dst.rows)
/// zero_tileconfig_start()
/// \endcode
///
/// This intrinsic corresponds to the \c TCMMIMFP16PS instruction.
///
/// \param dst
///    The destination tile. Max size is 1024 Bytes.
/// \param a
///    The 1st source tile. Max size is 1024 Bytes.
/// \param b
///    The 2nd source tile. Max size is 1024 Bytes.
#define _tile_cmmimfp16ps(dst, a, b) __builtin_ia32_tcmmimfp16ps(dst, a, b)

/// Perform matrix multiplication of two tiles containing complex elements and
/// accumulate the results into a packed single precision tile. Each dword
/// element in input tiles \a a and \a b is interpreted as a complex number
/// with FP16 real part and FP16 imaginary part.
/// Calculates the real part of the result. For each possible combination
/// of (row of \a a, column of \a b), it performs a set of multiplication
/// and accumulations on all corresponding complex numbers (one from \a a
/// and one from \a b). The real part of the \a a element is multiplied
/// with the real part of the corresponding \a b element, and the negated
/// imaginary part of the \a a element is multiplied with the imaginary
/// part of the corresponding \a b elements. The two accumulated results
/// are added, and then accumulated into the corresponding row and column
/// of \a dst.
///
/// \headerfile <x86intrin.h>
///
/// \code
/// void _tile_cmmrlfp16ps(__tile dst, __tile a, __tile b);
/// \endcode
///
/// \code{.operation}
/// FOR m := 0 TO dst.rows - 1
///   tmp := dst.row[m]
///   FOR k := 0 TO (a.colsb / 4) - 1
///     FOR n := 0 TO (dst.colsb / 4) - 1
///       tmp.fp32[n] += FP32(a.row[m].fp16[2*k+0]) * FP32(b.row[k].fp16[2*n+0])
///       tmp.fp32[n] += FP32(-a.row[m].fp16[2*k+1]) * FP32(b.row[k].fp16[2*n+1])
///     ENDFOR
///   ENDFOR
///   write_row_and_zero(dst, m, tmp, dst.colsb)
/// ENDFOR
/// zero_upper_rows(dst, dst.rows)
/// zero_tileconfig_start()
/// \endcode
///
/// This intrinsic corresponds to the \c TCMMRLFP16PS instruction.
///
/// \param dst
///    The destination tile. Max size is 1024 Bytes.
/// \param a
///    The 1st source tile. Max size is 1024 Bytes.
/// \param b
///    The 2nd source tile. Max size is 1024 Bytes.
#define _tile_cmmrlfp16ps(dst, a, b) __builtin_ia32_tcmmrlfp16ps(dst, a, b)

static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX
_tile_cmmimfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
                           _tile1024i dst, _tile1024i src1, _tile1024i src2) {
  return __builtin_ia32_tcmmimfp16ps_internal(m, n, k, dst, src1, src2);
}

static __inline__ _tile1024i __DEFAULT_FN_ATTRS_COMPLEX
_tile_cmmrlfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
                           _tile1024i dst, _tile1024i src1, _tile1024i src2) {
  return __builtin_ia32_tcmmrlfp16ps_internal(m, n, k, dst, src1, src2);
}

/// Perform matrix multiplication of two tiles containing complex elements and
/// accumulate the results into a packed single precision tile. Each dword
/// element in input tiles src0 and src1 is interpreted as a complex number with
/// FP16 real part and FP16 imaginary part.
/// This function calculates the imaginary part of the result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TCMMIMFP16PS </c> instruction.
///
/// \param dst
///    The destination tile. Max size is 1024 Bytes.
/// \param src0
///    The 1st source tile. Max size is 1024 Bytes.
/// \param src1
///    The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_COMPLEX
static void __tile_cmmimfp16ps(__tile1024i *dst, __tile1024i src0,
                               __tile1024i src1) {
  dst->tile = _tile_cmmimfp16ps_internal(src0.row, src1.col, src0.col,
                                         dst->tile, src0.tile, src1.tile);
}

/// Perform matrix multiplication of two tiles containing complex elements and
/// accumulate the results into a packed single precision tile. Each dword
/// element in input tiles src0 and src1 is interpreted as a complex number with
/// FP16 real part and FP16 imaginary part.
/// This function calculates the real part of the result.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TCMMRLFP16PS </c> instruction.
///
/// \param dst
///    The destination tile. Max size is 1024 Bytes.
/// \param src0
///    The 1st source tile. Max size is 1024 Bytes.
/// \param src1
///    The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_COMPLEX
static void __tile_cmmrlfp16ps(__tile1024i *dst, __tile1024i src0,
                               __tile1024i src1) {
  dst->tile = _tile_cmmrlfp16ps_internal(src0.row, src1.col, src0.col,
                                         dst->tile, src0.tile, src1.tile);
}

#endif // __x86_64__
#endif // __AMX_COMPLEXINTRIN_H
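The two macro intrinsics are intended to be used as a pair: one TCMMRLFP16PS step accumulates the real part and one TCMMIMFP16PS step the imaginary part of the same complex product, into separate FP32 tiles. A compile-oriented sketch, assuming tiles 2 and 3 already hold the FP16 complex operands and tiles 0 and 1 are the accumulators (tile numbers are illustrative; actually executing this requires AMX-COMPLEX hardware and prior tile configuration):

#include <immintrin.h>

/* One accumulation step: tmm0 += Re(tmm2 * tmm3), tmm1 += Im(tmm2 * tmm3).
   Build with e.g. -mamx-tile -mamx-complex. */
void complex_step(void) {
  _tile_cmmrlfp16ps(0, 2, 3); /* real part */
  _tile_cmmimfp16ps(1, 2, 3); /* imaginary part */
}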
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/amxintrin.h
ADDED
@@ -0,0 +1,524 @@
/*===--------------- amxintrin.h - AMX intrinsics -*- C/C++ -*---------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===------------------------------------------------------------------------===
 */

#ifndef __IMMINTRIN_H
#error "Never use <amxintrin.h> directly; include <immintrin.h> instead."
#endif /* __IMMINTRIN_H */

#ifndef __AMXINTRIN_H
#define __AMXINTRIN_H
#ifdef __x86_64__

/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS_TILE                                                \
  __attribute__((__always_inline__, __nodebug__, __target__("amx-tile")))
#define __DEFAULT_FN_ATTRS_INT8                                                \
  __attribute__((__always_inline__, __nodebug__, __target__("amx-int8")))
#define __DEFAULT_FN_ATTRS_BF16                                                \
  __attribute__((__always_inline__, __nodebug__, __target__("amx-bf16")))
#define __DEFAULT_FN_ATTRS_FP16                                                \
  __attribute__((__always_inline__, __nodebug__, __target__("amx-fp16")))

/// Load tile configuration from a 64-byte memory location specified by
/// "mem_addr". The tile configuration includes the tile type palette, the
/// number of bytes per row, and the number of rows. If the specified
/// palette_id is zero, that signifies the init state for both the tile
/// config and the tile data, and the tiles are zeroed. Any invalid
/// configurations will result in a #GP fault.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> LDTILECFG </c> instruction.
///
/// \param __config
///    A pointer to the 512-bit configuration.
static __inline__ void __DEFAULT_FN_ATTRS_TILE
_tile_loadconfig(const void *__config) {
  __builtin_ia32_tile_loadconfig(__config);
}

/// Stores the current tile configuration to a 64-byte memory location
/// specified by "mem_addr". The tile configuration includes the tile type
/// palette, the number of bytes per row, and the number of rows. If tiles
/// are not configured, all zeroes will be stored to memory.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> STTILECFG </c> instruction.
///
/// \param __config
///    A pointer to the 512-bit configuration.
static __inline__ void __DEFAULT_FN_ATTRS_TILE
_tile_storeconfig(void *__config) {
  __builtin_ia32_tile_storeconfig(__config);
}

/// Release the tile configuration to return to the init state, which
/// releases all storage it currently holds.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TILERELEASE </c> instruction.
static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {
  __builtin_ia32_tilerelease();
}

/// Load tile rows from memory specified by "base" address and "stride" into
/// destination tile "dst" using the tile configuration previously configured
/// via "_tile_loadconfig".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TILELOADD </c> instruction.
///
/// \param dst
///    A destination tile. Max size is 1024 Bytes.
/// \param base
///    A pointer to the base address.
/// \param stride
///    The stride between the rows' data to be loaded in memory.
#define _tile_loadd(dst, base, stride)                                         \
  __builtin_ia32_tileloadd64((dst), ((const void *)(base)),                    \
                             (__SIZE_TYPE__)(stride))

/// Load tile rows from memory specified by "base" address and "stride" into
/// destination tile "dst" using the tile configuration previously configured
/// via "_tile_loadconfig". This intrinsic provides a hint to the implementation
/// that the data will likely not be reused in the near future and the data
/// caching can be optimized accordingly.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TILELOADDT1 </c> instruction.
///
/// \param dst
///    A destination tile. Max size is 1024 Bytes.
/// \param base
///    A pointer to the base address.
/// \param stride
///    The stride between the rows' data to be loaded in memory.
#define _tile_stream_loadd(dst, base, stride)                                  \
  __builtin_ia32_tileloaddt164((dst), ((const void *)(base)),                  \
                               (__SIZE_TYPE__)(stride))

/// Store the tile specified by "src" to memory specified by "base" address and
/// "stride" using the tile configuration previously configured via
/// "_tile_loadconfig".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TILESTORED </c> instruction.
///
/// \param dst
///    A destination tile. Max size is 1024 Bytes.
/// \param base
///    A pointer to the base address.
/// \param stride
///    The stride between the rows' data to be stored in memory.
#define _tile_stored(dst, base, stride)                                        \
  __builtin_ia32_tilestored64((dst), ((void *)(base)), (__SIZE_TYPE__)(stride))

/// Zero the tile specified by "tdest".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TILEZERO </c> instruction.
///
/// \param tile
///    The destination tile to be zeroed. Max size is 1024 Bytes.
#define _tile_zero(tile) __builtin_ia32_tilezero((tile))

/// Compute dot-product of bytes in tiles with a source/destination accumulator.
/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
/// and store the 32-bit result back to tile "dst".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPBSSD </c> instruction.
///
/// \param dst
///    The destination tile. Max size is 1024 Bytes.
/// \param src0
///    The 1st source tile. Max size is 1024 Bytes.
/// \param src1
///    The 2nd source tile. Max size is 1024 Bytes.
#define _tile_dpbssd(dst, src0, src1)                                          \
  __builtin_ia32_tdpbssd((dst), (src0), (src1))

/// Compute dot-product of bytes in tiles with a source/destination accumulator.
/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer
/// in "dst", and store the 32-bit result back to tile "dst".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPBSUD </c> instruction.
///
/// \param dst
///    The destination tile. Max size is 1024 Bytes.
/// \param src0
///    The 1st source tile. Max size is 1024 Bytes.
/// \param src1
///    The 2nd source tile. Max size is 1024 Bytes.
#define _tile_dpbsud(dst, src0, src1)                                          \
  __builtin_ia32_tdpbsud((dst), (src0), (src1))

/// Compute dot-product of bytes in tiles with a source/destination accumulator.
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
/// and store the 32-bit result back to tile "dst".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPBUSD </c> instruction.
///
/// \param dst
///    The destination tile. Max size is 1024 Bytes.
/// \param src0
///    The 1st source tile. Max size is 1024 Bytes.
/// \param src1
///    The 2nd source tile. Max size is 1024 Bytes.
#define _tile_dpbusd(dst, src0, src1)                                          \
  __builtin_ia32_tdpbusd((dst), (src0), (src1))

/// Compute dot-product of bytes in tiles with a source/destination accumulator.
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer in
/// "dst", and store the 32-bit result back to tile "dst".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPBUUD </c> instruction.
///
/// \param dst
///    The destination tile. Max size is 1024 Bytes.
/// \param src0
///    The 1st source tile. Max size is 1024 Bytes.
/// \param src1
///    The 2nd source tile. Max size is 1024 Bytes.
#define _tile_dpbuud(dst, src0, src1)                                          \
  __builtin_ia32_tdpbuud((dst), (src0), (src1))

/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and
/// src1, accumulating the intermediate single-precision (32-bit) floating-point
/// elements with elements in "dst", and store the 32-bit result back to tile
/// "dst".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPBF16PS </c> instruction.
///
/// \param dst
///    The destination tile. Max size is 1024 Bytes.
/// \param src0
///    The 1st source tile. Max size is 1024 Bytes.
/// \param src1
///    The 2nd source tile. Max size is 1024 Bytes.
#define _tile_dpbf16ps(dst, src0, src1)                                        \
  __builtin_ia32_tdpbf16ps((dst), (src0), (src1))

/// The AMX tile register size can be configured; the maximum size is
/// 16x64 = 1024 bytes. Since there is no 2D type in LLVM IR, we use a vector
/// type to represent the 2D tile, and the fixed size is the maximum AMX tile
/// register size.
typedef int _tile1024i __attribute__((__vector_size__(1024), __aligned__(64)));

/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
_tile_loadd_internal(unsigned short m, unsigned short n, const void *base,
                     __SIZE_TYPE__ stride) {
  return __builtin_ia32_tileloadd64_internal(m, n, base,
                                             (__SIZE_TYPE__)(stride));
}

/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
_tile_loaddt1_internal(unsigned short m, unsigned short n, const void *base,
                       __SIZE_TYPE__ stride) {
  return __builtin_ia32_tileloaddt164_internal(m, n, base,
                                               (__SIZE_TYPE__)(stride));
}

/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
_tile_dpbssd_internal(unsigned short m, unsigned short n, unsigned short k,
                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
  return __builtin_ia32_tdpbssd_internal(m, n, k, dst, src1, src2);
}

/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
_tile_dpbsud_internal(unsigned short m, unsigned short n, unsigned short k,
                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
  return __builtin_ia32_tdpbsud_internal(m, n, k, dst, src1, src2);
}

/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
_tile_dpbusd_internal(unsigned short m, unsigned short n, unsigned short k,
                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
  return __builtin_ia32_tdpbusd_internal(m, n, k, dst, src1, src2);
}

/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
_tile_dpbuud_internal(unsigned short m, unsigned short n, unsigned short k,
                      _tile1024i dst, _tile1024i src1, _tile1024i src2) {
  return __builtin_ia32_tdpbuud_internal(m, n, k, dst, src1, src2);
}

/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
static __inline__ void __DEFAULT_FN_ATTRS_INT8
_tile_stored_internal(unsigned short m, unsigned short n, void *base,
                      __SIZE_TYPE__ stride, _tile1024i tile) {
  return __builtin_ia32_tilestored64_internal(m, n, base,
                                              (__SIZE_TYPE__)(stride), tile);
}

/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
static __inline__ _tile1024i __DEFAULT_FN_ATTRS_BF16
_tile_dpbf16ps_internal(unsigned short m, unsigned short n, unsigned short k,
                        _tile1024i dst, _tile1024i src1, _tile1024i src2) {
  return __builtin_ia32_tdpbf16ps_internal(m, n, k, dst, src1, src2);
}

/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
static __inline__ _tile1024i __DEFAULT_FN_ATTRS_FP16
_tile_dpfp16ps_internal(unsigned short m, unsigned short n, unsigned short k,
                        _tile1024i dst, _tile1024i src1, _tile1024i src2) {
  return __builtin_ia32_tdpfp16ps_internal(m, n, k, dst, src1, src2);
}

/// This struct packs the shape and tile data together for the user. We suggest
/// initializing the struct as early as possible, because the compiler depends
/// on the shape information to do the configuration. Constant values are
/// preferred for compiler optimization.
typedef struct __tile1024i_str {
  const unsigned short row;
  const unsigned short col;
  _tile1024i tile;
} __tile1024i;

/// Load tile rows from memory specified by "base" address and "stride" into
/// destination tile "dst".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TILELOADD </c> instruction.
///
/// \param dst
///    A destination tile. Max size is 1024 Bytes.
/// \param base
///    A pointer to the base address.
/// \param stride
///    The stride between the rows' data to be loaded in memory.
__DEFAULT_FN_ATTRS_TILE
static __inline__ void __tile_loadd(__tile1024i *dst, const void *base,
                                    __SIZE_TYPE__ stride) {
  dst->tile = _tile_loadd_internal(dst->row, dst->col, base, stride);
}

/// Load tile rows from memory specified by "base" address and "stride" into
/// destination tile "dst". This intrinsic provides a hint to the implementation
/// that the data will likely not be reused in the near future and the data
/// caching can be optimized accordingly.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TILELOADDT1 </c> instruction.
///
/// \param dst
///    A destination tile. Max size is 1024 Bytes.
/// \param base
///    A pointer to the base address.
/// \param stride
///    The stride between the rows' data to be loaded in memory.
__DEFAULT_FN_ATTRS_TILE
static __inline__ void __tile_stream_loadd(__tile1024i *dst, const void *base,
                                           __SIZE_TYPE__ stride) {
  dst->tile = _tile_loaddt1_internal(dst->row, dst->col, base, stride);
}

/// Compute dot-product of bytes in tiles with a source/destination accumulator.
/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
/// and store the 32-bit result back to tile "dst".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPBSSD </c> instruction.
///
/// \param dst
///    The destination tile. Max size is 1024 Bytes.
/// \param src0
///    The 1st source tile. Max size is 1024 Bytes.
/// \param src1
///    The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_INT8
static __inline__ void __tile_dpbssd(__tile1024i *dst, __tile1024i src0,
                                     __tile1024i src1) {
  dst->tile = _tile_dpbssd_internal(src0.row, src1.col, src0.col, dst->tile,
                                    src0.tile, src1.tile);
}

/// Compute dot-product of bytes in tiles with a source/destination accumulator.
/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer
/// in "dst", and store the 32-bit result back to tile "dst".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPBSUD </c> instruction.
///
/// \param dst
///    The destination tile. Max size is 1024 Bytes.
/// \param src0
///    The 1st source tile. Max size is 1024 Bytes.
/// \param src1
///    The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_INT8
static __inline__ void __tile_dpbsud(__tile1024i *dst, __tile1024i src0,
                                     __tile1024i src1) {
  dst->tile = _tile_dpbsud_internal(src0.row, src1.col, src0.col, dst->tile,
                                    src0.tile, src1.tile);
}

/// Compute dot-product of bytes in tiles with a source/destination accumulator.
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
/// and store the 32-bit result back to tile "dst".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPBUSD </c> instruction.
///
/// \param dst
///    The destination tile. Max size is 1024 Bytes.
/// \param src0
///    The 1st source tile. Max size is 1024 Bytes.
/// \param src1
///    The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_INT8
static __inline__ void __tile_dpbusd(__tile1024i *dst, __tile1024i src0,
                                     __tile1024i src1) {
  dst->tile = _tile_dpbusd_internal(src0.row, src1.col, src0.col, dst->tile,
                                    src0.tile, src1.tile);
}

/// Compute dot-product of bytes in tiles with a source/destination accumulator.
/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer in
/// "dst", and store the 32-bit result back to tile "dst".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPBUUD </c> instruction.
///
/// \param dst
///    The destination tile. Max size is 1024 Bytes.
/// \param src0
///    The 1st source tile. Max size is 1024 Bytes.
/// \param src1
///    The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_INT8
static __inline__ void __tile_dpbuud(__tile1024i *dst, __tile1024i src0,
                                     __tile1024i src1) {
  dst->tile = _tile_dpbuud_internal(src0.row, src1.col, src0.col, dst->tile,
                                    src0.tile, src1.tile);
}

/// Store the tile specified by "src" to memory specified by "base" address and
/// "stride".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TILESTORED </c> instruction.
///
/// \param base
///    A pointer to the base address.
/// \param stride
///    The stride between the rows' data to be stored in memory.
__DEFAULT_FN_ATTRS_TILE
static __inline__ void __tile_stored(void *base, __SIZE_TYPE__ stride,
                                     __tile1024i src) {
  _tile_stored_internal(src.row, src.col, base, stride, src.tile);
}

/// Zero the tile specified by "dst".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TILEZERO </c> instruction.
///
/// \param dst
///    The destination tile to be zeroed. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_TILE
static __inline__ void __tile_zero(__tile1024i *dst) {
  dst->tile = __builtin_ia32_tilezero_internal(dst->row, dst->col);
}

/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and
/// src1, accumulating the intermediate single-precision (32-bit) floating-point
/// elements with elements in "dst", and store the 32-bit result back to tile
/// "dst".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPBF16PS </c> instruction.
///
/// \param dst
///    The destination tile. Max size is 1024 Bytes.
/// \param src0
///    The 1st source tile. Max size is 1024 Bytes.
/// \param src1
///    The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_BF16
static __inline__ void __tile_dpbf16ps(__tile1024i *dst, __tile1024i src0,
                                       __tile1024i src1) {
  dst->tile = _tile_dpbf16ps_internal(src0.row, src1.col, src0.col, dst->tile,
                                      src0.tile, src1.tile);
}

/// Compute dot-product of FP16 (16-bit) floating-point pairs in tiles src0 and
/// src1, accumulating the intermediate single-precision (32-bit) floating-point
/// elements with elements in "dst", and store the 32-bit result back to tile
/// "dst".
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPFP16PS </c> instruction.
///
/// \param dst
///    The destination tile. Max size is 1024 Bytes.
/// \param src0
///    The 1st source tile. Max size is 1024 Bytes.
/// \param src1
///    The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_FP16
static __inline__ void __tile_dpfp16ps(__tile1024i *dst, __tile1024i src0,
                                       __tile1024i src1) {
  dst->tile = _tile_dpfp16ps_internal(src0.row, src1.col, src0.col, dst->tile,
                                      src0.tile, src1.tile);
}

#undef __DEFAULT_FN_ATTRS_TILE
#undef __DEFAULT_FN_ATTRS_INT8
#undef __DEFAULT_FN_ATTRS_BF16
#undef __DEFAULT_FN_ATTRS_FP16

#endif /* __x86_64__ */
#endif /* __AMXINTRIN_H */
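The raw macros take tile register numbers as immediates and assume a prior LDTILECFG. A minimal end-to-end sketch of an int8 dot-product step, assuming the standard 64-byte tile-config layout (palette byte at offset 0, per-tile column bytes as u16 starting at offset 16, per-tile row counts as u8 starting at offset 48) and an OS that has already enabled the AMX tile-data state (on Linux via arch_prctl, omitted here); shapes and register numbers are illustrative:

#include <immintrin.h>
#include <stdint.h>
#include <string.h>

/* Build with e.g. -mamx-tile -mamx-int8. */
static int8_t A[16][64], B[16][64];
static int32_t C[16][16];

int main(void) {
  _Alignas(64) uint8_t cfg[64];
  memset(cfg, 0, sizeof(cfg));
  cfg[0] = 1;                 /* palette_id 1 */
  for (int t = 0; t < 3; ++t) {
    cfg[16 + 2 * t] = 64;     /* colsb[t]: 64 bytes per row */
    cfg[48 + t] = 16;         /* rows[t]: 16 rows */
  }
  _tile_loadconfig(cfg);

  _tile_zero(0);              /* tmm0: int32 accumulator */
  _tile_loadd(1, A, 64);      /* stride = 64 bytes between rows */
  _tile_loadd(2, B, 64);
  _tile_dpbssd(0, 1, 2);      /* tmm0 += signed-int8 dot products */
  _tile_stored(0, C, 64);

  _tile_release();
  return 0;
}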
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/arm_cde.h
ADDED
@@ -0,0 +1,410 @@
/*===---- arm_cde.h - ARM CDE intrinsics -----------------------------------===
 *
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __ARM_CDE_H
#define __ARM_CDE_H

#if !__ARM_FEATURE_CDE
#error "CDE support not enabled"
#endif

#include <stdint.h>

#ifdef __cplusplus
extern "C" {
#endif

static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1)))
uint32_t __arm_cx1(int, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1a)))
uint32_t __arm_cx1a(int, uint32_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1d)))
uint64_t __arm_cx1d(int, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx1da)))
uint64_t __arm_cx1da(int, uint64_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2)))
uint32_t __arm_cx2(int, uint32_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2a)))
uint32_t __arm_cx2a(int, uint32_t, uint32_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2d)))
uint64_t __arm_cx2d(int, uint32_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx2da)))
uint64_t __arm_cx2da(int, uint64_t, uint32_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3)))
uint32_t __arm_cx3(int, uint32_t, uint32_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3a)))
uint32_t __arm_cx3a(int, uint32_t, uint32_t, uint32_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3d)))
uint64_t __arm_cx3d(int, uint32_t, uint32_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_cx3da)))
uint64_t __arm_cx3da(int, uint64_t, uint32_t, uint32_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1_u32)))
uint32_t __arm_vcx1_u32(int, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1a_u32)))
uint32_t __arm_vcx1a_u32(int, uint32_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1d_u64)))
uint64_t __arm_vcx1d_u64(int, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1da_u64)))
uint64_t __arm_vcx1da_u64(int, uint64_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2_u32)))
uint32_t __arm_vcx2_u32(int, uint32_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2a_u32)))
uint32_t __arm_vcx2a_u32(int, uint32_t, uint32_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2d_u64)))
uint64_t __arm_vcx2d_u64(int, uint64_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx2da_u64)))
uint64_t __arm_vcx2da_u64(int, uint64_t, uint64_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3_u32)))
uint32_t __arm_vcx3_u32(int, uint32_t, uint32_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3a_u32)))
uint32_t __arm_vcx3a_u32(int, uint32_t, uint32_t, uint32_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3d_u64)))
uint64_t __arm_vcx3d_u64(int, uint64_t, uint64_t, uint32_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx3da_u64)))
uint64_t __arm_vcx3da_u64(int, uint64_t, uint64_t, uint64_t, uint32_t);

#if __ARM_FEATURE_MVE

typedef uint16_t mve_pred16_t;
typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) int16_t int16x8_t;
typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) int32_t int32x4_t;
typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) int64_t int64x2_t;
typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) int8_t int8x16_t;
typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) uint16_t uint16x8_t;
typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) uint32_t uint32x4_t;
typedef __attribute__((__neon_vector_type__(2), __clang_arm_mve_strict_polymorphism)) uint64_t uint64x2_t;
typedef __attribute__((__neon_vector_type__(16), __clang_arm_mve_strict_polymorphism)) uint8_t uint8x16_t;

static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s16)))
int16x8_t __arm_vcx1q_m(int, int16x8_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s32)))
int32x4_t __arm_vcx1q_m(int, int32x4_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s64)))
int64x2_t __arm_vcx1q_m(int, int64x2_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_s8)))
int8x16_t __arm_vcx1q_m(int, int8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u16)))
uint16x8_t __arm_vcx1q_m(int, uint16x8_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u32)))
uint32x4_t __arm_vcx1q_m(int, uint32x4_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u64)))
uint64x2_t __arm_vcx1q_m(int, uint64x2_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_u8)))
uint8x16_t __arm_vcx1q_m(int, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_u8)))
uint8x16_t __arm_vcx1q_u8(int, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s16)))
int16x8_t __arm_vcx1qa_m(int, int16x8_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s32)))
int32x4_t __arm_vcx1qa_m(int, int32x4_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s64)))
int64x2_t __arm_vcx1qa_m(int, int64x2_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_s8)))
int8x16_t __arm_vcx1qa_m(int, int8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u16)))
uint16x8_t __arm_vcx1qa_m(int, uint16x8_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u32)))
uint32x4_t __arm_vcx1qa_m(int, uint32x4_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u64)))
uint64x2_t __arm_vcx1qa_m(int, uint64x2_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_u8)))
uint8x16_t __arm_vcx1qa_m(int, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s16)))
int16x8_t __arm_vcx1qa(int, int16x8_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s32)))
int32x4_t __arm_vcx1qa(int, int32x4_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s64)))
int64x2_t __arm_vcx1qa(int, int64x2_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_s8)))
int8x16_t __arm_vcx1qa(int, int8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u16)))
uint16x8_t __arm_vcx1qa(int, uint16x8_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u32)))
uint32x4_t __arm_vcx1qa(int, uint32x4_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u64)))
uint64x2_t __arm_vcx1qa(int, uint64x2_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_u8)))
uint8x16_t __arm_vcx1qa(int, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s16)))
int16x8_t __arm_vcx2q_m_impl(int, int16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s32)))
int32x4_t __arm_vcx2q_m_impl(int, int32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s64)))
int64x2_t __arm_vcx2q_m_impl(int, int64x2_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_s8)))
int8x16_t __arm_vcx2q_m_impl(int, int8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u16)))
uint16x8_t __arm_vcx2q_m_impl(int, uint16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u32)))
uint32x4_t __arm_vcx2q_m_impl(int, uint32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u64)))
uint64x2_t __arm_vcx2q_m_impl(int, uint64x2_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_u8)))
uint8x16_t __arm_vcx2q_m_impl(int, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s16)))
int16x8_t __arm_vcx2q(int, int16x8_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s32)))
int32x4_t __arm_vcx2q(int, int32x4_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s64)))
int64x2_t __arm_vcx2q(int, int64x2_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_s8)))
int8x16_t __arm_vcx2q(int, int8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u16)))
uint16x8_t __arm_vcx2q(int, uint16x8_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u32)))
uint32x4_t __arm_vcx2q(int, uint32x4_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u64)))
uint64x2_t __arm_vcx2q(int, uint64x2_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8)))
uint8x16_t __arm_vcx2q(int, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s16)))
uint8x16_t __arm_vcx2q_u8(int, int16x8_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s32)))
uint8x16_t __arm_vcx2q_u8(int, int32x4_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s64)))
uint8x16_t __arm_vcx2q_u8(int, int64x2_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_s8)))
uint8x16_t __arm_vcx2q_u8(int, int8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u16)))
uint8x16_t __arm_vcx2q_u8(int, uint16x8_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u32)))
uint8x16_t __arm_vcx2q_u8(int, uint32x4_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u64)))
uint8x16_t __arm_vcx2q_u8(int, uint64x2_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_u8)))
uint8x16_t __arm_vcx2q_u8(int, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s16)))
int16x8_t __arm_vcx2qa_impl(int, int16x8_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s32)))
int32x4_t __arm_vcx2qa_impl(int, int32x4_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s64)))
int64x2_t __arm_vcx2qa_impl(int, int64x2_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_s8)))
int8x16_t __arm_vcx2qa_impl(int, int8x16_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u16)))
uint16x8_t __arm_vcx2qa_impl(int, uint16x8_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u32)))
uint32x4_t __arm_vcx2qa_impl(int, uint32x4_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u64)))
uint64x2_t __arm_vcx2qa_impl(int, uint64x2_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_u8)))
uint8x16_t __arm_vcx2qa_impl(int, uint8x16_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s16)))
int16x8_t __arm_vcx2qa_m_impl(int, int16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s32)))
int32x4_t __arm_vcx2qa_m_impl(int, int32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s64)))
int64x2_t __arm_vcx2qa_m_impl(int, int64x2_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_s8)))
int8x16_t __arm_vcx2qa_m_impl(int, int8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u16)))
uint16x8_t __arm_vcx2qa_m_impl(int, uint16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u32)))
|
210 |
+
uint32x4_t __arm_vcx2qa_m_impl(int, uint32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
|
211 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u64)))
|
212 |
+
uint64x2_t __arm_vcx2qa_m_impl(int, uint64x2_t, uint8x16_t, uint32_t, mve_pred16_t);
|
213 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_u8)))
|
214 |
+
uint8x16_t __arm_vcx2qa_m_impl(int, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
215 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s16)))
|
216 |
+
int16x8_t __arm_vcx3q_impl(int, int16x8_t, uint8x16_t, uint32_t);
|
217 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s32)))
|
218 |
+
int32x4_t __arm_vcx3q_impl(int, int32x4_t, uint8x16_t, uint32_t);
|
219 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s64)))
|
220 |
+
int64x2_t __arm_vcx3q_impl(int, int64x2_t, uint8x16_t, uint32_t);
|
221 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_s8)))
|
222 |
+
int8x16_t __arm_vcx3q_impl(int, int8x16_t, uint8x16_t, uint32_t);
|
223 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u16)))
|
224 |
+
uint16x8_t __arm_vcx3q_impl(int, uint16x8_t, uint8x16_t, uint32_t);
|
225 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u32)))
|
226 |
+
uint32x4_t __arm_vcx3q_impl(int, uint32x4_t, uint8x16_t, uint32_t);
|
227 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u64)))
|
228 |
+
uint64x2_t __arm_vcx3q_impl(int, uint64x2_t, uint8x16_t, uint32_t);
|
229 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_u8)))
|
230 |
+
uint8x16_t __arm_vcx3q_impl(int, uint8x16_t, uint8x16_t, uint32_t);
|
231 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s16)))
|
232 |
+
int16x8_t __arm_vcx3q_m_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
233 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s32)))
|
234 |
+
int32x4_t __arm_vcx3q_m_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
235 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s64)))
|
236 |
+
int64x2_t __arm_vcx3q_m_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
237 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_s8)))
|
238 |
+
int8x16_t __arm_vcx3q_m_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
239 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u16)))
|
240 |
+
uint16x8_t __arm_vcx3q_m_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
241 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u32)))
|
242 |
+
uint32x4_t __arm_vcx3q_m_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
243 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u64)))
|
244 |
+
uint64x2_t __arm_vcx3q_m_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
245 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_u8)))
|
246 |
+
uint8x16_t __arm_vcx3q_m_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
247 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s16)))
|
248 |
+
uint8x16_t __arm_vcx3q_u8_impl(int, int16x8_t, uint8x16_t, uint32_t);
|
249 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s32)))
|
250 |
+
uint8x16_t __arm_vcx3q_u8_impl(int, int32x4_t, uint8x16_t, uint32_t);
|
251 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s64)))
|
252 |
+
uint8x16_t __arm_vcx3q_u8_impl(int, int64x2_t, uint8x16_t, uint32_t);
|
253 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_s8)))
|
254 |
+
uint8x16_t __arm_vcx3q_u8_impl(int, int8x16_t, uint8x16_t, uint32_t);
|
255 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u16)))
|
256 |
+
uint8x16_t __arm_vcx3q_u8_impl(int, uint16x8_t, uint8x16_t, uint32_t);
|
257 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u32)))
|
258 |
+
uint8x16_t __arm_vcx3q_u8_impl(int, uint32x4_t, uint8x16_t, uint32_t);
|
259 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u64)))
|
260 |
+
uint8x16_t __arm_vcx3q_u8_impl(int, uint64x2_t, uint8x16_t, uint32_t);
|
261 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_u8)))
|
262 |
+
uint8x16_t __arm_vcx3q_u8_impl(int, uint8x16_t, uint8x16_t, uint32_t);
|
263 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s16)))
|
264 |
+
int16x8_t __arm_vcx3qa_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t);
|
265 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s32)))
|
266 |
+
int32x4_t __arm_vcx3qa_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t);
|
267 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s64)))
|
268 |
+
int64x2_t __arm_vcx3qa_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t);
|
269 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_s8)))
|
270 |
+
int8x16_t __arm_vcx3qa_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t);
|
271 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u16)))
|
272 |
+
uint16x8_t __arm_vcx3qa_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t);
|
273 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u32)))
|
274 |
+
uint32x4_t __arm_vcx3qa_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t);
|
275 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u64)))
|
276 |
+
uint64x2_t __arm_vcx3qa_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t);
|
277 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_u8)))
|
278 |
+
uint8x16_t __arm_vcx3qa_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t);
|
279 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s16)))
|
280 |
+
int16x8_t __arm_vcx3qa_m_impl(int, int16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
281 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s32)))
|
282 |
+
int32x4_t __arm_vcx3qa_m_impl(int, int32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
283 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s64)))
|
284 |
+
int64x2_t __arm_vcx3qa_m_impl(int, int64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
285 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_s8)))
|
286 |
+
int8x16_t __arm_vcx3qa_m_impl(int, int8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
287 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u16)))
|
288 |
+
uint16x8_t __arm_vcx3qa_m_impl(int, uint16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
289 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u32)))
|
290 |
+
uint32x4_t __arm_vcx3qa_m_impl(int, uint32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
291 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u64)))
|
292 |
+
uint64x2_t __arm_vcx3qa_m_impl(int, uint64x2_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
293 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_u8)))
|
294 |
+
uint8x16_t __arm_vcx3qa_m_impl(int, uint8x16_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
|
295 |
+
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s16_u8)))
|
296 |
+
int16x8_t __arm_vreinterpretq_s16_u8(uint8x16_t);
|
297 |
+
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s32_u8)))
|
298 |
+
int32x4_t __arm_vreinterpretq_s32_u8(uint8x16_t);
|
299 |
+
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s64_u8)))
|
300 |
+
int64x2_t __arm_vreinterpretq_s64_u8(uint8x16_t);
|
301 |
+
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_s8_u8)))
|
302 |
+
int8x16_t __arm_vreinterpretq_s8_u8(uint8x16_t);
|
303 |
+
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u16_u8)))
|
304 |
+
uint16x8_t __arm_vreinterpretq_u16_u8(uint8x16_t);
|
305 |
+
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u32_u8)))
|
306 |
+
uint32x4_t __arm_vreinterpretq_u32_u8(uint8x16_t);
|
307 |
+
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u64_u8)))
|
308 |
+
uint64x2_t __arm_vreinterpretq_u64_u8(uint8x16_t);
|
309 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s16)))
|
310 |
+
uint8x16_t __arm_vreinterpretq_u8(int16x8_t);
|
311 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s32)))
|
312 |
+
uint8x16_t __arm_vreinterpretq_u8(int32x4_t);
|
313 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s64)))
|
314 |
+
uint8x16_t __arm_vreinterpretq_u8(int64x2_t);
|
315 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_s8)))
|
316 |
+
uint8x16_t __arm_vreinterpretq_u8(int8x16_t);
|
317 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u16)))
|
318 |
+
uint8x16_t __arm_vreinterpretq_u8(uint16x8_t);
|
319 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u32)))
|
320 |
+
uint8x16_t __arm_vreinterpretq_u8(uint32x4_t);
|
321 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_u64)))
|
322 |
+
uint8x16_t __arm_vreinterpretq_u8(uint64x2_t);
|
323 |
+
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vreinterpretq_u8_u8)))
|
324 |
+
uint8x16_t __arm_vreinterpretq_u8(uint8x16_t);
|
325 |
+
#define __arm_vcx2q_m(cp, inactive, n, imm, pred) __arm_vcx2q_m_impl((cp), (inactive), __arm_vreinterpretq_u8(n), (imm), (pred))
|
326 |
+
#define __arm_vcx2qa(cp, acc, n, imm) __arm_vcx2qa_impl((cp), (acc), __arm_vreinterpretq_u8(n), (imm))
|
327 |
+
#define __arm_vcx2qa_m(cp, acc, n, imm, pred) __arm_vcx2qa_m_impl((cp), (acc), __arm_vreinterpretq_u8(n), (imm), (pred))
|
328 |
+
#define __arm_vcx3q(cp, n, m, imm) __arm_vcx3q_impl((cp), (n), __arm_vreinterpretq_u8(m), (imm))
|
329 |
+
#define __arm_vcx3q_m(cp, inactive, n, m, imm, pred) __arm_vcx3q_m_impl((cp), (inactive), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm), (pred))
|
330 |
+
#define __arm_vcx3q_u8(cp, n, m, imm) __arm_vcx3q_u8_impl((cp), (n), __arm_vreinterpretq_u8(m), (imm))
|
331 |
+
#define __arm_vcx3qa(cp, acc, n, m, imm) __arm_vcx3qa_impl((cp), (acc), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm))
|
332 |
+
#define __arm_vcx3qa_m(cp, acc, n, m, imm, pred) __arm_vcx3qa_m_impl((cp), (acc), __arm_vreinterpretq_u8(n), __arm_vreinterpretq_u8(m), (imm), (pred))
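
/*
 * Usage sketch (illustrative, not part of the upstream header). The _impl
 * overloads above take their extra vector operands as uint8x16_t, so these
 * user-facing macros route every operand through the polymorphic
 * __arm_vreinterpretq_u8 before dispatching. Assumes a CDE-plus-MVE target
 * (e.g. -march=armv8.1-m.main+mve+cdecp0); the coprocessor number 0 and
 * immediate 0x21 are hypothetical placeholders, since what a CDE
 * coprocessor computes is implementation-defined.
 */
#include <arm_cde.h>

/* Predicated accumulate through the macro: it expands to
 * __arm_vcx2qa_m_impl(0, acc, __arm_vreinterpretq_u8(n), 0x21, p), so the
 * s16 operand is reinterpreted to uint8x16_t before overload resolution. */
uint32x4_t cde_accumulate(uint32x4_t acc, int16x8_t n, mve_pred16_t p) {
    return __arm_vcx2qa_m(0, acc, n, 0x21, p);
}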

#endif /* __ARM_FEATURE_MVE */

#if __ARM_FEATURE_MVE & 2

typedef __fp16 float16_t;
typedef float float32_t;
typedef __attribute__((__neon_vector_type__(8), __clang_arm_mve_strict_polymorphism)) float16_t float16x8_t;
typedef __attribute__((__neon_vector_type__(4), __clang_arm_mve_strict_polymorphism)) float32_t float32x4_t;

static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_f16)))
float16x8_t __arm_vcx1q_m(int, float16x8_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1q_m_f32)))
float32x4_t __arm_vcx1q_m(int, float32x4_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_f16)))
float16x8_t __arm_vcx1qa(int, float16x8_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_f32)))
float32x4_t __arm_vcx1qa(int, float32x4_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_f16)))
float16x8_t __arm_vcx1qa_m(int, float16x8_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx1qa_m_f32)))
float32x4_t __arm_vcx1qa_m(int, float32x4_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_f16)))
float16x8_t __arm_vcx2q(int, float16x8_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_f32)))
float32x4_t __arm_vcx2q(int, float32x4_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_f16)))
float16x8_t __arm_vcx2q_m_impl(int, float16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_m_impl_f32)))
float32x4_t __arm_vcx2q_m_impl(int, float32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_f16)))
uint8x16_t __arm_vcx2q_u8(int, float16x8_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2q_u8_f32)))
uint8x16_t __arm_vcx2q_u8(int, float32x4_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_f16)))
float16x8_t __arm_vcx2qa_impl(int, float16x8_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_impl_f32)))
float32x4_t __arm_vcx2qa_impl(int, float32x4_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_f16)))
float16x8_t __arm_vcx2qa_m_impl(int, float16x8_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx2qa_m_impl_f32)))
float32x4_t __arm_vcx2qa_m_impl(int, float32x4_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_f16)))
float16x8_t __arm_vcx3q_impl(int, float16x8_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_impl_f32)))
float32x4_t __arm_vcx3q_impl(int, float32x4_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_f16)))
float16x8_t __arm_vcx3q_m_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_m_impl_f32)))
float32x4_t __arm_vcx3q_m_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_f16)))
uint8x16_t __arm_vcx3q_u8_impl(int, float16x8_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3q_u8_impl_f32)))
uint8x16_t __arm_vcx3q_u8_impl(int, float32x4_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_f16)))
float16x8_t __arm_vcx3qa_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_impl_f32)))
float32x4_t __arm_vcx3qa_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_f16)))
float16x8_t __arm_vcx3qa_m_impl(int, float16x8_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_cde_vcx3qa_m_impl_f32)))
float32x4_t __arm_vcx3qa_m_impl(int, float32x4_t, uint8x16_t, uint8x16_t, uint32_t, mve_pred16_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f16_u8)))
float16x8_t __arm_vreinterpretq_f16_u8(uint8x16_t);
static __inline__ __attribute__((__clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_f32_u8)))
float32x4_t __arm_vreinterpretq_f32_u8(uint8x16_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f16)))
uint8x16_t __arm_vreinterpretq_u8(float16x8_t);
static __inline__ __attribute__((__overloadable__, __clang_arm_builtin_alias(__builtin_arm_mve_vreinterpretq_u8_f32)))
uint8x16_t __arm_vreinterpretq_u8(float32x4_t);

#endif /* __ARM_FEATURE_MVE & 2 */

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __ARM_CDE_H */
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/arm_mve.h
ADDED
The diff for this file is too large to render.
See raw diff
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/arm_sve.h
ADDED
The diff for this file is too large to render.
See raw diff
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx2intrin.h
ADDED
The diff for this file is too large to render.
See raw diff
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512bf16intrin.h
ADDED
@@ -0,0 +1,283 @@
/*===------------ avx512bf16intrin.h - AVX512_BF16 intrinsics --------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __IMMINTRIN_H
#error "Never use <avx512bf16intrin.h> directly; include <immintrin.h> instead."
#endif

#ifdef __SSE2__

#ifndef __AVX512BF16INTRIN_H
#define __AVX512BF16INTRIN_H

typedef __bf16 __v32bf __attribute__((__vector_size__(64), __aligned__(64)));
typedef __bf16 __m512bh __attribute__((__vector_size__(64), __aligned__(64)));
typedef __bf16 __bfloat16 __attribute__((deprecated("use __bf16 instead")));

#define __DEFAULT_FN_ATTRS512 \
  __attribute__((__always_inline__, __nodebug__, __target__("avx512bf16,evex512"), \
                 __min_vector_width__(512)))
#define __DEFAULT_FN_ATTRS \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512bf16,no-evex512")))

/// Convert One BF16 Data to One Single Float Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic does not correspond to a specific instruction.
///
/// \param __A
///    A bfloat data.
/// \returns A float data whose sign field and exponent field keep unchanged,
///    and fraction field is extended to 23 bits.
static __inline__ float __DEFAULT_FN_ATTRS _mm_cvtsbh_ss(__bf16 __A) {
  return __builtin_ia32_cvtsbf162ss_32(__A);
}
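
/*
 * Illustration (not part of the upstream header): since __bf16 holds the
 * top 16 bits of an IEEE-754 single, the widening described above amounts
 * to placing those bits in the high half of a 32-bit word. A scalar sketch
 * of the equivalent bit manipulation; this is not the intrinsic itself,
 * just the layout it preserves.
 */
#include <stdint.h>
#include <string.h>

/* Widen a raw bf16 bit pattern to float: sign and exponent are unchanged,
 * and the 7-bit fraction gains 16 trailing zero bits (23 bits total). */
static float bf16_bits_to_float(uint16_t bf16_bits) {
    uint32_t bits = (uint32_t)bf16_bits << 16;
    float f;
    memcpy(&f, &bits, sizeof f); /* type-pun safely via memcpy */
    return f;
}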

/// Convert Two Packed Single Data to One Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
///
/// \param __A
///    A 512-bit vector of [16 x float].
/// \param __B
///    A 512-bit vector of [16 x float].
/// \returns A 512-bit vector of [32 x bfloat] whose lower 256 bits come from
///    conversion of __B, and higher 256 bits come from conversion of __A.
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_cvtne2ps_pbh(__m512 __A, __m512 __B) {
  return (__m512bh)__builtin_ia32_cvtne2ps2bf16_512((__v16sf) __A,
                                                    (__v16sf) __B);
}
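
/*
 * Usage sketch (illustrative; assumes an AVX512-BF16 target, e.g. compiled
 * with -mavx512bf16). Note the operand order: per the comment above, the
 * low 256 bits of the result come from __B and the high 256 bits from __A.
 */
#include <immintrin.h>

/* Pack two 16-float vectors into one vector of 32 bf16 values;
 * b fills the lower half of the result and a the upper half. */
__m512bh pack_two(__m512 a, __m512 b) {
    return _mm512_cvtne2ps_pbh(a, b);
}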

/// Convert Two Packed Single Data to One Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
///
/// \param __A
///    A 512-bit vector of [16 x float].
/// \param __B
///    A 512-bit vector of [16 x float].
/// \param __W
///    A 512-bit vector of [32 x bfloat].
/// \param __U
///    A 32-bit mask value specifying what is chosen for each element.
///    A 1 means conversion of __A or __B. A 0 means element from __W.
/// \returns A 512-bit vector of [32 x bfloat] whose lower 256 bits come from
///    conversion of __B, and higher 256 bits come from conversion of __A.
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_mask_cvtne2ps_pbh(__m512bh __W, __mmask32 __U, __m512 __A, __m512 __B) {
  return (__m512bh)__builtin_ia32_selectpbf_512((__mmask32)__U,
                                                (__v32bf)_mm512_cvtne2ps_pbh(__A, __B),
                                                (__v32bf)__W);
}

/// Convert Two Packed Single Data to One Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
///
/// \param __A
///    A 512-bit vector of [16 x float].
/// \param __B
///    A 512-bit vector of [16 x float].
/// \param __U
///    A 32-bit mask value specifying what is chosen for each element.
///    A 1 means conversion of __A or __B. A 0 means element is zero.
/// \returns A 512-bit vector of [32 x bfloat] whose lower 256 bits come from
///    conversion of __B, and higher 256 bits come from conversion of __A.
static __inline__ __m512bh __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtne2ps_pbh(__mmask32 __U, __m512 __A, __m512 __B) {
  return (__m512bh)__builtin_ia32_selectpbf_512((__mmask32)__U,
                                                (__v32bf)_mm512_cvtne2ps_pbh(__A, __B),
                                                (__v32bf)_mm512_setzero_si512());
}

/// Convert Packed Single Data to Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
///
/// \param __A
///    A 512-bit vector of [16 x float].
/// \returns A 256-bit vector of [16 x bfloat] come from conversion of __A.
static __inline__ __m256bh __DEFAULT_FN_ATTRS512
_mm512_cvtneps_pbh(__m512 __A) {
  return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A,
                                                        (__v16bf)_mm256_undefined_si256(),
                                                        (__mmask16)-1);
}

/// Convert Packed Single Data to Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
///
/// \param __A
///    A 512-bit vector of [16 x float].
/// \param __W
///    A 256-bit vector of [16 x bfloat].
/// \param __U
///    A 16-bit mask value specifying what is chosen for each element.
///    A 1 means conversion of __A. A 0 means element from __W.
/// \returns A 256-bit vector of [16 x bfloat] come from conversion of __A.
static __inline__ __m256bh __DEFAULT_FN_ATTRS512
_mm512_mask_cvtneps_pbh(__m256bh __W, __mmask16 __U, __m512 __A) {
  return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A,
                                                        (__v16bf)__W,
                                                        (__mmask16)__U);
}

/// Convert Packed Single Data to Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
///
/// \param __A
///    A 512-bit vector of [16 x float].
/// \param __U
///    A 16-bit mask value specifying what is chosen for each element.
///    A 1 means conversion of __A. A 0 means element is zero.
/// \returns A 256-bit vector of [16 x bfloat] come from conversion of __A.
static __inline__ __m256bh __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtneps_pbh(__mmask16 __U, __m512 __A) {
  return (__m256bh)__builtin_ia32_cvtneps2bf16_512_mask((__v16sf)__A,
                                                        (__v16bf)_mm256_setzero_si256(),
                                                        (__mmask16)__U);
}

/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
///
/// \param __A
///    A 512-bit vector of [32 x bfloat].
/// \param __B
///    A 512-bit vector of [32 x bfloat].
/// \param __D
///    A 512-bit vector of [16 x float].
/// \returns A 512-bit vector of [16 x float] comes from Dot Product of
///  __A, __B and __D
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_dpbf16_ps(__m512 __D, __m512bh __A, __m512bh __B) {
  return (__m512)__builtin_ia32_dpbf16ps_512((__v16sf) __D,
                                             (__v32bf) __A,
                                             (__v32bf) __B);
}
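
/*
 * Usage sketch (illustrative). Because VDPBF16PS multiplies adjacent bf16
 * pairs and adds each pairwise sum into a 32-bit float lane, a bf16 inner
 * product can keep its running sum in full single precision. Assumes an
 * AVX512-BF16 target plus AVX512F for the final reduction; n is assumed to
 * be a multiple of 32 here.
 */
#include <immintrin.h>

/* Dot product of two bf16 arrays (n elements, n % 32 == 0). Each step
 * consumes 32 bf16 pairs and accumulates 16 float partial sums. */
float bf16_dot(const __m512bh *a, const __m512bh *b, int n) {
    __m512 acc = _mm512_setzero_ps();
    for (int i = 0; i < n / 32; ++i)
        acc = _mm512_dpbf16_ps(acc, a[i], b[i]);
    return _mm512_reduce_add_ps(acc); /* horizontal sum of the 16 lanes */
}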

/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
///
/// \param __A
///    A 512-bit vector of [32 x bfloat].
/// \param __B
///    A 512-bit vector of [32 x bfloat].
/// \param __D
///    A 512-bit vector of [16 x float].
/// \param __U
///    A 16-bit mask value specifying what is chosen for each element.
///    A 1 means __A and __B's dot product accumulated with __D. A 0 means __D.
/// \returns A 512-bit vector of [16 x float] comes from Dot Product of
///  __A, __B and __D
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_dpbf16_ps(__m512 __D, __mmask16 __U, __m512bh __A, __m512bh __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_dpbf16_ps(__D, __A, __B),
                                             (__v16sf)__D);
}

/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
///
/// \param __A
///    A 512-bit vector of [32 x bfloat].
/// \param __B
///    A 512-bit vector of [32 x bfloat].
/// \param __D
///    A 512-bit vector of [16 x float].
/// \param __U
///    A 16-bit mask value specifying what is chosen for each element.
///    A 1 means __A and __B's dot product accumulated with __D. A 0 means 0.
/// \returns A 512-bit vector of [16 x float] comes from Dot Product of
///  __A, __B and __D
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_dpbf16_ps(__mmask16 __U, __m512 __D, __m512bh __A, __m512bh __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_dpbf16_ps(__D, __A, __B),
                                             (__v16sf)_mm512_setzero_si512());
}

/// Convert Packed BF16 Data to Packed float Data.
///
/// \headerfile <x86intrin.h>
///
/// \param __A
///    A 256-bit vector of [16 x bfloat].
/// \returns A 512-bit vector of [16 x float] come from conversion of __A
static __inline__ __m512 __DEFAULT_FN_ATTRS512 _mm512_cvtpbh_ps(__m256bh __A) {
  return _mm512_castsi512_ps((__m512i)_mm512_slli_epi32(
      (__m512i)_mm512_cvtepi16_epi32((__m256i)__A), 16));
}
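
/*
 * Note (illustrative aside): _mm512_cvtpbh_ps is not backed by a dedicated
 * conversion instruction. It widens each 16-bit element to 32 bits and
 * shifts it left by 16 (the extension bits are shifted out, so sign vs.
 * zero extension is immaterial) -- the vector form of the scalar bit trick
 * sketched earlier. The conversion is lossless, since every bf16 value is
 * exactly representable as a float. Usage sketch, assuming AVX512-BF16:
 */
#include <immintrin.h>

/* Widen 16 bf16 values to 16 floats; exact, no rounding involved. */
__m512 widen_bf16(__m256bh v) {
    return _mm512_cvtpbh_ps(v);
}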

/// Convert Packed BF16 Data to Packed float Data using zeroing mask.
///
/// \headerfile <x86intrin.h>
///
/// \param __U
///    A 16-bit mask. Elements are zeroed out when the corresponding mask
///    bit is not set.
/// \param __A
///    A 256-bit vector of [16 x bfloat].
/// \returns A 512-bit vector of [16 x float] come from conversion of __A
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtpbh_ps(__mmask16 __U, __m256bh __A) {
  return _mm512_castsi512_ps((__m512i)_mm512_slli_epi32(
      (__m512i)_mm512_maskz_cvtepi16_epi32((__mmask16)__U, (__m256i)__A), 16));
}

/// Convert Packed BF16 Data to Packed float Data using merging mask.
///
/// \headerfile <x86intrin.h>
///
/// \param __S
///    A 512-bit vector of [16 x float]. Elements are copied from __S when
///    the corresponding mask bit is not set.
/// \param __U
///    A 16-bit mask.
/// \param __A
///    A 256-bit vector of [16 x bfloat].
/// \returns A 512-bit vector of [16 x float] come from conversion of __A
static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_cvtpbh_ps(__m512 __S, __mmask16 __U, __m256bh __A) {
  return _mm512_castsi512_ps((__m512i)_mm512_mask_slli_epi32(
      (__m512i)__S, (__mmask16)__U,
      (__m512i)_mm512_cvtepi16_epi32((__m256i)__A), 16));
}

#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS512

#endif
#endif
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512dqintrin.h
ADDED
@@ -0,0 +1,1379 @@
/*===---- avx512dqintrin.h - AVX512DQ intrinsics ---------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __IMMINTRIN_H
#error "Never use <avx512dqintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __AVX512DQINTRIN_H
#define __AVX512DQINTRIN_H

/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS512 __attribute__((__always_inline__, __nodebug__, __target__("avx512dq,evex512"), __min_vector_width__(512)))
#define __DEFAULT_FN_ATTRS \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512dq,no-evex512")))

static __inline __mmask8 __DEFAULT_FN_ATTRS
_knot_mask8(__mmask8 __M)
{
  return __builtin_ia32_knotqi(__M);
}

static __inline__ __mmask8 __DEFAULT_FN_ATTRS
_kand_mask8(__mmask8 __A, __mmask8 __B)
{
  return (__mmask8)__builtin_ia32_kandqi((__mmask8)__A, (__mmask8)__B);
}

static __inline__ __mmask8 __DEFAULT_FN_ATTRS
_kandn_mask8(__mmask8 __A, __mmask8 __B)
{
  return (__mmask8)__builtin_ia32_kandnqi((__mmask8)__A, (__mmask8)__B);
}

static __inline__ __mmask8 __DEFAULT_FN_ATTRS
_kor_mask8(__mmask8 __A, __mmask8 __B)
{
  return (__mmask8)__builtin_ia32_korqi((__mmask8)__A, (__mmask8)__B);
}

static __inline__ __mmask8 __DEFAULT_FN_ATTRS
_kxnor_mask8(__mmask8 __A, __mmask8 __B)
{
  return (__mmask8)__builtin_ia32_kxnorqi((__mmask8)__A, (__mmask8)__B);
}

static __inline__ __mmask8 __DEFAULT_FN_ATTRS
_kxor_mask8(__mmask8 __A, __mmask8 __B)
{
  return (__mmask8)__builtin_ia32_kxorqi((__mmask8)__A, (__mmask8)__B);
}

static __inline__ unsigned char __DEFAULT_FN_ATTRS
_kortestc_mask8_u8(__mmask8 __A, __mmask8 __B)
{
  return (unsigned char)__builtin_ia32_kortestcqi(__A, __B);
}

static __inline__ unsigned char __DEFAULT_FN_ATTRS
_kortestz_mask8_u8(__mmask8 __A, __mmask8 __B)
{
  return (unsigned char)__builtin_ia32_kortestzqi(__A, __B);
}

static __inline__ unsigned char __DEFAULT_FN_ATTRS
_kortest_mask8_u8(__mmask8 __A, __mmask8 __B, unsigned char *__C) {
  *__C = (unsigned char)__builtin_ia32_kortestcqi(__A, __B);
  return (unsigned char)__builtin_ia32_kortestzqi(__A, __B);
}

static __inline__ unsigned char __DEFAULT_FN_ATTRS
_ktestc_mask8_u8(__mmask8 __A, __mmask8 __B)
{
  return (unsigned char)__builtin_ia32_ktestcqi(__A, __B);
}

static __inline__ unsigned char __DEFAULT_FN_ATTRS
_ktestz_mask8_u8(__mmask8 __A, __mmask8 __B)
{
  return (unsigned char)__builtin_ia32_ktestzqi(__A, __B);
}

static __inline__ unsigned char __DEFAULT_FN_ATTRS
_ktest_mask8_u8(__mmask8 __A, __mmask8 __B, unsigned char *__C) {
  *__C = (unsigned char)__builtin_ia32_ktestcqi(__A, __B);
  return (unsigned char)__builtin_ia32_ktestzqi(__A, __B);
}

static __inline__ unsigned char __DEFAULT_FN_ATTRS
_ktestc_mask16_u8(__mmask16 __A, __mmask16 __B)
{
  return (unsigned char)__builtin_ia32_ktestchi(__A, __B);
}

static __inline__ unsigned char __DEFAULT_FN_ATTRS
_ktestz_mask16_u8(__mmask16 __A, __mmask16 __B)
{
  return (unsigned char)__builtin_ia32_ktestzhi(__A, __B);
}

static __inline__ unsigned char __DEFAULT_FN_ATTRS
_ktest_mask16_u8(__mmask16 __A, __mmask16 __B, unsigned char *__C) {
  *__C = (unsigned char)__builtin_ia32_ktestchi(__A, __B);
  return (unsigned char)__builtin_ia32_ktestzhi(__A, __B);
}

static __inline__ __mmask8 __DEFAULT_FN_ATTRS
_kadd_mask8(__mmask8 __A, __mmask8 __B)
{
  return (__mmask8)__builtin_ia32_kaddqi((__mmask8)__A, (__mmask8)__B);
}

static __inline__ __mmask16 __DEFAULT_FN_ATTRS
_kadd_mask16(__mmask16 __A, __mmask16 __B)
{
  return (__mmask16)__builtin_ia32_kaddhi((__mmask16)__A, (__mmask16)__B);
}
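
/*
 * Usage sketch (illustrative; assumes an AVX512DQ target). These 8-bit mask
 * operations mirror the 16-bit mask family from AVX512F and are handy for
 * combining comparison results before a masked operation on 8-lane vectors.
 */
#include <immintrin.h>

/* Keep the lanes of d that lie strictly between lo and hi; zero the rest.
 * Each comparison yields an 8-bit mask, combined with _kand_mask8. */
__m512d band_select(__m512d d, __m512d lo, __m512d hi) {
    __mmask8 gt = _mm512_cmp_pd_mask(d, lo, _CMP_GT_OQ);
    __mmask8 lt = _mm512_cmp_pd_mask(d, hi, _CMP_LT_OQ);
    return _mm512_maskz_mov_pd(_kand_mask8(gt, lt), d);
}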

#define _kshiftli_mask8(A, I) \
  ((__mmask8)__builtin_ia32_kshiftliqi((__mmask8)(A), (unsigned int)(I)))

#define _kshiftri_mask8(A, I) \
  ((__mmask8)__builtin_ia32_kshiftriqi((__mmask8)(A), (unsigned int)(I)))

static __inline__ unsigned int __DEFAULT_FN_ATTRS
_cvtmask8_u32(__mmask8 __A) {
  return (unsigned int)__builtin_ia32_kmovb((__mmask8)__A);
}

static __inline__ __mmask8 __DEFAULT_FN_ATTRS
_cvtu32_mask8(unsigned int __A) {
  return (__mmask8)__builtin_ia32_kmovb((__mmask8)__A);
}

static __inline__ __mmask8 __DEFAULT_FN_ATTRS
_load_mask8(__mmask8 *__A) {
  return (__mmask8)__builtin_ia32_kmovb(*(__mmask8 *)__A);
}

static __inline__ void __DEFAULT_FN_ATTRS
_store_mask8(__mmask8 *__A, __mmask8 __B) {
  *(__mmask8 *)__A = __builtin_ia32_kmovb((__mmask8)__B);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mullo_epi64 (__m512i __A, __m512i __B) {
  return (__m512i) ((__v8du) __A * (__v8du) __B);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_mullo_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                             (__v8di)_mm512_mullo_epi64(__A, __B),
                                             (__v8di)__W);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_mullo_epi64(__mmask8 __U, __m512i __A, __m512i __B) {
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
                                             (__v8di)_mm512_mullo_epi64(__A, __B),
                                             (__v8di)_mm512_setzero_si512());
}
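
/*
 * Usage sketch (illustrative). AVX512DQ adds the full 64-bit low multiply
 * (VPMULLQ) that AVX512F lacks; the masked forms follow the usual merge
 * and zero conventions.
 */
#include <immintrin.h>

/* Multiply 8 pairs of 64-bit integers only where the mask bit is set;
 * unselected lanes keep their values from src. */
__m512i mul64_where(__m512i src, __mmask8 m, __m512i a, __m512i b) {
    return _mm512_mask_mullo_epi64(src, m, a, b);
}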

static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_xor_pd(__m512d __A, __m512d __B) {
  return (__m512d)((__v8du)__A ^ (__v8du)__B);
}

static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_xor_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_xor_pd(__A, __B),
                                              (__v8df)__W);
}

static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_xor_pd(__mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_xor_pd(__A, __B),
                                              (__v8df)_mm512_setzero_pd());
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_xor_ps (__m512 __A, __m512 __B) {
  return (__m512)((__v16su)__A ^ (__v16su)__B);
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_xor_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_xor_ps(__A, __B),
                                             (__v16sf)__W);
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_xor_ps(__mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_xor_ps(__A, __B),
                                             (__v16sf)_mm512_setzero_ps());
}

static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_or_pd(__m512d __A, __m512d __B) {
  return (__m512d)((__v8du)__A | (__v8du)__B);
}

static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_or_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_or_pd(__A, __B),
                                              (__v8df)__W);
}

static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_or_pd(__mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_or_pd(__A, __B),
                                              (__v8df)_mm512_setzero_pd());
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_or_ps(__m512 __A, __m512 __B) {
  return (__m512)((__v16su)__A | (__v16su)__B);
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_or_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_or_ps(__A, __B),
                                             (__v16sf)__W);
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_or_ps(__mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_or_ps(__A, __B),
                                             (__v16sf)_mm512_setzero_ps());
}

static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_and_pd(__m512d __A, __m512d __B) {
  return (__m512d)((__v8du)__A & (__v8du)__B);
}

static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_and_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_and_pd(__A, __B),
                                              (__v8df)__W);
}

static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_and_pd(__mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_and_pd(__A, __B),
                                              (__v8df)_mm512_setzero_pd());
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_and_ps(__m512 __A, __m512 __B) {
  return (__m512)((__v16su)__A & (__v16su)__B);
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_and_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_and_ps(__A, __B),
                                             (__v16sf)__W);
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_and_ps(__mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_and_ps(__A, __B),
                                             (__v16sf)_mm512_setzero_ps());
}

static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_andnot_pd(__m512d __A, __m512d __B) {
  return (__m512d)(~(__v8du)__A & (__v8du)__B);
}

static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_andnot_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_andnot_pd(__A, __B),
                                              (__v8df)__W);
}

static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_andnot_pd(__mmask8 __U, __m512d __A, __m512d __B) {
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
                                              (__v8df)_mm512_andnot_pd(__A, __B),
                                              (__v8df)_mm512_setzero_pd());
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_andnot_ps(__m512 __A, __m512 __B) {
  return (__m512)(~(__v16su)__A & (__v16su)__B);
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_andnot_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_andnot_ps(__A, __B),
                                             (__v16sf)__W);
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_andnot_ps(__mmask16 __U, __m512 __A, __m512 __B) {
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
                                             (__v16sf)_mm512_andnot_ps(__A, __B),
                                             (__v16sf)_mm512_setzero_ps());
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_cvtpd_epi64 (__m512d __A) {
  return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
                                                    (__v8di) _mm512_setzero_si512(),
                                                    (__mmask8) -1,
                                                    _MM_FROUND_CUR_DIRECTION);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_cvtpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) {
  return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
                                                    (__v8di) __W,
                                                    (__mmask8) __U,
                                                    _MM_FROUND_CUR_DIRECTION);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_cvtpd_epi64 (__mmask8 __U, __m512d __A) {
  return (__m512i) __builtin_ia32_cvtpd2qq512_mask ((__v8df) __A,
                                                    (__v8di) _mm512_setzero_si512(),
                                                    (__mmask8) __U,
                                                    _MM_FROUND_CUR_DIRECTION);
}

#define _mm512_cvt_roundpd_epi64(A, R) \
  ((__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
                                            (__v8di)_mm512_setzero_si512(), \
                                            (__mmask8)-1, (int)(R)))

#define _mm512_mask_cvt_roundpd_epi64(W, U, A, R) \
  ((__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
                                            (__v8di)(__m512i)(W), \
                                            (__mmask8)(U), (int)(R)))

#define _mm512_maskz_cvt_roundpd_epi64(U, A, R) \
  ((__m512i)__builtin_ia32_cvtpd2qq512_mask((__v8df)(__m512d)(A), \
                                            (__v8di)_mm512_setzero_si512(), \
                                            (__mmask8)(U), (int)(R)))
|
360 |
+
|
361 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
362 |
+
_mm512_cvtpd_epu64 (__m512d __A) {
|
363 |
+
return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
|
364 |
+
(__v8di) _mm512_setzero_si512(),
|
365 |
+
(__mmask8) -1,
|
366 |
+
_MM_FROUND_CUR_DIRECTION);
|
367 |
+
}
|
368 |
+
|
369 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
370 |
+
_mm512_mask_cvtpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) {
|
371 |
+
return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
|
372 |
+
(__v8di) __W,
|
373 |
+
(__mmask8) __U,
|
374 |
+
_MM_FROUND_CUR_DIRECTION);
|
375 |
+
}
|
376 |
+
|
377 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
378 |
+
_mm512_maskz_cvtpd_epu64 (__mmask8 __U, __m512d __A) {
|
379 |
+
return (__m512i) __builtin_ia32_cvtpd2uqq512_mask ((__v8df) __A,
|
380 |
+
(__v8di) _mm512_setzero_si512(),
|
381 |
+
(__mmask8) __U,
|
382 |
+
_MM_FROUND_CUR_DIRECTION);
|
383 |
+
}
|
384 |
+
|
385 |
+
#define _mm512_cvt_roundpd_epu64(A, R) \
|
386 |
+
((__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
|
387 |
+
(__v8di)_mm512_setzero_si512(), \
|
388 |
+
(__mmask8)-1, (int)(R)))
|
389 |
+
|
390 |
+
#define _mm512_mask_cvt_roundpd_epu64(W, U, A, R) \
|
391 |
+
((__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
|
392 |
+
(__v8di)(__m512i)(W), \
|
393 |
+
(__mmask8)(U), (int)(R)))
|
394 |
+
|
395 |
+
#define _mm512_maskz_cvt_roundpd_epu64(U, A, R) \
|
396 |
+
((__m512i)__builtin_ia32_cvtpd2uqq512_mask((__v8df)(__m512d)(A), \
|
397 |
+
(__v8di)_mm512_setzero_si512(), \
|
398 |
+
(__mmask8)(U), (int)(R)))
|
399 |
+
|
400 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
401 |
+
_mm512_cvtps_epi64 (__m256 __A) {
|
402 |
+
return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
|
403 |
+
(__v8di) _mm512_setzero_si512(),
|
404 |
+
(__mmask8) -1,
|
405 |
+
_MM_FROUND_CUR_DIRECTION);
|
406 |
+
}
|
407 |
+
|
408 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
409 |
+
_mm512_mask_cvtps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) {
|
410 |
+
return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
|
411 |
+
(__v8di) __W,
|
412 |
+
(__mmask8) __U,
|
413 |
+
_MM_FROUND_CUR_DIRECTION);
|
414 |
+
}
|
415 |
+
|
416 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
417 |
+
_mm512_maskz_cvtps_epi64 (__mmask8 __U, __m256 __A) {
|
418 |
+
return (__m512i) __builtin_ia32_cvtps2qq512_mask ((__v8sf) __A,
|
419 |
+
(__v8di) _mm512_setzero_si512(),
|
420 |
+
(__mmask8) __U,
|
421 |
+
_MM_FROUND_CUR_DIRECTION);
|
422 |
+
}
|
423 |
+
|
424 |
+
#define _mm512_cvt_roundps_epi64(A, R) \
|
425 |
+
((__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
|
426 |
+
(__v8di)_mm512_setzero_si512(), \
|
427 |
+
(__mmask8)-1, (int)(R)))
|
428 |
+
|
429 |
+
#define _mm512_mask_cvt_roundps_epi64(W, U, A, R) \
|
430 |
+
((__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
|
431 |
+
(__v8di)(__m512i)(W), \
|
432 |
+
(__mmask8)(U), (int)(R)))
|
433 |
+
|
434 |
+
#define _mm512_maskz_cvt_roundps_epi64(U, A, R) \
|
435 |
+
((__m512i)__builtin_ia32_cvtps2qq512_mask((__v8sf)(__m256)(A), \
|
436 |
+
(__v8di)_mm512_setzero_si512(), \
|
437 |
+
(__mmask8)(U), (int)(R)))
|
438 |
+
|
439 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
440 |
+
_mm512_cvtps_epu64 (__m256 __A) {
|
441 |
+
return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
|
442 |
+
(__v8di) _mm512_setzero_si512(),
|
443 |
+
(__mmask8) -1,
|
444 |
+
_MM_FROUND_CUR_DIRECTION);
|
445 |
+
}
|
446 |
+
|
447 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
448 |
+
_mm512_mask_cvtps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) {
|
449 |
+
return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
|
450 |
+
(__v8di) __W,
|
451 |
+
(__mmask8) __U,
|
452 |
+
_MM_FROUND_CUR_DIRECTION);
|
453 |
+
}
|
454 |
+
|
455 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
456 |
+
_mm512_maskz_cvtps_epu64 (__mmask8 __U, __m256 __A) {
|
457 |
+
return (__m512i) __builtin_ia32_cvtps2uqq512_mask ((__v8sf) __A,
|
458 |
+
(__v8di) _mm512_setzero_si512(),
|
459 |
+
(__mmask8) __U,
|
460 |
+
_MM_FROUND_CUR_DIRECTION);
|
461 |
+
}
|
462 |
+
|
463 |
+
#define _mm512_cvt_roundps_epu64(A, R) \
|
464 |
+
((__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
|
465 |
+
(__v8di)_mm512_setzero_si512(), \
|
466 |
+
(__mmask8)-1, (int)(R)))
|
467 |
+
|
468 |
+
#define _mm512_mask_cvt_roundps_epu64(W, U, A, R) \
|
469 |
+
((__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
|
470 |
+
(__v8di)(__m512i)(W), \
|
471 |
+
(__mmask8)(U), (int)(R)))
|
472 |
+
|
473 |
+
#define _mm512_maskz_cvt_roundps_epu64(U, A, R) \
|
474 |
+
((__m512i)__builtin_ia32_cvtps2uqq512_mask((__v8sf)(__m256)(A), \
|
475 |
+
(__v8di)_mm512_setzero_si512(), \
|
476 |
+
(__mmask8)(U), (int)(R)))
|
477 |
+
|
478 |
+
|
479 |
+
static __inline__ __m512d __DEFAULT_FN_ATTRS512
|
480 |
+
_mm512_cvtepi64_pd (__m512i __A) {
|
481 |
+
return (__m512d)__builtin_convertvector((__v8di)__A, __v8df);
|
482 |
+
}
|
483 |
+
|
484 |
+
static __inline__ __m512d __DEFAULT_FN_ATTRS512
|
485 |
+
_mm512_mask_cvtepi64_pd (__m512d __W, __mmask8 __U, __m512i __A) {
|
486 |
+
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
|
487 |
+
(__v8df)_mm512_cvtepi64_pd(__A),
|
488 |
+
(__v8df)__W);
|
489 |
+
}
|
490 |
+
|
491 |
+
static __inline__ __m512d __DEFAULT_FN_ATTRS512
|
492 |
+
_mm512_maskz_cvtepi64_pd (__mmask8 __U, __m512i __A) {
|
493 |
+
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
|
494 |
+
(__v8df)_mm512_cvtepi64_pd(__A),
|
495 |
+
(__v8df)_mm512_setzero_pd());
|
496 |
+
}
|
497 |
+
|
498 |
+
#define _mm512_cvt_roundepi64_pd(A, R) \
|
499 |
+
((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
|
500 |
+
(__v8df)_mm512_setzero_pd(), \
|
501 |
+
(__mmask8)-1, (int)(R)))
|
502 |
+
|
503 |
+
#define _mm512_mask_cvt_roundepi64_pd(W, U, A, R) \
|
504 |
+
((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
|
505 |
+
(__v8df)(__m512d)(W), \
|
506 |
+
(__mmask8)(U), (int)(R)))
|
507 |
+
|
508 |
+
#define _mm512_maskz_cvt_roundepi64_pd(U, A, R) \
|
509 |
+
((__m512d)__builtin_ia32_cvtqq2pd512_mask((__v8di)(__m512i)(A), \
|
510 |
+
(__v8df)_mm512_setzero_pd(), \
|
511 |
+
(__mmask8)(U), (int)(R)))
|
512 |
+
|
513 |
+
static __inline__ __m256 __DEFAULT_FN_ATTRS512
|
514 |
+
_mm512_cvtepi64_ps (__m512i __A) {
|
515 |
+
return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
|
516 |
+
(__v8sf) _mm256_setzero_ps(),
|
517 |
+
(__mmask8) -1,
|
518 |
+
_MM_FROUND_CUR_DIRECTION);
|
519 |
+
}
|
520 |
+
|
521 |
+
static __inline__ __m256 __DEFAULT_FN_ATTRS512
|
522 |
+
_mm512_mask_cvtepi64_ps (__m256 __W, __mmask8 __U, __m512i __A) {
|
523 |
+
return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
|
524 |
+
(__v8sf) __W,
|
525 |
+
(__mmask8) __U,
|
526 |
+
_MM_FROUND_CUR_DIRECTION);
|
527 |
+
}
|
528 |
+
|
529 |
+
static __inline__ __m256 __DEFAULT_FN_ATTRS512
|
530 |
+
_mm512_maskz_cvtepi64_ps (__mmask8 __U, __m512i __A) {
|
531 |
+
return (__m256) __builtin_ia32_cvtqq2ps512_mask ((__v8di) __A,
|
532 |
+
(__v8sf) _mm256_setzero_ps(),
|
533 |
+
(__mmask8) __U,
|
534 |
+
_MM_FROUND_CUR_DIRECTION);
|
535 |
+
}
|
536 |
+
|
537 |
+
#define _mm512_cvt_roundepi64_ps(A, R) \
|
538 |
+
((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
|
539 |
+
(__v8sf)_mm256_setzero_ps(), \
|
540 |
+
(__mmask8)-1, (int)(R)))
|
541 |
+
|
542 |
+
#define _mm512_mask_cvt_roundepi64_ps(W, U, A, R) \
|
543 |
+
((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
|
544 |
+
(__v8sf)(__m256)(W), (__mmask8)(U), \
|
545 |
+
(int)(R)))
|
546 |
+
|
547 |
+
#define _mm512_maskz_cvt_roundepi64_ps(U, A, R) \
|
548 |
+
((__m256)__builtin_ia32_cvtqq2ps512_mask((__v8di)(__m512i)(A), \
|
549 |
+
(__v8sf)_mm256_setzero_ps(), \
|
550 |
+
(__mmask8)(U), (int)(R)))
|
551 |
+
|
552 |
+
|
553 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
554 |
+
_mm512_cvttpd_epi64 (__m512d __A) {
|
555 |
+
return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
|
556 |
+
(__v8di) _mm512_setzero_si512(),
|
557 |
+
(__mmask8) -1,
|
558 |
+
_MM_FROUND_CUR_DIRECTION);
|
559 |
+
}
|
560 |
+
|
561 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
562 |
+
_mm512_mask_cvttpd_epi64 (__m512i __W, __mmask8 __U, __m512d __A) {
|
563 |
+
return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
|
564 |
+
(__v8di) __W,
|
565 |
+
(__mmask8) __U,
|
566 |
+
_MM_FROUND_CUR_DIRECTION);
|
567 |
+
}
|
568 |
+
|
569 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
570 |
+
_mm512_maskz_cvttpd_epi64 (__mmask8 __U, __m512d __A) {
|
571 |
+
return (__m512i) __builtin_ia32_cvttpd2qq512_mask ((__v8df) __A,
|
572 |
+
(__v8di) _mm512_setzero_si512(),
|
573 |
+
(__mmask8) __U,
|
574 |
+
_MM_FROUND_CUR_DIRECTION);
|
575 |
+
}
|
576 |
+
|
577 |
+
#define _mm512_cvtt_roundpd_epi64(A, R) \
|
578 |
+
((__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
|
579 |
+
(__v8di)_mm512_setzero_si512(), \
|
580 |
+
(__mmask8)-1, (int)(R)))
|
581 |
+
|
582 |
+
#define _mm512_mask_cvtt_roundpd_epi64(W, U, A, R) \
|
583 |
+
((__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
|
584 |
+
(__v8di)(__m512i)(W), \
|
585 |
+
(__mmask8)(U), (int)(R)))
|
586 |
+
|
587 |
+
#define _mm512_maskz_cvtt_roundpd_epi64(U, A, R) \
|
588 |
+
((__m512i)__builtin_ia32_cvttpd2qq512_mask((__v8df)(__m512d)(A), \
|
589 |
+
(__v8di)_mm512_setzero_si512(), \
|
590 |
+
(__mmask8)(U), (int)(R)))
|
591 |
+
|
592 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
593 |
+
_mm512_cvttpd_epu64 (__m512d __A) {
|
594 |
+
return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
|
595 |
+
(__v8di) _mm512_setzero_si512(),
|
596 |
+
(__mmask8) -1,
|
597 |
+
_MM_FROUND_CUR_DIRECTION);
|
598 |
+
}
|
599 |
+
|
600 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
601 |
+
_mm512_mask_cvttpd_epu64 (__m512i __W, __mmask8 __U, __m512d __A) {
|
602 |
+
return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
|
603 |
+
(__v8di) __W,
|
604 |
+
(__mmask8) __U,
|
605 |
+
_MM_FROUND_CUR_DIRECTION);
|
606 |
+
}
|
607 |
+
|
608 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
609 |
+
_mm512_maskz_cvttpd_epu64 (__mmask8 __U, __m512d __A) {
|
610 |
+
return (__m512i) __builtin_ia32_cvttpd2uqq512_mask ((__v8df) __A,
|
611 |
+
(__v8di) _mm512_setzero_si512(),
|
612 |
+
(__mmask8) __U,
|
613 |
+
_MM_FROUND_CUR_DIRECTION);
|
614 |
+
}
|
615 |
+
|
616 |
+
#define _mm512_cvtt_roundpd_epu64(A, R) \
|
617 |
+
((__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
|
618 |
+
(__v8di)_mm512_setzero_si512(), \
|
619 |
+
(__mmask8)-1, (int)(R)))
|
620 |
+
|
621 |
+
#define _mm512_mask_cvtt_roundpd_epu64(W, U, A, R) \
|
622 |
+
((__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
|
623 |
+
(__v8di)(__m512i)(W), \
|
624 |
+
(__mmask8)(U), (int)(R)))
|
625 |
+
|
626 |
+
#define _mm512_maskz_cvtt_roundpd_epu64(U, A, R) \
|
627 |
+
((__m512i)__builtin_ia32_cvttpd2uqq512_mask((__v8df)(__m512d)(A), \
|
628 |
+
(__v8di)_mm512_setzero_si512(), \
|
629 |
+
(__mmask8)(U), (int)(R)))
|
630 |
+
|
631 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
632 |
+
_mm512_cvttps_epi64 (__m256 __A) {
|
633 |
+
return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
|
634 |
+
(__v8di) _mm512_setzero_si512(),
|
635 |
+
(__mmask8) -1,
|
636 |
+
_MM_FROUND_CUR_DIRECTION);
|
637 |
+
}
|
638 |
+
|
639 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
640 |
+
_mm512_mask_cvttps_epi64 (__m512i __W, __mmask8 __U, __m256 __A) {
|
641 |
+
return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
|
642 |
+
(__v8di) __W,
|
643 |
+
(__mmask8) __U,
|
644 |
+
_MM_FROUND_CUR_DIRECTION);
|
645 |
+
}
|
646 |
+
|
647 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
648 |
+
_mm512_maskz_cvttps_epi64 (__mmask8 __U, __m256 __A) {
|
649 |
+
return (__m512i) __builtin_ia32_cvttps2qq512_mask ((__v8sf) __A,
|
650 |
+
(__v8di) _mm512_setzero_si512(),
|
651 |
+
(__mmask8) __U,
|
652 |
+
_MM_FROUND_CUR_DIRECTION);
|
653 |
+
}
|
654 |
+
|
655 |
+
#define _mm512_cvtt_roundps_epi64(A, R) \
|
656 |
+
((__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
|
657 |
+
(__v8di)_mm512_setzero_si512(), \
|
658 |
+
(__mmask8)-1, (int)(R)))
|
659 |
+
|
660 |
+
#define _mm512_mask_cvtt_roundps_epi64(W, U, A, R) \
|
661 |
+
((__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
|
662 |
+
(__v8di)(__m512i)(W), \
|
663 |
+
(__mmask8)(U), (int)(R)))
|
664 |
+
|
665 |
+
#define _mm512_maskz_cvtt_roundps_epi64(U, A, R) \
|
666 |
+
((__m512i)__builtin_ia32_cvttps2qq512_mask((__v8sf)(__m256)(A), \
|
667 |
+
(__v8di)_mm512_setzero_si512(), \
|
668 |
+
(__mmask8)(U), (int)(R)))
|
669 |
+
|
670 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
671 |
+
_mm512_cvttps_epu64 (__m256 __A) {
|
672 |
+
return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
|
673 |
+
(__v8di) _mm512_setzero_si512(),
|
674 |
+
(__mmask8) -1,
|
675 |
+
_MM_FROUND_CUR_DIRECTION);
|
676 |
+
}
|
677 |
+
|
678 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
679 |
+
_mm512_mask_cvttps_epu64 (__m512i __W, __mmask8 __U, __m256 __A) {
|
680 |
+
return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
|
681 |
+
(__v8di) __W,
|
682 |
+
(__mmask8) __U,
|
683 |
+
_MM_FROUND_CUR_DIRECTION);
|
684 |
+
}
|
685 |
+
|
686 |
+
static __inline__ __m512i __DEFAULT_FN_ATTRS512
|
687 |
+
_mm512_maskz_cvttps_epu64 (__mmask8 __U, __m256 __A) {
|
688 |
+
return (__m512i) __builtin_ia32_cvttps2uqq512_mask ((__v8sf) __A,
|
689 |
+
(__v8di) _mm512_setzero_si512(),
|
690 |
+
(__mmask8) __U,
|
691 |
+
_MM_FROUND_CUR_DIRECTION);
|
692 |
+
}
|
693 |
+
|
694 |
+
#define _mm512_cvtt_roundps_epu64(A, R) \
|
695 |
+
((__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
|
696 |
+
(__v8di)_mm512_setzero_si512(), \
|
697 |
+
(__mmask8)-1, (int)(R)))
|
698 |
+
|
699 |
+
#define _mm512_mask_cvtt_roundps_epu64(W, U, A, R) \
|
700 |
+
((__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
|
701 |
+
(__v8di)(__m512i)(W), \
|
702 |
+
(__mmask8)(U), (int)(R)))
|
703 |
+
|
704 |
+
#define _mm512_maskz_cvtt_roundps_epu64(U, A, R) \
|
705 |
+
((__m512i)__builtin_ia32_cvttps2uqq512_mask((__v8sf)(__m256)(A), \
|
706 |
+
(__v8di)_mm512_setzero_si512(), \
|
707 |
+
(__mmask8)(U), (int)(R)))
|
708 |
+
|
709 |
+
static __inline__ __m512d __DEFAULT_FN_ATTRS512
|
710 |
+
_mm512_cvtepu64_pd (__m512i __A) {
|
711 |
+
return (__m512d)__builtin_convertvector((__v8du)__A, __v8df);
|
712 |
+
}
|
713 |
+
|
714 |
+
static __inline__ __m512d __DEFAULT_FN_ATTRS512
|
715 |
+
_mm512_mask_cvtepu64_pd (__m512d __W, __mmask8 __U, __m512i __A) {
|
716 |
+
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
|
717 |
+
(__v8df)_mm512_cvtepu64_pd(__A),
|
718 |
+
(__v8df)__W);
|
719 |
+
}
|
720 |
+
|
721 |
+
static __inline__ __m512d __DEFAULT_FN_ATTRS512
|
722 |
+
_mm512_maskz_cvtepu64_pd (__mmask8 __U, __m512i __A) {
|
723 |
+
return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
|
724 |
+
(__v8df)_mm512_cvtepu64_pd(__A),
|
725 |
+
(__v8df)_mm512_setzero_pd());
|
726 |
+
}
|
727 |
+
|
728 |
+
#define _mm512_cvt_roundepu64_pd(A, R) \
|
729 |
+
((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
|
730 |
+
(__v8df)_mm512_setzero_pd(), \
|
731 |
+
(__mmask8)-1, (int)(R)))
|
732 |
+
|
733 |
+
#define _mm512_mask_cvt_roundepu64_pd(W, U, A, R) \
|
734 |
+
((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
|
735 |
+
(__v8df)(__m512d)(W), \
|
736 |
+
(__mmask8)(U), (int)(R)))
|
737 |
+
|
738 |
+
|
739 |
+
#define _mm512_maskz_cvt_roundepu64_pd(U, A, R) \
|
740 |
+
((__m512d)__builtin_ia32_cvtuqq2pd512_mask((__v8di)(__m512i)(A), \
|
741 |
+
(__v8df)_mm512_setzero_pd(), \
|
742 |
+
(__mmask8)(U), (int)(R)))
|
743 |
+
|
744 |
+
|
745 |
+
static __inline__ __m256 __DEFAULT_FN_ATTRS512
|
746 |
+
_mm512_cvtepu64_ps (__m512i __A) {
|
747 |
+
return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
|
748 |
+
(__v8sf) _mm256_setzero_ps(),
|
749 |
+
(__mmask8) -1,
|
750 |
+
_MM_FROUND_CUR_DIRECTION);
|
751 |
+
}
|
752 |
+
|
753 |
+
static __inline__ __m256 __DEFAULT_FN_ATTRS512
|
754 |
+
_mm512_mask_cvtepu64_ps (__m256 __W, __mmask8 __U, __m512i __A) {
|
755 |
+
return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
|
756 |
+
(__v8sf) __W,
|
757 |
+
(__mmask8) __U,
|
758 |
+
_MM_FROUND_CUR_DIRECTION);
|
759 |
+
}
|
760 |
+
|
761 |
+
static __inline__ __m256 __DEFAULT_FN_ATTRS512
|
762 |
+
_mm512_maskz_cvtepu64_ps (__mmask8 __U, __m512i __A) {
|
763 |
+
return (__m256) __builtin_ia32_cvtuqq2ps512_mask ((__v8di) __A,
|
764 |
+
(__v8sf) _mm256_setzero_ps(),
|
765 |
+
(__mmask8) __U,
|
766 |
+
_MM_FROUND_CUR_DIRECTION);
|
767 |
+
}
|
768 |
+
|
769 |
+
#define _mm512_cvt_roundepu64_ps(A, R) \
|
770 |
+
((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
|
771 |
+
(__v8sf)_mm256_setzero_ps(), \
|
772 |
+
(__mmask8)-1, (int)(R)))
|
773 |
+
|
774 |
+
#define _mm512_mask_cvt_roundepu64_ps(W, U, A, R) \
|
775 |
+
((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
|
776 |
+
(__v8sf)(__m256)(W), (__mmask8)(U), \
|
777 |
+
(int)(R)))
|
778 |
+
|
779 |
+
#define _mm512_maskz_cvt_roundepu64_ps(U, A, R) \
|
780 |
+
((__m256)__builtin_ia32_cvtuqq2ps512_mask((__v8di)(__m512i)(A), \
|
781 |
+
(__v8sf)_mm256_setzero_ps(), \
|
782 |
+
(__mmask8)(U), (int)(R)))
|
783 |
+
|
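
/*
 * Usage sketch (editorial, not part of the upstream header; the helper name
 * is illustrative): the _cvt_ functions above round according to the current
 * MXCSR mode, the _cvt_round_ macros take an explicit rounding control, and
 * the _cvtt_ forms always truncate toward zero.  Assuming -mavx512dq and
 * <immintrin.h>:
 */
static inline __m512i sketch_round_vs_truncate(__m512d v, __m512i *trunc_out) {
  /* Truncation toward zero, like a C cast to long long per lane. */
  *trunc_out = _mm512_cvttpd_epi64(v);
  /* Explicit round-to-nearest-even with floating-point exceptions
     suppressed; R must be a compile-time constant, hence the macro form. */
  return _mm512_cvt_roundpd_epi64(v, _MM_FROUND_TO_NEAREST_INT |
                                         _MM_FROUND_NO_EXC);
}
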
#define _mm512_range_pd(A, B, C) \
  ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
                                           (__v8df)(__m512d)(B), (int)(C), \
                                           (__v8df)_mm512_setzero_pd(), \
                                           (__mmask8)-1, \
                                           _MM_FROUND_CUR_DIRECTION))

#define _mm512_mask_range_pd(W, U, A, B, C) \
  ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
                                           (__v8df)(__m512d)(B), (int)(C), \
                                           (__v8df)(__m512d)(W), (__mmask8)(U), \
                                           _MM_FROUND_CUR_DIRECTION))

#define _mm512_maskz_range_pd(U, A, B, C) \
  ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
                                           (__v8df)(__m512d)(B), (int)(C), \
                                           (__v8df)_mm512_setzero_pd(), \
                                           (__mmask8)(U), \
                                           _MM_FROUND_CUR_DIRECTION))

#define _mm512_range_round_pd(A, B, C, R) \
  ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
                                           (__v8df)(__m512d)(B), (int)(C), \
                                           (__v8df)_mm512_setzero_pd(), \
                                           (__mmask8)-1, (int)(R)))

#define _mm512_mask_range_round_pd(W, U, A, B, C, R) \
  ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
                                           (__v8df)(__m512d)(B), (int)(C), \
                                           (__v8df)(__m512d)(W), (__mmask8)(U), \
                                           (int)(R)))

#define _mm512_maskz_range_round_pd(U, A, B, C, R) \
  ((__m512d)__builtin_ia32_rangepd512_mask((__v8df)(__m512d)(A), \
                                           (__v8df)(__m512d)(B), (int)(C), \
                                           (__v8df)_mm512_setzero_pd(), \
                                           (__mmask8)(U), (int)(R)))

#define _mm512_range_ps(A, B, C) \
  ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
                                          (__v16sf)(__m512)(B), (int)(C), \
                                          (__v16sf)_mm512_setzero_ps(), \
                                          (__mmask16)-1, \
                                          _MM_FROUND_CUR_DIRECTION))

#define _mm512_mask_range_ps(W, U, A, B, C) \
  ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
                                          (__v16sf)(__m512)(B), (int)(C), \
                                          (__v16sf)(__m512)(W), (__mmask16)(U), \
                                          _MM_FROUND_CUR_DIRECTION))

#define _mm512_maskz_range_ps(U, A, B, C) \
  ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
                                          (__v16sf)(__m512)(B), (int)(C), \
                                          (__v16sf)_mm512_setzero_ps(), \
                                          (__mmask16)(U), \
                                          _MM_FROUND_CUR_DIRECTION))

#define _mm512_range_round_ps(A, B, C, R) \
  ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
                                          (__v16sf)(__m512)(B), (int)(C), \
                                          (__v16sf)_mm512_setzero_ps(), \
                                          (__mmask16)-1, (int)(R)))

#define _mm512_mask_range_round_ps(W, U, A, B, C, R) \
  ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
                                          (__v16sf)(__m512)(B), (int)(C), \
                                          (__v16sf)(__m512)(W), (__mmask16)(U), \
                                          (int)(R)))

#define _mm512_maskz_range_round_ps(U, A, B, C, R) \
  ((__m512)__builtin_ia32_rangeps512_mask((__v16sf)(__m512)(A), \
                                          (__v16sf)(__m512)(B), (int)(C), \
                                          (__v16sf)_mm512_setzero_ps(), \
                                          (__mmask16)(U), (int)(R)))

#define _mm_range_round_ss(A, B, C, R) \
  ((__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
                                                (__v4sf)(__m128)(B), \
                                                (__v4sf)_mm_setzero_ps(), \
                                                (__mmask8) -1, (int)(C),\
                                                (int)(R)))

#define _mm_range_ss(A ,B , C) _mm_range_round_ss(A, B, C ,_MM_FROUND_CUR_DIRECTION)

#define _mm_mask_range_round_ss(W, U, A, B, C, R) \
  ((__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
                                                (__v4sf)(__m128)(B), \
                                                (__v4sf)(__m128)(W),\
                                                (__mmask8)(U), (int)(C),\
                                                (int)(R)))

#define _mm_mask_range_ss(W , U, A, B, C) _mm_mask_range_round_ss(W, U, A, B, C , _MM_FROUND_CUR_DIRECTION)

#define _mm_maskz_range_round_ss(U, A, B, C, R) \
  ((__m128)__builtin_ia32_rangess128_round_mask((__v4sf)(__m128)(A), \
                                                (__v4sf)(__m128)(B), \
                                                (__v4sf)_mm_setzero_ps(), \
                                                (__mmask8)(U), (int)(C),\
                                                (int)(R)))

#define _mm_maskz_range_ss(U, A ,B , C) _mm_maskz_range_round_ss(U, A, B, C ,_MM_FROUND_CUR_DIRECTION)

#define _mm_range_round_sd(A, B, C, R) \
  ((__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
                                                 (__v2df)(__m128d)(B), \
                                                 (__v2df)_mm_setzero_pd(), \
                                                 (__mmask8) -1, (int)(C),\
                                                 (int)(R)))

#define _mm_range_sd(A ,B , C) _mm_range_round_sd(A, B, C ,_MM_FROUND_CUR_DIRECTION)

#define _mm_mask_range_round_sd(W, U, A, B, C, R) \
  ((__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
                                                 (__v2df)(__m128d)(B), \
                                                 (__v2df)(__m128d)(W),\
                                                 (__mmask8)(U), (int)(C),\
                                                 (int)(R)))

#define _mm_mask_range_sd(W, U, A, B, C) _mm_mask_range_round_sd(W, U, A, B, C ,_MM_FROUND_CUR_DIRECTION)

#define _mm_maskz_range_round_sd(U, A, B, C, R) \
  ((__m128d)__builtin_ia32_rangesd128_round_mask((__v2df)(__m128d)(A), \
                                                 (__v2df)(__m128d)(B), \
                                                 (__v2df)_mm_setzero_pd(), \
                                                 (__mmask8)(U), (int)(C),\
                                                 (int)(R)))

#define _mm_maskz_range_sd(U, A, B, C) _mm_maskz_range_round_sd(U, A, B, C ,_MM_FROUND_CUR_DIRECTION)

#define _mm512_reduce_pd(A, B) \
  ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
                                            (__v8df)_mm512_setzero_pd(), \
                                            (__mmask8)-1, \
                                            _MM_FROUND_CUR_DIRECTION))

#define _mm512_mask_reduce_pd(W, U, A, B) \
  ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
                                            (__v8df)(__m512d)(W), \
                                            (__mmask8)(U), \
                                            _MM_FROUND_CUR_DIRECTION))

#define _mm512_maskz_reduce_pd(U, A, B) \
  ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
                                            (__v8df)_mm512_setzero_pd(), \
                                            (__mmask8)(U), \
                                            _MM_FROUND_CUR_DIRECTION))

#define _mm512_reduce_ps(A, B) \
  ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
                                           (__v16sf)_mm512_setzero_ps(), \
                                           (__mmask16)-1, \
                                           _MM_FROUND_CUR_DIRECTION))

#define _mm512_mask_reduce_ps(W, U, A, B) \
  ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
                                           (__v16sf)(__m512)(W), \
                                           (__mmask16)(U), \
                                           _MM_FROUND_CUR_DIRECTION))

#define _mm512_maskz_reduce_ps(U, A, B) \
  ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
                                           (__v16sf)_mm512_setzero_ps(), \
                                           (__mmask16)(U), \
                                           _MM_FROUND_CUR_DIRECTION))

#define _mm512_reduce_round_pd(A, B, R) \
  ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
                                            (__v8df)_mm512_setzero_pd(), \
                                            (__mmask8)-1, (int)(R)))

#define _mm512_mask_reduce_round_pd(W, U, A, B, R) \
  ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
                                            (__v8df)(__m512d)(W), \
                                            (__mmask8)(U), (int)(R)))

#define _mm512_maskz_reduce_round_pd(U, A, B, R) \
  ((__m512d)__builtin_ia32_reducepd512_mask((__v8df)(__m512d)(A), (int)(B), \
                                            (__v8df)_mm512_setzero_pd(), \
                                            (__mmask8)(U), (int)(R)))

#define _mm512_reduce_round_ps(A, B, R) \
  ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
                                           (__v16sf)_mm512_setzero_ps(), \
                                           (__mmask16)-1, (int)(R)))

#define _mm512_mask_reduce_round_ps(W, U, A, B, R) \
  ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
                                           (__v16sf)(__m512)(W), \
                                           (__mmask16)(U), (int)(R)))

#define _mm512_maskz_reduce_round_ps(U, A, B, R) \
  ((__m512)__builtin_ia32_reduceps512_mask((__v16sf)(__m512)(A), (int)(B), \
                                           (__v16sf)_mm512_setzero_ps(), \
                                           (__mmask16)(U), (int)(R)))

#define _mm_reduce_ss(A, B, C) \
  ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
                                        (__v4sf)(__m128)(B), \
                                        (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
                                        (int)(C), _MM_FROUND_CUR_DIRECTION))

#define _mm_mask_reduce_ss(W, U, A, B, C) \
  ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
                                        (__v4sf)(__m128)(B), \
                                        (__v4sf)(__m128)(W), (__mmask8)(U), \
                                        (int)(C), _MM_FROUND_CUR_DIRECTION))

#define _mm_maskz_reduce_ss(U, A, B, C) \
  ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
                                        (__v4sf)(__m128)(B), \
                                        (__v4sf)_mm_setzero_ps(), \
                                        (__mmask8)(U), (int)(C), \
                                        _MM_FROUND_CUR_DIRECTION))

#define _mm_reduce_round_ss(A, B, C, R) \
  ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
                                        (__v4sf)(__m128)(B), \
                                        (__v4sf)_mm_setzero_ps(), (__mmask8)-1, \
                                        (int)(C), (int)(R)))

#define _mm_mask_reduce_round_ss(W, U, A, B, C, R) \
  ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
                                        (__v4sf)(__m128)(B), \
                                        (__v4sf)(__m128)(W), (__mmask8)(U), \
                                        (int)(C), (int)(R)))

#define _mm_maskz_reduce_round_ss(U, A, B, C, R) \
  ((__m128)__builtin_ia32_reducess_mask((__v4sf)(__m128)(A), \
                                        (__v4sf)(__m128)(B), \
                                        (__v4sf)_mm_setzero_ps(), \
                                        (__mmask8)(U), (int)(C), (int)(R)))

#define _mm_reduce_sd(A, B, C) \
  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
                                         (__v2df)(__m128d)(B), \
                                         (__v2df)_mm_setzero_pd(), \
                                         (__mmask8)-1, (int)(C), \
                                         _MM_FROUND_CUR_DIRECTION))

#define _mm_mask_reduce_sd(W, U, A, B, C) \
  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
                                         (__v2df)(__m128d)(B), \
                                         (__v2df)(__m128d)(W), (__mmask8)(U), \
                                         (int)(C), _MM_FROUND_CUR_DIRECTION))

#define _mm_maskz_reduce_sd(U, A, B, C) \
  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
                                         (__v2df)(__m128d)(B), \
                                         (__v2df)_mm_setzero_pd(), \
                                         (__mmask8)(U), (int)(C), \
                                         _MM_FROUND_CUR_DIRECTION))

#define _mm_reduce_round_sd(A, B, C, R) \
  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
                                         (__v2df)(__m128d)(B), \
                                         (__v2df)_mm_setzero_pd(), \
                                         (__mmask8)-1, (int)(C), (int)(R)))

#define _mm_mask_reduce_round_sd(W, U, A, B, C, R) \
  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
                                         (__v2df)(__m128d)(B), \
                                         (__v2df)(__m128d)(W), (__mmask8)(U), \
                                         (int)(C), (int)(R)))

#define _mm_maskz_reduce_round_sd(U, A, B, C, R) \
  ((__m128d)__builtin_ia32_reducesd_mask((__v2df)(__m128d)(A), \
                                         (__v2df)(__m128d)(B), \
                                         (__v2df)_mm_setzero_pd(), \
                                         (__mmask8)(U), (int)(C), (int)(R)))
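
/*
 * Usage sketch (editorial, not part of the upstream header; the helper name
 * is illustrative): the range/reduce immediates are encoded control bytes,
 * which is why these are macros -- the builtins require compile-time
 * constants.  As documented by Intel, range imm bits [1:0] select the
 * operation (0 = min, 1 = max, 2 = absolute min, 3 = absolute max) and
 * reduce imm bits [7:4] give the number of fraction bits kept.  Assuming
 * -mavx512dq and <immintrin.h>:
 */
static inline __m512d sketch_range_min(__m512d a, __m512d b) {
  /* imm = 0: per-lane minimum of a and b. */
  return _mm512_range_pd(a, b, 0);
}
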
static __inline__ __mmask16 __DEFAULT_FN_ATTRS512
_mm512_movepi32_mask (__m512i __A)
{
  return (__mmask16) __builtin_ia32_cvtd2mask512 ((__v16si) __A);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_movm_epi32 (__mmask16 __A)
{
  return (__m512i) __builtin_ia32_cvtmask2d512 (__A);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_movm_epi64 (__mmask8 __A)
{
  return (__m512i) __builtin_ia32_cvtmask2q512 (__A);
}

static __inline__ __mmask8 __DEFAULT_FN_ATTRS512
_mm512_movepi64_mask (__m512i __A)
{
  return (__mmask8) __builtin_ia32_cvtq2mask512 ((__v8di) __A);
}


static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_broadcast_f32x2 (__m128 __A)
{
  return (__m512)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
                                         0, 1, 0, 1, 0, 1, 0, 1,
                                         0, 1, 0, 1, 0, 1, 0, 1);
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_f32x2 (__m512 __O, __mmask16 __M, __m128 __A)
{
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
                                             (__v16sf)_mm512_broadcast_f32x2(__A),
                                             (__v16sf)__O);
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_f32x2 (__mmask16 __M, __m128 __A)
{
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
                                             (__v16sf)_mm512_broadcast_f32x2(__A),
                                             (__v16sf)_mm512_setzero_ps());
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_broadcast_f32x8(__m256 __A)
{
  return (__m512)__builtin_shufflevector((__v8sf)__A, (__v8sf)__A,
                                         0, 1, 2, 3, 4, 5, 6, 7,
                                         0, 1, 2, 3, 4, 5, 6, 7);
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_f32x8(__m512 __O, __mmask16 __M, __m256 __A)
{
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
                                             (__v16sf)_mm512_broadcast_f32x8(__A),
                                             (__v16sf)__O);
}

static __inline__ __m512 __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_f32x8(__mmask16 __M, __m256 __A)
{
  return (__m512)__builtin_ia32_selectps_512((__mmask16)__M,
                                             (__v16sf)_mm512_broadcast_f32x8(__A),
                                             (__v16sf)_mm512_setzero_ps());
}

static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_broadcast_f64x2(__m128d __A)
{
  return (__m512d)__builtin_shufflevector((__v2df)__A, (__v2df)__A,
                                          0, 1, 0, 1, 0, 1, 0, 1);
}

static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_f64x2(__m512d __O, __mmask8 __M, __m128d __A)
{
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M,
                                              (__v8df)_mm512_broadcast_f64x2(__A),
                                              (__v8df)__O);
}

static __inline__ __m512d __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_f64x2(__mmask8 __M, __m128d __A)
{
  return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__M,
                                              (__v8df)_mm512_broadcast_f64x2(__A),
                                              (__v8df)_mm512_setzero_pd());
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_broadcast_i32x2 (__m128i __A)
{
  return (__m512i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
                                          0, 1, 0, 1, 0, 1, 0, 1,
                                          0, 1, 0, 1, 0, 1, 0, 1);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_i32x2 (__m512i __O, __mmask16 __M, __m128i __A)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
                                             (__v16si)_mm512_broadcast_i32x2(__A),
                                             (__v16si)__O);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_i32x2 (__mmask16 __M, __m128i __A)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
                                             (__v16si)_mm512_broadcast_i32x2(__A),
                                             (__v16si)_mm512_setzero_si512());
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_broadcast_i32x8(__m256i __A)
{
  return (__m512i)__builtin_shufflevector((__v8si)__A, (__v8si)__A,
                                          0, 1, 2, 3, 4, 5, 6, 7,
                                          0, 1, 2, 3, 4, 5, 6, 7);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_i32x8(__m512i __O, __mmask16 __M, __m256i __A)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
                                             (__v16si)_mm512_broadcast_i32x8(__A),
                                             (__v16si)__O);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_i32x8(__mmask16 __M, __m256i __A)
{
  return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
                                             (__v16si)_mm512_broadcast_i32x8(__A),
                                             (__v16si)_mm512_setzero_si512());
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_broadcast_i64x2(__m128i __A)
{
  return (__m512i)__builtin_shufflevector((__v2di)__A, (__v2di)__A,
                                          0, 1, 0, 1, 0, 1, 0, 1);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_mask_broadcast_i64x2(__m512i __O, __mmask8 __M, __m128i __A)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
                                             (__v8di)_mm512_broadcast_i64x2(__A),
                                             (__v8di)__O);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A)
{
  return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
                                             (__v8di)_mm512_broadcast_i64x2(__A),
                                             (__v8di)_mm512_setzero_si512());
}
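
/*
 * Usage sketch (editorial, not part of the upstream header; the helper name
 * is illustrative): movepi32_mask packs the sign bit of each 32-bit lane
 * into a __mmask16, and movm_epi32 expands a mask back into all-ones /
 * all-zeros lanes, so the pair round-trips a lane-wise predicate.
 * Assuming -mavx512dq and <immintrin.h>:
 */
static inline __m512i sketch_negative_lanes_as_vector(__m512i v) {
  __mmask16 neg = _mm512_movepi32_mask(v);   /* one bit per negative lane */
  return _mm512_movm_epi32(neg);             /* 0xFFFFFFFF or 0 per lane  */
}
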
#define _mm512_extractf32x8_ps(A, imm) \
  ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
                                            (__v8sf)_mm256_undefined_ps(), \
                                            (__mmask8)-1))

#define _mm512_mask_extractf32x8_ps(W, U, A, imm) \
  ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
                                            (__v8sf)(__m256)(W), \
                                            (__mmask8)(U)))

#define _mm512_maskz_extractf32x8_ps(U, A, imm) \
  ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
                                            (__v8sf)_mm256_setzero_ps(), \
                                            (__mmask8)(U)))

#define _mm512_extractf64x2_pd(A, imm) \
  ((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
                                                 (int)(imm), \
                                                 (__v2df)_mm_undefined_pd(), \
                                                 (__mmask8)-1))

#define _mm512_mask_extractf64x2_pd(W, U, A, imm) \
  ((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
                                                 (int)(imm), \
                                                 (__v2df)(__m128d)(W), \
                                                 (__mmask8)(U)))

#define _mm512_maskz_extractf64x2_pd(U, A, imm) \
  ((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
                                                 (int)(imm), \
                                                 (__v2df)_mm_setzero_pd(), \
                                                 (__mmask8)(U)))

#define _mm512_extracti32x8_epi32(A, imm) \
  ((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
                                             (__v8si)_mm256_undefined_si256(), \
                                             (__mmask8)-1))

#define _mm512_mask_extracti32x8_epi32(W, U, A, imm) \
  ((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
                                             (__v8si)(__m256i)(W), \
                                             (__mmask8)(U)))

#define _mm512_maskz_extracti32x8_epi32(U, A, imm) \
  ((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
                                             (__v8si)_mm256_setzero_si256(), \
                                             (__mmask8)(U)))

#define _mm512_extracti64x2_epi64(A, imm) \
  ((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
                                                 (int)(imm), \
                                                 (__v2di)_mm_undefined_si128(), \
                                                 (__mmask8)-1))

#define _mm512_mask_extracti64x2_epi64(W, U, A, imm) \
  ((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
                                                 (int)(imm), \
                                                 (__v2di)(__m128i)(W), \
                                                 (__mmask8)(U)))

#define _mm512_maskz_extracti64x2_epi64(U, A, imm) \
  ((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
                                                 (int)(imm), \
                                                 (__v2di)_mm_setzero_si128(), \
                                                 (__mmask8)(U)))

#define _mm512_insertf32x8(A, B, imm) \
  ((__m512)__builtin_ia32_insertf32x8((__v16sf)(__m512)(A), \
                                      (__v8sf)(__m256)(B), (int)(imm)))

#define _mm512_mask_insertf32x8(W, U, A, B, imm) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_insertf32x8((A), (B), (imm)), \
                                       (__v16sf)(__m512)(W)))

#define _mm512_maskz_insertf32x8(U, A, B, imm) \
  ((__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                       (__v16sf)_mm512_insertf32x8((A), (B), (imm)), \
                                       (__v16sf)_mm512_setzero_ps()))

#define _mm512_insertf64x2(A, B, imm) \
  ((__m512d)__builtin_ia32_insertf64x2_512((__v8df)(__m512d)(A), \
                                           (__v2df)(__m128d)(B), (int)(imm)))

#define _mm512_mask_insertf64x2(W, U, A, B, imm) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_insertf64x2((A), (B), (imm)), \
                                        (__v8df)(__m512d)(W)))

#define _mm512_maskz_insertf64x2(U, A, B, imm) \
  ((__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                        (__v8df)_mm512_insertf64x2((A), (B), (imm)), \
                                        (__v8df)_mm512_setzero_pd()))

#define _mm512_inserti32x8(A, B, imm) \
  ((__m512i)__builtin_ia32_inserti32x8((__v16si)(__m512i)(A), \
                                       (__v8si)(__m256i)(B), (int)(imm)))

#define _mm512_mask_inserti32x8(W, U, A, B, imm) \
  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
                                       (__v16si)_mm512_inserti32x8((A), (B), (imm)), \
                                       (__v16si)(__m512i)(W)))

#define _mm512_maskz_inserti32x8(U, A, B, imm) \
  ((__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
                                       (__v16si)_mm512_inserti32x8((A), (B), (imm)), \
                                       (__v16si)_mm512_setzero_si512()))

#define _mm512_inserti64x2(A, B, imm) \
  ((__m512i)__builtin_ia32_inserti64x2_512((__v8di)(__m512i)(A), \
                                           (__v2di)(__m128i)(B), (int)(imm)))

#define _mm512_mask_inserti64x2(W, U, A, B, imm) \
  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
                                       (__v8di)_mm512_inserti64x2((A), (B), (imm)), \
                                       (__v8di)(__m512i)(W)))

#define _mm512_maskz_inserti64x2(U, A, B, imm) \
  ((__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
                                       (__v8di)_mm512_inserti64x2((A), (B), (imm)), \
                                       (__v8di)_mm512_setzero_si512()))

#define _mm512_mask_fpclass_ps_mask(U, A, imm) \
  ((__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
                                               (int)(imm), (__mmask16)(U)))

#define _mm512_fpclass_ps_mask(A, imm) \
  ((__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
                                               (int)(imm), (__mmask16)-1))

#define _mm512_mask_fpclass_pd_mask(U, A, imm) \
  ((__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
                                              (__mmask8)(U)))

#define _mm512_fpclass_pd_mask(A, imm) \
  ((__mmask8)__builtin_ia32_fpclasspd512_mask((__v8df)(__m512d)(A), (int)(imm), \
                                              (__mmask8)-1))

#define _mm_fpclass_sd_mask(A, imm) \
  ((__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
                                           (__mmask8)-1))

#define _mm_mask_fpclass_sd_mask(U, A, imm) \
  ((__mmask8)__builtin_ia32_fpclasssd_mask((__v2df)(__m128d)(A), (int)(imm), \
                                           (__mmask8)(U)))

#define _mm_fpclass_ss_mask(A, imm) \
  ((__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
                                           (__mmask8)-1))

#define _mm_mask_fpclass_ss_mask(U, A, imm) \
  ((__mmask8)__builtin_ia32_fpclassss_mask((__v4sf)(__m128)(A), (int)(imm), \
                                           (__mmask8)(U)))

#undef __DEFAULT_FN_ATTRS512
#undef __DEFAULT_FN_ATTRS

#endif
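
/*
 * Usage sketch (editorial, not part of the upstream header; the helper name
 * is illustrative): the extract/insert macros above move 256- or 128-bit
 * sub-vectors by immediate index, and fpclass tests each lane for the
 * categories encoded in the immediate (Intel documents bit 0 = QNaN,
 * bit 1 = +0, bit 2 = -0, bit 3 = +inf, bit 4 = -inf, bit 5 = denormal,
 * bit 6 = finite negative, bit 7 = SNaN).  Assuming -mavx512dq and
 * <immintrin.h>:
 */
static inline __mmask8 sketch_nan_or_inf_lanes(__m512d v) {
  /* 0x01|0x80 catches either NaN flavor, 0x08|0x10 either infinity. */
  return _mm512_fpclass_pd_mask(v, 0x01 | 0x80 | 0x08 | 0x10);
}
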
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512vbmivlintrin.h
ADDED
@@ -0,0 +1,193 @@
/*===------------- avx512vbmivlintrin.h - VBMI intrinsics ------------------===
 *
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __IMMINTRIN_H
#error "Never use <avx512vbmivlintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __VBMIVLINTRIN_H
#define __VBMIVLINTRIN_H

/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS128 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512vbmi,avx512vl,no-evex512"), \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512vbmi,avx512vl,no-evex512"), \
                 __min_vector_width__(256)))

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_permutex2var_epi8(__m128i __A, __m128i __I, __m128i __B)
{
  return (__m128i)__builtin_ia32_vpermi2varqi128((__v16qi)__A,
                                                 (__v16qi)__I,
                                                 (__v16qi)__B);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_permutex2var_epi8(__m128i __A, __mmask16 __U, __m128i __I,
                           __m128i __B)
{
  return (__m128i)__builtin_ia32_selectb_128(__U,
                                  (__v16qi)_mm_permutex2var_epi8(__A, __I, __B),
                                  (__v16qi)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask2_permutex2var_epi8(__m128i __A, __m128i __I, __mmask16 __U,
                            __m128i __B)
{
  return (__m128i)__builtin_ia32_selectb_128(__U,
                                  (__v16qi)_mm_permutex2var_epi8(__A, __I, __B),
                                  (__v16qi)__I);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_permutex2var_epi8(__mmask16 __U, __m128i __A, __m128i __I,
                            __m128i __B)
{
  return (__m128i)__builtin_ia32_selectb_128(__U,
                                  (__v16qi)_mm_permutex2var_epi8(__A, __I, __B),
                                  (__v16qi)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_permutex2var_epi8(__m256i __A, __m256i __I, __m256i __B)
{
  return (__m256i)__builtin_ia32_vpermi2varqi256((__v32qi)__A, (__v32qi)__I,
                                                 (__v32qi)__B);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_permutex2var_epi8(__m256i __A, __mmask32 __U, __m256i __I,
                              __m256i __B)
{
  return (__m256i)__builtin_ia32_selectb_256(__U,
                               (__v32qi)_mm256_permutex2var_epi8(__A, __I, __B),
                               (__v32qi)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask2_permutex2var_epi8(__m256i __A, __m256i __I, __mmask32 __U,
                               __m256i __B)
{
  return (__m256i)__builtin_ia32_selectb_256(__U,
                               (__v32qi)_mm256_permutex2var_epi8(__A, __I, __B),
                               (__v32qi)__I);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_permutex2var_epi8(__mmask32 __U, __m256i __A, __m256i __I,
                               __m256i __B)
{
  return (__m256i)__builtin_ia32_selectb_256(__U,
                               (__v32qi)_mm256_permutex2var_epi8(__A, __I, __B),
                               (__v32qi)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_permutexvar_epi8 (__m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_permvarqi128((__v16qi)__B, (__v16qi)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_permutexvar_epi8 (__mmask16 __M, __m128i __A, __m128i __B)
{
  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
                                     (__v16qi)_mm_permutexvar_epi8(__A, __B),
                                     (__v16qi)_mm_setzero_si128());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_permutexvar_epi8 (__m128i __W, __mmask16 __M, __m128i __A,
                           __m128i __B)
{
  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
                                     (__v16qi)_mm_permutexvar_epi8(__A, __B),
                                     (__v16qi)__W);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_permutexvar_epi8 (__m256i __A, __m256i __B)
{
  return (__m256i)__builtin_ia32_permvarqi256((__v32qi) __B, (__v32qi) __A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_permutexvar_epi8 (__mmask32 __M, __m256i __A,
                               __m256i __B)
{
  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
                                  (__v32qi)_mm256_permutexvar_epi8(__A, __B),
                                  (__v32qi)_mm256_setzero_si256());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_permutexvar_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
                              __m256i __B)
{
  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
                                  (__v32qi)_mm256_permutexvar_epi8(__A, __B),
                                  (__v32qi)__W);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_multishift_epi64_epi8(__m128i __X, __m128i __Y)
{
  return (__m128i)__builtin_ia32_vpmultishiftqb128((__v16qi)__X, (__v16qi)__Y);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_multishift_epi64_epi8(__m128i __W, __mmask16 __M, __m128i __X,
                               __m128i __Y)
{
  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
                                (__v16qi)_mm_multishift_epi64_epi8(__X, __Y),
                                (__v16qi)__W);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_multishift_epi64_epi8(__mmask16 __M, __m128i __X, __m128i __Y)
{
  return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
                                (__v16qi)_mm_multishift_epi64_epi8(__X, __Y),
                                (__v16qi)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_multishift_epi64_epi8(__m256i __X, __m256i __Y)
{
  return (__m256i)__builtin_ia32_vpmultishiftqb256((__v32qi)__X, (__v32qi)__Y);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_multishift_epi64_epi8(__m256i __W, __mmask32 __M, __m256i __X,
                                  __m256i __Y)
{
  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
                             (__v32qi)_mm256_multishift_epi64_epi8(__X, __Y),
                             (__v32qi)__W);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_multishift_epi64_epi8(__mmask32 __M, __m256i __X, __m256i __Y)
{
  return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
                             (__v32qi)_mm256_multishift_epi64_epi8(__X, __Y),
                             (__v32qi)_mm256_setzero_si256());
}


#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256

#endif
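
/*
 * Usage sketch (editorial, not part of the upstream header; the helper name
 * is illustrative): note the operand order of permutexvar above -- the
 * builtin receives (__B, __A), so the first intrinsic argument holds the
 * byte indices and the second the data being shuffled.  A byte-reversal
 * example, assuming -mavx512vbmi -mavx512vl and <immintrin.h>:
 */
static inline __m128i sketch_reverse_bytes(__m128i data) {
  /* _mm_set_epi8 lists elements high-to-low, so byte i holds 15 - i. */
  const __m128i idx = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                                   8, 9, 10, 11, 12, 13, 14, 15);
  return _mm_permutexvar_epi8(idx, data);    /* indices first, data second */
}
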
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512vlbf16intrin.h
ADDED
@@ -0,0 +1,517 @@
/*===--------- avx512vlbf16intrin.h - AVX512_BF16 intrinsics ---------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __IMMINTRIN_H
#error "Never use <avx512vlbf16intrin.h> directly; include <immintrin.h> instead."
#endif

#ifdef __SSE2__

#ifndef __AVX512VLBF16INTRIN_H
#define __AVX512VLBF16INTRIN_H

#define __DEFAULT_FN_ATTRS128 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512vl,avx512bf16,no-evex512"), \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512vl,avx512bf16,no-evex512"), \
                 __min_vector_width__(256)))

/// Convert Two Packed Single Data to One Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
///
/// \param __A
///    A 128-bit vector of [4 x float].
/// \param __B
///    A 128-bit vector of [4 x float].
/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
///    conversion of __B, and higher 64 bits come from conversion of __A.
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
_mm_cvtne2ps_pbh(__m128 __A, __m128 __B) {
  return (__m128bh)__builtin_ia32_cvtne2ps2bf16_128((__v4sf) __A,
                                                    (__v4sf) __B);
}

/// Convert Two Packed Single Data to One Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
///
/// \param __A
///    A 128-bit vector of [4 x float].
/// \param __B
///    A 128-bit vector of [4 x float].
/// \param __W
///    A 128-bit vector of [8 x bfloat].
/// \param __U
///    A 8-bit mask value specifying what is chosen for each element.
///    A 1 means conversion of __A or __B. A 0 means element from __W.
/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
///    conversion of __B, and higher 64 bits come from conversion of __A.
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
_mm_mask_cvtne2ps_pbh(__m128bh __W, __mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128bh)__builtin_ia32_selectpbf_128((__mmask8)__U,
                                                (__v8bf)_mm_cvtne2ps_pbh(__A, __B),
                                                (__v8bf)__W);
}

/// Convert Two Packed Single Data to One Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
///
/// \param __A
///    A 128-bit vector of [4 x float].
/// \param __B
///    A 128-bit vector of [4 x float].
/// \param __U
///    A 8-bit mask value specifying what is chosen for each element.
///    A 1 means conversion of __A or __B. A 0 means element is zero.
/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
///    conversion of __B, and higher 64 bits come from conversion of __A.
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
_mm_maskz_cvtne2ps_pbh(__mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128bh)__builtin_ia32_selectpbf_128((__mmask8)__U,
                                                (__v8bf)_mm_cvtne2ps_pbh(__A, __B),
                                                (__v8bf)_mm_setzero_si128());
}

/// Convert Two Packed Single Data to One Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
///
/// \param __A
///    A 256-bit vector of [8 x float].
/// \param __B
///    A 256-bit vector of [8 x float].
/// \returns A 256-bit vector of [16 x bfloat] whose lower 128 bits come from
///    conversion of __B, and higher 128 bits come from conversion of __A.
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_cvtne2ps_pbh(__m256 __A, __m256 __B) {
  return (__m256bh)__builtin_ia32_cvtne2ps2bf16_256((__v8sf) __A,
                                                    (__v8sf) __B);
}

/// Convert Two Packed Single Data to One Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
///
/// \param __A
///    A 256-bit vector of [8 x float].
/// \param __B
///    A 256-bit vector of [8 x float].
/// \param __W
///    A 256-bit vector of [16 x bfloat].
/// \param __U
///    A 16-bit mask value specifying what is chosen for each element.
///    A 1 means conversion of __A or __B. A 0 means element from __W.
/// \returns A 256-bit vector of [16 x bfloat] whose lower 128 bits come from
///    conversion of __B, and higher 128 bits come from conversion of __A.
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
_mm256_mask_cvtne2ps_pbh(__m256bh __W, __mmask16 __U, __m256 __A, __m256 __B) {
  return (__m256bh)__builtin_ia32_selectpbf_256((__mmask16)__U,
                                                (__v16bf)_mm256_cvtne2ps_pbh(__A, __B),
                                                (__v16bf)__W);
}

/// Convert Two Packed Single Data to One Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNE2PS2BF16 </c> instructions.
///
/// \param __A
/// \param __A
|
140 |
+
/// A 256-bit vector of [8 x float].
|
141 |
+
/// \param __B
|
142 |
+
/// A 256-bit vector of [8 x float].
|
143 |
+
/// \param __U
|
144 |
+
/// A 16-bit mask value specifying what is chosen for each element.
|
145 |
+
/// A 1 means conversion of __A or __B. A 0 means element is zero.
|
146 |
+
/// \returns A 256-bit vector of [16 x bfloat] whose lower 128 bits come from
|
147 |
+
/// conversion of __B, and higher 128 bits come from conversion of __A.
|
148 |
+
static __inline__ __m256bh __DEFAULT_FN_ATTRS256
|
149 |
+
_mm256_maskz_cvtne2ps_pbh(__mmask16 __U, __m256 __A, __m256 __B) {
|
150 |
+
return (__m256bh)__builtin_ia32_selectpbf_256((__mmask16)__U,
|
151 |
+
(__v16bf)_mm256_cvtne2ps_pbh(__A, __B),
|
152 |
+
(__v16bf)_mm256_setzero_si256());
|
153 |
+
}
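
Masking sketch (editor's addition, illustrative; assumes <immintrin.h>):
each of the eight bf16 result lanes is controlled by one bit of the
__mmask8, and with the zeroing form a cleared bit yields a zero lane.

static __m128bh even_lanes_only(__m128 a, __m128 b) {
  return _mm_maskz_cvtne2ps_pbh((__mmask8)0x55, a, b); /* keep lanes 0,2,4,6 */
}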

/// Convert Packed Single Data to Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
///
/// \param __A
///    A 128-bit vector of [4 x float].
/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
///    conversion of __A, and higher 64 bits are 0.
#define _mm_cvtneps_pbh(A) \
  ((__m128bh)__builtin_ia32_vcvtneps2bf16128((__v4sf)(A)))

/// Convert Packed Single Data to Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
///
/// \param __A
///    A 128-bit vector of [4 x float].
/// \param __W
///    A 128-bit vector of [8 x bfloat].
/// \param __U
///    A 4-bit mask value specifying what is chosen for each element.
///    A 1 means conversion of __A. A 0 means element from __W.
/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
///    conversion of __A, and higher 64 bits are 0.
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
_mm_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m128 __A) {
  return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A,
                                                        (__v8bf)__W,
                                                        (__mmask8)__U);
}

/// Convert Packed Single Data to Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
///
/// \param __A
///    A 128-bit vector of [4 x float].
/// \param __U
///    A 4-bit mask value specifying what is chosen for each element.
///    A 1 means conversion of __A. A 0 means element is zero.
/// \returns A 128-bit vector of [8 x bfloat] whose lower 64 bits come from
///    conversion of __A, and higher 64 bits are 0.
static __inline__ __m128bh __DEFAULT_FN_ATTRS128
_mm_maskz_cvtneps_pbh(__mmask8 __U, __m128 __A) {
  return (__m128bh)__builtin_ia32_cvtneps2bf16_128_mask((__v4sf) __A,
                                                (__v8bf)_mm_setzero_si128(),
                                                (__mmask8)__U);
}

/// Convert Packed Single Data to Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
///
/// \param __A
///    A 256-bit vector of [8 x float].
/// \returns A 128-bit vector of [8 x bfloat] that comes from conversion of
///    __A.
#define _mm256_cvtneps_pbh(A) \
  ((__m128bh)__builtin_ia32_vcvtneps2bf16256((__v8sf)(A)))

/// Convert Packed Single Data to Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
///
/// \param __A
///    A 256-bit vector of [8 x float].
/// \param __W
///    A 128-bit vector of [8 x bfloat].
/// \param __U
///    An 8-bit mask value specifying what is chosen for each element.
///    A 1 means conversion of __A. A 0 means element from __W.
/// \returns A 128-bit vector of [8 x bfloat] that comes from conversion of
///    __A.
static __inline__ __m128bh __DEFAULT_FN_ATTRS256
_mm256_mask_cvtneps_pbh(__m128bh __W, __mmask8 __U, __m256 __A) {
  return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A,
                                                        (__v8bf)__W,
                                                        (__mmask8)__U);
}

/// Convert Packed Single Data to Packed BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
///
/// \param __A
///    A 256-bit vector of [8 x float].
/// \param __U
///    An 8-bit mask value specifying what is chosen for each element.
///    A 1 means conversion of __A. A 0 means element is zero.
/// \returns A 128-bit vector of [8 x bfloat] that comes from conversion of
///    __A.
static __inline__ __m128bh __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtneps_pbh(__mmask8 __U, __m256 __A) {
  return (__m128bh)__builtin_ia32_cvtneps2bf16_256_mask((__v8sf)__A,
                                                (__v8bf)_mm_setzero_si128(),
                                                (__mmask8)__U);
}
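
Sketch (editor's addition, illustrative; assumes <immintrin.h>): narrow
eight floats to eight bfloat16 values with round-to-nearest-even, then
store them as 16 bytes.

static void narrow_to_bf16(const float *src, void *dst) {
  __m128bh r = _mm256_cvtneps_pbh(_mm256_loadu_ps(src));
  _mm_storeu_si128((__m128i *)dst, (__m128i)r); /* same-size vector cast */
}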

/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
///
/// \param __A
///    A 128-bit vector of [8 x bfloat].
/// \param __B
///    A 128-bit vector of [8 x bfloat].
/// \param __D
///    A 128-bit vector of [4 x float].
/// \returns A 128-bit vector of [4 x float] that comes from the dot product
///    of __A, __B and __D.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_dpbf16_ps(__m128 __D, __m128bh __A, __m128bh __B) {
  return (__m128)__builtin_ia32_dpbf16ps_128((__v4sf)__D,
                                             (__v8bf)__A,
                                             (__v8bf)__B);
}

/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
///
/// \param __A
///    A 128-bit vector of [8 x bfloat].
/// \param __B
///    A 128-bit vector of [8 x bfloat].
/// \param __D
///    A 128-bit vector of [4 x float].
/// \param __U
///    An 8-bit mask value specifying what is chosen for each element.
///    A 1 means __A and __B's dot product accumulated with __D. A 0 means __D.
/// \returns A 128-bit vector of [4 x float] that comes from the dot product
///    of __A, __B and __D.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_dpbf16_ps(__m128 __D, __mmask8 __U, __m128bh __A, __m128bh __B) {
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
                                       (__v4sf)_mm_dpbf16_ps(__D, __A, __B),
                                       (__v4sf)__D);
}

/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
///
/// \param __A
///    A 128-bit vector of [8 x bfloat].
/// \param __B
///    A 128-bit vector of [8 x bfloat].
/// \param __D
///    A 128-bit vector of [4 x float].
/// \param __U
///    An 8-bit mask value specifying what is chosen for each element.
///    A 1 means __A and __B's dot product accumulated with __D. A 0 means 0.
/// \returns A 128-bit vector of [4 x float] that comes from the dot product
///    of __A, __B and __D.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_dpbf16_ps(__mmask8 __U, __m128 __D, __m128bh __A, __m128bh __B) {
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
                                       (__v4sf)_mm_dpbf16_ps(__D, __A, __B),
                                       (__v4sf)_mm_setzero_si128());
}

/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
///
/// \param __A
///    A 256-bit vector of [16 x bfloat].
/// \param __B
///    A 256-bit vector of [16 x bfloat].
/// \param __D
///    A 256-bit vector of [8 x float].
/// \returns A 256-bit vector of [8 x float] that comes from the dot product
///    of __A, __B and __D.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_dpbf16_ps(__m256 __D, __m256bh __A, __m256bh __B) {
  return (__m256)__builtin_ia32_dpbf16ps_256((__v8sf)__D,
                                             (__v16bf)__A,
                                             (__v16bf)__B);
}

/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
///
/// \param __A
///    A 256-bit vector of [16 x bfloat].
/// \param __B
///    A 256-bit vector of [16 x bfloat].
/// \param __D
///    A 256-bit vector of [8 x float].
/// \param __U
///    An 8-bit mask value specifying what is chosen for each element.
///    A 1 means __A and __B's dot product accumulated with __D. A 0 means __D.
/// \returns A 256-bit vector of [8 x float] that comes from the dot product
///    of __A, __B and __D.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_dpbf16_ps(__m256 __D, __mmask8 __U, __m256bh __A, __m256bh __B) {
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
                                     (__v8sf)_mm256_dpbf16_ps(__D, __A, __B),
                                     (__v8sf)__D);
}

/// Dot Product of BF16 Pairs Accumulated into Packed Single Precision.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VDPBF16PS </c> instructions.
///
/// \param __A
///    A 256-bit vector of [16 x bfloat].
/// \param __B
///    A 256-bit vector of [16 x bfloat].
/// \param __D
///    A 256-bit vector of [8 x float].
/// \param __U
///    An 8-bit mask value specifying what is chosen for each element.
///    A 1 means __A and __B's dot product accumulated with __D. A 0 means 0.
/// \returns A 256-bit vector of [8 x float] that comes from the dot product
///    of __A, __B and __D.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_dpbf16_ps(__mmask8 __U, __m256 __D, __m256bh __A, __m256bh __B) {
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
                                     (__v8sf)_mm256_dpbf16_ps(__D, __A, __B),
                                     (__v8sf)_mm256_setzero_si256());
}
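
Sketch (editor's addition, illustrative; assumes <immintrin.h>): VDPBF16PS
is the building block of bf16 matrix kernels. Each float lane i of the
accumulator gains a[2i]*b[2i] + a[2i+1]*b[2i+1] from the bf16 pairs.

static __m128 bf16_dot_step(__m128 acc, __m128bh a, __m128bh b) {
  return _mm_dpbf16_ps(acc, a, b); /* acc[i] += pairwise bf16 products */
}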

/// Convert One Single float Data to One BF16 Data.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTNEPS2BF16 </c> instructions.
///
/// \param __A
///    A float value.
/// \returns A bf16 value whose sign field and exponent field are unchanged,
///    and whose fraction field is rounded to 7 bits (to nearest even).
static __inline__ __bf16 __DEFAULT_FN_ATTRS128 _mm_cvtness_sbh(float __A) {
  __v4sf __V = {__A, 0, 0, 0};
  __v8bf __R = __builtin_ia32_cvtneps2bf16_128_mask(
      (__v4sf)__V, (__v8bf)_mm_undefined_si128(), (__mmask8)-1);
  return (__bf16)__R[0];
}
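
Sketch (editor's addition, illustrative): bf16 is the upper 16 bits of an
IEEE-754 float, so widening a stored bf16 bit pattern back to float is just
a 16-bit shift; no intrinsics are needed for that direction.

#include <stdint.h>
#include <string.h>

static float bf16_bits_to_float(uint16_t bits) {
  uint32_t w = (uint32_t)bits << 16; /* low mantissa bits become zero */
  float f;
  memcpy(&f, &w, sizeof f);
  return f;
}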

/// Convert Packed BF16 Data to Packed float Data.
///
/// \headerfile <x86intrin.h>
///
/// \param __A
///    A 128-bit vector of [4 x bfloat].
/// \returns A 128-bit vector of [4 x float] that comes from conversion of
///    __A.
static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_cvtpbh_ps(__m128bh __A) {
  return _mm_castsi128_ps(
      (__m128i)_mm_slli_epi32((__m128i)_mm_cvtepi16_epi32((__m128i)__A), 16));
}

/// Convert Packed BF16 Data to Packed float Data.
///
/// \headerfile <x86intrin.h>
///
/// \param __A
///    A 128-bit vector of [8 x bfloat].
/// \returns A 256-bit vector of [8 x float] that comes from conversion of
///    __A.
static __inline__ __m256 __DEFAULT_FN_ATTRS256 _mm256_cvtpbh_ps(__m128bh __A) {
  return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32(
      (__m256i)_mm256_cvtepi16_epi32((__m128i)__A), 16));
}

/// Convert Packed BF16 Data to Packed float Data using zeroing mask.
///
/// \headerfile <x86intrin.h>
///
/// \param __U
///    A 4-bit mask. Elements are zeroed out when the corresponding mask
///    bit is not set.
/// \param __A
///    A 128-bit vector of [4 x bfloat].
/// \returns A 128-bit vector of [4 x float] that comes from conversion of
///    __A.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) {
  return _mm_castsi128_ps((__m128i)_mm_slli_epi32(
      (__m128i)_mm_maskz_cvtepi16_epi32((__mmask8)__U, (__m128i)__A), 16));
}

/// Convert Packed BF16 Data to Packed float Data using zeroing mask.
///
/// \headerfile <x86intrin.h>
///
/// \param __U
///    An 8-bit mask. Elements are zeroed out when the corresponding mask
///    bit is not set.
/// \param __A
///    A 128-bit vector of [8 x bfloat].
/// \returns A 256-bit vector of [8 x float] that comes from conversion of
///    __A.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtpbh_ps(__mmask8 __U, __m128bh __A) {
  return _mm256_castsi256_ps((__m256i)_mm256_slli_epi32(
      (__m256i)_mm256_maskz_cvtepi16_epi32((__mmask8)__U, (__m128i)__A), 16));
}

/// Convert Packed BF16 Data to Packed float Data using merging mask.
///
/// \headerfile <x86intrin.h>
///
/// \param __S
///    A 128-bit vector of [4 x float]. Elements are copied from __S when
///    the corresponding mask bit is not set.
/// \param __U
///    A 4-bit mask. Elements come from __S when the corresponding mask
///    bit is not set.
/// \param __A
///    A 128-bit vector of [4 x bfloat].
/// \returns A 128-bit vector of [4 x float] that comes from conversion of
///    __A.
static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_cvtpbh_ps(__m128 __S, __mmask8 __U, __m128bh __A) {
  return _mm_castsi128_ps((__m128i)_mm_mask_slli_epi32(
      (__m128i)__S, (__mmask8)__U, (__m128i)_mm_cvtepi16_epi32((__m128i)__A),
      16));
}

/// Convert Packed BF16 Data to Packed float Data using merging mask.
///
/// \headerfile <x86intrin.h>
///
/// \param __S
///    A 256-bit vector of [8 x float]. Elements are copied from __S when
///    the corresponding mask bit is not set.
/// \param __U
///    An 8-bit mask. Elements come from __S when the corresponding mask
///    bit is not set.
/// \param __A
///    A 128-bit vector of [8 x bfloat].
/// \returns A 256-bit vector of [8 x float] that comes from conversion of
///    __A.
static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_cvtpbh_ps(__m256 __S, __mmask8 __U, __m128bh __A) {
  return _mm256_castsi256_ps((__m256i)_mm256_mask_slli_epi32(
      (__m256i)__S, (__mmask8)__U, (__m256i)_mm256_cvtepi16_epi32((__m128i)__A),
      16));
}
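
Sketch (editor's addition, illustrative; assumes <immintrin.h>): widen eight
stored bf16 values back to floats. This direction is exact, which is why the
header can implement it as a 16-to-32-bit widen plus a 16-bit left shift
(the shift discards the extension bits, so sign extension is harmless).

static __m256 load8_bf16_as_ps(const void *p) {
  return _mm256_cvtpbh_ps((__m128bh)_mm_loadu_si128((const __m128i *)p));
}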

#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256

#endif
#endif
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512vlcdintrin.h
ADDED
@@ -0,0 +1,230 @@
/*===---- avx512vlcdintrin.h - AVX512VL and AVX512CD intrinsics ------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __IMMINTRIN_H
#error "Never use <avx512vlcdintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __AVX512VLCDINTRIN_H
#define __AVX512VLCDINTRIN_H

/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS128 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512vl,avx512cd,no-evex512"), \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512vl,avx512cd,no-evex512"), \
                 __min_vector_width__(256)))

/* Broadcast the zero-extended mask register value into every element
   (VPBROADCASTMB2Q / VPBROADCASTMW2D). */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastmb_epi64 (__mmask8 __A)
{
  return (__m128i) _mm_set1_epi64x((long long) __A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastmb_epi64 (__mmask8 __A)
{
  return (__m256i) _mm256_set1_epi64x((long long)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcastmw_epi32 (__mmask16 __A)
{
  return (__m128i) _mm_set1_epi32((int)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcastmw_epi32 (__mmask16 __A)
{
  return (__m256i) _mm256_set1_epi32((int)__A);
}
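
Sketch (editor's addition, illustrative; assumes <immintrin.h>):
broadcastm* materializes a mask register as a vector, e.g. to store a
comparison result alongside the data it refers to.

static __m256i compare_mask_as_vector(__m256i a, __m256i b) {
  __mmask8 k = _mm256_cmpeq_epi32_mask(a, b); /* AVX512VL compare into k */
  return _mm256_broadcastmb_epi64(k);         /* k replicated per lane   */
}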


/* VPCONFLICTQ / VPCONFLICTD: each result element is a bitmask of the
   lower-indexed elements that compare equal to it. */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_conflict_epi64 (__m128i __A)
{
  return (__m128i) __builtin_ia32_vpconflictdi_128 ((__v2di) __A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_conflict_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
{
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
                                             (__v2di)_mm_conflict_epi64(__A),
                                             (__v2di)__W);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_conflict_epi64 (__mmask8 __U, __m128i __A)
{
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
                                             (__v2di)_mm_conflict_epi64(__A),
                                             (__v2di)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_conflict_epi64 (__m256i __A)
{
  return (__m256i) __builtin_ia32_vpconflictdi_256 ((__v4di) __A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_conflict_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
{
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
                                             (__v4di)_mm256_conflict_epi64(__A),
                                             (__v4di)__W);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_conflict_epi64 (__mmask8 __U, __m256i __A)
{
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
                                             (__v4di)_mm256_conflict_epi64(__A),
                                             (__v4di)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_conflict_epi32 (__m128i __A)
{
  return (__m128i) __builtin_ia32_vpconflictsi_128 ((__v4si) __A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_conflict_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
{
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                             (__v4si)_mm_conflict_epi32(__A),
                                             (__v4si)__W);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_conflict_epi32 (__mmask8 __U, __m128i __A)
{
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                             (__v4si)_mm_conflict_epi32(__A),
                                             (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_conflict_epi32 (__m256i __A)
{
  return (__m256i) __builtin_ia32_vpconflictsi_256 ((__v8si) __A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_conflict_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
{
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                             (__v8si)_mm256_conflict_epi32(__A),
                                             (__v8si)__W);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_conflict_epi32 (__mmask8 __U, __m256i __A)
{
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                             (__v8si)_mm256_conflict_epi32(__A),
                                             (__v8si)_mm256_setzero_si256());
}
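
Sketch (editor's addition, illustrative; assumes <immintrin.h> and SSE4.1):
conflict detection is typically used to find duplicate indices before a
scatter or histogram update. A nonzero result lane means that element
repeats an earlier one.

static int has_duplicate_indices(__m128i idx) {
  __m128i c = _mm_conflict_epi32(idx);
  return !_mm_test_all_zeros(c, c); /* PTEST: any nonzero lane? */
}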

/* VPLZCNTD / VPLZCNTQ: per-element count of leading zero bits. */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_lzcnt_epi32 (__m128i __A)
{
  return (__m128i) __builtin_ia32_vplzcntd_128 ((__v4si) __A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_lzcnt_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
{
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                             (__v4si)_mm_lzcnt_epi32(__A),
                                             (__v4si)__W);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_lzcnt_epi32 (__mmask8 __U, __m128i __A)
{
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
                                             (__v4si)_mm_lzcnt_epi32(__A),
                                             (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_lzcnt_epi32 (__m256i __A)
{
  return (__m256i) __builtin_ia32_vplzcntd_256 ((__v8si) __A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_lzcnt_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
{
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                             (__v8si)_mm256_lzcnt_epi32(__A),
                                             (__v8si)__W);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_lzcnt_epi32 (__mmask8 __U, __m256i __A)
{
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
                                             (__v8si)_mm256_lzcnt_epi32(__A),
                                             (__v8si)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_lzcnt_epi64 (__m128i __A)
{
  return (__m128i) __builtin_ia32_vplzcntq_128 ((__v2di) __A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_lzcnt_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
{
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
                                             (__v2di)_mm_lzcnt_epi64(__A),
                                             (__v2di)__W);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_lzcnt_epi64 (__mmask8 __U, __m128i __A)
{
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
                                             (__v2di)_mm_lzcnt_epi64(__A),
                                             (__v2di)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_lzcnt_epi64 (__m256i __A)
{
  return (__m256i) __builtin_ia32_vplzcntq_256 ((__v4di) __A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_lzcnt_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
{
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
                                             (__v4di)_mm256_lzcnt_epi64(__A),
                                             (__v4di)__W);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_lzcnt_epi64 (__mmask8 __U, __m256i __A)
{
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
                                             (__v4di)_mm256_lzcnt_epi64(__A),
                                             (__v4di)_mm256_setzero_si256());
}
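
Sketch (editor's addition, illustrative; assumes <immintrin.h>): per-lane
floor(log2 x) for nonzero 32-bit lanes, computed as 31 - lzcnt(x).

static __m128i ilog2_epi32(__m128i x) {
  return _mm_sub_epi32(_mm_set1_epi32(31), _mm_lzcnt_epi32(x));
}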

#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256

#endif /* __AVX512VLCDINTRIN_H */
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512vldqintrin.h
ADDED
@@ -0,0 +1,1173 @@
/*===---- avx512vldqintrin.h - AVX512VL and AVX512DQ intrinsics ------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __IMMINTRIN_H
#error "Never use <avx512vldqintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __AVX512VLDQINTRIN_H
#define __AVX512VLDQINTRIN_H

/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS128 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512vl,avx512dq,no-evex512"), \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512vl,avx512dq,no-evex512"), \
                 __min_vector_width__(256)))

/* 64-bit lane-wise multiply, with merging and zeroing masked forms. */
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mullo_epi64 (__m256i __A, __m256i __B) {
  return (__m256i) ((__v4du) __A * (__v4du) __B);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_mullo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
                                             (__v4di)_mm256_mullo_epi64(__A, __B),
                                             (__v4di)__W);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_mullo_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
                                             (__v4di)_mm256_mullo_epi64(__A, __B),
                                             (__v4di)_mm256_setzero_si256());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mullo_epi64 (__m128i __A, __m128i __B) {
  return (__m128i) ((__v2du) __A * (__v2du) __B);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_mullo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
                                             (__v2di)_mm_mullo_epi64(__A, __B),
                                             (__v2di)__W);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_mullo_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
  return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
                                             (__v2di)_mm_mullo_epi64(__A, __B),
                                             (__v2di)_mm_setzero_si128());
}
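
Sketch (editor's addition, illustrative; assumes <immintrin.h>): with
AVX512DQ the plain vector multiply above can lower to a single VPMULLQ,
giving a true 64-bit lane-wise product.

static __m256i scale_counts(__m256i v) {
  return _mm256_mullo_epi64(v, _mm256_set1_epi64x(1000003LL));
}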

/* Masked forms of the floating-point logical operations: compute the op,
   then select per lane between the result and __W (mask) or zero (maskz). */
static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_andnot_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
                                              (__v4df)_mm256_andnot_pd(__A, __B),
                                              (__v4df)__W);
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_andnot_pd(__mmask8 __U, __m256d __A, __m256d __B) {
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
                                              (__v4df)_mm256_andnot_pd(__A, __B),
                                              (__v4df)_mm256_setzero_pd());
}

static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_andnot_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
                                              (__v2df)_mm_andnot_pd(__A, __B),
                                              (__v2df)__W);
}

static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_andnot_pd(__mmask8 __U, __m128d __A, __m128d __B) {
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
                                              (__v2df)_mm_andnot_pd(__A, __B),
                                              (__v2df)_mm_setzero_pd());
}

static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_andnot_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
                                             (__v8sf)_mm256_andnot_ps(__A, __B),
                                             (__v8sf)__W);
}

static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_andnot_ps(__mmask8 __U, __m256 __A, __m256 __B) {
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
                                             (__v8sf)_mm256_andnot_ps(__A, __B),
                                             (__v8sf)_mm256_setzero_ps());
}

static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_andnot_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
                                             (__v4sf)_mm_andnot_ps(__A, __B),
                                             (__v4sf)__W);
}

static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_andnot_ps(__mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
                                             (__v4sf)_mm_andnot_ps(__A, __B),
                                             (__v4sf)_mm_setzero_ps());
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_and_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
                                              (__v4df)_mm256_and_pd(__A, __B),
                                              (__v4df)__W);
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_and_pd(__mmask8 __U, __m256d __A, __m256d __B) {
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
                                              (__v4df)_mm256_and_pd(__A, __B),
                                              (__v4df)_mm256_setzero_pd());
}

static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_and_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
                                              (__v2df)_mm_and_pd(__A, __B),
                                              (__v2df)__W);
}

static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_and_pd(__mmask8 __U, __m128d __A, __m128d __B) {
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
                                              (__v2df)_mm_and_pd(__A, __B),
                                              (__v2df)_mm_setzero_pd());
}

static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_and_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
                                             (__v8sf)_mm256_and_ps(__A, __B),
                                             (__v8sf)__W);
}

static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_and_ps(__mmask8 __U, __m256 __A, __m256 __B) {
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
                                             (__v8sf)_mm256_and_ps(__A, __B),
                                             (__v8sf)_mm256_setzero_ps());
}

static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_and_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
                                             (__v4sf)_mm_and_ps(__A, __B),
                                             (__v4sf)__W);
}

static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_and_ps(__mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
                                             (__v4sf)_mm_and_ps(__A, __B),
                                             (__v4sf)_mm_setzero_ps());
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_xor_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
                                              (__v4df)_mm256_xor_pd(__A, __B),
                                              (__v4df)__W);
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_xor_pd(__mmask8 __U, __m256d __A, __m256d __B) {
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
                                              (__v4df)_mm256_xor_pd(__A, __B),
                                              (__v4df)_mm256_setzero_pd());
}

static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_xor_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
                                              (__v2df)_mm_xor_pd(__A, __B),
                                              (__v2df)__W);
}

static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_xor_pd (__mmask8 __U, __m128d __A, __m128d __B) {
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
                                              (__v2df)_mm_xor_pd(__A, __B),
                                              (__v2df)_mm_setzero_pd());
}

static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_xor_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
                                             (__v8sf)_mm256_xor_ps(__A, __B),
                                             (__v8sf)__W);
}

static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_xor_ps(__mmask8 __U, __m256 __A, __m256 __B) {
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
                                             (__v8sf)_mm256_xor_ps(__A, __B),
                                             (__v8sf)_mm256_setzero_ps());
}

static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_xor_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
                                             (__v4sf)_mm_xor_ps(__A, __B),
                                             (__v4sf)__W);
}

static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_xor_ps(__mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
                                             (__v4sf)_mm_xor_ps(__A, __B),
                                             (__v4sf)_mm_setzero_ps());
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_or_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
                                              (__v4df)_mm256_or_pd(__A, __B),
                                              (__v4df)__W);
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_or_pd(__mmask8 __U, __m256d __A, __m256d __B) {
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
                                              (__v4df)_mm256_or_pd(__A, __B),
                                              (__v4df)_mm256_setzero_pd());
}

static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_or_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
                                              (__v2df)_mm_or_pd(__A, __B),
                                              (__v2df)__W);
}

static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_or_pd(__mmask8 __U, __m128d __A, __m128d __B) {
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
                                              (__v2df)_mm_or_pd(__A, __B),
                                              (__v2df)_mm_setzero_pd());
}

static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_or_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
                                             (__v8sf)_mm256_or_ps(__A, __B),
                                             (__v8sf)__W);
}

static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_or_ps(__mmask8 __U, __m256 __A, __m256 __B) {
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
                                             (__v8sf)_mm256_or_ps(__A, __B),
                                             (__v8sf)_mm256_setzero_ps());
}

static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_or_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
                                             (__v4sf)_mm_or_ps(__A, __B),
                                             (__v4sf)__W);
}

static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_or_ps(__mmask8 __U, __m128 __A, __m128 __B) {
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
                                             (__v4sf)_mm_or_ps(__A, __B),
                                             (__v4sf)_mm_setzero_ps());
}
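
Sketch (editor's addition, illustrative; assumes <immintrin.h>): a masked
absolute value built from the masked ANDNOT above. Lanes with a cleared
mask bit keep their original (possibly negative) value.

static __m256d masked_fabs_pd(__m256d v, __mmask8 keep) {
  const __m256d sign = _mm256_set1_pd(-0.0);      /* sign bit only      */
  return _mm256_mask_andnot_pd(v, keep, sign, v); /* ~sign & v, else v  */
}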

/* Conversions between floating point and 64-bit integer elements
   (VCVTPD2QQ, VCVTPD2UQQ, VCVTPS2QQ, VCVTPS2UQQ), new with AVX512DQ. */
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtpd_epi64 (__m128d __A) {
  return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
                                                    (__v2di) _mm_setzero_si128(),
                                                    (__mmask8) -1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) {
  return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
                                                    (__v2di) __W,
                                                    (__mmask8) __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtpd_epi64 (__mmask8 __U, __m128d __A) {
  return (__m128i) __builtin_ia32_cvtpd2qq128_mask ((__v2df) __A,
                                                    (__v2di) _mm_setzero_si128(),
                                                    (__mmask8) __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtpd_epi64 (__m256d __A) {
  return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
                                                    (__v4di) _mm256_setzero_si256(),
                                                    (__mmask8) -1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) {
  return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
                                                    (__v4di) __W,
                                                    (__mmask8) __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtpd_epi64 (__mmask8 __U, __m256d __A) {
  return (__m256i) __builtin_ia32_cvtpd2qq256_mask ((__v4df) __A,
                                                    (__v4di) _mm256_setzero_si256(),
                                                    (__mmask8) __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtpd_epu64 (__m128d __A) {
  return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
                                                     (__v2di) _mm_setzero_si128(),
                                                     (__mmask8) -1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) {
  return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
                                                     (__v2di) __W,
                                                     (__mmask8) __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtpd_epu64 (__mmask8 __U, __m128d __A) {
  return (__m128i) __builtin_ia32_cvtpd2uqq128_mask ((__v2df) __A,
                                                     (__v2di) _mm_setzero_si128(),
                                                     (__mmask8) __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtpd_epu64 (__m256d __A) {
  return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
                                                     (__v4di) _mm256_setzero_si256(),
                                                     (__mmask8) -1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) {
  return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
                                                     (__v4di) __W,
                                                     (__mmask8) __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtpd_epu64 (__mmask8 __U, __m256d __A) {
  return (__m256i) __builtin_ia32_cvtpd2uqq256_mask ((__v4df) __A,
                                                     (__v4di) _mm256_setzero_si256(),
                                                     (__mmask8) __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtps_epi64 (__m128 __A) {
  return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
                                                    (__v2di) _mm_setzero_si128(),
                                                    (__mmask8) -1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) {
  return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
                                                    (__v2di) __W,
                                                    (__mmask8) __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) {
  return (__m128i) __builtin_ia32_cvtps2qq128_mask ((__v4sf) __A,
                                                    (__v2di) _mm_setzero_si128(),
                                                    (__mmask8) __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtps_epi64 (__m128 __A) {
  return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
                                                    (__v4di) _mm256_setzero_si256(),
                                                    (__mmask8) -1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) {
  return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
                                                    (__v4di) __W,
                                                    (__mmask8) __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtps_epi64 (__mmask8 __U, __m128 __A) {
  return (__m256i) __builtin_ia32_cvtps2qq256_mask ((__v4sf) __A,
                                                    (__v4di) _mm256_setzero_si256(),
                                                    (__mmask8) __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvtps_epu64 (__m128 __A) {
  return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
                                                     (__v2di) _mm_setzero_si128(),
                                                     (__mmask8) -1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvtps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) {
  return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
                                                     (__v2di) __W,
                                                     (__mmask8) __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) {
  return (__m128i) __builtin_ia32_cvtps2uqq128_mask ((__v4sf) __A,
                                                     (__v2di) _mm_setzero_si128(),
                                                     (__mmask8) __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvtps_epu64 (__m128 __A) {
  return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
                                                     (__v4di) _mm256_setzero_si256(),
                                                     (__mmask8) -1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvtps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) {
  return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
                                                     (__v4di) __W,
                                                     (__mmask8) __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtps_epu64 (__mmask8 __U, __m128 __A) {
  return (__m256i) __builtin_ia32_cvtps2uqq256_mask ((__v4sf) __A,
                                                     (__v4di) _mm256_setzero_si256(),
                                                     (__mmask8) __U);
}
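
Sketch (editor's addition, illustrative; assumes <immintrin.h>): before
AVX512DQ a packed double to int64 conversion needed a scalar fallback; here
it is a single VCVTPD2QQ using the current rounding mode.

static void round_to_i64(const double *src, long long *dst) {
  __m256i r = _mm256_cvtpd_epi64(_mm256_loadu_pd(src));
  _mm256_storeu_si256((__m256i *)dst, r);
}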

static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_cvtepi64_pd (__m128i __A) {
  return (__m128d)__builtin_convertvector((__v2di)__A, __v2df);
}

static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi64_pd (__m128d __W, __mmask8 __U, __m128i __A) {
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
                                              (__v2df)_mm_cvtepi64_pd(__A),
                                              (__v2df)__W);
}

static __inline__ __m128d __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepi64_pd (__mmask8 __U, __m128i __A) {
  return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
                                              (__v2df)_mm_cvtepi64_pd(__A),
                                              (__v2df)_mm_setzero_pd());
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_cvtepi64_pd (__m256i __A) {
  return (__m256d)__builtin_convertvector((__v4di)__A, __v4df);
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi64_pd (__m256d __W, __mmask8 __U, __m256i __A) {
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
                                              (__v4df)_mm256_cvtepi64_pd(__A),
                                              (__v4df)__W);
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepi64_pd (__mmask8 __U, __m256i __A) {
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
                                              (__v4df)_mm256_cvtepi64_pd(__A),
                                              (__v4df)_mm256_setzero_pd());
}

static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_cvtepi64_ps (__m128i __A) {
  return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
                                                   (__v4sf) _mm_setzero_ps(),
                                                   (__mmask8) -1);
}

static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m128i __A) {
  return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
                                                   (__v4sf) __W,
                                                   (__mmask8) __U);
}

static __inline__ __m128 __DEFAULT_FN_ATTRS128
_mm_maskz_cvtepi64_ps (__mmask8 __U, __m128i __A) {
  return (__m128) __builtin_ia32_cvtqq2ps128_mask ((__v2di) __A,
                                                   (__v4sf) _mm_setzero_ps(),
                                                   (__mmask8) __U);
}

static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_cvtepi64_ps (__m256i __A) {
  return (__m128)__builtin_convertvector((__v4di)__A, __v4sf);
}

static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_mask_cvtepi64_ps (__m128 __W, __mmask8 __U, __m256i __A) {
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
                                             (__v4sf)_mm256_cvtepi64_ps(__A),
                                             (__v4sf)__W);
}

static __inline__ __m128 __DEFAULT_FN_ATTRS256
_mm256_maskz_cvtepi64_ps (__mmask8 __U, __m256i __A) {
  return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
                                             (__v4sf)_mm256_cvtepi64_ps(__A),
                                             (__v4sf)_mm_setzero_ps());
}
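
Sketch (editor's addition, illustrative; assumes <immintrin.h>): the
reverse direction, int64 to double (VCVTQQ2PD). Values too large for a
double's 53-bit mantissa are rounded to the nearest representable value.

static __m256d counts_as_pd(__m256i counts) {
  return _mm256_cvtepi64_pd(counts);
}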

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvttpd_epi64 (__m128d __A) {
  return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
                                                     (__v2di) _mm_setzero_si128(),
                                                     (__mmask8) -1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttpd_epi64 (__m128i __W, __mmask8 __U, __m128d __A) {
  return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
                                                     (__v2di) __W,
                                                     (__mmask8) __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttpd_epi64 (__mmask8 __U, __m128d __A) {
  return (__m128i) __builtin_ia32_cvttpd2qq128_mask ((__v2df) __A,
                                                     (__v2di) _mm_setzero_si128(),
                                                     (__mmask8) __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttpd_epi64 (__m256d __A) {
  return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
                                                     (__v4di) _mm256_setzero_si256(),
                                                     (__mmask8) -1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttpd_epi64 (__m256i __W, __mmask8 __U, __m256d __A) {
  return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
                                                     (__v4di) __W,
                                                     (__mmask8) __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttpd_epi64 (__mmask8 __U, __m256d __A) {
  return (__m256i) __builtin_ia32_cvttpd2qq256_mask ((__v4df) __A,
                                                     (__v4di) _mm256_setzero_si256(),
                                                     (__mmask8) __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvttpd_epu64 (__m128d __A) {
  return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
                                                      (__v2di) _mm_setzero_si128(),
                                                      (__mmask8) -1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttpd_epu64 (__m128i __W, __mmask8 __U, __m128d __A) {
  return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
                                                      (__v2di) __W,
                                                      (__mmask8) __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_cvttpd_epu64 (__mmask8 __U, __m128d __A) {
  return (__m128i) __builtin_ia32_cvttpd2uqq128_mask ((__v2df) __A,
                                                      (__v2di) _mm_setzero_si128(),
                                                      (__mmask8) __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_cvttpd_epu64 (__m256d __A) {
  return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
                                                      (__v4di) _mm256_setzero_si256(),
                                                      (__mmask8) -1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_cvttpd_epu64 (__m256i __W, __mmask8 __U, __m256d __A) {
  return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
                                                      (__v4di) __W,
                                                      (__mmask8) __U);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_cvttpd_epu64 (__mmask8 __U, __m256d __A) {
  return (__m256i) __builtin_ia32_cvttpd2uqq256_mask ((__v4df) __A,
                                                      (__v4di) _mm256_setzero_si256(),
                                                      (__mmask8) __U);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_cvttps_epi64 (__m128 __A) {
  return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
                                                     (__v2di) _mm_setzero_si128(),
                                                     (__mmask8) -1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_cvttps_epi64 (__m128i __W, __mmask8 __U, __m128 __A) {
|
628 |
+
return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
|
629 |
+
(__v2di) __W,
|
630 |
+
(__mmask8) __U);
|
631 |
+
}
|
632 |
+
|
633 |
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
|
634 |
+
_mm_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) {
|
635 |
+
return (__m128i) __builtin_ia32_cvttps2qq128_mask ((__v4sf) __A,
|
636 |
+
(__v2di) _mm_setzero_si128(),
|
637 |
+
(__mmask8) __U);
|
638 |
+
}
|
639 |
+
|
640 |
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
|
641 |
+
_mm256_cvttps_epi64 (__m128 __A) {
|
642 |
+
return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
|
643 |
+
(__v4di) _mm256_setzero_si256(),
|
644 |
+
(__mmask8) -1);
|
645 |
+
}
|
646 |
+
|
647 |
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
|
648 |
+
_mm256_mask_cvttps_epi64 (__m256i __W, __mmask8 __U, __m128 __A) {
|
649 |
+
return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
|
650 |
+
(__v4di) __W,
|
651 |
+
(__mmask8) __U);
|
652 |
+
}
|
653 |
+
|
654 |
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
|
655 |
+
_mm256_maskz_cvttps_epi64 (__mmask8 __U, __m128 __A) {
|
656 |
+
return (__m256i) __builtin_ia32_cvttps2qq256_mask ((__v4sf) __A,
|
657 |
+
(__v4di) _mm256_setzero_si256(),
|
658 |
+
(__mmask8) __U);
|
659 |
+
}
|
660 |
+
|
661 |
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
|
662 |
+
_mm_cvttps_epu64 (__m128 __A) {
|
663 |
+
return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
|
664 |
+
(__v2di) _mm_setzero_si128(),
|
665 |
+
(__mmask8) -1);
|
666 |
+
}
|
667 |
+
|
668 |
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
|
669 |
+
_mm_mask_cvttps_epu64 (__m128i __W, __mmask8 __U, __m128 __A) {
|
670 |
+
return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
|
671 |
+
(__v2di) __W,
|
672 |
+
(__mmask8) __U);
|
673 |
+
}
|
674 |
+
|
675 |
+
static __inline__ __m128i __DEFAULT_FN_ATTRS128
|
676 |
+
_mm_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) {
|
677 |
+
return (__m128i) __builtin_ia32_cvttps2uqq128_mask ((__v4sf) __A,
|
678 |
+
(__v2di) _mm_setzero_si128(),
|
679 |
+
(__mmask8) __U);
|
680 |
+
}
|
681 |
+
|
682 |
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
|
683 |
+
_mm256_cvttps_epu64 (__m128 __A) {
|
684 |
+
return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
|
685 |
+
(__v4di) _mm256_setzero_si256(),
|
686 |
+
(__mmask8) -1);
|
687 |
+
}
|
688 |
+
|
689 |
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
|
690 |
+
_mm256_mask_cvttps_epu64 (__m256i __W, __mmask8 __U, __m128 __A) {
|
691 |
+
return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
|
692 |
+
(__v4di) __W,
|
693 |
+
(__mmask8) __U);
|
694 |
+
}
|
695 |
+
|
696 |
+
static __inline__ __m256i __DEFAULT_FN_ATTRS256
|
697 |
+
_mm256_maskz_cvttps_epu64 (__mmask8 __U, __m128 __A) {
|
698 |
+
return (__m256i) __builtin_ia32_cvttps2uqq256_mask ((__v4sf) __A,
|
699 |
+
(__v4di) _mm256_setzero_si256(),
|
700 |
+
(__mmask8) __U);
|
701 |
+
}
|
702 |
+
|
703 |
+
static __inline__ __m128d __DEFAULT_FN_ATTRS128
|
704 |
+
_mm_cvtepu64_pd (__m128i __A) {
|
705 |
+
return (__m128d)__builtin_convertvector((__v2du)__A, __v2df);
|
706 |
+
}
|
707 |
+
|
708 |
+
static __inline__ __m128d __DEFAULT_FN_ATTRS128
|
709 |
+
_mm_mask_cvtepu64_pd (__m128d __W, __mmask8 __U, __m128i __A) {
|
710 |
+
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
|
711 |
+
(__v2df)_mm_cvtepu64_pd(__A),
|
712 |
+
(__v2df)__W);
|
713 |
+
}
|
714 |
+
|
715 |
+
static __inline__ __m128d __DEFAULT_FN_ATTRS128
|
716 |
+
_mm_maskz_cvtepu64_pd (__mmask8 __U, __m128i __A) {
|
717 |
+
return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
|
718 |
+
(__v2df)_mm_cvtepu64_pd(__A),
|
719 |
+
(__v2df)_mm_setzero_pd());
|
720 |
+
}
|
721 |
+
|
722 |
+
static __inline__ __m256d __DEFAULT_FN_ATTRS256
|
723 |
+
_mm256_cvtepu64_pd (__m256i __A) {
|
724 |
+
return (__m256d)__builtin_convertvector((__v4du)__A, __v4df);
|
725 |
+
}
|
726 |
+
|
727 |
+
static __inline__ __m256d __DEFAULT_FN_ATTRS256
|
728 |
+
_mm256_mask_cvtepu64_pd (__m256d __W, __mmask8 __U, __m256i __A) {
|
729 |
+
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
|
730 |
+
(__v4df)_mm256_cvtepu64_pd(__A),
|
731 |
+
(__v4df)__W);
|
732 |
+
}
|
733 |
+
|
734 |
+
static __inline__ __m256d __DEFAULT_FN_ATTRS256
|
735 |
+
_mm256_maskz_cvtepu64_pd (__mmask8 __U, __m256i __A) {
|
736 |
+
return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
|
737 |
+
(__v4df)_mm256_cvtepu64_pd(__A),
|
738 |
+
(__v4df)_mm256_setzero_pd());
|
739 |
+
}
|
740 |
+
|
741 |
+
static __inline__ __m128 __DEFAULT_FN_ATTRS128
|
742 |
+
_mm_cvtepu64_ps (__m128i __A) {
|
743 |
+
return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
|
744 |
+
(__v4sf) _mm_setzero_ps(),
|
745 |
+
(__mmask8) -1);
|
746 |
+
}
|
747 |
+
|
748 |
+
static __inline__ __m128 __DEFAULT_FN_ATTRS128
|
749 |
+
_mm_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m128i __A) {
|
750 |
+
return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
|
751 |
+
(__v4sf) __W,
|
752 |
+
(__mmask8) __U);
|
753 |
+
}
|
754 |
+
|
755 |
+
static __inline__ __m128 __DEFAULT_FN_ATTRS128
|
756 |
+
_mm_maskz_cvtepu64_ps (__mmask8 __U, __m128i __A) {
|
757 |
+
return (__m128) __builtin_ia32_cvtuqq2ps128_mask ((__v2di) __A,
|
758 |
+
(__v4sf) _mm_setzero_ps(),
|
759 |
+
(__mmask8) __U);
|
760 |
+
}
|
761 |
+
|
762 |
+
static __inline__ __m128 __DEFAULT_FN_ATTRS256
|
763 |
+
_mm256_cvtepu64_ps (__m256i __A) {
|
764 |
+
return (__m128)__builtin_convertvector((__v4du)__A, __v4sf);
|
765 |
+
}
|
766 |
+
|
767 |
+
static __inline__ __m128 __DEFAULT_FN_ATTRS256
|
768 |
+
_mm256_mask_cvtepu64_ps (__m128 __W, __mmask8 __U, __m256i __A) {
|
769 |
+
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
|
770 |
+
(__v4sf)_mm256_cvtepu64_ps(__A),
|
771 |
+
(__v4sf)__W);
|
772 |
+
}
|
773 |
+
|
774 |
+
static __inline__ __m128 __DEFAULT_FN_ATTRS256
|
775 |
+
_mm256_maskz_cvtepu64_ps (__mmask8 __U, __m256i __A) {
|
776 |
+
return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
|
777 |
+
(__v4sf)_mm256_cvtepu64_ps(__A),
|
778 |
+
(__v4sf)_mm_setzero_ps());
|
779 |
+
}
|
780 |
+
|
781 |
+
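A note on the pattern above: each conversion comes in three flavors. The plain form converts all lanes, the _mask_ form merges converted lanes into __W wherever the corresponding bit of __U is set, and the _maskz_ form zeroes the inactive lanes instead. A minimal sketch of the difference, assuming a toolchain with -mavx512dq -mavx512vl; names and values are illustrative only:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256i q = _mm256_set_epi64x(4, 3, 2, 1);            /* lanes: 1,2,3,4 */
  __m256d w = _mm256_set1_pd(-1.0);                     /* merge fallback */
  __m256d merged = _mm256_mask_cvtepi64_pd(w, 0x5, q);  /* {1,-1,3,-1} */
  __m256d zeroed = _mm256_maskz_cvtepi64_pd(0x5, q);    /* {1, 0,3, 0} */
  double m[4], z[4];
  _mm256_storeu_pd(m, merged);
  _mm256_storeu_pd(z, zeroed);
  printf("%g %g %g %g / %g %g %g %g\n",
         m[0], m[1], m[2], m[3], z[0], z[1], z[2], z[3]);
  return 0;
}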
#define _mm_range_pd(A, B, C) \
  ((__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
                                           (__v2df)(__m128d)(B), (int)(C), \
                                           (__v2df)_mm_setzero_pd(), \
                                           (__mmask8)-1))

#define _mm_mask_range_pd(W, U, A, B, C) \
  ((__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
                                           (__v2df)(__m128d)(B), (int)(C), \
                                           (__v2df)(__m128d)(W), \
                                           (__mmask8)(U)))

#define _mm_maskz_range_pd(U, A, B, C) \
  ((__m128d)__builtin_ia32_rangepd128_mask((__v2df)(__m128d)(A), \
                                           (__v2df)(__m128d)(B), (int)(C), \
                                           (__v2df)_mm_setzero_pd(), \
                                           (__mmask8)(U)))

#define _mm256_range_pd(A, B, C) \
  ((__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
                                           (__v4df)(__m256d)(B), (int)(C), \
                                           (__v4df)_mm256_setzero_pd(), \
                                           (__mmask8)-1))

#define _mm256_mask_range_pd(W, U, A, B, C) \
  ((__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
                                           (__v4df)(__m256d)(B), (int)(C), \
                                           (__v4df)(__m256d)(W), \
                                           (__mmask8)(U)))

#define _mm256_maskz_range_pd(U, A, B, C) \
  ((__m256d)__builtin_ia32_rangepd256_mask((__v4df)(__m256d)(A), \
                                           (__v4df)(__m256d)(B), (int)(C), \
                                           (__v4df)_mm256_setzero_pd(), \
                                           (__mmask8)(U)))

#define _mm_range_ps(A, B, C) \
  ((__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
                                          (__v4sf)(__m128)(B), (int)(C), \
                                          (__v4sf)_mm_setzero_ps(), \
                                          (__mmask8)-1))

#define _mm_mask_range_ps(W, U, A, B, C) \
  ((__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
                                          (__v4sf)(__m128)(B), (int)(C), \
                                          (__v4sf)(__m128)(W), (__mmask8)(U)))

#define _mm_maskz_range_ps(U, A, B, C) \
  ((__m128)__builtin_ia32_rangeps128_mask((__v4sf)(__m128)(A), \
                                          (__v4sf)(__m128)(B), (int)(C), \
                                          (__v4sf)_mm_setzero_ps(), \
                                          (__mmask8)(U)))

#define _mm256_range_ps(A, B, C) \
  ((__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
                                          (__v8sf)(__m256)(B), (int)(C), \
                                          (__v8sf)_mm256_setzero_ps(), \
                                          (__mmask8)-1))

#define _mm256_mask_range_ps(W, U, A, B, C) \
  ((__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
                                          (__v8sf)(__m256)(B), (int)(C), \
                                          (__v8sf)(__m256)(W), (__mmask8)(U)))

#define _mm256_maskz_range_ps(U, A, B, C) \
  ((__m256)__builtin_ia32_rangeps256_mask((__v8sf)(__m256)(A), \
                                          (__v8sf)(__m256)(B), (int)(C), \
                                          (__v8sf)_mm256_setzero_ps(), \
                                          (__mmask8)(U)))

#define _mm_reduce_pd(A, B) \
  ((__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)-1))

#define _mm_mask_reduce_pd(W, U, A, B) \
  ((__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
                                            (__v2df)(__m128d)(W), \
                                            (__mmask8)(U)))

#define _mm_maskz_reduce_pd(U, A, B) \
  ((__m128d)__builtin_ia32_reducepd128_mask((__v2df)(__m128d)(A), (int)(B), \
                                            (__v2df)_mm_setzero_pd(), \
                                            (__mmask8)(U)))

#define _mm256_reduce_pd(A, B) \
  ((__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
                                            (__v4df)_mm256_setzero_pd(), \
                                            (__mmask8)-1))

#define _mm256_mask_reduce_pd(W, U, A, B) \
  ((__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
                                            (__v4df)(__m256d)(W), \
                                            (__mmask8)(U)))

#define _mm256_maskz_reduce_pd(U, A, B) \
  ((__m256d)__builtin_ia32_reducepd256_mask((__v4df)(__m256d)(A), (int)(B), \
                                            (__v4df)_mm256_setzero_pd(), \
                                            (__mmask8)(U)))

#define _mm_reduce_ps(A, B) \
  ((__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)-1))

#define _mm_mask_reduce_ps(W, U, A, B) \
  ((__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
                                           (__v4sf)(__m128)(W), \
                                           (__mmask8)(U)))

#define _mm_maskz_reduce_ps(U, A, B) \
  ((__m128)__builtin_ia32_reduceps128_mask((__v4sf)(__m128)(A), (int)(B), \
                                           (__v4sf)_mm_setzero_ps(), \
                                           (__mmask8)(U)))

#define _mm256_reduce_ps(A, B) \
  ((__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
                                           (__v8sf)_mm256_setzero_ps(), \
                                           (__mmask8)-1))

#define _mm256_mask_reduce_ps(W, U, A, B) \
  ((__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
                                           (__v8sf)(__m256)(W), \
                                           (__mmask8)(U)))

#define _mm256_maskz_reduce_ps(U, A, B) \
  ((__m256)__builtin_ia32_reduceps256_mask((__v8sf)(__m256)(A), (int)(B), \
                                           (__v8sf)_mm256_setzero_ps(), \
                                           (__mmask8)(U)))
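The range and reduce entries are macros rather than inline functions because their last operand is an immediate that must be a compile-time constant (it is encoded directly into the VRANGEP*/VREDUCEP* instruction). One hedged usage sketch; the immediate interpretation (low two bits pick min/max/abs-min/abs-max, the next two bits pick the sign behavior) follows the Intel SDM description of VRANGEPD and is an assumption on my part, not something this header states:

#include <immintrin.h>

/* Lane-wise larger-magnitude value of two vectors. Imm 0x7 is assumed to
 * mean "select max-abs, sign taken from the comparison result" per the
 * VRANGEPD immediate encoding -- verify against the SDM before relying
 * on it. */
static inline __m256d max_magnitude(__m256d a, __m256d b) {
  return _mm256_range_pd(a, b, 0x7);
}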
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_movepi32_mask (__m128i __A)
{
  return (__mmask8) __builtin_ia32_cvtd2mask128 ((__v4si) __A);
}

static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
_mm256_movepi32_mask (__m256i __A)
{
  return (__mmask8) __builtin_ia32_cvtd2mask256 ((__v8si) __A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_movm_epi32 (__mmask8 __A)
{
  return (__m128i) __builtin_ia32_cvtmask2d128 (__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_movm_epi32 (__mmask8 __A)
{
  return (__m256i) __builtin_ia32_cvtmask2d256 (__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_movm_epi64 (__mmask8 __A)
{
  return (__m128i) __builtin_ia32_cvtmask2q128 (__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_movm_epi64 (__mmask8 __A)
{
  return (__m256i) __builtin_ia32_cvtmask2q256 (__A);
}

static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
_mm_movepi64_mask (__m128i __A)
{
  return (__mmask8) __builtin_ia32_cvtq2mask128 ((__v2di) __A);
}

static __inline__ __mmask8 __DEFAULT_FN_ATTRS256
_mm256_movepi64_mask (__m256i __A)
{
  return (__mmask8) __builtin_ia32_cvtq2mask256 ((__v4di) __A);
}

static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_broadcast_f32x2 (__m128 __A)
{
  return (__m256)__builtin_shufflevector((__v4sf)__A, (__v4sf)__A,
                                         0, 1, 0, 1, 0, 1, 0, 1);
}

static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_mask_broadcast_f32x2 (__m256 __O, __mmask8 __M, __m128 __A)
{
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
                                 (__v8sf)_mm256_broadcast_f32x2(__A),
                                 (__v8sf)__O);
}

static __inline__ __m256 __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcast_f32x2 (__mmask8 __M, __m128 __A)
{
  return (__m256)__builtin_ia32_selectps_256((__mmask8)__M,
                                 (__v8sf)_mm256_broadcast_f32x2(__A),
                                 (__v8sf)_mm256_setzero_ps());
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_broadcast_f64x2(__m128d __A)
{
  return (__m256d)__builtin_shufflevector((__v2df)__A, (__v2df)__A,
                                          0, 1, 0, 1);
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_mask_broadcast_f64x2(__m256d __O, __mmask8 __M, __m128d __A)
{
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__M,
                                  (__v4df)_mm256_broadcast_f64x2(__A),
                                  (__v4df)__O);
}

static __inline__ __m256d __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcast_f64x2 (__mmask8 __M, __m128d __A)
{
  return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__M,
                                  (__v4df)_mm256_broadcast_f64x2(__A),
                                  (__v4df)_mm256_setzero_pd());
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_broadcast_i32x2 (__m128i __A)
{
  return (__m128i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
                                          0, 1, 0, 1);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_broadcast_i32x2 (__m128i __O, __mmask8 __M, __m128i __A)
{
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
                                 (__v4si)_mm_broadcast_i32x2(__A),
                                 (__v4si)__O);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A)
{
  return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
                                 (__v4si)_mm_broadcast_i32x2(__A),
                                 (__v4si)_mm_setzero_si128());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcast_i32x2 (__m128i __A)
{
  return (__m256i)__builtin_shufflevector((__v4si)__A, (__v4si)__A,
                                          0, 1, 0, 1, 0, 1, 0, 1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_broadcast_i32x2 (__m256i __O, __mmask8 __M, __m128i __A)
{
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
                                 (__v8si)_mm256_broadcast_i32x2(__A),
                                 (__v8si)__O);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcast_i32x2 (__mmask8 __M, __m128i __A)
{
  return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
                                 (__v8si)_mm256_broadcast_i32x2(__A),
                                 (__v8si)_mm256_setzero_si256());
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_broadcast_i64x2(__m128i __A)
{
  return (__m256i)__builtin_shufflevector((__v2di)__A, (__v2di)__A,
                                          0, 1, 0, 1);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_broadcast_i64x2(__m256i __O, __mmask8 __M, __m128i __A)
{
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
                                 (__v4di)_mm256_broadcast_i64x2(__A),
                                 (__v4di)__O);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A)
{
  return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
                                 (__v4di)_mm256_broadcast_i64x2(__A),
                                 (__v4di)_mm256_setzero_si256());
}
|
1075 |
+
((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
|
1076 |
+
(int)(imm), \
|
1077 |
+
(__v2df)_mm_undefined_pd(), \
|
1078 |
+
(__mmask8)-1))
|
1079 |
+
|
1080 |
+
#define _mm256_mask_extractf64x2_pd(W, U, A, imm) \
|
1081 |
+
((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
|
1082 |
+
(int)(imm), \
|
1083 |
+
(__v2df)(__m128d)(W), \
|
1084 |
+
(__mmask8)(U)))
|
1085 |
+
|
1086 |
+
#define _mm256_maskz_extractf64x2_pd(U, A, imm) \
|
1087 |
+
((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
|
1088 |
+
(int)(imm), \
|
1089 |
+
(__v2df)_mm_setzero_pd(), \
|
1090 |
+
(__mmask8)(U)))
|
1091 |
+
|
1092 |
+
#define _mm256_extracti64x2_epi64(A, imm) \
|
1093 |
+
((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
|
1094 |
+
(int)(imm), \
|
1095 |
+
(__v2di)_mm_undefined_si128(), \
|
1096 |
+
(__mmask8)-1))
|
1097 |
+
|
1098 |
+
#define _mm256_mask_extracti64x2_epi64(W, U, A, imm) \
|
1099 |
+
((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
|
1100 |
+
(int)(imm), \
|
1101 |
+
(__v2di)(__m128i)(W), \
|
1102 |
+
(__mmask8)(U)))
|
1103 |
+
|
1104 |
+
#define _mm256_maskz_extracti64x2_epi64(U, A, imm) \
|
1105 |
+
((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
|
1106 |
+
(int)(imm), \
|
1107 |
+
(__v2di)_mm_setzero_si128(), \
|
1108 |
+
(__mmask8)(U)))
|
1109 |
+
|
1110 |
+
#define _mm256_insertf64x2(A, B, imm) \
|
1111 |
+
((__m256d)__builtin_ia32_insertf64x2_256((__v4df)(__m256d)(A), \
|
1112 |
+
(__v2df)(__m128d)(B), (int)(imm)))
|
1113 |
+
|
1114 |
+
#define _mm256_mask_insertf64x2(W, U, A, B, imm) \
|
1115 |
+
((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
|
1116 |
+
(__v4df)_mm256_insertf64x2((A), (B), (imm)), \
|
1117 |
+
(__v4df)(__m256d)(W)))
|
1118 |
+
|
1119 |
+
#define _mm256_maskz_insertf64x2(U, A, B, imm) \
|
1120 |
+
((__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
|
1121 |
+
(__v4df)_mm256_insertf64x2((A), (B), (imm)), \
|
1122 |
+
(__v4df)_mm256_setzero_pd()))
|
1123 |
+
|
1124 |
+
#define _mm256_inserti64x2(A, B, imm) \
|
1125 |
+
((__m256i)__builtin_ia32_inserti64x2_256((__v4di)(__m256i)(A), \
|
1126 |
+
(__v2di)(__m128i)(B), (int)(imm)))
|
1127 |
+
|
1128 |
+
#define _mm256_mask_inserti64x2(W, U, A, B, imm) \
|
1129 |
+
((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
|
1130 |
+
(__v4di)_mm256_inserti64x2((A), (B), (imm)), \
|
1131 |
+
(__v4di)(__m256i)(W)))
|
1132 |
+
|
1133 |
+
#define _mm256_maskz_inserti64x2(U, A, B, imm) \
|
1134 |
+
((__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
|
1135 |
+
(__v4di)_mm256_inserti64x2((A), (B), (imm)), \
|
1136 |
+
(__v4di)_mm256_setzero_si256()))
|
1137 |
+
|
1138 |
+
#define _mm_mask_fpclass_pd_mask(U, A, imm) \
|
1139 |
+
((__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
|
1140 |
+
(__mmask8)(U)))
|
1141 |
+
|
1142 |
+
#define _mm_fpclass_pd_mask(A, imm) \
|
1143 |
+
((__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
|
1144 |
+
(__mmask8)-1))
|
1145 |
+
|
1146 |
+
#define _mm256_mask_fpclass_pd_mask(U, A, imm) \
|
1147 |
+
((__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
|
1148 |
+
(__mmask8)(U)))
|
1149 |
+
|
1150 |
+
#define _mm256_fpclass_pd_mask(A, imm) \
|
1151 |
+
((__mmask8)__builtin_ia32_fpclasspd256_mask((__v4df)(__m256d)(A), (int)(imm), \
|
1152 |
+
(__mmask8)-1))
|
1153 |
+
|
1154 |
+
#define _mm_mask_fpclass_ps_mask(U, A, imm) \
|
1155 |
+
((__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
|
1156 |
+
(__mmask8)(U)))
|
1157 |
+
|
1158 |
+
#define _mm_fpclass_ps_mask(A, imm) \
|
1159 |
+
((__mmask8)__builtin_ia32_fpclassps128_mask((__v4sf)(__m128)(A), (int)(imm), \
|
1160 |
+
(__mmask8)-1))
|
1161 |
+
|
1162 |
+
#define _mm256_mask_fpclass_ps_mask(U, A, imm) \
|
1163 |
+
((__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
|
1164 |
+
(__mmask8)(U)))
|
1165 |
+
|
1166 |
+
#define _mm256_fpclass_ps_mask(A, imm) \
|
1167 |
+
((__mmask8)__builtin_ia32_fpclassps256_mask((__v8sf)(__m256)(A), (int)(imm), \
|
1168 |
+
(__mmask8)-1))
|
1169 |
+
|
1170 |
+
#undef __DEFAULT_FN_ATTRS128
|
1171 |
+
#undef __DEFAULT_FN_ATTRS256
|
1172 |
+
|
1173 |
+
#endif
|
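The fpclass macros that close this file classify each lane against a set of categories selected by the immediate and return the matches as a mask. The category bits used below (0x01 for QNaN, 0x80 for SNaN) follow the usual VFPCLASS encoding and are an assumption on my part, not something spelled out in this header:

#include <immintrin.h>

/* Mask of lanes holding any kind of NaN, assuming imm bits 0x01 (QNaN)
 * and 0x80 (SNaN) per the VFPCLASSPD immediate encoding. */
static inline __mmask8 nan_lanes(__m256d v) {
  return _mm256_fpclass_pd_mask(v, 0x01 | 0x80);
}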
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512vp2intersectintrin.h
ADDED
@@ -0,0 +1,78 @@
/*===------- avx512vpintersectintrin.h - VP2INTERSECT intrinsics ------------===
 *
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __IMMINTRIN_H
#error "Never use <avx512vp2intersect.h> directly; include <immintrin.h> instead."
#endif

#ifndef _AVX512VP2INTERSECT_H
#define _AVX512VP2INTERSECT_H

#define __DEFAULT_FN_ATTRS \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512vp2intersect,evex512"), \
                 __min_vector_width__(512)))

/// Store, in an even/odd pair of mask registers, the indicators of the
/// locations of value matches between dwords in operands __a and __b.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VP2INTERSECTD </c> instruction.
///
/// \param __a
///    A 512-bit vector of [16 x i32].
/// \param __b
///    A 512-bit vector of [16 x i32].
/// \param __m0
///    A pointer to a 16-bit mask.
/// \param __m1
///    A pointer to a 16-bit mask.
static __inline__ void __DEFAULT_FN_ATTRS
_mm512_2intersect_epi32(__m512i __a, __m512i __b, __mmask16 *__m0, __mmask16 *__m1) {
  __builtin_ia32_vp2intersect_d_512((__v16si)__a, (__v16si)__b, __m0, __m1);
}

/// Store, in an even/odd pair of mask registers, the indicators of the
/// locations of value matches between quadwords in operands __a and __b.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VP2INTERSECTQ </c> instruction.
///
/// \param __a
///    A 512-bit vector of [8 x i64].
/// \param __b
///    A 512-bit vector of [8 x i64].
/// \param __m0
///    A pointer to an 8-bit mask.
/// \param __m1
///    A pointer to an 8-bit mask.
static __inline__ void __DEFAULT_FN_ATTRS
_mm512_2intersect_epi64(__m512i __a, __m512i __b, __mmask8 *__m0, __mmask8 *__m1) {
  __builtin_ia32_vp2intersect_q_512((__v8di)__a, (__v8di)__b, __m0, __m1);
}

#undef __DEFAULT_FN_ATTRS

#endif
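Per the doc comments, _mm512_2intersect_epi32 writes two masks at once: *__m0 flags the lanes of __a whose value occurs anywhere in __b, and *__m1 flags the lanes of __b whose value occurs anywhere in __a. A minimal sketch, assuming a CPU and toolchain with AVX512F and AVX512VP2INTERSECT support (-mavx512vp2intersect); the expected values in the comment follow from that reading:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m512i a = _mm512_set1_epi32(7);  /* every lane holds 7 */
  __m512i b = _mm512_setr_epi32(7, 1, 2, 3, 4, 5, 6, 8,
                                9, 10, 11, 12, 13, 14, 15, 16);
  __mmask16 ka, kb;
  _mm512_2intersect_epi32(a, b, &ka, &kb);
  printf("ka=%#x kb=%#x\n", (unsigned)ka, (unsigned)kb); /* ka=0xffff kb=0x1 */
  return 0;
}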
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/avx512vpopcntdqvlintrin.h
ADDED
@@ -0,0 +1,95 @@
/*===---- avx512vpopcntdqintrin.h - AVX512VPOPCNTDQ intrinsics -------------===
 *
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __IMMINTRIN_H
#error \
    "Never use <avx512vpopcntdqvlintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __AVX512VPOPCNTDQVLINTRIN_H
#define __AVX512VPOPCNTDQVLINTRIN_H

/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS128 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512vpopcntdq,avx512vl,no-evex512"), \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512vpopcntdq,avx512vl,no-evex512"), \
                 __min_vector_width__(256)))

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_popcnt_epi64(__m128i __A) {
  return (__m128i)__builtin_ia32_vpopcntq_128((__v2di)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_popcnt_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
  return (__m128i)__builtin_ia32_selectq_128(
      (__mmask8)__U, (__v2di)_mm_popcnt_epi64(__A), (__v2di)__W);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_popcnt_epi64(__mmask8 __U, __m128i __A) {
  return _mm_mask_popcnt_epi64((__m128i)_mm_setzero_si128(), __U, __A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_popcnt_epi32(__m128i __A) {
  return (__m128i)__builtin_ia32_vpopcntd_128((__v4si)__A);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_popcnt_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
  return (__m128i)__builtin_ia32_selectd_128(
      (__mmask8)__U, (__v4si)_mm_popcnt_epi32(__A), (__v4si)__W);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_popcnt_epi32(__mmask8 __U, __m128i __A) {
  return _mm_mask_popcnt_epi32((__m128i)_mm_setzero_si128(), __U, __A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_popcnt_epi64(__m256i __A) {
  return (__m256i)__builtin_ia32_vpopcntq_256((__v4di)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_popcnt_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
  return (__m256i)__builtin_ia32_selectq_256(
      (__mmask8)__U, (__v4di)_mm256_popcnt_epi64(__A), (__v4di)__W);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_popcnt_epi64(__mmask8 __U, __m256i __A) {
  return _mm256_mask_popcnt_epi64((__m256i)_mm256_setzero_si256(), __U, __A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_popcnt_epi32(__m256i __A) {
  return (__m256i)__builtin_ia32_vpopcntd_256((__v8si)__A);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_popcnt_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
  return (__m256i)__builtin_ia32_selectd_256(
      (__mmask8)__U, (__v8si)_mm256_popcnt_epi32(__A), (__v8si)__W);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_popcnt_epi32(__mmask8 __U, __m256i __A) {
  return _mm256_mask_popcnt_epi32((__m256i)_mm256_setzero_si256(), __U, __A);
}

#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256

#endif
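The same mask/maskz layering appears here: the unmasked form calls the popcount builtin, the mask form wraps it in a lane select, and maskz simply delegates to the mask form over a zero vector. Quick usage, assuming -mavx512vpopcntdq -mavx512vl:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256i v = _mm256_set1_epi32(0xFF);         /* 8 set bits per lane */
  __m256i c = _mm256_popcnt_epi32(v);
  printf("%d\n", _mm256_extract_epi32(c, 0));  /* prints 8 */
  return 0;
}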
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/builtins.h
ADDED
@@ -0,0 +1,19 @@
/*===---- builtins.h - Standard header for extra builtins -----------------===*\
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
\*===----------------------------------------------------------------------===*/

/// Some legacy compilers have builtin definitions in a file named builtins.h.
/// This header file has been added to allow compatibility with code that was
/// written for those compilers. Code may have an include line for this file,
/// so an effectively empty file with this name is provided to avoid an error.
#ifndef __BUILTINS_H
#define __BUILTINS_H

#if defined(__MVS__) && __has_include_next(<builtins.h>)
#include_next <builtins.h>
#endif /* __MVS__ */
#endif /* __BUILTINS_H */
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/cmpccxaddintrin.h
ADDED
@@ -0,0 +1,70 @@
/*===--------------- cmpccxaddintrin.h - CMPCCXADD intrinsics--------------===
 *
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __X86GPRINTRIN_H
#error \
    "Never use <cmpccxaddintrin.h> directly; include <x86gprintrin.h> instead."
#endif // __X86GPRINTRIN_H

#ifndef __CMPCCXADDINTRIN_H
#define __CMPCCXADDINTRIN_H
#ifdef __x86_64__

typedef enum {
  _CMPCCX_O,   /* Overflow. */
  _CMPCCX_NO,  /* No overflow. */
  _CMPCCX_B,   /* Below. */
  _CMPCCX_NB,  /* Not below. */
  _CMPCCX_Z,   /* Zero. */
  _CMPCCX_NZ,  /* Not zero. */
  _CMPCCX_BE,  /* Below or equal. */
  _CMPCCX_NBE, /* Neither below nor equal. */
  _CMPCCX_S,   /* Sign. */
  _CMPCCX_NS,  /* No sign. */
  _CMPCCX_P,   /* Parity. */
  _CMPCCX_NP,  /* No parity. */
  _CMPCCX_L,   /* Less. */
  _CMPCCX_NL,  /* Not less. */
  _CMPCCX_LE,  /* Less or equal. */
  _CMPCCX_NLE, /* Neither less nor equal. */
} _CMPCCX_ENUM;

/// Compares the value at memory location __A with the value of __B. If the
/// specified condition __D is met, the third operand __C is added to the
/// value at __A and the sum is written back to __A; otherwise the value at
/// __A is unchanged. The return value is the original value at __A.
///
/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the \c CMPCCXADD instructions.
///
/// \param __A
///    A pointer specifying the memory address.
///
/// \param __B
///    An integer operand.
///
/// \param __C
///    An integer operand.
///
/// \param __D
///    The specified condition.
///
/// \returns An integer which is the original value at the first operand.

#define _cmpccxadd_epi32(__A, __B, __C, __D) \
  ((int)(__builtin_ia32_cmpccxadd32((void *)(__A), (int)(__B), (int)(__C), \
                                    (int)(__D))))

#define _cmpccxadd_epi64(__A, __B, __C, __D) \
  ((long long)(__builtin_ia32_cmpccxadd64((void *)(__A), (long long)(__B), \
                                          (long long)(__C), (int)(__D))))

#endif // __x86_64__
#endif // __CMPCCXADDINTRIN_H
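_cmpccxadd_epi32 is a conditional fetch-and-add: it always returns the old value at *__A, and adds __C to it only when comparing that old value against __B satisfies the condition __D. A hedged sketch assuming an x86-64 toolchain with -mcmpccxadd; the counter framing is illustrative only:

#include <x86gprintrin.h>

/* Decrement a counter only while it is still positive: if the old value
 * is greater than 0 (_CMPCCX_NLE, "neither less nor equal"), add -1.
 * Returns the value observed before the update. */
static inline int try_take(int *ctr) {
  return (int)_cmpccxadd_epi32(ctr, 0, -1, _CMPCCX_NLE);
}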
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/cpuid.h
ADDED
@@ -0,0 +1,351 @@
/*===---- cpuid.h - X86 cpu model detection --------------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __CPUID_H
#define __CPUID_H

#if !defined(__x86_64__) && !defined(__i386__)
#error this header is for x86 only
#endif

/* Responses identification request with %eax 0 */
/* AMD: "AuthenticAMD" */
#define signature_AMD_ebx 0x68747541
#define signature_AMD_edx 0x69746e65
#define signature_AMD_ecx 0x444d4163
/* CENTAUR: "CentaurHauls" */
#define signature_CENTAUR_ebx 0x746e6543
#define signature_CENTAUR_edx 0x48727561
#define signature_CENTAUR_ecx 0x736c7561
/* CYRIX: "CyrixInstead" */
#define signature_CYRIX_ebx 0x69727943
#define signature_CYRIX_edx 0x736e4978
#define signature_CYRIX_ecx 0x64616574
/* HYGON: "HygonGenuine" */
#define signature_HYGON_ebx 0x6f677948
#define signature_HYGON_edx 0x6e65476e
#define signature_HYGON_ecx 0x656e6975
/* INTEL: "GenuineIntel" */
#define signature_INTEL_ebx 0x756e6547
#define signature_INTEL_edx 0x49656e69
#define signature_INTEL_ecx 0x6c65746e
/* TM1: "TransmetaCPU" */
#define signature_TM1_ebx 0x6e617254
#define signature_TM1_edx 0x74656d73
#define signature_TM1_ecx 0x55504361
/* TM2: "GenuineTMx86" */
#define signature_TM2_ebx 0x756e6547
#define signature_TM2_edx 0x54656e69
#define signature_TM2_ecx 0x3638784d
/* NSC: "Geode by NSC" */
#define signature_NSC_ebx 0x646f6547
#define signature_NSC_edx 0x79622065
#define signature_NSC_ecx 0x43534e20
/* NEXGEN: "NexGenDriven" */
#define signature_NEXGEN_ebx 0x4778654e
#define signature_NEXGEN_edx 0x72446e65
#define signature_NEXGEN_ecx 0x6e657669
/* RISE: "RiseRiseRise" */
#define signature_RISE_ebx 0x65736952
#define signature_RISE_edx 0x65736952
#define signature_RISE_ecx 0x65736952
/* SIS: "SiS SiS SiS " */
#define signature_SIS_ebx 0x20536953
#define signature_SIS_edx 0x20536953
#define signature_SIS_ecx 0x20536953
/* UMC: "UMC UMC UMC " */
#define signature_UMC_ebx 0x20434d55
#define signature_UMC_edx 0x20434d55
#define signature_UMC_ecx 0x20434d55
/* VIA: "VIA VIA VIA " */
#define signature_VIA_ebx 0x20414956
#define signature_VIA_edx 0x20414956
#define signature_VIA_ecx 0x20414956
/* VORTEX: "Vortex86 SoC" */
#define signature_VORTEX_ebx 0x74726f56
#define signature_VORTEX_edx 0x36387865
#define signature_VORTEX_ecx 0x436f5320

/* Features in %ecx for leaf 1 */
#define bit_SSE3 0x00000001
#define bit_PCLMULQDQ 0x00000002
#define bit_PCLMUL bit_PCLMULQDQ /* for gcc compat */
#define bit_DTES64 0x00000004
#define bit_MONITOR 0x00000008
#define bit_DSCPL 0x00000010
#define bit_VMX 0x00000020
#define bit_SMX 0x00000040
#define bit_EIST 0x00000080
#define bit_TM2 0x00000100
#define bit_SSSE3 0x00000200
#define bit_CNXTID 0x00000400
#define bit_FMA 0x00001000
#define bit_CMPXCHG16B 0x00002000
#define bit_xTPR 0x00004000
#define bit_PDCM 0x00008000
#define bit_PCID 0x00020000
#define bit_DCA 0x00040000
#define bit_SSE41 0x00080000
#define bit_SSE4_1 bit_SSE41 /* for gcc compat */
#define bit_SSE42 0x00100000
#define bit_SSE4_2 bit_SSE42 /* for gcc compat */
#define bit_x2APIC 0x00200000
#define bit_MOVBE 0x00400000
#define bit_POPCNT 0x00800000
#define bit_TSCDeadline 0x01000000
#define bit_AESNI 0x02000000
#define bit_AES bit_AESNI /* for gcc compat */
#define bit_XSAVE 0x04000000
#define bit_OSXSAVE 0x08000000
#define bit_AVX 0x10000000
#define bit_F16C 0x20000000
#define bit_RDRND 0x40000000

/* Features in %edx for leaf 1 */
#define bit_FPU 0x00000001
#define bit_VME 0x00000002
#define bit_DE 0x00000004
#define bit_PSE 0x00000008
#define bit_TSC 0x00000010
#define bit_MSR 0x00000020
#define bit_PAE 0x00000040
#define bit_MCE 0x00000080
#define bit_CX8 0x00000100
#define bit_CMPXCHG8B bit_CX8 /* for gcc compat */
#define bit_APIC 0x00000200
#define bit_SEP 0x00000800
#define bit_MTRR 0x00001000
#define bit_PGE 0x00002000
#define bit_MCA 0x00004000
#define bit_CMOV 0x00008000
#define bit_PAT 0x00010000
#define bit_PSE36 0x00020000
#define bit_PSN 0x00040000
#define bit_CLFSH 0x00080000
#define bit_DS 0x00200000
#define bit_ACPI 0x00400000
#define bit_MMX 0x00800000
#define bit_FXSR 0x01000000
#define bit_FXSAVE bit_FXSR /* for gcc compat */
#define bit_SSE 0x02000000
#define bit_SSE2 0x04000000
#define bit_SS 0x08000000
#define bit_HTT 0x10000000
#define bit_TM 0x20000000
#define bit_PBE 0x80000000

/* Features in %ebx for leaf 7 sub-leaf 0 */
#define bit_FSGSBASE 0x00000001
#define bit_SGX 0x00000004
#define bit_BMI 0x00000008
#define bit_HLE 0x00000010
#define bit_AVX2 0x00000020
#define bit_SMEP 0x00000080
#define bit_BMI2 0x00000100
#define bit_ENH_MOVSB 0x00000200
#define bit_INVPCID 0x00000400
#define bit_RTM 0x00000800
#define bit_MPX 0x00004000
#define bit_AVX512F 0x00010000
#define bit_AVX512DQ 0x00020000
#define bit_RDSEED 0x00040000
#define bit_ADX 0x00080000
#define bit_AVX512IFMA 0x00200000
#define bit_CLFLUSHOPT 0x00800000
#define bit_CLWB 0x01000000
#define bit_AVX512PF 0x04000000
#define bit_AVX512ER 0x08000000
#define bit_AVX512CD 0x10000000
#define bit_SHA 0x20000000
#define bit_AVX512BW 0x40000000
#define bit_AVX512VL 0x80000000

/* Features in %ecx for leaf 7 sub-leaf 0 */
#define bit_PREFTCHWT1 0x00000001
#define bit_AVX512VBMI 0x00000002
#define bit_PKU 0x00000004
#define bit_OSPKE 0x00000010
#define bit_WAITPKG 0x00000020
#define bit_AVX512VBMI2 0x00000040
#define bit_SHSTK 0x00000080
#define bit_GFNI 0x00000100
#define bit_VAES 0x00000200
#define bit_VPCLMULQDQ 0x00000400
#define bit_AVX512VNNI 0x00000800
#define bit_AVX512BITALG 0x00001000
#define bit_AVX512VPOPCNTDQ 0x00004000
#define bit_RDPID 0x00400000
#define bit_CLDEMOTE 0x02000000
#define bit_MOVDIRI 0x08000000
#define bit_MOVDIR64B 0x10000000
#define bit_ENQCMD 0x20000000

/* Features in %edx for leaf 7 sub-leaf 0 */
#define bit_AVX5124VNNIW 0x00000004
#define bit_AVX5124FMAPS 0x00000008
#define bit_UINTR 0x00000020
#define bit_SERIALIZE 0x00004000
#define bit_TSXLDTRK 0x00010000
#define bit_PCONFIG 0x00040000
#define bit_IBT 0x00100000
#define bit_AMXBF16 0x00400000
#define bit_AVX512FP16 0x00800000
#define bit_AMXTILE 0x01000000
#define bit_AMXINT8 0x02000000

/* Features in %eax for leaf 7 sub-leaf 1 */
#define bit_SHA512 0x00000001
#define bit_SM3 0x00000002
#define bit_SM4 0x00000004
#define bit_RAOINT 0x00000008
#define bit_AVXVNNI 0x00000010
#define bit_AVX512BF16 0x00000020
#define bit_CMPCCXADD 0x00000080
#define bit_AMXFP16 0x00200000
#define bit_HRESET 0x00400000
#define bit_AVXIFMA 0x00800000

/* Features in %edx for leaf 7 sub-leaf 1 */
#define bit_AVXVNNIINT8 0x00000010
#define bit_AVXNECONVERT 0x00000020
#define bit_AMXCOMPLEX 0x00000100
#define bit_AVXVNNIINT16 0x00000400
#define bit_PREFETCHI 0x00004000
#define bit_USERMSR 0x00008000
#define bit_AVX10 0x00080000
#define bit_APXF 0x00200000

/* Features in %eax for leaf 13 sub-leaf 1 */
#define bit_XSAVEOPT 0x00000001
#define bit_XSAVEC 0x00000002
#define bit_XSAVES 0x00000008

/* Features in %eax for leaf 0x14 sub-leaf 0 */
#define bit_PTWRITE 0x00000010

/* Features in %ecx for leaf 0x80000001 */
#define bit_LAHF_LM 0x00000001
#define bit_ABM 0x00000020
#define bit_LZCNT bit_ABM /* for gcc compat */
#define bit_SSE4a 0x00000040
#define bit_PRFCHW 0x00000100
#define bit_XOP 0x00000800
#define bit_LWP 0x00008000
#define bit_FMA4 0x00010000
#define bit_TBM 0x00200000
#define bit_MWAITX 0x20000000

/* Features in %edx for leaf 0x80000001 */
#define bit_MMXEXT 0x00400000
#define bit_LM 0x20000000
#define bit_3DNOWP 0x40000000
#define bit_3DNOW 0x80000000

/* Features in %ebx for leaf 0x80000008 */
#define bit_CLZERO 0x00000001
#define bit_RDPRU 0x00000010
#define bit_WBNOINVD 0x00000200

/* Features in %ebx for leaf 0x24 */
#define bit_AVX10_256 0x00020000
#define bit_AVX10_512 0x00040000

#ifdef __i386__
#define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \
    __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
                  : "0"(__leaf))

#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
    __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \
                  : "0"(__leaf), "2"(__count))
#else
/* x86-64 uses %rbx as the base register, so preserve it. */
#define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \
    __asm("  xchgq  %%rbx,%q1\n" \
          "  cpuid\n" \
          "  xchgq  %%rbx,%q1" \
        : "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
        : "0"(__leaf))

#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
    __asm("  xchgq  %%rbx,%q1\n" \
          "  cpuid\n" \
          "  xchgq  %%rbx,%q1" \
        : "=a"(__eax), "=r" (__ebx), "=c"(__ecx), "=d"(__edx) \
        : "0"(__leaf), "2"(__count))
#endif

static __inline unsigned int __get_cpuid_max (unsigned int __leaf,
                                              unsigned int *__sig)
{
  unsigned int __eax, __ebx, __ecx, __edx;
#ifdef __i386__
  int __cpuid_supported;

  __asm("  pushfl\n"
        "  popl   %%eax\n"
        "  movl   %%eax,%%ecx\n"
        "  xorl   $0x00200000,%%eax\n"
        "  pushl  %%eax\n"
        "  popfl\n"
        "  pushfl\n"
        "  popl   %%eax\n"
        "  movl   $0,%0\n"
        "  cmpl   %%eax,%%ecx\n"
        "  je     1f\n"
        "  movl   $1,%0\n"
        "1:"
        : "=r" (__cpuid_supported) : : "eax", "ecx");
  if (!__cpuid_supported)
    return 0;
#endif

  __cpuid(__leaf, __eax, __ebx, __ecx, __edx);
  if (__sig)
    *__sig = __ebx;
  return __eax;
}

static __inline int __get_cpuid (unsigned int __leaf, unsigned int *__eax,
                                 unsigned int *__ebx, unsigned int *__ecx,
                                 unsigned int *__edx)
{
  unsigned int __max_leaf = __get_cpuid_max(__leaf & 0x80000000, 0);

  if (__max_leaf == 0 || __max_leaf < __leaf)
    return 0;

  __cpuid(__leaf, *__eax, *__ebx, *__ecx, *__edx);
  return 1;
}

static __inline int __get_cpuid_count (unsigned int __leaf,
                                       unsigned int __subleaf,
                                       unsigned int *__eax, unsigned int *__ebx,
                                       unsigned int *__ecx, unsigned int *__edx)
{
  unsigned int __max_leaf = __get_cpuid_max(__leaf & 0x80000000, 0);

  if (__max_leaf == 0 || __max_leaf < __leaf)
    return 0;

  __cpuid_count(__leaf, __subleaf, *__eax, *__ebx, *__ecx, *__edx);
  return 1;
}

// In some configurations, __cpuidex is defined as a builtin (primarily
// -fms-extensions) which will conflict with the __cpuidex definition below.
#if !(__has_builtin(__cpuidex))
static __inline void __cpuidex(int __cpu_info[4], int __leaf, int __subleaf) {
  __cpuid_count(__leaf, __subleaf, __cpu_info[0], __cpu_info[1], __cpu_info[2],
                __cpu_info[3]);
}
#endif

#endif /* __CPUID_H */
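__get_cpuid_count wraps the leaf/sub-leaf handshake: it checks the requested leaf against __get_cpuid_max before issuing CPUID and returns 0 when the leaf is unsupported. Combined with the bit_* masks above, feature detection is a two-liner; here using bit_AVX512DQ, which this header places in %ebx of leaf 7 sub-leaf 0:

#include <cpuid.h>
#include <stdio.h>

int main(void) {
  unsigned int eax, ebx, ecx, edx;
  if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) && (ebx & bit_AVX512DQ))
    puts("AVX512DQ supported");
  else
    puts("AVX512DQ not supported");
  return 0;
}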
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/cuda_wrappers/algorithm
ADDED
@@ -0,0 +1,116 @@
/*===---- algorithm - CUDA wrapper for <algorithm> -------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __CLANG_CUDA_WRAPPERS_ALGORITHM
#define __CLANG_CUDA_WRAPPERS_ALGORITHM

// This header defines __device__ overloads of std::min/max.
//
// Ideally we'd declare these functions only if we're <= C++11. In C++14,
// these functions are constexpr, and so are implicitly __host__ __device__.
//
// However, the compiler being in C++14 mode does not imply that the standard
// library supports C++14. There is no macro we can test to check that the
// stdlib has constexpr std::min/max. Thus we have to unconditionally define
// our device overloads.
//
// A host+device function cannot be overloaded, and a constexpr function
// implicitly becomes host+device if there's no explicit host or device
// overload preceding it. So the simple thing to do would be to declare our
// device min/max overloads, and then #include_next <algorithm>. This way our
// device overloads would come first, and so if we have a C++14 stdlib, its
// min/max won't become host+device and conflict with our device overloads.
//
// But that also doesn't work. libstdc++ is evil and declares std::min/max in
// an internal header that is included *before* <algorithm>. Thus by the time
// we're inside of this file, std::min/max may already have been declared, and
// thus we can't prevent them from becoming host+device if they're constexpr.
//
// Therefore we perpetrate the following hack: We mark our __device__ overloads
// with __attribute__((enable_if(true, ""))). This causes the signature of the
// function to change without changing anything else about it. (Except that
// overload resolution will prefer it over the __host__ __device__ version
// rather than considering them equally good).

#include_next <algorithm>

// We need to define these overloads in exactly the namespace our standard
// library uses (including the right inline namespace), otherwise they won't be
// picked up by other functions in the standard library (e.g. functions in
// <complex>). Thus the ugliness below.
#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
_LIBCPP_BEGIN_NAMESPACE_STD
#else
namespace std {
#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
_GLIBCXX_BEGIN_NAMESPACE_VERSION
#endif
#endif

#pragma push_macro("_CPP14_CONSTEXPR")
#if __cplusplus >= 201402L
#define _CPP14_CONSTEXPR constexpr
#else
#define _CPP14_CONSTEXPR
#endif

template <class __T, class __Cmp>
__attribute__((enable_if(true, "")))
inline _CPP14_CONSTEXPR __host__ __device__ const __T &
max(const __T &__a, const __T &__b, __Cmp __cmp) {
  return __cmp(__a, __b) ? __b : __a;
}

template <class __T>
__attribute__((enable_if(true, "")))
inline _CPP14_CONSTEXPR __host__ __device__ const __T &
max(const __T &__a, const __T &__b) {
  return __a < __b ? __b : __a;
}

template <class __T, class __Cmp>
__attribute__((enable_if(true, "")))
inline _CPP14_CONSTEXPR __host__ __device__ const __T &
min(const __T &__a, const __T &__b, __Cmp __cmp) {
  return __cmp(__b, __a) ? __b : __a;
}

template <class __T>
__attribute__((enable_if(true, "")))
inline _CPP14_CONSTEXPR __host__ __device__ const __T &
min(const __T &__a, const __T &__b) {
  return __b < __a ? __b : __a;
}

#pragma pop_macro("_CPP14_CONSTEXPR")

#ifdef _LIBCPP_END_NAMESPACE_STD
_LIBCPP_END_NAMESPACE_STD
#else
#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
_GLIBCXX_END_NAMESPACE_VERSION
#endif
} // namespace std
#endif

#endif // __CLANG_CUDA_WRAPPERS_ALGORITHM
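A host-only sketch of the enable_if(true, "") trick the comment block describes (clang-specific attribute; the function name `pick` is illustrative, not from the header). The attribute makes the first overload a distinct signature that clang prefers over an otherwise identical candidate:

// Requires clang: enable_if is a clang attribute, not standard C++.
__attribute__((enable_if(true, ""))) inline int pick(int __x) { return __x + 1; }
inline int pick(int __x) { return __x - 1; }

int demo() {
  return pick(41);  // overload resolution prefers the attributed version: 42
}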
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/cuda_wrappers/complex
ADDED
@@ -0,0 +1,90 @@
/*===---- complex - CUDA wrapper for <complex> ------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __CLANG_CUDA_WRAPPERS_COMPLEX
#define __CLANG_CUDA_WRAPPERS_COMPLEX

// Wrapper around <complex> that forces its functions to be __host__
// __device__.

// First, include host-only headers we think are likely to be included by
// <complex>, so that the pragma below only applies to <complex> itself.
#if __cplusplus >= 201103L
#include <type_traits>
#endif
#include <stdexcept>
#include <cmath>
#include <sstream>

// Next, include our <algorithm> wrapper, to ensure that device overloads of
// std::min/max are available.
#include <algorithm>

#pragma clang force_cuda_host_device begin

// When compiling for device, ask libstdc++ to use its own implementations of
// complex functions, rather than calling builtins (which resolve to library
// functions that don't exist when compiling CUDA device code).
//
// This is a little dicey, because it causes libstdc++ to define a different
// set of overloads on host and device.
//
//   // Present only when compiling for host.
//   __host__ __device__ complex<float> sin(const complex<float>& x) {
//     return __builtin_csinf(x);
//   }
//
//   // Present when compiling for host and for device.
//   template <typename T>
//   __host__ __device__ complex<T> sin(const complex<T>& x) {
//     return complex<T>(sin(x.real()) * cosh(x.imag()),
//                       cos(x.real()) * sinh(x.imag()));
//   }
//
// This is safe because when compiling for device, all function calls in
// __host__ code to sin() will still resolve to *something*, even if they don't
// resolve to the same function as they resolve to when compiling for host. We
// don't care that they don't resolve to the right function because we won't
// codegen this host code when compiling for device.

#pragma push_macro("_GLIBCXX_USE_C99_COMPLEX")
#pragma push_macro("_GLIBCXX_USE_C99_COMPLEX_TR1")
#define _GLIBCXX_USE_C99_COMPLEX 0
#define _GLIBCXX_USE_C99_COMPLEX_TR1 0

// Work around a compatibility issue with libstdc++ 11.1.0
// https://bugs.llvm.org/show_bug.cgi?id=50383
#pragma push_macro("__failed_assertion")
#if _GLIBCXX_RELEASE == 11
#define __failed_assertion __cuda_failed_assertion
#endif

#include_next <complex>

#pragma pop_macro("__failed_assertion")
#pragma pop_macro("_GLIBCXX_USE_C99_COMPLEX_TR1")
#pragma pop_macro("_GLIBCXX_USE_C99_COMPLEX")

#pragma clang force_cuda_host_device end

#endif // include guard
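The wrapper leans on #pragma push_macro/pop_macro to scope its _GLIBCXX_USE_C99_COMPLEX overrides to the single #include_next. A standalone sketch of that save/override/restore pattern (the macro name FEATURE_FLAG is illustrative):

#define FEATURE_FLAG 1               // whatever definition was in effect before

#pragma push_macro("FEATURE_FLAG")   // save the current definition
#undef FEATURE_FLAG
#define FEATURE_FLAG 0               // force a value for the code in between
static_assert(FEATURE_FLAG == 0, "override visible here");
#pragma pop_macro("FEATURE_FLAG")    // restore the saved definition

static_assert(FEATURE_FLAG == 1, "original definition restored");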
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/f16cintrin.h
ADDED
@@ -0,0 +1,162 @@
/*===---- f16cintrin.h - F16C intrinsics -----------------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#if !defined __IMMINTRIN_H
#error "Never use <f16cintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __F16CINTRIN_H
#define __F16CINTRIN_H

/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS128 \
  __attribute__((__always_inline__, __nodebug__, __target__("f16c"), __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 \
  __attribute__((__always_inline__, __nodebug__, __target__("f16c"), __min_vector_width__(256)))

/* NOTE: Intel documents the 128-bit versions of these as being in emmintrin.h,
 * but that's because icc can emulate these without f16c using a library call.
 * Since we don't do that let's leave these in f16cintrin.h.
 */

/// Converts a 16-bit half-precision float value into a 32-bit float
///    value.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTPH2PS </c> instruction.
///
/// \param __a
///    A 16-bit half-precision float value.
/// \returns The converted 32-bit float value.
static __inline float __DEFAULT_FN_ATTRS128
_cvtsh_ss(unsigned short __a)
{
  __v8hi __v = {(short)__a, 0, 0, 0, 0, 0, 0, 0};
  __v4sf __r = __builtin_ia32_vcvtph2ps(__v);
  return __r[0];
}

/// Converts a 32-bit single-precision float value to a 16-bit
///    half-precision float value.
///
/// \headerfile <x86intrin.h>
///
/// \code
/// unsigned short _cvtss_sh(float a, const int imm);
/// \endcode
///
/// This intrinsic corresponds to the <c> VCVTPS2PH </c> instruction.
///
/// \param a
///    A 32-bit single-precision float value to be converted to a 16-bit
///    half-precision float value.
/// \param imm
///    An immediate value controlling rounding using bits [2:0]: \n
///    000: Nearest \n
///    001: Down \n
///    010: Up \n
///    011: Truncate \n
///    1XX: Use MXCSR.RC for rounding
/// \returns The converted 16-bit half-precision float value.
#define _cvtss_sh(a, imm) __extension__ ({ \
  (unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
                                                     (imm)))[0]); })

/// Converts a 128-bit vector containing 32-bit float values into a
///    128-bit vector containing 16-bit half-precision float values.
///
/// \headerfile <x86intrin.h>
///
/// \code
/// __m128i _mm_cvtps_ph(__m128 a, const int imm);
/// \endcode
///
/// This intrinsic corresponds to the <c> VCVTPS2PH </c> instruction.
///
/// \param a
///    A 128-bit vector containing 32-bit float values.
/// \param imm
///    An immediate value controlling rounding using bits [2:0]: \n
///    000: Nearest \n
///    001: Down \n
///    010: Up \n
///    011: Truncate \n
///    1XX: Use MXCSR.RC for rounding
/// \returns A 128-bit vector containing converted 16-bit half-precision float
///    values. The lower 64 bits are used to store the converted 16-bit
///    half-precision floating-point values.
#define _mm_cvtps_ph(a, imm) \
  ((__m128i)__builtin_ia32_vcvtps2ph((__v4sf)(__m128)(a), (imm)))

/// Converts a 128-bit vector containing 16-bit half-precision float
///    values into a 128-bit vector containing 32-bit float values.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTPH2PS </c> instruction.
///
/// \param __a
///    A 128-bit vector containing 16-bit half-precision float values. The lower
///    64 bits are used in the conversion.
/// \returns A 128-bit vector of [4 x float] containing converted float values.
static __inline __m128 __DEFAULT_FN_ATTRS128
_mm_cvtph_ps(__m128i __a)
{
  return (__m128)__builtin_ia32_vcvtph2ps((__v8hi)__a);
}

/// Converts a 256-bit vector of [8 x float] into a 128-bit vector
///    containing 16-bit half-precision float values.
///
/// \headerfile <x86intrin.h>
///
/// \code
/// __m128i _mm256_cvtps_ph(__m256 a, const int imm);
/// \endcode
///
/// This intrinsic corresponds to the <c> VCVTPS2PH </c> instruction.
///
/// \param a
///    A 256-bit vector containing 32-bit single-precision float values to be
///    converted to 16-bit half-precision float values.
/// \param imm
///    An immediate value controlling rounding using bits [2:0]: \n
///    000: Nearest \n
///    001: Down \n
///    010: Up \n
///    011: Truncate \n
///    1XX: Use MXCSR.RC for rounding
/// \returns A 128-bit vector containing the converted 16-bit half-precision
///    float values.
#define _mm256_cvtps_ph(a, imm) \
 ((__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm)))

/// Converts a 128-bit vector containing 16-bit half-precision float
///    values into a 256-bit vector of [8 x float].
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> VCVTPH2PS </c> instruction.
///
/// \param __a
///    A 128-bit vector containing 16-bit half-precision float values to be
///    converted to 32-bit single-precision float values.
/// \returns A vector of [8 x float] containing the converted 32-bit
///    single-precision float values.
static __inline __m256 __DEFAULT_FN_ATTRS256
_mm256_cvtph_ps(__m128i __a)
{
  return (__m256)__builtin_ia32_vcvtph2ps256((__v8hi)__a);
}

#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256

#endif /* __F16CINTRIN_H */
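A hypothetical round-trip sketch using the scalar conversions above (not part of the diff): compile for an F16C-capable x86 target, e.g. with clang -mf16c. Expect a small precision loss from the narrowing conversion.

#include <immintrin.h>  // pulls in f16cintrin.h when F16C is enabled
#include <cstdio>

int main() {
  float in = 3.14159f;
  unsigned short h = _cvtss_sh(in, 0);  // imm 0 = round to nearest
  float out = _cvtsh_ss(h);             // widen the half back to a float
  std::printf("%f -> 0x%04x -> %f\n", in, h, out);
}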
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/hexagon_circ_brev_intrinsics.h
ADDED
@@ -0,0 +1,298 @@
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef _HEXAGON_CIRC_BREV_INTRINSICS_H_
#define _HEXAGON_CIRC_BREV_INTRINSICS_H_ 1

#include <hexagon_protos.h>
#include <stdint.h>

/* Circular Load */
/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_circ_load_update_D(Word64 dst, Word64 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_circ_load_update_D(dest,ptr,incr,bufsize,K)  \
    { ptr = (int64_t *) HEXAGON_circ_ldd (ptr, &(dest), ((((K)+1)<<24)|((bufsize)<<3)), ((incr)*8)); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_circ_load_update_W(Word32 dst, Word32 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_circ_load_update_W(dest,ptr,incr,bufsize,K)  \
    { ptr = (int *) HEXAGON_circ_ldw (ptr, &(dest), (((K)<<24)|((bufsize)<<2)), ((incr)*4)); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_circ_load_update_H(Word16 dst, Word16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_circ_load_update_H(dest,ptr,incr,bufsize,K)  \
    { ptr = (int16_t *) HEXAGON_circ_ldh (ptr, &(dest), ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_circ_load_update_UH( UWord16 dst, UWord16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_circ_load_update_UH(dest,ptr,incr,bufsize,K) \
    { ptr = (uint16_t *) HEXAGON_circ_lduh (ptr, &(dest), ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_circ_load_update_B(Word8 dst, Word8 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_circ_load_update_B(dest,ptr,incr,bufsize,K)  \
    { ptr = (int8_t *) HEXAGON_circ_ldb (ptr, &(dest), ((((K)-2)<<24)|(bufsize)), incr); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_circ_load_update_UB(UWord8 dst, UWord8 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_circ_load_update_UB(dest,ptr,incr,bufsize,K) \
    { ptr = (uint8_t *) HEXAGON_circ_ldub (ptr, &(dest), ((((K)-2)<<24)|(bufsize)), incr); }

/* Circular Store */
/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_circ_store_update_D(Word64 *src, Word64 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_circ_store_update_D(src,ptr,incr,bufsize,K)  \
    { ptr = (int64_t *) HEXAGON_circ_std (ptr, src, ((((K)+1)<<24)|((bufsize)<<3)), ((incr)*8)); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_circ_store_update_W(Word32 *src, Word32 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_circ_store_update_W(src,ptr,incr,bufsize,K)  \
    { ptr = (int *) HEXAGON_circ_stw (ptr, src, (((K)<<24)|((bufsize)<<2)), ((incr)*4)); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_circ_store_update_HL(Word16 *src, Word16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_circ_store_update_HL(src,ptr,incr,bufsize,K) \
    { ptr = (int16_t *) HEXAGON_circ_sth (ptr, src, ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_circ_store_update_HH(Word16 *src, Word16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_circ_store_update_HH(src,ptr,incr,bufsize,K) \
    { ptr = (int16_t *) HEXAGON_circ_sthhi (ptr, src, ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_circ_store_update_B(Word8 *src, Word8 *ptr, UWord32 I4, UWord32 bufsize, UWord64 K)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_circ_store_update_B(src,ptr,incr,bufsize,K)  \
    { ptr = (int8_t *) HEXAGON_circ_stb (ptr, src, ((((K)-2)<<24)|(bufsize)), incr); }


/* Bit Reverse Load */
/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_bitrev_load_update_D(Word64 dst, Word64 *ptr, UWord32 Iu4)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_bitrev_load_update_D(dest,ptr,log2bufsize) \
    { ptr = (int64_t *) HEXAGON_brev_ldd (ptr, &(dest), (1<<(16-((log2bufsize) + 3)))); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_bitrev_load_update_W(Word32 dst, Word32 *ptr, UWord32 Iu4)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_bitrev_load_update_W(dest,ptr,log2bufsize) \
    { ptr = (int *) HEXAGON_brev_ldw (ptr, &(dest), (1<<(16-((log2bufsize) + 2)))); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_bitrev_load_update_H(Word16 dst, Word16 *ptr, UWord32 Iu4)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_bitrev_load_update_H(dest,ptr,log2bufsize) \
    { ptr = (int16_t *) HEXAGON_brev_ldh (ptr, &(dest), (1<<(16-((log2bufsize) + 1)))); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_bitrev_load_update_UH(UWord16 dst, UWord16 *ptr, UWord32 Iu4)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_bitrev_load_update_UH(dest,ptr,log2bufsize) \
    { ptr = (uint16_t *) HEXAGON_brev_lduh (ptr, &(dest), (1<<(16-((log2bufsize) + 1)))); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_bitrev_load_update_B(Word8 dst, Word8 *ptr, UWord32 Iu4)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_bitrev_load_update_B(dest,ptr,log2bufsize) \
    { ptr = (int8_t *) HEXAGON_brev_ldb (ptr, &(dest), (1<<(16-((log2bufsize))))); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_bitrev_load_update_UB(UWord8 dst, UWord8 *ptr, UWord32 Iu4)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_bitrev_load_update_UB(dest,ptr,log2bufsize) \
    { ptr = (uint8_t *) HEXAGON_brev_ldub (ptr, &(dest), (1<<(16-((log2bufsize))))); }

/* Bit Reverse Store */

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_bitrev_store_update_D(Word64 *src, Word64 *ptr, UWord32 Iu4)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_bitrev_store_update_D(src,ptr,log2bufsize) \
    { ptr = (int64_t *) HEXAGON_brev_std (ptr, src, (1<<(16-((log2bufsize) + 3)))); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_bitrev_store_update_W(Word32 *src, Word32 *ptr, UWord32 Iu4)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_bitrev_store_update_W(src,ptr,log2bufsize) \
    { ptr = (int *) HEXAGON_brev_stw (ptr, src, (1<<(16-((log2bufsize) + 2)))); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_bitrev_store_update_HL(Word16 *src, Word16 *ptr, Word32 Iu4)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_bitrev_store_update_HL(src,ptr,log2bufsize) \
    { ptr = (int16_t *) HEXAGON_brev_sth (ptr, src, (1<<(16-((log2bufsize) + 1)))); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_bitrev_store_update_HH(Word16 *src, Word16 *ptr, UWord32 Iu4)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_bitrev_store_update_HH(src,ptr,log2bufsize) \
    { ptr = (int16_t *) HEXAGON_brev_sthhi (ptr, src, (1<<(16-((log2bufsize) + 1)))); }

/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: void Q6_bitrev_store_update_B(Word8 *src, Word8 *ptr, UWord32 Iu4)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#define Q6_bitrev_store_update_B(src,ptr,log2bufsize) \
    { ptr = (int8_t *) HEXAGON_brev_stb (ptr, src, (1<<(16-((log2bufsize))))); }


#define HEXAGON_circ_ldd  __builtin_circ_ldd
#define HEXAGON_circ_ldw  __builtin_circ_ldw
#define HEXAGON_circ_ldh  __builtin_circ_ldh
#define HEXAGON_circ_lduh __builtin_circ_lduh
#define HEXAGON_circ_ldb  __builtin_circ_ldb
#define HEXAGON_circ_ldub __builtin_circ_ldub


#define HEXAGON_circ_std   __builtin_circ_std
#define HEXAGON_circ_stw   __builtin_circ_stw
#define HEXAGON_circ_sth   __builtin_circ_sth
#define HEXAGON_circ_sthhi __builtin_circ_sthhi
#define HEXAGON_circ_stb   __builtin_circ_stb


#define HEXAGON_brev_ldd  __builtin_brev_ldd
#define HEXAGON_brev_ldw  __builtin_brev_ldw
#define HEXAGON_brev_ldh  __builtin_brev_ldh
#define HEXAGON_brev_lduh __builtin_brev_lduh
#define HEXAGON_brev_ldb  __builtin_brev_ldb
#define HEXAGON_brev_ldub __builtin_brev_ldub

#define HEXAGON_brev_std   __builtin_brev_std
#define HEXAGON_brev_stw   __builtin_brev_stw
#define HEXAGON_brev_sth   __builtin_brev_sth
#define HEXAGON_brev_sthhi __builtin_brev_sthhi
#define HEXAGON_brev_stb   __builtin_brev_stb

#ifdef __HVX__
/* ==========================================================================
   Assembly Syntax:       if (Qt) vmem(Rt+#0) = Vs
   C Intrinsic Prototype: void Q6_vmaskedstoreq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs)
   Instruction Type:      COPROC_VMEM
   Execution Slots:       SLOT0
   ========================================================================== */

#define Q6_vmaskedstoreq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstoreq)

/* ==========================================================================
   Assembly Syntax:       if (!Qt) vmem(Rt+#0) = Vs
   C Intrinsic Prototype: void Q6_vmaskedstorenq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs)
   Instruction Type:      COPROC_VMEM
   Execution Slots:       SLOT0
   ========================================================================== */

#define Q6_vmaskedstorenq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstorenq)

/* ==========================================================================
   Assembly Syntax:       if (Qt) vmem(Rt+#0):nt = Vs
   C Intrinsic Prototype: void Q6_vmaskedstorentq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs)
   Instruction Type:      COPROC_VMEM
   Execution Slots:       SLOT0
   ========================================================================== */

#define Q6_vmaskedstorentq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstorentq)

/* ==========================================================================
   Assembly Syntax:       if (!Qt) vmem(Rt+#0):nt = Vs
   C Intrinsic Prototype: void Q6_vmaskedstorentnq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs)
   Instruction Type:      COPROC_VMEM
   Execution Slots:       SLOT0
   ========================================================================== */

#define Q6_vmaskedstorentnq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstorentnq)

#endif


#endif /* #ifndef _HEXAGON_CIRC_BREV_INTRINSICS_H_ */

#ifdef __NOT_DEFINED__
/*** comment block template ***/
/* ==========================================================================
   Assembly Syntax:       Return=instruction()
   C Intrinsic Prototype: ReturnType Intrinsic(ParamType Rs, ParamType Rt)
   Instruction Type:      InstructionType
   Execution Slots:       SLOT0123
   ========================================================================== */
#endif /*** __NOT_DEFINED__ ***/
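A hypothetical usage sketch for the circular-load macros (Hexagon targets only; build with the Hexagon clang toolchain). The function and operand values here are illustrative, not from the header; the K operand feeds the M-register encoding, so consult the Hexagon programmer's manual for the exact legal values for a given buffer size.

#include <hexagon_circ_brev_intrinsics.h>

// Sum a 16-word circular buffer twice: Q6_circ_load_update_W advances `p`
// by one word per call and hardware-wraps it back to the start of `buf`.
int sum_two_passes(int *buf /* 16 int32 samples, suitably aligned */) {
  int *p = buf;
  int v = 0, acc = 0;
  for (int i = 0; i < 32; ++i) {             // two full wraps of 16 entries
    Q6_circ_load_update_W(v, p, 1, 16, 0);   // incr = 1 word, bufsize = 16 words
    acc += v;
  }
  return acc;
}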
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/hexagon_protos.h
ADDED
The diff for this file is too large to render.
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/hvx_hexagon_protos.h
ADDED
The diff for this file is too large to render.
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/intrin0.h
ADDED
@@ -0,0 +1,247 @@
/* ===-------- intrin0.h --------------------------------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

/* Only include this if we're compiling for the Windows platform. */
#ifndef _MSC_VER
#include_next <intrin0.h>
#else

#ifndef __INTRIN0_H
#define __INTRIN0_H

#if defined(__x86_64__) && !defined(__arm64ec__)
#include <adcintrin.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask);
unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask);
void _ReadWriteBarrier(void);

#if defined(__aarch64__) || defined(__arm64ec__)
unsigned int _CountLeadingZeros(unsigned long);
unsigned int _CountLeadingZeros64(unsigned __int64);
unsigned char _InterlockedCompareExchange128_acq(__int64 volatile *_Destination,
                                                 __int64 _ExchangeHigh,
                                                 __int64 _ExchangeLow,
                                                 __int64 *_ComparandResult);
unsigned char _InterlockedCompareExchange128_nf(__int64 volatile *_Destination,
                                                __int64 _ExchangeHigh,
                                                __int64 _ExchangeLow,
                                                __int64 *_ComparandResult);
unsigned char _InterlockedCompareExchange128_rel(__int64 volatile *_Destination,
                                                 __int64 _ExchangeHigh,
                                                 __int64 _ExchangeLow,
                                                 __int64 *_ComparandResult);
#endif

#if defined(__x86_64__) && !defined(__arm64ec__)
unsigned __int64 _umul128(unsigned __int64, unsigned __int64,
                          unsigned __int64 *);
unsigned __int64 __shiftleft128(unsigned __int64 _LowPart,
                                unsigned __int64 _HighPart,
                                unsigned char _Shift);
unsigned __int64 __shiftright128(unsigned __int64 _LowPart,
                                 unsigned __int64 _HighPart,
                                 unsigned char _Shift);
#endif

#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__))
void _mm_pause(void);
#endif

#if defined(__x86_64__) || defined(__aarch64__)
unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination,
                                             __int64 _ExchangeHigh,
                                             __int64 _ExchangeLow,
                                             __int64 *_ComparandResult);
#endif

#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__)
unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask);
unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask);
#endif

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
    defined(__aarch64__)
__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value);
__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask);
#endif

#if defined(__arm__) || defined(__aarch64__) || defined(__arm64ec__)
/*----------------------------------------------------------------------------*\
|* Interlocked Exchange Add
\*----------------------------------------------------------------------------*/
char _InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value);
char _InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value);
char _InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value);
short _InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value);
short _InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value);
short _InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value);
long _InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value);
long _InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value);
long _InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value);
__int64 _InterlockedExchangeAdd64_acq(__int64 volatile *_Addend,
                                      __int64 _Value);
__int64 _InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value);
__int64 _InterlockedExchangeAdd64_rel(__int64 volatile *_Addend,
                                      __int64 _Value);

/*----------------------------------------------------------------------------*\
|* Interlocked Increment
\*----------------------------------------------------------------------------*/
short _InterlockedIncrement16_acq(short volatile *_Value);
short _InterlockedIncrement16_nf(short volatile *_Value);
short _InterlockedIncrement16_rel(short volatile *_Value);
long _InterlockedIncrement_acq(long volatile *_Value);
long _InterlockedIncrement_nf(long volatile *_Value);
long _InterlockedIncrement_rel(long volatile *_Value);
__int64 _InterlockedIncrement64_acq(__int64 volatile *_Value);
__int64 _InterlockedIncrement64_nf(__int64 volatile *_Value);
__int64 _InterlockedIncrement64_rel(__int64 volatile *_Value);

/*----------------------------------------------------------------------------*\
|* Interlocked Decrement
\*----------------------------------------------------------------------------*/
short _InterlockedDecrement16_acq(short volatile *_Value);
short _InterlockedDecrement16_nf(short volatile *_Value);
short _InterlockedDecrement16_rel(short volatile *_Value);
long _InterlockedDecrement_acq(long volatile *_Value);
long _InterlockedDecrement_nf(long volatile *_Value);
long _InterlockedDecrement_rel(long volatile *_Value);
__int64 _InterlockedDecrement64_acq(__int64 volatile *_Value);
__int64 _InterlockedDecrement64_nf(__int64 volatile *_Value);
__int64 _InterlockedDecrement64_rel(__int64 volatile *_Value);

/*----------------------------------------------------------------------------*\
|* Interlocked And
\*----------------------------------------------------------------------------*/
char _InterlockedAnd8_acq(char volatile *_Value, char _Mask);
char _InterlockedAnd8_nf(char volatile *_Value, char _Mask);
char _InterlockedAnd8_rel(char volatile *_Value, char _Mask);
short _InterlockedAnd16_acq(short volatile *_Value, short _Mask);
short _InterlockedAnd16_nf(short volatile *_Value, short _Mask);
short _InterlockedAnd16_rel(short volatile *_Value, short _Mask);
long _InterlockedAnd_acq(long volatile *_Value, long _Mask);
long _InterlockedAnd_nf(long volatile *_Value, long _Mask);
long _InterlockedAnd_rel(long volatile *_Value, long _Mask);
__int64 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask);
__int64 _InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask);
__int64 _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask);

/*----------------------------------------------------------------------------*\
|* Bit Counting and Testing
\*----------------------------------------------------------------------------*/
unsigned char _interlockedbittestandset_acq(long volatile *_BitBase,
                                            long _BitPos);
unsigned char _interlockedbittestandset_nf(long volatile *_BitBase,
                                           long _BitPos);
unsigned char _interlockedbittestandset_rel(long volatile *_BitBase,
                                            long _BitPos);
unsigned char _interlockedbittestandreset_acq(long volatile *_BitBase,
                                              long _BitPos);
unsigned char _interlockedbittestandreset_nf(long volatile *_BitBase,
                                             long _BitPos);
unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase,
                                              long _BitPos);

/*----------------------------------------------------------------------------*\
|* Interlocked Or
\*----------------------------------------------------------------------------*/
char _InterlockedOr8_acq(char volatile *_Value, char _Mask);
char _InterlockedOr8_nf(char volatile *_Value, char _Mask);
char _InterlockedOr8_rel(char volatile *_Value, char _Mask);
short _InterlockedOr16_acq(short volatile *_Value, short _Mask);
short _InterlockedOr16_nf(short volatile *_Value, short _Mask);
short _InterlockedOr16_rel(short volatile *_Value, short _Mask);
long _InterlockedOr_acq(long volatile *_Value, long _Mask);
long _InterlockedOr_nf(long volatile *_Value, long _Mask);
long _InterlockedOr_rel(long volatile *_Value, long _Mask);
__int64 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask);
__int64 _InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask);
__int64 _InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask);

/*----------------------------------------------------------------------------*\
|* Interlocked Xor
\*----------------------------------------------------------------------------*/
char _InterlockedXor8_acq(char volatile *_Value, char _Mask);
char _InterlockedXor8_nf(char volatile *_Value, char _Mask);
char _InterlockedXor8_rel(char volatile *_Value, char _Mask);
short _InterlockedXor16_acq(short volatile *_Value, short _Mask);
short _InterlockedXor16_nf(short volatile *_Value, short _Mask);
short _InterlockedXor16_rel(short volatile *_Value, short _Mask);
long _InterlockedXor_acq(long volatile *_Value, long _Mask);
long _InterlockedXor_nf(long volatile *_Value, long _Mask);
long _InterlockedXor_rel(long volatile *_Value, long _Mask);
__int64 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask);
__int64 _InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask);
__int64 _InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask);

/*----------------------------------------------------------------------------*\
|* Interlocked Exchange
\*----------------------------------------------------------------------------*/
char _InterlockedExchange8_acq(char volatile *_Target, char _Value);
char _InterlockedExchange8_nf(char volatile *_Target, char _Value);
char _InterlockedExchange8_rel(char volatile *_Target, char _Value);
short _InterlockedExchange16_acq(short volatile *_Target, short _Value);
short _InterlockedExchange16_nf(short volatile *_Target, short _Value);
short _InterlockedExchange16_rel(short volatile *_Target, short _Value);
long _InterlockedExchange_acq(long volatile *_Target, long _Value);
long _InterlockedExchange_nf(long volatile *_Target, long _Value);
long _InterlockedExchange_rel(long volatile *_Target, long _Value);
__int64 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value);
__int64 _InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value);
__int64 _InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value);

/*----------------------------------------------------------------------------*\
|* Interlocked Compare Exchange
\*----------------------------------------------------------------------------*/
char _InterlockedCompareExchange8_acq(char volatile *_Destination,
                                      char _Exchange, char _Comparand);
char _InterlockedCompareExchange8_nf(char volatile *_Destination,
                                     char _Exchange, char _Comparand);
char _InterlockedCompareExchange8_rel(char volatile *_Destination,
                                      char _Exchange, char _Comparand);
short _InterlockedCompareExchange16_acq(short volatile *_Destination,
                                        short _Exchange, short _Comparand);
short _InterlockedCompareExchange16_nf(short volatile *_Destination,
                                       short _Exchange, short _Comparand);
short _InterlockedCompareExchange16_rel(short volatile *_Destination,
                                        short _Exchange, short _Comparand);
long _InterlockedCompareExchange_acq(long volatile *_Destination,
                                     long _Exchange, long _Comparand);
long _InterlockedCompareExchange_nf(long volatile *_Destination, long _Exchange,
                                    long _Comparand);
long _InterlockedCompareExchange_rel(long volatile *_Destination,
                                     long _Exchange, long _Comparand);
__int64 _InterlockedCompareExchange64_acq(__int64 volatile *_Destination,
                                          __int64 _Exchange,
                                          __int64 _Comparand);
__int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination,
                                         __int64 _Exchange, __int64 _Comparand);
__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
                                          __int64 _Exchange,
                                          __int64 _Comparand);
#endif

#ifdef __cplusplus
}
#endif

#endif /* __INTRIN0_H */
#endif /* _MSC_VER */
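A hypothetical usage sketch for the interlocked declarations above (MSVC or clang-cl on Windows; the SpinLock type is illustrative). Real code includes <intrin.h>, which layers on top of this reduced header used by the MSVC STL:

#include <intrin.h>

// A minimal test-and-set spinlock built on _InterlockedExchange.
struct SpinLock {
  long volatile state = 0;  // 0 = free, 1 = held
  void lock() {
    while (_InterlockedExchange(&state, 1) != 0)  // atomically take the lock
      _mm_pause();  // x86 spin hint; declared above for i386/x86_64
  }
  void unlock() { _InterlockedExchange(&state, 0); }  // release
};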
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/keylockerintrin.h
ADDED
@@ -0,0 +1,527 @@
/*===----------------- keylockerintrin.h - KL Intrinsics -------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __IMMINTRIN_H
#error "Never use <keylockerintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef _KEYLOCKERINTRIN_H
#define _KEYLOCKERINTRIN_H

#if !defined(__SCE__) || __has_feature(modules) || defined(__KL__)

/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS \
  __attribute__((__always_inline__, __nodebug__, __target__("kl"),\
                 __min_vector_width__(128)))

/// Load internal wrapping key from __intkey, __enkey_lo and __enkey_hi. __ctl
/// will be assigned to EAX, which specifies the KeySource and whether backing
/// up the key is permitted. The 256-bit encryption key is loaded from the two
/// explicit operands (__enkey_lo and __enkey_hi). The 128-bit integrity key is
/// loaded from the implicit operand XMM0, which is assigned by __intkey.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> LOADIWKEY </c> instructions.
///
/// \code{.operation}
/// IF CPL > 0 // LOADIWKEY only allowed at ring 0 (supervisor mode)
///   GP (0)
/// FI
/// IF “LOADIWKEY exiting” VM execution control set
///   VMexit
/// FI
/// IF __ctl[4:1] > 1 // Reserved KeySource encoding used
///   GP (0)
/// FI
/// IF __ctl[31:5] != 0 // Reserved bit in __ctl is set
///   GP (0)
/// FI
/// IF __ctl[0] AND (CPUID.19H.ECX[0] == 0) // NoBackup is not supported on this part
///   GP (0)
/// FI
/// IF (__ctl[4:1] == 1) AND (CPUID.19H.ECX[1] == 0) // KeySource of 1 is not supported on this part
///   GP (0)
/// FI
/// IF (__ctl[4:1] == 0) // KeySource of 0.
///   IWKey.Encryption Key[127:0] := __enkey_hi[127:0]
///   IWKey.Encryption Key[255:128] := __enkey_lo[127:0]
///   IWKey.IntegrityKey[127:0] := __intkey[127:0]
///   IWKey.NoBackup := __ctl[0]
///   IWKey.KeySource := __ctl[4:1]
///   ZF := 0
/// ELSE // KeySource of 1. See RDSEED definition for details of randomness
///   IF HW_NRND_GEN.ready == 1 // Full-entropy random data from RDSEED was received
///     IWKey.Encryption Key[127:0] := __enkey_hi[127:0] XOR HW_NRND_GEN.data[127:0]
///     IWKey.Encryption Key[255:128] := __enkey_lo[127:0] XOR HW_NRND_GEN.data[255:128]
///     IWKey.Encryption Key[255:0] := __enkey_hi[127:0]:__enkey_lo[127:0] XOR HW_NRND_GEN.data[255:0]
///     IWKey.IntegrityKey[127:0] := __intkey[127:0] XOR HW_NRND_GEN.data[383:256]
///     IWKey.NoBackup := __ctl[0]
///     IWKey.KeySource := __ctl[4:1]
///     ZF := 0
///   ELSE // Random data was not returned from RDSEED. IWKey was not loaded
///     ZF := 1
///   FI
/// FI
/// dst := ZF
/// OF := 0
/// SF := 0
/// AF := 0
/// PF := 0
/// CF := 0
/// \endcode
static __inline__ void __DEFAULT_FN_ATTRS
_mm_loadiwkey (unsigned int __ctl, __m128i __intkey,
               __m128i __enkey_lo, __m128i __enkey_hi) {
  __builtin_ia32_loadiwkey (__intkey, __enkey_lo, __enkey_hi, __ctl);
}

/// Wrap a 128-bit AES key from __key into a key handle and output in
/// ((__m128i*)__h) to ((__m128i*)__h) + 2 and a 32-bit value as return.
/// The explicit source operand __htype specifies handle restrictions.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> ENCODEKEY128 </c> instructions.
///
/// \code{.operation}
/// InputKey[127:0] := __key[127:0]
/// KeyMetadata[2:0] := __htype[2:0]
/// KeyMetadata[23:3] := 0 // Reserved for future usage
/// KeyMetadata[27:24] := 0 // KeyType is AES-128 (value of 0)
/// KeyMetadata[127:28] := 0 // Reserved for future usage
/// Handle[383:0] := WrapKey128(InputKey[127:0], KeyMetadata[127:0],
///                  IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0])
/// dst[0] := IWKey.NoBackup
/// dst[4:1] := IWKey.KeySource[3:0]
/// dst[31:5] := 0
/// MEM[__h+127:__h] := Handle[127:0] // AAD
/// MEM[__h+255:__h+128] := Handle[255:128] // Integrity Tag
/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText
/// OF := 0
/// SF := 0
/// ZF := 0
/// AF := 0
/// PF := 0
/// CF := 0
/// \endcode
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) {
  return __builtin_ia32_encodekey128_u32(__htype, (__v2di)__key, __h);
}

/// Wrap a 256-bit AES key from __key_hi:__key_lo into a key handle, then
/// output handle in ((__m128i*)__h) to ((__m128i*)__h) + 3 and
/// a 32-bit value as return.
/// The explicit source operand __htype specifies handle restrictions.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the <c> ENCODEKEY256 </c> instructions.
///
/// \code{.operation}
/// InputKey[127:0] := __key_lo[127:0]
/// InputKey[255:128] := __key_hi[255:128]
/// KeyMetadata[2:0] := __htype[2:0]
/// KeyMetadata[23:3] := 0 // Reserved for future usage
/// KeyMetadata[27:24] := 1 // KeyType is AES-256 (value of 1)
/// KeyMetadata[127:28] := 0 // Reserved for future usage
/// Handle[511:0] := WrapKey256(InputKey[255:0], KeyMetadata[127:0],
///                  IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0])
/// dst[0] := IWKey.NoBackup
/// dst[4:1] := IWKey.KeySource[3:0]
/// dst[31:5] := 0
/// MEM[__h+127:__h] := Handle[127:0] // AAD
/// MEM[__h+255:__h+128] := Handle[255:128] // Tag
/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText[127:0]
/// MEM[__h+511:__h+384] := Handle[511:384] // CipherText[255:128]
|
159 |
+
/// OF := 0
|
160 |
+
/// SF := 0
|
161 |
+
/// ZF := 0
|
162 |
+
/// AF := 0
|
163 |
+
/// PF := 0
|
164 |
+
/// CF := 0
|
165 |
+
/// \endcode
|
166 |
+
static __inline__ unsigned int __DEFAULT_FN_ATTRS
|
167 |
+
_mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi,
|
168 |
+
void *__h) {
|
169 |
+
return __builtin_ia32_encodekey256_u32(__htype, (__v2di)__key_lo,
|
170 |
+
(__v2di)__key_hi, __h);
|
171 |
+
}
|
172 |
+
|
173 |
+
/// The AESENC128KL performs 10 rounds of AES to encrypt the __idata using
|
174 |
+
/// the 128-bit key in the handle from the __h. It stores the result in the
|
175 |
+
/// __odata. And return the affected ZF flag status.
|
176 |
+
///
|
177 |
+
/// \headerfile <x86intrin.h>
|
178 |
+
///
|
179 |
+
/// This intrinsic corresponds to the <c> AESENC128KL </c> instructions.
|
180 |
+
///
|
181 |
+
/// \code{.operation}
|
182 |
+
/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
|
183 |
+
/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
|
184 |
+
/// (Handle[127:0] AND (CPL > 0)) ||
|
185 |
+
/// Handle[383:256] ||
|
186 |
+
/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )
|
187 |
+
/// IF (IllegalHandle)
|
188 |
+
/// ZF := 1
|
189 |
+
/// ELSE
|
190 |
+
/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
|
191 |
+
/// IF (Authentic == 0)
|
192 |
+
/// ZF := 1
|
193 |
+
/// ELSE
|
194 |
+
/// MEM[__odata+127:__odata] := AES128Encrypt (__idata[127:0], UnwrappedKey)
|
195 |
+
/// ZF := 0
|
196 |
+
/// FI
|
197 |
+
/// FI
|
198 |
+
/// dst := ZF
|
199 |
+
/// OF := 0
|
200 |
+
/// SF := 0
|
201 |
+
/// AF := 0
|
202 |
+
/// PF := 0
|
203 |
+
/// CF := 0
|
204 |
+
/// \endcode
|
205 |
+
static __inline__ unsigned char __DEFAULT_FN_ATTRS
|
206 |
+
_mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
|
207 |
+
return __builtin_ia32_aesenc128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
|
208 |
+
}
|
209 |
+
|
210 |
+
/// The AESENC256KL performs 14 rounds of AES to encrypt the __idata using
|
211 |
+
/// the 256-bit key in the handle from the __h. It stores the result in the
|
212 |
+
/// __odata. And return the affected ZF flag status.
|
213 |
+
///
|
214 |
+
/// \headerfile <x86intrin.h>
|
215 |
+
///
|
216 |
+
/// This intrinsic corresponds to the <c> AESENC256KL </c> instructions.
|
217 |
+
///
|
218 |
+
/// \code{.operation}
|
219 |
+
/// Handle[511:0] := MEM[__h+511:__h] // Load is not guaranteed to be atomic.
|
220 |
+
/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
|
221 |
+
/// (Handle[127:0] AND (CPL > 0)) ||
|
222 |
+
/// Handle[255:128] ||
|
223 |
+
/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )
|
224 |
+
/// IF (IllegalHandle)
|
225 |
+
/// ZF := 1
|
226 |
+
/// MEM[__odata+127:__odata] := 0
|
227 |
+
/// ELSE
|
228 |
+
/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
|
229 |
+
/// IF (Authentic == 0)
|
230 |
+
/// ZF := 1
|
231 |
+
/// MEM[__odata+127:__odata] := 0
|
232 |
+
/// ELSE
|
233 |
+
/// MEM[__odata+127:__odata] := AES256Encrypt (__idata[127:0], UnwrappedKey)
|
234 |
+
/// ZF := 0
|
235 |
+
/// FI
|
236 |
+
/// FI
|
237 |
+
/// dst := ZF
|
238 |
+
/// OF := 0
|
239 |
+
/// SF := 0
|
240 |
+
/// AF := 0
|
241 |
+
/// PF := 0
|
242 |
+
/// CF := 0
|
243 |
+
/// \endcode
|
244 |
+
static __inline__ unsigned char __DEFAULT_FN_ATTRS
|
245 |
+
_mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
|
246 |
+
return __builtin_ia32_aesenc256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
|
247 |
+
}
|
248 |
+
|
249 |
+
/// The AESDEC128KL performs 10 rounds of AES to decrypt the __idata using
|
250 |
+
/// the 128-bit key in the handle from the __h. It stores the result in the
|
251 |
+
/// __odata. And return the affected ZF flag status.
|
252 |
+
///
|
253 |
+
/// \headerfile <x86intrin.h>
|
254 |
+
///
|
255 |
+
/// This intrinsic corresponds to the <c> AESDEC128KL </c> instructions.
|
256 |
+
///
|
257 |
+
/// \code{.operation}
|
258 |
+
/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
|
259 |
+
/// IllegalHandle := (HandleReservedBitSet (Handle[383:0]) ||
|
260 |
+
/// (Handle[127:0] AND (CPL > 0)) ||
|
261 |
+
/// Handle[383:256] ||
|
262 |
+
/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128)
|
263 |
+
/// IF (IllegalHandle)
|
264 |
+
/// ZF := 1
|
265 |
+
/// MEM[__odata+127:__odata] := 0
|
266 |
+
/// ELSE
|
267 |
+
/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
|
268 |
+
/// IF (Authentic == 0)
|
269 |
+
/// ZF := 1
|
270 |
+
/// MEM[__odata+127:__odata] := 0
|
271 |
+
/// ELSE
|
272 |
+
/// MEM[__odata+127:__odata] := AES128Decrypt (__idata[127:0], UnwrappedKey)
|
273 |
+
/// ZF := 0
|
274 |
+
/// FI
|
275 |
+
/// FI
|
276 |
+
/// dst := ZF
|
277 |
+
/// OF := 0
|
278 |
+
/// SF := 0
|
279 |
+
/// AF := 0
|
280 |
+
/// PF := 0
|
281 |
+
/// CF := 0
|
282 |
+
/// \endcode
|
283 |
+
static __inline__ unsigned char __DEFAULT_FN_ATTRS
|
284 |
+
_mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
|
285 |
+
return __builtin_ia32_aesdec128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
|
286 |
+
}
|
287 |
+
|
288 |
+
/// The AESDEC256KL performs 10 rounds of AES to decrypt the __idata using
|
289 |
+
/// the 256-bit key in the handle from the __h. It stores the result in the
|
290 |
+
/// __odata. And return the affected ZF flag status.
|
291 |
+
///
|
292 |
+
/// \headerfile <x86intrin.h>
|
293 |
+
///
|
294 |
+
/// This intrinsic corresponds to the <c> AESDEC256KL </c> instructions.
|
295 |
+
///
|
296 |
+
/// \code{.operation}
|
297 |
+
/// Handle[511:0] := MEM[__h+511:__h]
|
298 |
+
/// IllegalHandle := (HandleReservedBitSet (Handle[511:0]) ||
|
299 |
+
/// (Handle[127:0] AND (CPL > 0)) ||
|
300 |
+
/// Handle[383:256] ||
|
301 |
+
/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256)
|
302 |
+
/// IF (IllegalHandle)
|
303 |
+
/// ZF := 1
|
304 |
+
/// MEM[__odata+127:__odata] := 0
|
305 |
+
/// ELSE
|
306 |
+
/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
|
307 |
+
/// IF (Authentic == 0)
|
308 |
+
/// ZF := 1
|
309 |
+
/// MEM[__odata+127:__odata] := 0
|
310 |
+
/// ELSE
|
311 |
+
/// MEM[__odata+127:__odata] := AES256Decrypt (__idata[127:0], UnwrappedKey)
|
312 |
+
/// ZF := 0
|
313 |
+
/// FI
|
314 |
+
/// FI
|
315 |
+
/// dst := ZF
|
316 |
+
/// OF := 0
|
317 |
+
/// SF := 0
|
318 |
+
/// AF := 0
|
319 |
+
/// PF := 0
|
320 |
+
/// CF := 0
|
321 |
+
/// \endcode
|
322 |
+
static __inline__ unsigned char __DEFAULT_FN_ATTRS
|
323 |
+
_mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
|
324 |
+
return __builtin_ia32_aesdec256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
|
325 |
+
}
|
326 |
+
|
327 |
+
#undef __DEFAULT_FN_ATTRS
|
328 |
+
|
329 |
+
#endif /* !defined(__SCE__ || __has_feature(modules) || defined(__KL__) */
|
330 |
+
|
331 |
+
#if !defined(__SCE__) || __has_feature(modules) || defined(__WIDEKL__)
|
332 |
+
|
333 |
+
/* Define the default attributes for the functions in this file. */
|
334 |
+
#define __DEFAULT_FN_ATTRS \
|
335 |
+
__attribute__((__always_inline__, __nodebug__, __target__("kl,widekl"),\
|
336 |
+
__min_vector_width__(128)))
|
337 |
+
|
338 |
+
/// Encrypt __idata[0] to __idata[7] using 128-bit AES key indicated by handle
|
339 |
+
/// at __h and store each resultant block back from __odata to __odata+7. And
|
340 |
+
/// return the affected ZF flag status.
|
341 |
+
///
|
342 |
+
/// \headerfile <x86intrin.h>
|
343 |
+
///
|
344 |
+
/// This intrinsic corresponds to the <c> AESENCWIDE128KL </c> instructions.
|
345 |
+
///
|
346 |
+
/// \code{.operation}
|
347 |
+
/// Handle := MEM[__h+383:__h]
|
348 |
+
/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
|
349 |
+
/// (Handle[127:0] AND (CPL > 0)) ||
|
350 |
+
/// Handle[255:128] ||
|
351 |
+
/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )
|
352 |
+
/// IF (IllegalHandle)
|
353 |
+
/// ZF := 1
|
354 |
+
/// FOR i := 0 to 7
|
355 |
+
/// __odata[i] := 0
|
356 |
+
/// ENDFOR
|
357 |
+
/// ELSE
|
358 |
+
/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
|
359 |
+
/// IF Authentic == 0
|
360 |
+
/// ZF := 1
|
361 |
+
/// FOR i := 0 to 7
|
362 |
+
/// __odata[i] := 0
|
363 |
+
/// ENDFOR
|
364 |
+
/// ELSE
|
365 |
+
/// FOR i := 0 to 7
|
366 |
+
/// __odata[i] := AES128Encrypt (__idata[i], UnwrappedKey)
|
367 |
+
/// ENDFOR
|
368 |
+
/// ZF := 0
|
369 |
+
/// FI
|
370 |
+
/// FI
|
371 |
+
/// dst := ZF
|
372 |
+
/// OF := 0
|
373 |
+
/// SF := 0
|
374 |
+
/// AF := 0
|
375 |
+
/// PF := 0
|
376 |
+
/// CF := 0
|
377 |
+
/// \endcode
|
378 |
+
static __inline__ unsigned char __DEFAULT_FN_ATTRS
|
379 |
+
_mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
|
380 |
+
return __builtin_ia32_aesencwide128kl_u8((__v2di *)__odata,
|
381 |
+
(const __v2di *)__idata, __h);
|
382 |
+
}
|
383 |
+
|
384 |
+
/// Encrypt __idata[0] to __idata[7] using 256-bit AES key indicated by handle
|
385 |
+
/// at __h and store each resultant block back from __odata to __odata+7. And
|
386 |
+
/// return the affected ZF flag status.
|
387 |
+
///
|
388 |
+
/// \headerfile <x86intrin.h>
|
389 |
+
///
|
390 |
+
/// This intrinsic corresponds to the <c> AESENCWIDE256KL </c> instructions.
|
391 |
+
///
|
392 |
+
/// \code{.operation}
|
393 |
+
/// Handle[511:0] := MEM[__h+511:__h]
|
394 |
+
/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
|
395 |
+
/// (Handle[127:0] AND (CPL > 0)) ||
|
396 |
+
/// Handle[255:128] ||
|
397 |
+
/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES512 )
|
398 |
+
/// IF (IllegalHandle)
|
399 |
+
/// ZF := 1
|
400 |
+
/// FOR i := 0 to 7
|
401 |
+
/// __odata[i] := 0
|
402 |
+
/// ENDFOR
|
403 |
+
/// ELSE
|
404 |
+
/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
|
405 |
+
/// IF Authentic == 0
|
406 |
+
/// ZF := 1
|
407 |
+
/// FOR i := 0 to 7
|
408 |
+
/// __odata[i] := 0
|
409 |
+
/// ENDFOR
|
410 |
+
/// ELSE
|
411 |
+
/// FOR i := 0 to 7
|
412 |
+
/// __odata[i] := AES256Encrypt (__idata[i], UnwrappedKey)
|
413 |
+
/// ENDFOR
|
414 |
+
/// ZF := 0
|
415 |
+
/// FI
|
416 |
+
/// FI
|
417 |
+
/// dst := ZF
|
418 |
+
/// OF := 0
|
419 |
+
/// SF := 0
|
420 |
+
/// AF := 0
|
421 |
+
/// PF := 0
|
422 |
+
/// CF := 0
|
423 |
+
/// \endcode
|
424 |
+
static __inline__ unsigned char __DEFAULT_FN_ATTRS
|
425 |
+
_mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
|
426 |
+
return __builtin_ia32_aesencwide256kl_u8((__v2di *)__odata,
|
427 |
+
(const __v2di *)__idata, __h);
|
428 |
+
}
|
429 |
+
|
430 |
+
/// Decrypt __idata[0] to __idata[7] using 128-bit AES key indicated by handle
|
431 |
+
/// at __h and store each resultant block back from __odata to __odata+7. And
|
432 |
+
/// return the affected ZF flag status.
|
433 |
+
///
|
434 |
+
/// \headerfile <x86intrin.h>
|
435 |
+
///
|
436 |
+
/// This intrinsic corresponds to the <c> AESDECWIDE128KL </c> instructions.
|
437 |
+
///
|
438 |
+
/// \code{.operation}
|
439 |
+
/// Handle[383:0] := MEM[__h+383:__h]
|
440 |
+
/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
|
441 |
+
/// (Handle[127:0] AND (CPL > 0)) ||
|
442 |
+
/// Handle[255:128] ||
|
443 |
+
/// HandleKeyType (Handle) != HANDLE_KEY_TYPE_AES128 )
|
444 |
+
/// IF (IllegalHandle)
|
445 |
+
/// ZF := 1
|
446 |
+
/// FOR i := 0 to 7
|
447 |
+
/// __odata[i] := 0
|
448 |
+
/// ENDFOR
|
449 |
+
/// ELSE
|
450 |
+
/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
|
451 |
+
/// IF Authentic == 0
|
452 |
+
/// ZF := 1
|
453 |
+
/// FOR i := 0 to 7
|
454 |
+
/// __odata[i] := 0
|
455 |
+
/// ENDFOR
|
456 |
+
/// ELSE
|
457 |
+
/// FOR i := 0 to 7
|
458 |
+
/// __odata[i] := AES128Decrypt (__idata[i], UnwrappedKey)
|
459 |
+
/// ENDFOR
|
460 |
+
/// ZF := 0
|
461 |
+
/// FI
|
462 |
+
/// FI
|
463 |
+
/// dst := ZF
|
464 |
+
/// OF := 0
|
465 |
+
/// SF := 0
|
466 |
+
/// AF := 0
|
467 |
+
/// PF := 0
|
468 |
+
/// CF := 0
|
469 |
+
/// \endcode
|
470 |
+
static __inline__ unsigned char __DEFAULT_FN_ATTRS
|
471 |
+
_mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
|
472 |
+
return __builtin_ia32_aesdecwide128kl_u8((__v2di *)__odata,
|
473 |
+
(const __v2di *)__idata, __h);
|
474 |
+
}
|
475 |
+
|
476 |
+
/// Decrypt __idata[0] to __idata[7] using 256-bit AES key indicated by handle
|
477 |
+
/// at __h and store each resultant block back from __odata to __odata+7. And
|
478 |
+
/// return the affected ZF flag status.
|
479 |
+
///
|
480 |
+
/// \headerfile <x86intrin.h>
|
481 |
+
///
|
482 |
+
/// This intrinsic corresponds to the <c> AESDECWIDE256KL </c> instructions.
|
483 |
+
///
|
484 |
+
/// \code{.operation}
|
485 |
+
/// Handle[511:0] := MEM[__h+511:__h]
|
486 |
+
/// IllegalHandle = ( HandleReservedBitSet (Handle[511:0]) ||
|
487 |
+
/// (Handle[127:0] AND (CPL > 0)) ||
|
488 |
+
/// Handle[255:128] ||
|
489 |
+
/// HandleKeyType (Handle) != HANDLE_KEY_TYPE_AES512 )
|
490 |
+
/// If (IllegalHandle)
|
491 |
+
/// ZF := 1
|
492 |
+
/// FOR i := 0 to 7
|
493 |
+
/// __odata[i] := 0
|
494 |
+
/// ENDFOR
|
495 |
+
/// ELSE
|
496 |
+
/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
|
497 |
+
/// IF Authentic == 0
|
498 |
+
/// ZF := 1
|
499 |
+
/// FOR i := 0 to 7
|
500 |
+
/// __odata[i] := 0
|
501 |
+
/// ENDFOR
|
502 |
+
/// ELSE
|
503 |
+
/// FOR i := 0 to 7
|
504 |
+
/// __odata[i] := AES256Decrypt (__idata[i], UnwrappedKey)
|
505 |
+
/// ENDFOR
|
506 |
+
/// ZF := 0
|
507 |
+
/// FI
|
508 |
+
/// FI
|
509 |
+
/// dst := ZF
|
510 |
+
/// OF := 0
|
511 |
+
/// SF := 0
|
512 |
+
/// AF := 0
|
513 |
+
/// PF := 0
|
514 |
+
/// CF := 0
|
515 |
+
/// \endcode
|
516 |
+
static __inline__ unsigned char __DEFAULT_FN_ATTRS
|
517 |
+
_mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
|
518 |
+
return __builtin_ia32_aesdecwide256kl_u8((__v2di *)__odata,
|
519 |
+
(const __v2di *)__idata, __h);
|
520 |
+
}
|
521 |
+
|
522 |
+
#undef __DEFAULT_FN_ATTRS
|
523 |
+
|
524 |
+
#endif /* !defined(__SCE__) || __has_feature(modules) || defined(__WIDEKL__) \
|
525 |
+
*/
|
526 |
+
|
527 |
+
#endif /* _KEYLOCKERINTRIN_H */
|
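A minimal usage sketch for the Key Locker intrinsics above (editor's addition, not part of the header). It assumes the OS has already programmed the internal wrapping key via LOADIWKEY, a CPU with the KL feature, and compilation with -mkl; the key and data values are placeholders.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m128i key   = _mm_set_epi32(0x0f0e0d0c, 0x0b0a0908, 0x07060504, 0x03020100);
  __m128i block = _mm_set1_epi32(0x41414141);
  __m128i handle[3];                /* ENCODEKEY128 emits a 384-bit handle */

  /* Wrap the raw AES-128 key into a handle; the raw key need not be kept. */
  unsigned int iwkey_info = _mm_encodekey128_u32(0 /* no restrictions */, key, handle);
  (void)iwkey_info;                 /* reports IWKey NoBackup/KeySource bits */

  /* Encrypt one block through the handle; a nonzero return (ZF=1) signals
     an illegal or corrupted handle. */
  __m128i out;
  if (_mm_aesenc128kl_u8(&out, block, handle))
    fprintf(stderr, "AESENC128KL failed: illegal or corrupted handle\n");
  return 0;
}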
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/lsxintrin.h
ADDED
The diff for this file is too large to render.
See raw diff
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/lzcntintrin.h
ADDED
@@ -0,0 +1,104 @@
/*===---- lzcntintrin.h - LZCNT intrinsics ---------------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
#error "Never use <lzcntintrin.h> directly; include <x86intrin.h> instead."
#endif

#ifndef __LZCNTINTRIN_H
#define __LZCNTINTRIN_H

/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lzcnt")))

#ifndef _MSC_VER
/// Counts the number of leading zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c LZCNT instruction.
///
/// \param __X
///    An unsigned 16-bit integer whose leading zeros are to be counted.
/// \returns An unsigned 16-bit integer containing the number of leading zero
///    bits in the operand.
#define __lzcnt16(X) __builtin_ia32_lzcnt_u16((unsigned short)(X))
#endif // _MSC_VER

/// Counts the number of leading zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c LZCNT instruction.
///
/// \param __X
///    An unsigned 32-bit integer whose leading zeros are to be counted.
/// \returns An unsigned 32-bit integer containing the number of leading zero
///    bits in the operand.
/// \see _lzcnt_u32
static __inline__ unsigned int __DEFAULT_FN_ATTRS
__lzcnt32(unsigned int __X)
{
  return __builtin_ia32_lzcnt_u32(__X);
}

/// Counts the number of leading zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c LZCNT instruction.
///
/// \param __X
///    An unsigned 32-bit integer whose leading zeros are to be counted.
/// \returns An unsigned 32-bit integer containing the number of leading zero
///    bits in the operand.
/// \see __lzcnt32
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_lzcnt_u32(unsigned int __X)
{
  return __builtin_ia32_lzcnt_u32(__X);
}

#ifdef __x86_64__
#ifndef _MSC_VER
/// Counts the number of leading zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c LZCNT instruction.
///
/// \param __X
///    An unsigned 64-bit integer whose leading zeros are to be counted.
/// \returns An unsigned 64-bit integer containing the number of leading zero
///    bits in the operand.
/// \see _lzcnt_u64
#define __lzcnt64(X) __builtin_ia32_lzcnt_u64((unsigned long long)(X))
#endif // _MSC_VER

/// Counts the number of leading zero bits in the operand.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c LZCNT instruction.
///
/// \param __X
///    An unsigned 64-bit integer whose leading zeros are to be counted.
/// \returns An unsigned 64-bit integer containing the number of leading zero
///    bits in the operand.
/// \see __lzcnt64
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_lzcnt_u64(unsigned long long __X)
{
  return __builtin_ia32_lzcnt_u64(__X);
}
#endif

#undef __DEFAULT_FN_ATTRS

#endif /* __LZCNTINTRIN_H */
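Since LZCNT, unlike BSR, is well defined for a zero input (it returns the operand width), a common use of the intrinsics above is a branch-free integer log2. A small sketch (editor's addition; compile with -mlzcnt):

#include <immintrin.h>
#include <stdio.h>

/* floor(log2(x)) for x > 0: the bit position of the highest set bit */
static unsigned int ilog2_u32(unsigned int x) {
  return 31 - _lzcnt_u32(x);
}

int main(void) {
  printf("%u\n", _lzcnt_u32(1u));    /* 31 */
  printf("%u\n", _lzcnt_u32(0u));    /* 32: LZCNT of 0 is the operand width */
  printf("%u\n", ilog2_u32(1000u));  /* 9, since 2^9 <= 1000 < 2^10 */
  return 0;
}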
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/movdirintrin.h
ADDED
@@ -0,0 +1,49 @@
/*===------------------------- movdirintrin.h ------------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */
#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
#error "Never use <movdirintrin.h> directly; include <x86intrin.h> instead."
#endif

#ifndef _MOVDIRINTRIN_H
#define _MOVDIRINTRIN_H

/* Move doubleword as direct store */
static __inline__ void
__attribute__((__always_inline__, __nodebug__, __target__("movdiri")))
_directstoreu_u32 (void *__dst, unsigned int __value)
{
  __builtin_ia32_directstore_u32((unsigned int *)__dst, (unsigned int)__value);
}

#ifdef __x86_64__

/* Move quadword as direct store */
static __inline__ void
__attribute__((__always_inline__, __nodebug__, __target__("movdiri")))
_directstoreu_u64 (void *__dst, unsigned long __value)
{
  __builtin_ia32_directstore_u64((unsigned long *)__dst, __value);
}

#endif /* __x86_64__ */

/*
 * movdir64b - Move 64 bytes as direct store.
 * The destination must be 64 byte aligned, and the store is atomic.
 * The source address has no alignment requirement, and the load from
 * the source address is not atomic.
 */
static __inline__ void
__attribute__((__always_inline__, __nodebug__, __target__("movdir64b")))
_movdir64b (void *__dst __attribute__((align_value(64))), const void *__src)
{
  __builtin_ia32_movdir64b(__dst, __src);
}

#endif /* _MOVDIRINTRIN_H */
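A short sketch of the direct-store intrinsics above (editor's addition; requires MOVDIRI/MOVDIR64B hardware and compilation with -mmovdiri -mmovdir64b). The key constraint is the 64-byte alignment of the MOVDIR64B destination:

#include <immintrin.h>
#include <stdalign.h>
#include <string.h>

int main(void) {
  alignas(64) unsigned char dst[64];   /* destination must be 64-byte aligned */
  unsigned char src[64];
  memset(src, 0xAB, sizeof src);

  _movdir64b(dst, src);                /* atomic 64-byte direct store */
  _directstoreu_u32(dst, 0x12345678u); /* weakly-ordered 4-byte direct store */
  return 0;
}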
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/nmmintrin.h
ADDED
@@ -0,0 +1,20 @@
/*===---- nmmintrin.h - SSE4 intrinsics ------------------------------------===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __NMMINTRIN_H
#define __NMMINTRIN_H

#if !defined(__i386__) && !defined(__x86_64__)
#error "This header is only meant to be used on x86 and x64 architecture"
#endif

/* To match expectations of gcc we put the sse4.2 definitions into smmintrin.h,
   just include it now then. */
#include <smmintrin.h>
#endif /* __NMMINTRIN_H */
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/opencl-c-base.h
ADDED
@@ -0,0 +1,829 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
//===----- opencl-c-base.h - OpenCL C language base definitions -----------===//
|
2 |
+
//
|
3 |
+
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
4 |
+
// See https://llvm.org/LICENSE.txt for license information.
|
5 |
+
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
6 |
+
//
|
7 |
+
//===----------------------------------------------------------------------===//
|
8 |
+
|
9 |
+
#ifndef _OPENCL_BASE_H_
|
10 |
+
#define _OPENCL_BASE_H_
|
11 |
+
|
12 |
+
// Define extension macros
|
13 |
+
|
14 |
+
#if (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)
|
15 |
+
// For SPIR and SPIR-V all extensions are supported.
|
16 |
+
#if defined(__SPIR__) || defined(__SPIRV__)
|
17 |
+
#define cl_khr_subgroup_extended_types 1
|
18 |
+
#define cl_khr_subgroup_non_uniform_vote 1
|
19 |
+
#define cl_khr_subgroup_ballot 1
|
20 |
+
#define cl_khr_subgroup_non_uniform_arithmetic 1
|
21 |
+
#define cl_khr_subgroup_shuffle 1
|
22 |
+
#define cl_khr_subgroup_shuffle_relative 1
|
23 |
+
#define cl_khr_subgroup_clustered_reduce 1
|
24 |
+
#define cl_khr_subgroup_rotate 1
|
25 |
+
#define cl_khr_extended_bit_ops 1
|
26 |
+
#define cl_khr_integer_dot_product 1
|
27 |
+
#define __opencl_c_integer_dot_product_input_4x8bit 1
|
28 |
+
#define __opencl_c_integer_dot_product_input_4x8bit_packed 1
|
29 |
+
#define cl_ext_float_atomics 1
|
30 |
+
#ifdef cl_khr_fp16
|
31 |
+
#define __opencl_c_ext_fp16_global_atomic_load_store 1
|
32 |
+
#define __opencl_c_ext_fp16_local_atomic_load_store 1
|
33 |
+
#define __opencl_c_ext_fp16_global_atomic_add 1
|
34 |
+
#define __opencl_c_ext_fp16_local_atomic_add 1
|
35 |
+
#define __opencl_c_ext_fp16_global_atomic_min_max 1
|
36 |
+
#define __opencl_c_ext_fp16_local_atomic_min_max 1
|
37 |
+
#endif
|
38 |
+
#ifdef cl_khr_fp64
|
39 |
+
#define __opencl_c_ext_fp64_global_atomic_add 1
|
40 |
+
#define __opencl_c_ext_fp64_local_atomic_add 1
|
41 |
+
#define __opencl_c_ext_fp64_global_atomic_min_max 1
|
42 |
+
#define __opencl_c_ext_fp64_local_atomic_min_max 1
|
43 |
+
#endif
|
44 |
+
#define __opencl_c_ext_fp32_global_atomic_add 1
|
45 |
+
#define __opencl_c_ext_fp32_local_atomic_add 1
|
46 |
+
#define __opencl_c_ext_fp32_global_atomic_min_max 1
|
47 |
+
#define __opencl_c_ext_fp32_local_atomic_min_max 1
|
48 |
+
#define __opencl_c_ext_image_raw10_raw12 1
|
49 |
+
#define cl_khr_kernel_clock 1
|
50 |
+
#define __opencl_c_kernel_clock_scope_device 1
|
51 |
+
#define __opencl_c_kernel_clock_scope_work_group 1
|
52 |
+
#define __opencl_c_kernel_clock_scope_sub_group 1
|
53 |
+
|
54 |
+
#endif // defined(__SPIR__) || defined(__SPIRV__)
|
55 |
+
#endif // (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)
|
56 |
+
|
57 |
+
// Define feature macros for OpenCL C 2.0
|
58 |
+
#if (__OPENCL_CPP_VERSION__ == 100 || __OPENCL_C_VERSION__ == 200)
|
59 |
+
#define __opencl_c_pipes 1
|
60 |
+
#define __opencl_c_generic_address_space 1
|
61 |
+
#define __opencl_c_work_group_collective_functions 1
|
62 |
+
#define __opencl_c_atomic_order_acq_rel 1
|
63 |
+
#define __opencl_c_atomic_order_seq_cst 1
|
64 |
+
#define __opencl_c_atomic_scope_device 1
|
65 |
+
#define __opencl_c_atomic_scope_all_devices 1
|
66 |
+
#define __opencl_c_device_enqueue 1
|
67 |
+
#define __opencl_c_read_write_images 1
|
68 |
+
#define __opencl_c_program_scope_global_variables 1
|
69 |
+
#define __opencl_c_images 1
|
70 |
+
#endif
|
71 |
+
|
72 |
+
// Define header-only feature macros for OpenCL C 3.0.
|
73 |
+
#if (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
|
74 |
+
// For the SPIR and SPIR-V target all features are supported.
|
75 |
+
#if defined(__SPIR__) || defined(__SPIRV__)
|
76 |
+
#define __opencl_c_work_group_collective_functions 1
|
77 |
+
#define __opencl_c_atomic_order_seq_cst 1
|
78 |
+
#define __opencl_c_atomic_scope_device 1
|
79 |
+
#define __opencl_c_atomic_scope_all_devices 1
|
80 |
+
#define __opencl_c_read_write_images 1
|
81 |
+
#endif // defined(__SPIR__)
|
82 |
+
|
83 |
+
// Undefine any feature macros that have been explicitly disabled using
|
84 |
+
// an __undef_<feature> macro.
|
85 |
+
#ifdef __undef___opencl_c_work_group_collective_functions
|
86 |
+
#undef __opencl_c_work_group_collective_functions
|
87 |
+
#endif
|
88 |
+
#ifdef __undef___opencl_c_atomic_order_seq_cst
|
89 |
+
#undef __opencl_c_atomic_order_seq_cst
|
90 |
+
#endif
|
91 |
+
#ifdef __undef___opencl_c_atomic_scope_device
|
92 |
+
#undef __opencl_c_atomic_scope_device
|
93 |
+
#endif
|
94 |
+
#ifdef __undef___opencl_c_atomic_scope_all_devices
|
95 |
+
#undef __opencl_c_atomic_scope_all_devices
|
96 |
+
#endif
|
97 |
+
#ifdef __undef___opencl_c_read_write_images
|
98 |
+
#undef __opencl_c_read_write_images
|
99 |
+
#endif
|
100 |
+
|
101 |
+
#endif // (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
|
102 |
+
|
103 |
+
#if !defined(__opencl_c_generic_address_space)
|
104 |
+
// Internal feature macro to provide named (global, local, private) address
|
105 |
+
// space overloads for builtin functions that take a pointer argument.
|
106 |
+
#define __opencl_c_named_address_space_builtins 1
|
107 |
+
#endif // !defined(__opencl_c_generic_address_space)
|
108 |
+
|
109 |
+
#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
|
110 |
+
// Internal feature macro to provide subgroup builtins.
|
111 |
+
#define __opencl_subgroup_builtins 1
|
112 |
+
#endif
|
113 |
+
|
114 |
+
// built-in scalar data types:
|
115 |
+
|
116 |
+
/**
|
117 |
+
* An unsigned 8-bit integer.
|
118 |
+
*/
|
119 |
+
typedef unsigned char uchar;
|
120 |
+
|
121 |
+
/**
|
122 |
+
* An unsigned 16-bit integer.
|
123 |
+
*/
|
124 |
+
typedef unsigned short ushort;
|
125 |
+
|
126 |
+
/**
|
127 |
+
* An unsigned 32-bit integer.
|
128 |
+
*/
|
129 |
+
typedef unsigned int uint;
|
130 |
+
|
131 |
+
/**
|
132 |
+
* An unsigned 64-bit integer.
|
133 |
+
*/
|
134 |
+
typedef unsigned long ulong;
|
135 |
+
|
136 |
+
/**
|
137 |
+
* The unsigned integer type of the result of the sizeof operator. This
|
138 |
+
* is a 32-bit unsigned integer if CL_DEVICE_ADDRESS_BITS
|
139 |
+
* defined in table 4.3 is 32-bits and is a 64-bit unsigned integer if
|
140 |
+
* CL_DEVICE_ADDRESS_BITS is 64-bits.
|
141 |
+
*/
|
142 |
+
typedef __SIZE_TYPE__ size_t;
|
143 |
+
|
144 |
+
/**
|
145 |
+
* A signed integer type that is the result of subtracting two pointers.
|
146 |
+
* This is a 32-bit signed integer if CL_DEVICE_ADDRESS_BITS
|
147 |
+
* defined in table 4.3 is 32-bits and is a 64-bit signed integer if
|
148 |
+
* CL_DEVICE_ADDRESS_BITS is 64-bits.
|
149 |
+
*/
|
150 |
+
typedef __PTRDIFF_TYPE__ ptrdiff_t;
|
151 |
+
|
152 |
+
/**
|
153 |
+
* A signed integer type with the property that any valid pointer to
|
154 |
+
* void can be converted to this type, then converted back to pointer
|
155 |
+
* to void, and the result will compare equal to the original pointer.
|
156 |
+
*/
|
157 |
+
typedef __INTPTR_TYPE__ intptr_t;
|
158 |
+
|
159 |
+
/**
|
160 |
+
* An unsigned integer type with the property that any valid pointer to
|
161 |
+
* void can be converted to this type, then converted back to pointer
|
162 |
+
* to void, and the result will compare equal to the original pointer.
|
163 |
+
*/
|
164 |
+
typedef __UINTPTR_TYPE__ uintptr_t;
|
165 |
+
|
166 |
+
// built-in vector data types:
|
167 |
+
typedef char char2 __attribute__((ext_vector_type(2)));
|
168 |
+
typedef char char3 __attribute__((ext_vector_type(3)));
|
169 |
+
typedef char char4 __attribute__((ext_vector_type(4)));
|
170 |
+
typedef char char8 __attribute__((ext_vector_type(8)));
|
171 |
+
typedef char char16 __attribute__((ext_vector_type(16)));
|
172 |
+
typedef uchar uchar2 __attribute__((ext_vector_type(2)));
|
173 |
+
typedef uchar uchar3 __attribute__((ext_vector_type(3)));
|
174 |
+
typedef uchar uchar4 __attribute__((ext_vector_type(4)));
|
175 |
+
typedef uchar uchar8 __attribute__((ext_vector_type(8)));
|
176 |
+
typedef uchar uchar16 __attribute__((ext_vector_type(16)));
|
177 |
+
typedef short short2 __attribute__((ext_vector_type(2)));
|
178 |
+
typedef short short3 __attribute__((ext_vector_type(3)));
|
179 |
+
typedef short short4 __attribute__((ext_vector_type(4)));
|
180 |
+
typedef short short8 __attribute__((ext_vector_type(8)));
|
181 |
+
typedef short short16 __attribute__((ext_vector_type(16)));
|
182 |
+
typedef ushort ushort2 __attribute__((ext_vector_type(2)));
|
183 |
+
typedef ushort ushort3 __attribute__((ext_vector_type(3)));
|
184 |
+
typedef ushort ushort4 __attribute__((ext_vector_type(4)));
|
185 |
+
typedef ushort ushort8 __attribute__((ext_vector_type(8)));
|
186 |
+
typedef ushort ushort16 __attribute__((ext_vector_type(16)));
|
187 |
+
typedef int int2 __attribute__((ext_vector_type(2)));
|
188 |
+
typedef int int3 __attribute__((ext_vector_type(3)));
|
189 |
+
typedef int int4 __attribute__((ext_vector_type(4)));
|
190 |
+
typedef int int8 __attribute__((ext_vector_type(8)));
|
191 |
+
typedef int int16 __attribute__((ext_vector_type(16)));
|
192 |
+
typedef uint uint2 __attribute__((ext_vector_type(2)));
|
193 |
+
typedef uint uint3 __attribute__((ext_vector_type(3)));
|
194 |
+
typedef uint uint4 __attribute__((ext_vector_type(4)));
|
195 |
+
typedef uint uint8 __attribute__((ext_vector_type(8)));
|
196 |
+
typedef uint uint16 __attribute__((ext_vector_type(16)));
|
197 |
+
typedef long long2 __attribute__((ext_vector_type(2)));
|
198 |
+
typedef long long3 __attribute__((ext_vector_type(3)));
|
199 |
+
typedef long long4 __attribute__((ext_vector_type(4)));
|
200 |
+
typedef long long8 __attribute__((ext_vector_type(8)));
|
201 |
+
typedef long long16 __attribute__((ext_vector_type(16)));
|
202 |
+
typedef ulong ulong2 __attribute__((ext_vector_type(2)));
|
203 |
+
typedef ulong ulong3 __attribute__((ext_vector_type(3)));
|
204 |
+
typedef ulong ulong4 __attribute__((ext_vector_type(4)));
|
205 |
+
typedef ulong ulong8 __attribute__((ext_vector_type(8)));
|
206 |
+
typedef ulong ulong16 __attribute__((ext_vector_type(16)));
|
207 |
+
typedef float float2 __attribute__((ext_vector_type(2)));
|
208 |
+
typedef float float3 __attribute__((ext_vector_type(3)));
|
209 |
+
typedef float float4 __attribute__((ext_vector_type(4)));
|
210 |
+
typedef float float8 __attribute__((ext_vector_type(8)));
|
211 |
+
typedef float float16 __attribute__((ext_vector_type(16)));
|
212 |
+
#ifdef cl_khr_fp16
|
213 |
+
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
|
214 |
+
typedef half half2 __attribute__((ext_vector_type(2)));
|
215 |
+
typedef half half3 __attribute__((ext_vector_type(3)));
|
216 |
+
typedef half half4 __attribute__((ext_vector_type(4)));
|
217 |
+
typedef half half8 __attribute__((ext_vector_type(8)));
|
218 |
+
typedef half half16 __attribute__((ext_vector_type(16)));
|
219 |
+
#endif
|
220 |
+
#ifdef cl_khr_fp64
|
221 |
+
#if __OPENCL_C_VERSION__ < CL_VERSION_1_2
|
222 |
+
#pragma OPENCL EXTENSION cl_khr_fp64 : enable
|
223 |
+
#endif
|
224 |
+
typedef double double2 __attribute__((ext_vector_type(2)));
|
225 |
+
typedef double double3 __attribute__((ext_vector_type(3)));
|
226 |
+
typedef double double4 __attribute__((ext_vector_type(4)));
|
227 |
+
typedef double double8 __attribute__((ext_vector_type(8)));
|
228 |
+
typedef double double16 __attribute__((ext_vector_type(16)));
|
229 |
+
#endif
|
230 |
+
|
231 |
+
// An internal alias for half, for use by OpenCLBuiltins.td.
|
232 |
+
#define __half half
|
233 |
+
|
234 |
+
#if defined(__OPENCL_CPP_VERSION__)
|
235 |
+
#define NULL nullptr
|
236 |
+
#elif defined(__OPENCL_C_VERSION__)
|
237 |
+
#define NULL ((void*)0)
|
238 |
+
#endif
|
239 |
+
|
240 |
+
/**
|
241 |
+
* Value of maximum non-infinite single-precision floating-point
|
242 |
+
* number.
|
243 |
+
*/
|
244 |
+
#define MAXFLOAT 0x1.fffffep127f
|
245 |
+
|
246 |
+
/**
|
247 |
+
* A positive float constant expression. HUGE_VALF evaluates
|
248 |
+
* to +infinity. Used as an error value returned by the built-in
|
249 |
+
* math functions.
|
250 |
+
*/
|
251 |
+
#define HUGE_VALF (__builtin_huge_valf())
|
252 |
+
|
253 |
+
/**
|
254 |
+
* A positive double constant expression. HUGE_VAL evaluates
|
255 |
+
* to +infinity. Used as an error value returned by the built-in
|
256 |
+
* math functions.
|
257 |
+
*/
|
258 |
+
#define HUGE_VAL (__builtin_huge_val())
|
259 |
+
|
260 |
+
/**
|
261 |
+
* A constant expression of type float representing positive or
|
262 |
+
* unsigned infinity.
|
263 |
+
*/
|
264 |
+
#define INFINITY (__builtin_inff())
|
265 |
+
|
266 |
+
/**
|
267 |
+
* A constant expression of type float representing a quiet NaN.
|
268 |
+
*/
|
269 |
+
#define NAN as_float(INT_MAX)
|
270 |
+
|
271 |
+
#define FP_ILOGB0 INT_MIN
|
272 |
+
#define FP_ILOGBNAN INT_MAX
|
273 |
+
|
274 |
+
#define FLT_DIG 6
|
275 |
+
#define FLT_MANT_DIG 24
|
276 |
+
#define FLT_MAX_10_EXP +38
|
277 |
+
#define FLT_MAX_EXP +128
|
278 |
+
#define FLT_MIN_10_EXP -37
|
279 |
+
#define FLT_MIN_EXP -125
|
280 |
+
#define FLT_RADIX 2
|
281 |
+
#define FLT_MAX 0x1.fffffep127f
|
282 |
+
#define FLT_MIN 0x1.0p-126f
|
283 |
+
#define FLT_EPSILON 0x1.0p-23f
|
284 |
+
|
285 |
+
#define M_E_F 2.71828182845904523536028747135266250f
|
286 |
+
#define M_LOG2E_F 1.44269504088896340735992468100189214f
|
287 |
+
#define M_LOG10E_F 0.434294481903251827651128918916605082f
|
288 |
+
#define M_LN2_F 0.693147180559945309417232121458176568f
|
289 |
+
#define M_LN10_F 2.30258509299404568401799145468436421f
|
290 |
+
#define M_PI_F 3.14159265358979323846264338327950288f
|
291 |
+
#define M_PI_2_F 1.57079632679489661923132169163975144f
|
292 |
+
#define M_PI_4_F 0.785398163397448309615660845819875721f
|
293 |
+
#define M_1_PI_F 0.318309886183790671537767526745028724f
|
294 |
+
#define M_2_PI_F 0.636619772367581343075535053490057448f
|
295 |
+
#define M_2_SQRTPI_F 1.12837916709551257389615890312154517f
|
296 |
+
#define M_SQRT2_F 1.41421356237309504880168872420969808f
|
297 |
+
#define M_SQRT1_2_F 0.707106781186547524400844362104849039f
|
298 |
+
|
299 |
+
#define DBL_DIG 15
|
300 |
+
#define DBL_MANT_DIG 53
|
301 |
+
#define DBL_MAX_10_EXP +308
|
302 |
+
#define DBL_MAX_EXP +1024
|
303 |
+
#define DBL_MIN_10_EXP -307
|
304 |
+
#define DBL_MIN_EXP -1021
|
305 |
+
#define DBL_RADIX 2
|
306 |
+
#define DBL_MAX 0x1.fffffffffffffp1023
|
307 |
+
#define DBL_MIN 0x1.0p-1022
|
308 |
+
#define DBL_EPSILON 0x1.0p-52
|
309 |
+
|
310 |
+
#define M_E 0x1.5bf0a8b145769p+1
|
311 |
+
#define M_LOG2E 0x1.71547652b82fep+0
|
312 |
+
#define M_LOG10E 0x1.bcb7b1526e50ep-2
|
313 |
+
#define M_LN2 0x1.62e42fefa39efp-1
|
314 |
+
#define M_LN10 0x1.26bb1bbb55516p+1
|
315 |
+
#define M_PI 0x1.921fb54442d18p+1
|
316 |
+
#define M_PI_2 0x1.921fb54442d18p+0
|
317 |
+
#define M_PI_4 0x1.921fb54442d18p-1
|
318 |
+
#define M_1_PI 0x1.45f306dc9c883p-2
|
319 |
+
#define M_2_PI 0x1.45f306dc9c883p-1
|
320 |
+
#define M_2_SQRTPI 0x1.20dd750429b6dp+0
|
321 |
+
#define M_SQRT2 0x1.6a09e667f3bcdp+0
|
322 |
+
#define M_SQRT1_2 0x1.6a09e667f3bcdp-1
|
323 |
+
|
324 |
+
#ifdef cl_khr_fp16
|
325 |
+
|
326 |
+
#define HALF_DIG 3
|
327 |
+
#define HALF_MANT_DIG 11
|
328 |
+
#define HALF_MAX_10_EXP +4
|
329 |
+
#define HALF_MAX_EXP +16
|
330 |
+
#define HALF_MIN_10_EXP -4
|
331 |
+
#define HALF_MIN_EXP -13
|
332 |
+
#define HALF_RADIX 2
|
333 |
+
#define HALF_MAX ((0x1.ffcp15h))
|
334 |
+
#define HALF_MIN ((0x1.0p-14h))
|
335 |
+
#define HALF_EPSILON ((0x1.0p-10h))
|
336 |
+
|
337 |
+
#define M_E_H 2.71828182845904523536028747135266250h
|
338 |
+
#define M_LOG2E_H 1.44269504088896340735992468100189214h
|
339 |
+
#define M_LOG10E_H 0.434294481903251827651128918916605082h
|
340 |
+
#define M_LN2_H 0.693147180559945309417232121458176568h
|
341 |
+
#define M_LN10_H 2.30258509299404568401799145468436421h
|
342 |
+
#define M_PI_H 3.14159265358979323846264338327950288h
|
343 |
+
#define M_PI_2_H 1.57079632679489661923132169163975144h
|
344 |
+
#define M_PI_4_H 0.785398163397448309615660845819875721h
|
345 |
+
#define M_1_PI_H 0.318309886183790671537767526745028724h
|
346 |
+
#define M_2_PI_H 0.636619772367581343075535053490057448h
|
347 |
+
#define M_2_SQRTPI_H 1.12837916709551257389615890312154517h
|
348 |
+
#define M_SQRT2_H 1.41421356237309504880168872420969808h
|
349 |
+
#define M_SQRT1_2_H 0.707106781186547524400844362104849039h
|
350 |
+
|
351 |
+
#endif //cl_khr_fp16
|
352 |
+
|
353 |
+
#define CHAR_BIT 8
|
354 |
+
#define SCHAR_MAX 127
|
355 |
+
#define SCHAR_MIN (-128)
|
356 |
+
#define UCHAR_MAX 255
|
357 |
+
#define CHAR_MAX SCHAR_MAX
|
358 |
+
#define CHAR_MIN SCHAR_MIN
|
359 |
+
#define USHRT_MAX 65535
|
360 |
+
#define SHRT_MAX 32767
|
361 |
+
#define SHRT_MIN (-32768)
|
362 |
+
#define UINT_MAX 0xffffffff
|
363 |
+
#define INT_MAX 2147483647
|
364 |
+
#define INT_MIN (-2147483647-1)
|
365 |
+
#define ULONG_MAX 0xffffffffffffffffUL
|
366 |
+
#define LONG_MAX 0x7fffffffffffffffL
|
367 |
+
#define LONG_MIN (-0x7fffffffffffffffL-1)
|
368 |
+
|
369 |
+
// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
|
370 |
+
|
371 |
+
// Flag type and values for barrier, mem_fence, read_mem_fence, write_mem_fence
|
372 |
+
typedef uint cl_mem_fence_flags;
|
373 |
+
|
374 |
+
/**
|
375 |
+
* Queue a memory fence to ensure correct
|
376 |
+
* ordering of memory operations to local memory
|
377 |
+
*/
|
378 |
+
#define CLK_LOCAL_MEM_FENCE 0x01
|
379 |
+
|
380 |
+
/**
|
381 |
+
* Queue a memory fence to ensure correct
|
382 |
+
* ordering of memory operations to global memory
|
383 |
+
*/
|
384 |
+
#define CLK_GLOBAL_MEM_FENCE 0x02
|
385 |
+
|
386 |
+
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
|
387 |
+
|
388 |
+
typedef enum memory_scope {
|
389 |
+
memory_scope_work_item = __OPENCL_MEMORY_SCOPE_WORK_ITEM,
|
390 |
+
memory_scope_work_group = __OPENCL_MEMORY_SCOPE_WORK_GROUP,
|
391 |
+
memory_scope_device = __OPENCL_MEMORY_SCOPE_DEVICE,
|
392 |
+
#if defined(__opencl_c_atomic_scope_all_devices)
|
393 |
+
memory_scope_all_svm_devices = __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES,
|
394 |
+
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
|
395 |
+
memory_scope_all_devices = memory_scope_all_svm_devices,
|
396 |
+
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
|
397 |
+
#endif // defined(__opencl_c_atomic_scope_all_devices)
|
398 |
+
/**
|
399 |
+
* Subgroups have different requirements on forward progress, so just test
|
400 |
+
* all the relevant macros.
|
401 |
+
* CL 3.0 sub-groups "they are not guaranteed to make independent forward progress"
|
402 |
+
* KHR subgroups "Subgroups within a workgroup are independent, make forward progress with respect to each other"
|
403 |
+
*/
|
404 |
+
#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
|
405 |
+
memory_scope_sub_group = __OPENCL_MEMORY_SCOPE_SUB_GROUP
|
406 |
+
#endif
|
407 |
+
} memory_scope;
|
408 |
+
|
409 |
+
/**
|
410 |
+
* Queue a memory fence to ensure correct ordering of memory
|
411 |
+
* operations between work-items of a work-group to
|
412 |
+
* image memory.
|
413 |
+
*/
|
414 |
+
#define CLK_IMAGE_MEM_FENCE 0x04
|
415 |
+
|
416 |
+
#ifndef ATOMIC_VAR_INIT
|
417 |
+
#define ATOMIC_VAR_INIT(x) (x)
|
418 |
+
#endif //ATOMIC_VAR_INIT
|
419 |
+
#define ATOMIC_FLAG_INIT 0
|
420 |
+
|
421 |
+
// enum values aligned with what clang uses in EmitAtomicExpr()
|
422 |
+
typedef enum memory_order
|
423 |
+
{
|
424 |
+
memory_order_relaxed = __ATOMIC_RELAXED,
|
425 |
+
memory_order_acquire = __ATOMIC_ACQUIRE,
|
426 |
+
memory_order_release = __ATOMIC_RELEASE,
|
427 |
+
memory_order_acq_rel = __ATOMIC_ACQ_REL,
|
428 |
+
#if defined(__opencl_c_atomic_order_seq_cst)
|
429 |
+
memory_order_seq_cst = __ATOMIC_SEQ_CST
|
430 |
+
#endif
|
431 |
+
} memory_order;
|
432 |
+
|
433 |
+
#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
|
434 |
+
|
435 |
+
// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
|
436 |
+
|
437 |
+
// These values need to match the runtime equivalent
|
438 |
+
//
|
439 |
+
// Addressing Mode.
|
440 |
+
//
|
441 |
+
#define CLK_ADDRESS_NONE 0
|
442 |
+
#define CLK_ADDRESS_CLAMP_TO_EDGE 2
|
443 |
+
#define CLK_ADDRESS_CLAMP 4
|
444 |
+
#define CLK_ADDRESS_REPEAT 6
|
445 |
+
#define CLK_ADDRESS_MIRRORED_REPEAT 8
|
446 |
+
|
447 |
+
//
|
448 |
+
// Coordination Normalization
|
449 |
+
//
|
450 |
+
#define CLK_NORMALIZED_COORDS_FALSE 0
|
451 |
+
#define CLK_NORMALIZED_COORDS_TRUE 1
|
452 |
+
|
453 |
+
//
|
454 |
+
// Filtering Mode.
|
455 |
+
//
|
456 |
+
#define CLK_FILTER_NEAREST 0x10
|
457 |
+
#define CLK_FILTER_LINEAR 0x20
|
458 |
+
|
459 |
+
#ifdef cl_khr_gl_msaa_sharing
|
460 |
+
#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
|
461 |
+
#endif //cl_khr_gl_msaa_sharing
|
462 |
+
|
463 |
+
//
|
464 |
+
// Channel Datatype.
|
465 |
+
//
|
466 |
+
#define CLK_SNORM_INT8 0x10D0
|
467 |
+
#define CLK_SNORM_INT16 0x10D1
|
468 |
+
#define CLK_UNORM_INT8 0x10D2
|
469 |
+
#define CLK_UNORM_INT16 0x10D3
|
470 |
+
#define CLK_UNORM_SHORT_565 0x10D4
|
471 |
+
#define CLK_UNORM_SHORT_555 0x10D5
|
472 |
+
#define CLK_UNORM_INT_101010 0x10D6
|
473 |
+
#define CLK_SIGNED_INT8 0x10D7
|
474 |
+
#define CLK_SIGNED_INT16 0x10D8
|
475 |
+
#define CLK_SIGNED_INT32 0x10D9
|
476 |
+
#define CLK_UNSIGNED_INT8 0x10DA
|
477 |
+
#define CLK_UNSIGNED_INT16 0x10DB
|
478 |
+
#define CLK_UNSIGNED_INT32 0x10DC
|
479 |
+
#define CLK_HALF_FLOAT 0x10DD
|
480 |
+
#define CLK_FLOAT 0x10DE
|
481 |
+
#define CLK_UNORM_INT24 0x10DF
|
482 |
+
#if __OPENCL_C_VERSION__ >= CL_VERSION_3_0
|
483 |
+
#define CLK_UNORM_INT_101010_2 0x10E0
|
484 |
+
#endif // __OPENCL_C_VERSION__ >= CL_VERSION_3_0
|
485 |
+
#ifdef __opencl_c_ext_image_raw10_raw12
|
486 |
+
#define CLK_UNSIGNED_INT_RAW10_EXT 0x10E3
|
487 |
+
#define CLK_UNSIGNED_INT_RAW12_EXT 0x10E4
|
488 |
+
#endif // __opencl_c_ext_image_raw10_raw12
|
489 |
+
|
490 |
+
// Channel order, numbering must be aligned with cl_channel_order in cl.h
|
491 |
+
//
|
492 |
+
#define CLK_R 0x10B0
|
493 |
+
#define CLK_A 0x10B1
|
494 |
+
#define CLK_RG 0x10B2
|
495 |
+
#define CLK_RA 0x10B3
|
496 |
+
#define CLK_RGB 0x10B4
|
497 |
+
#define CLK_RGBA 0x10B5
|
498 |
+
#define CLK_BGRA 0x10B6
|
499 |
+
#define CLK_ARGB 0x10B7
|
500 |
+
#define CLK_INTENSITY 0x10B8
|
501 |
+
#define CLK_LUMINANCE 0x10B9
|
502 |
+
#define CLK_Rx 0x10BA
|
503 |
+
#define CLK_RGx 0x10BB
|
504 |
+
#define CLK_RGBx 0x10BC
|
505 |
+
#define CLK_DEPTH 0x10BD
|
506 |
+
#define CLK_DEPTH_STENCIL 0x10BE
|
507 |
+
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
|
508 |
+
#define CLK_sRGB 0x10BF
|
509 |
+
#define CLK_sRGBx 0x10C0
|
510 |
+
#define CLK_sRGBA 0x10C1
|
511 |
+
#define CLK_sBGRA 0x10C2
|
512 |
+
#define CLK_ABGR 0x10C3
|
513 |
+
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
|
514 |
+
|
515 |
+
// OpenCL v2.0 s6.13.16 - Pipe Functions
|
516 |
+
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
|
517 |
+
#define CLK_NULL_RESERVE_ID (__builtin_astype(((void*)(__SIZE_MAX__)), reserve_id_t))
|
518 |
+
|
519 |
+
// OpenCL v2.0 s6.13.17 - Enqueue Kernels
|
520 |
+
#define CL_COMPLETE 0x0
|
521 |
+
#define CL_RUNNING 0x1
|
522 |
+
#define CL_SUBMITTED 0x2
|
523 |
+
#define CL_QUEUED 0x3
|
524 |
+
|
525 |
+
#define CLK_SUCCESS 0
|
526 |
+
#define CLK_ENQUEUE_FAILURE -101
|
527 |
+
#define CLK_INVALID_QUEUE -102
|
528 |
+
#define CLK_INVALID_NDRANGE -160
|
529 |
+
#define CLK_INVALID_EVENT_WAIT_LIST -57
|
530 |
+
#define CLK_DEVICE_QUEUE_FULL -161
|
531 |
+
#define CLK_INVALID_ARG_SIZE -51
|
532 |
+
#define CLK_EVENT_ALLOCATION_FAILURE -100
|
533 |
+
#define CLK_OUT_OF_RESOURCES -5
|
534 |
+
|
535 |
+
#define CLK_NULL_QUEUE 0
|
536 |
+
#define CLK_NULL_EVENT (__builtin_astype(((__SIZE_MAX__)), clk_event_t))
|
537 |
+
|
538 |
+
// execution model related definitions
|
539 |
+
#define CLK_ENQUEUE_FLAGS_NO_WAIT 0x0
|
540 |
+
#define CLK_ENQUEUE_FLAGS_WAIT_KERNEL 0x1
|
541 |
+
#define CLK_ENQUEUE_FLAGS_WAIT_WORK_GROUP 0x2
|
542 |
+
|
543 |
+
typedef int kernel_enqueue_flags_t;
|
544 |
+
typedef int clk_profiling_info;
|
545 |
+
|
546 |
+
// Profiling info name (see capture_event_profiling_info)
|
547 |
+
#define CLK_PROFILING_COMMAND_EXEC_TIME 0x1
|
548 |
+
|
549 |
+
#define MAX_WORK_DIM 3
|
550 |
+
|
551 |
+
#ifdef __opencl_c_device_enqueue
|
552 |
+
typedef struct {
|
553 |
+
unsigned int workDimension;
|
554 |
+
size_t globalWorkOffset[MAX_WORK_DIM];
|
555 |
+
size_t globalWorkSize[MAX_WORK_DIM];
|
556 |
+
size_t localWorkSize[MAX_WORK_DIM];
|
557 |
+
} ndrange_t;
|
558 |
+
#endif // __opencl_c_device_enqueue
|
+
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+
+/**
+ * OpenCL v1.1/1.2/2.0 s6.2.4.2 - as_type operators
+ * Reinterprets a data type as another data type of the same size
+ */
+#define as_char(x) __builtin_astype((x), char)
+#define as_char2(x) __builtin_astype((x), char2)
+#define as_char3(x) __builtin_astype((x), char3)
+#define as_char4(x) __builtin_astype((x), char4)
+#define as_char8(x) __builtin_astype((x), char8)
+#define as_char16(x) __builtin_astype((x), char16)
+
+#define as_uchar(x) __builtin_astype((x), uchar)
+#define as_uchar2(x) __builtin_astype((x), uchar2)
+#define as_uchar3(x) __builtin_astype((x), uchar3)
+#define as_uchar4(x) __builtin_astype((x), uchar4)
+#define as_uchar8(x) __builtin_astype((x), uchar8)
+#define as_uchar16(x) __builtin_astype((x), uchar16)
+
+#define as_short(x) __builtin_astype((x), short)
+#define as_short2(x) __builtin_astype((x), short2)
+#define as_short3(x) __builtin_astype((x), short3)
+#define as_short4(x) __builtin_astype((x), short4)
+#define as_short8(x) __builtin_astype((x), short8)
+#define as_short16(x) __builtin_astype((x), short16)
+
+#define as_ushort(x) __builtin_astype((x), ushort)
+#define as_ushort2(x) __builtin_astype((x), ushort2)
+#define as_ushort3(x) __builtin_astype((x), ushort3)
+#define as_ushort4(x) __builtin_astype((x), ushort4)
+#define as_ushort8(x) __builtin_astype((x), ushort8)
+#define as_ushort16(x) __builtin_astype((x), ushort16)
+
+#define as_int(x) __builtin_astype((x), int)
+#define as_int2(x) __builtin_astype((x), int2)
+#define as_int3(x) __builtin_astype((x), int3)
+#define as_int4(x) __builtin_astype((x), int4)
+#define as_int8(x) __builtin_astype((x), int8)
+#define as_int16(x) __builtin_astype((x), int16)
+
+#define as_uint(x) __builtin_astype((x), uint)
+#define as_uint2(x) __builtin_astype((x), uint2)
+#define as_uint3(x) __builtin_astype((x), uint3)
+#define as_uint4(x) __builtin_astype((x), uint4)
+#define as_uint8(x) __builtin_astype((x), uint8)
+#define as_uint16(x) __builtin_astype((x), uint16)
+
+#define as_long(x) __builtin_astype((x), long)
+#define as_long2(x) __builtin_astype((x), long2)
+#define as_long3(x) __builtin_astype((x), long3)
+#define as_long4(x) __builtin_astype((x), long4)
+#define as_long8(x) __builtin_astype((x), long8)
+#define as_long16(x) __builtin_astype((x), long16)
+
+#define as_ulong(x) __builtin_astype((x), ulong)
+#define as_ulong2(x) __builtin_astype((x), ulong2)
+#define as_ulong3(x) __builtin_astype((x), ulong3)
+#define as_ulong4(x) __builtin_astype((x), ulong4)
+#define as_ulong8(x) __builtin_astype((x), ulong8)
+#define as_ulong16(x) __builtin_astype((x), ulong16)
+
+#define as_float(x) __builtin_astype((x), float)
+#define as_float2(x) __builtin_astype((x), float2)
+#define as_float3(x) __builtin_astype((x), float3)
+#define as_float4(x) __builtin_astype((x), float4)
+#define as_float8(x) __builtin_astype((x), float8)
+#define as_float16(x) __builtin_astype((x), float16)
+
+#ifdef cl_khr_fp64
+#define as_double(x) __builtin_astype((x), double)
+#define as_double2(x) __builtin_astype((x), double2)
+#define as_double3(x) __builtin_astype((x), double3)
+#define as_double4(x) __builtin_astype((x), double4)
+#define as_double8(x) __builtin_astype((x), double8)
+#define as_double16(x) __builtin_astype((x), double16)
+#endif // cl_khr_fp64
+
+#ifdef cl_khr_fp16
+#define as_half(x) __builtin_astype((x), half)
+#define as_half2(x) __builtin_astype((x), half2)
+#define as_half3(x) __builtin_astype((x), half3)
+#define as_half4(x) __builtin_astype((x), half4)
+#define as_half8(x) __builtin_astype((x), half8)
+#define as_half16(x) __builtin_astype((x), half16)
+#endif // cl_khr_fp16
+
+#define as_size_t(x) __builtin_astype((x), size_t)
+#define as_ptrdiff_t(x) __builtin_astype((x), ptrdiff_t)
+#define as_intptr_t(x) __builtin_astype((x), intptr_t)
+#define as_uintptr_t(x) __builtin_astype((x), uintptr_t)
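For illustration (not part of the header): as_type is a bit-level reinterpretation, not a value conversion, so source and destination must have the same size. A minimal sketch:

  float f = 1.0f;
  uint bits = as_uint(f);             // 0x3F800000: the IEEE-754 bit pattern, unlike (uint)f == 1
  uint4 u = as_uint4((float4)(1.0f)); // vector forms reinterpret element-wise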
+
+// C++ for OpenCL - __remove_address_space
+#if defined(__OPENCL_CPP_VERSION__)
+template <typename _Tp> struct __remove_address_space { using type = _Tp; };
+#if defined(__opencl_c_generic_address_space)
+template <typename _Tp> struct __remove_address_space<__generic _Tp> {
+  using type = _Tp;
+};
+#endif
+template <typename _Tp> struct __remove_address_space<__global _Tp> {
+  using type = _Tp;
+};
+template <typename _Tp> struct __remove_address_space<__private _Tp> {
+  using type = _Tp;
+};
+template <typename _Tp> struct __remove_address_space<__local _Tp> {
+  using type = _Tp;
+};
+template <typename _Tp> struct __remove_address_space<__constant _Tp> {
+  using type = _Tp;
+};
+#endif
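For illustration (not part of the header): the trait strips the address-space qualifier from a type, which makes address-space-agnostic templates possible in C++ for OpenCL. A minimal sketch:

  // __remove_address_space<__global int>::type is plain int.
  template <typename T> void store_default(T *p) {
    using value_t = typename __remove_address_space<T>::type;
    *p = value_t{}; // value-initialize through any address space
  }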
+
+// OpenCL v1.1 s6.9, v1.2/2.0 s6.10 - Function qualifiers
+
+#define __kernel_exec(X, typen) __kernel \
+  __attribute__((work_group_size_hint(X, 1, 1))) \
+  __attribute__((vec_type_hint(typen)))
+
+#define kernel_exec(X, typen) __kernel \
+  __attribute__((work_group_size_hint(X, 1, 1))) \
+  __attribute__((vec_type_hint(typen)))
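For illustration (not part of the header), kernel_exec merely bundles two optimization hints onto a kernel declaration:

  // Hints that the kernel is usually launched with 64 work-items per group
  // and computes mostly on float4 values; the compiler may use either hint.
  kernel_exec(64, float4) void scale(global float4 *v, float s) {
    v[get_global_id(0)] *= s;
  }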
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf
+
+int printf(__constant const char* st, ...) __attribute__((format(printf, 1, 2)));
+#endif
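For illustration (not part of the header): the format string must live in the __constant address space, which a string literal satisfies, and the format attribute lets the compiler type-check the variadic arguments:

  kernel void dump_first(global const int *in) {
    if (get_global_id(0) == 0)
      printf("in[0] = %d\n", in[0]);
  }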
+
+#ifdef cl_intel_device_side_avc_motion_estimation
+
+#define CLK_AVC_ME_MAJOR_16x16_INTEL 0x0
+#define CLK_AVC_ME_MAJOR_16x8_INTEL 0x1
+#define CLK_AVC_ME_MAJOR_8x16_INTEL 0x2
+#define CLK_AVC_ME_MAJOR_8x8_INTEL 0x3
+
+#define CLK_AVC_ME_MINOR_8x8_INTEL 0x0
+#define CLK_AVC_ME_MINOR_8x4_INTEL 0x1
+#define CLK_AVC_ME_MINOR_4x8_INTEL 0x2
+#define CLK_AVC_ME_MINOR_4x4_INTEL 0x3
+
+#define CLK_AVC_ME_MAJOR_FORWARD_INTEL 0x0
+#define CLK_AVC_ME_MAJOR_BACKWARD_INTEL 0x1
+#define CLK_AVC_ME_MAJOR_BIDIRECTIONAL_INTEL 0x2
+
+#define CLK_AVC_ME_PARTITION_MASK_ALL_INTEL 0x0
+#define CLK_AVC_ME_PARTITION_MASK_16x16_INTEL 0x7E
+#define CLK_AVC_ME_PARTITION_MASK_16x8_INTEL 0x7D
+#define CLK_AVC_ME_PARTITION_MASK_8x16_INTEL 0x7B
+#define CLK_AVC_ME_PARTITION_MASK_8x8_INTEL 0x77
+#define CLK_AVC_ME_PARTITION_MASK_8x4_INTEL 0x6F
+#define CLK_AVC_ME_PARTITION_MASK_4x8_INTEL 0x5F
+#define CLK_AVC_ME_PARTITION_MASK_4x4_INTEL 0x3F
+
+#define CLK_AVC_ME_SLICE_TYPE_PRED_INTEL 0x0
+#define CLK_AVC_ME_SLICE_TYPE_BPRED_INTEL 0x1
+#define CLK_AVC_ME_SLICE_TYPE_INTRA_INTEL 0x2
+
+#define CLK_AVC_ME_SEARCH_WINDOW_EXHAUSTIVE_INTEL 0x0
+#define CLK_AVC_ME_SEARCH_WINDOW_SMALL_INTEL 0x1
+#define CLK_AVC_ME_SEARCH_WINDOW_TINY_INTEL 0x2
+#define CLK_AVC_ME_SEARCH_WINDOW_EXTRA_TINY_INTEL 0x3
+#define CLK_AVC_ME_SEARCH_WINDOW_DIAMOND_INTEL 0x4
+#define CLK_AVC_ME_SEARCH_WINDOW_LARGE_DIAMOND_INTEL 0x5
+#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED0_INTEL 0x6
+#define CLK_AVC_ME_SEARCH_WINDOW_RESERVED1_INTEL 0x7
+#define CLK_AVC_ME_SEARCH_WINDOW_CUSTOM_INTEL 0x8
+
+#define CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL 0x0
+#define CLK_AVC_ME_SAD_ADJUST_MODE_HAAR_INTEL 0x2
+
+#define CLK_AVC_ME_SUBPIXEL_MODE_INTEGER_INTEL 0x0
+#define CLK_AVC_ME_SUBPIXEL_MODE_HPEL_INTEL 0x1
+#define CLK_AVC_ME_SUBPIXEL_MODE_QPEL_INTEL 0x3
+
+#define CLK_AVC_ME_COST_PRECISION_QPEL_INTEL 0x0
+#define CLK_AVC_ME_COST_PRECISION_HPEL_INTEL 0x1
+#define CLK_AVC_ME_COST_PRECISION_PEL_INTEL 0x2
+#define CLK_AVC_ME_COST_PRECISION_DPEL_INTEL 0x3
+
+#define CLK_AVC_ME_BIDIR_WEIGHT_QUARTER_INTEL 0x10
+#define CLK_AVC_ME_BIDIR_WEIGHT_THIRD_INTEL 0x15
+#define CLK_AVC_ME_BIDIR_WEIGHT_HALF_INTEL 0x20
+#define CLK_AVC_ME_BIDIR_WEIGHT_TWO_THIRD_INTEL 0x2B
+#define CLK_AVC_ME_BIDIR_WEIGHT_THREE_QUARTER_INTEL 0x30
+
+#define CLK_AVC_ME_BORDER_REACHED_LEFT_INTEL 0x0
+#define CLK_AVC_ME_BORDER_REACHED_RIGHT_INTEL 0x2
+#define CLK_AVC_ME_BORDER_REACHED_TOP_INTEL 0x4
+#define CLK_AVC_ME_BORDER_REACHED_BOTTOM_INTEL 0x8
+
+#define CLK_AVC_ME_INTRA_16x16_INTEL 0x0
+#define CLK_AVC_ME_INTRA_8x8_INTEL 0x1
+#define CLK_AVC_ME_INTRA_4x4_INTEL 0x2
+
+#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_16x16_INTEL 0x0
+#define CLK_AVC_ME_SKIP_BLOCK_PARTITION_8x8_INTEL 0x4000
+
+#define CLK_AVC_ME_SKIP_BLOCK_16x16_FORWARD_ENABLE_INTEL (0x1 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_16x16_BACKWARD_ENABLE_INTEL (0x2 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_16x16_DUAL_ENABLE_INTEL (0x3 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_FORWARD_ENABLE_INTEL (0x55 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_BACKWARD_ENABLE_INTEL (0xAA << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_DUAL_ENABLE_INTEL (0xFF << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_FORWARD_ENABLE_INTEL (0x1 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_0_BACKWARD_ENABLE_INTEL (0x2 << 24)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_FORWARD_ENABLE_INTEL (0x1 << 26)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_1_BACKWARD_ENABLE_INTEL (0x2 << 26)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_FORWARD_ENABLE_INTEL (0x1 << 28)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_2_BACKWARD_ENABLE_INTEL (0x2 << 28)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_FORWARD_ENABLE_INTEL (0x1 << 30)
+#define CLK_AVC_ME_SKIP_BLOCK_8x8_3_BACKWARD_ENABLE_INTEL (0x2 << 30)
+
+#define CLK_AVC_ME_BLOCK_BASED_SKIP_4x4_INTEL 0x00
+#define CLK_AVC_ME_BLOCK_BASED_SKIP_8x8_INTEL 0x80
+
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_ALL_INTEL 0x0
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_16x16_INTEL 0x6
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_8x8_INTEL 0x5
+#define CLK_AVC_ME_INTRA_LUMA_PARTITION_MASK_4x4_INTEL 0x3
+
+#define CLK_AVC_ME_INTRA_NEIGHBOR_LEFT_MASK_ENABLE_INTEL 0x60
+#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_MASK_ENABLE_INTEL 0x10
+#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_RIGHT_MASK_ENABLE_INTEL 0x8
+#define CLK_AVC_ME_INTRA_NEIGHBOR_UPPER_LEFT_MASK_ENABLE_INTEL 0x4
+
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_INTEL 0x0
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DC_INTEL 0x2
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_LEFT_INTEL 0x3
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_DIAGONAL_DOWN_RIGHT_INTEL 0x4
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_PLANE_INTEL 0x4
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_RIGHT_INTEL 0x5
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_DOWN_INTEL 0x6
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_VERTICAL_LEFT_INTEL 0x7
+#define CLK_AVC_ME_LUMA_PREDICTOR_MODE_HORIZONTAL_UP_INTEL 0x8
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_DC_INTEL 0x0
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_HORIZONTAL_INTEL 0x1
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_VERTICAL_INTEL 0x2
+#define CLK_AVC_ME_CHROMA_PREDICTOR_MODE_PLANE_INTEL 0x3
+
+#define CLK_AVC_ME_FRAME_FORWARD_INTEL 0x1
+#define CLK_AVC_ME_FRAME_BACKWARD_INTEL 0x2
+#define CLK_AVC_ME_FRAME_DUAL_INTEL 0x3
+
+#define CLK_AVC_ME_INTERLACED_SCAN_TOP_FIELD_INTEL 0x0
+#define CLK_AVC_ME_INTERLACED_SCAN_BOTTOM_FIELD_INTEL 0x1
+
+#define CLK_AVC_ME_INITIALIZE_INTEL 0x0
+
+#define CLK_AVC_IME_PAYLOAD_INITIALIZE_INTEL 0x0
+#define CLK_AVC_REF_PAYLOAD_INITIALIZE_INTEL 0x0
+#define CLK_AVC_SIC_PAYLOAD_INITIALIZE_INTEL 0x0
+
+#define CLK_AVC_IME_RESULT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_REF_RESULT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_SIC_RESULT_INITIALIZE_INTEL 0x0
+
+#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_IME_RESULT_SINGLE_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
+#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
+#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
+
+#endif // cl_intel_device_side_avc_motion_estimation
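Only a hedged sketch (not part of the header): these values parameterize the intel_sub_group_avc_* builtins that the cl_intel_device_side_avc_motion_estimation extension specification defines. Assuming that spec's intel_sub_group_avc_ime_initialize builtin and a hypothetical macroblock origin (mb_x, mb_y):

  ushort2 src_coord = (ushort2)(mb_x * 16, mb_y * 16); // top-left pixel of the macroblock
  intel_sub_group_avc_ime_payload_t payload = intel_sub_group_avc_ime_initialize(
      src_coord, CLK_AVC_ME_PARTITION_MASK_ALL_INTEL,
      CLK_AVC_ME_SAD_ADJUST_MODE_NONE_INTEL);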
+
+// Disable any extensions we may have enabled previously.
+#pragma OPENCL EXTENSION all : disable
+
+#endif //_OPENCL_BASE_H_
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/openmp_wrappers/cmath
ADDED
@@ -0,0 +1,132 @@
+/*===-- __clang_openmp_device_functions.h - OpenMP math declares -*- c++ -*-===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_CMATH_H__
+#define __CLANG_OPENMP_CMATH_H__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#include_next <cmath>
+
+// Make sure we include our math.h overlay; it probably happened already, but
+// we need to be sure.
+#include <math.h>
+
+// We (might) need cstdlib because __clang_cuda_cmath.h below declares `abs`,
+// which might live in cstdlib.
+#include <cstdlib>
+
+// We need limits because __clang_cuda_cmath.h below uses `std::numeric_limits`.
+#include <limits>
+
+#pragma omp begin declare variant match( \
+    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any, allow_templates)})
+
+#define __CUDA__
+#define __OPENMP_NVPTX__
+#include <__clang_cuda_cmath.h>
+#undef __OPENMP_NVPTX__
+#undef __CUDA__
+
+// Overloads not provided by the CUDA wrappers but by the CUDA system headers.
+// Since we do not include the latter we define them ourselves.
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
+
+__DEVICE__ float acosh(float __x) { return ::acoshf(__x); }
+__DEVICE__ float asinh(float __x) { return ::asinhf(__x); }
+__DEVICE__ float atanh(float __x) { return ::atanhf(__x); }
+__DEVICE__ float cbrt(float __x) { return ::cbrtf(__x); }
+__DEVICE__ float erf(float __x) { return ::erff(__x); }
+__DEVICE__ float erfc(float __x) { return ::erfcf(__x); }
+__DEVICE__ float exp2(float __x) { return ::exp2f(__x); }
+__DEVICE__ float expm1(float __x) { return ::expm1f(__x); }
+__DEVICE__ float fdim(float __x, float __y) { return ::fdimf(__x, __y); }
+__DEVICE__ float hypot(float __x, float __y) { return ::hypotf(__x, __y); }
+__DEVICE__ int ilogb(float __x) { return ::ilogbf(__x); }
+__DEVICE__ float lgamma(float __x) { return ::lgammaf(__x); }
+__DEVICE__ long long int llrint(float __x) { return ::llrintf(__x); }
+__DEVICE__ long long int llround(float __x) { return ::llroundf(__x); }
+__DEVICE__ float log1p(float __x) { return ::log1pf(__x); }
+__DEVICE__ float log2(float __x) { return ::log2f(__x); }
+__DEVICE__ float logb(float __x) { return ::logbf(__x); }
+__DEVICE__ long int lrint(float __x) { return ::lrintf(__x); }
+__DEVICE__ long int lround(float __x) { return ::lroundf(__x); }
+__DEVICE__ float nextafter(float __x, float __y) {
+  return ::nextafterf(__x, __y);
+}
+__DEVICE__ float remainder(float __x, float __y) {
+  return ::remainderf(__x, __y);
+}
+__DEVICE__ float scalbln(float __x, long int __y) {
+  return ::scalblnf(__x, __y);
+}
+__DEVICE__ float scalbn(float __x, int __y) { return ::scalbnf(__x, __y); }
+__DEVICE__ float tgamma(float __x) { return ::tgammaf(__x); }
+
+#undef __DEVICE__
+
+#pragma omp end declare variant
+
+#ifdef __AMDGCN__
+#pragma omp begin declare variant match(device = {arch(amdgcn)})
+
+#pragma push_macro("__constant__")
+#define __constant__ __attribute__((constant))
+#define __OPENMP_AMDGCN__
+
+#include <__clang_hip_cmath.h>
+
+#pragma pop_macro("__constant__")
+#undef __OPENMP_AMDGCN__
+
+// Define the overloads which are otherwise absent.
+#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
+
+__DEVICE__ float acos(float __x) { return ::acosf(__x); }
+__DEVICE__ float acosh(float __x) { return ::acoshf(__x); }
+__DEVICE__ float asin(float __x) { return ::asinf(__x); }
+__DEVICE__ float asinh(float __x) { return ::asinhf(__x); }
+__DEVICE__ float atan(float __x) { return ::atanf(__x); }
+__DEVICE__ float atan2(float __x, float __y) { return ::atan2f(__x, __y); }
+__DEVICE__ float atanh(float __x) { return ::atanhf(__x); }
+__DEVICE__ float cbrt(float __x) { return ::cbrtf(__x); }
+__DEVICE__ float cosh(float __x) { return ::coshf(__x); }
+__DEVICE__ float erf(float __x) { return ::erff(__x); }
+__DEVICE__ float erfc(float __x) { return ::erfcf(__x); }
+__DEVICE__ float exp2(float __x) { return ::exp2f(__x); }
+__DEVICE__ float expm1(float __x) { return ::expm1f(__x); }
+__DEVICE__ float fdim(float __x, float __y) { return ::fdimf(__x, __y); }
+__DEVICE__ float hypot(float __x, float __y) { return ::hypotf(__x, __y); }
+__DEVICE__ int ilogb(float __x) { return ::ilogbf(__x); }
+__DEVICE__ float ldexp(float __arg, int __exp) {
+  return ::ldexpf(__arg, __exp);
+}
+__DEVICE__ float lgamma(float __x) { return ::lgammaf(__x); }
+__DEVICE__ float log1p(float __x) { return ::log1pf(__x); }
+__DEVICE__ float logb(float __x) { return ::logbf(__x); }
+__DEVICE__ float nextafter(float __x, float __y) {
+  return ::nextafterf(__x, __y);
+}
+__DEVICE__ float remainder(float __x, float __y) {
+  return ::remainderf(__x, __y);
+}
+__DEVICE__ float scalbn(float __x, int __y) { return ::scalbnf(__x, __y); }
+__DEVICE__ float sinh(float __x) { return ::sinhf(__x); }
+__DEVICE__ float tan(float __x) { return ::tanf(__x); }
+__DEVICE__ float tanh(float __x) { return ::tanhf(__x); }
+__DEVICE__ float tgamma(float __x) { return ::tgammaf(__x); }
+
+#undef __DEVICE__
+
+#pragma omp end declare variant
+#endif // __AMDGCN__
+
+#endif
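For illustration (not part of the diff): with this wrapper on the include path, code inside an OpenMP target region can call the standard float overloads and have them resolve to the device variants above. A minimal sketch:

  #include <cmath>
  int main() {
    float out = 0.0f;
  #pragma omp target map(from : out)
    { out = hypot(3.0f, 4.0f); } // resolves to the float overload declared above
    return out == 5.0f ? 0 : 1;  // illustrative exact comparison
  }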
.cursor-server/data/User/globalStorage/llvm-vs-code-extensions.vscode-clangd/install/19.1.2/clangd_19.1.2/lib/clang/19/include/pkuintrin.h
ADDED
@@ -0,0 +1,34 @@
+/*===---- pkuintrin.h - PKU intrinsics -------------------------------------===
+ *
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error "Never use <pkuintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __PKUINTRIN_H
+#define __PKUINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("pku")))
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_rdpkru_u32(void)
+{
+  return __builtin_ia32_rdpkru();
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_wrpkru(unsigned int __val)
+{
+  __builtin_ia32_wrpkru(__val);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
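For illustration (not part of the header): PKRU holds two bits per protection key, access-disable (bit 2k) and write-disable (bit 2k+1). A minimal sketch, assuming the CPU supports PKU and that key 1 was obtained earlier, e.g. via the Linux pkey_alloc(2) syscall:

  #include <immintrin.h> // never include <pkuintrin.h> directly

  // Read PKRU, set the write-disable bit for protection key 1, write it back.
  static void deny_writes_for_pkey1(void) {
    unsigned int pkru = _rdpkru_u32();
    pkru |= 1u << (2 * 1 + 1); // WD bit for key 1
    _wrpkru(pkru);
  }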