610466.c
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <io/io_block.h>
#include <mmio.h>
#include <platform_def.h>
#include <sys/types.h>
#include <utils_def.h>

#include "uniphier.h"

#define MMC_CMD_SWITCH                  6
#define MMC_CMD_SELECT_CARD             7
#define MMC_CMD_SEND_CSD                9
#define MMC_CMD_READ_MULTIPLE_BLOCK     18

#define EXT_CSD_PART_CONF               179     /* R/W */

#define MMC_RSP_PRESENT BIT(0)
#define MMC_RSP_136     BIT(1)          /* 136 bit response */
#define MMC_RSP_CRC     BIT(2)          /* expect valid crc */
#define MMC_RSP_BUSY    BIT(3)          /* card may send busy */
#define MMC_RSP_OPCODE  BIT(4)          /* response contains opcode */

#define MMC_RSP_NONE    (0)
#define MMC_RSP_R1      (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_RSP_R1b     (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE | \
                         MMC_RSP_BUSY)
#define MMC_RSP_R2      (MMC_RSP_PRESENT | MMC_RSP_136 | MMC_RSP_CRC)
#define MMC_RSP_R3      (MMC_RSP_PRESENT)
#define MMC_RSP_R4      (MMC_RSP_PRESENT)
#define MMC_RSP_R5      (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_RSP_R6      (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_RSP_R7      (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)

#define SDHCI_DMA_ADDRESS       0x00
#define SDHCI_BLOCK_SIZE        0x04
#define  SDHCI_MAKE_BLKSZ(dma, blksz) ((((dma) & 0x7) << 12) | ((blksz) & 0xFFF))
#define SDHCI_BLOCK_COUNT       0x06
#define SDHCI_ARGUMENT          0x08
#define SDHCI_TRANSFER_MODE     0x0C
#define  SDHCI_TRNS_DMA         BIT(0)
#define  SDHCI_TRNS_BLK_CNT_EN  BIT(1)
#define  SDHCI_TRNS_ACMD12      BIT(2)
#define  SDHCI_TRNS_READ        BIT(4)
#define  SDHCI_TRNS_MULTI       BIT(5)
#define SDHCI_COMMAND           0x0E
#define  SDHCI_CMD_RESP_MASK    0x03
#define  SDHCI_CMD_CRC          0x08
#define  SDHCI_CMD_INDEX        0x10
#define  SDHCI_CMD_DATA         0x20
#define  SDHCI_CMD_ABORTCMD     0xC0
#define  SDHCI_CMD_RESP_NONE    0x00
#define  SDHCI_CMD_RESP_LONG    0x01
#define  SDHCI_CMD_RESP_SHORT   0x02
#define  SDHCI_CMD_RESP_SHORT_BUSY 0x03
#define  SDHCI_MAKE_CMD(c, f)   ((((c) & 0xff) << 8) | ((f) & 0xff))
#define SDHCI_RESPONSE          0x10
#define SDHCI_HOST_CONTROL      0x28
#define  SDHCI_CTRL_DMA_MASK    0x18
#define  SDHCI_CTRL_SDMA        0x00
#define SDHCI_BLOCK_GAP_CONTROL 0x2A
#define SDHCI_SOFTWARE_RESET    0x2F
#define  SDHCI_RESET_CMD        0x02
#define  SDHCI_RESET_DATA       0x04
#define SDHCI_INT_STATUS        0x30
#define  SDHCI_INT_RESPONSE     BIT(0)
#define  SDHCI_INT_DATA_END     BIT(1)
#define  SDHCI_INT_DMA_END      BIT(3)
#define  SDHCI_INT_ERROR        BIT(15)
#define SDHCI_SIGNAL_ENABLE     0x38

/* RCA assigned by Boot ROM */
#define UNIPHIER_EMMC_RCA       0x1000

struct uniphier_mmc_cmd {
    unsigned int cmdidx;
    unsigned int resp_type;
    unsigned int cmdarg;
    unsigned int is_data;
};

static int uniphier_emmc_block_addressing;

static int uniphier_emmc_send_cmd(uintptr_t host_base,
                                  struct uniphier_mmc_cmd *cmd)
{
    uint32_t mode = 0;
    uint32_t end_bit;
    uint32_t stat, flags, dma_addr;

    mmio_write_32(host_base + SDHCI_INT_STATUS, -1);
    mmio_write_32(host_base + SDHCI_SIGNAL_ENABLE, 0);
    mmio_write_32(host_base + SDHCI_ARGUMENT, cmd->cmdarg);

    if (cmd->is_data)
        mode = SDHCI_TRNS_DMA | SDHCI_TRNS_BLK_CNT_EN |
               SDHCI_TRNS_ACMD12 | SDHCI_TRNS_READ | SDHCI_TRNS_MULTI;

    mmio_write_16(host_base + SDHCI_TRANSFER_MODE, mode);

    if (!(cmd->resp_type & MMC_RSP_PRESENT))
        flags = SDHCI_CMD_RESP_NONE;
    else if (cmd->resp_type & MMC_RSP_136)
        flags = SDHCI_CMD_RESP_LONG;
    else if (cmd->resp_type & MMC_RSP_BUSY)
        flags = SDHCI_CMD_RESP_SHORT_BUSY;
    else
        flags = SDHCI_CMD_RESP_SHORT;

    if (cmd->resp_type & MMC_RSP_CRC)
        flags |= SDHCI_CMD_CRC;
    if (cmd->resp_type & MMC_RSP_OPCODE)
        flags |= SDHCI_CMD_INDEX;
    if (cmd->is_data)
        flags |= SDHCI_CMD_DATA;

    if (cmd->resp_type & MMC_RSP_BUSY || cmd->is_data)
        end_bit = SDHCI_INT_DATA_END;
    else
        end_bit = SDHCI_INT_RESPONSE;

    mmio_write_16(host_base + SDHCI_COMMAND,
                  SDHCI_MAKE_CMD(cmd->cmdidx, flags));

    do {
        stat = mmio_read_32(host_base + SDHCI_INT_STATUS);
        if (stat & SDHCI_INT_ERROR)
            return -EIO;

        if (stat & SDHCI_INT_DMA_END) {
            mmio_write_32(host_base + SDHCI_INT_STATUS, stat);
            dma_addr = mmio_read_32(host_base + SDHCI_DMA_ADDRESS);
            mmio_write_32(host_base + SDHCI_DMA_ADDRESS, dma_addr);
        }
    } while (!(stat & end_bit));

    return 0;
}

static int uniphier_emmc_switch_part(uintptr_t host_base, int part_num)
{
    struct uniphier_mmc_cmd cmd = {0};

    cmd.cmdidx = MMC_CMD_SWITCH;
    cmd.resp_type = MMC_RSP_R1b;
    cmd.cmdarg = (EXT_CSD_PART_CONF << 16) | (part_num << 8) | (3 << 24);

    return uniphier_emmc_send_cmd(host_base, &cmd);
}

static int uniphier_emmc_is_over_2gb(uintptr_t host_base)
{
    struct uniphier_mmc_cmd cmd = {0};
    uint32_t csd40, csd72;      /* CSD[71:40], CSD[103:72] */
    int ret;

    cmd.cmdidx = MMC_CMD_SEND_CSD;
    cmd.resp_type = MMC_RSP_R2;
    cmd.cmdarg = UNIPHIER_EMMC_RCA << 16;

    ret = uniphier_emmc_send_cmd(host_base, &cmd);
    if (ret)
        return ret;

    csd40 = mmio_read_32(host_base + SDHCI_RESPONSE + 4);
    csd72 = mmio_read_32(host_base + SDHCI_RESPONSE + 8);

    return !(~csd40 & 0xffc00380) && !(~csd72 & 0x3);
}

static int uniphier_emmc_load_image(uintptr_t host_base,
                                    uint32_t dev_addr,
                                    unsigned long load_addr,
                                    uint32_t block_cnt)
{
    struct uniphier_mmc_cmd cmd = {0};
    uint8_t tmp;

    assert((load_addr >> 32) == 0);

    mmio_write_32(host_base + SDHCI_DMA_ADDRESS, load_addr);
    mmio_write_16(host_base + SDHCI_BLOCK_SIZE, SDHCI_MAKE_BLKSZ(7, 512));
    mmio_write_16(host_base + SDHCI_BLOCK_COUNT, block_cnt);

    tmp = mmio_read_8(host_base + SDHCI_HOST_CONTROL);
    tmp &= ~SDHCI_CTRL_DMA_MASK;
    tmp |= SDHCI_CTRL_SDMA;
    mmio_write_8(host_base + SDHCI_HOST_CONTROL, tmp);

    tmp = mmio_read_8(host_base + SDHCI_BLOCK_GAP_CONTROL);
    tmp &= ~1;          /* clear Stop At Block Gap Request */
    mmio_write_8(host_base + SDHCI_BLOCK_GAP_CONTROL, tmp);

    cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
    cmd.resp_type = MMC_RSP_R1;
    cmd.cmdarg = dev_addr;
    cmd.is_data = 1;

    return uniphier_emmc_send_cmd(host_base, &cmd);
}

static size_t uniphier_emmc_read(int lba, uintptr_t buf, size_t size)
{
    uintptr_t host_base = 0x5a000200;
    int ret;

    inv_dcache_range(buf, size);

    if (!uniphier_emmc_block_addressing)
        lba *= 512;

    ret = uniphier_emmc_load_image(host_base, lba, buf, size / 512);

    inv_dcache_range(buf, size);

    return ret ? 0 : size;
}

static const struct io_block_dev_spec uniphier_emmc_dev_spec = {
    .buffer = {
        .offset = UNIPHIER_BLOCK_BUF_BASE,
        .length = UNIPHIER_BLOCK_BUF_SIZE,
    },
    .ops = {
        .read = uniphier_emmc_read,
    },
    .block_size = 512,
};

static int uniphier_emmc_hw_init(void)
{
    uintptr_t host_base = 0x5a000200;
    struct uniphier_mmc_cmd cmd = {0};
    int ret;

    /*
     * deselect card before SEND_CSD command.
     * Do not check the return code.  It fails, but it is OK.
     */
    cmd.cmdidx = MMC_CMD_SELECT_CARD;
    cmd.resp_type = MMC_RSP_R1;

    uniphier_emmc_send_cmd(host_base, &cmd);    /* CMD7 (arg=0) */

    /* reset CMD Line */
    mmio_write_8(host_base + SDHCI_SOFTWARE_RESET,
                 SDHCI_RESET_CMD | SDHCI_RESET_DATA);
    while (mmio_read_8(host_base + SDHCI_SOFTWARE_RESET))
        ;

    ret = uniphier_emmc_is_over_2gb(host_base);
    if (ret < 0)
        return ret;

    uniphier_emmc_block_addressing = ret;

    cmd.cmdarg = UNIPHIER_EMMC_RCA << 16;

    /* select card again */
    ret = uniphier_emmc_send_cmd(host_base, &cmd);
    if (ret)
        return ret;

    /* switch to Boot Partition 1 */
    ret = uniphier_emmc_switch_part(host_base, 1);
    if (ret)
        return ret;

    return 0;
}

int uniphier_emmc_init(uintptr_t *block_dev_spec)
{
    int ret;

    ret = uniphier_emmc_hw_init();
    if (ret)
        return ret;

    *block_dev_spec = (uintptr_t)&uniphier_emmc_dev_spec;

    return 0;
}
637320.c
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE134_Uncontrolled_Format_String__wchar_t_connect_socket_vprintf_42.c
Label Definition File: CWE134_Uncontrolled_Format_String.vasinks.label.xml
Template File: sources-vasinks-42.tmpl.c
*/
/*
 * @description
 * CWE: 134 Uncontrolled Format String
 * BadSource: connect_socket Read data using a connect socket (client side)
 * GoodSource: Copy a fixed string into data
 * Sinks: vprintf
 *    GoodSink: vwprintf with a format string
 *    BadSink : vwprintf without a format string
 * Flow Variant: 42 Data flow: data returned from one function to another in the same source file
 *
 * */

#include <stdarg.h>
#include "std_testcase.h"

#ifdef _WIN32
# include <winsock2.h>
# include <windows.h>
# include <direct.h>
# pragma comment(lib, "ws2_32") /* include ws2_32.lib when linking */
# define CLOSE_SOCKET closesocket
# define PATH_SZ 100
#else /* NOT _WIN32 */
# define INVALID_SOCKET -1
# define SOCKET_ERROR -1
# define CLOSE_SOCKET close
# define SOCKET int
# define PATH_SZ PATH_MAX
#endif

#define TCP_PORT 27015

#ifndef OMITBAD

static wchar_t * bad_source(wchar_t * data)
{
    {
#ifdef _WIN32
        WSADATA wsa_data;
        int wsa_data_init = 0;
#endif
        int recv_rv;
        struct sockaddr_in s_in;
        wchar_t *replace;
        SOCKET connect_socket = INVALID_SOCKET;
        size_t data_len = wcslen(data);
        do
        {
#ifdef _WIN32
            if (WSAStartup(MAKEWORD(2,2), &wsa_data) != NO_ERROR) break;
            wsa_data_init = 1;
#endif
            connect_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
            if (connect_socket == INVALID_SOCKET) break;
            memset(&s_in, 0, sizeof(s_in));
            s_in.sin_family = AF_INET;
            s_in.sin_addr.s_addr = inet_addr("127.0.0.1");
            s_in.sin_port = htons(TCP_PORT);
            if (connect(connect_socket, (struct sockaddr*)&s_in, sizeof(s_in)) == SOCKET_ERROR) break;
            /* Abort on error or the connection was closed, make sure to recv one
             * less char than is in the recv_buf in order to append a terminator */
            recv_rv = recv(connect_socket, (char *)data+data_len, (int)(100-data_len-1), 0);
            if (recv_rv == SOCKET_ERROR || recv_rv == 0) break;
            /* Append null terminator */
            data[recv_rv] = L'\0';
            /* Eliminate CRLF */
            replace = wcschr(data, L'\r');
            if (replace) *replace = L'\0';
            replace = wcschr(data, L'\n');
            if (replace) *replace = L'\0';
        } while (0);
        if (connect_socket != INVALID_SOCKET) CLOSE_SOCKET(connect_socket);
#ifdef _WIN32
        if (wsa_data_init) WSACleanup();
#endif
    }
    return data;
}

static void bad_vasink(wchar_t * data, ...)
{
    {
        va_list args;
        va_start(args, data);
        /* POTENTIAL FLAW: Do not specify the format allowing a possible format string vulnerability */
        vwprintf(data, args);
        va_end(args);
    }
}

void CWE134_Uncontrolled_Format_String__wchar_t_connect_socket_vprintf_42_bad()
{
    wchar_t * data;
    wchar_t data_buf[100] = L"";
    data = data_buf;
    data = bad_source(data);
    bad_vasink(data, data);
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B uses the GoodSource with the BadSink */
static wchar_t * goodG2B_source(wchar_t * data)
{
    /* FIX: Use a fixed string that does not contain a format specifier */
    wcscpy(data, L"fixedstringtest");
    return data;
}

static void goodG2B_vasink(wchar_t * data, ...)
{
    {
        va_list args;
        va_start(args, data);
        /* POTENTIAL FLAW: Do not specify the format allowing a possible format string vulnerability */
        vwprintf(data, args);
        va_end(args);
    }
}

static void goodG2B()
{
    wchar_t * data;
    wchar_t data_buf[100] = L"";
    data = data_buf;
    data = goodG2B_source(data);
    goodG2B_vasink(data, data);
}

/* goodB2G uses the BadSource with the GoodSink */
static wchar_t * goodB2G_source(wchar_t * data)
{
    {
#ifdef _WIN32
        WSADATA wsa_data;
        int wsa_data_init = 0;
#endif
        int recv_rv;
        struct sockaddr_in s_in;
        wchar_t *replace;
        SOCKET connect_socket = INVALID_SOCKET;
        size_t data_len = wcslen(data);
        do
        {
#ifdef _WIN32
            if (WSAStartup(MAKEWORD(2,2), &wsa_data) != NO_ERROR) break;
            wsa_data_init = 1;
#endif
            connect_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
            if (connect_socket == INVALID_SOCKET) break;
            memset(&s_in, 0, sizeof(s_in));
            s_in.sin_family = AF_INET;
            s_in.sin_addr.s_addr = inet_addr("127.0.0.1");
            s_in.sin_port = htons(TCP_PORT);
            if (connect(connect_socket, (struct sockaddr*)&s_in, sizeof(s_in)) == SOCKET_ERROR) break;
            /* Abort on error or the connection was closed, make sure to recv one
             * less char than is in the recv_buf in order to append a terminator */
            recv_rv = recv(connect_socket, (char *)data+data_len, (int)(100-data_len-1), 0);
            if (recv_rv == SOCKET_ERROR || recv_rv == 0) break;
            /* Append null terminator */
            data[recv_rv] = L'\0';
            /* Eliminate CRLF */
            replace = wcschr(data, L'\r');
            if (replace) *replace = L'\0';
            replace = wcschr(data, L'\n');
            if (replace) *replace = L'\0';
        } while (0);
        if (connect_socket != INVALID_SOCKET) CLOSE_SOCKET(connect_socket);
#ifdef _WIN32
        if (wsa_data_init) WSACleanup();
#endif
    }
    return data;
}

static void goodB2G_vasink(wchar_t * data, ...)
{
    {
        va_list args;
        va_start(args, data);
        /* FIX: Specify the format disallowing a format string vulnerability */
        vwprintf(L"%s", args);
        va_end(args);
    }
}

static void goodB2G()
{
    wchar_t * data;
    wchar_t data_buf[100] = L"";
    data = data_buf;
    data = goodB2G_source(data);
    goodB2G_vasink(data, data);
}

void CWE134_Uncontrolled_Format_String__wchar_t_connect_socket_vprintf_42_good()
{
    goodG2B();
    goodB2G();
}

#endif /* OMITGOOD */

/* Below is the main(). It is only used when building this testcase on its own
   for testing or for building a binary to use in testing binary analysis tools.
   It is not used when compiling all the testcases as one application, which is
   how source code analysis tools are tested. */
#ifdef INCLUDEMAIN
int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    CWE134_Uncontrolled_Format_String__wchar_t_connect_socket_vprintf_42_good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    CWE134_Uncontrolled_Format_String__wchar_t_connect_socket_vprintf_42_bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}
#endif
378679.c
#include <stdlib.h>
#include <stdio.h>
#include <math.h>

/* Quadratic-function calculator (prompts are in Portuguese): reads the
 * coefficients a, b, c and a value x, then evaluates y = a*x^2 + b*x + c. */
int main(){
    double a,b,c,x,y;

    printf("\nCalculadora de funcao de segundo grau");
    printf("\n(Considere a equacao como ax2 + bx + c = y)");

    printf("\nDigite o (a) da equacao: ");
    scanf("%lf", &a);
    printf("\nDigite o (b) da equacao: ");
    scanf("%lf", &b);
    printf("\nDigite o (c) da equacao: ");
    scanf("%lf", &c);
    printf("\nDigite o (x) da equacao: ");
    scanf("%lf", &x);

    y = a*pow(x,2) + b*x + c;
    printf("\nPara essa equacao o Y e %f\n", y);

    return 0;
}
477172.c
/* * Copyright 2012 Cisco Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/errno.h> #include <linux/debugfs.h> #include "fnic.h" static struct dentry *fnic_trace_debugfs_root; static struct dentry *fnic_trace_debugfs_file; static struct dentry *fnic_trace_enable; static struct dentry *fnic_stats_debugfs_root; static struct dentry *fnic_fc_trace_debugfs_file; static struct dentry *fnic_fc_rdata_trace_debugfs_file; static struct dentry *fnic_fc_trace_enable; static struct dentry *fnic_fc_trace_clear; struct fc_trace_flag_type { u8 fc_row_file; u8 fc_normal_file; u8 fnic_trace; u8 fc_trace; u8 fc_clear; }; static struct fc_trace_flag_type *fc_trc_flag; /* * fnic_debugfs_init - Initialize debugfs for fnic debug logging * * Description: * When Debugfs is configured this routine sets up the fnic debugfs * file system. If not already created, this routine will create the * fnic directory and statistics directory for trace buffer and * stats logging. */ int fnic_debugfs_init(void) { int rc = -1; fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL); if (!fnic_trace_debugfs_root) { printk(KERN_DEBUG "Cannot create debugfs root\n"); return rc; } if (!fnic_trace_debugfs_root) { printk(KERN_DEBUG "fnic root directory doesn't exist in debugfs\n"); return rc; } fnic_stats_debugfs_root = debugfs_create_dir("statistics", fnic_trace_debugfs_root); if (!fnic_stats_debugfs_root) { printk(KERN_DEBUG "Cannot create Statistics directory\n"); return rc; } /* Allocate memory to structure */ fc_trc_flag = (struct fc_trace_flag_type *) vmalloc(sizeof(struct fc_trace_flag_type)); if (fc_trc_flag) { fc_trc_flag->fc_row_file = 0; fc_trc_flag->fc_normal_file = 1; fc_trc_flag->fnic_trace = 2; fc_trc_flag->fc_trace = 3; fc_trc_flag->fc_clear = 4; } rc = 0; return rc; } /* * fnic_debugfs_terminate - Tear down debugfs infrastructure * * Description: * When Debugfs is configured this routine removes debugfs file system * elements that are specific to fnic. */ void fnic_debugfs_terminate(void) { debugfs_remove(fnic_stats_debugfs_root); fnic_stats_debugfs_root = NULL; debugfs_remove(fnic_trace_debugfs_root); fnic_trace_debugfs_root = NULL; if (fc_trc_flag) vfree(fc_trc_flag); } /* * fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace * Or Open fc_trace_enable file for fc_trace * @inode: The inode pointer. * @file: The file pointer to attach the trace enable/disable flag. * * Description: * This routine opens a debugsfs file trace_enable or fc_trace_enable. * * Returns: * This function returns zero if successful. */ static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp) { filp->private_data = inode->i_private; return 0; } /* * fnic_trace_ctrl_read - * Read trace_enable ,fc_trace_enable * or fc_trace_clear debugfs file * @filp: The file pointer to read from. 
* @ubuf: The buffer to copy the data to. * @cnt: The number of bytes to read. * @ppos: The position in the file to start reading from. * * Description: * This routine reads value of variable fnic_tracing_enabled or * fnic_fc_tracing_enabled or fnic_fc_trace_cleared * and stores into local @buf. * It will start reading file at @ppos and * copy up to @cnt of data to @ubuf from @buf. * * Returns: * This function returns the amount of data that was read. */ static ssize_t fnic_trace_ctrl_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; int len; u8 *trace_type; len = 0; trace_type = (u8 *)filp->private_data; if (*trace_type == fc_trc_flag->fnic_trace) len = sprintf(buf, "%u\n", fnic_tracing_enabled); else if (*trace_type == fc_trc_flag->fc_trace) len = sprintf(buf, "%u\n", fnic_fc_tracing_enabled); else if (*trace_type == fc_trc_flag->fc_clear) len = sprintf(buf, "%u\n", fnic_fc_trace_cleared); else pr_err("fnic: Cannot read to any debugfs file\n"); return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); } /* * fnic_trace_ctrl_write - * Write to trace_enable, fc_trace_enable or * fc_trace_clear debugfs file * @filp: The file pointer to write from. * @ubuf: The buffer to copy the data from. * @cnt: The number of bytes to write. * @ppos: The position in the file to start writing to. * * Description: * This routine writes data from user buffer @ubuf to buffer @buf and * sets fc_trace_enable ,tracing_enable or fnic_fc_trace_cleared * value as per user input. * * Returns: * This function returns the amount of data that was written. */ static ssize_t fnic_trace_ctrl_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; unsigned long val; int ret; u8 *trace_type; trace_type = (u8 *)filp->private_data; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; ret = kstrtoul(buf, 10, &val); if (ret < 0) return ret; if (*trace_type == fc_trc_flag->fnic_trace) fnic_tracing_enabled = val; else if (*trace_type == fc_trc_flag->fc_trace) fnic_fc_tracing_enabled = val; else if (*trace_type == fc_trc_flag->fc_clear) fnic_fc_trace_cleared = val; else pr_err("fnic: cannot write to any debufs file\n"); (*ppos)++; return cnt; } static const struct file_operations fnic_trace_ctrl_fops = { .owner = THIS_MODULE, .open = fnic_trace_ctrl_open, .read = fnic_trace_ctrl_read, .write = fnic_trace_ctrl_write, }; /* * fnic_trace_debugfs_open - Open the fnic trace log * @inode: The inode pointer * @file: The file pointer to attach the log output * * Description: * This routine is the entry point for the debugfs open file operation. * It allocates the necessary buffer for the log, fills the buffer from * the in-memory log and then returns a pointer to that log in * the private_data field in @file. * * Returns: * This function returns zero if successful. On error it will return * a negative error value. 
*/ static int fnic_trace_debugfs_open(struct inode *inode, struct file *file) { fnic_dbgfs_t *fnic_dbg_prt; u8 *rdata_ptr; rdata_ptr = (u8 *)inode->i_private; fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL); if (!fnic_dbg_prt) return -ENOMEM; if (*rdata_ptr == fc_trc_flag->fnic_trace) { fnic_dbg_prt->buffer = vmalloc(3 * (trace_max_pages * PAGE_SIZE)); if (!fnic_dbg_prt->buffer) { kfree(fnic_dbg_prt); return -ENOMEM; } memset((void *)fnic_dbg_prt->buffer, 0, 3 * (trace_max_pages * PAGE_SIZE)); fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt); } else { fnic_dbg_prt->buffer = vmalloc(3 * (fnic_fc_trace_max_pages * PAGE_SIZE)); if (!fnic_dbg_prt->buffer) { kfree(fnic_dbg_prt); return -ENOMEM; } memset((void *)fnic_dbg_prt->buffer, 0, 3 * (fnic_fc_trace_max_pages * PAGE_SIZE)); fnic_dbg_prt->buffer_len = fnic_fc_trace_get_data(fnic_dbg_prt, *rdata_ptr); } file->private_data = fnic_dbg_prt; return 0; } /* * fnic_trace_debugfs_lseek - Seek through a debugfs file * @file: The file pointer to seek through. * @offset: The offset to seek to or the amount to seek by. * @howto: Indicates how to seek. * * Description: * This routine is the entry point for the debugfs lseek file operation. * The @howto parameter indicates whether @offset is the offset to directly * seek to, or if it is a value to seek forward or reverse by. This function * figures out what the new offset of the debugfs file will be and assigns * that value to the f_pos field of @file. * * Returns: * This function returns the new offset if successful and returns a negative * error if unable to process the seek. */ static loff_t fnic_trace_debugfs_lseek(struct file *file, loff_t offset, int howto) { fnic_dbgfs_t *fnic_dbg_prt = file->private_data; return fixed_size_llseek(file, offset, howto, fnic_dbg_prt->buffer_len); } /* * fnic_trace_debugfs_read - Read a debugfs file * @file: The file pointer to read from. * @ubuf: The buffer to copy the data to. * @nbytes: The number of bytes to read. * @pos: The position in the file to start reading from. * * Description: * This routine reads data from the buffer indicated in the private_data * field of @file. It will start reading at @pos and copy up to @nbytes of * data to @ubuf. * * Returns: * This function returns the amount of data that was read (this could be * less than @nbytes if the end of the file was reached). */ static ssize_t fnic_trace_debugfs_read(struct file *file, char __user *ubuf, size_t nbytes, loff_t *pos) { fnic_dbgfs_t *fnic_dbg_prt = file->private_data; int rc = 0; rc = simple_read_from_buffer(ubuf, nbytes, pos, fnic_dbg_prt->buffer, fnic_dbg_prt->buffer_len); return rc; } /* * fnic_trace_debugfs_release - Release the buffer used to store * debugfs file data * @inode: The inode pointer * @file: The file pointer that contains the buffer to release * * Description: * This routine frees the buffer that was allocated when the debugfs * file was opened. * * Returns: * This function returns zero. 
*/ static int fnic_trace_debugfs_release(struct inode *inode, struct file *file) { fnic_dbgfs_t *fnic_dbg_prt = file->private_data; vfree(fnic_dbg_prt->buffer); kfree(fnic_dbg_prt); return 0; } static const struct file_operations fnic_trace_debugfs_fops = { .owner = THIS_MODULE, .open = fnic_trace_debugfs_open, .llseek = fnic_trace_debugfs_lseek, .read = fnic_trace_debugfs_read, .release = fnic_trace_debugfs_release, }; /* * fnic_trace_debugfs_init - Initialize debugfs for fnic trace logging * * Description: * When Debugfs is configured this routine sets up the fnic debugfs * file system. If not already created, this routine will create the * create file trace to log fnic trace buffer output into debugfs and * it will also create file trace_enable to control enable/disable of * trace logging into trace buffer. */ int fnic_trace_debugfs_init(void) { int rc = -1; if (!fnic_trace_debugfs_root) { printk(KERN_DEBUG "FNIC Debugfs root directory doesn't exist\n"); return rc; } fnic_trace_enable = debugfs_create_file("tracing_enable", S_IFREG|S_IRUGO|S_IWUSR, fnic_trace_debugfs_root, &(fc_trc_flag->fnic_trace), &fnic_trace_ctrl_fops); if (!fnic_trace_enable) { printk(KERN_DEBUG "Cannot create trace_enable file under debugfs\n"); return rc; } fnic_trace_debugfs_file = debugfs_create_file("trace", S_IFREG|S_IRUGO|S_IWUSR, fnic_trace_debugfs_root, &(fc_trc_flag->fnic_trace), &fnic_trace_debugfs_fops); if (!fnic_trace_debugfs_file) { printk(KERN_DEBUG "Cannot create trace file under debugfs\n"); return rc; } rc = 0; return rc; } /* * fnic_trace_debugfs_terminate - Tear down debugfs infrastructure * * Description: * When Debugfs is configured this routine removes debugfs file system * elements that are specific to fnic trace logging. */ void fnic_trace_debugfs_terminate(void) { debugfs_remove(fnic_trace_debugfs_file); fnic_trace_debugfs_file = NULL; debugfs_remove(fnic_trace_enable); fnic_trace_enable = NULL; } /* * fnic_fc_trace_debugfs_init - * Initialize debugfs for fnic control frame trace logging * * Description: * When Debugfs is configured this routine sets up the fnic_fc debugfs * file system. If not already created, this routine will create the * create file trace to log fnic fc trace buffer output into debugfs and * it will also create file fc_trace_enable to control enable/disable of * trace logging into trace buffer. 
*/ int fnic_fc_trace_debugfs_init(void) { int rc = -1; if (!fnic_trace_debugfs_root) { pr_err("fnic:Debugfs root directory doesn't exist\n"); return rc; } fnic_fc_trace_enable = debugfs_create_file("fc_trace_enable", S_IFREG|S_IRUGO|S_IWUSR, fnic_trace_debugfs_root, &(fc_trc_flag->fc_trace), &fnic_trace_ctrl_fops); if (!fnic_fc_trace_enable) { pr_err("fnic: Failed create fc_trace_enable file\n"); return rc; } fnic_fc_trace_clear = debugfs_create_file("fc_trace_clear", S_IFREG|S_IRUGO|S_IWUSR, fnic_trace_debugfs_root, &(fc_trc_flag->fc_clear), &fnic_trace_ctrl_fops); if (!fnic_fc_trace_clear) { pr_err("fnic: Failed to create fc_trace_enable file\n"); return rc; } fnic_fc_rdata_trace_debugfs_file = debugfs_create_file("fc_trace_rdata", S_IFREG|S_IRUGO|S_IWUSR, fnic_trace_debugfs_root, &(fc_trc_flag->fc_normal_file), &fnic_trace_debugfs_fops); if (!fnic_fc_rdata_trace_debugfs_file) { pr_err("fnic: Failed create fc_rdata_trace file\n"); return rc; } fnic_fc_trace_debugfs_file = debugfs_create_file("fc_trace", S_IFREG|S_IRUGO|S_IWUSR, fnic_trace_debugfs_root, &(fc_trc_flag->fc_row_file), &fnic_trace_debugfs_fops); if (!fnic_fc_trace_debugfs_file) { pr_err("fnic: Failed to create fc_trace file\n"); return rc; } rc = 0; return rc; } /* * fnic_fc_trace_debugfs_terminate - Tear down debugfs infrastructure * * Description: * When Debugfs is configured this routine removes debugfs file system * elements that are specific to fnic_fc trace logging. */ void fnic_fc_trace_debugfs_terminate(void) { debugfs_remove(fnic_fc_trace_debugfs_file); fnic_fc_trace_debugfs_file = NULL; debugfs_remove(fnic_fc_rdata_trace_debugfs_file); fnic_fc_rdata_trace_debugfs_file = NULL; debugfs_remove(fnic_fc_trace_enable); fnic_fc_trace_enable = NULL; debugfs_remove(fnic_fc_trace_clear); fnic_fc_trace_clear = NULL; } /* * fnic_reset_stats_open - Open the reset_stats file * @inode: The inode pointer. * @file: The file pointer to attach the stats reset flag. * * Description: * This routine opens a debugsfs file reset_stats and stores i_private data * to debug structure to retrieve later for while performing other * file oprations. * * Returns: * This function returns zero if successful. */ static int fnic_reset_stats_open(struct inode *inode, struct file *file) { struct stats_debug_info *debug; debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL); if (!debug) return -ENOMEM; debug->i_private = inode->i_private; file->private_data = debug; return 0; } /* * fnic_reset_stats_read - Read a reset_stats debugfs file * @filp: The file pointer to read from. * @ubuf: The buffer to copy the data to. * @cnt: The number of bytes to read. * @ppos: The position in the file to start reading from. * * Description: * This routine reads value of variable reset_stats * and stores into local @buf. It will start reading file at @ppos and * copy up to @cnt of data to @ubuf from @buf. * * Returns: * This function returns the amount of data that was read. */ static ssize_t fnic_reset_stats_read(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) { struct stats_debug_info *debug = file->private_data; struct fnic *fnic = (struct fnic *)debug->i_private; char buf[64]; int len; len = sprintf(buf, "%u\n", fnic->reset_stats); return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); } /* * fnic_reset_stats_write - Write to reset_stats debugfs file * @filp: The file pointer to write from. * @ubuf: The buffer to copy the data from. * @cnt: The number of bytes to write. * @ppos: The position in the file to start writing to. 
* * Description: * This routine writes data from user buffer @ubuf to buffer @buf and * resets cumulative stats of fnic. * * Returns: * This function returns the amount of data that was written. */ static ssize_t fnic_reset_stats_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct stats_debug_info *debug = file->private_data; struct fnic *fnic = (struct fnic *)debug->i_private; struct fnic_stats *stats = &fnic->fnic_stats; u64 *io_stats_p = (u64 *)&stats->io_stats; u64 *fw_stats_p = (u64 *)&stats->fw_stats; char buf[64]; unsigned long val; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(&buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; ret = kstrtoul(buf, 10, &val); if (ret < 0) return ret; fnic->reset_stats = val; if (fnic->reset_stats) { /* Skip variable is used to avoid descrepancies to Num IOs * and IO Completions stats. Skip incrementing No IO Compls * for pending active IOs after reset stats */ atomic64_set(&fnic->io_cmpl_skip, atomic64_read(&stats->io_stats.active_ios)); memset(&stats->abts_stats, 0, sizeof(struct abort_stats)); memset(&stats->term_stats, 0, sizeof(struct terminate_stats)); memset(&stats->reset_stats, 0, sizeof(struct reset_stats)); memset(&stats->misc_stats, 0, sizeof(struct misc_stats)); memset(&stats->vlan_stats, 0, sizeof(struct vlan_stats)); memset(io_stats_p+1, 0, sizeof(struct io_path_stats) - sizeof(u64)); memset(fw_stats_p+1, 0, sizeof(struct fw_stats) - sizeof(u64)); } (*ppos)++; return cnt; } /* * fnic_reset_stats_release - Release the buffer used to store * debugfs file data * @inode: The inode pointer * @file: The file pointer that contains the buffer to release * * Description: * This routine frees the buffer that was allocated when the debugfs * file was opened. * * Returns: * This function returns zero. */ static int fnic_reset_stats_release(struct inode *inode, struct file *file) { struct stats_debug_info *debug = file->private_data; kfree(debug); return 0; } /* * fnic_stats_debugfs_open - Open the stats file for specific host * and get fnic stats. * @inode: The inode pointer. * @file: The file pointer to attach the specific host statistics. * * Description: * This routine opens a debugsfs file stats of specific host and print * fnic stats. * * Returns: * This function returns zero if successful. */ static int fnic_stats_debugfs_open(struct inode *inode, struct file *file) { struct fnic *fnic = inode->i_private; struct fnic_stats *fnic_stats = &fnic->fnic_stats; struct stats_debug_info *debug; int buf_size = 2 * PAGE_SIZE; debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL); if (!debug) return -ENOMEM; debug->debug_buffer = vmalloc(buf_size); if (!debug->debug_buffer) { kfree(debug); return -ENOMEM; } debug->buf_size = buf_size; memset((void *)debug->debug_buffer, 0, buf_size); debug->buffer_len = fnic_get_stats_data(debug, fnic_stats); file->private_data = debug; return 0; } /* * fnic_stats_debugfs_read - Read a debugfs file * @file: The file pointer to read from. * @ubuf: The buffer to copy the data to. * @nbytes: The number of bytes to read. * @pos: The position in the file to start reading from. * * Description: * This routine reads data from the buffer indicated in the private_data * field of @file. It will start reading at @pos and copy up to @nbytes of * data to @ubuf. * * Returns: * This function returns the amount of data that was read (this could be * less than @nbytes if the end of the file was reached). 
*/ static ssize_t fnic_stats_debugfs_read(struct file *file, char __user *ubuf, size_t nbytes, loff_t *pos) { struct stats_debug_info *debug = file->private_data; int rc = 0; rc = simple_read_from_buffer(ubuf, nbytes, pos, debug->debug_buffer, debug->buffer_len); return rc; } /* * fnic_stats_stats_release - Release the buffer used to store * debugfs file data * @inode: The inode pointer * @file: The file pointer that contains the buffer to release * * Description: * This routine frees the buffer that was allocated when the debugfs * file was opened. * * Returns: * This function returns zero. */ static int fnic_stats_debugfs_release(struct inode *inode, struct file *file) { struct stats_debug_info *debug = file->private_data; vfree(debug->debug_buffer); kfree(debug); return 0; } static const struct file_operations fnic_stats_debugfs_fops = { .owner = THIS_MODULE, .open = fnic_stats_debugfs_open, .read = fnic_stats_debugfs_read, .release = fnic_stats_debugfs_release, }; static const struct file_operations fnic_reset_debugfs_fops = { .owner = THIS_MODULE, .open = fnic_reset_stats_open, .read = fnic_reset_stats_read, .write = fnic_reset_stats_write, .release = fnic_reset_stats_release, }; /* * fnic_stats_init - Initialize stats struct and create stats file per fnic * * Description: * When Debugfs is configured this routine sets up the stats file per fnic * It will create file stats and reset_stats under statistics/host# directory * to log per fnic stats. */ int fnic_stats_debugfs_init(struct fnic *fnic) { int rc = -1; char name[16]; snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no); if (!fnic_stats_debugfs_root) { printk(KERN_DEBUG "fnic_stats root doesn't exist\n"); return rc; } fnic->fnic_stats_debugfs_host = debugfs_create_dir(name, fnic_stats_debugfs_root); if (!fnic->fnic_stats_debugfs_host) { printk(KERN_DEBUG "Cannot create host directory\n"); return rc; } fnic->fnic_stats_debugfs_file = debugfs_create_file("stats", S_IFREG|S_IRUGO|S_IWUSR, fnic->fnic_stats_debugfs_host, fnic, &fnic_stats_debugfs_fops); if (!fnic->fnic_stats_debugfs_file) { printk(KERN_DEBUG "Cannot create host stats file\n"); return rc; } fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats", S_IFREG|S_IRUGO|S_IWUSR, fnic->fnic_stats_debugfs_host, fnic, &fnic_reset_debugfs_fops); if (!fnic->fnic_reset_debugfs_file) { printk(KERN_DEBUG "Cannot create host stats file\n"); return rc; } rc = 0; return rc; } /* * fnic_stats_debugfs_remove - Tear down debugfs infrastructure of stats * * Description: * When Debugfs is configured this routine removes debugfs file system * elements that are specific to fnic stats. */ void fnic_stats_debugfs_remove(struct fnic *fnic) { if (!fnic) return; debugfs_remove(fnic->fnic_stats_debugfs_file); fnic->fnic_stats_debugfs_file = NULL; debugfs_remove(fnic->fnic_reset_debugfs_file); fnic->fnic_reset_debugfs_file = NULL; debugfs_remove(fnic->fnic_stats_debugfs_host); fnic->fnic_stats_debugfs_host = NULL; }
450634.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* mpihelp-add_2.c  -  MPI helper functions
 * Copyright (C) 1994, 1996, 1997, 1998, 2001 Free Software Foundation, Inc.
 *
 * This file is part of GnuPG.
 *
 * Note: This code is heavily based on the GNU MP Library.
 *	 Actually it's the same code with only minor changes in the
 *	 way the data is stored; this is to support the abstraction
 *	 of an optional secure memory allocation which may be used
 *	 to avoid revealing of sensitive data due to paging etc.
 *	 The GNU MP Library itself is published under the LGPL;
 *	 however I decided to publish this code under the plain GPL.
 */

#include "mpi-internal.h"
#include "longlong.h"

mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr,
			 mpi_ptr_t s2_ptr, mpi_size_t size)
{
	mpi_limb_t x, y, cy;
	mpi_size_t j;

	/* The loop counter and index J goes from -SIZE to -1.  This way
	   the loop becomes faster.  */
	j = -size;

	/* Offset the base pointers to compensate for the negative indices. */
	s1_ptr -= j;
	s2_ptr -= j;
	res_ptr -= j;

	cy = 0;
	do {
		y = s2_ptr[j];
		x = s1_ptr[j];
		y += cy;	/* add previous carry to subtrahend */
		cy = y < cy;	/* get out carry from that addition */
		y = x - y;	/* main subtract */
		cy += y > x;	/* get out carry from the subtract, combine */
		res_ptr[j] = y;
	} while (++j);

	return cy;
}
559641.c
/*
 * QEMU NE2000 emulation -- isa bus windup
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw.h"
#include "pc.h"
#include "isa.h"
#include "qdev.h"
#include "net.h"
#include "ne2000.h"
#include "exec-memory.h"

typedef struct ISANE2000State {
    ISADevice dev;
    uint32_t iobase;
    uint32_t isairq;
    NE2000State ne2000;
} ISANE2000State;

static void isa_ne2000_cleanup(VLANClientState *nc)
{
    NE2000State *s = DO_UPCAST(NICState, nc, nc)->opaque;

    s->nic = NULL;
}

static NetClientInfo net_ne2000_isa_info = {
    .type = NET_CLIENT_TYPE_NIC,
    .size = sizeof(NICState),
    .can_receive = ne2000_can_receive,
    .receive = ne2000_receive,
    .cleanup = isa_ne2000_cleanup,
};

static const VMStateDescription vmstate_isa_ne2000 = {
    .name = "ne2000",
    .version_id = 2,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .fields = (VMStateField []) {
        VMSTATE_STRUCT(ne2000, ISANE2000State, 0, vmstate_ne2000, NE2000State),
        VMSTATE_END_OF_LIST()
    }
};

static int isa_ne2000_initfn(ISADevice *dev)
{
    ISANE2000State *isa = DO_UPCAST(ISANE2000State, dev, dev);
    NE2000State *s = &isa->ne2000;

    ne2000_setup_io(s, 0x20);
    isa_register_ioport(dev, &s->io, isa->iobase);

    isa_init_irq(dev, &s->irq, isa->isairq);

    qemu_macaddr_default_if_unset(&s->c.macaddr);
    ne2000_reset(s);

    s->nic = qemu_new_nic(&net_ne2000_isa_info, &s->c,
                          dev->qdev.info->name, dev->qdev.id, s);
    qemu_format_nic_info_str(&s->nic->nc, s->c.macaddr.a);

    return 0;
}

static ISADeviceInfo ne2000_isa_info = {
    .qdev.name  = "ne2k_isa",
    .qdev.size  = sizeof(ISANE2000State),
    .init       = isa_ne2000_initfn,
    .qdev.props = (Property[]) {
        DEFINE_PROP_HEX32("iobase", ISANE2000State, iobase, 0x300),
        DEFINE_PROP_UINT32("irq", ISANE2000State, isairq, 9),
        DEFINE_NIC_PROPERTIES(ISANE2000State, ne2000.c),
        DEFINE_PROP_END_OF_LIST(),
    },
};

static void ne2000_isa_register_devices(void)
{
    isa_qdev_register(&ne2000_isa_info);
}

device_init(ne2000_isa_register_devices)
719413.c
#include "Segment.h" #include "LKH.h" #include "Sequence.h" /* * The BestKOptMove function makes edge exchanges. If possible, it makes a * r-opt move (r >= 2) that improves the tour. Otherwise, it makes the most * promising sequential K-opt move that fulfils the positive gain criterion. * To prevent an infinity chain of moves the last edge in a K-opt move must * not previously have been included in the chain. * * The edge (t[1],t[2]) is the first edge to be exchanged. G0 is a pointer to * the accumulated gain. * * In case a K-opt move is found that improves the tour, the improvement of * the cost is made available to the caller through the parameter Gain. * If *Gain > 0, an improvement of the current tour has been found. In this * case the function returns 0. * * Otherwise, the best K-opt move is made, and a pointer to the node that was * connected to t[1] (in order to close the tour) is returned. The new * accumulated gain is made available to the caller through the parameter G0. * * The function is called from the LinKernighan function. */ static GainType BestG2; static GainType BestKOptMoveRec(int k, GainType G0); Node *BestKOptMove(Node * t1, Node * t2, GainType * G0, GainType * Gain) { K = Swaps == 0 ? MoveType : SubsequentMoveType; *Gain = 0; t[1] = t1; t[2] = t2; T[2 * K] = 0; BestG2 = MINUS_INFINITY; /* * Determine (T[3],T[4], ..., T[2K]) = (t[3],t[4], ..., t[2K]) * such that * * G[2 * K] = *G0 - C(t[2],T[3]) + C(T[3],T[4]) * - C(T[4],T[5]) + C(T[5],T[6]) * ... * - C(T[2K-3],T[2K-2]) + C(T[2K-1],T[2K]) * * is maximum, and (T[2K-1],T[2K]) has not previously been included. * If during this process a legal move with *Gain > 0 is found, then * make the move and exit BestKOptMove immediately. */ MarkDeleted(t1, t2); *Gain = BestKOptMoveRec(2, *G0); UnmarkDeleted(t1, t2); if (*Gain <= 0 && T[2 * K]) { int i; memcpy(t + 1, T + 1, 2 * K * sizeof(Node *)); for (i = 2; i < 2 * K; i += 2) incl[incl[i] = i + 1] = i; incl[incl[1] = 2 * K] = 1; MakeKOptMove(K); for (i = 1; i < 2 * K; i += 2) Exclude(T[i], T[i + 1]); *G0 = BestG2; return T[2 * K]; } return 0; } static GainType BestKOptMoveRec(int k, GainType G0) { Candidate *Nt2; Node *t1, *t2, *t3, *t4; GainType G1, G2, G3, Gain; int X4, i; int Breadth2 = 0; t1 = t[1]; t2 = t[i = 2 * k - 2]; incl[incl[i] = i + 1] = i; incl[incl[1] = i + 2] = 1; /* Choose (t2,t3) as a candidate edge emanating from t2 */ for (Nt2 = t2->CandidateSet; (t3 = Nt2->To); Nt2++) { if (t3 == t2->Pred || t3 == t2->Suc || ((G1 = G0 - Nt2->Cost) <= 0 && GainCriterionUsed && ProblemType != HCP && ProblemType != HPP) || Added(t2, t3)) continue; if (++Breadth2 > MaxBreadth) break; MarkAdded(t2, t3); t[2 * k - 1] = t3; G[2 * k - 2] = G1 + t3->Pi; /* Choose t4 as one of t3's two neighbors on the tour */ for (X4 = 1; X4 <= 2; X4++) { t4 = X4 == 1 ? 
PRED(t3) : SUC(t3); if (FixedOrCommon(t3, t4) || Deleted(t3, t4)) continue; t[2 * k] = t4; G2 = G1 + C(t3, t4); G3 = MINUS_INFINITY; if (t4 != t1 && !Forbidden(t4, t1) && !Added(t4, t1) && (!c || G2 - c(t4, t1) > 0) && (G3 = G2 - C(t4, t1)) > 0 && FeasibleKOptMove(k)) { UnmarkAdded(t2, t3); MakeKOptMove(k); return G3; } if (Backtracking && !Excludable(t3, t4)) continue; MarkDeleted(t3, t4); G[2 * k - 1] = G2 - t4->Pi; if (k < K) { if ((Gain = BestKOptMoveRec(k + 1, G2)) > 0) { UnmarkAdded(t2, t3); UnmarkDeleted(t3, t4); return Gain; } incl[incl[1] = 2 * k] = 1; } if (t4 != t1 && !Forbidden(t4, t1) && k + 1 < NonsequentialMoveType && PatchingC >= 2 && PatchingA >= 1 && (Swaps == 0 || SubsequentPatching)) { if (G3 == MINUS_INFINITY) G3 = G2 - C(t4, t1); if ((PatchingCRestricted ? G3 > 0 && IsCandidate(t4, t1) : PatchingCExtended ? G3 > 0 || IsCandidate(t4, t1) : G3 > 0) && (Gain = PatchCycles(k, G3)) > 0) { UnmarkAdded(t2, t3); UnmarkDeleted(t3, t4); return Gain; } } UnmarkDeleted(t3, t4); if (k == K && t4 != t1 && t3 != t1 && G3 <= 0 && !Added(t4, t1) && (!GainCriterionUsed || G2 - Precision >= t4->Cost)) { if (!Backtracking || Swaps > 0) { if ((G2 > BestG2 || (G2 == BestG2 && !Near(t3, t4) && Near(T[2 * K - 1], T[2 * K]))) && Swaps < MaxSwaps && Excludable(t3, t4) && !InInputTour(t3, t4)) { if (RestrictedSearch && K > 2 && ProblemType != HCP && ProblemType != HPP) { /* Ignore the move if the gain does not vary */ G[0] = G[2 * K - 2]; G[1] = G[2 * K - 1]; for (i = 2 * K - 3; i >= 2; i--) if (G[i] != G[i % 2]) break; if (i < 2) continue; } if (FeasibleKOptMove(K)) { BestG2 = G2; memcpy(T + 1, t + 1, 2 * K * sizeof(Node *)); } } } else if (MaxSwaps > 0 && FeasibleKOptMove(K)) { Node *SUCt1 = SUC(t1); MakeKOptMove(K); for (i = 1; i < 2 * k; i += 2) { Exclude(t[i], t[i + 1]); UnmarkDeleted(t[i], t[i + 1]); } for (i = 2; i < 2 * k; i += 2) UnmarkAdded(t[i], t[i + 1]); memcpy(tSaved + 1, t + 1, 2 * k * sizeof(Node *)); while ((t4 = BestSubsequentMove(t1, t4, &G2, &Gain))); if (Gain > 0) { UnmarkAdded(t2, t3); return Gain; } RestoreTour(); K = k; memcpy(t + 1, tSaved + 1, 2 * K * sizeof(Node *)); for (i = 1; i < 2 * K - 2; i += 2) MarkDeleted(t[i], t[i + 1]); for (i = 2; i < 2 * K; i += 2) MarkAdded(t[i], t[i + 1]); for (i = 2; i < 2 * K; i += 2) incl[incl[i] = i + 1] = i; incl[incl[1] = 2 * K] = 1; if (SUCt1 != SUC(t1)) Reversed ^= 1; T[2 * K] = 0; } } } UnmarkAdded(t2, t3); if (t3 == t1) continue; /* Try to delete an added edge, (_,t3) or (t3,_) */ for (i = 2 * k - 4; i >= 2; i--) { if (t3 == t[i]) { t4 = t[i ^ 1]; if (t4 == t1 || Forbidden(t4, t1) || FixedOrCommon(t3, t4) || Added(t4, t1)) continue; G2 = G1 + C(t3, t4); if ((!c || G2 - c(t4, t1) > 0) && (Gain = G2 - C(t4, t1)) > 0) { incl[incl[i ^ 1] = 1] = i ^ 1; incl[incl[i] = 2 * k - 2] = i; if (FeasibleKOptMove(k - 1)) { MakeKOptMove(k - 1); return Gain; } incl[incl[i ^ 1] = i] = i ^ 1; } } } incl[1] = 2 * k; incl[2 * k - 2] = 2 * k - 1; } return 0; }
497434.c
/****************************************************** Copyright (c) 2011-2013 Percona LLC and/or its affiliates. The xbstream utility: serialize/deserialize files in the XBSTREAM format. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *******************************************************/ #include <mysql_version.h> #include <my_base.h> #include <my_getopt.h> #include <hash.h> #include <my_pthread.h> #include "common.h" #include "xbstream.h" #include "datasink.h" #include "crc_glue.h" #define XBSTREAM_VERSION "1.0" #define XBSTREAM_BUFFER_SIZE (10 * 1024 * 1024UL) #define START_FILE_HASH_SIZE 16 typedef enum { RUN_MODE_NONE, RUN_MODE_CREATE, RUN_MODE_EXTRACT } run_mode_t; /* Need the following definitions to avoid linking with ds_*.o and their link dependencies */ datasink_t datasink_archive; datasink_t datasink_xbstream; datasink_t datasink_compress; datasink_t datasink_tmpfile; datasink_t datasink_buffer; static run_mode_t opt_mode; static char * opt_directory = NULL; static my_bool opt_verbose = 0; static int opt_parallel = 1; static struct my_option my_long_options[] = { {"help", '?', "Display this help and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"create", 'c', "Stream the specified files to the standard output.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"extract", 'x', "Extract to disk files from the stream on the " "standard input.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"directory", 'C', "Change the current directory to the specified one " "before streaming or extracting.", &opt_directory, &opt_directory, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"verbose", 'v', "Print verbose output.", &opt_verbose, &opt_verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"parallel", 'p', "Number of worker threads for reading / writing.", &opt_parallel, &opt_parallel, 0, GET_INT, REQUIRED_ARG, 1, 1, INT_MAX, 0, 0, 0}, {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; typedef struct { HASH *filehash; xb_rstream_t *stream; ds_ctxt_t *ds_ctxt; pthread_mutex_t *mutex; } extract_ctxt_t; typedef struct { char *path; uint pathlen; my_off_t offset; ds_file_t *file; pthread_mutex_t mutex; } file_entry_t; static int get_options(int *argc, char ***argv); static int mode_create(int argc, char **argv); static int mode_extract(int n_threads, int argc, char **argv); static my_bool get_one_option(int optid, const struct my_option *opt, char *argument); int main(int argc, char **argv) { MY_INIT(argv[0]); crc_init(); if (get_options(&argc, &argv)) { goto err; } if (opt_mode == RUN_MODE_NONE) { msg("%s: either -c or -x must be specified.\n", my_progname); goto err; } /* Change the current directory if -C is specified */ if (opt_directory && my_setwd(opt_directory, MYF(MY_WME))) { goto err; } if (opt_mode == RUN_MODE_CREATE && mode_create(argc, argv)) { goto err; } else if (opt_mode == RUN_MODE_EXTRACT && mode_extract(opt_parallel, argc, argv)) { goto err; } 
my_cleanup_options(my_long_options); my_end(0); return EXIT_SUCCESS; err: my_cleanup_options(my_long_options); my_end(0); exit(EXIT_FAILURE); } static int get_options(int *argc, char ***argv) { int ho_error; if ((ho_error= handle_options(argc, argv, my_long_options, get_one_option))) { exit(EXIT_FAILURE); } return 0; } static void print_version(void) { printf("%s Ver %s for %s (%s)\n", my_progname, XBSTREAM_VERSION, SYSTEM_TYPE, MACHINE_TYPE); } static void usage(void) { print_version(); puts("Copyright (C) 2011-2013 Percona LLC and/or its affiliates."); puts("This software comes with ABSOLUTELY NO WARRANTY. " "This is free software,\nand you are welcome to modify and " "redistribute it under the GPL license.\n"); puts("Serialize/deserialize files in the XBSTREAM format.\n"); puts("Usage: "); printf(" %s -c [OPTIONS...] FILES... # stream specified files to " "standard output.\n", my_progname); printf(" %s -x [OPTIONS...] # extract files from the stream" "on the standard input.\n", my_progname); puts("\nOptions:"); my_print_help(my_long_options); } static int set_run_mode(run_mode_t mode) { if (opt_mode != RUN_MODE_NONE) { msg("%s: can't set specify both -c and -x.\n", my_progname); return 1; } opt_mode = mode; return 0; } static my_bool get_one_option(int optid, const struct my_option *opt __attribute__((unused)), char *argument __attribute__((unused))) { switch (optid) { case 'c': if (set_run_mode(RUN_MODE_CREATE)) { return TRUE; } break; case 'x': if (set_run_mode(RUN_MODE_EXTRACT)) { return TRUE; } break; case '?': usage(); exit(0); } return FALSE; } static int stream_one_file(File file, xb_wstream_file_t *xbfile) { uchar *buf; ssize_t bytes; my_off_t offset; posix_fadvise(file, 0, 0, POSIX_FADV_SEQUENTIAL); offset = my_tell(file, MYF(MY_WME)); buf = (uchar*)(my_malloc(XBSTREAM_BUFFER_SIZE, MYF(MY_FAE))); while ((bytes = (ssize_t)my_read(file, buf, XBSTREAM_BUFFER_SIZE, MYF(MY_WME))) > 0) { if (xb_stream_write_data(xbfile, buf, bytes)) { msg("%s: xb_stream_write_data() failed.\n", my_progname); my_free(buf); return 1; } posix_fadvise(file, offset, XBSTREAM_BUFFER_SIZE, POSIX_FADV_DONTNEED); offset += XBSTREAM_BUFFER_SIZE; } my_free(buf); if (bytes < 0) { return 1; } return 0; } static int mode_create(int argc, char **argv) { int i; MY_STAT mystat; xb_wstream_t *stream; if (argc < 1) { msg("%s: no files are specified.\n", my_progname); return 1; } stream = xb_stream_write_new(); if (stream == NULL) { msg("%s: xb_stream_write_new() failed.\n", my_progname); return 1; } for (i = 0; i < argc; i++) { char *filepath = argv[i]; File src_file; xb_wstream_file_t *file; if (my_stat(filepath, &mystat, MYF(MY_WME)) == NULL) { goto err; } if (!MY_S_ISREG(mystat.st_mode)) { msg("%s: %s is not a regular file, exiting.\n", my_progname, filepath); goto err; } if ((src_file = my_open(filepath, O_RDONLY, MYF(MY_WME))) < 0) { msg("%s: failed to open %s.\n", my_progname, filepath); goto err; } file = xb_stream_write_open(stream, filepath, &mystat, NULL, NULL); if (file == NULL) { goto err; } if (opt_verbose) { msg("%s\n", filepath); } if (stream_one_file(src_file, file) || xb_stream_write_close(file) || my_close(src_file, MYF(MY_WME))) { goto err; } } xb_stream_write_done(stream); return 0; err: xb_stream_write_done(stream); return 1; } static file_entry_t * file_entry_new(extract_ctxt_t *ctxt, const char *path, uint pathlen) { file_entry_t *entry; ds_file_t *file; entry = (file_entry_t *) my_malloc(sizeof(file_entry_t), MYF(MY_WME | MY_ZEROFILL)); if (entry == NULL) { return NULL; } entry->path = 
my_strndup(path, pathlen, MYF(MY_WME)); if (entry->path == NULL) { goto err; } entry->pathlen = pathlen; file = ds_open(ctxt->ds_ctxt, path, NULL); if (file == NULL) { msg("%s: failed to create file.\n", my_progname); goto err; } if (opt_verbose) { msg("%s\n", entry->path); } entry->file = file; pthread_mutex_init(&entry->mutex, NULL); return entry; err: if (entry->path != NULL) { my_free(entry->path); } my_free(entry); return NULL; } static uchar * get_file_entry_key(file_entry_t *entry, size_t *length, my_bool not_used __attribute__((unused))) { *length = entry->pathlen; return (uchar *) entry->path; } static void file_entry_free(file_entry_t *entry) { pthread_mutex_destroy(&entry->mutex); ds_close(entry->file); my_free(entry->path); my_free(entry); } static void * extract_worker_thread_func(void *arg) { xb_rstream_chunk_t chunk; file_entry_t *entry; xb_rstream_result_t res; extract_ctxt_t *ctxt = (extract_ctxt_t *) arg; my_thread_init(); memset(&chunk, 0, sizeof(chunk)); while (1) { pthread_mutex_lock(ctxt->mutex); res = xb_stream_read_chunk(ctxt->stream, &chunk); if (res != XB_STREAM_READ_CHUNK) { pthread_mutex_unlock(ctxt->mutex); break; } /* If unknown type and ignorable flag is set, skip this chunk */ if (chunk.type == XB_CHUNK_TYPE_UNKNOWN && \ !(chunk.flags & XB_STREAM_FLAG_IGNORABLE)) { pthread_mutex_unlock(ctxt->mutex); continue; } /* See if we already have this file open */ entry = (file_entry_t *) my_hash_search(ctxt->filehash, (uchar *) chunk.path, chunk.pathlen); if (entry == NULL) { entry = file_entry_new(ctxt, chunk.path, chunk.pathlen); if (entry == NULL) { pthread_mutex_unlock(ctxt->mutex); break; } if (my_hash_insert(ctxt->filehash, (uchar *) entry)) { msg("%s: my_hash_insert() failed.\n", my_progname); pthread_mutex_unlock(ctxt->mutex); break; } } pthread_mutex_lock(&entry->mutex); pthread_mutex_unlock(ctxt->mutex); res = xb_stream_validate_checksum(&chunk); if (res != XB_STREAM_READ_CHUNK) { pthread_mutex_unlock(&entry->mutex); break; } if (chunk.type == XB_CHUNK_TYPE_EOF) { pthread_mutex_lock(ctxt->mutex); pthread_mutex_unlock(&entry->mutex); my_hash_delete(ctxt->filehash, (uchar *) entry); pthread_mutex_unlock(ctxt->mutex); continue; } if (entry->offset != chunk.offset) { msg("%s: out-of-order chunk: real offset = 0x%llx, " "expected offset = 0x%llx\n", my_progname, chunk.offset, entry->offset); pthread_mutex_unlock(&entry->mutex); res = XB_STREAM_READ_ERROR; break; } if (ds_write(entry->file, chunk.data, chunk.length)) { msg("%s: my_write() failed.\n", my_progname); pthread_mutex_unlock(&entry->mutex); res = XB_STREAM_READ_ERROR; break; } entry->offset += chunk.length; pthread_mutex_unlock(&entry->mutex); } if (chunk.data) my_free(chunk.data); my_thread_end(); return (void *)(res); } static int mode_extract(int n_threads, int argc __attribute__((unused)), char **argv __attribute__((unused))) { xb_rstream_t *stream = NULL; HASH filehash; ds_ctxt_t *ds_ctxt = NULL; extract_ctxt_t ctxt; int i; pthread_t *tids = NULL; void **retvals = NULL; pthread_mutex_t mutex; int ret = 0; if (my_hash_init(&filehash, &my_charset_bin, START_FILE_HASH_SIZE, 0, 0, (my_hash_get_key) get_file_entry_key, (my_hash_free_key) file_entry_free, MYF(0))) { msg("%s: failed to initialize file hash.\n", my_progname); return 1; } if (pthread_mutex_init(&mutex, NULL)) { msg("%s: failed to initialize mutex.\n", my_progname); my_hash_free(&filehash); return 1; } /* If --directory is specified, it is already set as CWD by now. 
*/ ds_ctxt = ds_create(".", DS_TYPE_LOCAL); if (ds_ctxt == NULL) { ret = 1; goto exit; } stream = xb_stream_read_new(); if (stream == NULL) { msg("%s: xb_stream_read_new() failed.\n", my_progname); pthread_mutex_destroy(&mutex); ret = 1; goto exit; } ctxt.stream = stream; ctxt.filehash = &filehash; ctxt.ds_ctxt = ds_ctxt; ctxt.mutex = &mutex; tids = calloc(n_threads, sizeof(pthread_t)); retvals = calloc(n_threads, sizeof(void*)); for (i = 0; i < n_threads; i++) pthread_create(tids + i, NULL, extract_worker_thread_func, &ctxt); for (i = 0; i < n_threads; i++) pthread_join(tids[i], retvals + i); for (i = 0; i < n_threads; i++) { if ((size_t)retvals[i] == XB_STREAM_READ_ERROR) { ret = 1; goto exit; } } exit: pthread_mutex_destroy(&mutex); free(tids); free(retvals); my_hash_free(&filehash); if (ds_ctxt != NULL) { ds_destroy(ds_ctxt); } xb_stream_read_done(stream); return ret; }
756917.c
/* * Copyright (c) 2011-2020 Seagate Technology LLC and/or its Affiliates * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * For any questions about this software or licensing, * please email [email protected] or [email protected]. * */ #include <stdlib.h> /* system */ #include <stdio.h> /* fopen, fgetc, ... */ #include <unistd.h> /* unlink */ #include <sys/stat.h> /* mkdir */ #include <sys/types.h> /* mkdir */ #include "lib/misc.h" /* M0_SET0 */ #include "lib/memory.h" /* m0_alloc_align */ #include "lib/errno.h" #include "lib/finject.h" /* M0_FI_ENABLED */ #include "lib/ub.h" #include "ut/stob.h" #include "ut/ut.h" #include "lib/assert.h" #include "lib/arith.h" #include "stob/domain.h" #include "stob/io.h" #include "stob/stob.h" #include "fol/fol.h" #include "balloc/balloc.h" /* M0_BALLOC_NON_SPARE_ZONE */ /** @addtogroup stob @{ */ enum { NR = 3, NR_SORT = 256, MIN_BUF_SIZE = 4096, MIN_BUF_SIZE_IN_BLOCKS = 4, }; enum { M0_STOB_UT_DOMAIN_KEY = 0x01, M0_STOB_UT_STOB_KEY = 0x02, }; /** @todo move vars to a context */ static const char linux_location[] = "linuxstob:./__s"; static const char perf_location[] = "perfstob:./__s"; static struct m0_stob_domain *dom; static struct m0_stob *obj; static const char linux_path[] = "./__s/o/100000000000000:2"; static const char perf_path[] = "./__s/backstore/o/100000000000000:2"; static struct m0_stob_io io; static m0_bcount_t user_vec[NR]; static char *user_buf[NR]; static char *read_buf[NR]; static char *user_bufs[NR]; static char *read_bufs[NR]; static m0_bindex_t stob_vec[NR]; static struct m0_clink clink; static FILE *f; static uint32_t block_shift; static uint32_t buf_size; static int test_adieu_init(const char *location, const char *dom_cfg, const char *stob_cfg) { int i; int rc; struct m0_stob_id stob_id; rc = m0_stob_domain_create(location, NULL, M0_STOB_UT_DOMAIN_KEY, dom_cfg, &dom); M0_ASSERT(rc == 0); M0_ASSERT(dom != NULL); m0_stob_id_make(0, M0_STOB_UT_STOB_KEY, &dom->sd_id, &stob_id); rc = m0_stob_find(&stob_id, &obj); M0_ASSERT(rc == 0); rc = m0_stob_locate(obj); M0_ASSERT(rc == 0); rc = m0_ut_stob_create(obj, stob_cfg, NULL); M0_ASSERT(rc == 0); block_shift = m0_stob_block_shift(obj); /* buf_size is chosen so it would be at least MIN_BUF_SIZE in bytes * or it would consist of at least MIN_BUF_SIZE_IN_BLOCKS blocks */ buf_size = max_check(MIN_BUF_SIZE, (1 << block_shift) * MIN_BUF_SIZE_IN_BLOCKS); for (i = 0; i < ARRAY_SIZE(user_buf); ++i) { user_buf[i] = m0_alloc_aligned(buf_size, block_shift); M0_ASSERT(user_buf[i] != NULL); } for (i = 0; i < ARRAY_SIZE(read_buf); ++i) { read_buf[i] = m0_alloc_aligned(buf_size, block_shift); M0_ASSERT(read_buf[i] != NULL); } for (i = 0; i < NR; ++i) { user_bufs[i] = m0_stob_addr_pack(user_buf[i], block_shift); read_bufs[i] = m0_stob_addr_pack(read_buf[i], block_shift); user_vec[i] = buf_size >> block_shift; stob_vec[i] = (buf_size * (2 * i + 1)) >> block_shift; memset(user_buf[i], ('a' + i)|1, buf_size); } return rc; } static void test_adieu_fini(void) { int i; int rc; rc = m0_stob_destroy(obj, NULL); M0_ASSERT(rc == 
0); rc = m0_stob_domain_destroy(dom); M0_ASSERT(rc == 0); for (i = 0; i < ARRAY_SIZE(user_buf); ++i) m0_free(user_buf[i]); for (i = 0; i < ARRAY_SIZE(read_buf); ++i) m0_free(read_buf[i]); } static void test_write(int i) { int rc; struct m0_fol_frag *fol_frag; M0_ALLOC_PTR(fol_frag); M0_UB_ASSERT(fol_frag != NULL); m0_stob_io_init(&io); io.si_opcode = SIO_WRITE; io.si_flags = 0; io.si_fol_frag = fol_frag; io.si_user.ov_vec.v_nr = i; io.si_user.ov_vec.v_count = user_vec; io.si_user.ov_buf = (void **)user_bufs; io.si_stob.iv_vec.v_nr = i; io.si_stob.iv_vec.v_count = user_vec; io.si_stob.iv_index = stob_vec; m0_clink_init(&clink, NULL); m0_clink_add_lock(&io.si_wait, &clink); rc = m0_stob_io_prepare_and_launch(&io, obj, NULL, NULL); M0_ASSERT(rc == 0); m0_chan_wait(&clink); M0_ASSERT(io.si_rc == 0); M0_ASSERT(io.si_count == (buf_size * i) >> block_shift); m0_clink_del_lock(&clink); m0_clink_fini(&clink); m0_stob_io_fini(&io); } static void test_read(int i) { int rc; m0_stob_io_init(&io); io.si_opcode = SIO_READ; io.si_flags = 0; io.si_user.ov_vec.v_nr = i; io.si_user.ov_vec.v_count = user_vec; io.si_user.ov_buf = (void **)read_bufs; io.si_stob.iv_vec.v_nr = i; io.si_stob.iv_vec.v_count = user_vec; io.si_stob.iv_index = stob_vec; m0_clink_init(&clink, NULL); m0_clink_add_lock(&io.si_wait, &clink); rc = m0_stob_io_prepare_and_launch(&io, obj, NULL, NULL); M0_ASSERT(rc == 0); m0_chan_wait(&clink); M0_ASSERT(io.si_rc == 0); M0_ASSERT(io.si_count == (buf_size * i) >> block_shift); m0_clink_del_lock(&clink); m0_clink_fini(&clink); m0_stob_io_fini(&io); } /** Adieu unit-test. */ static void test_adieu(const char *path) { int ch; int i; int j; for (i = 1; i < NR; ++i) { test_write(i); /* this works only for linuxstob */ f = fopen(path, "r"); for (j = 0; j < i; ++j) { int k; for (k = 0; k < buf_size; ++k) { ch = fgetc(f); M0_ASSERT(ch == '\0'); M0_ASSERT(!feof(f)); } for (k = 0; k < buf_size; ++k) { ch = fgetc(f); M0_ASSERT(ch != '\0'); M0_ASSERT(!feof(f)); } } ch = fgetc(f); M0_ASSERT(ch == EOF); fclose(f); } for (i = 1; i < NR; ++i) { test_read(i); M0_ASSERT(memcmp(user_buf[i - 1], read_buf[i - 1], buf_size) == 0); } } void m0_stob_ut_adieu_linux(void) { int rc; rc = test_adieu_init(linux_location, NULL, NULL); M0_ASSERT(rc == 0); test_adieu(linux_path); test_adieu_fini(); } void m0_stob_ut_adieu_perf(void) { int rc; rc = test_adieu_init(perf_location, NULL, NULL); M0_ASSERT(rc == 0); test_adieu(perf_path); test_adieu_fini(); } /* Adieu unit-benchmark */ static void ub_write(int i) { test_write(NR - 1); } static void ub_read(int i) { test_read(NR - 1); } static m0_bcount_t user_vec1[NR_SORT]; static char *user_bufs1[NR_SORT]; static m0_bindex_t stob_vec1[NR_SORT]; static void ub_iovec_init() { int i; for (i = 0; i < NR_SORT ; i++) stob_vec1[i] = MIN_BUF_SIZE * i; m0_stob_io_init(&io); io.si_opcode = SIO_WRITE; io.si_flags = 0; io.si_user.ov_vec.v_nr = NR_SORT; io.si_user.ov_vec.v_count = user_vec1; io.si_user.ov_buf = (void **)user_bufs1; io.si_stob.iv_vec.v_nr = NR_SORT; io.si_stob.iv_vec.v_count = user_vec1; io.si_stob.iv_index = stob_vec1; } static void ub_iovec_invert() { int i; bool swapped; /* Reverse sort index vecs. 
*/ do { swapped = false; for (i = 0; i < NR_SORT - 1; i++) { if (stob_vec1[i] < stob_vec1[i + 1]) { m0_bindex_t tmp = stob_vec1[i]; stob_vec1[i] = stob_vec1[i + 1]; stob_vec1[i + 1] = tmp; swapped = true; } } } while(swapped); } static void ub_iovec_sort() { m0_stob_iovec_sort(&io); } static void ub_iovec_sort_invert() { ub_iovec_invert(); m0_stob_iovec_sort(&io); } static int ub_init(const char *opts M0_UNUSED) { return test_adieu_init(linux_location, NULL, NULL); } static void ub_fini(void) { test_adieu_fini(); } enum { UB_ITER = 100, UB_ITER_SORT = 100000 }; struct m0_ub_set m0_adieu_ub = { .us_name = "adieu-ub", .us_init = ub_init, .us_fini = ub_fini, .us_run = { { .ub_name = "write-prime", .ub_iter = 1, .ub_round = ub_write, .ub_block_size = MIN_BUF_SIZE, .ub_blocks_per_op = MIN_BUF_SIZE_IN_BLOCKS }, { .ub_name = "write", .ub_iter = UB_ITER, .ub_block_size = MIN_BUF_SIZE, .ub_blocks_per_op = MIN_BUF_SIZE_IN_BLOCKS, .ub_round = ub_write }, { .ub_name = "read", .ub_iter = UB_ITER, .ub_block_size = MIN_BUF_SIZE, .ub_blocks_per_op = MIN_BUF_SIZE_IN_BLOCKS, .ub_round = ub_read }, { .ub_name = "iovec-sort", .ub_iter = UB_ITER_SORT, .ub_init = ub_iovec_init, .ub_block_size = MIN_BUF_SIZE, .ub_blocks_per_op = MIN_BUF_SIZE_IN_BLOCKS, .ub_round = ub_iovec_sort }, { .ub_name = "iovec-sort-invert", .ub_iter = UB_ITER_SORT, .ub_init = ub_iovec_init, .ub_block_size = MIN_BUF_SIZE, .ub_blocks_per_op = MIN_BUF_SIZE_IN_BLOCKS, .ub_round = ub_iovec_sort_invert }, { .ub_name = NULL } } }; /** @} end group stob */ /* * Local variables: * c-indentation-style: "K&R" * c-basic-offset: 8 * tab-width: 8 * fill-column: 80 * scroll-step: 1 * End: */
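The adieu test addresses the stob in blocks rather than bytes: extent lengths come from buf_size >> block_shift and offsets such as (buf_size * (2 * i + 1)) >> block_shift leave a one-buffer gap between extents. A small hedged sketch of that byte/block arithmetic, with posix_memalign standing in for m0_alloc_aligned; none of the names below are Motr API.

/* Hedged sketch of the byte<->block conversions used in test_adieu_init();
 * posix_memalign replaces m0_alloc_aligned, everything else is plain C. */
#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
    uint32_t block_shift = 12;              /* 4 KiB blocks, i.e. 1 << 12 */
    uint32_t block_size  = 1u << block_shift;
    uint32_t buf_size    = 4 * block_size;  /* MIN_BUF_SIZE_IN_BLOCKS-style sizing */
    void *buf = NULL;

    if (posix_memalign(&buf, block_size, buf_size) != 0)
        return 1;

    for (int i = 0; i < 3; i++) {
        uint64_t count_blocks  = buf_size >> block_shift;                 /* extent length in blocks */
        uint64_t offset_blocks = ((uint64_t)buf_size * (2 * i + 1)) >> block_shift; /* gap between extents */
        printf("extent %d: %llu blocks at block offset %llu\n",
               i, (unsigned long long)count_blocks, (unsigned long long)offset_blocks);
    }
    free(buf);
    return 0;
}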
270584.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE244_Failure_to_Clear_Heap_Before_Release__w32_wchar_t_15.c Label Definition File: CWE244_Failure_to_Clear_Heap_Before_Release__w32.label.xml Template File: point-flaw-15.tmpl.c */ /* * @description * CWE: 244 Failure to Clear Heap Before Release * Sinks: * GoodSink: Clear the password buffer before releasing the memory from the heap * BadSink : Release password from the heap without first clearing the buffer * Flow Variant: 15 Control flow: switch(6) * * */ #include "std_testcase.h" #include <wchar.h> #ifdef _WIN32 # include <windows.h> #pragma comment(lib, "advapi32.lib") #endif #ifndef OMITBAD void CWE244_Failure_to_Clear_Heap_Before_Release__w32_wchar_t_15_bad() { switch(6) { case 6: { wchar_t * password = (wchar_t *)malloc(100*sizeof(wchar_t)); size_t password_len = 0; HANDLE pHandle; wchar_t * username = L"User"; wchar_t * domain = L"Domain"; /* Initialize password */ password[0] = L'\0'; fgetws(password, 100, stdin); /* Remove the carriage return from the string that is inserted by fgetws() */ password_len = wcslen(password); if (password_len > 0) { password[password_len-1] = L'\0'; } /* Use the password in LogonUser() to establish that it is "sensitive" */ if (LogonUserW( username, domain, password, LOGON32_LOGON_NETWORK, LOGON32_PROVIDER_DEFAULT, &pHandle) != 0) { printLine("User logged in successfully."); CloseHandle(pHandle); } else { printLine("Unable to login."); } /* FLAW: free() password without clearing the password buffer */ free(password); } break; default: /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ { wchar_t * password = (wchar_t *)malloc(100*sizeof(wchar_t)); size_t password_len = 0; HANDLE pHandle; wchar_t * username = L"User"; wchar_t * domain = L"Domain"; /* Initialize password */ password[0] = L'\0'; fgetws(password, 100, stdin); /* Remove the carriage return from the string that is inserted by fgetws() */ password_len = wcslen(password); if (password_len > 0) { password[password_len-1] = L'\0'; } /* Use the password in LogonUser() to establish that it is "sensitive" */ if (LogonUserW( username, domain, password, LOGON32_LOGON_NETWORK, LOGON32_PROVIDER_DEFAULT, &pHandle) != 0) { printLine("User logged in successfully."); CloseHandle(pHandle); } else { printLine("Unable to login."); } password_len = wcslen(password); /* FIX: Clear password prior to freeing */ SecureZeroMemory(password, password_len * sizeof(wchar_t)); } } } #endif /* OMITBAD */ #ifndef OMITGOOD /* good1() changes the switch to switch(5) */ static void good1() { switch(5) { case 6: /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ { wchar_t * password = (wchar_t *)malloc(100*sizeof(wchar_t)); size_t password_len = 0; HANDLE pHandle; wchar_t * username = L"User"; wchar_t * domain = L"Domain"; /* Initialize password */ password[0] = L'\0'; fgetws(password, 100, stdin); /* Remove the carriage return from the string that is inserted by fgetws() */ password_len = wcslen(password); if (password_len > 0) { password[password_len-1] = L'\0'; } /* Use the password in LogonUser() to establish that it is "sensitive" */ if (LogonUserW( username, domain, password, LOGON32_LOGON_NETWORK, LOGON32_PROVIDER_DEFAULT, &pHandle) != 0) { printLine("User logged in successfully."); CloseHandle(pHandle); } else { printLine("Unable to login."); } /* FLAW: free() password without clearing the password buffer */ free(password); } break; default: { wchar_t * password = (wchar_t *)malloc(100*sizeof(wchar_t)); size_t password_len = 0; HANDLE 
pHandle; wchar_t * username = L"User"; wchar_t * domain = L"Domain"; /* Initialize password */ password[0] = L'\0'; fgetws(password, 100, stdin); /* Remove the carriage return from the string that is inserted by fgetws() */ password_len = wcslen(password); if (password_len > 0) { password[password_len-1] = L'\0'; } /* Use the password in LogonUser() to establish that it is "sensitive" */ if (LogonUserW( username, domain, password, LOGON32_LOGON_NETWORK, LOGON32_PROVIDER_DEFAULT, &pHandle) != 0) { printLine("User logged in successfully."); CloseHandle(pHandle); } else { printLine("Unable to login."); } password_len = wcslen(password); /* FIX: Clear password prior to freeing */ SecureZeroMemory(password, password_len * sizeof(wchar_t)); } } } /* good2() reverses the blocks in the switch */ static void good2() { switch(6) { case 6: { wchar_t * password = (wchar_t *)malloc(100*sizeof(wchar_t)); size_t password_len = 0; HANDLE pHandle; wchar_t * username = L"User"; wchar_t * domain = L"Domain"; /* Initialize password */ password[0] = L'\0'; fgetws(password, 100, stdin); /* Remove the carriage return from the string that is inserted by fgetws() */ password_len = wcslen(password); if (password_len > 0) { password[password_len-1] = L'\0'; } /* Use the password in LogonUser() to establish that it is "sensitive" */ if (LogonUserW( username, domain, password, LOGON32_LOGON_NETWORK, LOGON32_PROVIDER_DEFAULT, &pHandle) != 0) { printLine("User logged in successfully."); CloseHandle(pHandle); } else { printLine("Unable to login."); } password_len = wcslen(password); /* FIX: Clear password prior to freeing */ SecureZeroMemory(password, password_len * sizeof(wchar_t)); } break; default: /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ { wchar_t * password = (wchar_t *)malloc(100*sizeof(wchar_t)); size_t password_len = 0; HANDLE pHandle; wchar_t * username = L"User"; wchar_t * domain = L"Domain"; /* Initialize password */ password[0] = L'\0'; fgetws(password, 100, stdin); /* Remove the carriage return from the string that is inserted by fgetws() */ password_len = wcslen(password); if (password_len > 0) { password[password_len-1] = L'\0'; } /* Use the password in LogonUser() to establish that it is "sensitive" */ if (LogonUserW( username, domain, password, LOGON32_LOGON_NETWORK, LOGON32_PROVIDER_DEFAULT, &pHandle) != 0) { printLine("User logged in successfully."); CloseHandle(pHandle); } else { printLine("Unable to login."); } /* FLAW: free() password without clearing the password buffer */ free(password); } } } void CWE244_Failure_to_Clear_Heap_Before_Release__w32_wchar_t_15_good() { good1(); good2(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE244_Failure_to_Clear_Heap_Before_Release__w32_wchar_t_15_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE244_Failure_to_Clear_Heap_Before_Release__w32_wchar_t_15_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
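The point of this test case is the ordering in the good variants: measure the string, scrub it with SecureZeroMemory(), and only then free() it, so the password does not linger in released heap memory. A minimal hedged sketch of that fix pattern outside the test harness follows; the scrub_and_free() helper and the portable volatile-pointer wipe used on non-Windows builds are illustrative additions, not part of the Juliet suite.

/* Hedged sketch of the CWE-244 fix pattern: wipe sensitive data before free().
 * scrub_and_free() is an illustrative helper, not part of the test suite. */
#include <stdlib.h>
#include <wchar.h>
#ifdef _WIN32
#include <windows.h>
#endif

static void scrub_and_free(wchar_t *secret, size_t len_chars)
{
#ifdef _WIN32
    SecureZeroMemory(secret, len_chars * sizeof(wchar_t)); /* guaranteed not to be optimized away */
#else
    volatile wchar_t *p = secret;                          /* portable best-effort wipe */
    while (len_chars--) *p++ = L'\0';
#endif
    free(secret);
}

int main(void)
{
    wchar_t *password = (wchar_t *)malloc(100 * sizeof(wchar_t));
    if (password == NULL) return 1;
    wcscpy(password, L"hunter2");
    /* ... use the password ... */
    scrub_and_free(password, wcslen(password));            /* FIX: clear, then release */
    return 0;
}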
1005176.c
/**************************************************************************************** * The Sentential Decision Diagram Package * sdd version 1.1.1, January 31, 2014 * http://reasoning.cs.ucla.edu/sdd ****************************************************************************************/ #include "sddapi.h" #include "compiler.h" #include "parameters.h" /**************************************************************************************** * this file contains the fnf-to-sdd compiler, with AUTO gc and sdd-minimize * * NOTE: this file is currently set to use the vtree search algorithm distributed * with the SDD package. this is meant to allow users to modify this search algorithm, * with the hope that they will improve on it. * * by commenting in/out TWO lines of code below, this can be changed to use the vtree * search algorithm built into the SDD library (look for SWITCH-TO-LIBRARY-SEARCH). * * the two algorithms are identical though, so the results should match. ****************************************************************************************/ // local declarations static SddNode* apply_vtree_auto(Vtree* vtree, BoolOp op, SddManager* manager); SddNode* apply_litset_auto(LitSet* litset, SddManager* manager); /**************************************************************************************** * compiles a cnf or dnf into an sdd ****************************************************************************************/ //fnf is either a cnf or a dnf SddNode* fnf_to_sdd_auto(Fnf* fnf, SddManager* manager) { sdd_manager_auto_gc_and_minimize_on(manager); //to SWITCH-TO-LIBRARY-SEARCH, comment in the next line, comment out the one after sdd_manager_set_minimize_function(vtree_search,manager); //user-defined search algorithm // sdd_manager_set_minimize_function(sdd_vtree_minimize,manager); //library's search algorithm distribute_fnf_over_vtree(fnf,manager); SddNode* node = apply_vtree_auto(sdd_manager_vtree(manager),fnf->op,manager); free_vtree_data(sdd_manager_vtree(manager)); //root may have changed return node; } //each vtree node is associated with a set of litsets (clauses or terms) //the totality of these litsets represent an fnf (cnf or dnf) //returns an sdd which is equivalent to the cnf/dnf associated with vtree SddNode* apply_vtree_auto(Vtree* vtree, BoolOp op, SddManager* manager) { //get litsets associated with vtree node //do this first as vtree root may be changed by dynamic vtree search LitSet** litsets = DATA(vtree,litsets); SddSize litset_count = DATA(vtree,litset_count); sort_litsets_by_lca(litsets,litset_count,manager); SddNode* node; if(sdd_vtree_is_leaf(vtree)) node = ONE(manager,op); else { SddNode* l_node = apply_vtree_auto(sdd_vtree_left(vtree),op,manager); sdd_ref(l_node,manager); SddNode* r_node = apply_vtree_auto(sdd_vtree_right(vtree),op,manager); sdd_deref(l_node,manager); node = sdd_apply(l_node,r_node,op,manager); } while(litset_count--) { //compile and integrate litset sdd_ref(node,manager); SddNode* litset = apply_litset_auto(*litsets++,manager); //may gc node sdd_deref(node,manager); node = sdd_apply(litset,node,op,manager); //recompute lcas of remaining clauses and sort again sort_litsets_by_lca(litsets,litset_count,manager); } return node; } //converts a clause/term into an equivalent sdd SddNode* apply_litset_auto(LitSet* litset, SddManager* manager) { BoolOp op = litset->op; //conjoin (term) or disjoin (clause) SddLiteral* literals = litset->literals; SddNode* node = ONE(manager,op); //will not be gc'd for(SddLiteral i=0; 
i<litset->literal_count; i++) { SddNode* literal = sdd_manager_literal(literals[i],manager); node = sdd_apply(node,literal,op,manager); } return node; } /**************************************************************************************** * end ****************************************************************************************/
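apply_litset_auto() folds the literals of one clause or term into an SDD by repeatedly calling sdd_apply() with the litset's own operator, starting from that operator's neutral element. The sketch below does the same fold for a single clause (x1 OR NOT x2 OR x3) against the public sddapi; the function names follow the SDD package documentation as I recall it, so treat the exact signatures as assumptions.

/* Hedged sketch: compile one clause by folding literals with sdd_apply(),
 * mirroring apply_litset_auto(). Assumes the public sddapi.h interface. */
#include <stdio.h>
#include "sddapi.h"

int main(void)
{
    SddManager* manager = sdd_manager_create(3, 0);  /* 3 variables, no auto gc/minimize */
    SddLiteral clause[] = { 1, -2, 3 };              /* x1 OR NOT x2 OR x3 */

    SddNode* node = sdd_manager_false(manager);      /* neutral element for DISJOIN */
    for (int i = 0; i < 3; i++)
        node = sdd_apply(node, sdd_manager_literal(clause[i], manager), DISJOIN, manager);

    printf("clause compiled; equals the false node? %d\n", node == sdd_manager_false(manager));
    sdd_manager_free(manager);
    return 0;
}

A term would use CONJOIN and start from sdd_manager_true(manager) instead, which is exactly the role the ONE(manager,op) macro plays in apply_litset_auto().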
100221.c
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "ringbuf.h" #include <stdbool.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #define RB_TAG "RINGBUF" ringbuf_t* rb_init(const char* name, uint32_t size) { ringbuf_t* r; unsigned char* buf; if (size < 2 || !name) { return NULL; } r = malloc(sizeof(ringbuf_t)); assert(r); #if (CONFIG_SPIRAM_SUPPORT && \ (CONFIG_SPIRAM_USE_CAPS_ALLOC || CONFIG_SPIRAM_USE_MALLOC)) buf = heap_caps_calloc(1, size, MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT); #else buf = calloc(1, size); #endif assert(buf); r->name = (char*)name; r->base = r->readptr = r->writeptr = buf; r->fill_cnt = 0; r->size = size; //vSemaphoreCreateBinary(r->can_read); aos_sem_new(&r->can_read, 0); assert(r->can_read); // vSemaphoreCreateBinary(r->can_write); aos_sem_new(&r->can_write, 0); assert(r->can_write); // r->lock = xSemaphoreCreateMutex(); aos_mutex_new(&r->lock); assert(r->lock); r->abort_read = 0; r->abort_write = 0; r->writer_finished = 0; r->reader_unblock = 0; return r; } void rb_cleanup(ringbuf_t* rb) { free(rb->base); rb->base = NULL; // vSemaphoreDelete(rb->can_read); aos_sem_free(&rb->can_read); rb->can_read = NULL; // vSemaphoreDelete(rb->can_write); aos_sem_free(&rb->can_write); rb->can_write = NULL; // vSemaphoreDelete(rb->lock); aos_mutex_free(&rb->lock); rb->lock = NULL; free(rb); } /* * @brief: get the number of filled bytes in the buffer */ ssize_t rb_filled(ringbuf_t* rb) { return rb->fill_cnt; } /* * @brief: get the number of empty bytes available in the buffer */ ssize_t rb_available(ringbuf_t* rb) { LOGD(RB_TAG, "rb leftover %d bytes", rb->size - rb->fill_cnt); return (rb->size - rb->fill_cnt); } int rb_read(ringbuf_t* rb, uint8_t* buf, int buf_len, uint32_t ticks_to_wait) { int read_size; int total_read_size = 0; /** * In case where we are able to read buf_len in one go, * we are not able to check for abort and keep returning buf_len as bytes * read. Check for argument validity check and abort case before entering * memcpy loop. 
*/ if (rb == NULL || rb->abort_read == 1) { return RB_FAIL; } //xSemaphoreTake(rb->lock, portMAX_DELAY); aos_mutex_lock(&rb->lock, AOS_WAIT_FOREVER); while (buf_len) { if (rb->fill_cnt < buf_len) { read_size = rb->fill_cnt; } else { read_size = buf_len; } if ((rb->readptr + read_size) > (rb->base + rb->size)) { int rlen1 = rb->base + rb->size - rb->readptr; int rlen2 = read_size - rlen1; if (buf) { memcpy(buf, rb->readptr, rlen1); memcpy(buf + rlen1, rb->base, rlen2); } rb->readptr = rb->base + rlen2; } else { if (buf) { memcpy(buf, rb->readptr, read_size); } rb->readptr = rb->readptr + read_size; } buf_len -= read_size; rb->fill_cnt -= read_size; total_read_size += read_size; if (buf) { buf += read_size; } // xSemaphoreGive(rb->can_write); aos_sem_signal(&rb->can_write); if (buf_len == 0) { break; } // xSemaphoreGive(rb->lock); aos_mutex_unlock(&rb->lock); if (!rb->writer_finished && !rb->abort_read && !rb->reader_unblock) { // if (xSemaphoreTake(rb->can_read, ticks_to_wait) != pdTRUE) { if (aos_sem_wait(&rb->can_read, ticks_to_wait) != pdTRUE) { goto out; } } if (rb->abort_read == 1) { total_read_size = RB_ABORT; goto out; } if (rb->writer_finished == 1) { goto out; } if (rb->reader_unblock == 1) { if (total_read_size == 0) { total_read_size = RB_READER_UNBLOCK; } goto out; } aos_mutex_lock(&rb->lock, AOS_WAIT_FOREVER); //xSemaphoreTake(rb->lock, portMAX_DELAY); } // xSemaphoreGive(rb->lock); aos_mutex_unlock(&rb->lock); out: if (rb->writer_finished == 1 && total_read_size == 0) { total_read_size = RB_WRITER_FINISHED; } rb->reader_unblock = 0; /* We are anyway unblocking reader */ return total_read_size; } int rb_write(ringbuf_t* rb, const uint8_t* buf, int buf_len, uint32_t ticks_to_wait) { int write_size; int total_write_size = 0; /** * In case where we are able to write buf_len in one go, * we are not able to check for abort and keep returning buf_len as bytes * written. Check for arguments' validity and abort case before entering * memcpy loop. */ if (rb == NULL || buf == NULL || rb->abort_write == 1) { return RB_FAIL; } //xSemaphoreTake(rb->lock, portMAX_DELAY); aos_mutex_lock(&rb->lock, AOS_WAIT_FOREVER); while (buf_len) { if ((rb->size - rb->fill_cnt) < buf_len) { write_size = rb->size - rb->fill_cnt; } else { write_size = buf_len; } if ((rb->writeptr + write_size) > (rb->base + rb->size)) { int wlen1 = rb->base + rb->size - rb->writeptr; int wlen2 = write_size - wlen1; memcpy(rb->writeptr, buf, wlen1); memcpy(rb->base, buf + wlen1, wlen2); rb->writeptr = rb->base + wlen2; } else { memcpy(rb->writeptr, buf, write_size); rb->writeptr = rb->writeptr + write_size; } buf_len -= write_size; rb->fill_cnt += write_size; total_write_size += write_size; buf += write_size; // xSemaphoreGive(rb->can_read); aos_sem_signal(&rb->can_read); if (buf_len == 0) { break; } // xSemaphoreGive(rb->lock); aos_mutex_unlock(&rb->lock); if (rb->writer_finished) { return write_size > 0 ? write_size : RB_WRITER_FINISHED; } // if (xSemaphoreTake(rb->can_write, ticks_to_wait) != pdTRUE) { if (aos_sem_wait(&rb->can_write, ticks_to_wait) != pdTRUE) { goto out; } if (rb->abort_write == 1) { goto out; } // xSemaphoreTake(rb->lock, portMAX_DELAY); aos_mutex_lock(&rb->lock, AOS_WAIT_FOREVER); } // xSemaphoreGive(rb->lock); aos_mutex_unlock(&rb->lock); out: return total_write_size; } /** * abort and set abort_read and abort_write to asked values. 
*/ static void _rb_reset(ringbuf_t* rb, int abort_read, int abort_write) { if (rb == NULL) { return; } // xSemaphoreTake(rb->lock, portMAX_DELAY); aos_mutex_lock(&rb->lock, AOS_WAIT_FOREVER); rb->readptr = rb->writeptr = rb->base; rb->fill_cnt = 0; rb->writer_finished = 0; rb->reader_unblock = 0; rb->abort_read = abort_read; rb->abort_write = abort_write; // xSemaphoreGive(rb->lock); aos_mutex_unlock(&rb->lock); } void rb_reset(ringbuf_t* rb) { _rb_reset(rb, 0, 0); } void rb_abort_read(ringbuf_t* rb) { if (rb == NULL) { return; } rb->abort_read = 1; // xSemaphoreGive(rb->can_read); // xSemaphoreGive(rb->lock); aos_sem_signal(&rb->can_read); aos_mutex_unlock(&rb->lock); } void rb_abort_write(ringbuf_t* rb) { if (rb == NULL) { return; } rb->abort_write = 1; // xSemaphoreGive(rb->can_write); // xSemaphoreGive(rb->lock); aos_sem_signal(&rb->can_write); aos_mutex_unlock(&rb->lock); } void rb_abort(ringbuf_t* rb) { if (rb == NULL) { return; } rb->abort_read = 1; rb->abort_write = 1; // xSemaphoreGive(rb->can_read); // xSemaphoreGive(rb->can_write); // xSemaphoreGive(rb->lock); aos_sem_signal(&rb->can_read); aos_sem_signal(&rb->can_write); aos_mutex_unlock(&rb->lock); } /** * Reset the ringbuffer and keep rb_write aborted. * Note that we are taking lock before even toggling `abort_write` variable. * This serves a special purpose to not allow this abort to be mixed with * rb_write. */ void rb_reset_and_abort_write(ringbuf_t* rb) { _rb_reset(rb, 0, 1); // xSemaphoreGive(rb->can_write); aos_sem_signal(&rb->can_write); } void rb_signal_writer_finished(ringbuf_t* rb) { if (rb == NULL) { return; } rb->writer_finished = 1; // xSemaphoreGive(rb->can_read); aos_sem_signal(&rb->can_read); } int rb_is_writer_finished(ringbuf_t* rb) { if (rb == NULL) { return RB_FAIL; } return (rb->writer_finished); } void rb_wakeup_reader(ringbuf_t* rb) { if (rb == NULL) { return; } rb->reader_unblock = 1; // xSemaphoreGive(rb->can_read); aos_sem_signal(&rb->can_read); } void rb_stat(ringbuf_t* rb) { aos_mutex_lock(&rb->lock, AOS_WAIT_FOREVER); //xSemaphoreTake(rb->lock, portMAX_DELAY); LOGI(RB_TAG, "filled: %d, base: %p, read_ptr: %p, write_ptr: %p, size: %d\n", rb->fill_cnt, rb->base, rb->readptr, rb->writeptr, rb->size); // xSemaphoreGive(rb->lock); aos_mutex_unlock(&rb->lock); }
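Taken together, the functions above give a byte-oriented blocking ring buffer: rb_init() sizes it, rb_write()/rb_read() move data and wait on the can_write/can_read semaphores when the buffer is full or empty, and rb_signal_writer_finished() lets a reader drain and then see RB_WRITER_FINISHED. A minimal single-threaded usage sketch against the API defined in this file follows; the buffer is sized so no call blocks, and TIMEOUT_TICKS is an illustrative constant, not part of the ringbuf API.

/* Hedged usage sketch for the ringbuf API defined above; single-threaded,
 * so the buffer holds the whole message and no call ever blocks. */
#include <stdio.h>
#include <string.h>
#include "ringbuf.h"

#define TIMEOUT_TICKS 100  /* illustrative wait value for rb_read/rb_write */

int ringbuf_demo(void)
{
    ringbuf_t *rb = rb_init("demo", 128);
    if (rb == NULL) return -1;

    const char *msg = "hello ringbuf";
    int written = rb_write(rb, (const uint8_t *)msg, (int)strlen(msg), TIMEOUT_TICKS);
    printf("wrote %d bytes, filled=%d available=%d\n",
           written, (int)rb_filled(rb), (int)rb_available(rb));

    uint8_t out[64] = {0};
    int got = rb_read(rb, out, written, TIMEOUT_TICKS);
    printf("read %d bytes: %.*s\n", got, got, (const char *)out);

    rb_cleanup(rb);
    return 0;
}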
127567.c
/* * fs/nfs/nfs4xdr.c * * Client-side XDR for NFSv4. * * Copyright (c) 2002 The Regents of the University of Michigan. * All rights reserved. * * Kendrick Smith <[email protected]> * Andy Adamson <[email protected]> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/param.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/in.h> #include <linux/pagemap.h> #include <linux/proc_fs.h> #include <linux/kdev_t.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/msg_prot.h> #include <linux/sunrpc/gss_api.h> #include <linux/nfs.h> #include <linux/nfs4.h> #include <linux/nfs_fs.h> #include <linux/nfs_idmap.h> #include "nfs4_fs.h" #include "internal.h" #include "pnfs.h" #define NFSDBG_FACILITY NFSDBG_XDR /* Mapping from NFS error code to "errno" error code. 
*/ #define errno_NFSERR_IO EIO static int nfs4_stat_to_errno(int); /* NFSv4 COMPOUND tags are only wanted for debugging purposes */ #ifdef DEBUG #define NFS4_MAXTAGLEN 20 #else #define NFS4_MAXTAGLEN 0 #endif /* lock,open owner id: * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2) */ #define open_owner_id_maxsz (1 + 1 + 4) #define lock_owner_id_maxsz (1 + 1 + 4) #define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) #define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) #define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) #define op_encode_hdr_maxsz (1) #define op_decode_hdr_maxsz (2) #define encode_stateid_maxsz (XDR_QUADLEN(NFS4_STATEID_SIZE)) #define decode_stateid_maxsz (XDR_QUADLEN(NFS4_STATEID_SIZE)) #define encode_verifier_maxsz (XDR_QUADLEN(NFS4_VERIFIER_SIZE)) #define decode_verifier_maxsz (XDR_QUADLEN(NFS4_VERIFIER_SIZE)) #define encode_putfh_maxsz (op_encode_hdr_maxsz + 1 + \ (NFS4_FHSIZE >> 2)) #define decode_putfh_maxsz (op_decode_hdr_maxsz) #define encode_putrootfh_maxsz (op_encode_hdr_maxsz) #define decode_putrootfh_maxsz (op_decode_hdr_maxsz) #define encode_getfh_maxsz (op_encode_hdr_maxsz) #define decode_getfh_maxsz (op_decode_hdr_maxsz + 1 + \ ((3+NFS4_FHSIZE) >> 2)) #define nfs4_fattr_bitmap_maxsz 4 #define encode_getattr_maxsz (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz) #define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2)) #define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2)) #define nfs4_owner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) #define nfs4_group_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) /* This is based on getfattr, which uses the most attributes: */ #define nfs4_fattr_value_maxsz (1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \ 3 + 3 + 3 + nfs4_owner_maxsz + nfs4_group_maxsz)) #define nfs4_fattr_maxsz (nfs4_fattr_bitmap_maxsz + \ nfs4_fattr_value_maxsz) #define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz) #define encode_attrs_maxsz (nfs4_fattr_bitmap_maxsz + \ 1 + 2 + 1 + \ nfs4_owner_maxsz + \ nfs4_group_maxsz + \ 4 + 4) #define encode_savefh_maxsz (op_encode_hdr_maxsz) #define decode_savefh_maxsz (op_decode_hdr_maxsz) #define encode_restorefh_maxsz (op_encode_hdr_maxsz) #define decode_restorefh_maxsz (op_decode_hdr_maxsz) #define encode_fsinfo_maxsz (encode_getattr_maxsz) #define decode_fsinfo_maxsz (op_decode_hdr_maxsz + 15) #define encode_renew_maxsz (op_encode_hdr_maxsz + 3) #define decode_renew_maxsz (op_decode_hdr_maxsz) #define encode_setclientid_maxsz \ (op_encode_hdr_maxsz + \ XDR_QUADLEN(NFS4_VERIFIER_SIZE) + \ XDR_QUADLEN(NFS4_SETCLIENTID_NAMELEN) + \ 1 /* sc_prog */ + \ XDR_QUADLEN(RPCBIND_MAXNETIDLEN) + \ XDR_QUADLEN(RPCBIND_MAXUADDRLEN) + \ 1) /* sc_cb_ident */ #define decode_setclientid_maxsz \ (op_decode_hdr_maxsz + \ 2 + \ 1024) /* large value for CLID_INUSE */ #define encode_setclientid_confirm_maxsz \ (op_encode_hdr_maxsz + \ 3 + (NFS4_VERIFIER_SIZE >> 2)) #define decode_setclientid_confirm_maxsz \ (op_decode_hdr_maxsz) #define encode_lookup_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz) #define decode_lookup_maxsz (op_decode_hdr_maxsz) #define encode_share_access_maxsz \ (2) #define encode_createmode_maxsz (1 + encode_attrs_maxsz + encode_verifier_maxsz) #define encode_opentype_maxsz (1 + encode_createmode_maxsz) #define encode_claim_null_maxsz (1 + nfs4_name_maxsz) #define encode_open_maxsz (op_encode_hdr_maxsz + \ 2 + encode_share_access_maxsz + 2 + \ open_owner_id_maxsz + \ encode_opentype_maxsz + \ encode_claim_null_maxsz) #define decode_ace_maxsz (3 + nfs4_owner_maxsz) #define 
decode_delegation_maxsz (1 + decode_stateid_maxsz + 1 + \ decode_ace_maxsz) #define decode_change_info_maxsz (5) #define decode_open_maxsz (op_decode_hdr_maxsz + \ decode_stateid_maxsz + \ decode_change_info_maxsz + 1 + \ nfs4_fattr_bitmap_maxsz + \ decode_delegation_maxsz) #define encode_open_confirm_maxsz \ (op_encode_hdr_maxsz + \ encode_stateid_maxsz + 1) #define decode_open_confirm_maxsz \ (op_decode_hdr_maxsz + \ decode_stateid_maxsz) #define encode_open_downgrade_maxsz \ (op_encode_hdr_maxsz + \ encode_stateid_maxsz + 1 + \ encode_share_access_maxsz) #define decode_open_downgrade_maxsz \ (op_decode_hdr_maxsz + \ decode_stateid_maxsz) #define encode_close_maxsz (op_encode_hdr_maxsz + \ 1 + encode_stateid_maxsz) #define decode_close_maxsz (op_decode_hdr_maxsz + \ decode_stateid_maxsz) #define encode_setattr_maxsz (op_encode_hdr_maxsz + \ encode_stateid_maxsz + \ encode_attrs_maxsz) #define decode_setattr_maxsz (op_decode_hdr_maxsz + \ nfs4_fattr_bitmap_maxsz) #define encode_read_maxsz (op_encode_hdr_maxsz + \ encode_stateid_maxsz + 3) #define decode_read_maxsz (op_decode_hdr_maxsz + 2) #define encode_readdir_maxsz (op_encode_hdr_maxsz + \ 2 + encode_verifier_maxsz + 5) #define decode_readdir_maxsz (op_decode_hdr_maxsz + \ decode_verifier_maxsz) #define encode_readlink_maxsz (op_encode_hdr_maxsz) #define decode_readlink_maxsz (op_decode_hdr_maxsz + 1) #define encode_write_maxsz (op_encode_hdr_maxsz + \ encode_stateid_maxsz + 4) #define decode_write_maxsz (op_decode_hdr_maxsz + \ 2 + decode_verifier_maxsz) #define encode_commit_maxsz (op_encode_hdr_maxsz + 3) #define decode_commit_maxsz (op_decode_hdr_maxsz + \ decode_verifier_maxsz) #define encode_remove_maxsz (op_encode_hdr_maxsz + \ nfs4_name_maxsz) #define decode_remove_maxsz (op_decode_hdr_maxsz + \ decode_change_info_maxsz) #define encode_rename_maxsz (op_encode_hdr_maxsz + \ 2 * nfs4_name_maxsz) #define decode_rename_maxsz (op_decode_hdr_maxsz + \ decode_change_info_maxsz + \ decode_change_info_maxsz) #define encode_link_maxsz (op_encode_hdr_maxsz + \ nfs4_name_maxsz) #define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz) #define encode_lockowner_maxsz (7) #define encode_lock_maxsz (op_encode_hdr_maxsz + \ 7 + \ 1 + encode_stateid_maxsz + 1 + \ encode_lockowner_maxsz) #define decode_lock_denied_maxsz \ (8 + decode_lockowner_maxsz) #define decode_lock_maxsz (op_decode_hdr_maxsz + \ decode_lock_denied_maxsz) #define encode_lockt_maxsz (op_encode_hdr_maxsz + 5 + \ encode_lockowner_maxsz) #define decode_lockt_maxsz (op_decode_hdr_maxsz + \ decode_lock_denied_maxsz) #define encode_locku_maxsz (op_encode_hdr_maxsz + 3 + \ encode_stateid_maxsz + \ 4) #define decode_locku_maxsz (op_decode_hdr_maxsz + \ decode_stateid_maxsz) #define encode_release_lockowner_maxsz \ (op_encode_hdr_maxsz + \ encode_lockowner_maxsz) #define decode_release_lockowner_maxsz \ (op_decode_hdr_maxsz) #define encode_access_maxsz (op_encode_hdr_maxsz + 1) #define decode_access_maxsz (op_decode_hdr_maxsz + 2) #define encode_symlink_maxsz (op_encode_hdr_maxsz + \ 1 + nfs4_name_maxsz + \ 1 + \ nfs4_fattr_maxsz) #define decode_symlink_maxsz (op_decode_hdr_maxsz + 8) #define encode_create_maxsz (op_encode_hdr_maxsz + \ 1 + 2 + nfs4_name_maxsz + \ encode_attrs_maxsz) #define decode_create_maxsz (op_decode_hdr_maxsz + \ decode_change_info_maxsz + \ nfs4_fattr_bitmap_maxsz) #define encode_statfs_maxsz (encode_getattr_maxsz) #define decode_statfs_maxsz (decode_getattr_maxsz) #define encode_delegreturn_maxsz (op_encode_hdr_maxsz + 4) #define 
decode_delegreturn_maxsz (op_decode_hdr_maxsz) #define encode_getacl_maxsz (encode_getattr_maxsz) #define decode_getacl_maxsz (op_decode_hdr_maxsz + \ nfs4_fattr_bitmap_maxsz + 1) #define encode_setacl_maxsz (op_encode_hdr_maxsz + \ encode_stateid_maxsz + 3) #define decode_setacl_maxsz (decode_setattr_maxsz) #define encode_fs_locations_maxsz \ (encode_getattr_maxsz) #define decode_fs_locations_maxsz \ (0) #define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz) #define decode_secinfo_maxsz (op_decode_hdr_maxsz + 1 + ((NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4)) #if defined(CONFIG_NFS_V4_1) #define NFS4_MAX_MACHINE_NAME_LEN (64) #define encode_exchange_id_maxsz (op_encode_hdr_maxsz + \ encode_verifier_maxsz + \ 1 /* co_ownerid.len */ + \ XDR_QUADLEN(NFS4_EXCHANGE_ID_LEN) + \ 1 /* flags */ + \ 1 /* spa_how */ + \ 0 /* SP4_NONE (for now) */ + \ 1 /* zero implemetation id array */) #define decode_exchange_id_maxsz (op_decode_hdr_maxsz + \ 2 /* eir_clientid */ + \ 1 /* eir_sequenceid */ + \ 1 /* eir_flags */ + \ 1 /* spr_how */ + \ 0 /* SP4_NONE (for now) */ + \ 2 /* eir_server_owner.so_minor_id */ + \ /* eir_server_owner.so_major_id<> */ \ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 + \ /* eir_server_scope<> */ \ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 + \ 1 /* eir_server_impl_id array length */ + \ 0 /* ignored eir_server_impl_id contents */) #define encode_channel_attrs_maxsz (6 + 1 /* ca_rdma_ird.len (0) */) #define decode_channel_attrs_maxsz (6 + \ 1 /* ca_rdma_ird.len */ + \ 1 /* ca_rdma_ird */) #define encode_create_session_maxsz (op_encode_hdr_maxsz + \ 2 /* csa_clientid */ + \ 1 /* csa_sequence */ + \ 1 /* csa_flags */ + \ encode_channel_attrs_maxsz + \ encode_channel_attrs_maxsz + \ 1 /* csa_cb_program */ + \ 1 /* csa_sec_parms.len (1) */ + \ 1 /* cb_secflavor (AUTH_SYS) */ + \ 1 /* stamp */ + \ 1 /* machinename.len */ + \ XDR_QUADLEN(NFS4_MAX_MACHINE_NAME_LEN) + \ 1 /* uid */ + \ 1 /* gid */ + \ 1 /* gids.len (0) */) #define decode_create_session_maxsz (op_decode_hdr_maxsz + \ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 1 /* csr_sequence */ + \ 1 /* csr_flags */ + \ decode_channel_attrs_maxsz + \ decode_channel_attrs_maxsz) #define encode_destroy_session_maxsz (op_encode_hdr_maxsz + 4) #define decode_destroy_session_maxsz (op_decode_hdr_maxsz) #define encode_sequence_maxsz (op_encode_hdr_maxsz + \ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4) #define decode_sequence_maxsz (op_decode_hdr_maxsz + \ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) #define encode_reclaim_complete_maxsz (op_encode_hdr_maxsz + 4) #define decode_reclaim_complete_maxsz (op_decode_hdr_maxsz + 4) #define encode_getdeviceinfo_maxsz (op_encode_hdr_maxsz + 4 + \ XDR_QUADLEN(NFS4_DEVICEID4_SIZE)) #define decode_getdeviceinfo_maxsz (op_decode_hdr_maxsz + \ 1 /* layout type */ + \ 1 /* opaque devaddr4 length */ + \ /* devaddr4 payload is read into page */ \ 1 /* notification bitmap length */ + \ 1 /* notification bitmap */) #define encode_layoutget_maxsz (op_encode_hdr_maxsz + 10 + \ encode_stateid_maxsz) #define decode_layoutget_maxsz (op_decode_hdr_maxsz + 8 + \ decode_stateid_maxsz + \ XDR_QUADLEN(PNFS_LAYOUT_MAXSIZE)) #define encode_layoutcommit_maxsz (op_encode_hdr_maxsz + \ 2 /* offset */ + \ 2 /* length */ + \ 1 /* reclaim */ + \ encode_stateid_maxsz + \ 1 /* new offset (true) */ + \ 2 /* last byte written */ + \ 1 /* nt_timechanged (false) */ + \ 1 /* layoutupdate4 layout type */ + \ 1 /* NULL filelayout layoutupdate4 payload */) #define decode_layoutcommit_maxsz (op_decode_hdr_maxsz + 3) #define 
encode_layoutreturn_maxsz (8 + op_encode_hdr_maxsz + \ encode_stateid_maxsz + \ 1 /* FIXME: opaque lrf_body always empty at the moment */) #define decode_layoutreturn_maxsz (op_decode_hdr_maxsz + \ 1 + decode_stateid_maxsz) #else /* CONFIG_NFS_V4_1 */ #define encode_sequence_maxsz 0 #define decode_sequence_maxsz 0 #endif /* CONFIG_NFS_V4_1 */ #define NFS4_enc_compound_sz (1024) /* XXX: large enough? */ #define NFS4_dec_compound_sz (1024) /* XXX: large enough? */ #define NFS4_enc_read_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_read_maxsz) #define NFS4_dec_read_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_read_maxsz) #define NFS4_enc_readlink_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_readlink_maxsz) #define NFS4_dec_readlink_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_readlink_maxsz) #define NFS4_enc_readdir_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_readdir_maxsz) #define NFS4_dec_readdir_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_readdir_maxsz) #define NFS4_enc_write_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_write_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_write_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_write_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_commit_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_commit_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_commit_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_commit_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_savefh_maxsz + \ encode_open_maxsz + \ encode_getfh_maxsz + \ encode_getattr_maxsz + \ encode_restorefh_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_savefh_maxsz + \ decode_open_maxsz + \ decode_getfh_maxsz + \ decode_getattr_maxsz + \ decode_restorefh_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_open_confirm_sz \ (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ encode_open_confirm_maxsz) #define NFS4_dec_open_confirm_sz \ (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ decode_open_confirm_maxsz) #define NFS4_enc_open_noattr_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_open_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_open_noattr_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_open_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_open_downgrade_sz \ (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_open_downgrade_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_open_downgrade_sz \ (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_open_downgrade_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_close_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_close_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_close_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_close_maxsz + \ decode_getattr_maxsz) #define 
NFS4_enc_setattr_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_setattr_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_setattr_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_setattr_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_fsinfo_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_fsinfo_maxsz) #define NFS4_dec_fsinfo_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_fsinfo_maxsz) #define NFS4_enc_renew_sz (compound_encode_hdr_maxsz + \ encode_renew_maxsz) #define NFS4_dec_renew_sz (compound_decode_hdr_maxsz + \ decode_renew_maxsz) #define NFS4_enc_setclientid_sz (compound_encode_hdr_maxsz + \ encode_setclientid_maxsz) #define NFS4_dec_setclientid_sz (compound_decode_hdr_maxsz + \ decode_setclientid_maxsz) #define NFS4_enc_setclientid_confirm_sz \ (compound_encode_hdr_maxsz + \ encode_setclientid_confirm_maxsz + \ encode_putrootfh_maxsz + \ encode_fsinfo_maxsz) #define NFS4_dec_setclientid_confirm_sz \ (compound_decode_hdr_maxsz + \ decode_setclientid_confirm_maxsz + \ decode_putrootfh_maxsz + \ decode_fsinfo_maxsz) #define NFS4_enc_lock_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_lock_maxsz) #define NFS4_dec_lock_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_lock_maxsz) #define NFS4_enc_lockt_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_lockt_maxsz) #define NFS4_dec_lockt_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_lockt_maxsz) #define NFS4_enc_locku_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_locku_maxsz) #define NFS4_dec_locku_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_locku_maxsz) #define NFS4_enc_release_lockowner_sz \ (compound_encode_hdr_maxsz + \ encode_lockowner_maxsz) #define NFS4_dec_release_lockowner_sz \ (compound_decode_hdr_maxsz + \ decode_lockowner_maxsz) #define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_access_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_access_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_access_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_getattr_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_getattr_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_lookup_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_lookup_maxsz + \ encode_getattr_maxsz + \ encode_getfh_maxsz) #define NFS4_dec_lookup_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_lookup_maxsz + \ decode_getattr_maxsz + \ decode_getfh_maxsz) #define NFS4_enc_lookup_root_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putrootfh_maxsz + \ encode_getattr_maxsz + \ encode_getfh_maxsz) #define NFS4_dec_lookup_root_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putrootfh_maxsz + \ decode_getattr_maxsz + \ decode_getfh_maxsz) #define NFS4_enc_remove_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_remove_maxsz + \ encode_getattr_maxsz) #define 
NFS4_dec_remove_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_remove_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_rename_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_savefh_maxsz + \ encode_putfh_maxsz + \ encode_rename_maxsz + \ encode_getattr_maxsz + \ encode_restorefh_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_rename_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_savefh_maxsz + \ decode_putfh_maxsz + \ decode_rename_maxsz + \ decode_getattr_maxsz + \ decode_restorefh_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_link_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_savefh_maxsz + \ encode_putfh_maxsz + \ encode_link_maxsz + \ decode_getattr_maxsz + \ encode_restorefh_maxsz + \ decode_getattr_maxsz) #define NFS4_dec_link_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_savefh_maxsz + \ decode_putfh_maxsz + \ decode_link_maxsz + \ decode_getattr_maxsz + \ decode_restorefh_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_symlink_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_symlink_maxsz + \ encode_getattr_maxsz + \ encode_getfh_maxsz) #define NFS4_dec_symlink_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_symlink_maxsz + \ decode_getattr_maxsz + \ decode_getfh_maxsz) #define NFS4_enc_create_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_savefh_maxsz + \ encode_create_maxsz + \ encode_getfh_maxsz + \ encode_getattr_maxsz + \ encode_restorefh_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_create_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_savefh_maxsz + \ decode_create_maxsz + \ decode_getfh_maxsz + \ decode_getattr_maxsz + \ decode_restorefh_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_pathconf_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_pathconf_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_statfs_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_statfs_maxsz) #define NFS4_dec_statfs_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_statfs_maxsz) #define NFS4_enc_server_caps_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_server_caps_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_delegreturn_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_delegreturn_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_delegreturn_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_getacl_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_getacl_maxsz) #define NFS4_dec_getacl_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_getacl_maxsz) #define NFS4_enc_setacl_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_setacl_maxsz) #define NFS4_dec_setacl_sz (compound_decode_hdr_maxsz + \ 
decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_setacl_maxsz) #define NFS4_enc_fs_locations_sz \ (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_lookup_maxsz + \ encode_fs_locations_maxsz) #define NFS4_dec_fs_locations_sz \ (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_lookup_maxsz + \ decode_fs_locations_maxsz) #define NFS4_enc_secinfo_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_secinfo_maxsz) #define NFS4_dec_secinfo_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_secinfo_maxsz) #if defined(CONFIG_NFS_V4_1) #define NFS4_enc_exchange_id_sz \ (compound_encode_hdr_maxsz + \ encode_exchange_id_maxsz) #define NFS4_dec_exchange_id_sz \ (compound_decode_hdr_maxsz + \ decode_exchange_id_maxsz) #define NFS4_enc_create_session_sz \ (compound_encode_hdr_maxsz + \ encode_create_session_maxsz) #define NFS4_dec_create_session_sz \ (compound_decode_hdr_maxsz + \ decode_create_session_maxsz) #define NFS4_enc_destroy_session_sz (compound_encode_hdr_maxsz + \ encode_destroy_session_maxsz) #define NFS4_dec_destroy_session_sz (compound_decode_hdr_maxsz + \ decode_destroy_session_maxsz) #define NFS4_enc_sequence_sz \ (compound_decode_hdr_maxsz + \ encode_sequence_maxsz) #define NFS4_dec_sequence_sz \ (compound_decode_hdr_maxsz + \ decode_sequence_maxsz) #define NFS4_enc_get_lease_time_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putrootfh_maxsz + \ encode_fsinfo_maxsz) #define NFS4_dec_get_lease_time_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putrootfh_maxsz + \ decode_fsinfo_maxsz) #define NFS4_enc_reclaim_complete_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_reclaim_complete_maxsz) #define NFS4_dec_reclaim_complete_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_reclaim_complete_maxsz) #define NFS4_enc_getdeviceinfo_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz +\ encode_getdeviceinfo_maxsz) #define NFS4_dec_getdeviceinfo_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_getdeviceinfo_maxsz) #define NFS4_enc_layoutget_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_layoutget_maxsz) #define NFS4_dec_layoutget_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_layoutget_maxsz) #define NFS4_enc_layoutcommit_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz +\ encode_putfh_maxsz + \ encode_layoutcommit_maxsz + \ encode_getattr_maxsz) #define NFS4_dec_layoutcommit_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_layoutcommit_maxsz + \ decode_getattr_maxsz) #define NFS4_enc_layoutreturn_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_layoutreturn_maxsz) #define NFS4_dec_layoutreturn_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_layoutreturn_maxsz) const u32 nfs41_maxwrite_overhead = ((RPC_MAX_HEADER_WITH_AUTH + compound_encode_hdr_maxsz + encode_sequence_maxsz + encode_putfh_maxsz + encode_getattr_maxsz) * XDR_UNIT); const u32 nfs41_maxread_overhead = ((RPC_MAX_HEADER_WITH_AUTH + compound_decode_hdr_maxsz + decode_sequence_maxsz + decode_putfh_maxsz) * XDR_UNIT); #endif /* CONFIG_NFS_V4_1 */ static const umode_t nfs_type2fmt[] = { [NF4BAD] = 0, [NF4REG] = S_IFREG, [NF4DIR] = S_IFDIR, [NF4BLK] = S_IFBLK, [NF4CHR] = 
S_IFCHR, [NF4LNK] = S_IFLNK, [NF4SOCK] = S_IFSOCK, [NF4FIFO] = S_IFIFO, [NF4ATTRDIR] = 0, [NF4NAMEDATTR] = 0, }; struct compound_hdr { int32_t status; uint32_t nops; __be32 * nops_p; uint32_t taglen; char * tag; uint32_t replen; /* expected reply words */ u32 minorversion; }; static __be32 *reserve_space(struct xdr_stream *xdr, size_t nbytes) { __be32 *p = xdr_reserve_space(xdr, nbytes); BUG_ON(!p); return p; } static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str) { __be32 *p; p = xdr_reserve_space(xdr, 4 + len); BUG_ON(p == NULL); xdr_encode_opaque(p, str, len); } static void encode_compound_hdr(struct xdr_stream *xdr, struct rpc_rqst *req, struct compound_hdr *hdr) { __be32 *p; struct rpc_auth *auth = req->rq_cred->cr_auth; /* initialize running count of expected bytes in reply. * NOTE: the replied tag SHOULD be the same is the one sent, * but this is not required as a MUST for the server to do so. */ hdr->replen = RPC_REPHDRSIZE + auth->au_rslack + 3 + hdr->taglen; dprintk("encode_compound: tag=%.*s\n", (int)hdr->taglen, hdr->tag); BUG_ON(hdr->taglen > NFS4_MAXTAGLEN); p = reserve_space(xdr, 4 + hdr->taglen + 8); p = xdr_encode_opaque(p, hdr->tag, hdr->taglen); *p++ = cpu_to_be32(hdr->minorversion); hdr->nops_p = p; *p = cpu_to_be32(hdr->nops); } static void encode_nops(struct compound_hdr *hdr) { BUG_ON(hdr->nops > NFS4_MAX_OPS); *hdr->nops_p = htonl(hdr->nops); } static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *verf) { __be32 *p; p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE); BUG_ON(p == NULL); xdr_encode_opaque_fixed(p, verf->data, NFS4_VERIFIER_SIZE); } static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const struct nfs_server *server) { char owner_name[IDMAP_NAMESZ]; char owner_group[IDMAP_NAMESZ]; int owner_namelen = 0; int owner_grouplen = 0; __be32 *p; __be32 *q; int len; uint32_t bmval0 = 0; uint32_t bmval1 = 0; /* * We reserve enough space to write the entire attribute buffer at once. * In the worst-case, this would be * 12(bitmap) + 4(attrlen) + 8(size) + 4(mode) + 4(atime) + 4(mtime) * = 36 bytes, plus any contribution from variable-length fields * such as owner/group. */ len = 16; /* Sigh */ if (iap->ia_valid & ATTR_SIZE) len += 8; if (iap->ia_valid & ATTR_MODE) len += 4; if (iap->ia_valid & ATTR_UID) { owner_namelen = nfs_map_uid_to_name(server, iap->ia_uid, owner_name, IDMAP_NAMESZ); if (owner_namelen < 0) { dprintk("nfs: couldn't resolve uid %d to string\n", iap->ia_uid); /* XXX */ strcpy(owner_name, "nobody"); owner_namelen = sizeof("nobody") - 1; /* goto out; */ } len += 4 + (XDR_QUADLEN(owner_namelen) << 2); } if (iap->ia_valid & ATTR_GID) { owner_grouplen = nfs_map_gid_to_group(server, iap->ia_gid, owner_group, IDMAP_NAMESZ); if (owner_grouplen < 0) { dprintk("nfs: couldn't resolve gid %d to string\n", iap->ia_gid); strcpy(owner_group, "nobody"); owner_grouplen = sizeof("nobody") - 1; /* goto out; */ } len += 4 + (XDR_QUADLEN(owner_grouplen) << 2); } if (iap->ia_valid & ATTR_ATIME_SET) len += 16; else if (iap->ia_valid & ATTR_ATIME) len += 4; if (iap->ia_valid & ATTR_MTIME_SET) len += 16; else if (iap->ia_valid & ATTR_MTIME) len += 4; p = reserve_space(xdr, len); /* * We write the bitmap length now, but leave the bitmap and the attribute * buffer length to be backfilled at the end of this routine. 
*/ *p++ = cpu_to_be32(2); q = p; p += 3; if (iap->ia_valid & ATTR_SIZE) { bmval0 |= FATTR4_WORD0_SIZE; p = xdr_encode_hyper(p, iap->ia_size); } if (iap->ia_valid & ATTR_MODE) { bmval1 |= FATTR4_WORD1_MODE; *p++ = cpu_to_be32(iap->ia_mode & S_IALLUGO); } if (iap->ia_valid & ATTR_UID) { bmval1 |= FATTR4_WORD1_OWNER; p = xdr_encode_opaque(p, owner_name, owner_namelen); } if (iap->ia_valid & ATTR_GID) { bmval1 |= FATTR4_WORD1_OWNER_GROUP; p = xdr_encode_opaque(p, owner_group, owner_grouplen); } if (iap->ia_valid & ATTR_ATIME_SET) { bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET; *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME); *p++ = cpu_to_be32(0); *p++ = cpu_to_be32(iap->ia_atime.tv_sec); *p++ = cpu_to_be32(iap->ia_atime.tv_nsec); } else if (iap->ia_valid & ATTR_ATIME) { bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET; *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME); } if (iap->ia_valid & ATTR_MTIME_SET) { bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET; *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME); *p++ = cpu_to_be32(0); *p++ = cpu_to_be32(iap->ia_mtime.tv_sec); *p++ = cpu_to_be32(iap->ia_mtime.tv_nsec); } else if (iap->ia_valid & ATTR_MTIME) { bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET; *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME); } /* * Now we backfill the bitmap and the attribute buffer length. */ if (len != ((char *)p - (char *)q) + 4) { printk(KERN_ERR "nfs: Attr length error, %u != %Zu\n", len, ((char *)p - (char *)q) + 4); BUG(); } len = (char *)p - (char *)q - 12; *q++ = htonl(bmval0); *q++ = htonl(bmval1); *q = htonl(len); /* out: */ } static void encode_access(struct xdr_stream *xdr, u32 access, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 8); *p++ = cpu_to_be32(OP_ACCESS); *p = cpu_to_be32(access); hdr->nops++; hdr->replen += decode_access_maxsz; } static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 8+NFS4_STATEID_SIZE); *p++ = cpu_to_be32(OP_CLOSE); *p++ = cpu_to_be32(arg->seqid->sequence->counter); xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE); hdr->nops++; hdr->replen += decode_close_maxsz; } static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 16); *p++ = cpu_to_be32(OP_COMMIT); p = xdr_encode_hyper(p, args->offset); *p = cpu_to_be32(args->count); hdr->nops++; hdr->replen += decode_commit_maxsz; } static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *create, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 8); *p++ = cpu_to_be32(OP_CREATE); *p = cpu_to_be32(create->ftype); switch (create->ftype) { case NF4LNK: p = reserve_space(xdr, 4); *p = cpu_to_be32(create->u.symlink.len); xdr_write_pages(xdr, create->u.symlink.pages, 0, create->u.symlink.len); break; case NF4BLK: case NF4CHR: p = reserve_space(xdr, 8); *p++ = cpu_to_be32(create->u.device.specdata1); *p = cpu_to_be32(create->u.device.specdata2); break; default: break; } encode_string(xdr, create->name->len, create->name->name); hdr->nops++; hdr->replen += decode_create_maxsz; encode_attrs(xdr, create->attrs, create->server); } static void encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 12); *p++ = cpu_to_be32(OP_GETATTR); *p++ = cpu_to_be32(1); *p = cpu_to_be32(bitmap); hdr->nops++; hdr->replen += decode_getattr_maxsz; } static void encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1, struct compound_hdr *hdr) { 
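	/* Encode a GETATTR op requesting the attributes named by a two-word bitmap. */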
__be32 *p; p = reserve_space(xdr, 16); *p++ = cpu_to_be32(OP_GETATTR); *p++ = cpu_to_be32(2); *p++ = cpu_to_be32(bm0); *p = cpu_to_be32(bm1); hdr->nops++; hdr->replen += decode_getattr_maxsz; } static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) { encode_getattr_two(xdr, bitmask[0] & nfs4_fattr_bitmap[0], bitmask[1] & nfs4_fattr_bitmap[1], hdr); } static void encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) { encode_getattr_two(xdr, bitmask[0] & nfs4_fsinfo_bitmap[0], bitmask[1] & nfs4_fsinfo_bitmap[1], hdr); } static void encode_fs_locations(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) { encode_getattr_two(xdr, bitmask[0] & nfs4_fs_locations_bitmap[0], bitmask[1] & nfs4_fs_locations_bitmap[1], hdr); } static void encode_getfh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_GETFH); hdr->nops++; hdr->replen += decode_getfh_maxsz; } static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 8 + name->len); *p++ = cpu_to_be32(OP_LINK); xdr_encode_opaque(p, name->name, name->len); hdr->nops++; hdr->replen += decode_link_maxsz; } static inline int nfs4_lock_type(struct file_lock *fl, int block) { if ((fl->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) == F_RDLCK) return block ? NFS4_READW_LT : NFS4_READ_LT; return block ? NFS4_WRITEW_LT : NFS4_WRITE_LT; } static inline uint64_t nfs4_lock_length(struct file_lock *fl) { if (fl->fl_end == OFFSET_MAX) return ~(uint64_t)0; return fl->fl_end - fl->fl_start + 1; } static void encode_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner) { __be32 *p; p = reserve_space(xdr, 32); p = xdr_encode_hyper(p, lowner->clientid); *p++ = cpu_to_be32(20); p = xdr_encode_opaque_fixed(p, "lock id:", 8); *p++ = cpu_to_be32(lowner->s_dev); xdr_encode_hyper(p, lowner->id); } /* * opcode,type,reclaim,offset,length,new_lock_owner = 32 * open_seqid,open_stateid,lock_seqid,lock_owner.clientid, lock_owner.id = 40 */ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 32); *p++ = cpu_to_be32(OP_LOCK); *p++ = cpu_to_be32(nfs4_lock_type(args->fl, args->block)); *p++ = cpu_to_be32(args->reclaim); p = xdr_encode_hyper(p, args->fl->fl_start); p = xdr_encode_hyper(p, nfs4_lock_length(args->fl)); *p = cpu_to_be32(args->new_lock_owner); if (args->new_lock_owner){ p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4); *p++ = cpu_to_be32(args->open_seqid->sequence->counter); p = xdr_encode_opaque_fixed(p, args->open_stateid->data, NFS4_STATEID_SIZE); *p++ = cpu_to_be32(args->lock_seqid->sequence->counter); encode_lockowner(xdr, &args->lock_owner); } else { p = reserve_space(xdr, NFS4_STATEID_SIZE+4); p = xdr_encode_opaque_fixed(p, args->lock_stateid->data, NFS4_STATEID_SIZE); *p = cpu_to_be32(args->lock_seqid->sequence->counter); } hdr->nops++; hdr->replen += decode_lock_maxsz; } static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 24); *p++ = cpu_to_be32(OP_LOCKT); *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0)); p = xdr_encode_hyper(p, args->fl->fl_start); p = xdr_encode_hyper(p, nfs4_lock_length(args->fl)); encode_lockowner(xdr, &args->lock_owner); hdr->nops++; hdr->replen += decode_lockt_maxsz; } static void encode_locku(struct xdr_stream *xdr, const struct 
nfs_locku_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 12+NFS4_STATEID_SIZE+16); *p++ = cpu_to_be32(OP_LOCKU); *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0)); *p++ = cpu_to_be32(args->seqid->sequence->counter); p = xdr_encode_opaque_fixed(p, args->stateid->data, NFS4_STATEID_SIZE); p = xdr_encode_hyper(p, args->fl->fl_start); xdr_encode_hyper(p, nfs4_lock_length(args->fl)); hdr->nops++; hdr->replen += decode_locku_maxsz; } static void encode_release_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_RELEASE_LOCKOWNER); encode_lockowner(xdr, lowner); hdr->nops++; hdr->replen += decode_release_lockowner_maxsz; } static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { int len = name->len; __be32 *p; p = reserve_space(xdr, 8 + len); *p++ = cpu_to_be32(OP_LOOKUP); xdr_encode_opaque(p, name->name, len); hdr->nops++; hdr->replen += decode_lookup_maxsz; } static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode) { __be32 *p; p = reserve_space(xdr, 8); switch (fmode & (FMODE_READ|FMODE_WRITE)) { case FMODE_READ: *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_READ); break; case FMODE_WRITE: *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_WRITE); break; case FMODE_READ|FMODE_WRITE: *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_BOTH); break; default: *p++ = cpu_to_be32(0); } *p = cpu_to_be32(0); /* for linux, share_deny = 0 always */ } static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_openargs *arg) { __be32 *p; /* * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4, * owner 4 = 32 */ p = reserve_space(xdr, 8); *p++ = cpu_to_be32(OP_OPEN); *p = cpu_to_be32(arg->seqid->sequence->counter); encode_share_access(xdr, arg->fmode); p = reserve_space(xdr, 32); p = xdr_encode_hyper(p, arg->clientid); *p++ = cpu_to_be32(20); p = xdr_encode_opaque_fixed(p, "open id:", 8); *p++ = cpu_to_be32(arg->server->s_dev); xdr_encode_hyper(p, arg->id); } static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg) { __be32 *p; struct nfs_client *clp; p = reserve_space(xdr, 4); switch(arg->open_flags & O_EXCL) { case 0: *p = cpu_to_be32(NFS4_CREATE_UNCHECKED); encode_attrs(xdr, arg->u.attrs, arg->server); break; default: clp = arg->server->nfs_client; if (clp->cl_mvops->minor_version > 0) { if (nfs4_has_persistent_session(clp)) { *p = cpu_to_be32(NFS4_CREATE_GUARDED); encode_attrs(xdr, arg->u.attrs, arg->server); } else { struct iattr dummy; *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1); encode_nfs4_verifier(xdr, &arg->u.verifier); dummy.ia_valid = 0; encode_attrs(xdr, &dummy, arg->server); } } else { *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE); encode_nfs4_verifier(xdr, &arg->u.verifier); } } } static void encode_opentype(struct xdr_stream *xdr, const struct nfs_openargs *arg) { __be32 *p; p = reserve_space(xdr, 4); switch (arg->open_flags & O_CREAT) { case 0: *p = cpu_to_be32(NFS4_OPEN_NOCREATE); break; default: BUG_ON(arg->claim != NFS4_OPEN_CLAIM_NULL); *p = cpu_to_be32(NFS4_OPEN_CREATE); encode_createmode(xdr, arg); } } static inline void encode_delegation_type(struct xdr_stream *xdr, fmode_t delegation_type) { __be32 *p; p = reserve_space(xdr, 4); switch (delegation_type) { case 0: *p = cpu_to_be32(NFS4_OPEN_DELEGATE_NONE); break; case FMODE_READ: *p = cpu_to_be32(NFS4_OPEN_DELEGATE_READ); break; case FMODE_WRITE|FMODE_READ: *p = cpu_to_be32(NFS4_OPEN_DELEGATE_WRITE); break; default: 
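		/* no other delegation type is valid here */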
BUG(); } } static inline void encode_claim_null(struct xdr_stream *xdr, const struct qstr *name) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(NFS4_OPEN_CLAIM_NULL); encode_string(xdr, name->len, name->name); } static inline void encode_claim_previous(struct xdr_stream *xdr, fmode_t type) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(NFS4_OPEN_CLAIM_PREVIOUS); encode_delegation_type(xdr, type); } static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struct qstr *name, const nfs4_stateid *stateid) { __be32 *p; p = reserve_space(xdr, 4+NFS4_STATEID_SIZE); *p++ = cpu_to_be32(NFS4_OPEN_CLAIM_DELEGATE_CUR); xdr_encode_opaque_fixed(p, stateid->data, NFS4_STATEID_SIZE); encode_string(xdr, name->len, name->name); } static void encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg, struct compound_hdr *hdr) { encode_openhdr(xdr, arg); encode_opentype(xdr, arg); switch (arg->claim) { case NFS4_OPEN_CLAIM_NULL: encode_claim_null(xdr, arg->name); break; case NFS4_OPEN_CLAIM_PREVIOUS: encode_claim_previous(xdr, arg->u.delegation_type); break; case NFS4_OPEN_CLAIM_DELEGATE_CUR: encode_claim_delegate_cur(xdr, arg->name, &arg->u.delegation); break; default: BUG(); } hdr->nops++; hdr->replen += decode_open_maxsz; } static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4); *p++ = cpu_to_be32(OP_OPEN_CONFIRM); p = xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE); *p = cpu_to_be32(arg->seqid->sequence->counter); hdr->nops++; hdr->replen += decode_open_confirm_maxsz; } static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4+NFS4_STATEID_SIZE+4); *p++ = cpu_to_be32(OP_OPEN_DOWNGRADE); p = xdr_encode_opaque_fixed(p, arg->stateid->data, NFS4_STATEID_SIZE); *p = cpu_to_be32(arg->seqid->sequence->counter); encode_share_access(xdr, arg->fmode); hdr->nops++; hdr->replen += decode_open_downgrade_maxsz; } static void encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh, struct compound_hdr *hdr) { int len = fh->size; __be32 *p; p = reserve_space(xdr, 8 + len); *p++ = cpu_to_be32(OP_PUTFH); xdr_encode_opaque(p, fh->data, len); hdr->nops++; hdr->replen += decode_putfh_maxsz; } static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_PUTROOTFH); hdr->nops++; hdr->replen += decode_putrootfh_maxsz; } static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx, const struct nfs_lock_context *l_ctx, int zero_seqid) { nfs4_stateid stateid; __be32 *p; p = reserve_space(xdr, NFS4_STATEID_SIZE); if (ctx->state != NULL) { nfs4_copy_stateid(&stateid, ctx->state, l_ctx->lockowner, l_ctx->pid); if (zero_seqid) stateid.stateid.seqid = 0; xdr_encode_opaque_fixed(p, stateid.data, NFS4_STATEID_SIZE); } else xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE); } static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_READ); encode_stateid(xdr, args->context, args->lock_context, hdr->minorversion); p = reserve_space(xdr, 12); p = xdr_encode_hyper(p, args->offset); *p = cpu_to_be32(args->count); hdr->nops++; hdr->replen += decode_read_maxsz; } static void encode_readdir(struct xdr_stream *xdr, const struct 
nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr) { uint32_t attrs[2] = { FATTR4_WORD0_RDATTR_ERROR, FATTR4_WORD1_MOUNTED_ON_FILEID, }; uint32_t dircount = readdir->count >> 1; __be32 *p; if (readdir->plus) { attrs[0] |= FATTR4_WORD0_TYPE|FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE| FATTR4_WORD0_FSID|FATTR4_WORD0_FILEHANDLE|FATTR4_WORD0_FILEID; attrs[1] |= FATTR4_WORD1_MODE|FATTR4_WORD1_NUMLINKS|FATTR4_WORD1_OWNER| FATTR4_WORD1_OWNER_GROUP|FATTR4_WORD1_RAWDEV| FATTR4_WORD1_SPACE_USED|FATTR4_WORD1_TIME_ACCESS| FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; dircount >>= 1; } /* Use mounted_on_fileid only if the server supports it */ if (!(readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)) attrs[0] |= FATTR4_WORD0_FILEID; p = reserve_space(xdr, 12+NFS4_VERIFIER_SIZE+20); *p++ = cpu_to_be32(OP_READDIR); p = xdr_encode_hyper(p, readdir->cookie); p = xdr_encode_opaque_fixed(p, readdir->verifier.data, NFS4_VERIFIER_SIZE); *p++ = cpu_to_be32(dircount); *p++ = cpu_to_be32(readdir->count); *p++ = cpu_to_be32(2); *p++ = cpu_to_be32(attrs[0] & readdir->bitmask[0]); *p = cpu_to_be32(attrs[1] & readdir->bitmask[1]); hdr->nops++; hdr->replen += decode_readdir_maxsz; dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n", __func__, (unsigned long long)readdir->cookie, ((u32 *)readdir->verifier.data)[0], ((u32 *)readdir->verifier.data)[1], attrs[0] & readdir->bitmask[0], attrs[1] & readdir->bitmask[1]); } static void encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_READLINK); hdr->nops++; hdr->replen += decode_readlink_maxsz; } static void encode_remove(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 8 + name->len); *p++ = cpu_to_be32(OP_REMOVE); xdr_encode_opaque(p, name->name, name->len); hdr->nops++; hdr->replen += decode_remove_maxsz; } static void encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, const struct qstr *newname, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_RENAME); encode_string(xdr, oldname->len, oldname->name); encode_string(xdr, newname->len, newname->name); hdr->nops++; hdr->replen += decode_rename_maxsz; } static void encode_renew(struct xdr_stream *xdr, const struct nfs_client *client_stateid, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 12); *p++ = cpu_to_be32(OP_RENEW); xdr_encode_hyper(p, client_stateid->cl_clientid); hdr->nops++; hdr->replen += decode_renew_maxsz; } static void encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_RESTOREFH); hdr->nops++; hdr->replen += decode_restorefh_maxsz; } static void encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4+NFS4_STATEID_SIZE); *p++ = cpu_to_be32(OP_SETATTR); xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE); p = reserve_space(xdr, 2*4); *p++ = cpu_to_be32(1); *p = cpu_to_be32(FATTR4_WORD0_ACL); BUG_ON(arg->acl_len % 4); p = reserve_space(xdr, 4); *p = cpu_to_be32(arg->acl_len); xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len); hdr->nops++; hdr->replen += decode_setacl_maxsz; } static void encode_savefh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_SAVEFH); hdr->nops++; 
hdr->replen += decode_savefh_maxsz; } static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs *arg, const struct nfs_server *server, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4+NFS4_STATEID_SIZE); *p++ = cpu_to_be32(OP_SETATTR); xdr_encode_opaque_fixed(p, arg->stateid.data, NFS4_STATEID_SIZE); hdr->nops++; hdr->replen += decode_setattr_maxsz; encode_attrs(xdr, arg->iap, server); } static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4 + NFS4_VERIFIER_SIZE); *p++ = cpu_to_be32(OP_SETCLIENTID); xdr_encode_opaque_fixed(p, setclientid->sc_verifier->data, NFS4_VERIFIER_SIZE); encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name); p = reserve_space(xdr, 4); *p = cpu_to_be32(setclientid->sc_prog); encode_string(xdr, setclientid->sc_netid_len, setclientid->sc_netid); encode_string(xdr, setclientid->sc_uaddr_len, setclientid->sc_uaddr); p = reserve_space(xdr, 4); *p = cpu_to_be32(setclientid->sc_cb_ident); hdr->nops++; hdr->replen += decode_setclientid_maxsz; } static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs4_setclientid_res *arg, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 12 + NFS4_VERIFIER_SIZE); *p++ = cpu_to_be32(OP_SETCLIENTID_CONFIRM); p = xdr_encode_hyper(p, arg->clientid); xdr_encode_opaque_fixed(p, arg->confirm.data, NFS4_VERIFIER_SIZE); hdr->nops++; hdr->replen += decode_setclientid_confirm_maxsz; } static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4); *p = cpu_to_be32(OP_WRITE); encode_stateid(xdr, args->context, args->lock_context, hdr->minorversion); p = reserve_space(xdr, 16); p = xdr_encode_hyper(p, args->offset); *p++ = cpu_to_be32(args->stable); *p = cpu_to_be32(args->count); xdr_write_pages(xdr, args->pages, args->pgbase, args->count); hdr->nops++; hdr->replen += decode_write_maxsz; } static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4+NFS4_STATEID_SIZE); *p++ = cpu_to_be32(OP_DELEGRETURN); xdr_encode_opaque_fixed(p, stateid->data, NFS4_STATEID_SIZE); hdr->nops++; hdr->replen += decode_delegreturn_maxsz; } static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { int len = name->len; __be32 *p; p = reserve_space(xdr, 8 + len); *p++ = cpu_to_be32(OP_SECINFO); xdr_encode_opaque(p, name->name, len); hdr->nops++; hdr->replen += decode_secinfo_maxsz; } #if defined(CONFIG_NFS_V4_1) /* NFSv4.1 operations */ static void encode_exchange_id(struct xdr_stream *xdr, struct nfs41_exchange_id_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4 + sizeof(args->verifier->data)); *p++ = cpu_to_be32(OP_EXCHANGE_ID); xdr_encode_opaque_fixed(p, args->verifier->data, sizeof(args->verifier->data)); encode_string(xdr, args->id_len, args->id); p = reserve_space(xdr, 12); *p++ = cpu_to_be32(args->flags); *p++ = cpu_to_be32(0); /* zero length state_protect4_a */ *p = cpu_to_be32(0); /* zero length implementation id array */ hdr->nops++; hdr->replen += decode_exchange_id_maxsz; } static void encode_create_session(struct xdr_stream *xdr, struct nfs41_create_session_args *args, struct compound_hdr *hdr) { __be32 *p; char machine_name[NFS4_MAX_MACHINE_NAME_LEN]; uint32_t len; struct nfs_client *clp = args->client; u32 max_resp_sz_cached; /* * 
Assumes OPEN is the biggest non-idempotent compound. * 2 is the verifier. */ max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE + RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT; len = scnprintf(machine_name, sizeof(machine_name), "%s", clp->cl_ipaddr); p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12); *p++ = cpu_to_be32(OP_CREATE_SESSION); p = xdr_encode_hyper(p, clp->cl_clientid); *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */ *p++ = cpu_to_be32(args->flags); /*flags */ /* Fore Channel */ *p++ = cpu_to_be32(0); /* header padding size */ *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */ *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */ *p++ = cpu_to_be32(max_resp_sz_cached); /* Max resp sz cached */ *p++ = cpu_to_be32(args->fc_attrs.max_ops); /* max operations */ *p++ = cpu_to_be32(args->fc_attrs.max_reqs); /* max requests */ *p++ = cpu_to_be32(0); /* rdmachannel_attrs */ /* Back Channel */ *p++ = cpu_to_be32(0); /* header padding size */ *p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */ *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */ *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */ *p++ = cpu_to_be32(args->bc_attrs.max_ops); /* max operations */ *p++ = cpu_to_be32(args->bc_attrs.max_reqs); /* max requests */ *p++ = cpu_to_be32(0); /* rdmachannel_attrs */ *p++ = cpu_to_be32(args->cb_program); /* cb_program */ *p++ = cpu_to_be32(1); *p++ = cpu_to_be32(RPC_AUTH_UNIX); /* auth_sys */ /* authsys_parms rfc1831 */ *p++ = cpu_to_be32((u32)clp->cl_boot_time.tv_nsec); /* stamp */ p = xdr_encode_opaque(p, machine_name, len); *p++ = cpu_to_be32(0); /* UID */ *p++ = cpu_to_be32(0); /* GID */ *p = cpu_to_be32(0); /* No more gids */ hdr->nops++; hdr->replen += decode_create_session_maxsz; } static void encode_destroy_session(struct xdr_stream *xdr, struct nfs4_session *session, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 4 + NFS4_MAX_SESSIONID_LEN); *p++ = cpu_to_be32(OP_DESTROY_SESSION); xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN); hdr->nops++; hdr->replen += decode_destroy_session_maxsz; } static void encode_reclaim_complete(struct xdr_stream *xdr, struct nfs41_reclaim_complete_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 8); *p++ = cpu_to_be32(OP_RECLAIM_COMPLETE); *p++ = cpu_to_be32(args->one_fs); hdr->nops++; hdr->replen += decode_reclaim_complete_maxsz; } #endif /* CONFIG_NFS_V4_1 */ static void encode_sequence(struct xdr_stream *xdr, const struct nfs4_sequence_args *args, struct compound_hdr *hdr) { #if defined(CONFIG_NFS_V4_1) struct nfs4_session *session = args->sa_session; struct nfs4_slot_table *tp; struct nfs4_slot *slot; __be32 *p; if (!session) return; tp = &session->fc_slot_table; WARN_ON(args->sa_slotid == NFS4_MAX_SLOT_TABLE); slot = tp->slots + args->sa_slotid; p = reserve_space(xdr, 4 + NFS4_MAX_SESSIONID_LEN + 16); *p++ = cpu_to_be32(OP_SEQUENCE); /* * Sessionid + seqid + slotid + max slotid + cache_this */ dprintk("%s: sessionid=%u:%u:%u:%u seqid=%d slotid=%d " "max_slotid=%d cache_this=%d\n", __func__, ((u32 *)session->sess_id.data)[0], ((u32 *)session->sess_id.data)[1], ((u32 *)session->sess_id.data)[2], ((u32 *)session->sess_id.data)[3], slot->seq_nr, args->sa_slotid, tp->highest_used_slotid, args->sa_cache_this); p = xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN); *p++ = cpu_to_be32(slot->seq_nr); *p++ = cpu_to_be32(args->sa_slotid); *p++ = cpu_to_be32(tp->highest_used_slotid); 
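	/* sa_cache_this asks the server to retain this reply in the session's reply cache */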
*p = cpu_to_be32(args->sa_cache_this); hdr->nops++; hdr->replen += decode_sequence_maxsz; #endif /* CONFIG_NFS_V4_1 */ } #ifdef CONFIG_NFS_V4_1 static void encode_getdeviceinfo(struct xdr_stream *xdr, const struct nfs4_getdeviceinfo_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 16 + NFS4_DEVICEID4_SIZE); *p++ = cpu_to_be32(OP_GETDEVICEINFO); p = xdr_encode_opaque_fixed(p, args->pdev->dev_id.data, NFS4_DEVICEID4_SIZE); *p++ = cpu_to_be32(args->pdev->layout_type); *p++ = cpu_to_be32(args->pdev->pglen); /* gdia_maxcount */ *p++ = cpu_to_be32(0); /* bitmap length 0 */ hdr->nops++; hdr->replen += decode_getdeviceinfo_maxsz; } static void encode_layoutget(struct xdr_stream *xdr, const struct nfs4_layoutget_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE); *p++ = cpu_to_be32(OP_LAYOUTGET); *p++ = cpu_to_be32(0); /* Signal layout available */ *p++ = cpu_to_be32(args->type); *p++ = cpu_to_be32(args->range.iomode); p = xdr_encode_hyper(p, args->range.offset); p = xdr_encode_hyper(p, args->range.length); p = xdr_encode_hyper(p, args->minlength); p = xdr_encode_opaque_fixed(p, &args->stateid.data, NFS4_STATEID_SIZE); *p = cpu_to_be32(args->maxcount); dprintk("%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n", __func__, args->type, args->range.iomode, (unsigned long)args->range.offset, (unsigned long)args->range.length, args->maxcount); hdr->nops++; hdr->replen += decode_layoutget_maxsz; } static int encode_layoutcommit(struct xdr_stream *xdr, struct inode *inode, const struct nfs4_layoutcommit_args *args, struct compound_hdr *hdr) { __be32 *p; dprintk("%s: lbw: %llu type: %d\n", __func__, args->lastbytewritten, NFS_SERVER(args->inode)->pnfs_curr_ld->id); p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE); *p++ = cpu_to_be32(OP_LAYOUTCOMMIT); /* Only whole file layouts */ p = xdr_encode_hyper(p, 0); /* offset */ p = xdr_encode_hyper(p, args->lastbytewritten + 1); /* length */ *p++ = cpu_to_be32(0); /* reclaim */ p = xdr_encode_opaque_fixed(p, args->stateid.data, NFS4_STATEID_SIZE); *p++ = cpu_to_be32(1); /* newoffset = TRUE */ p = xdr_encode_hyper(p, args->lastbytewritten); *p++ = cpu_to_be32(0); /* Never send time_modify_changed */ *p++ = cpu_to_be32(NFS_SERVER(args->inode)->pnfs_curr_ld->id);/* type */ if (NFS_SERVER(inode)->pnfs_curr_ld->encode_layoutcommit) NFS_SERVER(inode)->pnfs_curr_ld->encode_layoutcommit( NFS_I(inode)->layout, xdr, args); else { p = reserve_space(xdr, 4); *p = cpu_to_be32(0); /* no layout-type payload */ } hdr->nops++; hdr->replen += decode_layoutcommit_maxsz; return 0; } static void encode_layoutreturn(struct xdr_stream *xdr, const struct nfs4_layoutreturn_args *args, struct compound_hdr *hdr) { __be32 *p; p = reserve_space(xdr, 20); *p++ = cpu_to_be32(OP_LAYOUTRETURN); *p++ = cpu_to_be32(0); /* reclaim. 
always 0 for now */ *p++ = cpu_to_be32(args->layout_type); *p++ = cpu_to_be32(IOMODE_ANY); *p = cpu_to_be32(RETURN_FILE); p = reserve_space(xdr, 16 + NFS4_STATEID_SIZE); p = xdr_encode_hyper(p, 0); p = xdr_encode_hyper(p, NFS4_MAX_UINT64); spin_lock(&args->inode->i_lock); xdr_encode_opaque_fixed(p, &args->stateid.data, NFS4_STATEID_SIZE); spin_unlock(&args->inode->i_lock); if (NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn) { NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn( NFS_I(args->inode)->layout, xdr, args); } else { p = reserve_space(xdr, 4); *p = cpu_to_be32(0); } hdr->nops++; hdr->replen += decode_layoutreturn_maxsz; } #endif /* CONFIG_NFS_V4_1 */ /* * END OF "GENERIC" ENCODE ROUTINES. */ static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args) { #if defined(CONFIG_NFS_V4_1) if (args->sa_session) return args->sa_session->clp->cl_mvops->minor_version; #endif /* CONFIG_NFS_V4_1 */ return 0; } /* * Encode an ACCESS request */ static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_accessargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_access(xdr, args->access, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode LOOKUP request */ static void nfs4_xdr_enc_lookup(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_lookup_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->dir_fh, &hdr); encode_lookup(xdr, args->name, &hdr); encode_getfh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode LOOKUP_ROOT request */ static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_lookup_root_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putrootfh(xdr, &hdr); encode_getfh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode REMOVE request */ static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs_removeargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_remove(xdr, &args->name, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode RENAME request */ static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs_renameargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->old_dir, &hdr); encode_savefh(xdr, &hdr); encode_putfh(xdr, args->new_dir, &hdr); encode_rename(xdr, args->old_name, args->new_name, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_restorefh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode LINK request */ static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_link_arg *args) { struct 
compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_savefh(xdr, &hdr); encode_putfh(xdr, args->dir_fh, &hdr); encode_link(xdr, args->name, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_restorefh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode CREATE request */ static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_create_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->dir_fh, &hdr); encode_savefh(xdr, &hdr); encode_create(xdr, args, &hdr); encode_getfh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_restorefh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode SYMLINK request */ static void nfs4_xdr_enc_symlink(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_create_arg *args) { nfs4_xdr_enc_create(req, xdr, args); } /* * Encode GETATTR request */ static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_getattr_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode a CLOSE request */ static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_closeargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_close(xdr, args, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode an OPEN request */ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_openargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_savefh(xdr, &hdr); encode_open(xdr, args, &hdr); encode_getfh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_restorefh(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode an OPEN_CONFIRM request */ static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_open_confirmargs *args) { struct compound_hdr hdr = { .nops = 0, }; encode_compound_hdr(xdr, req, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_open_confirm(xdr, args, &hdr); encode_nops(&hdr); } /* * Encode an OPEN request with no attributes. 
*/ static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_openargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_open(xdr, args, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode an OPEN_DOWNGRADE request */ static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_closeargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_open_downgrade(xdr, args, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode a LOCK request */ static void nfs4_xdr_enc_lock(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_lock_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_lock(xdr, args, &hdr); encode_nops(&hdr); } /* * Encode a LOCKT request */ static void nfs4_xdr_enc_lockt(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_lockt_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_lockt(xdr, args, &hdr); encode_nops(&hdr); } /* * Encode a LOCKU request */ static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_locku_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_locku(xdr, args, &hdr); encode_nops(&hdr); } static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_release_lockowner_args *args) { struct compound_hdr hdr = { .minorversion = 0, }; encode_compound_hdr(xdr, req, &hdr); encode_release_lockowner(xdr, &args->lock_owner, &hdr); encode_nops(&hdr); } /* * Encode a READLINK request */ static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_readlink *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_readlink(xdr, args, req, &hdr); xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages, args->pgbase, args->pglen); encode_nops(&hdr); } /* * Encode a READDIR request */ static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_readdir_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_readdir(xdr, args, req, &hdr); xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages, args->pgbase, args->count); dprintk("%s: inlined page args = (%u, %p, %u, %u)\n", __func__, hdr.replen << 2, args->pages, args->pgbase, args->count); encode_nops(&hdr); } /* * Encode a READ request */ static void 
nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_readargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_read(xdr, args, &hdr); xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages, args->pgbase, args->count); req->rq_rcv_buf.flags |= XDRBUF_READ; encode_nops(&hdr); } /* * Encode an SETATTR request */ static void nfs4_xdr_enc_setattr(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_setattrargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_setattr(xdr, args, args->server, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode a GETACL request */ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_getaclargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; uint32_t replen; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1; encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr); xdr_inline_pages(&req->rq_rcv_buf, replen << 2, args->acl_pages, args->acl_pgbase, args->acl_len); encode_nops(&hdr); } /* * Encode a WRITE request */ static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_writeargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_write(xdr, args, &hdr); req->rq_snd_buf.flags |= XDRBUF_WRITE; if (args->bitmask) encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * a COMMIT request */ static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_writeargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_commit(xdr, args, &hdr); if (args->bitmask) encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * FSINFO request */ static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_fsinfo_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_fsinfo(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * a PATHCONF request */ static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_pathconf_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_getattr_one(xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0], &hdr); encode_nops(&hdr); } /* * a STATFS request */ static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_statfs_arg *args) { struct compound_hdr hdr = { .minorversion = 
nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_getattr_two(xdr, args->bitmask[0] & nfs4_statfs_bitmap[0], args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr); encode_nops(&hdr); } /* * GETATTR_BITMAP request */ static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_server_caps_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fhandle, &hdr); encode_getattr_one(xdr, FATTR4_WORD0_SUPPORTED_ATTRS| FATTR4_WORD0_LINK_SUPPORT| FATTR4_WORD0_SYMLINK_SUPPORT| FATTR4_WORD0_ACLSUPPORT, &hdr); encode_nops(&hdr); } /* * a RENEW request */ static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_client *clp) { struct compound_hdr hdr = { .nops = 0, }; encode_compound_hdr(xdr, req, &hdr); encode_renew(xdr, clp, &hdr); encode_nops(&hdr); } /* * a SETCLIENTID request */ static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_setclientid *sc) { struct compound_hdr hdr = { .nops = 0, }; encode_compound_hdr(xdr, req, &hdr); encode_setclientid(xdr, sc, &hdr); encode_nops(&hdr); } /* * a SETCLIENTID_CONFIRM request */ static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_setclientid_res *arg) { struct compound_hdr hdr = { .nops = 0, }; const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 }; encode_compound_hdr(xdr, req, &hdr); encode_setclientid_confirm(xdr, arg, &hdr); encode_putrootfh(xdr, &hdr); encode_fsinfo(xdr, lease_bitmap, &hdr); encode_nops(&hdr); } /* * DELEGRETURN request */ static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nfs4_delegreturnargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fhandle, &hdr); encode_delegreturn(xdr, args->stateid, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode FS_LOCATIONS request */ static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_fs_locations_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; uint32_t replen; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->dir_fh, &hdr); encode_lookup(xdr, args->name, &hdr); replen = hdr.replen; /* get the attribute into args->page */ encode_fs_locations(xdr, args->bitmask, &hdr); xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page, 0, PAGE_SIZE); encode_nops(&hdr); } /* * Encode SECINFO request */ static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_secinfo_arg *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->dir_fh, &hdr); encode_secinfo(xdr, args->name, &hdr); encode_nops(&hdr); } #if defined(CONFIG_NFS_V4_1) /* * EXCHANGE_ID request */ static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs41_exchange_id_args *args) { struct compound_hdr hdr = { .minorversion = 
args->client->cl_mvops->minor_version, }; encode_compound_hdr(xdr, req, &hdr); encode_exchange_id(xdr, args, &hdr); encode_nops(&hdr); } /* * a CREATE_SESSION request */ static void nfs4_xdr_enc_create_session(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs41_create_session_args *args) { struct compound_hdr hdr = { .minorversion = args->client->cl_mvops->minor_version, }; encode_compound_hdr(xdr, req, &hdr); encode_create_session(xdr, args, &hdr); encode_nops(&hdr); } /* * a DESTROY_SESSION request */ static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_session *session) { struct compound_hdr hdr = { .minorversion = session->clp->cl_mvops->minor_version, }; encode_compound_hdr(xdr, req, &hdr); encode_destroy_session(xdr, session, &hdr); encode_nops(&hdr); } /* * a SEQUENCE request */ static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_sequence_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, args, &hdr); encode_nops(&hdr); } /* * a GET_LEASE_TIME request */ static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_get_lease_time_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->la_seq_args), }; const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->la_seq_args, &hdr); encode_putrootfh(xdr, &hdr); encode_fsinfo(xdr, lease_bitmap, &hdr); encode_nops(&hdr); } /* * a RECLAIM_COMPLETE request */ static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs41_reclaim_complete_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args) }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_reclaim_complete(xdr, args, &hdr); encode_nops(&hdr); } /* * Encode GETDEVICEINFO request */ static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_getdeviceinfo_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_getdeviceinfo(xdr, args, &hdr); /* set up reply kvec. 
Subtract notification bitmap max size (2) * so that notification bitmap is put in xdr_buf tail */ xdr_inline_pages(&req->rq_rcv_buf, (hdr.replen - 2) << 2, args->pdev->pages, args->pdev->pgbase, args->pdev->pglen); encode_nops(&hdr); } /* * Encode LAYOUTGET request */ static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_layoutget_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, NFS_FH(args->inode), &hdr); encode_layoutget(xdr, args, &hdr); xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->layout.pages, 0, args->layout.pglen); encode_nops(&hdr); } /* * Encode LAYOUTCOMMIT request */ static void nfs4_xdr_enc_layoutcommit(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_layoutcommit_args *args) { struct nfs4_layoutcommit_data *data = container_of(args, struct nfs4_layoutcommit_data, args); struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, NFS_FH(args->inode), &hdr); encode_layoutcommit(xdr, data->args.inode, args, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } /* * Encode LAYOUTRETURN request */ static void nfs4_xdr_enc_layoutreturn(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_layoutreturn_args *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, NFS_FH(args->inode), &hdr); encode_layoutreturn(xdr, args, &hdr); encode_nops(&hdr); } #endif /* CONFIG_NFS_V4_1 */ static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) { dprintk("nfs: %s: prematurely hit end of receive buffer. 
" "Remaining buffer length is %tu words.\n", func, xdr->end - xdr->p); } static int decode_opaque_inline(struct xdr_stream *xdr, unsigned int *len, char **string) { __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; *len = be32_to_cpup(p); p = xdr_inline_decode(xdr, *len); if (unlikely(!p)) goto out_overflow; *string = (char *)p; return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; hdr->status = be32_to_cpup(p++); hdr->taglen = be32_to_cpup(p); p = xdr_inline_decode(xdr, hdr->taglen + 4); if (unlikely(!p)) goto out_overflow; hdr->tag = (char *)p; p += XDR_QUADLEN(hdr->taglen); hdr->nops = be32_to_cpup(p); if (unlikely(hdr->nops < 1)) return nfs4_stat_to_errno(hdr->status); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected) { __be32 *p; uint32_t opnum; int32_t nfserr; p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; opnum = be32_to_cpup(p++); if (opnum != expected) { dprintk("nfs: Server returned operation" " %d but we issued a request for %d\n", opnum, expected); return -EIO; } nfserr = be32_to_cpup(p); if (nfserr != NFS_OK) return nfs4_stat_to_errno(nfserr); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* Dummy routine */ static int decode_ace(struct xdr_stream *xdr, void *ace, struct nfs_client *clp) { __be32 *p; unsigned int strlen; char *str; p = xdr_inline_decode(xdr, 12); if (likely(p)) return decode_opaque_inline(xdr, &strlen, &str); print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap) { uint32_t bmlen; __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; bmlen = be32_to_cpup(p); bitmap[0] = bitmap[1] = 0; p = xdr_inline_decode(xdr, (bmlen << 2)); if (unlikely(!p)) goto out_overflow; if (bmlen > 0) { bitmap[0] = be32_to_cpup(p++); if (bmlen > 1) bitmap[1] = be32_to_cpup(p); } return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static inline int decode_attr_length(struct xdr_stream *xdr, uint32_t *attrlen, __be32 **savep) { __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; *attrlen = be32_to_cpup(p); *savep = xdr->p; return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_supported(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *bitmask) { if (likely(bitmap[0] & FATTR4_WORD0_SUPPORTED_ATTRS)) { int ret; ret = decode_attr_bitmap(xdr, bitmask); if (unlikely(ret < 0)) return ret; bitmap[0] &= ~FATTR4_WORD0_SUPPORTED_ATTRS; } else bitmask[0] = bitmask[1] = 0; dprintk("%s: bitmask=%08x:%08x\n", __func__, bitmask[0], bitmask[1]); return 0; } static int decode_attr_type(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *type) { __be32 *p; int ret = 0; *type = 0; if (unlikely(bitmap[0] & (FATTR4_WORD0_TYPE - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_TYPE)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; *type = be32_to_cpup(p); if (*type < NF4REG || *type > NF4NAMEDATTR) { dprintk("%s: bad type %d\n", __func__, *type); return -EIO; } bitmap[0] &= ~FATTR4_WORD0_TYPE; ret = NFS_ATTR_FATTR_TYPE; } dprintk("%s: type=0%o\n", __func__, nfs_type2fmt[*type]); return ret; out_overflow: print_overflow_msg(__func__, xdr); return 
-EIO; } static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *change) { __be32 *p; int ret = 0; *change = 0; if (unlikely(bitmap[0] & (FATTR4_WORD0_CHANGE - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_CHANGE)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, change); bitmap[0] &= ~FATTR4_WORD0_CHANGE; ret = NFS_ATTR_FATTR_CHANGE; } dprintk("%s: change attribute=%Lu\n", __func__, (unsigned long long)*change); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_size(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *size) { __be32 *p; int ret = 0; *size = 0; if (unlikely(bitmap[0] & (FATTR4_WORD0_SIZE - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_SIZE)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, size); bitmap[0] &= ~FATTR4_WORD0_SIZE; ret = NFS_ATTR_FATTR_SIZE; } dprintk("%s: file size=%Lu\n", __func__, (unsigned long long)*size); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) { __be32 *p; *res = 0; if (unlikely(bitmap[0] & (FATTR4_WORD0_LINK_SUPPORT - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_LINK_SUPPORT)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; *res = be32_to_cpup(p); bitmap[0] &= ~FATTR4_WORD0_LINK_SUPPORT; } dprintk("%s: link support=%s\n", __func__, *res == 0 ? "false" : "true"); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) { __be32 *p; *res = 0; if (unlikely(bitmap[0] & (FATTR4_WORD0_SYMLINK_SUPPORT - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_SYMLINK_SUPPORT)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; *res = be32_to_cpup(p); bitmap[0] &= ~FATTR4_WORD0_SYMLINK_SUPPORT; } dprintk("%s: symlink support=%s\n", __func__, *res == 0 ? 
"false" : "true"); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fsid *fsid) { __be32 *p; int ret = 0; fsid->major = 0; fsid->minor = 0; if (unlikely(bitmap[0] & (FATTR4_WORD0_FSID - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_FSID)) { p = xdr_inline_decode(xdr, 16); if (unlikely(!p)) goto out_overflow; p = xdr_decode_hyper(p, &fsid->major); xdr_decode_hyper(p, &fsid->minor); bitmap[0] &= ~FATTR4_WORD0_FSID; ret = NFS_ATTR_FATTR_FSID; } dprintk("%s: fsid=(0x%Lx/0x%Lx)\n", __func__, (unsigned long long)fsid->major, (unsigned long long)fsid->minor); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) { __be32 *p; *res = 60; if (unlikely(bitmap[0] & (FATTR4_WORD0_LEASE_TIME - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_LEASE_TIME)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; *res = be32_to_cpup(p); bitmap[0] &= ~FATTR4_WORD0_LEASE_TIME; } dprintk("%s: file size=%u\n", __func__, (unsigned int)*res); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap, int32_t *res) { __be32 *p; if (unlikely(bitmap[0] & (FATTR4_WORD0_RDATTR_ERROR - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_RDATTR_ERROR)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR; *res = -be32_to_cpup(p); } return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_filehandle(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fh *fh) { __be32 *p; int len; if (fh != NULL) memset(fh, 0, sizeof(*fh)); if (unlikely(bitmap[0] & (FATTR4_WORD0_FILEHANDLE - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_FILEHANDLE)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; len = be32_to_cpup(p); if (len > NFS4_FHSIZE) return -EIO; p = xdr_inline_decode(xdr, len); if (unlikely(!p)) goto out_overflow; if (fh != NULL) { memcpy(fh->data, p, len); fh->size = len; } bitmap[0] &= ~FATTR4_WORD0_FILEHANDLE; } return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) { __be32 *p; *res = ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL; if (unlikely(bitmap[0] & (FATTR4_WORD0_ACLSUPPORT - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_ACLSUPPORT)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; *res = be32_to_cpup(p); bitmap[0] &= ~FATTR4_WORD0_ACLSUPPORT; } dprintk("%s: ACLs supported=%u\n", __func__, (unsigned int)*res); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid) { __be32 *p; int ret = 0; *fileid = 0; if (unlikely(bitmap[0] & (FATTR4_WORD0_FILEID - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_FILEID)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, fileid); bitmap[0] &= ~FATTR4_WORD0_FILEID; ret = NFS_ATTR_FATTR_FILEID; } dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid) { 
__be32 *p; int ret = 0; *fileid = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_MOUNTED_ON_FILEID - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, fileid); bitmap[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; ret = NFS_ATTR_FATTR_MOUNTED_ON_FILEID; } dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_files_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) { __be32 *p; int status = 0; *res = 0; if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_AVAIL - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_FILES_AVAIL)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, res); bitmap[0] &= ~FATTR4_WORD0_FILES_AVAIL; } dprintk("%s: files avail=%Lu\n", __func__, (unsigned long long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_files_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) { __be32 *p; int status = 0; *res = 0; if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_FREE - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_FILES_FREE)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, res); bitmap[0] &= ~FATTR4_WORD0_FILES_FREE; } dprintk("%s: files free=%Lu\n", __func__, (unsigned long long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) { __be32 *p; int status = 0; *res = 0; if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_TOTAL - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_FILES_TOTAL)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, res); bitmap[0] &= ~FATTR4_WORD0_FILES_TOTAL; } dprintk("%s: files total=%Lu\n", __func__, (unsigned long long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path) { u32 n; __be32 *p; int status = 0; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; n = be32_to_cpup(p); if (n == 0) goto root_path; dprintk("path "); path->ncomponents = 0; while (path->ncomponents < n) { struct nfs4_string *component = &path->components[path->ncomponents]; status = decode_opaque_inline(xdr, &component->len, &component->data); if (unlikely(status != 0)) goto out_eio; if (path->ncomponents != n) dprintk("/"); dprintk("%s", component->data); if (path->ncomponents < NFS4_PATHNAME_MAXCOMPONENTS) path->ncomponents++; else { dprintk("cannot parse %d components in path\n", n); goto out_eio; } } out: dprintk("\n"); return status; root_path: /* a root pathname is sent as a zero component4 */ path->ncomponents = 1; path->components[0].len=0; path->components[0].data=NULL; dprintk("path /\n"); goto out; out_eio: dprintk(" status %d", status); status = -EIO; goto out; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs4_fs_locations *res) { int n; __be32 *p; int status = -EIO; if (unlikely(bitmap[0] & (FATTR4_WORD0_FS_LOCATIONS -1U))) goto out; status = 0; if (unlikely(!(bitmap[0] & FATTR4_WORD0_FS_LOCATIONS))) goto out; dprintk("%s: fsroot ", __func__); status = decode_pathname(xdr, &res->fs_path); if 
(unlikely(status != 0)) goto out; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; n = be32_to_cpup(p); if (n <= 0) goto out_eio; res->nlocations = 0; while (res->nlocations < n) { u32 m; struct nfs4_fs_location *loc = &res->locations[res->nlocations]; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; m = be32_to_cpup(p); loc->nservers = 0; dprintk("%s: servers ", __func__); while (loc->nservers < m) { struct nfs4_string *server = &loc->servers[loc->nservers]; status = decode_opaque_inline(xdr, &server->len, &server->data); if (unlikely(status != 0)) goto out_eio; dprintk("%s ", server->data); if (loc->nservers < NFS4_FS_LOCATION_MAXSERVERS) loc->nservers++; else { unsigned int i; dprintk("%s: using first %u of %u servers " "returned for location %u\n", __func__, NFS4_FS_LOCATION_MAXSERVERS, m, res->nlocations); for (i = loc->nservers; i < m; i++) { unsigned int len; char *data; status = decode_opaque_inline(xdr, &len, &data); if (unlikely(status != 0)) goto out_eio; } } } status = decode_pathname(xdr, &loc->rootpath); if (unlikely(status != 0)) goto out_eio; if (res->nlocations < NFS4_FS_LOCATIONS_MAXENTRIES) res->nlocations++; } if (res->nlocations != 0) status = NFS_ATTR_FATTR_V4_REFERRAL; out: dprintk("%s: fs_locations done, error = %d\n", __func__, status); return status; out_overflow: print_overflow_msg(__func__, xdr); out_eio: status = -EIO; goto out; } static int decode_attr_maxfilesize(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) { __be32 *p; int status = 0; *res = 0; if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXFILESIZE - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_MAXFILESIZE)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, res); bitmap[0] &= ~FATTR4_WORD0_MAXFILESIZE; } dprintk("%s: maxfilesize=%Lu\n", __func__, (unsigned long long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_maxlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxlink) { __be32 *p; int status = 0; *maxlink = 1; if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXLINK - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_MAXLINK)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; *maxlink = be32_to_cpup(p); bitmap[0] &= ~FATTR4_WORD0_MAXLINK; } dprintk("%s: maxlink=%u\n", __func__, *maxlink); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_maxname(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxname) { __be32 *p; int status = 0; *maxname = 1024; if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXNAME - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_MAXNAME)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; *maxname = be32_to_cpup(p); bitmap[0] &= ~FATTR4_WORD0_MAXNAME; } dprintk("%s: maxname=%u\n", __func__, *maxname); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) { __be32 *p; int status = 0; *res = 1024; if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXREAD - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_MAXREAD)) { uint64_t maxread; p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, &maxread); if (maxread > 0x7FFFFFFF) maxread = 0x7FFFFFFF; *res = (uint32_t)maxread; bitmap[0] &= ~FATTR4_WORD0_MAXREAD; } dprintk("%s: maxread=%lu\n", __func__, (unsigned long)*res); return status; out_overflow: 
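	/* The XDR stream ended before the full maxread attribute could be read. */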
print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res) { __be32 *p; int status = 0; *res = 1024; if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXWRITE - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_MAXWRITE)) { uint64_t maxwrite; p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, &maxwrite); if (maxwrite > 0x7FFFFFFF) maxwrite = 0x7FFFFFFF; *res = (uint32_t)maxwrite; bitmap[0] &= ~FATTR4_WORD0_MAXWRITE; } dprintk("%s: maxwrite=%lu\n", __func__, (unsigned long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, umode_t *mode) { uint32_t tmp; __be32 *p; int ret = 0; *mode = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_MODE - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_MODE)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; tmp = be32_to_cpup(p); *mode = tmp & ~S_IFMT; bitmap[1] &= ~FATTR4_WORD1_MODE; ret = NFS_ATTR_FATTR_MODE; } dprintk("%s: file mode=0%o\n", __func__, (unsigned int)*mode); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *nlink) { __be32 *p; int ret = 0; *nlink = 1; if (unlikely(bitmap[1] & (FATTR4_WORD1_NUMLINKS - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_NUMLINKS)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; *nlink = be32_to_cpup(p); bitmap[1] &= ~FATTR4_WORD1_NUMLINKS; ret = NFS_ATTR_FATTR_NLINK; } dprintk("%s: nlink=%u\n", __func__, (unsigned int)*nlink); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, const struct nfs_server *server, uint32_t *uid, int may_sleep) { uint32_t len; __be32 *p; int ret = 0; *uid = -2; if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_OWNER)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; len = be32_to_cpup(p); p = xdr_inline_decode(xdr, len); if (unlikely(!p)) goto out_overflow; if (!may_sleep) { /* do nothing */ } else if (len < XDR_MAX_NETOBJ) { if (nfs_map_name_to_uid(server, (char *)p, len, uid) == 0) ret = NFS_ATTR_FATTR_OWNER; else dprintk("%s: nfs_map_name_to_uid failed!\n", __func__); } else dprintk("%s: name too long (%u)!\n", __func__, len); bitmap[1] &= ~FATTR4_WORD1_OWNER; } dprintk("%s: uid=%d\n", __func__, (int)*uid); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, const struct nfs_server *server, uint32_t *gid, int may_sleep) { uint32_t len; __be32 *p; int ret = 0; *gid = -2; if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER_GROUP - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_OWNER_GROUP)) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; len = be32_to_cpup(p); p = xdr_inline_decode(xdr, len); if (unlikely(!p)) goto out_overflow; if (!may_sleep) { /* do nothing */ } else if (len < XDR_MAX_NETOBJ) { if (nfs_map_group_to_gid(server, (char *)p, len, gid) == 0) ret = NFS_ATTR_FATTR_GROUP; else dprintk("%s: nfs_map_group_to_gid failed!\n", __func__); } else dprintk("%s: name too long (%u)!\n", __func__, len); bitmap[1] &= ~FATTR4_WORD1_OWNER_GROUP; } dprintk("%s: gid=%d\n", __func__, (int)*gid); return ret; out_overflow: 
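	/* Ran out of receive buffer while decoding the owner_group attribute. */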
print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rdev) { uint32_t major = 0, minor = 0; __be32 *p; int ret = 0; *rdev = MKDEV(0,0); if (unlikely(bitmap[1] & (FATTR4_WORD1_RAWDEV - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_RAWDEV)) { dev_t tmp; p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; major = be32_to_cpup(p++); minor = be32_to_cpup(p); tmp = MKDEV(major, minor); if (MAJOR(tmp) == major && MINOR(tmp) == minor) *rdev = tmp; bitmap[1] &= ~ FATTR4_WORD1_RAWDEV; ret = NFS_ATTR_FATTR_RDEV; } dprintk("%s: rdev=(0x%x:0x%x)\n", __func__, major, minor); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_space_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) { __be32 *p; int status = 0; *res = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_AVAIL - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_SPACE_AVAIL)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, res); bitmap[1] &= ~FATTR4_WORD1_SPACE_AVAIL; } dprintk("%s: space avail=%Lu\n", __func__, (unsigned long long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_space_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) { __be32 *p; int status = 0; *res = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_FREE - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_SPACE_FREE)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, res); bitmap[1] &= ~FATTR4_WORD1_SPACE_FREE; } dprintk("%s: space free=%Lu\n", __func__, (unsigned long long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_space_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res) { __be32 *p; int status = 0; *res = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_TOTAL - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_SPACE_TOTAL)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, res); bitmap[1] &= ~FATTR4_WORD1_SPACE_TOTAL; } dprintk("%s: space total=%Lu\n", __func__, (unsigned long long)*res); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *used) { __be32 *p; int ret = 0; *used = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_USED - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_SPACE_USED)) { p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, used); bitmap[1] &= ~FATTR4_WORD1_SPACE_USED; ret = NFS_ATTR_FATTR_SPACE_USED; } dprintk("%s: space used=%Lu\n", __func__, (unsigned long long)*used); return ret; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_time(struct xdr_stream *xdr, struct timespec *time) { __be32 *p; uint64_t sec; uint32_t nsec; p = xdr_inline_decode(xdr, 12); if (unlikely(!p)) goto out_overflow; p = xdr_decode_hyper(p, &sec); nsec = be32_to_cpup(p); time->tv_sec = (time_t)sec; time->tv_nsec = (long)nsec; return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time) { int status = 0; time->tv_sec = 0; time->tv_nsec = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_ACCESS - 1U))) return -EIO; if (likely(bitmap[1] & 
FATTR4_WORD1_TIME_ACCESS)) { status = decode_attr_time(xdr, time); if (status == 0) status = NFS_ATTR_FATTR_ATIME; bitmap[1] &= ~FATTR4_WORD1_TIME_ACCESS; } dprintk("%s: atime=%ld\n", __func__, (long)time->tv_sec); return status; } static int decode_attr_time_metadata(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time) { int status = 0; time->tv_sec = 0; time->tv_nsec = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_METADATA - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_TIME_METADATA)) { status = decode_attr_time(xdr, time); if (status == 0) status = NFS_ATTR_FATTR_CTIME; bitmap[1] &= ~FATTR4_WORD1_TIME_METADATA; } dprintk("%s: ctime=%ld\n", __func__, (long)time->tv_sec); return status; } static int decode_attr_time_delta(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time) { int status = 0; time->tv_sec = 0; time->tv_nsec = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_DELTA - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_TIME_DELTA)) { status = decode_attr_time(xdr, time); bitmap[1] &= ~FATTR4_WORD1_TIME_DELTA; } dprintk("%s: time_delta=%ld %ld\n", __func__, (long)time->tv_sec, (long)time->tv_nsec); return status; } static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time) { int status = 0; time->tv_sec = 0; time->tv_nsec = 0; if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_MODIFY - 1U))) return -EIO; if (likely(bitmap[1] & FATTR4_WORD1_TIME_MODIFY)) { status = decode_attr_time(xdr, time); if (status == 0) status = NFS_ATTR_FATTR_MTIME; bitmap[1] &= ~FATTR4_WORD1_TIME_MODIFY; } dprintk("%s: mtime=%ld\n", __func__, (long)time->tv_sec); return status; } static int verify_attr_len(struct xdr_stream *xdr, __be32 *savep, uint32_t attrlen) { unsigned int attrwords = XDR_QUADLEN(attrlen); unsigned int nwords = xdr->p - savep; if (unlikely(attrwords != nwords)) { dprintk("%s: server returned incorrect attribute length: " "%u %c %u\n", __func__, attrwords << 2, (attrwords < nwords) ? 
'<' : '>', nwords << 2); return -EIO; } return 0; } static int decode_change_info(struct xdr_stream *xdr, struct nfs4_change_info *cinfo) { __be32 *p; p = xdr_inline_decode(xdr, 20); if (unlikely(!p)) goto out_overflow; cinfo->atomic = be32_to_cpup(p++); p = xdr_decode_hyper(p, &cinfo->before); xdr_decode_hyper(p, &cinfo->after); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_access(struct xdr_stream *xdr, struct nfs4_accessres *access) { __be32 *p; uint32_t supp, acc; int status; status = decode_op_hdr(xdr, OP_ACCESS); if (status) return status; p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; supp = be32_to_cpup(p++); acc = be32_to_cpup(p); access->supported = supp; access->access = acc; return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len) { __be32 *p; p = xdr_inline_decode(xdr, len); if (likely(p)) { memcpy(buf, p, len); return 0; } print_overflow_msg(__func__, xdr); return -EIO; } static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) { return decode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE); } static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res) { int status; status = decode_op_hdr(xdr, OP_CLOSE); if (status != -EIO) nfs_increment_open_seqid(status, res->seqid); if (!status) status = decode_stateid(xdr, &res->stateid); return status; } static int decode_verifier(struct xdr_stream *xdr, void *verifier) { return decode_opaque_fixed(xdr, verifier, 8); } static int decode_commit(struct xdr_stream *xdr, struct nfs_writeres *res) { int status; status = decode_op_hdr(xdr, OP_COMMIT); if (!status) status = decode_verifier(xdr, res->verf->verifier); return status; } static int decode_create(struct xdr_stream *xdr, struct nfs4_change_info *cinfo) { __be32 *p; uint32_t bmlen; int status; status = decode_op_hdr(xdr, OP_CREATE); if (status) return status; if ((status = decode_change_info(xdr, cinfo))) return status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; bmlen = be32_to_cpup(p); p = xdr_inline_decode(xdr, bmlen << 2); if (likely(p)) return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_res *res) { __be32 *savep; uint32_t attrlen, bitmap[2] = {0}; int status; if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) goto xdr_error; if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) goto xdr_error; if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0) goto xdr_error; if ((status = decode_attr_supported(xdr, bitmap, res->attr_bitmask)) != 0) goto xdr_error; if ((status = decode_attr_link_support(xdr, bitmap, &res->has_links)) != 0) goto xdr_error; if ((status = decode_attr_symlink_support(xdr, bitmap, &res->has_symlinks)) != 0) goto xdr_error; if ((status = decode_attr_aclsupport(xdr, bitmap, &res->acl_bitmask)) != 0) goto xdr_error; status = verify_attr_len(xdr, savep, attrlen); xdr_error: dprintk("%s: xdr returned %d!\n", __func__, -status); return status; } static int decode_statfs(struct xdr_stream *xdr, struct nfs_fsstat *fsstat) { __be32 *savep; uint32_t attrlen, bitmap[2] = {0}; int status; if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) goto xdr_error; if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) goto xdr_error; if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0) goto xdr_error; if ((status = decode_attr_files_avail(xdr, bitmap, 
&fsstat->afiles)) != 0) goto xdr_error; if ((status = decode_attr_files_free(xdr, bitmap, &fsstat->ffiles)) != 0) goto xdr_error; if ((status = decode_attr_files_total(xdr, bitmap, &fsstat->tfiles)) != 0) goto xdr_error; if ((status = decode_attr_space_avail(xdr, bitmap, &fsstat->abytes)) != 0) goto xdr_error; if ((status = decode_attr_space_free(xdr, bitmap, &fsstat->fbytes)) != 0) goto xdr_error; if ((status = decode_attr_space_total(xdr, bitmap, &fsstat->tbytes)) != 0) goto xdr_error; status = verify_attr_len(xdr, savep, attrlen); xdr_error: dprintk("%s: xdr returned %d!\n", __func__, -status); return status; } static int decode_pathconf(struct xdr_stream *xdr, struct nfs_pathconf *pathconf) { __be32 *savep; uint32_t attrlen, bitmap[2] = {0}; int status; if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) goto xdr_error; if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) goto xdr_error; if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0) goto xdr_error; if ((status = decode_attr_maxlink(xdr, bitmap, &pathconf->max_link)) != 0) goto xdr_error; if ((status = decode_attr_maxname(xdr, bitmap, &pathconf->max_namelen)) != 0) goto xdr_error; status = verify_attr_len(xdr, savep, attrlen); xdr_error: dprintk("%s: xdr returned %d!\n", __func__, -status); return status; } static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fattr *fattr, struct nfs_fh *fh, const struct nfs_server *server, int may_sleep) { int status; umode_t fmode = 0; uint32_t type; int32_t err; status = decode_attr_type(xdr, bitmap, &type); if (status < 0) goto xdr_error; fattr->mode = 0; if (status != 0) { fattr->mode |= nfs_type2fmt[type]; fattr->valid |= status; } status = decode_attr_change(xdr, bitmap, &fattr->change_attr); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_size(xdr, bitmap, &fattr->size); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_fsid(xdr, bitmap, &fattr->fsid); if (status < 0) goto xdr_error; fattr->valid |= status; err = 0; status = decode_attr_error(xdr, bitmap, &err); if (status < 0) goto xdr_error; if (err == -NFS4ERR_WRONGSEC) nfs_fixup_secinfo_attributes(fattr, fh); status = decode_attr_filehandle(xdr, bitmap, fh); if (status < 0) goto xdr_error; status = decode_attr_fileid(xdr, bitmap, &fattr->fileid); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_fs_locations(xdr, bitmap, container_of(fattr, struct nfs4_fs_locations, fattr)); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_mode(xdr, bitmap, &fmode); if (status < 0) goto xdr_error; if (status != 0) { fattr->mode |= fmode; fattr->valid |= status; } status = decode_attr_nlink(xdr, bitmap, &fattr->nlink); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_owner(xdr, bitmap, server, &fattr->uid, may_sleep); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_group(xdr, bitmap, server, &fattr->gid, may_sleep); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_rdev(xdr, bitmap, &fattr->rdev); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_space_used(xdr, bitmap, &fattr->du.nfs3.used); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_time_access(xdr, bitmap, &fattr->atime); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_time_metadata(xdr, bitmap, &fattr->ctime); if (status < 0) goto xdr_error; fattr->valid |= status; status = 
decode_attr_time_modify(xdr, bitmap, &fattr->mtime); if (status < 0) goto xdr_error; fattr->valid |= status; status = decode_attr_mounted_on_fileid(xdr, bitmap, &fattr->mounted_on_fileid); if (status < 0) goto xdr_error; fattr->valid |= status; xdr_error: dprintk("%s: xdr returned %d\n", __func__, -status); return status; } static int decode_getfattr_generic(struct xdr_stream *xdr, struct nfs_fattr *fattr, struct nfs_fh *fh, const struct nfs_server *server, int may_sleep) { __be32 *savep; uint32_t attrlen, bitmap[2] = {0}; int status; status = decode_op_hdr(xdr, OP_GETATTR); if (status < 0) goto xdr_error; status = decode_attr_bitmap(xdr, bitmap); if (status < 0) goto xdr_error; status = decode_attr_length(xdr, &attrlen, &savep); if (status < 0) goto xdr_error; status = decode_getfattr_attrs(xdr, bitmap, fattr, fh, server, may_sleep); if (status < 0) goto xdr_error; status = verify_attr_len(xdr, savep, attrlen); xdr_error: dprintk("%s: xdr returned %d\n", __func__, -status); return status; } static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr, const struct nfs_server *server, int may_sleep) { return decode_getfattr_generic(xdr, fattr, NULL, server, may_sleep); } /* * Decode potentially multiple layout types. Currently we only support * one layout driver per file system. */ static int decode_first_pnfs_layout_type(struct xdr_stream *xdr, uint32_t *layouttype) { uint32_t *p; int num; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; num = be32_to_cpup(p); /* pNFS is not supported by the underlying file system */ if (num == 0) { *layouttype = 0; return 0; } if (num > 1) printk(KERN_INFO "%s: Warning: Multiple pNFS layout drivers " "per filesystem not supported\n", __func__); /* Decode and set first layout type, move xdr->p past unused types */ p = xdr_inline_decode(xdr, num * 4); if (unlikely(!p)) goto out_overflow; *layouttype = be32_to_cpup(p); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * The type of file system exported. * Note we must ensure that layouttype is set in any non-error case. */ static int decode_attr_pnfstype(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *layouttype) { int status = 0; dprintk("%s: bitmap is %x\n", __func__, bitmap[1]); if (unlikely(bitmap[1] & (FATTR4_WORD1_FS_LAYOUT_TYPES - 1U))) return -EIO; if (bitmap[1] & FATTR4_WORD1_FS_LAYOUT_TYPES) { status = decode_first_pnfs_layout_type(xdr, layouttype); bitmap[1] &= ~FATTR4_WORD1_FS_LAYOUT_TYPES; } else *layouttype = 0; return status; } static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo) { __be32 *savep; uint32_t attrlen, bitmap[2]; int status; if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) goto xdr_error; if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) goto xdr_error; if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0) goto xdr_error; fsinfo->rtmult = fsinfo->wtmult = 512; /* ??? 
*/ if ((status = decode_attr_lease_time(xdr, bitmap, &fsinfo->lease_time)) != 0) goto xdr_error; if ((status = decode_attr_maxfilesize(xdr, bitmap, &fsinfo->maxfilesize)) != 0) goto xdr_error; if ((status = decode_attr_maxread(xdr, bitmap, &fsinfo->rtmax)) != 0) goto xdr_error; fsinfo->rtpref = fsinfo->dtpref = fsinfo->rtmax; if ((status = decode_attr_maxwrite(xdr, bitmap, &fsinfo->wtmax)) != 0) goto xdr_error; fsinfo->wtpref = fsinfo->wtmax; status = decode_attr_time_delta(xdr, bitmap, &fsinfo->time_delta); if (status != 0) goto xdr_error; status = decode_attr_pnfstype(xdr, bitmap, &fsinfo->layouttype); if (status != 0) goto xdr_error; status = verify_attr_len(xdr, savep, attrlen); xdr_error: dprintk("%s: xdr returned %d!\n", __func__, -status); return status; } static int decode_getfh(struct xdr_stream *xdr, struct nfs_fh *fh) { __be32 *p; uint32_t len; int status; /* Zero handle first to allow comparisons */ memset(fh, 0, sizeof(*fh)); status = decode_op_hdr(xdr, OP_GETFH); if (status) return status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; len = be32_to_cpup(p); if (len > NFS4_FHSIZE) return -EIO; fh->size = len; p = xdr_inline_decode(xdr, len); if (unlikely(!p)) goto out_overflow; memcpy(fh->data, p, len); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo) { int status; status = decode_op_hdr(xdr, OP_LINK); if (status) return status; return decode_change_info(xdr, cinfo); } /* * We create the owner, so we know a proper owner.id length is 4. */ static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl) { uint64_t offset, length, clientid; __be32 *p; uint32_t namelen, type; p = xdr_inline_decode(xdr, 32); /* read 32 bytes */ if (unlikely(!p)) goto out_overflow; p = xdr_decode_hyper(p, &offset); /* read 2 8-byte long words */ p = xdr_decode_hyper(p, &length); type = be32_to_cpup(p++); /* 4 byte read */ if (fl != NULL) { /* manipulate file lock */ fl->fl_start = (loff_t)offset; fl->fl_end = fl->fl_start + (loff_t)length - 1; if (length == ~(uint64_t)0) fl->fl_end = OFFSET_MAX; fl->fl_type = F_WRLCK; if (type & 1) fl->fl_type = F_RDLCK; fl->fl_pid = 0; } p = xdr_decode_hyper(p, &clientid); /* read 8 bytes */ namelen = be32_to_cpup(p); /* read 4 bytes */ /* have read all 32 bytes now */ p = xdr_inline_decode(xdr, namelen); /* variable size field */ if (likely(p)) return -NFS4ERR_DENIED; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res) { int status; status = decode_op_hdr(xdr, OP_LOCK); if (status == -EIO) goto out; if (status == 0) { status = decode_stateid(xdr, &res->stateid); if (unlikely(status)) goto out; } else if (status == -NFS4ERR_DENIED) status = decode_lock_denied(xdr, NULL); if (res->open_seqid != NULL) nfs_increment_open_seqid(status, res->open_seqid); nfs_increment_lock_seqid(status, res->lock_seqid); out: return status; } static int decode_lockt(struct xdr_stream *xdr, struct nfs_lockt_res *res) { int status; status = decode_op_hdr(xdr, OP_LOCKT); if (status == -NFS4ERR_DENIED) return decode_lock_denied(xdr, res->denied); return status; } static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res) { int status; status = decode_op_hdr(xdr, OP_LOCKU); if (status != -EIO) nfs_increment_lock_seqid(status, res->seqid); if (status == 0) status = decode_stateid(xdr, &res->stateid); return status; } static int 
decode_release_lockowner(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_RELEASE_LOCKOWNER); } static int decode_lookup(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_LOOKUP); } /* This is too sick! */ static int decode_space_limit(struct xdr_stream *xdr, u64 *maxsize) { __be32 *p; uint32_t limit_type, nblocks, blocksize; p = xdr_inline_decode(xdr, 12); if (unlikely(!p)) goto out_overflow; limit_type = be32_to_cpup(p++); switch (limit_type) { case 1: xdr_decode_hyper(p, maxsize); break; case 2: nblocks = be32_to_cpup(p++); blocksize = be32_to_cpup(p); *maxsize = (uint64_t)nblocks * (uint64_t)blocksize; } return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res) { __be32 *p; uint32_t delegation_type; int status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; delegation_type = be32_to_cpup(p); if (delegation_type == NFS4_OPEN_DELEGATE_NONE) { res->delegation_type = 0; return 0; } status = decode_stateid(xdr, &res->delegation); if (unlikely(status)) return status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; res->do_recall = be32_to_cpup(p); switch (delegation_type) { case NFS4_OPEN_DELEGATE_READ: res->delegation_type = FMODE_READ; break; case NFS4_OPEN_DELEGATE_WRITE: res->delegation_type = FMODE_WRITE|FMODE_READ; if (decode_space_limit(xdr, &res->maxsize) < 0) return -EIO; } return decode_ace(xdr, NULL, res->server->nfs_client); out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res) { __be32 *p; uint32_t savewords, bmlen, i; int status; status = decode_op_hdr(xdr, OP_OPEN); if (status != -EIO) nfs_increment_open_seqid(status, res->seqid); if (!status) status = decode_stateid(xdr, &res->stateid); if (unlikely(status)) return status; decode_change_info(xdr, &res->cinfo); p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; res->rflags = be32_to_cpup(p++); bmlen = be32_to_cpup(p); if (bmlen > 10) goto xdr_error; p = xdr_inline_decode(xdr, bmlen << 2); if (unlikely(!p)) goto out_overflow; savewords = min_t(uint32_t, bmlen, NFS4_BITMAP_SIZE); for (i = 0; i < savewords; ++i) res->attrset[i] = be32_to_cpup(p++); for (; i < NFS4_BITMAP_SIZE; i++) res->attrset[i] = 0; return decode_delegation(xdr, res); xdr_error: dprintk("%s: Bitmap too large! 
Length = %u\n", __func__, bmlen); return -EIO; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmres *res) { int status; status = decode_op_hdr(xdr, OP_OPEN_CONFIRM); if (status != -EIO) nfs_increment_open_seqid(status, res->seqid); if (!status) status = decode_stateid(xdr, &res->stateid); return status; } static int decode_open_downgrade(struct xdr_stream *xdr, struct nfs_closeres *res) { int status; status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE); if (status != -EIO) nfs_increment_open_seqid(status, res->seqid); if (!status) status = decode_stateid(xdr, &res->stateid); return status; } static int decode_putfh(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_PUTFH); } static int decode_putrootfh(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_PUTROOTFH); } static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_readres *res) { struct kvec *iov = req->rq_rcv_buf.head; __be32 *p; uint32_t count, eof, recvd, hdrlen; int status; status = decode_op_hdr(xdr, OP_READ); if (status) return status; p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; eof = be32_to_cpup(p++); count = be32_to_cpup(p); hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base; recvd = req->rq_rcv_buf.len - hdrlen; if (count > recvd) { dprintk("NFS: server cheating in read reply: " "count %u > recvd %u\n", count, recvd); count = recvd; eof = 0; } xdr_read_pages(xdr, count); res->eof = eof; res->count = count; return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir_res *readdir) { struct xdr_buf *rcvbuf = &req->rq_rcv_buf; struct kvec *iov = rcvbuf->head; size_t hdrlen; u32 recvd, pglen = rcvbuf->page_len; int status; status = decode_op_hdr(xdr, OP_READDIR); if (!status) status = decode_verifier(xdr, readdir->verifier.data); if (unlikely(status)) return status; dprintk("%s: verifier = %08x:%08x\n", __func__, ((u32 *)readdir->verifier.data)[0], ((u32 *)readdir->verifier.data)[1]); hdrlen = (char *) xdr->p - (char *) iov->iov_base; recvd = rcvbuf->len - hdrlen; if (pglen > recvd) pglen = recvd; xdr_read_pages(xdr, pglen); return pglen; } static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req) { struct xdr_buf *rcvbuf = &req->rq_rcv_buf; struct kvec *iov = rcvbuf->head; size_t hdrlen; u32 len, recvd; __be32 *p; int status; status = decode_op_hdr(xdr, OP_READLINK); if (status) return status; /* Convert length of symlink */ p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; len = be32_to_cpup(p); if (len >= rcvbuf->page_len || len <= 0) { dprintk("nfs: server returned giant symlink!\n"); return -ENAMETOOLONG; } hdrlen = (char *) xdr->p - (char *) iov->iov_base; recvd = req->rq_rcv_buf.len - hdrlen; if (recvd < len) { dprintk("NFS: server cheating in readlink reply: " "count %u > recvd %u\n", len, recvd); return -EIO; } xdr_read_pages(xdr, len); /* * The XDR encode routine has set things up so that * the link text will be copied directly into the * buffer. We just have to do overflow-checking, * and and null-terminate the text (the VFS expects * null-termination). 
*/ xdr_terminate_string(rcvbuf, len); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_remove(struct xdr_stream *xdr, struct nfs4_change_info *cinfo) { int status; status = decode_op_hdr(xdr, OP_REMOVE); if (status) goto out; status = decode_change_info(xdr, cinfo); out: return status; } static int decode_rename(struct xdr_stream *xdr, struct nfs4_change_info *old_cinfo, struct nfs4_change_info *new_cinfo) { int status; status = decode_op_hdr(xdr, OP_RENAME); if (status) goto out; if ((status = decode_change_info(xdr, old_cinfo))) goto out; status = decode_change_info(xdr, new_cinfo); out: return status; } static int decode_renew(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_RENEW); } static int decode_restorefh(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_RESTOREFH); } static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req, size_t *acl_len) { __be32 *savep; uint32_t attrlen, bitmap[2] = {0}; struct kvec *iov = req->rq_rcv_buf.head; int status; *acl_len = 0; if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) goto out; if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) goto out; if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0) goto out; if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U))) return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_ACL)) { size_t hdrlen; u32 recvd; /* We ignore &savep and don't do consistency checks on * the attr length. Let userspace figure it out.... */ hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base; recvd = req->rq_rcv_buf.len - hdrlen; if (attrlen > recvd) { dprintk("NFS: server cheating in getattr" " acl reply: attrlen %u > recvd %u\n", attrlen, recvd); return -EINVAL; } xdr_read_pages(xdr, attrlen); *acl_len = attrlen; } else status = -EOPNOTSUPP; out: return status; } static int decode_savefh(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_SAVEFH); } static int decode_setattr(struct xdr_stream *xdr) { __be32 *p; uint32_t bmlen; int status; status = decode_op_hdr(xdr, OP_SETATTR); if (status) return status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; bmlen = be32_to_cpup(p); p = xdr_inline_decode(xdr, bmlen << 2); if (likely(p)) return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_setclientid(struct xdr_stream *xdr, struct nfs4_setclientid_res *res) { __be32 *p; uint32_t opnum; int32_t nfserr; p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; opnum = be32_to_cpup(p++); if (opnum != OP_SETCLIENTID) { dprintk("nfs: decode_setclientid: Server returned operation" " %d\n", opnum); return -EIO; } nfserr = be32_to_cpup(p); if (nfserr == NFS_OK) { p = xdr_inline_decode(xdr, 8 + NFS4_VERIFIER_SIZE); if (unlikely(!p)) goto out_overflow; p = xdr_decode_hyper(p, &res->clientid); memcpy(res->confirm.data, p, NFS4_VERIFIER_SIZE); } else if (nfserr == NFSERR_CLID_INUSE) { uint32_t len; /* skip netid string */ p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; len = be32_to_cpup(p); p = xdr_inline_decode(xdr, len); if (unlikely(!p)) goto out_overflow; /* skip uaddr string */ p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; len = be32_to_cpup(p); p = xdr_inline_decode(xdr, len); if (unlikely(!p)) goto out_overflow; return -NFSERR_CLID_INUSE; } else return nfs4_stat_to_errno(nfserr); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_setclientid_confirm(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_SETCLIENTID_CONFIRM); } 
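/*
 * Decode the result of a WRITE operation: the byte count actually
 * written, the stable_how "committed" level, and the 8-byte write
 * verifier.
 */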
static int decode_write(struct xdr_stream *xdr, struct nfs_writeres *res) { __be32 *p; int status; status = decode_op_hdr(xdr, OP_WRITE); if (status) return status; p = xdr_inline_decode(xdr, 16); if (unlikely(!p)) goto out_overflow; res->count = be32_to_cpup(p++); res->verf->committed = be32_to_cpup(p++); memcpy(res->verf->verifier, p, 8); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_delegreturn(struct xdr_stream *xdr) { return decode_op_hdr(xdr, OP_DELEGRETURN); } static int decode_secinfo_gss(struct xdr_stream *xdr, struct nfs4_secinfo_flavor *flavor) { __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; flavor->gss.sec_oid4.len = be32_to_cpup(p); if (flavor->gss.sec_oid4.len > GSS_OID_MAX_LEN) goto out_err; p = xdr_inline_decode(xdr, flavor->gss.sec_oid4.len); if (unlikely(!p)) goto out_overflow; memcpy(flavor->gss.sec_oid4.data, p, flavor->gss.sec_oid4.len); p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; flavor->gss.qop4 = be32_to_cpup(p++); flavor->gss.service = be32_to_cpup(p); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; out_err: return -EINVAL; } static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res) { struct nfs4_secinfo_flavor *sec_flavor; int status; __be32 *p; int i, num_flavors; status = decode_op_hdr(xdr, OP_SECINFO); if (status) goto out; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; res->flavors->num_flavors = 0; num_flavors = be32_to_cpup(p); for (i = 0; i < num_flavors; i++) { sec_flavor = &res->flavors->flavors[i]; if ((char *)&sec_flavor[1] - (char *)res->flavors > PAGE_SIZE) break; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; sec_flavor->flavor = be32_to_cpup(p); if (sec_flavor->flavor == RPC_AUTH_GSS) { status = decode_secinfo_gss(xdr, sec_flavor); if (status) goto out; } res->flavors->num_flavors++; } out: return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } #if defined(CONFIG_NFS_V4_1) static int decode_exchange_id(struct xdr_stream *xdr, struct nfs41_exchange_id_res *res) { __be32 *p; uint32_t dummy; char *dummy_str; int status; struct nfs_client *clp = res->client; status = decode_op_hdr(xdr, OP_EXCHANGE_ID); if (status) return status; p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; xdr_decode_hyper(p, &clp->cl_clientid); p = xdr_inline_decode(xdr, 12); if (unlikely(!p)) goto out_overflow; clp->cl_seqid = be32_to_cpup(p++); clp->cl_exchange_flags = be32_to_cpup(p++); /* We ask for SP4_NONE */ dummy = be32_to_cpup(p); if (dummy != SP4_NONE) return -EIO; /* Throw away minor_id */ p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; /* Throw away Major id */ status = decode_opaque_inline(xdr, &dummy, &dummy_str); if (unlikely(status)) return status; /* Throw away server_scope */ status = decode_opaque_inline(xdr, &dummy, &dummy_str); if (unlikely(status)) return status; /* Throw away Implementation id array */ status = decode_opaque_inline(xdr, &dummy, &dummy_str); if (unlikely(status)) return status; return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_chan_attrs(struct xdr_stream *xdr, struct nfs4_channel_attrs *attrs) { __be32 *p; u32 nr_attrs, val; p = xdr_inline_decode(xdr, 28); if (unlikely(!p)) goto out_overflow; val = be32_to_cpup(p++); /* headerpadsz */ if (val) return -EINVAL; /* no support for header padding yet */ attrs->max_rqst_sz = be32_to_cpup(p++); 
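	/* The remaining channel attributes follow in fixed order: maximum
	 * response sizes, op and request (slot) limits, then the count of
	 * optional rdma_ird values, of which at most one is accepted here. */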
attrs->max_resp_sz = be32_to_cpup(p++); attrs->max_resp_sz_cached = be32_to_cpup(p++); attrs->max_ops = be32_to_cpup(p++); attrs->max_reqs = be32_to_cpup(p++); nr_attrs = be32_to_cpup(p); if (unlikely(nr_attrs > 1)) { printk(KERN_WARNING "%s: Invalid rdma channel attrs count %u\n", __func__, nr_attrs); return -EINVAL; } if (nr_attrs == 1) { p = xdr_inline_decode(xdr, 4); /* skip rdma_attrs */ if (unlikely(!p)) goto out_overflow; } return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_sessionid(struct xdr_stream *xdr, struct nfs4_sessionid *sid) { return decode_opaque_fixed(xdr, sid->data, NFS4_MAX_SESSIONID_LEN); } static int decode_create_session(struct xdr_stream *xdr, struct nfs41_create_session_res *res) { __be32 *p; int status; struct nfs_client *clp = res->client; struct nfs4_session *session = clp->cl_session; status = decode_op_hdr(xdr, OP_CREATE_SESSION); if (!status) status = decode_sessionid(xdr, &session->sess_id); if (unlikely(status)) return status; /* seqid, flags */ p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; clp->cl_seqid = be32_to_cpup(p++); session->flags = be32_to_cpup(p); /* Channel attributes */ status = decode_chan_attrs(xdr, &session->fc_attrs); if (!status) status = decode_chan_attrs(xdr, &session->bc_attrs); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_destroy_session(struct xdr_stream *xdr, void *dummy) { return decode_op_hdr(xdr, OP_DESTROY_SESSION); } static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy) { return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE); } #endif /* CONFIG_NFS_V4_1 */ static int decode_sequence(struct xdr_stream *xdr, struct nfs4_sequence_res *res, struct rpc_rqst *rqstp) { #if defined(CONFIG_NFS_V4_1) struct nfs4_sessionid id; u32 dummy; int status; __be32 *p; if (!res->sr_session) return 0; status = decode_op_hdr(xdr, OP_SEQUENCE); if (!status) status = decode_sessionid(xdr, &id); if (unlikely(status)) goto out_err; /* * If the server returns different values for sessionID, slotID or * sequence number, the server is looney tunes. */ status = -EREMOTEIO; if (memcmp(id.data, res->sr_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { dprintk("%s Invalid session id\n", __func__); goto out_err; } p = xdr_inline_decode(xdr, 20); if (unlikely(!p)) goto out_overflow; /* seqid */ dummy = be32_to_cpup(p++); if (dummy != res->sr_slot->seq_nr) { dprintk("%s Invalid sequence number\n", __func__); goto out_err; } /* slot id */ dummy = be32_to_cpup(p++); if (dummy != res->sr_slot - res->sr_session->fc_slot_table.slots) { dprintk("%s Invalid slot id\n", __func__); goto out_err; } /* highest slot id - currently not processed */ dummy = be32_to_cpup(p++); /* target highest slot id - currently not processed */ dummy = be32_to_cpup(p++); /* result flags */ res->sr_status_flags = be32_to_cpup(p); status = 0; out_err: res->sr_status = status; return status; out_overflow: print_overflow_msg(__func__, xdr); status = -EIO; goto out_err; #else /* CONFIG_NFS_V4_1 */ return 0; #endif /* CONFIG_NFS_V4_1 */ } #if defined(CONFIG_NFS_V4_1) static int decode_getdeviceinfo(struct xdr_stream *xdr, struct pnfs_device *pdev) { __be32 *p; uint32_t len, type; int status; status = decode_op_hdr(xdr, OP_GETDEVICEINFO); if (status) { if (status == -ETOOSMALL) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; pdev->mincount = be32_to_cpup(p); dprintk("%s: Min count too small. 
mincnt = %u\n", __func__, pdev->mincount); } return status; } p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; type = be32_to_cpup(p++); if (type != pdev->layout_type) { dprintk("%s: layout mismatch req: %u pdev: %u\n", __func__, pdev->layout_type, type); return -EINVAL; } /* * Get the length of the opaque device_addr4. xdr_read_pages places * the opaque device_addr4 in the xdr_buf->pages (pnfs_device->pages) * and places the remaining xdr data in xdr_buf->tail */ pdev->mincount = be32_to_cpup(p); xdr_read_pages(xdr, pdev->mincount); /* include space for the length */ /* Parse notification bitmap, verifying that it is zero. */ p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; len = be32_to_cpup(p); if (len) { uint32_t i; p = xdr_inline_decode(xdr, 4 * len); if (unlikely(!p)) goto out_overflow; for (i = 0; i < len; i++, p++) { if (be32_to_cpup(p)) { dprintk("%s: notifications not supported\n", __func__); return -EIO; } } } return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_layoutget_res *res) { __be32 *p; int status; u32 layout_count; struct xdr_buf *rcvbuf = &req->rq_rcv_buf; struct kvec *iov = rcvbuf->head; u32 hdrlen, recvd; status = decode_op_hdr(xdr, OP_LAYOUTGET); if (status) return status; p = xdr_inline_decode(xdr, 8 + NFS4_STATEID_SIZE); if (unlikely(!p)) goto out_overflow; res->return_on_close = be32_to_cpup(p++); p = xdr_decode_opaque_fixed(p, res->stateid.data, NFS4_STATEID_SIZE); layout_count = be32_to_cpup(p); if (!layout_count) { dprintk("%s: server responded with empty layout array\n", __func__); return -EINVAL; } p = xdr_inline_decode(xdr, 28); if (unlikely(!p)) goto out_overflow; p = xdr_decode_hyper(p, &res->range.offset); p = xdr_decode_hyper(p, &res->range.length); res->range.iomode = be32_to_cpup(p++); res->type = be32_to_cpup(p++); res->layoutp->len = be32_to_cpup(p); dprintk("%s roff:%lu rlen:%lu riomode:%d, lo_type:0x%x, lo.len:%d\n", __func__, (unsigned long)res->range.offset, (unsigned long)res->range.length, res->range.iomode, res->type, res->layoutp->len); hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base; recvd = req->rq_rcv_buf.len - hdrlen; if (res->layoutp->len > recvd) { dprintk("NFS: server cheating in layoutget reply: " "layout len %u > recvd %u\n", res->layoutp->len, recvd); return -EINVAL; } xdr_read_pages(xdr, res->layoutp->len); if (layout_count > 1) { /* We only handle a length one array at the moment. Any * further entries are just ignored. Note that this means * the client may see a response that is less than the * minimum it requested. 
*/ dprintk("%s: server responded with %d layouts, dropping tail\n", __func__, layout_count); } return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_layoutreturn(struct xdr_stream *xdr, struct nfs4_layoutreturn_res *res) { __be32 *p; int status; status = decode_op_hdr(xdr, OP_LAYOUTRETURN); if (status) return status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; res->lrs_present = be32_to_cpup(p); if (res->lrs_present) status = decode_stateid(xdr, &res->stateid); return status; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } static int decode_layoutcommit(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_layoutcommit_res *res) { __be32 *p; __u32 sizechanged; int status; status = decode_op_hdr(xdr, OP_LAYOUTCOMMIT); if (status) return status; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; sizechanged = be32_to_cpup(p); if (sizechanged) { /* throw away new size */ p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; } return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } #endif /* CONFIG_NFS_V4_1 */ /* * END OF "GENERIC" DECODE ROUTINES. */ /* * Decode OPEN_DOWNGRADE response */ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_closeres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_open_downgrade(xdr, res); if (status != 0) goto out; decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode ACCESS response */ static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_accessres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status != 0) goto out; status = decode_access(xdr, res); if (status != 0) goto out; decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode LOOKUP response */ static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_lookup_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_lookup(xdr); if (status) goto out; status = decode_getfh(xdr, res->fh); if (status) goto out; status = decode_getfattr(xdr, res->fattr, res->server ,!RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode LOOKUP_ROOT response */ static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_lookup_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putrootfh(xdr); if (status) goto out; status = decode_getfh(xdr, res->fh); if (status == 0) status = decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode REMOVE response */ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_removeres *res) { struct 
compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_remove(xdr, &res->cinfo); if (status) goto out; decode_getfattr(xdr, res->dir_attr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode RENAME response */ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_renameres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_savefh(xdr); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo); if (status) goto out; /* Current FH is target directory */ if (decode_getfattr(xdr, res->new_fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)) != 0) goto out; status = decode_restorefh(xdr); if (status) goto out; decode_getfattr(xdr, res->old_fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode LINK response */ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_link_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_savefh(xdr); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_link(xdr, &res->cinfo); if (status) goto out; /* * Note order: OP_LINK leaves the directory as the current * filehandle. 
*/ if (decode_getfattr(xdr, res->dir_attr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)) != 0) goto out; status = decode_restorefh(xdr); if (status) goto out; decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode CREATE response */ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_create_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_savefh(xdr); if (status) goto out; status = decode_create(xdr, &res->dir_cinfo); if (status) goto out; status = decode_getfh(xdr, res->fh); if (status) goto out; if (decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)) != 0) goto out; status = decode_restorefh(xdr); if (status) goto out; decode_getfattr(xdr, res->dir_fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode SYMLINK response */ static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_create_res *res) { return nfs4_xdr_dec_create(rqstp, xdr, res); } /* * Decode GETATTR response */ static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_getattr_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Encode an SETACL request */ static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_setaclargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); encode_setacl(xdr, args, &hdr); encode_nops(&hdr); } /* * Decode SETACL response */ static int nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_setaclres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_setattr(xdr); out: return status; } /* * Decode GETACL response */ static int nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_getaclres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_getacl(xdr, rqstp, &res->acl_len); out: return status; } /* * Decode CLOSE response */ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_closeres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_close(xdr, res); if (status != 0) goto out; /* * Note: Server may do delete on close for this file * in which case the getattr call will fail with * an ESTALE error. 
Shouldn't be a problem, * though, since fattr->valid will remain unset. */ decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode OPEN response */ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_openres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_savefh(xdr); if (status) goto out; status = decode_open(xdr, res); if (status) goto out; if (decode_getfh(xdr, &res->fh) != 0) goto out; if (decode_getfattr(xdr, res->f_attr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)) != 0) goto out; if (decode_restorefh(xdr) != 0) goto out; decode_getfattr(xdr, res->dir_attr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode OPEN_CONFIRM response */ static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_open_confirmres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_open_confirm(xdr, res); out: return status; } /* * Decode OPEN response */ static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_openres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_open(xdr, res); if (status) goto out; decode_getfattr(xdr, res->f_attr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode SETATTR response */ static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_setattrres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_setattr(xdr); if (status) goto out; decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode LOCK response */ static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_lock_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_lock(xdr, res); out: return status; } /* * Decode LOCKT response */ static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_lockt_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_lockt(xdr, res); out: return status; } /* * Decode LOCKU response */ static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_locku_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = 
decode_locku(xdr, res); out: return status; } static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *dummy) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_release_lockowner(xdr); return status; } /* * Decode READLINK response */ static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_readlink_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_readlink(xdr, rqstp); out: return status; } /* * Decode READDIR response */ static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_readdir_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_readdir(xdr, rqstp, res); out: return status; } /* * Decode Read response */ static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_readres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_read(xdr, rqstp, res); if (!status) status = res->count; out: return status; } /* * Decode WRITE response */ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_writeres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_write(xdr, res); if (status) goto out; if (res->fattr) decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); if (!status) status = res->count; out: return status; } /* * Decode COMMIT response */ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_writeres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_commit(xdr, res); if (status) goto out; if (res->fattr) decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode FSINFO response */ static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_fsinfo_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_sequence(xdr, &res->seq_res, req); if (!status) status = decode_putfh(xdr); if (!status) status = decode_fsinfo(xdr, res->fsinfo); return status; } /* * Decode PATHCONF response */ static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_pathconf_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_sequence(xdr, &res->seq_res, req); if (!status) status = decode_putfh(xdr); if (!status) status = decode_pathconf(xdr, res->pathconf); return status; } /* * Decode STATFS response */ static 
int nfs4_xdr_dec_statfs(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_statfs_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_sequence(xdr, &res->seq_res, req); if (!status) status = decode_putfh(xdr); if (!status) status = decode_statfs(xdr, res->fsstat); return status; } /* * Decode GETATTR_BITMAP response */ static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_server_caps_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, req); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_server_caps(xdr, res); out: return status; } /* * Decode RENEW response */ static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *__unused) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_renew(xdr); return status; } /* * Decode SETCLIENTID response */ static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_setclientid_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_setclientid(xdr, res); return status; } /* * Decode SETCLIENTID_CONFIRM response */ static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_setclientid_confirm(xdr); if (!status) status = decode_putrootfh(xdr); if (!status) status = decode_fsinfo(xdr, fsinfo); return status; } /* * Decode DELEGRETURN response */ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_delegreturnres *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status != 0) goto out; status = decode_delegreturn(xdr); if (status != 0) goto out; decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } /* * Decode FS_LOCATIONS response */ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_fs_locations_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, req); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_lookup(xdr); if (status) goto out; xdr_enter_page(xdr, PAGE_SIZE); status = decode_getfattr(xdr, &res->fs_locations->fattr, res->fs_locations->server, !RPC_IS_ASYNC(req->rq_task)); out: return status; } /* * Decode SECINFO response */ static int nfs4_xdr_dec_secinfo(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_secinfo_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_secinfo(xdr, res); if (status) goto out; out: return status; } #if defined(CONFIG_NFS_V4_1) /* * Decode EXCHANGE_ID response */ static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *res) { struct compound_hdr hdr; int 
status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_exchange_id(xdr, res); return status; } /* * Decode CREATE_SESSION response */ static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs41_create_session_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_create_session(xdr, res); return status; } /* * Decode DESTROY_SESSION response */ static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, struct xdr_stream *xdr, void *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_destroy_session(xdr, res); return status; } /* * Decode SEQUENCE response */ static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_sequence_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_sequence(xdr, res, rqstp); return status; } /* * Decode GET_LEASE_TIME response */ static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_get_lease_time_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_sequence(xdr, &res->lr_seq_res, rqstp); if (!status) status = decode_putrootfh(xdr); if (!status) status = decode_fsinfo(xdr, res->lr_fsinfo); return status; } /* * Decode RECLAIM_COMPLETE response */ static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs41_reclaim_complete_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (!status) status = decode_sequence(xdr, &res->seq_res, rqstp); if (!status) status = decode_reclaim_complete(xdr, (void *)NULL); return status; } /* * Decode GETDEVINFO response */ static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_getdeviceinfo_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status != 0) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status != 0) goto out; status = decode_getdeviceinfo(xdr, res->pdev); out: return status; } /* * Decode LAYOUTGET response */ static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_layoutget_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_layoutget(xdr, rqstp, res); out: return status; } /* * Decode LAYOUTRETURN response */ static int nfs4_xdr_dec_layoutreturn(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_layoutreturn_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = decode_layoutreturn(xdr, res); out: return status; } /* * Decode LAYOUTCOMMIT response */ static int nfs4_xdr_dec_layoutcommit(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs4_layoutcommit_res *res) { struct compound_hdr hdr; int status; status = decode_compound_hdr(xdr, &hdr); if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; status = decode_putfh(xdr); if (status) goto out; status = 
decode_layoutcommit(xdr, rqstp, res); if (status) goto out; decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; } #endif /* CONFIG_NFS_V4_1 */ /** * nfs4_decode_dirent - Decode a single NFSv4 directory entry stored in * the local page cache. * @xdr: XDR stream where entry resides * @entry: buffer to fill in with entry data * @plus: boolean indicating whether this should be a readdirplus entry * * Returns zero if successful, otherwise a negative errno value is * returned. * * This function is not invoked during READDIR reply decoding, but * rather whenever an application invokes the getdents(2) system call * on a directory already in our cache. */ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, int plus) { uint32_t bitmap[2] = {0}; uint32_t len; __be32 *p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; if (*p == xdr_zero) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; if (*p == xdr_zero) return -EAGAIN; entry->eof = 1; return -EBADCOOKIE; } p = xdr_inline_decode(xdr, 12); if (unlikely(!p)) goto out_overflow; entry->prev_cookie = entry->cookie; p = xdr_decode_hyper(p, &entry->cookie); entry->len = be32_to_cpup(p); p = xdr_inline_decode(xdr, entry->len); if (unlikely(!p)) goto out_overflow; entry->name = (const char *) p; /* * In case the server doesn't return an inode number, * we fake one here. (We don't use inode number 0, * since glibc seems to choke on it...) */ entry->ino = 1; entry->fattr->valid = 0; if (decode_attr_bitmap(xdr, bitmap) < 0) goto out_overflow; if (decode_attr_length(xdr, &len, &p) < 0) goto out_overflow; if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh, entry->server, 1) < 0) goto out_overflow; if (entry->fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) entry->ino = entry->fattr->mounted_on_fileid; else if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID) entry->ino = entry->fattr->fileid; entry->d_type = DT_UNKNOWN; if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE) entry->d_type = nfs_umode_to_dtype(entry->fattr->mode); return 0; out_overflow: print_overflow_msg(__func__, xdr); return -EAGAIN; } /* * We need to translate between nfs status return values and * the local errno values which may not be the same. */ static struct { int stat; int errno; } nfs_errtbl[] = { { NFS4_OK, 0 }, { NFS4ERR_PERM, -EPERM }, { NFS4ERR_NOENT, -ENOENT }, { NFS4ERR_IO, -errno_NFSERR_IO}, { NFS4ERR_NXIO, -ENXIO }, { NFS4ERR_ACCESS, -EACCES }, { NFS4ERR_EXIST, -EEXIST }, { NFS4ERR_XDEV, -EXDEV }, { NFS4ERR_NOTDIR, -ENOTDIR }, { NFS4ERR_ISDIR, -EISDIR }, { NFS4ERR_INVAL, -EINVAL }, { NFS4ERR_FBIG, -EFBIG }, { NFS4ERR_NOSPC, -ENOSPC }, { NFS4ERR_ROFS, -EROFS }, { NFS4ERR_MLINK, -EMLINK }, { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG }, { NFS4ERR_NOTEMPTY, -ENOTEMPTY }, { NFS4ERR_DQUOT, -EDQUOT }, { NFS4ERR_STALE, -ESTALE }, { NFS4ERR_BADHANDLE, -EBADHANDLE }, { NFS4ERR_BAD_COOKIE, -EBADCOOKIE }, { NFS4ERR_NOTSUPP, -ENOTSUPP }, { NFS4ERR_TOOSMALL, -ETOOSMALL }, { NFS4ERR_SERVERFAULT, -EREMOTEIO }, { NFS4ERR_BADTYPE, -EBADTYPE }, { NFS4ERR_LOCKED, -EAGAIN }, { NFS4ERR_SYMLINK, -ELOOP }, { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP }, { NFS4ERR_DEADLOCK, -EDEADLK }, { -1, -EIO } }; /* * Convert an NFS error code to a local one. * This one is used jointly by NFSv2 and NFSv3. */ static int nfs4_stat_to_errno(int stat) { int i; for (i = 0; nfs_errtbl[i].stat != -1; i++) { if (nfs_errtbl[i].stat == stat) return nfs_errtbl[i].errno; } if (stat <= 10000 || stat > 10100) { /* The server is looney tunes. 
*/ return -EREMOTEIO; } /* If we cannot translate the error, the recovery routines should * handle it. * Note: remaining NFSv4 error codes have values > 10000, so should * not conflict with native Linux error codes. */ return -stat; } #define PROC(proc, argtype, restype) \ [NFSPROC4_CLNT_##proc] = { \ .p_proc = NFSPROC4_COMPOUND, \ .p_encode = (kxdreproc_t)nfs4_xdr_##argtype, \ .p_decode = (kxdrdproc_t)nfs4_xdr_##restype, \ .p_arglen = NFS4_##argtype##_sz, \ .p_replen = NFS4_##restype##_sz, \ .p_statidx = NFSPROC4_CLNT_##proc, \ .p_name = #proc, \ } struct rpc_procinfo nfs4_procedures[] = { PROC(READ, enc_read, dec_read), PROC(WRITE, enc_write, dec_write), PROC(COMMIT, enc_commit, dec_commit), PROC(OPEN, enc_open, dec_open), PROC(OPEN_CONFIRM, enc_open_confirm, dec_open_confirm), PROC(OPEN_NOATTR, enc_open_noattr, dec_open_noattr), PROC(OPEN_DOWNGRADE, enc_open_downgrade, dec_open_downgrade), PROC(CLOSE, enc_close, dec_close), PROC(SETATTR, enc_setattr, dec_setattr), PROC(FSINFO, enc_fsinfo, dec_fsinfo), PROC(RENEW, enc_renew, dec_renew), PROC(SETCLIENTID, enc_setclientid, dec_setclientid), PROC(SETCLIENTID_CONFIRM, enc_setclientid_confirm, dec_setclientid_confirm), PROC(LOCK, enc_lock, dec_lock), PROC(LOCKT, enc_lockt, dec_lockt), PROC(LOCKU, enc_locku, dec_locku), PROC(ACCESS, enc_access, dec_access), PROC(GETATTR, enc_getattr, dec_getattr), PROC(LOOKUP, enc_lookup, dec_lookup), PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root), PROC(REMOVE, enc_remove, dec_remove), PROC(RENAME, enc_rename, dec_rename), PROC(LINK, enc_link, dec_link), PROC(SYMLINK, enc_symlink, dec_symlink), PROC(CREATE, enc_create, dec_create), PROC(PATHCONF, enc_pathconf, dec_pathconf), PROC(STATFS, enc_statfs, dec_statfs), PROC(READLINK, enc_readlink, dec_readlink), PROC(READDIR, enc_readdir, dec_readdir), PROC(SERVER_CAPS, enc_server_caps, dec_server_caps), PROC(DELEGRETURN, enc_delegreturn, dec_delegreturn), PROC(GETACL, enc_getacl, dec_getacl), PROC(SETACL, enc_setacl, dec_setacl), PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations), PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner), PROC(SECINFO, enc_secinfo, dec_secinfo), #if defined(CONFIG_NFS_V4_1) PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id), PROC(CREATE_SESSION, enc_create_session, dec_create_session), PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session), PROC(SEQUENCE, enc_sequence, dec_sequence), PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time), PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete), PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo), PROC(LAYOUTGET, enc_layoutget, dec_layoutget), PROC(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit), PROC(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn), #endif /* CONFIG_NFS_V4_1 */ }; struct rpc_version nfs_version4 = { .number = 4, .nrprocs = ARRAY_SIZE(nfs4_procedures), .procs = nfs4_procedures }; /* * Local variables: * c-basic-offset: 8 * End: */
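/*
 * Standalone sketch, not part of nfs4xdr.c: the PROC() macro above builds each
 * rpc_procinfo entry with token pasting (NFSPROC4_CLNT_##proc) and
 * stringification (#proc).  The miniature program below demonstrates the same
 * two preprocessor tricks with made-up names, so the expansion can be compiled
 * and inspected in isolation.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

enum { DEMO_CLNT_READ, DEMO_CLNT_WRITE };

struct demo_procinfo {
    int         p_statidx;
    const char *p_name;
};

/* Same shape as PROC(): the index comes from token pasting, the name from #. */
#define DEMO_PROC(proc)                   \
    [DEMO_CLNT_##proc] = {                \
        .p_statidx = DEMO_CLNT_##proc,    \
        .p_name    = #proc,               \
    }

static const struct demo_procinfo demo_procedures[] = {
    DEMO_PROC(READ),
    DEMO_PROC(WRITE),
};

int main(void)
{
    assert(demo_procedures[DEMO_CLNT_WRITE].p_statidx == DEMO_CLNT_WRITE);
    assert(strcmp(demo_procedures[DEMO_CLNT_READ].p_name, "READ") == 0);
    printf("macro expansion ok\n");
    return 0;
}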
367735.c
/* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2007 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. */ #pragma ident "%Z%%M% %I% %E% SMI" #include <stdio.h> #include <assert.h> #include <strings.h> #include <kmfapi.h> #include "kssladm.h" /* * Extract the Certificate and raw key data from a PKCS#12 file. * The password needed for decrypting the PKCS#12 PDU is stored * in plaintext in the given "password_file" parameter. */ int PKCS12_get_rsa_key_certs(KMF_HANDLE_T kmfh, const char *filename, const char *password_file, KMF_RAW_KEY_DATA **rsa, KMF_X509_DER_CERT **certs) { char password_buf[1024]; KMF_RETURN rv = KMF_OK; KMF_CREDENTIAL pk12cred; KMF_X509_DER_CERT *tcerts; KMF_RAW_KEY_DATA *keys; int ncerts, nkeys; char *err = NULL; tcerts = NULL; keys = NULL; ncerts = 0; nkeys = 0; if (get_passphrase(password_file, password_buf, sizeof (password_buf)) <= 0) { perror("Unable to read passphrase"); goto done; } pk12cred.cred = password_buf; pk12cred.credlen = strlen(password_buf); rv = kmf_import_objects(kmfh, (char *)filename, &pk12cred, &tcerts, &ncerts, &keys, &nkeys); if (rv != KMF_OK) { REPORT_KMF_ERROR(rv, "Error importing PKCS12 data", err); } done: if (rv != KMF_OK) { int i; if (tcerts != NULL) { for (i = 0; i < ncerts; i++) kmf_free_kmf_cert(kmfh, &tcerts[i]); free(tcerts); } tcerts = NULL; ncerts = 0; if (keys != NULL) { for (i = 0; i < nkeys; i++) kmf_free_raw_key(&keys[i]); free(keys); } keys = NULL; } *certs = tcerts; *rsa = keys; return (ncerts); } /* * Parse a PEM file which should contain RSA private keys and * their associated X.509v3 certificates. More than 1 may * be present in the file. */ int PEM_get_rsa_key_certs(KMF_HANDLE_T kmfh, const char *filename, char *password_file, KMF_RAW_KEY_DATA **rsa, KMF_X509_DER_CERT **certs) { KMF_RETURN rv = KMF_OK; KMF_CREDENTIAL creds; KMF_X509_DER_CERT *tcerts; KMF_RAW_KEY_DATA *keys; int ncerts, nkeys; char *err = NULL; char password_buf[1024]; tcerts = NULL; keys = NULL; ncerts = 0; nkeys = 0; if (get_passphrase(password_file, password_buf, sizeof (password_buf)) <= 0) { perror("Unable to read passphrase"); goto done; } creds.cred = password_buf; creds.credlen = strlen(password_buf); rv = kmf_import_objects(kmfh, (char *)filename, &creds, &tcerts, &ncerts, &keys, &nkeys); if (rv != KMF_OK) { REPORT_KMF_ERROR(rv, "Error importing key data", err); } done: if (rv != KMF_OK) { int i; if (tcerts != NULL) { for (i = 0; i < ncerts; i++) kmf_free_kmf_cert(kmfh, &tcerts[i]); free(tcerts); } tcerts = NULL; ncerts = 0; if (keys != NULL) { for (i = 0; i < nkeys; i++) kmf_free_raw_key(&keys[i]); free(keys); } keys = NULL; } if (certs != NULL) *certs = tcerts; if (rsa != NULL) *rsa = keys; return (ncerts); }
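/*
 * Illustrative caller-side sketch, not part of the original kssladm sources:
 * both helpers above hand back caller-owned arrays through their "certs" and
 * "rsa" output parameters and return the number of certificates found.  The
 * fragment below shows the matching cleanup, reusing only the KMF calls
 * already used in this file; the handle and the element counts are assumed to
 * be tracked by the caller, and demo_free_imported_objects is a made-up name.
 */
#include <stdlib.h>	/* for free() */

static void
demo_free_imported_objects(KMF_HANDLE_T kmfh, KMF_X509_DER_CERT *certs,
    int ncerts, KMF_RAW_KEY_DATA *keys, int nkeys)
{
	int i;

	if (certs != NULL) {
		for (i = 0; i < ncerts; i++)
			kmf_free_kmf_cert(kmfh, &certs[i]);
		free(certs);
	}
	if (keys != NULL) {
		for (i = 0; i < nkeys; i++)
			kmf_free_raw_key(&keys[i]);
		free(keys);
	}
}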
340073.c
#include "NotifierTest.h" #include "interfaces/Notifier.h" #include <stdio.h> #include <assert.h> int main(int argc, char *argv[]) { testInstance(); testRegisterCommandAndSendNotification(); puts("NotifierTest: Success"); return 0; } typedef struct { int value; int result; } Object; /** * test Notifier */ void testInstance() { // create notifier instance Notifier *notifier = $Notifier.new(); // initialize facade notifier->initializeNotifier(notifier, "Test1"); // assert assert(notifier != NULL); assert(notifier->getFacade(notifier) != NULL); $Facade.removeFacade("Test1"); $Notifier.delete(notifier); } /** * an implementation that multiplies the input value by 4 and stores into result */ static void execute(SimpleCommand *self, Notification *notification) { Object *temp = notification->getBody(notification); // fabricate a result temp->result = temp->value * 4; } /** * Create a simple command */ static SimpleCommand *NewCommand() { SimpleCommand *command = $SimpleCommand.new(); command->execute = execute; return command; } /** * Register a command and test by sending a notification */ void testRegisterCommandAndSendNotification() { // create a notifier Notifier *notifier = $Notifier.new(); // initialize facade notifier->initializeNotifier(notifier, "Test2"); Object temp = {4}; // get facade instance Facade *facade = notifier->getFacade(notifier); // register a command and send notification facade->registerCommand(facade, "TestNote", NewCommand); notifier->sendNotification(notifier, "TestNote", &temp, NULL); // assert result assert(temp.result == 16); facade->removeCommand(facade, "TestNote"); $Facade.removeFacade("Test2"); $Notifier.delete(notifier); }
249006.c
/* ************************************************************************** */ /* */ /* ::: :::::::: */ /* update_editor.c :+: :+: :+: */ /* +:+ +:+ +:+ */ /* By: kguibout <[email protected]> +#+ +:+ +#+ */ /* +#+#+#+#+#+ +#+ */ /* Created: 2020/06/04 17:48:53 by kguibout #+# #+# */ /* Updated: 2020/07/13 03:53:09 by kguibout ### ########.fr */ /* */ /* ************************************************************************** */ #include "editor.h" #include "inputs.h" static bool update_2d_mode(t_env *env) { update_notify(env, &env->editor.notify); if (!is_over_gui_window(env) && env->main_window->is_over) { zoom_inputs(env); basic_inputs(env, env->ph_context.frame_time); } get_closest_wall(env); set_grid_info(env); get_snap_point(env); if (get_key(SDL_SCANCODE_K)) env->need_redraw = true; return (true); } static bool update_key_3d(t_env *env) { if (get_key_down(env->shortcuts.editor.gravity)) { env->use_gravity = !env->use_gravity; if (env->use_gravity) { env->player.physic.velocity = vec3f(0, 0, 0); } } if (get_key_down(env->shortcuts.editor.texture)) { if (!selec(env)) return (false); } return (true); } static bool update_3d_mode_p2(t_env *env) { if (!set_listener_position(env->player.transform->position) || !set_listener_orientation(camera_forward(&env->camera), camera_up(&env->camera))) return (false); return (true); } static bool update_3d_mode(t_env *env) { env->player.state = 0; if (!update_pause_menu(env, &env->gui.pause_menu)) return (false); if (!update_key_3d(env)) return (false); if (env->use_gravity) { if (get_key_down(env->shortcuts.game.user.jump) && env->player.state & 1 << ST_ON_GROUND) env->player.physic.velocity.y = 4; } if (!ph_loop(&env->ph_context)) return (false); if (env->current_map->level.start.available && env->current_map->level.end.available) if (!update_level(env, env->ph_context.dt)) return (false); return (update_3d_mode_p2(env)); } bool update_editor(void *param) { t_env *env; env = param; if (env->app_state == APP_EDITOR) update_2d_mode(env); if (!update_input(env)) return (false); if (env->app_state != APP_EDITOR) update_3d_mode(env); if (env->editor.history.available) reset_history_window_pos(env, env->editor.history.window); return (true); }
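/*
 * Standalone sketch, not part of the editor sources: the ground-contact test
 * in update_3d_mode() above, "env->player.state & 1 << ST_ON_GROUND", works
 * because << binds more tightly than &, so it parses as
 * "state & (1 << ST_ON_GROUND)".  The program below demonstrates the same
 * flag set/test/clear idiom; DEMO_ST_ON_GROUND is a made-up bit index.
 */
#include <assert.h>

enum { DEMO_ST_ON_GROUND = 3 };   /* hypothetical bit position */

int main(void)
{
    unsigned state = 0;

    state |= 1u << DEMO_ST_ON_GROUND;            /* set the flag */
    assert(state & 1 << DEMO_ST_ON_GROUND);      /* parsed as state & (1 << n) */
    state &= ~(1u << DEMO_ST_ON_GROUND);         /* clear the flag */
    assert(!(state & 1 << DEMO_ST_ON_GROUND));
    return 0;
}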
508981.c
/* * Copyright (c) 2016, The OpenThread Authors. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the copyright holder nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /** * @file * This file implements a pseudo-random number generator. * */ #include "openthread-core-config.h" #include "platform-posix.h" #include <assert.h> #include <stdio.h> #include <openthread/platform/random.h> #include "code_utils.h" static uint32_t sState = 1; void platformRandomInit(void) { #if __SANITIZE_ADDRESS__ == 0 otError error; error = otPlatRandomGetTrue((uint8_t *)&sState, sizeof(sState)); assert(error == OT_ERROR_NONE); #else // __SANITIZE_ADDRESS__ // Multiplying gNodeId assures that no two nodes gets the same seed within an hour. sState = (uint32_t)time(NULL) + (3600 * gNodeId); #endif // __SANITIZE_ADDRESS__ } uint32_t otPlatRandomGet(void) { uint32_t mlcg, p, q; uint64_t tmpstate; tmpstate = (uint64_t)33614 * (uint64_t)sState; q = tmpstate & 0xffffffff; q = q >> 1; p = tmpstate >> 32; mlcg = p + q; if (mlcg & 0x80000000) { mlcg &= 0x7fffffff; mlcg++; } sState = mlcg; return mlcg; } otError otPlatRandomGetTrue(uint8_t *aOutput, uint16_t aOutputLength) { otError error = OT_ERROR_NONE; #if __SANITIZE_ADDRESS__ == 0 FILE * file = NULL; size_t readLength; otEXPECT_ACTION(aOutput && aOutputLength, error = OT_ERROR_INVALID_ARGS); file = fopen("/dev/urandom", "rb"); otEXPECT_ACTION(file != NULL, error = OT_ERROR_FAILED); readLength = fread(aOutput, 1, aOutputLength, file); otEXPECT_ACTION(readLength == aOutputLength, error = OT_ERROR_FAILED); exit: if (file != NULL) { fclose(file); } #else // __SANITIZE_ADDRESS__ /* * THE IMPLEMENTATION BELOW IS NOT COMPLIANT WITH THE THREAD SPECIFICATION. * * Address Sanitizer triggers test failures when reading random * values from /dev/urandom. The pseudo-random number generator * implementation below is only used to enable continuous * integration checks with Address Sanitizer enabled. */ otEXPECT_ACTION(aOutput && aOutputLength, error = OT_ERROR_INVALID_ARGS); for (uint16_t length = 0; length < aOutputLength; length++) { aOutput[length] = (uint8_t)otPlatRandomGet(); } exit: #endif // __SANITIZE_ADDRESS__ return error; }
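/*
 * Standalone cross-check sketch, not part of the OpenThread port: the update
 * in otPlatRandomGet() above looks like the Park-Miller "minimal standard"
 * generator (multiplier 16807, modulus 2^31 - 1) written with a doubled
 * multiplier, 33614 = 2 * 16807, so the product can be split on 32-bit
 * boundaries.  That equivalence is an inference from the code, not something
 * the file states; the program below compares the bit-twiddled step against a
 * direct 64-bit modulo to check it.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t step_bit_twiddled(uint32_t s)
{
    uint64_t t = (uint64_t)33614 * s;
    uint32_t q = (uint32_t)(t & 0xffffffff) >> 1;   /* low half, halved   */
    uint32_t p = (uint32_t)(t >> 32);               /* high half          */
    uint32_t r = p + q;

    if (r & 0x80000000)
    {
        r &= 0x7fffffff;
        r++;
    }
    return r;
}

static uint32_t step_direct(uint32_t s)
{
    return (uint32_t)(((uint64_t)16807 * s) % 2147483647u);
}

int main(void)
{
    uint32_t s = 1;

    for (int i = 0; i < 1000; i++)
    {
        uint32_t next = step_bit_twiddled(s);

        assert(next == step_direct(s));
        s = next;
    }
    printf("state after 1000 steps: %u\n", s);
    return 0;
}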
580685.c
/* Generic definitions */ #define PACKAGE it.unimi.dsi.fastutil.doubles #define VALUE_PACKAGE it.unimi.dsi.fastutil.objects /* Assertions (useful to generate conditional code) */ #unassert keyclass #assert keyclass(Double) #unassert keys #assert keys(primitive) #unassert valueclass #assert valueclass(Object) #unassert values #assert values(reference) /* Current type and class (and size, if applicable) */ #define KEY_TYPE double #define VALUE_TYPE Object #define KEY_CLASS Double #define VALUE_CLASS Object #if #keyclass(Object) || #keyclass(Reference) #define KEY_GENERIC_CLASS K #define KEY_GENERIC_TYPE K #define KEY_GENERIC <K> #define KEY_GENERIC_WILDCARD <?> #define KEY_EXTENDS_GENERIC <? extends K> #define KEY_SUPER_GENERIC <? super K> #define KEY_GENERIC_CAST (K) #define KEY_GENERIC_ARRAY_CAST (K[]) #define KEY_GENERIC_BIG_ARRAY_CAST (K[][]) #else #define KEY_GENERIC_CLASS KEY_CLASS #define KEY_GENERIC_TYPE KEY_TYPE #define KEY_GENERIC #define KEY_GENERIC_WILDCARD #define KEY_EXTENDS_GENERIC #define KEY_SUPER_GENERIC #define KEY_GENERIC_CAST #define KEY_GENERIC_ARRAY_CAST #define KEY_GENERIC_BIG_ARRAY_CAST #endif #if #valueclass(Object) || #valueclass(Reference) #define VALUE_GENERIC_CLASS V #define VALUE_GENERIC_TYPE V #define VALUE_GENERIC <V> #define VALUE_EXTENDS_GENERIC <? extends V> #define VALUE_GENERIC_CAST (V) #define VALUE_GENERIC_ARRAY_CAST (V[]) #else #define VALUE_GENERIC_CLASS VALUE_CLASS #define VALUE_GENERIC_TYPE VALUE_TYPE #define VALUE_GENERIC #define VALUE_EXTENDS_GENERIC #define VALUE_GENERIC_CAST #define VALUE_GENERIC_ARRAY_CAST #endif #if #keyclass(Object) || #keyclass(Reference) #if #valueclass(Object) || #valueclass(Reference) #define KEY_VALUE_GENERIC <K,V> #define KEY_VALUE_EXTENDS_GENERIC <? extends K, ? extends V> #else #define KEY_VALUE_GENERIC <K> #define KEY_VALUE_EXTENDS_GENERIC <? extends K> #endif #else #if #valueclass(Object) || #valueclass(Reference) #define KEY_VALUE_GENERIC <V> #define KEY_VALUE_EXTENDS_GENERIC <? 
extends V> #else #define KEY_VALUE_GENERIC #define KEY_VALUE_EXTENDS_GENERIC #endif #endif /* Value methods */ #define KEY_VALUE doubleValue #define VALUE_VALUE ObjectValue /* Interfaces (keys) */ #define COLLECTION DoubleCollection #define SET DoubleSet #define HASH DoubleHash #define SORTED_SET DoubleSortedSet #define STD_SORTED_SET DoubleSortedSet #define FUNCTION Double2ObjectFunction #define MAP Double2ObjectMap #define SORTED_MAP Double2ObjectSortedMap #if #keyclass(Object) || #keyclass(Reference) #define STD_SORTED_MAP SortedMap #define STRATEGY Strategy #else #define STD_SORTED_MAP Double2ObjectSortedMap #define STRATEGY PACKAGE.DoubleHash.Strategy #endif #define LIST DoubleList #define BIG_LIST DoubleBigList #define STACK DoubleStack #define PRIORITY_QUEUE DoublePriorityQueue #define INDIRECT_PRIORITY_QUEUE DoubleIndirectPriorityQueue #define INDIRECT_DOUBLE_PRIORITY_QUEUE DoubleIndirectDoublePriorityQueue #define KEY_ITERATOR DoubleIterator #define KEY_ITERABLE DoubleIterable #define KEY_BIDI_ITERATOR DoubleBidirectionalIterator #define KEY_LIST_ITERATOR DoubleListIterator #define KEY_BIG_LIST_ITERATOR DoubleBigListIterator #define STD_KEY_ITERATOR DoubleIterator #define KEY_COMPARATOR DoubleComparator /* Interfaces (values) */ #define VALUE_COLLECTION ObjectCollection #define VALUE_ARRAY_SET ObjectArraySet #define VALUE_ITERATOR ObjectIterator #define VALUE_LIST_ITERATOR ObjectListIterator /* Abstract implementations (keys) */ #define ABSTRACT_COLLECTION AbstractDoubleCollection #define ABSTRACT_SET AbstractDoubleSet #define ABSTRACT_SORTED_SET AbstractDoubleSortedSet #define ABSTRACT_FUNCTION AbstractDouble2ObjectFunction #define ABSTRACT_MAP AbstractDouble2ObjectMap #define ABSTRACT_FUNCTION AbstractDouble2ObjectFunction #define ABSTRACT_SORTED_MAP AbstractDouble2ObjectSortedMap #define ABSTRACT_LIST AbstractDoubleList #define ABSTRACT_BIG_LIST AbstractDoubleBigList #define SUBLIST DoubleSubList #define ABSTRACT_PRIORITY_QUEUE AbstractDoublePriorityQueue #define ABSTRACT_STACK AbstractDoubleStack #define KEY_ABSTRACT_ITERATOR AbstractDoubleIterator #define KEY_ABSTRACT_BIDI_ITERATOR AbstractDoubleBidirectionalIterator #define KEY_ABSTRACT_LIST_ITERATOR AbstractDoubleListIterator #define KEY_ABSTRACT_BIG_LIST_ITERATOR AbstractDoubleBigListIterator #if #keyclass(Object) #define KEY_ABSTRACT_COMPARATOR Comparator #else #define KEY_ABSTRACT_COMPARATOR AbstractDoubleComparator #endif /* Abstract implementations (values) */ #define VALUE_ABSTRACT_COLLECTION AbstractObjectCollection #define VALUE_ABSTRACT_ITERATOR AbstractObjectIterator #define VALUE_ABSTRACT_BIDI_ITERATOR AbstractObjectBidirectionalIterator /* Static containers (keys) */ #define COLLECTIONS DoubleCollections #define SETS DoubleSets #define SORTED_SETS DoubleSortedSets #define LISTS DoubleLists #define BIG_LISTS DoubleBigLists #define MAPS Double2ObjectMaps #define FUNCTIONS Double2ObjectFunctions #define SORTED_MAPS Double2ObjectSortedMaps #define PRIORITY_QUEUES DoublePriorityQueues #define HEAPS DoubleHeaps #define SEMI_INDIRECT_HEAPS DoubleSemiIndirectHeaps #define INDIRECT_HEAPS DoubleIndirectHeaps #define ARRAYS DoubleArrays #define BIG_ARRAYS DoubleBigArrays #define ITERATORS DoubleIterators #define BIG_LIST_ITERATORS DoubleBigListIterators #define COMPARATORS DoubleComparators /* Static containers (values) */ #define VALUE_COLLECTIONS ObjectCollections #define VALUE_SETS ObjectSets #define VALUE_ARRAYS ObjectArrays /* Implementations */ #define OPEN_HASH_SET DoubleOpenHashSet #define OPEN_HASH_BIG_SET 
DoubleOpenHashBigSet #define OPEN_DOUBLE_HASH_SET DoubleOpenDoubleHashSet #define OPEN_HASH_MAP Double2ObjectOpenHashMap #define STRIPED_OPEN_HASH_MAP StripedDouble2ObjectOpenHashMap #define OPEN_DOUBLE_HASH_MAP Double2ObjectOpenDoubleHashMap #define ARRAY_SET DoubleArraySet #define ARRAY_MAP Double2ObjectArrayMap #define LINKED_OPEN_HASH_SET DoubleLinkedOpenHashSet #define AVL_TREE_SET DoubleAVLTreeSet #define RB_TREE_SET DoubleRBTreeSet #define AVL_TREE_MAP Double2ObjectAVLTreeMap #define RB_TREE_MAP Double2ObjectRBTreeMap #define ARRAY_LIST DoubleArrayList #define BIG_ARRAY_BIG_LIST DoubleBigArrayBigList #define ARRAY_FRONT_CODED_LIST DoubleArrayFrontCodedList #define HEAP_PRIORITY_QUEUE DoubleHeapPriorityQueue #define HEAP_SEMI_INDIRECT_PRIORITY_QUEUE DoubleHeapSemiIndirectPriorityQueue #define HEAP_INDIRECT_PRIORITY_QUEUE DoubleHeapIndirectPriorityQueue #define HEAP_SESQUI_INDIRECT_DOUBLE_PRIORITY_QUEUE DoubleHeapSesquiIndirectDoublePriorityQueue #define HEAP_INDIRECT_DOUBLE_PRIORITY_QUEUE DoubleHeapIndirectDoublePriorityQueue #define ARRAY_FIFO_QUEUE DoubleArrayFIFOQueue #define ARRAY_PRIORITY_QUEUE DoubleArrayPriorityQueue #define ARRAY_INDIRECT_PRIORITY_QUEUE DoubleArrayIndirectPriorityQueue #define ARRAY_INDIRECT_DOUBLE_PRIORITY_QUEUE DoubleArrayIndirectDoublePriorityQueue /* Synchronized wrappers */ #define SYNCHRONIZED_COLLECTION SynchronizedDoubleCollection #define SYNCHRONIZED_SET SynchronizedDoubleSet #define SYNCHRONIZED_SORTED_SET SynchronizedDoubleSortedSet #define SYNCHRONIZED_FUNCTION SynchronizedDouble2ObjectFunction #define SYNCHRONIZED_MAP SynchronizedDouble2ObjectMap #define SYNCHRONIZED_LIST SynchronizedDoubleList /* Unmodifiable wrappers */ #define UNMODIFIABLE_COLLECTION UnmodifiableDoubleCollection #define UNMODIFIABLE_SET UnmodifiableDoubleSet #define UNMODIFIABLE_SORTED_SET UnmodifiableDoubleSortedSet #define UNMODIFIABLE_FUNCTION UnmodifiableDouble2ObjectFunction #define UNMODIFIABLE_MAP UnmodifiableDouble2ObjectMap #define UNMODIFIABLE_LIST UnmodifiableDoubleList #define UNMODIFIABLE_KEY_ITERATOR UnmodifiableDoubleIterator #define UNMODIFIABLE_KEY_BIDI_ITERATOR UnmodifiableDoubleBidirectionalIterator #define UNMODIFIABLE_KEY_LIST_ITERATOR UnmodifiableDoubleListIterator /* Other wrappers */ #define KEY_READER_WRAPPER DoubleReaderWrapper #define KEY_DATA_INPUT_WRAPPER DoubleDataInputWrapper /* Methods (keys) */ #define NEXT_KEY nextDouble #define PREV_KEY previousDouble #define FIRST_KEY firstDoubleKey #define LAST_KEY lastDoubleKey #define GET_KEY getDouble #define REMOVE_KEY removeDouble #define READ_KEY readDouble #define WRITE_KEY writeDouble #define DEQUEUE dequeueDouble #define DEQUEUE_LAST dequeueLastDouble #define SUBLIST_METHOD doubleSubList #define SINGLETON_METHOD doubleSingleton #define FIRST firstDouble #define LAST lastDouble #define TOP topDouble #define PEEK peekDouble #define POP popDouble #define KEY_ITERATOR_METHOD doubleIterator #define KEY_LIST_ITERATOR_METHOD doubleListIterator #define KEY_EMPTY_ITERATOR_METHOD emptyDoubleIterator #define AS_KEY_ITERATOR asDoubleIterator #define TO_KEY_ARRAY toDoubleArray #define ENTRY_GET_KEY getDoubleKey #define REMOVE_FIRST_KEY removeFirstDouble #define REMOVE_LAST_KEY removeLastDouble #define PARSE_KEY parseDouble #define LOAD_KEYS loadDoubles #define LOAD_KEYS_BIG loadDoublesBig #define STORE_KEYS storeDoubles /* Methods (values) */ #define NEXT_VALUE next #define PREV_VALUE previous #define READ_VALUE readObject #define WRITE_VALUE writeObject #define VALUE_ITERATOR_METHOD objectIterator #define 
ENTRY_GET_VALUE getValue #define REMOVE_FIRST_VALUE removeFirst #define REMOVE_LAST_VALUE removeLast /* Methods (keys/values) */ #define ENTRYSET double2ObjectEntrySet /* Methods that have special names depending on keys (but the special names depend on values) */ #if #keyclass(Object) || #keyclass(Reference) #define GET_VALUE get #define REMOVE_VALUE remove #else #define GET_VALUE get #define REMOVE_VALUE remove #endif /* Equality */ #ifdef Custom #define KEY_EQUALS(x,y) ( strategy.equals( (x), KEY_GENERIC_CAST (y) ) ) #else #if #keyclass(Object) #define KEY_EQUALS(x,y) ( (x) == null ? (y) == null : (x).equals(y) ) #define KEY_EQUALS_NOT_NULL(x,y) ( (x).equals(y) ) #else #define KEY_EQUALS(x,y) ( (x) == (y) ) #define KEY_EQUALS_NOT_NULL(x,y) ( (x) == (y) ) #endif #endif #if #valueclass(Object) #define VALUE_EQUALS(x,y) ( (x) == null ? (y) == null : (x).equals(y) ) #else #define VALUE_EQUALS(x,y) ( (x) == (y) ) #endif /* Object/Reference-only definitions (keys) */ #if #keyclass(Object) || #keyclass(Reference) #define REMOVE remove #define KEY_OBJ2TYPE(x) (x) #define KEY_CLASS2TYPE(x) (x) #define KEY2OBJ(x) (x) #if #keyclass(Object) #ifdef Custom #define KEY2JAVAHASH(x) ( strategy.hashCode( KEY_GENERIC_CAST (x)) ) #define KEY2INTHASH(x) ( it.unimi.dsi.fastutil.HashCommon.murmurHash3( strategy.hashCode( KEY_GENERIC_CAST (x)) ) ) #define KEY2LONGHASH(x) ( it.unimi.dsi.fastutil.HashCommon.murmurHash3( (long)strategy.hashCode( KEY_GENERIC_CAST (x)) ) ) #else #define KEY2JAVAHASH(x) ( (x) == null ? 0 : (x).hashCode() ) #define KEY2INTHASH(x) ( (x) == null ? 0x87fcd5c : it.unimi.dsi.fastutil.HashCommon.murmurHash3( (x).hashCode() ) ) #define KEY2LONGHASH(x) ( (x) == null ? 0x810879608e4259ccL : it.unimi.dsi.fastutil.HashCommon.murmurHash3( (long)(x).hashCode() ) ) #endif #else #define KEY2JAVAHASH(x) ( (x) == null ? 0 : System.identityHashCode(x) ) #define KEY2INTHASH(x) ( (x) == null ? 0x87fcd5c : it.unimi.dsi.fastutil.HashCommon.murmurHash3( System.identityHashCode(x) ) ) #define KEY2LONGHASH(x) ( (x) == null ? 0x810879608e4259ccL : it.unimi.dsi.fastutil.HashCommon.murmurHash3( (long)System.identityHashCode(x) ) ) #endif #define KEY_CMP(x,y) ( ((Comparable<KEY_GENERIC_CLASS>)(x)).compareTo(y) ) #define KEY_CMP_EQ(x,y) ( ((Comparable<KEY_GENERIC_CLASS>)(x)).compareTo(y) == 0 ) #define KEY_LESS(x,y) ( ((Comparable<KEY_GENERIC_CLASS>)(x)).compareTo(y) < 0 ) #define KEY_LESSEQ(x,y) ( ((Comparable<KEY_GENERIC_CLASS>)(x)).compareTo(y) <= 0 ) #define KEY_NULL (null) #else /* Primitive-type-only definitions (keys) */ #define REMOVE rem #define KEY_CLASS2TYPE(x) ((x).KEY_VALUE()) #define KEY_OBJ2TYPE(x) (KEY_CLASS2TYPE((KEY_CLASS)(x))) #define KEY2OBJ(x) (KEY_CLASS.valueOf(x)) #if #keyclass(Boolean) #define KEY_CMP_EQ(x,y) ( (x) == (y) ) #define KEY_NULL (false) #define KEY_CMP(x,y) ( !(x) && (y) ? -1 : ( (x) == (y) ? 0 : 1 ) ) #define KEY_LESS(x,y) ( !(x) && (y) ) #define KEY_LESSEQ(x,y) ( !(x) || (y) ) #else #define KEY_NULL ((KEY_TYPE)0) #if #keyclass(Float) || #keyclass(Double) #define KEY_CMP_EQ(x,y) ( KEY_CLASS.compare((x),(y)) == 0 ) #define KEY_CMP(x,y) ( KEY_CLASS.compare((x),(y)) ) #define KEY_LESS(x,y) ( KEY_CLASS.compare((x),(y)) < 0 ) #define KEY_LESSEQ(x,y) ( KEY_CLASS.compare((x),(y)) <= 0 ) #else #define KEY_CMP_EQ(x,y) ( (x) == (y) ) #define KEY_CMP(x,y) ( (x) < (y) ? -1 : ( (x) == (y) ? 
0 : 1 ) ) #define KEY_LESS(x,y) ( (x) < (y) ) #define KEY_LESSEQ(x,y) ( (x) <= (y) ) #endif #if #keyclass(Float) #define KEY2LEXINT(x) fixFloat(x) #elif #keyclass(Double) #define KEY2LEXINT(x) fixDouble(x) #else #define KEY2LEXINT(x) (x) #endif #endif #ifdef Custom #define KEY2JAVAHASH(x) ( strategy.hashCode(x) ) #define KEY2INTHASH(x) ( it.unimi.dsi.fastutil.HashCommon.murmurHash3( strategy.hashCode(x) ) ) #define KEY2LONGHASH(x) ( it.unimi.dsi.fastutil.HashCommon.murmurHash3( (long)strategy.hashCode(x) ) ) #else #if #keyclass(Float) #define KEY2JAVAHASH(x) it.unimi.dsi.fastutil.HashCommon.float2int(x) #define KEY2INTHASH(x) it.unimi.dsi.fastutil.HashCommon.murmurHash3( it.unimi.dsi.fastutil.HashCommon.float2int(x) ) #define KEY2LONGHASH(x) it.unimi.dsi.fastutil.HashCommon.murmurHash3( (long)it.unimi.dsi.fastutil.HashCommon.float2int(x) ) #elif #keyclass(Double) #define KEY2JAVAHASH(x) it.unimi.dsi.fastutil.HashCommon.double2int(x) #define KEY2INTHASH(x) (int)it.unimi.dsi.fastutil.HashCommon.murmurHash3(Double.doubleToRawLongBits(x)) #define KEY2LONGHASH(x) it.unimi.dsi.fastutil.HashCommon.murmurHash3(Double.doubleToRawLongBits(x)) #elif #keyclass(Long) #define KEY2JAVAHASH(x) it.unimi.dsi.fastutil.HashCommon.long2int(x) #define KEY2INTHASH(x) (int)it.unimi.dsi.fastutil.HashCommon.murmurHash3(x) #define KEY2LONGHASH(x) it.unimi.dsi.fastutil.HashCommon.murmurHash3(x) #elif #keyclass(Boolean) #define KEY2JAVAHASH(x) ((x) ? 1231 : 1237) #define KEY2INTHASH(x) ((x) ? 0xfab5368 : 0xcba05e7b) #define KEY2LONGHASH(x) ((x) ? 0x74a19fc8b6428188L : 0xbaeca2031a4fd9ecL) #else #define KEY2JAVAHASH(x) (x) #define KEY2INTHASH(x) ( it.unimi.dsi.fastutil.HashCommon.murmurHash3( (x) ) ) #define KEY2LONGHASH(x) ( it.unimi.dsi.fastutil.HashCommon.murmurHash3( (long)(x) ) ) #endif #endif #endif /* Object/Reference-only definitions (values) */ #if #valueclass(Object) || #valueclass(Reference) #define VALUE_OBJ2TYPE(x) (x) #define VALUE_CLASS2TYPE(x) (x) #define VALUE2OBJ(x) (x) #if #valueclass(Object) #define VALUE2JAVAHASH(x) ( (x) == null ? 0 : (x).hashCode() ) #else #define VALUE2JAVAHASH(x) ( (x) == null ? 0 : System.identityHashCode(x) ) #endif #define VALUE_NULL (null) #define OBJECT_DEFAULT_RETURN_VALUE (this.defRetValue) #else /* Primitive-type-only definitions (values) */ #define VALUE_CLASS2TYPE(x) ((x).VALUE_VALUE()) #define VALUE_OBJ2TYPE(x) (VALUE_CLASS2TYPE((VALUE_CLASS)(x))) #define VALUE2OBJ(x) (VALUE_CLASS.valueOf(x)) #if #valueclass(Float) || #valueclass(Double) || #valueclass(Long) #define VALUE_NULL (0) #define VALUE2JAVAHASH(x) it.unimi.dsi.fastutil.HashCommon.Object2int(x) #elif #valueclass(Boolean) #define VALUE_NULL (false) #define VALUE2JAVAHASH(x) (x ? 1231 : 1237) #else #if #valueclass(Integer) #define VALUE_NULL (0) #else #define VALUE_NULL ((VALUE_TYPE)0) #endif #define VALUE2JAVAHASH(x) (x) #endif #define OBJECT_DEFAULT_RETURN_VALUE (null) #endif #include "drv/HeapIndirectPriorityQueue.drv"
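/*
 * Illustrative sketch, not part of this driver file: for the double key type
 * above, KEY2INTHASH() hashes the raw IEEE-754 bits of the key through
 * HashCommon.murmurHash3(long) and truncates to int.  Assuming that
 * murmurHash3(long) is the standard MurmurHash3 64-bit finaliser (an
 * assumption, not stated in this file), the same value can be reproduced in C
 * as below; demo_double_key_hash is a made-up name.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* MurmurHash3 64-bit finaliser (fmix64). */
static uint64_t fmix64(uint64_t x)
{
    x ^= x >> 33;
    x *= 0xff51afd7ed558ccdULL;
    x ^= x >> 33;
    x *= 0xc4ceb9fe1a85ec53ULL;
    x ^= x >> 33;
    return x;
}

static int32_t demo_double_key_hash(double k)
{
    uint64_t bits;

    memcpy(&bits, &k, sizeof bits);          /* like Double.doubleToRawLongBits() */
    return (int32_t)(uint32_t)fmix64(bits);  /* truncate, like the (int) cast     */
}

int main(void)
{
    printf("%ld %ld\n", (long)demo_double_key_hash(1.0),
           (long)demo_double_key_hash(2.0));
    return 0;
}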
721219.c
/** ****************************************************************************** * @file stm32f4xx_crc.c * @author MCD Application Team * @version V1.8.0 * @date 04-November-2016 * @brief This file provides all the CRC firmware functions. ****************************************************************************** * @attention * * <h2><center>&copy; COPYRIGHT 2016 STMicroelectronics</center></h2> * * Licensed under MCD-ST Liberty SW License Agreement V2, (the "License"); * You may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.st.com/software_license_agreement_liberty_v2 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ #include "stm32f4xx_crc.h" /** @addtogroup STM32F4xx_StdPeriph_Driver * @{ */ /** @defgroup CRC * @brief CRC driver modules * @{ */ /* Private typedef -----------------------------------------------------------*/ /* Private define ------------------------------------------------------------*/ /* Private macro -------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /* Private function prototypes -----------------------------------------------*/ /* Private functions ---------------------------------------------------------*/ /** @defgroup CRC_Private_Functions * @{ */ /** * @brief Resets the CRC Data register (DR). * @param None * @retval None */ void CRC_ResetDR(void) { /* Reset CRC generator */ CRC->CR = CRC_CR_RESET; } /** * @brief Computes the 32-bit CRC of a given data word(32-bit). * @param Data: data word(32-bit) to compute its CRC * @retval 32-bit CRC */ uint32_t CRC_CalcCRC(uint32_t Data) { CRC->DR = Data; return (CRC->DR); } /** * @brief Computes the 32-bit CRC of a given buffer of data word(32-bit). * @param pBuffer: pointer to the buffer containing the data to be computed * @param BufferLength: length of the buffer to be computed * @retval 32-bit CRC */ uint32_t CRC_CalcBlockCRC(uint32_t pBuffer[], uint32_t BufferLength) { uint32_t index = 0; for(index = 0; index < BufferLength; index++) { CRC->DR = pBuffer[index]; } return (CRC->DR); } /** * @brief Returns the current CRC value. * @param None * @retval 32-bit CRC */ uint32_t CRC_GetCRC(void) { return (CRC->DR); } /** * @brief Stores a 8-bit data in the Independent Data(ID) register. * @param IDValue: 8-bit value to be stored in the ID register * @retval None */ void CRC_SetIDRegister(uint8_t IDValue) { CRC->IDR = IDValue; } /** * @brief Returns the 8-bit data stored in the Independent Data(ID) register * @param None * @retval 8-bit value of the ID register */ uint8_t CRC_GetIDRegister(void) { return (CRC->IDR); } /** * @} */ /** * @} */ /** * @} */ /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
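/*
 * Off-target model sketch, not part of the ST driver: the F4's CRC unit is
 * generally described as computing the CRC-32 of 32-bit words with polynomial
 * 0x04C11DB7, an initial value of 0xFFFFFFFF after CRC_ResetDR(), and no bit
 * reflection or final XOR (the MPEG-2 variant).  Under that assumption, the
 * functions below predict what CRC_CalcCRC()/CRC_CalcBlockCRC() return, which
 * is useful for host-side unit tests; they are a sketch, not ST reference code.
 */
#include <stdint.h>
#include <stdio.h>

/* One 32-bit word through the generator polynomial 0x04C11DB7, MSB first. */
static uint32_t soft_crc32_word(uint32_t crc, uint32_t data)
{
    crc ^= data;
    for (int i = 0; i < 32; i++)
    {
        crc = (crc & 0x80000000U) ? (crc << 1) ^ 0x04C11DB7U : (crc << 1);
    }
    return crc;
}

/* Whole-buffer model of CRC_ResetDR() followed by CRC_CalcBlockCRC(). */
static uint32_t soft_crc32_block(const uint32_t *buf, uint32_t len)
{
    uint32_t crc = 0xFFFFFFFFU;   /* DR value after CRC_ResetDR() */

    for (uint32_t i = 0; i < len; i++)
    {
        crc = soft_crc32_word(crc, buf[i]);
    }
    return crc;
}

int main(void)
{
    uint32_t data[] = { 0x12345678U, 0x9ABCDEF0U };

    printf("predicted CRC: 0x%08lX\n", (unsigned long)soft_crc32_block(data, 2));
    return 0;
}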
168863.c
/* * FreeRTOS V202111.00 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * http://www.FreeRTOS.org * http://aws.amazon.com/freertos * * 1 tab == 4 spaces! */ /* FreeRTOS includes. */ #include "FreeRTOS.h" #include "task.h" /* Xilinx includes. */ #include "platform.h" #include "xttcps.h" #include "xscugic.h" /* Timer used to generate the tick interrupt. */ static XTtcPs xRTOSTickTimerInstance; /*-----------------------------------------------------------*/ void vConfigureTickInterrupt( void ) { BaseType_t xStatus; XTtcPs_Config *pxTimerConfiguration; XInterval usInterval; uint8_t ucPrescale; const uint8_t ucLevelSensitive = 1; extern XScuGic xInterruptController; pxTimerConfiguration = XTtcPs_LookupConfig( XPAR_XTTCPS_3_DEVICE_ID ); /* Initialise the device. */ xStatus = XTtcPs_CfgInitialize( &xRTOSTickTimerInstance, pxTimerConfiguration, pxTimerConfiguration->BaseAddress ); if( xStatus != XST_SUCCESS ) { /* Not sure how to do this before XTtcPs_CfgInitialize is called as *xRTOSTickTimerInstance is set within XTtcPs_CfgInitialize(). */ XTtcPs_Stop( &xRTOSTickTimerInstance ); xStatus = XTtcPs_CfgInitialize( &xRTOSTickTimerInstance, pxTimerConfiguration, pxTimerConfiguration->BaseAddress ); configASSERT( xStatus == XST_SUCCESS ); } /* Set the options. */ XTtcPs_SetOptions( &xRTOSTickTimerInstance, ( XTTCPS_OPTION_INTERVAL_MODE | XTTCPS_OPTION_WAVE_DISABLE ) ); /* Derive values from the tick rate. */ XTtcPs_CalcIntervalFromFreq( &xRTOSTickTimerInstance, configTICK_RATE_HZ, &( usInterval ), &( ucPrescale ) ); /* Set the interval and prescale. */ XTtcPs_SetInterval( &xRTOSTickTimerInstance, usInterval ); XTtcPs_SetPrescaler( &xRTOSTickTimerInstance, ucPrescale ); /* The priority must be the lowest possible. */ XScuGic_SetPriorityTriggerType( &xInterruptController, XPAR_XTTCPS_3_INTR, portLOWEST_USABLE_INTERRUPT_PRIORITY << portPRIORITY_SHIFT, ucLevelSensitive ); /* Connect to the interrupt controller. */ xStatus = XScuGic_Connect( &xInterruptController, XPAR_XTTCPS_3_INTR, (Xil_ExceptionHandler) FreeRTOS_Tick_Handler, ( void * ) &xRTOSTickTimerInstance ); configASSERT( xStatus == XST_SUCCESS); /* Enable the interrupt in the GIC. */ XScuGic_Enable( &xInterruptController, XPAR_XTTCPS_3_INTR ); /* Enable the interrupts in the timer. */ XTtcPs_EnableInterrupts( &xRTOSTickTimerInstance, XTTCPS_IXR_INTERVAL_MASK ); /* Start the timer. 
*/ XTtcPs_Start( &xRTOSTickTimerInstance ); } /*-----------------------------------------------------------*/ void vClearTickInterrupt( void ) { volatile uint32_t ulInterruptStatus; /* Read the interrupt status, then write it back to clear the interrupt. */ ulInterruptStatus = XTtcPs_GetInterruptStatus( &xRTOSTickTimerInstance ); XTtcPs_ClearInterruptStatus( &xRTOSTickTimerInstance, ulInterruptStatus ); __asm volatile( "DSB SY" ); __asm volatile( "ISB SY" ); } /*-----------------------------------------------------------*/ void vApplicationIRQHandler( uint32_t ulICCIAR ) { extern const XScuGic_Config XScuGic_ConfigTable[]; static const XScuGic_VectorTableEntry *pxVectorTable = XScuGic_ConfigTable[ XPAR_SCUGIC_SINGLE_DEVICE_ID ].HandlerTable; uint32_t ulInterruptID; const XScuGic_VectorTableEntry *pxVectorEntry; /* Interrupts cannot be re-enabled until the source of the interrupt is cleared. The ID of the interrupt is obtained by bitwise ANDing the ICCIAR value with 0x3FF. */ ulInterruptID = ulICCIAR & 0x3FFUL; if( ulInterruptID < XSCUGIC_MAX_NUM_INTR_INPUTS ) { /* Call the function installed in the array of installed handler functions. */ pxVectorEntry = &( pxVectorTable[ ulInterruptID ] ); configASSERT( pxVectorEntry ); pxVectorEntry->Handler( pxVectorEntry->CallBackRef ); } }
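/*
 * Illustrative sketch, not part of the original port layer: handlers
 * registered with XScuGic_Connect() are dispatched by vApplicationIRQHandler()
 * above through the GIC vector table, so they must have the
 * Xil_ExceptionHandler shape -- a void function taking the CallBackRef passed
 * to XScuGic_Connect().  The interrupt ID (demoIRQ_ID), the handler and the
 * install function below are hypothetical placeholders that only reuse calls
 * already made in this file.
 */
#define demoIRQ_ID		( 61UL )	/* hypothetical peripheral interrupt ID */

static void prvDemoPeripheralHandler( void *pvCallBackRef )
{
	/* pvCallBackRef is whatever was passed to XScuGic_Connect(). */
	( void ) pvCallBackRef;

	/* Clear the peripheral's interrupt source here, keep the ISR short and
	defer longer work to a task. */
}

static void prvDemoInstallHandler( void )
{
BaseType_t xStatus;
extern XScuGic xInterruptController;

	/* Same connect/enable pattern as used for the tick timer above. */
	xStatus = XScuGic_Connect( &xInterruptController, demoIRQ_ID,
							   ( Xil_ExceptionHandler ) prvDemoPeripheralHandler,
							   NULL );
	configASSERT( xStatus == XST_SUCCESS );
	XScuGic_Enable( &xInterruptController, demoIRQ_ID );
}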
387273.c
/****************************************************************************

NAME:
   packet.c -- a packet-sniffing engine for reading from GPS devices

DESCRIPTION:

Initial conditions of the problem:

1. We have a file descriptor open for (possibly non-blocking) read.  The
   device on the other end is sending packets at us.

2. It may require more than one read to gather a packet.  Reads may span
   packet boundaries.

3. There may be leading garbage before the first packet.  After the first
   start-of-packet, the input should be well-formed.

The problem: how do we recognize which kind of packet we're getting?

No need to handle Garmin USB binary, we know that type by the fact we're
connected to the Garmin kernel driver.  But we need to be able to tell the
others apart and distinguish them from baud barf.

PERMISSIONS
   This file is Copyright (c) 2010 by the GPSD project
   BSD terms apply: see the file COPYING in the distribution root for details.

***************************************************************************/
#include <sys/types.h>
#include <stdio.h>
#include <stdbool.h>
#include <ctype.h>
#include <string.h>
#include <errno.h>
#include <netinet/in.h>
#include <arpa/inet.h>		/* for htons() */
#include <unistd.h>

#include "bits.h"
#include "gpsd.h"
#include "crc24q.h"
#include "strfuncs.h"

/*
 * The packet-recognition state machine.  This takes an incoming byte stream
 * and tries to segment it into packets.  There are five types of packets:
 *
 * 1) Comments.  These begin with # and end with \r\n.
 *
 * 2) NMEA lines.  These begin with $, end with \r\n, and have a checksum.
 *
 * 3) Checksummed binary packets.  These begin with some fixed leader
 *    character(s), have a length embedded in them, and end with a
 *    checksum (and possibly some fixed trailing bytes).
 *
 * 4) ISGPS packets.  The input may be a bitstream containing IS-GPS-200
 *    packets.  Each includes a fixed leader byte, a length, and check bits.
 *    In this case, it is not guaranteed that packet starts begin on byte
 *    boundaries; the recognizer has to run a separate state machine against
 *    each byte just to achieve synchronization lock with the bitstream.
 *
 * 5) Un-checksummed binary packets.  Like case 3, but without
 *    a checksum it's much easier to get a false match from garbage.
 *    The packet recognizer gives checksummed types higher priority.
 *
 * Adding support for a new GPS protocol typically requires adding state
 * transitions to support whatever binary packet structure it has.  The
 * goal is for the lexer to be able to cope with arbitrarily mixed packet
 * types on the input stream.  This is a requirement because (a) sometimes
 * gpsd wants to switch a device that supports both NMEA and a binary
 * packet protocol to the latter for more detailed reporting, and (b) in
 * the presence of device hotplugging, the type of GPS report coming
 * in is subject to change at any time.
 *
 * Caller should consume a packet when it sees one of the *_RECOGNIZED
 * states.  It's good practice to follow the _RECOGNIZED transition
 * with one that recognizes a leader of the same packet type rather
 * than dropping back to ground state -- this for example will prevent
 * the state machine from hopping between recognizing TSIP and
 * EverMore packets that both start with a DLE.
 *
 * Error handling is brutally simple; any time we see an unexpected
 * character, go to GROUND_STATE and reset the machine (except that a
 * $ in an NMEA payload only resets back to NMEA_DOLLAR state).  Because
 * another good packet will usually be along in less than a second
 * repeating the same data, Boyer-Moore-like attempts to do parallel
 * recognition beyond the headers would make no sense in this
 * application; they'd just add complexity.
 *
 * The NMEA portion of the state machine allows the following talker IDs:
 *      $GP -- Global Positioning System.
 *      $GL -- GLONASS, according to IEIC 61162-1
 *      $GN -- Mixed GPS and GLONASS data, according to IEIC 61162-1
 *      $BD -- Beidou
 *      $GB -- Beidou
 *      $QZ -- QZSS GPS augmentation system
 *      $II -- Integrated Instrumentation (Raytheon's SeaTalk system).
 *      $IN -- Integrated Navigation (Garmin uses this).
 *      $WI -- Weather instrument (Airmar PB200, Radio Ocean ROWIND, Vaisala WXT520).
 *      $HC -- Heading/compass (Airmar PB200).
 *      $TI -- Turn indicator (Airmar PB200).
 *      $EC -- Electronic Chart Display & Information System (ECDIS)
 *      $SD -- Depth Sounder
 *      $YX -- Transducer (used by some Airmar equipment including PB100)
 *      $P  -- Vendor-specific sentence
 *
 *      !AB -- NMEA 4.0 Base AIS station
 *      !AD -- NMEA 4.0 Dependent AIS Base Station
 *      !AI -- Mobile AIS station
 *      !AN -- NMEA 4.0 Aid to Navigation AIS station
 *      !AR -- NMEA 4.0 AIS Receiving Station
 *      !AX -- NMEA 4.0 Repeater AIS station
 *      !AS -- NMEA 4.0 Limited Base Station
 *      !AT -- NMEA 4.0 AIS Transmitting Station
 *      !BS -- Base AIS station (deprecated in NMEA 4.0)
 *      !SA -- NMEA 4.0 Physical Shore AIS Station
 */

enum
{
#include "packet_states.h"
};

static char *state_table[] = {
#include "packet_names.h"
};

#define SOH	(unsigned char)0x01
#define DLE	(unsigned char)0x10
#define STX	(unsigned char)0x02
#define ETX	(unsigned char)0x03

#ifdef ONCORE_ENABLE
static size_t oncore_payload_cksum_length(unsigned char id1, unsigned char id2)
{
    size_t l;

    /* For the packet sniffer to not terminate the message due to
     * payload data looking like a trailer, the known payload lengths
     * including the checksum are given.  Return 0 for unknown IDs.
     */
#define ONCTYPE(id2,id3) ((((unsigned int)id2)<<8)|(id3))

    /* *INDENT-OFF* */
    switch (ONCTYPE(id1,id2)) {
    case ONCTYPE('A','b'): l = 10; break;  /* GMT offset */
    case ONCTYPE('A','w'): l = 8; break;   /* time mode */
    case ONCTYPE('A','c'): l = 11; break;  /* date */
    case ONCTYPE('A','a'): l = 10; break;  /* time of day */
    case ONCTYPE('A','d'): l = 11; break;  /* latitude */
    case ONCTYPE('A','e'): l = 11; break;  /* longitude */
    case ONCTYPE('A','f'): l = 15; break;  /* height */
    case ONCTYPE('E','a'): l = 76; break;  /* position/status/data */
    case ONCTYPE('A','g'): l = 8; break;   /* satellite mask angle */
    case ONCTYPE('B','b'): l = 92; break;  /* visible satellites status */
    case ONCTYPE('B','j'): l = 8; break;   /* leap seconds pending */
    case ONCTYPE('A','q'): l = 8; break;   /* atmospheric correction mode */
    case ONCTYPE('A','p'): l = 25; break;  /* set user datum / select datum */
    /* Command "Ao" gives "Ap" response (select datum) */
    case ONCTYPE('C','h'): l = 9; break;   /* almanac input ("Cb" response) */
    case ONCTYPE('C','b'): l = 33; break;  /* almanac output ("Be" response) */
    case ONCTYPE('S','z'): l = 8; break;   /* system power-on failure */
    case ONCTYPE('C','j'): l = 294; break; /* receiver ID */
    case ONCTYPE('F','a'): l = 9; break;   /* self-test */
    case ONCTYPE('C','f'): l = 7; break;   /* set-to-defaults */
    case ONCTYPE('E','q'): l = 96; break;  /* ASCII position */
    case ONCTYPE('A','u'): l = 12; break;  /* altitude hold height */
    case ONCTYPE('A','v'): l = 8; break;   /* altitude hold mode */
    case ONCTYPE('A','N'): l = 8; break;   /* velocity filter */
    case ONCTYPE('A','O'): l = 8; break;   /* RTCM report mode */
    case ONCTYPE('C','c'): l = 80; break;  /* ephemeris data input ("Bf") */
    case ONCTYPE('C','k'): l = 7; break;   /* pseudorng correction inp. ("Ce")*/
    /* Command "Ci" (switch to NMEA, GT versions only) has no response */
    case ONCTYPE('B','o'): l = 8; break;   /* UTC offset status */
    case ONCTYPE('A','z'): l = 11; break;  /* 1PPS cable delay */
    case ONCTYPE('A','y'): l = 11; break;  /* 1PPS offset */
    case ONCTYPE('A','P'): l = 8; break;   /* pulse mode */
    case ONCTYPE('A','s'): l = 20; break;  /* position-hold position */
    case ONCTYPE('A','t'): l = 8; break;   /* position-hold mode */
    case ONCTYPE('E','n'): l = 69; break;  /* time RAIM setup and status */
    default:
	return 0;
    }
    /* *INDENT-ON* */

    return l - 6;		/* Subtract header and trailer. */
}
#endif /* ONCORE_ENABLE */

static bool character_pushback(struct gps_lexer_t *lexer, unsigned int newstate)
/* push back the last character grabbed, setting a specified state */
{
    --lexer->inbufptr;
    --lexer->char_counter;
    lexer->state = newstate;

    if (lexer->errout.debug >= LOG_RAW + 2) {
	unsigned char c = *lexer->inbufptr;

	gpsd_log(&lexer->errout, LOG_RAW + 2,
		 "%08ld: character '%c' [%02x] pushed back, state set to %s\n",
		 lexer->char_counter, (isprint((int)c) ?
c : '.'), c, state_table[lexer->state]); } return false; } static bool nextstate(struct gps_lexer_t *lexer, unsigned char c) { static int n = 0; #ifdef RTCM104V2_ENABLE enum isgpsstat_t isgpsstat; #endif /* RTCM104V2_ENABLE */ #ifdef SUPERSTAR2_ENABLE static unsigned char ctmp; #endif /* SUPERSTAR2_ENABLE */ n++; switch (lexer->state) { case GROUND_STATE: n = 0; #ifdef STASH_ENABLE lexer->stashbuflen = 0; #endif if (c == '#') { lexer->state = COMMENT_BODY; break; } #ifdef NMEA0183_ENABLE if (c == '$') { lexer->state = NMEA_DOLLAR; break; } if (c == '!') { lexer->state = NMEA_BANG; break; } #endif /* NMEA0183_ENABLE */ #if defined(TNT_ENABLE) || defined(GARMINTXT_ENABLE) || defined(ONCORE_ENABLE) if (c == '@') { #ifdef RTCM104V2_ENABLE if (rtcm2_decode(lexer, c) == ISGPS_MESSAGE) { lexer->state = RTCM2_RECOGNIZED; break; } #endif /* RTCM104V2_ENABLE */ lexer->state = AT1_LEADER; break; } #endif #ifdef SIRF_ENABLE if (c == 0xa0) { lexer->state = SIRF_LEADER_1; break; } #endif /* SIRF_ENABLE */ #ifdef SUPERSTAR2_ENABLE if (c == SOH) { lexer->state = SUPERSTAR2_LEADER; break; } #endif /* SUPERSTAR2_ENABLE */ #if defined(TSIP_ENABLE) || defined(EVERMORE_ENABLE) || defined(GARMIN_ENABLE) if (c == DLE) { lexer->state = DLE_LEADER; break; } #endif /* TSIP_ENABLE || EVERMORE_ENABLE || GARMIN_ENABLE */ #ifdef TRIPMATE_ENABLE if (c == 'A') { #ifdef RTCM104V2_ENABLE if (rtcm2_decode(lexer, c) == ISGPS_MESSAGE) { lexer->state = RTCM2_RECOGNIZED; break; } #endif /* RTCM104V2_ENABLE */ lexer->state = ASTRAL_1; break; } #endif /* TRIPMATE_ENABLE */ #ifdef EARTHMATE_ENABLE if (c == 'E') { #ifdef RTCM104V2_ENABLE if (rtcm2_decode(lexer, c) == ISGPS_MESSAGE) { lexer->state = RTCM2_RECOGNIZED; break; } #endif /* RTCM104V2_ENABLE */ lexer->state = EARTHA_1; break; } #endif /* EARTHMATE_ENABLE */ #ifdef ZODIAC_ENABLE if (c == 0xff) { lexer->state = ZODIAC_LEADER_1; break; } #endif /* ZODIAC_ENABLE */ #ifdef UBLOX_ENABLE if (c == 0xb5) { lexer->state = UBX_LEADER_1; break; } #endif /* UBLOX_ENABLE */ #ifdef ITRAX_ENABLE if (c == '<') { lexer->state = ITALK_LEADER_1; break; } #endif /* ITRAX_ENABLE */ #ifdef NAVCOM_ENABLE if (c == 0x02) { lexer->state = NAVCOM_LEADER_1; break; } #endif /* NAVCOM_ENABLE */ #ifdef GEOSTAR_ENABLE if (c == 'P') { lexer->state = GEOSTAR_LEADER_1; break; } #endif /* GEOSTAR_ENABLE */ #ifdef RTCM104V2_ENABLE if ((isgpsstat = rtcm2_decode(lexer, c)) == ISGPS_SYNC) { lexer->state = RTCM2_SYNC_STATE; break; } else if (isgpsstat == ISGPS_MESSAGE) { lexer->state = RTCM2_RECOGNIZED; break; } #endif /* RTCM104V2_ENABLE */ #ifdef RTCM104V3_ENABLE if (c == 0xD3) { lexer->state = RTCM3_LEADER_1; break; } #endif /* RTCM104V3_ENABLE */ #ifdef PASSTHROUGH_ENABLE if (c == '{') return character_pushback(lexer, JSON_LEADER); #endif /* PASSTHROUGH_ENABLE */ break; case COMMENT_BODY: if (c == '\n') lexer->state = COMMENT_RECOGNIZED; else if (!isprint(c)) return character_pushback(lexer, GROUND_STATE); break; #ifdef NMEA0183_ENABLE case NMEA_DOLLAR: if (c == 'G') lexer->state = NMEA_PUB_LEAD; else if (c == 'P') /* vendor sentence */ lexer->state = NMEA_VENDOR_LEAD; else if (c == 'I') /* Seatalk */ lexer->state = SEATALK_LEAD_1; else if (c == 'W') /* Weather instrument */ lexer->state = WEATHER_LEAD_1; else if (c == 'H') /* Heading/compass */ lexer->state = HEADCOMP_LEAD_1; else if (c == 'T') /* Turn indicator */ lexer->state = TURN_LEAD_1; else if (c == 'A') /* SiRF Ack */ lexer->state = SIRF_ACK_LEAD_1; else if (c == 'E') /* ECDIS */ lexer->state = ECDIS_LEAD_1; else if (c == 'S') lexer->state = 
SOUNDER_LEAD_1; else if (c == 'Y') lexer->state = TRANSDUCER_LEAD_1; else if (c == 'B') lexer->state = BEIDOU_LEAD_1; else if (c == 'Q') lexer->state = QZSS_LEAD_1; #ifdef OCEANSERVER_ENABLE else if (c == 'C') lexer->state = NMEA_LEADER_END; #endif /* OCEANSERVER_ENABLE */ else (void) character_pushback(lexer, GROUND_STATE); break; case NMEA_PUB_LEAD: /* * $GP == GPS, $GL = GLONASS only, $GN = mixed GPS and GLONASS, * according to NMEA (IEIC 61162-1) DRAFT 02/06/2009. * We have a log from China with a Beidou device using $GB * rather than $BD. */ if (c == 'B' || c == 'P' || c == 'N' || c == 'L') lexer->state = NMEA_LEADER_END; else (void) character_pushback(lexer, GROUND_STATE); break; case NMEA_VENDOR_LEAD: if (c == 'A') lexer->state = NMEA_PASHR_A; else if (isalpha(c)) lexer->state = NMEA_LEADER_END; else (void) character_pushback(lexer, GROUND_STATE); break; /* * Without the following six states, DLE in a $PASHR can fool the * sniffer into thinking it sees a TSIP packet. Hilarity ensues. */ case NMEA_PASHR_A: if (c == 'S') lexer->state = NMEA_PASHR_S; else if (isalpha(c)) lexer->state = NMEA_LEADER_END; else (void) character_pushback(lexer, GROUND_STATE); break; case NMEA_PASHR_S: if (c == 'H') lexer->state = NMEA_PASHR_H; else if (isalpha(c)) lexer->state = NMEA_LEADER_END; else (void) character_pushback(lexer, GROUND_STATE); break; case NMEA_PASHR_H: if (c == 'R') lexer->state = NMEA_BINARY_BODY; else if (isalpha(c)) lexer->state = NMEA_LEADER_END; else (void) character_pushback(lexer, GROUND_STATE); break; case NMEA_BINARY_BODY: if (c == '\r') lexer->state = NMEA_BINARY_CR; break; case NMEA_BINARY_CR: if (c == '\n') lexer->state = NMEA_BINARY_NL; else lexer->state = NMEA_BINARY_BODY; break; case NMEA_BINARY_NL: if (c == '$') (void) character_pushback(lexer, NMEA_RECOGNIZED); else lexer->state = NMEA_BINARY_BODY; break; case NMEA_BANG: if (c == 'A') lexer->state = AIS_LEAD_1; else if (c == 'B') lexer->state = AIS_LEAD_ALT1; else if (c == 'S') lexer->state = AIS_LEAD_ALT3; else return character_pushback(lexer, GROUND_STATE); break; case AIS_LEAD_1: if (strchr("BDINRSTX", c) != NULL) lexer->state = AIS_LEAD_2; else return character_pushback(lexer, GROUND_STATE); break; case AIS_LEAD_2: if (isalpha(c)) lexer->state = NMEA_LEADER_END; else return character_pushback(lexer, GROUND_STATE); break; case AIS_LEAD_ALT1: if (c == 'S') lexer->state = AIS_LEAD_ALT2; else return character_pushback(lexer, GROUND_STATE); break; case AIS_LEAD_ALT2: if (isalpha(c)) lexer->state = NMEA_LEADER_END; else return character_pushback(lexer, GROUND_STATE); break; case AIS_LEAD_ALT3: if (c == 'A') lexer->state = AIS_LEAD_ALT4; else return character_pushback(lexer, GROUND_STATE); break; case AIS_LEAD_ALT4: if (isalpha(c)) lexer->state = NMEA_LEADER_END; else return character_pushback(lexer, GROUND_STATE); break; #if defined(TNT_ENABLE) || defined(GARMINTXT_ENABLE) || defined(ONCORE_ENABLE) case AT1_LEADER: switch (c) { #ifdef ONCORE_ENABLE case '@': lexer->state = ONCORE_AT2; break; #endif /* ONCORE_ENABLE */ #ifdef TNT_ENABLE case '*': /* * TNT has similar structure to NMEA packet, '*' before * optional checksum ends the packet. Since '*' cannot be * received from GARMIN working in TEXT mode, use this * difference to tell that this is not GARMIN TEXT packet, * could be TNT. 
*/ lexer->state = NMEA_LEADER_END; break; #endif /* TNT_ENABLE */ #if defined(GARMINTXT_ENABLE) case '\r': /* stay in this state, next character should be '\n' */ /* in the theory we can stop search here and don't wait for '\n' */ lexer->state = AT1_LEADER; break; case '\n': /* end of packet found */ lexer->state = GTXT_RECOGNIZED; break; #endif /* GARMINTXT_ENABLE */ default: if (!isprint(c)) return character_pushback(lexer, GROUND_STATE); } break; #endif /* defined(TNT_ENABLE) || defined(GARMINTXT_ENABLE) || defined(ONCORE_ENABLE) */ case NMEA_LEADER_END: if (c == '\r') lexer->state = NMEA_CR; else if (c == '\n') /* not strictly correct, but helps for interpreting logfiles */ lexer->state = NMEA_RECOGNIZED; else if (c == '$') { #ifdef STASH_ENABLE (void) character_pushback(lexer, STASH_RECOGNIZED); #else (void) character_pushback(lexer, GROUND_STATE); #endif } else if (!isprint(c)) (void) character_pushback(lexer, GROUND_STATE); break; case NMEA_CR: if (c == '\n') lexer->state = NMEA_RECOGNIZED; /* * There's a GPS called a Jackson Labs Firefly-1a that emits \r\r\n * at the end of each sentence. Don't be confused by this. */ else if (c == '\r') lexer->state = NMEA_CR; else (void) character_pushback(lexer, GROUND_STATE); break; case NMEA_RECOGNIZED: if (c == '#') lexer->state = COMMENT_BODY; else if (c == '$') lexer->state = NMEA_DOLLAR; else if (c == '!') lexer->state = NMEA_BANG; #ifdef UBLOX_ENABLE else if (c == 0xb5) /* LEA-5H can and will output NMEA and UBX back to back */ lexer->state = UBX_LEADER_1; #endif #ifdef PASSTHROUGH_ENABLE else if (c == '{') return character_pushback(lexer, JSON_LEADER); #endif /* PASSTHROUGH_ENABLE */ else return character_pushback(lexer, GROUND_STATE); break; case SEATALK_LEAD_1: if (c == 'I' || c == 'N') /* II or IN are accepted */ lexer->state = NMEA_LEADER_END; else return character_pushback(lexer, GROUND_STATE); break; case WEATHER_LEAD_1: if (c == 'I') /* Weather instrument leader accepted */ lexer->state = NMEA_LEADER_END; else return character_pushback(lexer, GROUND_STATE); break; case HEADCOMP_LEAD_1: if (c == 'C') /* Heading/compass leader accepted */ lexer->state = NMEA_LEADER_END; else return character_pushback(lexer, GROUND_STATE); break; case TURN_LEAD_1: if (c == 'I') /* Turn indicator leader accepted */ lexer->state = NMEA_LEADER_END; else return character_pushback(lexer, GROUND_STATE); break; case ECDIS_LEAD_1: if (c == 'C') /* ECDIS leader accepted */ lexer->state = NMEA_LEADER_END; else return character_pushback(lexer, GROUND_STATE); break; case SOUNDER_LEAD_1: if (c == 'D') /* Depth-sounder leader accepted */ lexer->state = NMEA_LEADER_END; else return character_pushback(lexer, GROUND_STATE); break; case TRANSDUCER_LEAD_1: if (c == 'X') /* Transducer leader accepted */ lexer->state = NMEA_LEADER_END; else return character_pushback(lexer, GROUND_STATE); break; case BEIDOU_LEAD_1: if (c == 'D') /* Beidou leader accepted */ lexer->state = NMEA_LEADER_END; else return character_pushback(lexer, GROUND_STATE); break; case QZSS_LEAD_1: if (c == 'Z') /* QZSS leader accepted */ lexer->state = NMEA_LEADER_END; else return character_pushback(lexer, GROUND_STATE); break; #ifdef TRIPMATE_ENABLE case ASTRAL_1: if (c == 'S') { #ifdef RTCM104V2_ENABLE if ((isgpsstat = rtcm2_decode(lexer, c)) == ISGPS_SYNC) { lexer->state = RTCM2_SYNC_STATE; break; } else if (isgpsstat == ISGPS_MESSAGE) { lexer->state = RTCM2_RECOGNIZED; break; } #endif /* RTCM104V2_ENABLE */ lexer->state = ASTRAL_2; } else (void) character_pushback(lexer, GROUND_STATE); break; case 
ASTRAL_2: if (c == 'T') { #ifdef RTCM104V2_ENABLE if ((isgpsstat = rtcm2_decode(lexer, c)) == ISGPS_SYNC) { lexer->state = RTCM2_SYNC_STATE; break; } else if (isgpsstat == ISGPS_MESSAGE) { lexer->state = RTCM2_RECOGNIZED; break; } #endif /* RTCM104V2_ENABLE */ lexer->state = ASTRAL_3; } else (void) character_pushback(lexer, GROUND_STATE); break; case ASTRAL_3: if (c == 'R') { #ifdef RTCM104V2_ENABLE if ((isgpsstat = rtcm2_decode(lexer, c)) == ISGPS_SYNC) { lexer->state = RTCM2_SYNC_STATE; break; } else if (isgpsstat == ISGPS_MESSAGE) { lexer->state = RTCM2_RECOGNIZED; break; } #endif /* RTCM104V2_ENABLE */ lexer->state = ASTRAL_5; } else (void) character_pushback(lexer, GROUND_STATE); break; case ASTRAL_4: if (c == 'A') { #ifdef RTCM104V2_ENABLE if ((isgpsstat = rtcm2_decode(lexer, c)) == ISGPS_SYNC) { lexer->state = RTCM2_SYNC_STATE; break; } else if (isgpsstat == ISGPS_MESSAGE) { lexer->state = RTCM2_RECOGNIZED; break; } #endif /* RTCM104V2_ENABLE */ lexer->state = ASTRAL_2; } else (void) character_pushback(lexer, GROUND_STATE); break; case ASTRAL_5: if (c == 'L') { #ifdef RTCM104V2_ENABLE if ((isgpsstat = rtcm2_decode(lexer, c)) == ISGPS_SYNC) { lexer->state = RTCM2_SYNC_STATE; break; } else if (isgpsstat == ISGPS_MESSAGE) { lexer->state = RTCM2_RECOGNIZED; break; } #endif /* RTCM104V2_ENABLE */ lexer->state = NMEA_RECOGNIZED; } else (void) character_pushback(lexer, GROUND_STATE); break; #endif /* TRIPMATE_ENABLE */ #ifdef EARTHMATE_ENABLE case EARTHA_1: if (c == 'A') { #ifdef RTCM104V2_ENABLE if ((isgpsstat = rtcm2_decode(lexer, c)) == ISGPS_SYNC) { lexer->state = RTCM2_SYNC_STATE; break; } else if (isgpsstat == ISGPS_MESSAGE) { lexer->state = RTCM2_RECOGNIZED; break; } #endif /* RTCM104V2_ENABLE */ lexer->state = EARTHA_2; } else (void) character_pushback(lexer, GROUND_STATE); break; case EARTHA_2: if (c == 'R') { #ifdef RTCM104V2_ENABLE if ((isgpsstat = rtcm2_decode(lexer, c)) == ISGPS_SYNC) { lexer->state = RTCM2_SYNC_STATE; break; } else if (isgpsstat == ISGPS_MESSAGE) { lexer->state = RTCM2_RECOGNIZED; break; } #endif /* RTCM104V2_ENABLE */ lexer->state = EARTHA_3; } else (void) character_pushback(lexer, GROUND_STATE); break; case EARTHA_3: if (c == 'T') { #ifdef RTCM104V2_ENABLE if ((isgpsstat = rtcm2_decode(lexer, c)) == ISGPS_SYNC) { lexer->state = RTCM2_SYNC_STATE; break; } else if (isgpsstat == ISGPS_MESSAGE) { lexer->state = RTCM2_RECOGNIZED; break; } #endif /* RTCM104V2_ENABLE */ lexer->state = EARTHA_4; } else (void) character_pushback(lexer, GROUND_STATE); break; case EARTHA_4: if (c == 'H') { #ifdef RTCM104V2_ENABLE if ((isgpsstat = rtcm2_decode(lexer, c)) == ISGPS_SYNC) { lexer->state = RTCM2_SYNC_STATE; break; } else if (isgpsstat == ISGPS_MESSAGE) { lexer->state = RTCM2_RECOGNIZED; break; } #endif /* RTCM104V2_ENABLE */ lexer->state = EARTHA_5; } else (void) character_pushback(lexer, GROUND_STATE); break; case EARTHA_5: if (c == 'A') { #ifdef RTCM104V2_ENABLE if ((isgpsstat = rtcm2_decode(lexer, c)) == ISGPS_SYNC) { lexer->state = RTCM2_SYNC_STATE; break; } else if (isgpsstat == ISGPS_MESSAGE) { lexer->state = RTCM2_RECOGNIZED; break; } #endif /* RTCM104V2_ENABLE */ lexer->state = NMEA_RECOGNIZED; } else (void) character_pushback(lexer, GROUND_STATE); break; #endif /* EARTHMATE_ENABLE */ case SIRF_ACK_LEAD_1: if (c == 'c') lexer->state = SIRF_ACK_LEAD_2; else if (c == 'I') lexer->state = AIS_LEAD_2; else return character_pushback(lexer, GROUND_STATE); break; case SIRF_ACK_LEAD_2: if (c == 'k') lexer->state = NMEA_LEADER_END; else return character_pushback(lexer, 
GROUND_STATE); break; #endif /* NMEA0183_ENABLE */ #ifdef SIRF_ENABLE case SIRF_LEADER_1: if (c == 0xa2) lexer->state = SIRF_LEADER_2; else return character_pushback(lexer, GROUND_STATE); break; case SIRF_LEADER_2: lexer->length = (size_t) (c << 8); lexer->state = SIRF_LENGTH_1; break; case SIRF_LENGTH_1: lexer->length += c + 2; if (lexer->length <= MAX_PACKET_LENGTH) lexer->state = SIRF_PAYLOAD; else return character_pushback(lexer, GROUND_STATE); break; case SIRF_PAYLOAD: if (--lexer->length == 0) lexer->state = SIRF_DELIVERED; break; case SIRF_DELIVERED: if (c == 0xb0) lexer->state = SIRF_TRAILER_1; else return character_pushback(lexer, GROUND_STATE); break; case SIRF_TRAILER_1: if (c == 0xb3) lexer->state = SIRF_RECOGNIZED; else return character_pushback(lexer, GROUND_STATE); break; case SIRF_RECOGNIZED: if (c == 0xa0) lexer->state = SIRF_LEADER_1; else return character_pushback(lexer, GROUND_STATE); break; #endif /* SIRF_ENABLE */ #ifdef SUPERSTAR2_ENABLE case SUPERSTAR2_LEADER: ctmp = c; lexer->state = SUPERSTAR2_ID1; break; case SUPERSTAR2_ID1: if ((ctmp ^ 0xff) == c) lexer->state = SUPERSTAR2_ID2; else return character_pushback(lexer, GROUND_STATE); break; case SUPERSTAR2_ID2: lexer->length = (size_t) c; /* how many data bytes follow this byte */ if (lexer->length) lexer->state = SUPERSTAR2_PAYLOAD; else lexer->state = SUPERSTAR2_CKSUM1; /* no data, jump to checksum */ break; case SUPERSTAR2_PAYLOAD: if (--lexer->length == 0) lexer->state = SUPERSTAR2_CKSUM1; break; case SUPERSTAR2_CKSUM1: lexer->state = SUPERSTAR2_CKSUM2; break; case SUPERSTAR2_CKSUM2: lexer->state = SUPERSTAR2_RECOGNIZED; break; case SUPERSTAR2_RECOGNIZED: if (c == SOH) lexer->state = SUPERSTAR2_LEADER; else return character_pushback(lexer, GROUND_STATE); break; #endif /* SUPERSTAR2_ENABLE */ #ifdef ONCORE_ENABLE case ONCORE_AT2: if (isupper(c)) { lexer->length = (size_t) c; lexer->state = ONCORE_ID1; } else return character_pushback(lexer, GROUND_STATE); break; case ONCORE_ID1: if (isalpha(c)) { lexer->length = oncore_payload_cksum_length((unsigned char)lexer->length, c); if (lexer->length != 0) { lexer->state = ONCORE_PAYLOAD; break; } } else return character_pushback(lexer, GROUND_STATE); break; case ONCORE_PAYLOAD: if (--lexer->length == 0) lexer->state = ONCORE_CHECKSUM; break; case ONCORE_CHECKSUM: if (c != '\r') return character_pushback(lexer, GROUND_STATE); else lexer->state = ONCORE_CR; break; case ONCORE_CR: if (c == '\n') lexer->state = ONCORE_RECOGNIZED; else lexer->state = ONCORE_PAYLOAD; break; case ONCORE_RECOGNIZED: if (c == '@') lexer->state = AT1_LEADER; else return character_pushback(lexer, GROUND_STATE); break; #endif /* ONCORE_ENABLE */ #if defined(TSIP_ENABLE) || defined(EVERMORE_ENABLE) || defined(GARMIN_ENABLE) case DLE_LEADER: #ifdef EVERMORE_ENABLE if (c == STX) { lexer->state = EVERMORE_LEADER_2; break; } #endif /* EVERMORE_ENABLE */ #if defined(TSIP_ENABLE) || defined(GARMIN_ENABLE) || defined(NAVCOM_ENABLE) /* garmin is special case of TSIP */ /* check last because there's no checksum */ #if defined(TSIP_ENABLE) if (c >= 0x13) { lexer->state = TSIP_PAYLOAD; break; } #endif /* TSIP_ENABLE */ if (c == DLE) { lexer->state = GROUND_STATE; break; } // FALL-THRU!!!!! 
no break here #endif /* TSIP_ENABLE */ #ifdef NAVCOM_ENABLE case NAVCOM_LEADER_1: if (c == 0x99) lexer->state = NAVCOM_LEADER_2; else return character_pushback(lexer, GROUND_STATE); break; case NAVCOM_LEADER_2: if (c == 0x66) lexer->state = NAVCOM_LEADER_3; else return character_pushback(lexer, GROUND_STATE); break; case NAVCOM_LEADER_3: lexer->state = NAVCOM_ID; break; case NAVCOM_ID: lexer->length = (size_t) c - 4; lexer->state = NAVCOM_LENGTH_1; break; case NAVCOM_LENGTH_1: lexer->length += (c << 8); lexer->state = NAVCOM_LENGTH_2; break; case NAVCOM_LENGTH_2: if (--lexer->length == 0) lexer->state = NAVCOM_PAYLOAD; break; case NAVCOM_PAYLOAD: { unsigned char csum = lexer->inbuffer[3]; for (n = 4; (unsigned char *)(lexer->inbuffer + n) < lexer->inbufptr - 1; n++) csum ^= lexer->inbuffer[n]; if (csum != c) { gpsd_log(&lexer->errout, LOG_IO, "Navcom packet type 0x%hhx bad checksum 0x%hhx, expecting 0x%x\n", lexer->inbuffer[3], csum, c); lexer->state = GROUND_STATE; break; } } lexer->state = NAVCOM_CSUM; break; case NAVCOM_CSUM: if (c == 0x03) lexer->state = NAVCOM_RECOGNIZED; else return character_pushback(lexer, GROUND_STATE); break; case NAVCOM_RECOGNIZED: if (c == 0x02) lexer->state = NAVCOM_LEADER_1; else return character_pushback(lexer, GROUND_STATE); break; #endif /* NAVCOM_ENABLE */ #endif /* TSIP_ENABLE || EVERMORE_ENABLE || GARMIN_ENABLE */ #ifdef RTCM104V3_ENABLE case RTCM3_LEADER_1: /* high 6 bits must be zero, low 2 bits are MSB of a 10-bit length */ if ((c & 0xFC) == 0) { lexer->length = (size_t) (c << 8); lexer->state = RTCM3_LEADER_2; } else return character_pushback(lexer, GROUND_STATE); break; case RTCM3_LEADER_2: /* third byte is the low 8 bits of the RTCM3 packet length */ lexer->length |= c; lexer->length += 3; /* to get the three checksum bytes */ lexer->state = RTCM3_PAYLOAD; break; case RTCM3_PAYLOAD: if (--lexer->length == 0) lexer->state = RTCM3_RECOGNIZED; break; #endif /* RTCM104V3_ENABLE */ #ifdef ZODIAC_ENABLE case ZODIAC_EXPECTED: case ZODIAC_RECOGNIZED: if (c == 0xff) lexer->state = ZODIAC_LEADER_1; else return character_pushback(lexer, GROUND_STATE); break; case ZODIAC_LEADER_1: if (c == 0x81) lexer->state = ZODIAC_LEADER_2; else return character_pushback(lexer, GROUND_STATE); break; case ZODIAC_LEADER_2: lexer->state = ZODIAC_ID_1; break; case ZODIAC_ID_1: lexer->state = ZODIAC_ID_2; break; case ZODIAC_ID_2: lexer->length = (size_t) c; lexer->state = ZODIAC_LENGTH_1; break; case ZODIAC_LENGTH_1: lexer->length += (c << 8); lexer->state = ZODIAC_LENGTH_2; break; case ZODIAC_LENGTH_2: lexer->state = ZODIAC_FLAGS_1; break; case ZODIAC_FLAGS_1: lexer->state = ZODIAC_FLAGS_2; break; case ZODIAC_FLAGS_2: lexer->state = ZODIAC_HSUM_1; break; case ZODIAC_HSUM_1: { #define getword(i) (short)(lexer->inbuffer[2*(i)] | (lexer->inbuffer[2*(i)+1] << 8)) short sum = getword(0) + getword(1) + getword(2) + getword(3); sum *= -1; if (sum != getword(4)) { gpsd_log(&lexer->errout, LOG_IO, "Zodiac Header checksum 0x%hx expecting 0x%hx\n", sum, getword(4)); lexer->state = GROUND_STATE; break; } } gpsd_log(&lexer->errout, LOG_RAW + 1, "Zodiac header id=%hd len=%hd flags=%hx\n", getword(1), getword(2), getword(3)); #undef getword if (lexer->length == 0) { lexer->state = ZODIAC_RECOGNIZED; break; } lexer->length *= 2; /* word count to byte count */ lexer->length += 2; /* checksum */ /* 10 bytes is the length of the Zodiac header */ if (lexer->length <= MAX_PACKET_LENGTH - 10) lexer->state = ZODIAC_PAYLOAD; else return character_pushback(lexer, GROUND_STATE); break; case 
ZODIAC_PAYLOAD: if (--lexer->length == 0) lexer->state = ZODIAC_RECOGNIZED; break; #endif /* ZODIAC_ENABLE */ #ifdef UBLOX_ENABLE case UBX_LEADER_1: if (c == 0x62) lexer->state = UBX_LEADER_2; else return character_pushback(lexer, GROUND_STATE); break; case UBX_LEADER_2: lexer->state = UBX_CLASS_ID; break; case UBX_CLASS_ID: lexer->state = UBX_MESSAGE_ID; break; case UBX_MESSAGE_ID: lexer->length = (size_t) c; lexer->state = UBX_LENGTH_1; break; case UBX_LENGTH_1: lexer->length += (c << 8); if (lexer->length <= MAX_PACKET_LENGTH) lexer->state = UBX_LENGTH_2; else return character_pushback(lexer, GROUND_STATE); break; case UBX_LENGTH_2: lexer->state = UBX_PAYLOAD; break; case UBX_PAYLOAD: if (--lexer->length == 0) lexer->state = UBX_CHECKSUM_A; /* else stay in payload state */ break; case UBX_CHECKSUM_A: lexer->state = UBX_RECOGNIZED; break; case UBX_RECOGNIZED: if (c == 0xb5) lexer->state = UBX_LEADER_1; #ifdef NMEA0183_ENABLE else if (c == '$') /* LEA-5H can and will output NMEA and UBX back to back */ lexer->state = NMEA_DOLLAR; #endif /* NMEA0183_ENABLE */ #ifdef PASSTHROUGH_ENABLE else if (c == '{') return character_pushback(lexer, JSON_LEADER); #endif /* PASSTHROUGH_ENABLE */ else return character_pushback(lexer, GROUND_STATE); break; #endif /* UBLOX_ENABLE */ #ifdef EVERMORE_ENABLE case EVERMORE_LEADER_1: if (c == STX) lexer->state = EVERMORE_LEADER_2; else return character_pushback(lexer, GROUND_STATE); break; case EVERMORE_LEADER_2: lexer->length = (size_t) c; if (c == DLE) lexer->state = EVERMORE_PAYLOAD_DLE; else lexer->state = EVERMORE_PAYLOAD; break; case EVERMORE_PAYLOAD: if (c == DLE) lexer->state = EVERMORE_PAYLOAD_DLE; else if (--lexer->length == 0) return character_pushback(lexer, GROUND_STATE); break; case EVERMORE_PAYLOAD_DLE: switch (c) { case DLE: lexer->state = EVERMORE_PAYLOAD; break; case ETX: lexer->state = EVERMORE_RECOGNIZED; break; default: lexer->state = GROUND_STATE; } break; case EVERMORE_RECOGNIZED: if (c == DLE) lexer->state = EVERMORE_LEADER_1; else return character_pushback(lexer, GROUND_STATE); break; #endif /* EVERMORE_ENABLE */ #ifdef ITRAX_ENABLE case ITALK_LEADER_1: if (c == '!') lexer->state = ITALK_LEADER_2; else return character_pushback(lexer, GROUND_STATE); break; case ITALK_LEADER_2: lexer->length = (size_t) (lexer->inbuffer[6] & 0xff); lexer->state = ITALK_LENGTH; break; case ITALK_LENGTH: lexer->length += 1; /* fix number of words in payload */ lexer->length *= 2; /* convert to number of bytes */ lexer->length += 3; /* add trailer length */ lexer->state = ITALK_PAYLOAD; break; case ITALK_PAYLOAD: /* lookahead for "<!" 
because sometimes packets are short but valid */ if ((c == '>') && (lexer->inbufptr[0] == '<') && (lexer->inbufptr[1] == '!')) { lexer->state = ITALK_RECOGNIZED; gpsd_log(&lexer->errout, LOG_IO, "ITALK: trying to process runt packet\n"); break; } else if (--lexer->length == 0) lexer->state = ITALK_DELIVERED; break; case ITALK_DELIVERED: if (c == '>') lexer->state = ITALK_RECOGNIZED; else return character_pushback(lexer, GROUND_STATE); break; case ITALK_RECOGNIZED: if (c == '<') lexer->state = ITALK_LEADER_1; else return character_pushback(lexer, GROUND_STATE); break; #endif /* ITRAX_ENABLE */ #ifdef GEOSTAR_ENABLE case GEOSTAR_LEADER_1: if (c == 'S') lexer->state = GEOSTAR_LEADER_2; else return character_pushback(lexer, GROUND_STATE); break; case GEOSTAR_LEADER_2: if (c == 'G') lexer->state = GEOSTAR_LEADER_3; else return character_pushback(lexer, GROUND_STATE); break; case GEOSTAR_LEADER_3: if (c == 'G') lexer->state = GEOSTAR_LEADER_4; else return character_pushback(lexer, GROUND_STATE); break; case GEOSTAR_LEADER_4: lexer->state = GEOSTAR_MESSAGE_ID_1; break; case GEOSTAR_MESSAGE_ID_1: lexer->state = GEOSTAR_MESSAGE_ID_2; break; case GEOSTAR_MESSAGE_ID_2: lexer->length = (size_t)(c * 4); lexer->state = GEOSTAR_LENGTH_1; break; case GEOSTAR_LENGTH_1: lexer->length += (c << 8) * 4; if (lexer->length <= MAX_PACKET_LENGTH) lexer->state = GEOSTAR_LENGTH_2; else return character_pushback(lexer, GROUND_STATE); break; case GEOSTAR_LENGTH_2: lexer->state = GEOSTAR_PAYLOAD; break; case GEOSTAR_PAYLOAD: if (--lexer->length == 0) lexer->state = GEOSTAR_CHECKSUM_A; /* else stay in payload state */ break; case GEOSTAR_CHECKSUM_A: lexer->state = GEOSTAR_CHECKSUM_B; break; case GEOSTAR_CHECKSUM_B: lexer->state = GEOSTAR_CHECKSUM_C; break; case GEOSTAR_CHECKSUM_C: lexer->state = GEOSTAR_RECOGNIZED; break; case GEOSTAR_RECOGNIZED: if (c == 'P') lexer->state = GEOSTAR_LEADER_1; else return character_pushback(lexer, GROUND_STATE); break; #endif /* GEOSTAR_ENABLE */ #ifdef TSIP_ENABLE case TSIP_LEADER: /* unused case */ if (c >= 0x13) lexer->state = TSIP_PAYLOAD; else return character_pushback(lexer, GROUND_STATE); break; case TSIP_PAYLOAD: if (c == DLE) lexer->state = TSIP_DLE; break; case TSIP_DLE: switch (c) { case ETX: lexer->state = TSIP_RECOGNIZED; break; case DLE: lexer->state = TSIP_PAYLOAD; break; default: lexer->state = GROUND_STATE; break; } break; case TSIP_RECOGNIZED: if (c == DLE) /* * Don't go to TSIP_LEADER state -- TSIP packets aren't * checksummed, so false positives are easy. We might be * looking at another DLE-stuffed protocol like EverMore * or Garmin streaming binary. */ lexer->state = DLE_LEADER; else return character_pushback(lexer, GROUND_STATE); break; #endif /* TSIP_ENABLE */ #ifdef RTCM104V2_ENABLE case RTCM2_SYNC_STATE: case RTCM2_SKIP_STATE: if ((isgpsstat = rtcm2_decode(lexer, c)) == ISGPS_MESSAGE) { lexer->state = RTCM2_RECOGNIZED; break; } else if (isgpsstat == ISGPS_NO_SYNC) lexer->state = GROUND_STATE; break; case RTCM2_RECOGNIZED: if (c == '#') /* * There's a remote possibility this could fire when # = * 0x23 is legitimate in-stream RTCM2 data. No help for * it, the test framework needs this case so it can inject * # EOF and we'll miss a packet. 
*/ return character_pushback(lexer, GROUND_STATE); else if (rtcm2_decode(lexer, c) == ISGPS_SYNC) { lexer->state = RTCM2_SYNC_STATE; break; } else lexer->state = GROUND_STATE; break; #endif /* RTCM104V2_ENABLE */ #ifdef PASSTHROUGH_ENABLE case JSON_LEADER: if (c == '{' || c == '[') { lexer->json_depth++; } else if (c == '}' || c == ']') { if (--lexer->json_depth == 0) lexer->state = JSON_RECOGNIZED; } else if (isspace(c) || c == ',') break; else if (c == '"') { lexer->state = JSON_STRINGLITERAL; lexer->json_after = JSON_END_ATTRIBUTE; } else { gpsd_log(&lexer->errout, LOG_RAW + 2, "%08ld: missing attribute start after header\n", lexer->char_counter); lexer->state = GROUND_STATE; } break; case JSON_STRINGLITERAL: if (c == '\\') lexer->state = JSON_STRING_SOLIDUS; else if (c == '"') lexer->state = lexer->json_after; break; case JSON_STRING_SOLIDUS: lexer->state = JSON_STRINGLITERAL; break; case JSON_END_ATTRIBUTE: if (isspace(c)) break; else if (c == ':') lexer->state = JSON_EXPECT_VALUE; else /* saw something other than value start after colon */ return character_pushback(lexer, GROUND_STATE); break; case JSON_EXPECT_VALUE: if (isspace(c)) break; else if (c == '"') { lexer->state = JSON_STRINGLITERAL; lexer->json_after = JSON_END_VALUE; } else if (c == '{' || c == '[') { return character_pushback(lexer, JSON_LEADER); } else if (strchr("-0123456789", c) != NULL) { lexer->state = JSON_NUMBER; } else if (c == 't' || c == 'f' || c == 'n') /* * This is a bit more permissive than strictly necessary, as * GPSD JSON does not include the null token. Still, it's * futureproofing. */ lexer->state = JSON_SPECIAL; else /* couldn't recognize start of value literal */ return character_pushback(lexer, GROUND_STATE); break; case JSON_NUMBER: /* * Will recognize some ill-formed numeric literals. * Should be OK as we're already three stages deep inside * JSON recognition; odds that we'll actually see an * ill-formed literal are quite low. and the worst * possible result if it happens is our JSON parser will * quietly chuck out the object. 
*/ if (strchr("1234567890.eE+-", c) == NULL) { return character_pushback(lexer, JSON_END_VALUE); } break; case JSON_SPECIAL: if (strchr("truefalsnil", c) == NULL) return character_pushback(lexer, JSON_END_VALUE); break; case JSON_END_VALUE: if (isspace(c)) break; else if (c == ',') lexer->state = JSON_LEADER; else if (c == '}' || c == ']') return character_pushback(lexer, JSON_LEADER); else /* trailing garbage after JSON value */ return character_pushback(lexer, GROUND_STATE); break; #endif /* PASSTHROUGH_ENABLE */ #ifdef STASH_ENABLE case STASH_RECOGNIZED: if (c == '$') lexer->state = NMEA_DOLLAR; else return character_pushback(lexer, GROUND_STATE); break; #endif /* STASH_ENABLE */ } return true; /* no pushback */ } static void packet_accept(struct gps_lexer_t *lexer, int packet_type) /* packet grab succeeded, move to output buffer */ { size_t packetlen = lexer->inbufptr - lexer->inbuffer; if (packetlen < sizeof(lexer->outbuffer)) { memcpy(lexer->outbuffer, lexer->inbuffer, packetlen); lexer->outbuflen = packetlen; lexer->outbuffer[packetlen] = '\0'; lexer->type = packet_type; if (lexer->errout.debug >= LOG_RAW+1) { char scratchbuf[MAX_PACKET_LENGTH*2+1]; gpsd_log(&lexer->errout, LOG_RAW+1, "Packet type %d accepted %zu = %s\n", packet_type, packetlen, gpsd_packetdump(scratchbuf, sizeof(scratchbuf), (char *)lexer->outbuffer, lexer->outbuflen)); } } else { gpsd_log(&lexer->errout, LOG_ERROR, "Rejected too long packet type %d len %zu\n", packet_type, packetlen); } } static void packet_discard(struct gps_lexer_t *lexer) /* shift the input buffer to discard all data up to current input pointer */ { size_t discard = lexer->inbufptr - lexer->inbuffer; size_t remaining = lexer->inbuflen - discard; lexer->inbufptr = memmove(lexer->inbuffer, lexer->inbufptr, remaining); lexer->inbuflen = remaining; if (lexer->errout.debug >= LOG_RAW+1) { char scratchbuf[MAX_PACKET_LENGTH*2+1]; gpsd_log(&lexer->errout, LOG_RAW + 1, "Packet discard of %zu, chars remaining is %zu = %s\n", discard, remaining, gpsd_packetdump(scratchbuf, sizeof(scratchbuf), (char *)lexer->inbuffer, lexer->inbuflen)); } } #ifdef STASH_ENABLE static void packet_stash(struct gps_lexer_t *lexer) /* stash the input buffer up to current input pointer */ { size_t stashlen = lexer->inbufptr - lexer->inbuffer; memcpy(lexer->stashbuffer, lexer->inbuffer, stashlen); lexer->stashbuflen = stashlen; if (lexer->errout.debug >= LOG_RAW+1) { char scratchbuf[MAX_PACKET_LENGTH*2+1]; gpsd_log(&lexer->errout, LOG_RAW+1, "Packet stash of %zu = %s\n", stashlen, gpsd_packetdump(scratchbuf, sizeof(scratchbuf), (char *)lexer->stashbuffer, lexer->stashbuflen)); } } static void packet_unstash(struct gps_lexer_t *lexer) /* return stash to start of input buffer */ { size_t available = sizeof(lexer->inbuffer) - lexer->inbuflen; size_t stashlen = lexer->stashbuflen; if (stashlen <= available) { memmove(lexer->inbuffer + stashlen, lexer->inbuffer, lexer->inbuflen); memcpy(lexer->inbuffer, lexer->stashbuffer, stashlen); lexer->inbuflen += stashlen; lexer->stashbuflen = 0; if (lexer->errout.debug >= LOG_RAW+1) { char scratchbuf[MAX_PACKET_LENGTH*2+1]; gpsd_log(&lexer->errout, LOG_RAW+1, "Packet unstash of %zu, reconstructed is %zu = %s\n", stashlen, lexer->inbuflen, gpsd_packetdump(scratchbuf, sizeof(scratchbuf), (char *)lexer->inbuffer, lexer->inbuflen)); } } else { gpsd_log(&lexer->errout, LOG_ERROR, "Rejected too long unstash of %zu\n", stashlen); lexer->stashbuflen = 0; } } #endif /* STASH_ENABLE */ static void character_discard(struct gps_lexer_t *lexer) /* shift 
the input buffer to discard one character and reread data */ { memmove(lexer->inbuffer, lexer->inbuffer + 1, (size_t)-- lexer->inbuflen); lexer->inbufptr = lexer->inbuffer; if (lexer->errout.debug >= LOG_RAW+1) { char scratchbuf[MAX_PACKET_LENGTH*2+1]; gpsd_log(&lexer->errout, LOG_RAW + 1, "Character discarded, buffer %zu chars = %s\n", lexer->inbuflen, gpsd_packetdump(scratchbuf, sizeof(scratchbuf), (char *)lexer->inbuffer, lexer->inbuflen)); } } /* get 0-origin big-endian words relative to start of packet buffer */ #define getword(i) (short)(lexer->inbuffer[2*(i)] | (lexer->inbuffer[2*(i)+1] << 8)) /* entry points begin here */ void lexer_init(struct gps_lexer_t *lexer) { lexer->char_counter = 0; lexer->retry_counter = 0; #ifdef PASSTHROUGH_ENABLE lexer->json_depth = 0; #endif /* PASSTHROUGH_ENABLE */ #ifdef TIMING_ENABLE lexer->start_time = 0.0; #endif /* TIMING_ENABLE */ packet_reset(lexer); errout_reset(&lexer->errout); } void packet_parse(struct gps_lexer_t *lexer) /* grab a packet from the input buffer */ { lexer->outbuflen = 0; while (packet_buffered_input(lexer) > 0) { unsigned char c = *lexer->inbufptr++; unsigned int oldstate = lexer->state; if (!nextstate(lexer, c)) continue; gpsd_log(&lexer->errout, LOG_RAW + 2, "%08ld: character '%c' [%02x], %s -> %s\n", lexer->char_counter, (isprint(c) ? c : '.'), c, state_table[oldstate], state_table[lexer->state]); lexer->char_counter++; if (lexer->state == GROUND_STATE) { character_discard(lexer); } else if (lexer->state == COMMENT_RECOGNIZED) { packet_accept(lexer, COMMENT_PACKET); packet_discard(lexer); lexer->state = GROUND_STATE; break; } #ifdef NMEA0183_ENABLE else if (lexer->state == NMEA_RECOGNIZED) { /* * $PASHR packets have no checksum. Avoid the possibility * that random garbage might make it look like they do. */ if (!str_starts_with((const char *)lexer->inbuffer, "$PASHR,")) { bool checksum_ok = true; char csum[3] = { '0', '0', '0' }; char *end; /* * Back up past any whitespace. 
Need to do this because * at least one GPS (the Firefly 1a) emits \r\r\n */ for (end = (char *)lexer->inbufptr - 1; isspace((unsigned char) *end); end--) continue; while (strchr("0123456789ABCDEF", *end)) --end; if (*end == '*') { unsigned int n, crc = 0; for (n = 1; (char *)lexer->inbuffer + n < end; n++) crc ^= lexer->inbuffer[n]; (void)snprintf(csum, sizeof(csum), "%02X", crc); checksum_ok = (csum[0] == toupper((unsigned char) end[1]) && csum[1] == toupper((unsigned char) end[2])); } if (!checksum_ok) { gpsd_log(&lexer->errout, LOG_WARN, "bad checksum in NMEA packet; expected %s.\n", csum); packet_accept(lexer, BAD_PACKET); lexer->state = GROUND_STATE; packet_discard(lexer); break; /* exit case */ } } /* checksum passed or not present */ #ifdef AIVDM_ENABLE if (str_starts_with((char *)lexer->inbuffer, "!AIVDM")) packet_accept(lexer, AIVDM_PACKET); else if (str_starts_with((char *)lexer->inbuffer, "!AIVDO")) packet_accept(lexer, AIVDM_PACKET); else if (str_starts_with((char *)lexer->inbuffer, "!BSVDM")) packet_accept(lexer, AIVDM_PACKET); else if (str_starts_with((char *)lexer->inbuffer, "!BSVDO")) packet_accept(lexer, AIVDM_PACKET); else if (str_starts_with((char *)lexer->inbuffer, "!ABVDM")) packet_accept(lexer, AIVDM_PACKET); else if (str_starts_with((char *)lexer->inbuffer, "!ABVDO")) packet_accept(lexer, AIVDM_PACKET); else if (str_starts_with((char *)lexer->inbuffer, "!ANVDM")) packet_accept(lexer, AIVDM_PACKET); else if (str_starts_with((char *)lexer->inbuffer, "!ANVDO")) packet_accept(lexer, AIVDM_PACKET); else if (str_starts_with((char *)lexer->inbuffer, "!SAVDM")) packet_accept(lexer, AIVDM_PACKET); else if (str_starts_with((char *)lexer->inbuffer, "!SAVDO")) packet_accept(lexer, AIVDM_PACKET); else #endif /* AIVDM_ENABLE */ packet_accept(lexer, NMEA_PACKET); packet_discard(lexer); #ifdef STASH_ENABLE if (lexer->stashbuflen) packet_unstash(lexer); #endif /* STASH_ENABLE */ break; } #endif /* NMEA0183_ENABLE */ #ifdef SIRF_ENABLE else if (lexer->state == SIRF_RECOGNIZED) { unsigned char *trailer = lexer->inbufptr - 4; unsigned int checksum = (unsigned)((trailer[0] << 8) | trailer[1]); unsigned int n, crc = 0; for (n = 4; n < (unsigned)(trailer - lexer->inbuffer); n++) crc += (int)lexer->inbuffer[n]; crc &= 0x7fff; if (checksum == crc) packet_accept(lexer, SIRF_PACKET); else { packet_accept(lexer, BAD_PACKET); lexer->state = GROUND_STATE; } packet_discard(lexer); break; } #endif /* SIRF_ENABLE */ #ifdef SUPERSTAR2_ENABLE else if (lexer->state == SUPERSTAR2_RECOGNIZED) { unsigned a = 0, b; size_t n; lexer->length = 4 + (size_t) lexer->inbuffer[3] + 2; for (n = 0; n < lexer->length - 2; n++) a += (unsigned)lexer->inbuffer[n]; b = (unsigned)getleu16(lexer->inbuffer, lexer->length - 2); gpsd_log(&lexer->errout, LOG_IO, "SuperStarII pkt dump: type %u len %u\n", lexer->inbuffer[1], (unsigned int)lexer->length); if (a != b) { gpsd_log(&lexer->errout, LOG_IO, "REJECT SuperStarII packet type 0x%02x" "%zd bad checksum 0x%04x, expecting 0x%04x\n", lexer->inbuffer[1], lexer->length, a, b); packet_accept(lexer, BAD_PACKET); lexer->state = GROUND_STATE; } else { packet_accept(lexer, SUPERSTAR2_PACKET); } packet_discard(lexer); break; } #endif /* SUPERSTAR2_ENABLE */ #ifdef ONCORE_ENABLE else if (lexer->state == ONCORE_RECOGNIZED) { char a, b; int i, len; len = lexer->inbufptr - lexer->inbuffer; a = (char)(lexer->inbuffer[len - 3]); b = '\0'; for (i = 2; i < len - 3; i++) b ^= lexer->inbuffer[i]; if (a == b) { gpsd_log(&lexer->errout, LOG_IO, "Accept OnCore packet @@%c%c len %d\n", 
lexer->inbuffer[2], lexer->inbuffer[3], len); packet_accept(lexer, ONCORE_PACKET); } else { gpsd_log(&lexer->errout, LOG_IO, "REJECT OnCore packet @@%c%c len %d\n", lexer->inbuffer[2], lexer->inbuffer[3], len); packet_accept(lexer, BAD_PACKET); lexer->state = GROUND_STATE; } packet_discard(lexer); break; } #endif /* ONCORE_ENABLE */ #if defined(TSIP_ENABLE) || defined(GARMIN_ENABLE) else if (lexer->state == TSIP_RECOGNIZED) { size_t packetlen = lexer->inbufptr - lexer->inbuffer; #ifdef TSIP_ENABLE unsigned int pos, dlecnt; /* don't count stuffed DLEs in the length */ dlecnt = 0; for (pos = 0; pos < (unsigned int)packetlen; pos++) if (lexer->inbuffer[pos] == DLE) dlecnt++; if (dlecnt > 2) { dlecnt -= 2; dlecnt /= 2; gpsd_log(&lexer->errout, LOG_RAW, "Unstuffed %d DLEs\n", dlecnt); packetlen -= dlecnt; } #endif /* TSIP_ENABLE */ if (packetlen < 5) { lexer->state = GROUND_STATE; } else { unsigned int pkt_id; #ifdef GARMIN_ENABLE unsigned int len; size_t n; unsigned int ch, chksum; n = 0; #ifdef TSIP_ENABLE /* shortcut garmin */ if (TSIP_PACKET == lexer->type) goto not_garmin; #endif /* TSIP_ENABLE */ if (lexer->inbuffer[n++] != DLE) goto not_garmin; pkt_id = lexer->inbuffer[n++]; /* packet ID */ len = lexer->inbuffer[n++]; chksum = len + pkt_id; if (len == DLE) { if (lexer->inbuffer[n++] != DLE) goto not_garmin; } for (; len > 0; len--) { chksum += lexer->inbuffer[n]; if (lexer->inbuffer[n++] == DLE) { if (lexer->inbuffer[n++] != DLE) goto not_garmin; } } /* check sum byte */ ch = lexer->inbuffer[n++]; chksum += ch; if (ch == DLE) { if (lexer->inbuffer[n++] != DLE) goto not_garmin; } if (lexer->inbuffer[n++] != DLE) goto not_garmin; /* we used to say n++ here, but scan-build complains */ if (lexer->inbuffer[n] != ETX) goto not_garmin; chksum &= 0xff; if (chksum) { gpsd_log(&lexer->errout, LOG_IO, "Garmin checksum failed: %02x!=0\n", chksum); goto not_garmin; } packet_accept(lexer, GARMIN_PACKET); packet_discard(lexer); break; not_garmin:; gpsd_log(&lexer->errout, LOG_RAW + 1, "Not a Garmin packet\n"); #endif /* GARMIN_ENABLE */ #ifdef TSIP_ENABLE /* check for some common TSIP packet types: * 0x13, TSIP Parsing Error Notification * 0x38, Request SV system data * 0x1c, Hardware/Software Version Information * 0x41, GPS time, data length 10 * 0x42, Single Precision Fix, data length 16 * 0x43, Velocity Fix, data length 20 * 0x45, Software Version Information, data length 10 * 0x46, Health of Receiver, data length 2 * 0x48, GPS System Messages, data length 22 * 0x49, Almanac Health Page, data length 32 * 0x4a, LLA Position, data length 20 * 0x4b, Machine Code Status, data length 3 * 0x4c, Operating Parameters Report, data length 17 * 0x54, One Satellite Bias, data length 4 * 0x56, Velocity Fix (ENU), data length 20 * 0x57, Last Computed Fix Report, data length 8 * 0x5a, Raw Measurements * 0x5b, Satellite Ephemeris Status, data length 16 * 0x5c, Satellite Tracking Status, data length 24 * 0x5e, Additional Fix Status Report * 0x6d, All-In-View Satellite Selection, data length 16+numSV * 0x82, Differential Position Fix Mode, data length 1 * 0x83, Double Precision XYZ, data length 36 * 0x84, Double Precision LLA, data length 36 * 0xbb, GPS Navigation Configuration * 0xbc, Receiver Port Configuration * * <DLE>[pkt id] [data] <DLE><ETX> * * The best description is in [TSIP], the Trimble Standard * Interface Protocol manual; unless otherwise specified * that is where these type/length notifications are from. 
* * Note that not all Trimble chips conform perfectly to this * specification, nor does it cover every packet type we * may see on the wire. */ pkt_id = lexer->inbuffer[1]; /* packet ID */ /* *INDENT-OFF* */ if (!((0x13 == pkt_id) || (0x1c == pkt_id) || (0xbb == pkt_id) || (0xbc == pkt_id) || (0x38 == pkt_id)) && ((0x41 > pkt_id) || (0x8f < pkt_id))) { gpsd_log(&lexer->errout, LOG_IO, "Packet ID 0x%02x out of range for TSIP\n", pkt_id); goto not_tsip; } /* *INDENT-ON* */ #define TSIP_ID_AND_LENGTH(id, len) ((id == pkt_id) && (len == packetlen-4)) if ((0x13 == pkt_id) && (1 <= packetlen)) /* pass */ ; /* * Not in [TSIP], Accutime Gold only. Variable length. */ else if ((0x1c == pkt_id) && (11 <= packetlen)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x41, 10)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x42, 16)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x43, 20)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x45, 10)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x46, 2)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x48, 22)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x49, 32)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x4a, 20)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x4b, 3)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x4c, 17)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x54, 12)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x55, 4)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x56, 20)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x57, 8)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x5a, 25)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x5b, 16)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x5c, 24)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x5e, 2)) /* pass */ ; /* * Not in [TSIP]. It's unclear where this test came from or * why it's here; the TSIP driver doesn't use type 0x5f. */ else if (TSIP_ID_AND_LENGTH(0x5f, 66)) /* pass */ ; /* 0x6d is variable length depending on the sat picture */ else if ((0x6d == pkt_id) && ((0x14 <= packetlen) && (0x20 >= packetlen))) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x82, 1)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x83, 36)) /* pass */ ; else if (TSIP_ID_AND_LENGTH(0x84, 36)) /* pass */ ; /* super packets, variable length */ else if ((0x8e == pkt_id) || (0x8f == pkt_id)) /* pass */ ; /* * This is according to [TSIP]. */ else if (TSIP_ID_AND_LENGTH(0xbb, 40)) /* pass */ ; /* * The Accutime Gold ships a version of this packet with a * 43-byte payload. We only use the first 21 bytes, and * parts after byte 27 are padding. */ else if (TSIP_ID_AND_LENGTH(0xbb, 43)) /* pass */ ; else { /* pass */ ; gpsd_log(&lexer->errout, LOG_IO, "TSIP REJECT pkt_id = %#02x, packetlen= %zu\n", pkt_id, packetlen); goto not_tsip; } #undef TSIP_ID_AND_LENGTH /* Debug */ gpsd_log(&lexer->errout, LOG_RAW, "TSIP pkt_id = %#02x, packetlen= %zu\n", pkt_id, packetlen); packet_accept(lexer, TSIP_PACKET); packet_discard(lexer); break; not_tsip: gpsd_log(&lexer->errout, LOG_RAW + 1, "Not a TSIP packet\n"); /* * More attempts to recognize ambiguous TSIP-like * packet types could go here. 
*/ packet_accept(lexer, BAD_PACKET); lexer->state = GROUND_STATE; packet_discard(lexer); break; #endif /* TSIP_ENABLE */ } } #endif /* TSIP_ENABLE || GARMIN_ENABLE */ #ifdef RTCM104V3_ENABLE else if (lexer->state == RTCM3_RECOGNIZED) { if (crc24q_check(lexer->inbuffer, lexer->inbufptr - lexer->inbuffer)) { packet_accept(lexer, RTCM3_PACKET); } else { gpsd_log(&lexer->errout, LOG_IO, "RTCM3 data checksum failure, " "%0x against %02x %02x %02x\n", crc24q_hash(lexer->inbuffer, lexer->inbufptr - lexer->inbuffer - 3), lexer->inbufptr[-3], lexer->inbufptr[-2], lexer->inbufptr[-1]); packet_accept(lexer, BAD_PACKET); } packet_discard(lexer); lexer->state = GROUND_STATE; break; } #endif /* RTCM104V3_ENABLE */ #ifdef ZODIAC_ENABLE else if (lexer->state == ZODIAC_RECOGNIZED) { short len, n, sum; len = getword(2); for (n = sum = 0; n < len; n++) sum += getword(5 + n); sum *= -1; if (len == 0 || sum == getword(5 + len)) { packet_accept(lexer, ZODIAC_PACKET); } else { gpsd_log(&lexer->errout, LOG_IO, "Zodiac data checksum 0x%hx over length %hd, expecting 0x%hx\n", sum, len, getword(5 + len)); packet_accept(lexer, BAD_PACKET); lexer->state = GROUND_STATE; } packet_discard(lexer); break; } #endif /* ZODIAC_ENABLE */ #ifdef UBLOX_ENABLE else if (lexer->state == UBX_RECOGNIZED) { /* UBX use a TCP like checksum */ int n, len; unsigned char ck_a = (unsigned char)0; unsigned char ck_b = (unsigned char)0; len = lexer->inbufptr - lexer->inbuffer; gpsd_log(&lexer->errout, LOG_IO, "UBX: len %d\n", len); for (n = 2; n < (len - 2); n++) { ck_a += lexer->inbuffer[n]; ck_b += ck_a; } if (ck_a == lexer->inbuffer[len - 2] && ck_b == lexer->inbuffer[len - 1]) packet_accept(lexer, UBX_PACKET); else { gpsd_log(&lexer->errout, LOG_IO, "UBX checksum 0x%02hhx%02hhx over length %d," " expecting 0x%02hhx%02hhx (type 0x%02hhx%02hhx)\n", ck_a, ck_b, len, lexer->inbuffer[len - 2], lexer->inbuffer[len - 1], lexer->inbuffer[2], lexer->inbuffer[3]); packet_accept(lexer, BAD_PACKET); lexer->state = GROUND_STATE; } packet_discard(lexer); break; } #endif /* UBLOX_ENABLE */ #ifdef EVERMORE_ENABLE else if (lexer->state == EVERMORE_RECOGNIZED) { unsigned int n, crc, checksum, len; n = 0; if (lexer->inbuffer[n++] != DLE) goto not_evermore; if (lexer->inbuffer[n++] != STX) goto not_evermore; len = lexer->inbuffer[n++]; if (len == DLE) { if (lexer->inbuffer[n++] != DLE) goto not_evermore; } len -= 2; crc = 0; for (; len > 0; len--) { crc += lexer->inbuffer[n]; if (lexer->inbuffer[n++] == DLE) { if (lexer->inbuffer[n++] != DLE) goto not_evermore; } } checksum = lexer->inbuffer[n++]; if (checksum == DLE) { if (lexer->inbuffer[n++] != DLE) goto not_evermore; } if (lexer->inbuffer[n++] != DLE) goto not_evermore; /* we used to say n++ here, but scan-build complains */ if (lexer->inbuffer[n] != ETX) goto not_evermore; crc &= 0xff; if (crc != checksum) { gpsd_log(&lexer->errout, LOG_IO, "EverMore checksum failed: %02x != %02x\n", crc, checksum); goto not_evermore; } packet_accept(lexer, EVERMORE_PACKET); packet_discard(lexer); break; not_evermore: packet_accept(lexer, BAD_PACKET); lexer->state = GROUND_STATE; packet_discard(lexer); break; } #endif /* EVERMORE_ENABLE */ /* XXX CSK */ #ifdef ITRAX_ENABLE #define getib(j) ((uint8_t)lexer->inbuffer[(j)]) #define getiw(i) ((uint16_t)(((uint16_t)getib((i)+1) << 8) | (uint16_t)getib((i)))) else if (lexer->state == ITALK_RECOGNIZED) { volatile uint16_t len, n, csum, xsum; /* number of words */ len = (uint16_t) (lexer->inbuffer[6] & 0xff); /* expected checksum */ xsum = getiw(7 + 2 * len); csum = 0; for (n = 
0; n < len; n++) { volatile uint16_t tmpw = getiw(7 + 2 * n); volatile uint32_t tmpdw = (csum + 1) * (tmpw + n); csum ^= (tmpdw & 0xffff) ^ ((tmpdw >> 16) & 0xffff); } if (len == 0 || csum == xsum) packet_accept(lexer, ITALK_PACKET); else { gpsd_log(&lexer->errout, LOG_IO, "ITALK: checksum failed - " "type 0x%02x expected 0x%04x got 0x%04x\n", lexer->inbuffer[4], xsum, csum); packet_accept(lexer, BAD_PACKET); lexer->state = GROUND_STATE; } packet_discard(lexer); break; } #undef getiw #undef getib #endif /* ITRAX_ENABLE */ #ifdef NAVCOM_ENABLE else if (lexer->state == NAVCOM_RECOGNIZED) { /* By the time we got here we know checksum is OK */ packet_accept(lexer, NAVCOM_PACKET); packet_discard(lexer); break; } #endif /* NAVCOM_ENABLE */ #ifdef GEOSTAR_ENABLE else if (lexer->state == GEOSTAR_RECOGNIZED) { /* GeoStar uses a XOR 32bit checksum */ int n, len; unsigned int cs = 0L; len = lexer->inbufptr - lexer->inbuffer; /* Calculate checksum */ for (n = 0; n < len; n += 4) { cs ^= getleu32(lexer->inbuffer, n); } if (cs == 0) packet_accept(lexer, GEOSTAR_PACKET); else { gpsd_log(&lexer->errout, LOG_IO, "GeoStar checksum failed 0x%x over length %d\n", cs, len); packet_accept(lexer, BAD_PACKET); lexer->state = GROUND_STATE; } packet_discard(lexer); break; } #endif /* GEOSTAR_ENABLE */ #ifdef RTCM104V2_ENABLE else if (lexer->state == RTCM2_RECOGNIZED) { /* * RTCM packets don't have checksums. The six bits of parity * per word and the preamble better be good enough. */ packet_accept(lexer, RTCM2_PACKET); packet_discard(lexer); break; } #endif /* RTCM104V2_ENABLE */ #ifdef GARMINTXT_ENABLE else if (lexer->state == GTXT_RECOGNIZED) { size_t packetlen = lexer->inbufptr - lexer->inbuffer; if (57 <= packetlen) { packet_accept(lexer, GARMINTXT_PACKET); packet_discard(lexer); lexer->state = GROUND_STATE; break; } else { packet_accept(lexer, BAD_PACKET); lexer->state = GROUND_STATE; } } #endif #ifdef PASSTHROUGH_ENABLE else if (lexer->state == JSON_RECOGNIZED) { size_t packetlen = lexer->inbufptr - lexer->inbuffer; if (packetlen >= 11) /* {"class": } */ packet_accept(lexer, JSON_PACKET); else packet_accept(lexer, BAD_PACKET); packet_discard(lexer); lexer->state = GROUND_STATE; break; } #endif /* PASSTHROUGH_ENABLE */ #ifdef STASH_ENABLE else if (lexer->state == STASH_RECOGNIZED) { packet_stash(lexer); packet_discard(lexer); } #endif /* STASH_ENABLE */ } /* while */ } #undef getword ssize_t packet_get(int fd, struct gps_lexer_t *lexer) /* grab a packet; return -1=>I/O error, 0=>EOF, or a length */ { ssize_t recvd; errno = 0; recvd = read(fd, lexer->inbuffer + lexer->inbuflen, sizeof(lexer->inbuffer) - (lexer->inbuflen)); if (recvd == -1) { if ((errno == EAGAIN) || (errno == EINTR)) { gpsd_log(&lexer->errout, LOG_RAW + 2, "no bytes ready\n"); recvd = 0; /* fall through, input buffer may be nonempty */ } else { gpsd_log(&lexer->errout, LOG_RAW + 2, "errno: %s\n", strerror(errno)); return -1; } } else { if (lexer->errout.debug >= LOG_RAW+1) { char scratchbuf[MAX_PACKET_LENGTH*2+1]; gpsd_log(&lexer->errout, LOG_RAW + 1, "Read %zd chars to buffer offset %zd (total %zd): %s\n", recvd, lexer->inbuflen, lexer->inbuflen + recvd, gpsd_packetdump(scratchbuf, sizeof(scratchbuf), (char *)lexer->inbufptr, (size_t) recvd)); } lexer->inbuflen += recvd; } gpsd_log(&lexer->errout, LOG_SPIN, "packet_get() fd %d -> %zd (%d)\n", fd, recvd, errno); /* * Bail out, indicating no more input, only if we just received * nothing from the device and there is nothing waiting in the * packet input buffer. 
*/ if (recvd <= 0 && packet_buffered_input(lexer) <= 0) return recvd; /* Otherwise, consume from the packet input buffer */ /* coverity[tainted_data] */ packet_parse(lexer); /* if input buffer is full, discard */ if (sizeof(lexer->inbuffer) == (lexer->inbuflen)) { /* coverity[tainted_data] */ packet_discard(lexer); lexer->state = GROUND_STATE; } /* * If we gathered a packet, return its length; it will have been * consumed out of the input buffer and moved to the output * buffer. We don't care whether the read() returned 0 or -1 and * gathered packet data was all buffered or whether it was partly * just physically read. * * Note: this choice greatly simplifies life for callers of * packet_get(), but means that they cannot tell when a nonzero * return means there was a successful physical read. They will * thus credit a data source that drops out with being alive * slightly longer than it actually was. This is unlikely to * matter as long as any policy timeouts are large compared to * the time required to consume the greatest possible amount * of buffered input, but if you hack this code you need to * be aware of the issue. It might also slightly affect * performance profiling. */ if (lexer->outbuflen > 0) return (ssize_t) lexer->outbuflen; else /* * Otherwise recvd is the size of whatever packet fragment we got. * It can still be 0 or -1 at this point even if buffer data * was consumed. */ return recvd; } void packet_reset(struct gps_lexer_t *lexer) /* return the packet machine to the ground state */ { lexer->type = BAD_PACKET; lexer->state = GROUND_STATE; lexer->inbuflen = 0; lexer->inbufptr = lexer->inbuffer; #ifdef BINARY_ENABLE isgps_init(lexer); #endif /* BINARY_ENABLE */ #ifdef STASH_ENABLE lexer->stashbuflen = 0; #endif /* STASH_ENABLE */ } #ifdef __UNUSED__ void packet_pushback(struct gps_lexer_t *lexer) /* push back the last packet grabbed */ { if (lexer->outbuflen + lexer->inbuflen < MAX_PACKET_LENGTH) { memmove(lexer->inbuffer + lexer->outbuflen, lexer->inbuffer, lexer->inbuflen); memmove(lexer->inbuffer, lexer->outbuffer, lexer->outbuflen); lexer->inbuflen += lexer->outbuflen; lexer->inbufptr += lexer->outbuflen; lexer->outbuflen = 0; } } #endif /* __UNUSED */
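/*
 * Illustrative sketch, not part of gpsd: a minimal caller loop for the
 * entry points above.  lexer_init() primes the state machine, and each
 * packet_get() call either buffers more input or leaves a complete packet
 * in lexer.outbuffer with its class in lexer.type.  The device path and
 * the function name sketch_dump_packets are assumptions for this example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include "gpsd.h"

void sketch_dump_packets(const char *device)
{
    struct gps_lexer_t lexer;
    ssize_t len;
    int fd = open(device, O_RDONLY);

    if (fd < 0)
	return;
    lexer_init(&lexer);
    /* packet_get() returns -1 on I/O error, 0 on EOF, or a byte count. */
    while ((len = packet_get(fd, &lexer)) > 0) {
	if (lexer.outbuflen > 0)
	    /* A whole packet was recognized and moved to the output buffer. */
	    (void)printf("packet type %d, %zu bytes\n",
			 lexer.type, lexer.outbuflen);
	/* Otherwise only a fragment was read; keep calling packet_get(). */
    }
    (void)close(fd);
}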
488578.c
/* Implicit rule searching for GNU Make. Copyright (C) 1988, 89, 90, 91, 92, 93, 94 Free Software Foundation, Inc. This file is part of GNU Make. GNU Make is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GNU Make is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GNU Make; see the file COPYING. If not, write to the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "make.h" #include "rule.h" #include "dep.h" #include "filedef.h" static int pattern_search PARAMS ((struct file *file, int archive, unsigned int depth, unsigned int recursions)); /* For a FILE which has no commands specified, try to figure out some from the implicit pattern rules. Returns 1 if a suitable implicit rule was found, after modifying FILE to contain the appropriate commands and deps, or returns 0 if no implicit rule was found. */ int try_implicit_rule (file, depth) struct file *file; unsigned int depth; { DEBUGPR ("Looking for an implicit rule for `%s'.\n"); /* The order of these searches was previously reversed. My logic now is that since the non-archive search uses more information in the target (the archive search omits the archive name), it is more specific and should come first. */ if (pattern_search (file, 0, depth, 0)) return 1; #ifndef NO_ARCHIVES /* If this is an archive member reference, use just the archive member name to search for implicit rules. */ if (ar_name (file->name)) { DEBUGPR ("Looking for archive-member implicit rule for `%s'.\n"); if (pattern_search (file, 1, depth, 0)) return 1; } #endif return 0; } #define DEBUGP2(msg, a1, a2) \ do { \ if (debug_flag) \ { print_spaces (depth); printf (msg, a1, a2); fflush (stdout); } \ } while (0) /* Search the pattern rules for a rule with an existing dependency to make FILE. If a rule is found, the appropriate commands and deps are put in FILE and 1 is returned. If not, 0 is returned. If ARCHIVE is nonzero, FILE->name is of the form "LIB(MEMBER)". A rule for "(MEMBER)" will be searched for, and "(MEMBER)" will not be chopped up into directory and filename parts. If an intermediate file is found by pattern search, the intermediate file is set up as a target by the recursive call and is also made a dependency of FILE. DEPTH is used for debugging messages. */ static int pattern_search (file, archive, depth, recursions) struct file *file; int archive; unsigned int depth; unsigned int recursions; { /* Filename we are searching for a rule for. */ char *filename = archive ? index (file->name, '(') : file->name; /* Length of FILENAME. */ unsigned int namelen = strlen (filename); /* The last slash in FILENAME (or nil if there is none). */ char *lastslash; /* This is a file-object used as an argument in recursive calls. It never contains any data except during a recursive call. */ struct file *intermediate_file = 0; /* List of dependencies found recursively. */ struct file **intermediate_files = (struct file **) alloca (max_pattern_deps * sizeof (struct file *)); /* List of the patterns used to find intermediate files. 
*/ char **intermediate_patterns = (char **) alloca (max_pattern_deps * sizeof (char *)); /* This buffer records all the dependencies actually found for a rule. */ char **found_files = (char **) alloca (max_pattern_deps * sizeof (char *)); /* Number of dep names now in FOUND_FILES. */ unsigned int deps_found; /* Names of possible dependencies are constructed in this buffer. */ register char *depname = (char *) alloca (namelen + max_pattern_dep_length); /* The start and length of the stem of FILENAME for the current rule. */ register char *stem; register unsigned int stemlen; /* Buffer in which we store all the rules that are possibly applicable. */ struct rule **tryrules = (struct rule **) alloca (num_pattern_rules * max_pattern_targets * sizeof (struct rule *)); /* Number of valid elements in TRYRULES. */ unsigned int nrules; /* The numbers of the rule targets of each rule in TRYRULES that matched the target file. */ unsigned int *matches = (unsigned int *) alloca (num_pattern_rules * sizeof (unsigned int)); /* Each element is nonzero if LASTSLASH was used in matching the corresponding element of TRYRULES. */ char *checked_lastslash = (char *) alloca (num_pattern_rules * sizeof (char)); /* The index in TRYRULES of the rule we found. */ unsigned int foundrule; /* Nonzero if should consider intermediate files as dependencies. */ int intermed_ok; /* Nonzero if we have matched a pattern-rule target that is not just `%'. */ int specific_rule_matched = 0; register unsigned int i; register struct rule *rule; register struct dep *dep; char *p; #ifndef NO_ARCHIVES if (archive || ar_name (filename)) lastslash = 0; else #endif { /* Set LASTSLASH to point at the last slash in FILENAME but not counting any slash at the end. (foo/bar/ counts as bar/ in directory foo/, not empty in directory foo/bar/.) */ #ifdef VMS lastslash = rindex (filename, ']'); #else lastslash = rindex (filename, '/'); #endif if (lastslash != 0 && lastslash[1] == '\0') lastslash = 0; } /* First see which pattern rules match this target and may be considered. Put them in TRYRULES. */ nrules = 0; for (rule = pattern_rules; rule != 0; rule = rule->next) { /* If the pattern rule has deps but no commands, ignore it. Users cancel built-in rules by redefining them without commands. */ if (rule->deps != 0 && rule->cmds == 0) continue; /* If this rule is in use by a parent pattern_search, don't use it here. */ if (rule->in_use) { DEBUGP2 ("Avoiding implicit rule recursion.%s%s\n", "", ""); continue; } for (i = 0; rule->targets[i] != 0; ++i) { char *target = rule->targets[i]; char *suffix = rule->suffixes[i]; int check_lastslash; /* Rules that can match any filename and are not terminal are ignored if we're recursing, so that they cannot be intermediate files. */ if (recursions > 0 && target[1] == '\0' && !rule->terminal) continue; if (rule->lens[i] > namelen) /* It can't possibly match. */ continue; /* From the lengths of the filename and the pattern parts, find the stem: the part of the filename that matches the %. */ stem = filename + (suffix - target - 1); stemlen = namelen - rule->lens[i] + 1; /* Set CHECK_LASTSLASH if FILENAME contains a directory prefix and the target pattern does not contain a slash. */ #ifdef VMS check_lastslash = lastslash != 0 && index (target, ']') == 0; #else check_lastslash = lastslash != 0 && index (target, '/') == 0; #endif if (check_lastslash) { /* In that case, don't include the directory prefix in STEM here. 
*/ unsigned int difference = lastslash - filename + 1; if (difference > stemlen) continue; stemlen -= difference; stem += difference; } /* Check that the rule pattern matches the text before the stem. */ if (check_lastslash) { if (stem > (lastslash + 1) && strncmp (target, lastslash + 1, stem - lastslash - 1)) continue; } else if (stem > filename && strncmp (target, filename, stem - filename)) continue; /* Check that the rule pattern matches the text after the stem. We could test simply use streq, but this way we compare the first two characters immediately. This saves time in the very common case where the first character matches because it is a period. */ if (*suffix != stem[stemlen] || (*suffix != '\0' && !streq (&suffix[1], &stem[stemlen + 1]))) continue; /* Record if we match a rule that not all filenames will match. */ if (target[1] != '\0') specific_rule_matched = 1; /* A rule with no dependencies and no commands exists solely to set specific_rule_matched when it matches. Don't try to use it. */ if (rule->deps == 0 && rule->cmds == 0) continue; /* Record this rule in TRYRULES and the index of the matching target in MATCHES. If several targets of the same rule match, that rule will be in TRYRULES more than once. */ tryrules[nrules] = rule; matches[nrules] = i; checked_lastslash[nrules] = check_lastslash; ++nrules; } } /* If we have found a matching rule that won't match all filenames, retroactively reject any non-"terminal" rules that do always match. */ if (specific_rule_matched) for (i = 0; i < nrules; ++i) if (!tryrules[i]->terminal) { register unsigned int j; for (j = 0; tryrules[i]->targets[j] != 0; ++j) if (tryrules[i]->targets[j][1] == '\0') break; if (tryrules[i]->targets[j] != 0) tryrules[i] = 0; } /* Try each rule once without intermediate files, then once with them. */ for (intermed_ok = 0; intermed_ok == !!intermed_ok; ++intermed_ok) { /* Try each pattern rule till we find one that applies. If it does, copy the names of its dependencies (as substituted) and store them in FOUND_FILES. DEPS_FOUND is the number of them. */ for (i = 0; i < nrules; i++) { int check_lastslash; rule = tryrules[i]; /* RULE is nil when we discover that a rule, already placed in TRYRULES, should not be applied. */ if (rule == 0) continue; /* Reject any terminal rules if we're looking to make intermediate files. */ if (intermed_ok && rule->terminal) continue; /* Mark this rule as in use so a recursive pattern_search won't try to use it. */ rule->in_use = 1; /* From the lengths of the filename and the matching pattern parts, find the stem: the part of the filename that matches the %. */ stem = filename + (rule->suffixes[matches[i]] - rule->targets[matches[i]]) - 1; stemlen = namelen - rule->lens[matches[i]] + 1; check_lastslash = checked_lastslash[i]; if (check_lastslash) { stem += lastslash - filename + 1; stemlen -= (lastslash - filename) + 1; } DEBUGP2 ("Trying pattern rule with stem `%.*s'.\n", (int) stemlen, stem); /* Try each dependency; see if it "exists". */ deps_found = 0; for (dep = rule->deps; dep != 0; dep = dep->next) { /* If the dependency name has a %, substitute the stem. */ p = index (dep_name (dep), '%'); if (p != 0) { register unsigned int i; if (check_lastslash) { /* Copy directory name from the original FILENAME. 
*/ i = lastslash - filename + 1; bcopy (filename, depname, i); } else i = 0; bcopy (dep_name (dep), depname + i, p - dep_name (dep)); i += p - dep_name (dep); bcopy (stem, depname + i, stemlen); i += stemlen; strcpy (depname + i, p + 1); p = depname; } else p = dep_name (dep); /* P is now the actual dependency name as substituted. */ if (file_impossible_p (p)) { /* If this dependency has already been ruled "impossible", then the rule fails and don't bother trying it on the second pass either since we know that will fail too. */ DEBUGP2 ("Rejecting impossible %s dependency `%s'.\n", p == depname ? "implicit" : "rule", p); tryrules[i] = 0; break; } intermediate_files[deps_found] = 0; DEBUGP2 ("Trying %s dependency `%s'.\n", p == depname ? "implicit" : "rule", p); /* The DEP->changed flag says that this dependency resides in a nonexistent directory. So we normally can skip looking for the file. However, if CHECK_LASTSLASH is set, then the dependency file we are actually looking for is in a different directory (the one gotten by prepending FILENAME's directory), so it might actually exist. */ if ((!dep->changed || check_lastslash) && (lookup_file (p) != 0 || file_exists_p (p))) { found_files[deps_found++] = savestring (p, strlen (p)); continue; } /* This code, given FILENAME = "lib/foo.o", dependency name "lib/foo.c", and VPATH=src, searches for "src/lib/foo.c". */ if (vpath_search (&p, (time_t *) 0)) { DEBUGP2 ("Found dependency as `%s'.%s\n", p, ""); found_files[deps_found++] = p; continue; } /* We could not find the file in any place we should look. Try to make this dependency as an intermediate file, but only on the second pass. */ if (intermed_ok) { if (intermediate_file == 0) intermediate_file = (struct file *) alloca (sizeof (struct file)); DEBUGP2 ("Looking for a rule with %s file `%s'.\n", "intermediate", p); bzero ((char *) intermediate_file, sizeof (struct file)); intermediate_file->name = p; if (pattern_search (intermediate_file, 0, depth + 1, recursions + 1)) { p = savestring (p, strlen (p)); intermediate_patterns[deps_found] = intermediate_file->name; intermediate_file->name = p; intermediate_files[deps_found] = intermediate_file; intermediate_file = 0; /* Allocate an extra copy to go in FOUND_FILES, because every elt of FOUND_FILES is consumed or freed later. */ found_files[deps_found] = savestring (p, strlen (p)); ++deps_found; continue; } /* If we have tried to find P as an intermediate file and failed, mark that name as impossible so we won't go through the search again later. */ file_impossible (p); } /* A dependency of this rule does not exist. Therefore, this rule fails. */ break; } /* This rule is no longer `in use' for recursive searches. */ rule->in_use = 0; if (dep != 0) { /* This pattern rule does not apply. If some of its dependencies succeeded, free the data structure describing them. */ while (deps_found-- > 0) { register struct file *f = intermediate_files[deps_found]; free (found_files[deps_found]); if (f != 0 && (f->stem < f->name || f->stem > f->name + strlen (f->name))) free (f->stem); } } else /* This pattern rule does apply. Stop looking for one. */ break; } /* If we found an applicable rule without intermediate files, don't try with them. */ if (i < nrules) break; rule = 0; } /* RULE is nil if the loop went all the way through the list and everything failed. */ if (rule == 0) return 0; foundrule = i; /* If we are recursing, store the pattern that matched FILENAME in FILE->name for use in upper levels. 
*/ if (recursions > 0) /* Kludge-o-matic */ file->name = rule->targets[matches[foundrule]]; /* FOUND_FILES lists the dependencies for the rule we found. This includes the intermediate files, if any. Convert them into entries on the deps-chain of FILE. */ while (deps_found-- > 0) { register char *s; if (intermediate_files[deps_found] != 0) { /* If we need to use an intermediate file, make sure it is entered as a target, with the info that was found for it in the recursive pattern_search call. We know that the intermediate file did not already exist as a target; therefore we can assume that the deps and cmds of F below are null before we change them. */ struct file *imf = intermediate_files[deps_found]; register struct file *f = enter_file (imf->name); f->deps = imf->deps; f->cmds = imf->cmds; f->stem = imf->stem; imf = lookup_file (intermediate_patterns[deps_found]); if (imf != 0 && imf->precious) f->precious = 1; f->intermediate = 1; f->tried_implicit = 1; for (dep = f->deps; dep != 0; dep = dep->next) { dep->file = enter_file (dep->name); dep->name = 0; dep->file->tried_implicit |= dep->changed; } num_intermediates++; } dep = (struct dep *) xmalloc (sizeof (struct dep)); s = found_files[deps_found]; if (recursions == 0) { dep->name = 0; dep->file = lookup_file (s); if (dep->file == 0) /* enter_file consumes S's storage. */ dep->file = enter_file (s); else /* A copy of S is already allocated in DEP->file->name. So we can free S. */ free (s); } else { dep->name = s; dep->file = 0; dep->changed = 0; } if (intermediate_files[deps_found] == 0 && tryrules[foundrule]->terminal) { /* If the file actually existed (was not an intermediate file), and the rule that found it was a terminal one, then we want to mark the found file so that it will not have implicit rule search done for it. If we are not entering a `struct file' for it now, we indicate this with the `changed' flag. */ if (dep->file == 0) dep->changed = 1; else dep->file->tried_implicit = 1; } dep->next = file->deps; file->deps = dep; } if (!checked_lastslash[foundrule]) /* Always allocate new storage, since STEM might be on the stack for an intermediate file. */ file->stem = savestring (stem, stemlen); else { /* We want to prepend the directory from the original FILENAME onto the stem. */ file->stem = (char *) xmalloc (((lastslash + 1) - filename) + stemlen + 1); bcopy (filename, file->stem, (lastslash + 1) - filename); bcopy (stem, file->stem + ((lastslash + 1) - filename), stemlen); file->stem[((lastslash + 1) - filename) + stemlen] = '\0'; } file->cmds = rule->cmds; /* Put the targets other than the one that matched into FILE's `also_make' member. */ /* If there was only one target, there is nothing to do. */ if (rule->targets[1] != 0) for (i = 0; rule->targets[i] != 0; ++i) if (i != matches[foundrule]) { struct dep *new = (struct dep *) xmalloc (sizeof (struct dep)); new->name = p = (char *) xmalloc (rule->lens[i] + stemlen + 1); bcopy (rule->targets[i], p, rule->suffixes[i] - rule->targets[i] - 1); p += rule->suffixes[i] - rule->targets[i] - 1; bcopy (stem, p, stemlen); p += stemlen; bcopy (rule->suffixes[i], p, rule->lens[i] - (rule->suffixes[i] - rule->targets[i]) + 1); new->file = enter_file (new->name); new->next = file->also_make; file->also_make = new; } return 1; }
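/* Illustrative sketch (not part of the original GNU Make source): the stem
   arithmetic used in pattern_search above, shown on plain strings.  For a
   target pattern such as "%.o" matched against "foo.o", the stem is the part
   of the filename that the `%' stands for ("foo"), and its length is
   namelen - patternlen + 1.  The helper below is a hypothetical, simplified
   stand-alone version; the real code also handles directory prefixes
   (LASTSLASH) and archive members.  */

#include <string.h>

static int
example_stem (const char *pattern, const char *filename,
	      char *stem_out, unsigned int stem_max)
{
  const char *percent = strchr (pattern, '%');
  unsigned int patlen = strlen (pattern);
  unsigned int namelen = strlen (filename);
  unsigned int before, stemlen;

  if (percent == 0 || patlen > namelen)
    return 0;			/* no `%', or name too short to match */

  before = percent - pattern;	/* literal text before the `%' */
  stemlen = namelen - patlen + 1;

  /* The literal parts before and after the `%' must match exactly.  */
  if (strncmp (pattern, filename, before)
      || strcmp (percent + 1, filename + before + stemlen))
    return 0;

  if (stemlen >= stem_max)
    return 0;
  memcpy (stem_out, filename + before, stemlen);
  stem_out[stemlen] = '\0';
  return 1;			/* e.g. "lib%.a" vs "libfoo.a" gives "foo" */
}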
814175.c
/* $NoKeywords:$ */ /** * @file * * Config Fch LPC controller * * Init LPC Controller features. * * @xrefitem bom "File Content Label" "Release Content" * @e project: AGESA * @e sub-project: FCH * @e \$Revision: 44324 $ @e \$Date: 2010-12-22 17:16:51 +0800 (Wed, 22 Dec 2010) $ * */ /* ***************************************************************************** * * Copyright (c) 2011, Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **************************************************************************** */ #include "FchPlatform.h" #define FILECODE PROC_FCH_SPI_LPCLATE_FILECODE /** * FchInitLateLpc - Prepare Ir controller to boot to OS. * * @param[in] FchDataPtr Fch configuration structure pointer. * */ VOID FchInitLateLpc ( IN VOID *FchDataPtr ) { RwPci ((LPC_BUS_DEV_FUN << 16) + FCH_LPC_REGBB, AccessWidth8, 0xBF, BIT3 + BIT4 + BIT5, ((FCH_DATA_BLOCK *)FchDataPtr)->StdHeader); }
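/*
 * Illustrative sketch (not part of the original AGESA source): the RwPci
 * call above is a read-modify-write update of an 8-bit PCI config register,
 * ANDing with the mask (0xBF clears BIT6 here) and then ORing in the new
 * bits (BIT3..BIT5).  A minimal stand-alone model of that AND/OR update on
 * a plain byte, using hypothetical read/write callbacks, is shown below.
 */
#include <stdint.h>

typedef uint8_t (*EXAMPLE_READ8_FN) (uint32_t Address);
typedef void    (*EXAMPLE_WRITE8_FN) (uint32_t Address, uint8_t Value);

static void
ExampleRw8 (
  uint32_t           Address,
  uint8_t            AndMask,
  uint8_t            OrMask,
  EXAMPLE_READ8_FN   Read8,
  EXAMPLE_WRITE8_FN  Write8
  )
{
  uint8_t Value;

  Value = Read8 (Address);                         /* read current value   */
  Value = (uint8_t) ((Value & AndMask) | OrMask);  /* clear, then set bits */
  Write8 (Address, Value);                         /* write it back        */
}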
720191.c
/*** * IMPORTANT: Change the values of the variables marked by 'CHANGEME' ***/ #include <stdio.h> #include <stdlib.h> #include <math.h> #ifdef _CIVL $input double global_x[6]; $output int global_retval; $output double global_f; $output double global_grad[6]; #else //#pragma CIVL input double global_x[6]; //#pragma CIVL output int global_retval; //#pragma CIVL output double global_f; //#pragma CIVL output double global_grad[6]; #endif int g_fcn(double *obj, double g_x[6], const double x[6]); int main(void) { double x[6],obj,gradient[6]; int retval; int i; #ifndef _CIVL // for (i=0;i<6;i++) global_x[i] = pow(i,0.6); FILE* f1 = fopen("x.in", "r"); double in = 0; i = 0; while( fscanf(f1, "%lf,", &in) > 0 ) // parse %d followed by ',' { global_x[i++] = in; } fclose(f1); #endif // printf("==================== INPUT STARTS HERE ====================\n"); // for (i=0;i<6;i++) printf("global_x[%d] = %f\n", i, global_x[i]); // printf("==================== INPUT ENDS HERE ====================\n"); for (i=0;i<6;i++) x[i] = global_x[i]; // Flip second and third vertices if determinant is negative if ((x[1]-x[0])*(x[5]-x[3])-(x[4]-x[3])*(x[2]-x[0]) < 0.0) { obj = x[1]; x[1] = x[2]; x[2] = obj; obj = x[4]; x[4] = x[5]; x[5] = obj; } retval = g_fcn(&obj,gradient,x); global_retval = retval; #ifdef _CIVL // printf("==================== RESULT STARTS HERE ====================\n"); #endif // printf("global_retval=%d\n", retval); global_f = obj; //printf("global_f=%f\n", obj); for (i=0;i<6;i++) global_grad[i] = gradient[i]; FILE* f2 = fopen("g.out", "w"); fprintf(f2, "%d\n", retval); for (i=0;i<6;i++) fprintf(f2, "%.17g\n", gradient[i]); fclose(f2); // for (i=0;i<6;i++) printf("global_grad[%d]=%f\n", i, gradient[i]); #ifdef _CIVL // printf("==================== RESULT ENDS HERE ====================\n"); #endif return 0; }
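/*
 * Illustrative sketch (not part of the original test harness): the vertex
 * flip in main() uses the 2D orientation determinant of the triangle whose
 * x coordinates are x[0..2] and whose y coordinates are x[3..5].  A negative
 * value means the vertices are ordered clockwise, so the second and third
 * vertices are swapped to make the orientation positive.  A stand-alone
 * version of that test, assuming the same coordinate layout, is shown here.
 */
static double example_orient2d(const double v[6])
{
  /* (x1-x0)*(y2-y0) - (y1-y0)*(x2-x0): >0 CCW, <0 CW, ==0 degenerate */
  return (v[1]-v[0])*(v[5]-v[3]) - (v[4]-v[3])*(v[2]-v[0]);
}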
833633.c
/* ** $Id: lua.c,v 1.160.1.2 2007/12/28 15:32:23 roberto Exp $ ** Lua stand-alone interpreter ** See Copyright Notice in lua.h */ /* * Most of this file is from lua.c */ #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "lua_core.h" #include "lua_interpreter.h" #include "lua.h" #include "lauxlib.h" #include "lualib.h" #include "load_jit_proto.h" #define ENABLE_PARSER_HOOK 1 #include "hook_parser.c" static lua_State *globalL = NULL; static const char *progname = LUA_PROGNAME; static void lstop (lua_State *L, lua_Debug *ar) { (void)ar; /* unused arg. */ lua_sethook(L, NULL, 0, 0); luaL_error(L, "interrupted!"); } static void laction (int i) { signal(i, SIG_DFL); /* if another SIGINT happens before lstop, terminate process (default action) */ lua_sethook(globalL, lstop, LUA_MASKCALL | LUA_MASKRET | LUA_MASKCOUNT, 1); } static void l_message (const char *pname, const char *msg) { if (pname) fprintf(stderr, "%s: ", pname); fprintf(stderr, "%s\n", msg); fflush(stderr); } static int report (lua_State *L, int status) { if (status && !lua_isnil(L, -1)) { const char *msg = lua_tostring(L, -1); if (msg == NULL) msg = "(error object is not a string)"; l_message(progname, msg); lua_pop(L, 1); } return status; } static int traceback (lua_State *L) { if (!lua_isstring(L, 1)) /* 'message' not a string? */ return 1; /* keep it intact */ lua_getfield(L, LUA_GLOBALSINDEX, "debug"); if (!lua_istable(L, -1)) { lua_pop(L, 1); return 1; } lua_getfield(L, -1, "traceback"); if (!lua_isfunction(L, -1)) { lua_pop(L, 2); return 1; } lua_pushvalue(L, 1); /* pass error message */ lua_pushinteger(L, 2); /* skip this function and traceback */ lua_call(L, 2, 1); /* call debug.traceback */ return 1; } static int docall (lua_State *L, int narg, int clear) { int status; int base = lua_gettop(L) - narg; /* function index */ lua_pushcfunction(L, traceback); /* push traceback function */ lua_insert(L, base); /* put it under chunk and args */ signal(SIGINT, laction); status = lua_pcall(L, narg, (clear ? 
0 : LUA_MULTRET), base); signal(SIGINT, SIG_DFL); lua_remove(L, base); /* remove traceback function */ /* force a complete garbage collection in case of errors */ if (status != 0) lua_gc(L, LUA_GCCOLLECT, 0); return status; } static int getargs (lua_State *L, int argc, char **argv) { int i; luaL_checkstack(L, argc + 3, "too many arguments to script"); for (i=0; i < argc; i++) lua_pushstring(L, argv[i]); lua_createtable(L, argc, 0); for (i=0; i < argc; i++) { lua_pushstring(L, argv[i]); lua_rawseti(L, -2, i); } return argc; } static int dofile (lua_State *L, const char *name) { int status = luaL_loadfile(L, name) || docall(L, 0, 1); return report(L, status); } static int dostring (lua_State *L, const char *s, const char *name) { int status = luaL_loadbuffer(L, s, strlen(s), name) || docall(L, 0, 1); return report(L, status); } static int handle_script (lua_State *L, int argc, char **argv) { int status; int narg = getargs(L, argc, argv); /* collect arguments */ lua_setglobal(L, "arg"); status = load_compiled_protos(L, &jit_proto_init); lua_insert(L, -(narg+1)); if (status == 0) status = docall(L, narg, 0); else lua_pop(L, narg); return report(L, status); } /* check that argument has no extra characters at the end */ #define notail(x) {if ((x)[2] != '\0') return -1;} static int handle_luainit (lua_State *L) { const char *init = getenv(LUA_INIT); if (init == NULL) return 0; /* status OK */ else if (init[0] == '@') return dofile(L, init+1); else return dostring(L, init, "=" LUA_INIT); } struct Smain { int argc; char **argv; int status; }; static int pmain (lua_State *L) { struct Smain *s = (struct Smain *)lua_touserdata(L, 1); char **argv = s->argv; globalL = L; if (argv[0] && argv[0][0]) progname = argv[0]; lua_gc(L, LUA_GCSTOP, 0); /* stop collector during initialization */ luaL_openlibs(L); /* open libraries */ lua_gc(L, LUA_GCRESTART, 0); s->status = handle_luainit(L); if (s->status != 0) return 0; s->status = handle_script(L, s->argc, argv); if (s->status != 0) return 0; return 0; } int main (int argc, char **argv) { int status; struct Smain s; lua_State *L = lua_open(); /* create state */ if (L == NULL) { l_message(argv[0], "cannot create state: not enough memory"); return EXIT_FAILURE; } s.argc = argc; s.argv = argv; status = lua_cpcall(L, &pmain, &s); report(L, status); lua_close(L); return (status || s.status) ? EXIT_FAILURE : EXIT_SUCCESS; }
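/*
** Illustrative sketch (not part of the original interpreter source): the
** laction/lstop pair above implements a "one-shot" SIGINT handler.  The
** handler immediately restores SIG_DFL, so a second Ctrl-C terminates the
** process, while the first one only requests a graceful stop (via a debug
** hook in the real code, via a plain flag in this simplified stand-alone
** model).
*/
#include <signal.h>

static volatile sig_atomic_t example_stop_requested = 0;

static void example_onint (int i) {
  signal(i, SIG_DFL);          /* second SIGINT: default action (terminate) */
  example_stop_requested = 1;  /* first SIGINT: just ask the loop to stop */
}

/* typical use: signal(SIGINT, example_onint); then long-running work checks
   example_stop_requested periodically; finally signal(SIGINT, SIG_DFL). */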
547485.c
/* ************************************************************************** */ /* */ /* ::: :::::::: */ /* ft_freesplit.c :+: :+: :+: */ /* +:+ +:+ +:+ */ /* By: snicolet <[email protected]> +#+ +:+ +#+ */ /* +#+#+#+#+#+ +#+ */ /* Created: 2016/08/25 20:46:15 by snicolet #+# #+# */ /* Updated: 2016/09/14 02:16:54 by edelangh ### ########.fr */ /* */ /* ************************************************************************** */ #include "libft.h" #include <stdlib.h> void ft_freesplit(char **split) { int p; if (!split) return ; p = 0; while (split[p]) free(split[p++]); free(split); } void ft_freesplit_multi(char ***split, int size) { if (size < 0) return ; while (size--) ft_freesplit(split[size]); }
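/*
** Illustrative usage sketch (not part of the original libft file): the
** tables freed by ft_freesplit are NULL-terminated arrays of malloc'ed
** strings.  Built by hand (error handling omitted, POSIX strdup assumed)
** one looks like this; ft_freesplit then releases every string and the
** array itself.
*/
#include <string.h>

static char	**example_make_split(void)
{
	char	**split;

	split = (char **)malloc(sizeof(char *) * 3);
	if (!split)
		return (NULL);
	split[0] = strdup("hello");
	split[1] = strdup("world");
	split[2] = NULL;
	return (split);
}

/* usage: char **words = example_make_split(); ... ; ft_freesplit(words); */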
859122.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 32; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-6,8),ceild(8*t2-Nz-19,32));t3<=min(floord(4*Nt+Ny-9,32),floord(4*t1+Ny-1,32));t3++) { for (t4=max(max(ceild(t1-126,128),ceild(8*t2-Nz-499,512)),ceild(32*t3-Ny-499,512));t4<=min(min(floord(4*Nt+Nx-9,512),floord(4*t1+Nx-1,512)),floord(32*t3+Nx+19,512));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),8*t3+6),128*t4+126);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) { lbv=max(512*t4,4*t5+4); ubv=min(512*t4+511,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ 
(-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
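/*
 * Illustrative sketch (not part of the original benchmark): the update in
 * the tiled loop nest writes A[(t+1)%2][...] from A[t%2][...], i.e. it
 * double-buffers the time dimension so only two time levels are ever
 * stored.  A minimal 1D stand-alone version of that pattern is shown here.
 */
static void example_double_buffer_1d(double u[2][16], int Nt)
{
  int t, i;
  for (t = 0; t < Nt; t++) {
    /* read from time level t%2, write to time level (t+1)%2 */
    for (i = 1; i < 15; i++)
      u[(t + 1) % 2][i] = 0.5 * (u[t % 2][i - 1] + u[t % 2][i + 1]);
  }
}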
950157.c
/* * tramp-ppc.c: JIT trampoline code for PowerPC * * Authors: * Dietmar Maurer ([email protected]) * Paolo Molaro ([email protected]) * Carlos Valiente <[email protected]> * Andreas Faerber <[email protected]> * * (C) 2001 Ximian, Inc. * (C) 2007-2008 Andreas Faerber */ #include <config.h> #include <glib.h> #include <mono/metadata/appdomain.h> #include <mono/metadata/marshal.h> #include <mono/metadata/tabledefs.h> #include <mono/arch/ppc/ppc-codegen.h> #include "mini.h" #include "mini-ppc.h" static guint8* nullified_class_init_trampoline; /* Same as mono_create_ftnptr, but doesn't require a domain */ static gpointer mono_ppc_create_ftnptr (guint8 *code) { #ifdef PPC_USES_FUNCTION_DESCRIPTOR MonoPPCFunctionDescriptor *ftnptr = mono_global_codeman_reserve (sizeof (MonoPPCFunctionDescriptor)); ftnptr->code = code; ftnptr->toc = NULL; ftnptr->env = NULL; return ftnptr; #else return code; #endif } /* * Return the instruction to jump from code to target, 0 if not * reachable with a single instruction */ static guint32 branch_for_target_reachable (guint8 *branch, guint8 *target) { gint diff = target - branch; g_assert ((diff & 3) == 0); if (diff >= 0) { if (diff <= 33554431) return (18 << 26) | (diff); } else { /* diff between 0 and -33554432 */ if (diff >= -33554432) return (18 << 26) | (diff & ~0xfc000000); } return 0; } /* * get_unbox_trampoline: * @m: method pointer * @addr: pointer to native code for @m * * when value type methods are called through the vtable we need to unbox the * this argument. This method returns a pointer to a trampoline which does * unboxing before calling the method */ gpointer mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr) { guint8 *code, *start; int this_pos = 3; guint32 short_branch; MonoDomain *domain = mono_domain_get (); int size = MONO_PPC_32_64_CASE (20, 32) + PPC_FTNPTR_SIZE; addr = mono_get_addr_from_ftnptr (addr); mono_domain_lock (domain); start = code = mono_domain_code_reserve (domain, size); code = mono_ppc_create_pre_code_ftnptr (code); short_branch = branch_for_target_reachable (code + 4, addr); if (short_branch) mono_domain_code_commit (domain, code, size, 8); mono_domain_unlock (domain); if (short_branch) { ppc_addi (code, this_pos, this_pos, sizeof (MonoObject)); ppc_emit32 (code, short_branch); } else { ppc_load_ptr (code, ppc_r0, addr); ppc_mtctr (code, ppc_r0); ppc_addi (code, this_pos, this_pos, sizeof (MonoObject)); ppc_bcctr (code, 20, 0); } mono_arch_flush_icache (start, code - start); g_assert ((code - start) <= size); /*g_print ("unbox trampoline at %d for %s:%s\n", this_pos, m->klass->name, m->name); g_print ("unbox code is at %p for method at %p\n", start, addr);*/ return start; } /* * mono_arch_get_static_rgctx_trampoline: * * Create a trampoline which sets RGCTX_REG to MRGCTX, then jumps to ADDR. 
*/ gpointer mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr) { guint8 *code, *start, *p; guint8 imm_buf [128]; guint32 short_branch; MonoDomain *domain = mono_domain_get (); int imm_size; int size = MONO_PPC_32_64_CASE (24, (PPC_LOAD_SEQUENCE_LENGTH * 2) + 8) + PPC_FTNPTR_SIZE; addr = mono_get_addr_from_ftnptr (addr); /* Compute size of code needed to emit mrgctx */ p = imm_buf; ppc_load_ptr (p, MONO_ARCH_RGCTX_REG, mrgctx); imm_size = p - imm_buf; mono_domain_lock (domain); start = code = mono_domain_code_reserve (domain, size); code = mono_ppc_create_pre_code_ftnptr (code); short_branch = branch_for_target_reachable (code + imm_size, addr); if (short_branch) mono_domain_code_commit (domain, code, size, imm_size + 4); mono_domain_unlock (domain); if (short_branch) { ppc_load_ptr (code, MONO_ARCH_RGCTX_REG, mrgctx); ppc_emit32 (code, short_branch); } else { ppc_load_ptr (code, ppc_r0, addr); ppc_mtctr (code, ppc_r0); ppc_load_ptr (code, MONO_ARCH_RGCTX_REG, mrgctx); ppc_bcctr (code, 20, 0); } mono_arch_flush_icache (start, code - start); g_assert ((code - start) <= size); return start; } void mono_arch_patch_callsite (guint8 *method_start, guint8 *code_ptr, guint8 *addr) { guint32 *code = (guint32*)code_ptr; addr = mono_get_addr_from_ftnptr (addr); /* This is the 'blrl' instruction */ --code; /* * Note that methods are called also with the bl opcode. */ if (((*code) >> 26) == 18) { /*g_print ("direct patching\n");*/ ppc_patch ((guint8*)code, addr); mono_arch_flush_icache ((guint8*)code, 4); return; } /* Sanity check */ g_assert (mono_ppc_is_direct_call_sequence (code)); ppc_patch ((guint8*)code, addr); } void mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr) { guint32 ins1, ins2, offset; /* Patch the jump table entry used by the plt entry */ /* Should be a lis+ori */ ins1 = ((guint32*)code)[0]; g_assert (ins1 >> 26 == 15); ins2 = ((guint32*)code)[1]; g_assert (ins2 >> 26 == 24); offset = ((ins1 & 0xffff) << 16) | (ins2 & 0xffff); /* Either got or regs is set */ if (!got) got = (gpointer*)(gsize) regs [30]; *(guint8**)((guint8*)got + offset) = addr; } void mono_arch_nullify_class_init_trampoline (guint8 *code, mgreg_t *regs) { mono_arch_patch_callsite (NULL, code, nullified_class_init_trampoline); } void mono_arch_nullify_plt_entry (guint8 *code, mgreg_t *regs) { if (mono_aot_only && !nullified_class_init_trampoline) nullified_class_init_trampoline = mono_aot_get_trampoline ("nullified_class_init_trampoline"); mono_arch_patch_plt_entry (code, NULL, regs, nullified_class_init_trampoline); } /* Stack size for trampoline function * PPC_MINIMAL_STACK_SIZE + 16 (args + alignment to ppc_magic_trampoline) * + MonoLMF + 14 fp regs + 13 gregs + alignment */ #define STACK (((PPC_MINIMAL_STACK_SIZE + 4 * sizeof (mgreg_t) + sizeof (MonoLMF) + 14 * sizeof (double) + 31 * sizeof (mgreg_t)) + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~(MONO_ARCH_FRAME_ALIGNMENT - 1)) /* Method-specific trampoline code fragment size */ #define METHOD_TRAMPOLINE_SIZE 64 /* Jump-specific trampoline code fragment size */ #define JUMP_TRAMPOLINE_SIZE 64 #ifdef PPC_USES_FUNCTION_DESCRIPTOR #define PPC_TOC_REG ppc_r2 #else #define PPC_TOC_REG -1 #endif /* * Stack frame description when the generic trampoline is called. 
* caller frame * -------------------- * MonoLMF * ------------------- * Saved FP registers 0-13 * ------------------- * Saved general registers 0-30 * ------------------- * param area for 3 args to ppc_magic_trampoline * ------------------- * linkage area * ------------------- */ guchar* mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot) { guint8 *buf, *code = NULL; int i, offset; gconstpointer tramp_handler; int size = MONO_PPC_32_64_CASE (600, 800); GSList *unwind_ops = NULL; MonoJumpInfo *ji = NULL; /* Now we'll create in 'buf' the PowerPC trampoline code. This is the trampoline code common to all methods */ code = buf = mono_global_codeman_reserve (size); ppc_str_update (code, ppc_r1, -STACK, ppc_r1); /* start building the MonoLMF on the stack */ offset = STACK - sizeof (double) * MONO_SAVED_FREGS; for (i = 14; i < 32; i++) { ppc_stfd (code, i, offset, ppc_r1); offset += sizeof (double); } /* * now the integer registers. */ offset = STACK - sizeof (MonoLMF) + G_STRUCT_OFFSET (MonoLMF, iregs); ppc_str_multiple (code, ppc_r13, offset, ppc_r1); /* Now save the rest of the registers below the MonoLMF struct, first 14 * fp regs and then the 31 gregs. */ offset = STACK - sizeof (MonoLMF) - (14 * sizeof (double)); for (i = 0; i < 14; i++) { ppc_stfd (code, i, offset, ppc_r1); offset += sizeof (double); } #define GREGS_OFFSET (STACK - sizeof (MonoLMF) - (14 * sizeof (double)) - (31 * sizeof (mgreg_t))) offset = GREGS_OFFSET; for (i = 0; i < 31; i++) { ppc_str (code, i, offset, ppc_r1); offset += sizeof (mgreg_t); } /* we got here through a jump to the ctr reg, we must save the lr * in the parent frame (we do it here to reduce the size of the * method-specific trampoline) */ ppc_mflr (code, ppc_r0); ppc_str (code, ppc_r0, STACK + PPC_RET_ADDR_OFFSET, ppc_r1); /* ok, now we can continue with the MonoLMF setup, mostly untouched * from emit_prolog in mini-ppc.c */ if (aot) { code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr"); #ifdef PPC_USES_FUNCTION_DESCRIPTOR ppc_ldptr (code, ppc_r2, sizeof (gpointer), ppc_r11); ppc_ldptr (code, ppc_r11, 0, ppc_r11); #endif ppc_mtlr (code, ppc_r11); ppc_blrl (code); } else { ppc_load_func (code, ppc_r0, mono_get_lmf_addr); ppc_mtlr (code, ppc_r0); ppc_blrl (code); } /* we build the MonoLMF structure on the stack - see mini-ppc.h * The pointer to the struct is put in ppc_r11. */ ppc_addi (code, ppc_r11, ppc_sp, STACK - sizeof (MonoLMF)); ppc_stptr (code, ppc_r3, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r11); /* new_lmf->previous_lmf = *lmf_addr */ ppc_ldptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3); ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r11); /* *(lmf_addr) = r11 */ ppc_stptr (code, ppc_r11, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r3); /* save method info (it's stored on the stack, so get it first). 
*/ if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP)) { ppc_ldr (code, ppc_r0, GREGS_OFFSET, ppc_r1); ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, method), ppc_r11); } else { ppc_load (code, ppc_r0, 0); ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, method), ppc_r11); } /* store the frame pointer of the calling method */ ppc_addi (code, ppc_r0, ppc_sp, STACK); ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, ebp), ppc_r11); /* save the IP (caller ip) */ if (tramp_type == MONO_TRAMPOLINE_JUMP) { ppc_li (code, ppc_r0, 0); } else { ppc_ldr (code, ppc_r0, STACK + PPC_RET_ADDR_OFFSET, ppc_r1); } ppc_stptr (code, ppc_r0, G_STRUCT_OFFSET(MonoLMF, eip), ppc_r11); /* * Now we're ready to call trampoline (mgreg_t *regs, guint8 *code, gpointer value, guint8 *tramp) * Note that the last argument is unused. */ /* Arg 1: a pointer to the registers */ ppc_addi (code, ppc_r3, ppc_r1, GREGS_OFFSET); /* Arg 2: code (next address to the instruction that called us) */ if (tramp_type == MONO_TRAMPOLINE_JUMP) ppc_li (code, ppc_r4, 0); else ppc_ldr (code, ppc_r4, STACK + PPC_RET_ADDR_OFFSET, ppc_r1); /* Arg 3: trampoline argument */ if (tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT) ppc_ldr (code, ppc_r5, GREGS_OFFSET + MONO_ARCH_VTABLE_REG * sizeof (mgreg_t), ppc_r1); else ppc_ldr (code, ppc_r5, GREGS_OFFSET, ppc_r1); if (aot) { code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("trampoline_func_%d", tramp_type)); #ifdef PPC_USES_FUNCTION_DESCRIPTOR ppc_ldptr (code, ppc_r2, sizeof (gpointer), ppc_r11); ppc_ldptr (code, ppc_r11, 0, ppc_r11); #endif ppc_mtlr (code, ppc_r11); ppc_blrl (code); } else { tramp_handler = mono_get_trampoline_func (tramp_type); ppc_load_func (code, ppc_r0, tramp_handler); ppc_mtlr (code, ppc_r0); ppc_blrl (code); } /* OK, code address is now on r3. Move it to the counter reg * so it will be ready for the final jump: this is safe since we * won't do any more calls. */ if (!MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) { #ifdef PPC_USES_FUNCTION_DESCRIPTOR ppc_ldptr (code, ppc_r2, sizeof (gpointer), ppc_r3); ppc_ldptr (code, ppc_r3, 0, ppc_r3); #endif ppc_mtctr (code, ppc_r3); } /* * Now we restore the MonoLMF (see emit_epilogue in mini-ppc.c) * and the rest of the registers, so the method called will see * the same state as before we executed. * The pointer to MonoLMF is in ppc_r11. 
*/ ppc_addi (code, ppc_r11, ppc_r1, STACK - sizeof (MonoLMF)); /* r5 = previous_lmf */ ppc_ldptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r11); /* r6 = lmf_addr */ ppc_ldptr (code, ppc_r6, G_STRUCT_OFFSET(MonoLMF, lmf_addr), ppc_r11); /* *(lmf_addr) = previous_lmf */ ppc_stptr (code, ppc_r5, G_STRUCT_OFFSET(MonoLMF, previous_lmf), ppc_r6); /* restore iregs */ ppc_ldr_multiple (code, ppc_r13, G_STRUCT_OFFSET(MonoLMF, iregs), ppc_r11); /* restore fregs */ for (i = 14; i < 32; i++) ppc_lfd (code, i, G_STRUCT_OFFSET(MonoLMF, fregs) + ((i-14) * sizeof (gdouble)), ppc_r11); /* restore the volatile registers, we skip r1, of course */ offset = STACK - sizeof (MonoLMF) - (14 * sizeof (double)); for (i = 0; i < 14; i++) { ppc_lfd (code, i, offset, ppc_r1); offset += sizeof (double); } offset = STACK - sizeof (MonoLMF) - (14 * sizeof (double)) - (31 * sizeof (mgreg_t)); ppc_ldr (code, ppc_r0, offset, ppc_r1); offset += 2 * sizeof (mgreg_t); for (i = 2; i < 13; i++) { if (i != PPC_TOC_REG && (i != 3 || tramp_type != MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)) ppc_ldr (code, i, offset, ppc_r1); offset += sizeof (mgreg_t); } /* Non-standard function epilogue. Instead of doing a proper * return, we just jump to the compiled code. */ /* Restore stack pointer and LR and jump to the code */ ppc_ldr (code, ppc_r1, 0, ppc_r1); ppc_ldr (code, ppc_r11, PPC_RET_ADDR_OFFSET, ppc_r1); ppc_mtlr (code, ppc_r11); if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) ppc_blr (code); else ppc_bcctr (code, 20, 0); /* Flush instruction cache, since we've generated code */ mono_arch_flush_icache (buf, code - buf); /* Sanity check */ g_assert ((code - buf) <= size); if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) { /* Initialize the nullified class init trampoline */ nullified_class_init_trampoline = mono_ppc_create_ftnptr (mono_arch_get_nullified_class_init_trampoline (NULL)); } if (info) *info = mono_tramp_info_create (mono_get_generic_trampoline_name (tramp_type), buf, code - buf, ji, unwind_ops); return buf; } #define TRAMPOLINE_SIZE (MONO_PPC_32_64_CASE (24, (5+5+1+1)*4)) gpointer mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len) { guint8 *code, *buf, *tramp; guint32 short_branch; tramp = mono_get_trampoline_code (tramp_type); mono_domain_lock (domain); code = buf = mono_domain_code_reserve_align (domain, TRAMPOLINE_SIZE, 4); short_branch = branch_for_target_reachable (code + MONO_PPC_32_64_CASE (8, 5*4), tramp); #ifdef __mono_ppc64__ /* FIXME: make shorter if possible */ #else if (short_branch) mono_domain_code_commit (domain, code, TRAMPOLINE_SIZE, 12); #endif mono_domain_unlock (domain); if (short_branch) { ppc_load_sequence (code, ppc_r0, (mgreg_t)(gsize) arg1); ppc_emit32 (code, short_branch); } else { /* Prepare the jump to the generic trampoline code.*/ ppc_load_ptr (code, ppc_r0, tramp); ppc_mtctr (code, ppc_r0); /* And finally put 'arg1' in r0 and fly! 
*/ ppc_load_ptr (code, ppc_r0, arg1); ppc_bcctr (code, 20, 0); } /* Flush instruction cache, since we've generated code */ mono_arch_flush_icache (buf, code - buf); g_assert ((code - buf) <= TRAMPOLINE_SIZE); if (code_len) *code_len = code - buf; return buf; } static guint8* emit_trampoline_jump (guint8 *code, guint8 *tramp) { guint32 short_branch = branch_for_target_reachable (code, tramp); /* FIXME: we can save a few bytes here by committing if the short branch is possible */ if (short_branch) { ppc_emit32 (code, short_branch); } else { ppc_load_ptr (code, ppc_r0, tramp); ppc_mtctr (code, ppc_r0); ppc_bcctr (code, 20, 0); } return code; } gpointer mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot) { #ifdef MONO_ARCH_VTABLE_REG guint8 *tramp; guint8 *code, *buf; guint8 **rgctx_null_jumps; int tramp_size; int depth, index; int i; gboolean mrgctx; MonoJumpInfo *ji = NULL; GSList *unwind_ops = NULL; mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot); index = MONO_RGCTX_SLOT_INDEX (slot); if (mrgctx) index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer); for (depth = 0; ; ++depth) { int size = mono_class_rgctx_get_array_size (depth, mrgctx); if (index < size - 1) break; index -= size - 1; } tramp_size = MONO_PPC_32_64_CASE (40, 52) + 12 * depth; if (mrgctx) tramp_size += 4; else tramp_size += 12; if (aot) tramp_size += 32; code = buf = mono_global_codeman_reserve (tramp_size); rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2)); if (mrgctx) { /* get mrgctx ptr */ ppc_mr (code, ppc_r4, PPC_FIRST_ARG_REG); } else { /* load rgctx ptr from vtable */ ppc_ldptr (code, ppc_r4, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), PPC_FIRST_ARG_REG); /* is the rgctx ptr null? */ ppc_compare_reg_imm (code, 0, ppc_r4, 0); /* if yes, jump to actual trampoline */ rgctx_null_jumps [0] = code; ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0); } for (i = 0; i < depth; ++i) { /* load ptr to next array */ if (mrgctx && i == 0) ppc_ldptr (code, ppc_r4, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, ppc_r4); else ppc_ldptr (code, ppc_r4, 0, ppc_r4); /* is the ptr null? */ ppc_compare_reg_imm (code, 0, ppc_r4, 0); /* if yes, jump to actual trampoline */ rgctx_null_jumps [i + 1] = code; ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0); } /* fetch slot */ ppc_ldptr (code, ppc_r4, sizeof (gpointer) * (index + 1), ppc_r4); /* is the slot null? */ ppc_compare_reg_imm (code, 0, ppc_r4, 0); /* if yes, jump to actual trampoline */ rgctx_null_jumps [depth + 1] = code; ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0); /* otherwise return r4 */ /* FIXME: if we use r3 as the work register we can avoid this copy */ ppc_mr (code, ppc_r3, ppc_r4); ppc_blr (code); for (i = mrgctx ? 
1 : 0; i <= depth + 1; ++i) ppc_patch (rgctx_null_jumps [i], code); g_free (rgctx_null_jumps); /* move the rgctx pointer to the VTABLE register */ ppc_mr (code, MONO_ARCH_VTABLE_REG, ppc_r3); if (aot) { code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot)); /* Branch to the trampoline */ #ifdef PPC_USES_FUNCTION_DESCRIPTOR ppc_ldptr (code, ppc_r11, 0, ppc_r11); #endif ppc_mtctr (code, ppc_r11); ppc_bcctr (code, PPC_BR_ALWAYS, 0); } else { tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL); /* jump to the actual trampoline */ code = emit_trampoline_jump (code, tramp); } mono_arch_flush_icache (buf, code - buf); g_assert (code - buf <= tramp_size); if (info) { char *name = mono_get_rgctx_fetch_trampoline_name (slot); *info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops); g_free (name); } return buf; #else g_assert_not_reached (); #endif } gpointer mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot) { guint8 *tramp; guint8 *code, *buf; static int byte_offset = -1; static guint8 bitmask; guint8 *jump; int tramp_size; GSList *unwind_ops = NULL; MonoJumpInfo *ji = NULL; tramp_size = MONO_PPC_32_64_CASE (32, 44); if (aot) tramp_size += 32; code = buf = mono_global_codeman_reserve (tramp_size); if (byte_offset < 0) mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask); ppc_lbz (code, ppc_r4, byte_offset, MONO_ARCH_VTABLE_REG); ppc_andid (code, ppc_r4, ppc_r4, bitmask); jump = code; ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0); ppc_blr (code); ppc_patch (jump, code); if (aot) { code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init"); /* Branch to the trampoline */ #ifdef PPC_USES_FUNCTION_DESCRIPTOR ppc_ldptr (code, ppc_r11, 0, ppc_r11); #endif ppc_mtctr (code, ppc_r11); ppc_bcctr (code, PPC_BR_ALWAYS, 0); } else { tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), NULL); /* jump to the actual trampoline */ code = emit_trampoline_jump (code, tramp); } mono_arch_flush_icache (buf, code - buf); g_assert (code - buf <= tramp_size); if (info) *info = mono_tramp_info_create ("generic_class_init_trampoline", buf, code - buf, ji, unwind_ops); return buf; } gpointer mono_arch_get_nullified_class_init_trampoline (MonoTrampInfo **info) { guint8 *code, *buf; guint32 tramp_size = 64; code = buf = mono_global_codeman_reserve (tramp_size); ppc_blr (code); mono_arch_flush_icache (buf, code - buf); g_assert (code - buf <= tramp_size); if (info) *info = mono_tramp_info_create ("nullified_class_init_trampoline", buf, code - buf, NULL, NULL); return buf; } guint8* mono_arch_get_call_target (guint8 *code) { /* Should be a bl */ guint32 ins = ((guint32*)(gpointer)code) [-1]; if ((ins >> 26 == 18) && ((ins & 1) == 1) && ((ins & 2) == 0)) { gint32 disp = (((gint32)ins) >> 2) & 0xffffff; guint8 *target = code - 4 + (disp * 4); return target; } else { return NULL; } } guint32 mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code) { #ifdef PPC_USES_FUNCTION_DESCRIPTOR return ((guint32*)plt_entry) [8]; #else return ((guint32*)plt_entry) [6]; #endif }
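/*
 * Illustrative sketch (not part of the original Mono source):
 * branch_for_target_reachable above encodes a PowerPC I-form branch
 * ("b target"), whose signed 26-bit byte displacement covers roughly
 * +/-32 MB from the branch instruction.  A stand-alone reachability test
 * for that range, assuming 32-bit-aligned code addresses, is shown here.
 */
#include <stddef.h>
#include <stdint.h>

static int
example_ppc_branch_reachable (uint8_t *branch, uint8_t *target)
{
	ptrdiff_t diff = target - branch;

	if (diff & 3)
		return 0;	/* displacements must be word aligned */
	/* displacement field is 26 bits, signed: [-2^25, 2^25 - 1] bytes */
	return diff >= -33554432 && diff <= 33554431;
}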
977611.c
/* * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The OpenAirInterface Software Alliance licenses this file to You under * the OAI Public License, Version 1.1 (the "License"); you may not use this file * except in compliance with the License. * You may obtain a copy of the License at * * http://www.openairinterface.org/?page_id=698 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *------------------------------------------------------------------------------- * For more information about the OpenAirInterface (OAI) Software Alliance: * [email protected] */ #define RLC_UM_MODULE 1 #define RLC_UM_DAR_C 1 #include "platform_types.h" #include "assertions.h" //----------------------------------------------------------------------------- #include "msc.h" #include "rlc.h" #include "rlc_um.h" #include "rlc_primitives.h" #include "mac_primitives.h" #include "list.h" #include "common/utils/LOG/log.h" #include "common/utils/LOG/vcd_signal_dumper.h" //----------------------------------------------------------------------------- signed int rlc_um_get_pdu_infos( const protocol_ctxt_t* const ctxt_pP, const rlc_um_entity_t* const rlc_pP, rlc_um_pdu_sn_10_t * const header_pP, const sdu_size_t total_sizeP, rlc_um_pdu_info_t * const pdu_info_pP, const uint8_t sn_lengthP) { sdu_size_t sum_li = 0; memset(pdu_info_pP, 0, sizeof (rlc_um_pdu_info_t)); pdu_info_pP->num_li = 0; //AssertFatal( total_sizeP > 0 , "RLC UM PDU LENGTH %d", total_sizeP); if(total_sizeP <= 0) { LOG_E(RLC, "RLC UM PDU LENGTH %d\n", total_sizeP); return -1; } if (sn_lengthP == 10) { pdu_info_pP->fi = (header_pP->b1 >> 3) & 0x03; pdu_info_pP->e = (header_pP->b1 >> 2) & 0x01; pdu_info_pP->sn = header_pP->b2 + (((uint16_t)(header_pP->b1 & 0x03)) << 8); pdu_info_pP->header_size = 2; pdu_info_pP->payload = &header_pP->data[0]; } else if (sn_lengthP == 5) { pdu_info_pP->fi = (header_pP->b1 >> 6) & 0x03; pdu_info_pP->e = (header_pP->b1 >> 5) & 0x01; pdu_info_pP->sn = header_pP->b1 & 0x1F; pdu_info_pP->header_size = 1; pdu_info_pP->payload = &header_pP->b2; } else { //AssertFatal( sn_lengthP == 5 || sn_lengthP == 10, "RLC UM SN LENGTH %d", sn_lengthP); if(!(sn_lengthP == 5 || sn_lengthP == 10)) { LOG_E(RLC, "RLC UM SN LENGTH %d\n", sn_lengthP); return -1; } } if (pdu_info_pP->e) { rlc_am_e_li_t *e_li_p; unsigned int li_length_in_bytes = 1; unsigned int li_to_read = 1; e_li_p = (rlc_am_e_li_t*)(pdu_info_pP->payload); while (li_to_read) { li_length_in_bytes = li_length_in_bytes ^ 3; if (li_length_in_bytes == 2) { //AssertFatal( total_sizeP >= ((uint64_t)(&e_li_p->b2) - (uint64_t)header_pP), // "DECODING PDU TOO FAR PDU size %d", total_sizeP); if(total_sizeP < ((uint64_t)(&e_li_p->b2) - (uint64_t)header_pP)) { LOG_E(RLC, "DECODING PDU TOO FAR PDU size %d\n", total_sizeP); return -1; } pdu_info_pP->li_list[pdu_info_pP->num_li] = ((uint16_t)(e_li_p->b1 << 4)) & 0x07F0; pdu_info_pP->li_list[pdu_info_pP->num_li] |= (((uint8_t)(e_li_p->b2 >> 4)) & 0x000F); li_to_read = e_li_p->b1 & 0x80; pdu_info_pP->header_size += 2; } else { //AssertFatal( total_sizeP >= ((uint64_t)(&e_li_p->b3) - (uint64_t)header_pP), // "DECODING PDU TOO 
FAR PDU size %d", total_sizeP); if(total_sizeP < ((uint64_t)(&e_li_p->b3) - (uint64_t)header_pP)) { LOG_E(RLC, "DECODING PDU TOO FAR PDU size %d\n", total_sizeP); return -1; } pdu_info_pP->li_list[pdu_info_pP->num_li] = ((uint16_t)(e_li_p->b2 << 8)) & 0x0700; pdu_info_pP->li_list[pdu_info_pP->num_li] |= e_li_p->b3; li_to_read = e_li_p->b2 & 0x08; e_li_p++; pdu_info_pP->header_size += 1; } //AssertFatal( pdu_info_pP->num_li <= RLC_UM_SEGMENT_NB_MAX_LI_PER_PDU, // PROTOCOL_RLC_UM_CTXT_FMT"[GET PDU INFO] SN %04d TOO MANY LIs ", // PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), // pdu_info_pP->sn); if(pdu_info_pP->num_li > RLC_UM_SEGMENT_NB_MAX_LI_PER_PDU) { LOG_E(RLC, PROTOCOL_RLC_UM_CTXT_FMT"[GET PDU INFO] SN %04d TOO MANY LIs \n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), pdu_info_pP->sn); return -1; } sum_li += pdu_info_pP->li_list[pdu_info_pP->num_li]; pdu_info_pP->num_li = pdu_info_pP->num_li + 1; if (pdu_info_pP->num_li > RLC_UM_SEGMENT_NB_MAX_LI_PER_PDU) { return -2; } } if (li_length_in_bytes == 2) { pdu_info_pP->payload = &e_li_p->b3; } else { pdu_info_pP->payload = &e_li_p->b1; } } pdu_info_pP->payload_size = total_sizeP - pdu_info_pP->header_size; if (pdu_info_pP->payload_size > sum_li) { pdu_info_pP->hidden_size = pdu_info_pP->payload_size - sum_li; } return 0; } //----------------------------------------------------------------------------- int rlc_um_read_length_indicators(unsigned char**data_ppP, rlc_um_e_li_t* e_liP, unsigned int* li_array_pP, unsigned int *num_li_pP, sdu_size_t *data_size_pP) { int continue_loop = 1; unsigned int e1 = 0; unsigned int li1 = 0; unsigned int e2 = 0; unsigned int li2 = 0; *num_li_pP = 0; while ((continue_loop)) { //msg("[RLC_UM] e_liP->b1 = %02X\n", e_liP->b1); //msg("[RLC_UM] e_liP->b2 = %02X\n", e_liP->b2); e1 = ((unsigned int)e_liP->b1 & 0x00000080) >> 7; li1 = (((unsigned int)e_liP->b1 & 0x0000007F) << 4) + (((unsigned int)e_liP->b2 & 0x000000F0) >> 4); li_array_pP[*num_li_pP] = li1; *data_size_pP = *data_size_pP - li1 - 2; *num_li_pP = *num_li_pP +1; if ((e1)) { e2 = ((unsigned int)e_liP->b2 & 0x00000008) >> 3; li2 = (((unsigned int)e_liP->b2 & 0x00000007) << 8) + ((unsigned int)e_liP->b3 & 0x000000FF); li_array_pP[*num_li_pP] = li2; *data_size_pP = *data_size_pP - li2 - 1; *num_li_pP = *num_li_pP +1; if (!(*data_size_pP >= 0)) LOG_E(RLC, "Invalid data_size=%d! (pdu_size=%d loop=%d e1=%d e2=%d li2=%d e_liP=%02x.%02x.%02x.%02x.%02x.%02x.%02x.%02x.%02x)\n", *data_size_pP, *data_size_pP, continue_loop, e1, e2, li2, (e_liP-(continue_loop-1)+0)->b1, (e_liP-(continue_loop-1)+0)->b2, (e_liP-(continue_loop-1)+0)->b3, (e_liP-(continue_loop-1)+1)->b1, (e_liP-(continue_loop-1)+1)->b2, (e_liP-(continue_loop-1)+1)->b3, (e_liP-(continue_loop-1)+2)->b1, (e_liP-(continue_loop-1)+2)->b2, (e_liP-(continue_loop-1)+2)->b3); // AssertFatal(*data_size_pP >= 0, "Invalid data_size!"); if (e2 == 0) { continue_loop = 0; } else { e_liP++; continue_loop++; } } else { if (!(*data_size_pP >= 0)) LOG_E(RLC, "Invalid data_size=%d! 
(pdu_size=%d loop=%d e1=%d li1=%d e_liP=%02x.%02x.%02x.%02x.%02x.%02x.%02x.%02x.%02x)\n", *data_size_pP, *data_size_pP, continue_loop, e1, li1, (e_liP-(continue_loop-1)+0)->b1, (e_liP-(continue_loop-1)+0)->b2, (e_liP-(continue_loop-1)+0)->b3, (e_liP-(continue_loop-1)+1)->b1, (e_liP-(continue_loop-1)+1)->b2, (e_liP-(continue_loop-1)+1)->b3, (e_liP-(continue_loop-1)+2)->b1, (e_liP-(continue_loop-1)+2)->b2, (e_liP-(continue_loop-1)+2)->b3); continue_loop = 0; // AssertFatal(*data_size_pP >= 0, "Invalid data_size!"); } if (*num_li_pP > RLC_UM_SEGMENT_NB_MAX_LI_PER_PDU) { return -1; } } *data_ppP = *data_ppP + (((*num_li_pP*3) +1) >> 1); if (*data_size_pP > 0) { return 0; } else if (*data_size_pP == 0) { LOG_W(RLC, "Last RLC SDU size is zero!\n"); return -1; } else { LOG_W(RLC, "Last RLC SDU size is negative %d!\n", *data_size_pP); return -1; } } //----------------------------------------------------------------------------- void rlc_um_try_reassembly( const protocol_ctxt_t* const ctxt_pP, rlc_um_entity_t * rlc_pP, rlc_sn_t start_snP, rlc_sn_t end_snP) { mem_block_t *pdu_mem_p = NULL; struct mac_tb_ind *tb_ind_p = NULL; rlc_um_e_li_t *e_li_p = NULL; unsigned char *data_p = NULL; int e = 0; int fi = 0; sdu_size_t size = 0; rlc_sn_t sn = 0; unsigned int continue_reassembly = 0; unsigned int num_li = 0; unsigned int li_array[RLC_UM_SEGMENT_NB_MAX_LI_PER_PDU]; int i = 0; int reassembly_start_index = 0; VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_RLC_UM_TRY_REASSEMBLY,VCD_FUNCTION_IN); if (end_snP < 0) { end_snP = end_snP + rlc_pP->rx_sn_modulo; } if (start_snP < 0) { start_snP = start_snP + rlc_pP->rx_sn_modulo; } #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" TRY REASSEMBLY FROM PDU SN=%03d+1 TO PDU SN=%03d SN Length = %d bits (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), rlc_pP->last_reassemblied_sn, end_snP, rlc_pP->rx_sn_length, __FILE__, __LINE__); #endif // nothing to be reassemblied if (start_snP == end_snP) { VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_RLC_UM_TRY_REASSEMBLY,VCD_FUNCTION_OUT); return; } continue_reassembly = 1; //sn = (rlc_pP->last_reassemblied_sn + 1) % rlc_pP->rx_sn_modulo; sn = start_snP; //check_mem_area(); while (continue_reassembly) { if ((pdu_mem_p = rlc_pP->dar_buffer[sn])) { if ((rlc_pP->last_reassemblied_sn+1)%rlc_pP->rx_sn_modulo != sn) { #if TRACE_RLC_UM_DAR LOG_W(RLC, PROTOCOL_RLC_UM_CTXT_FMT" FINDING a HOLE in RLC UM SN: CLEARING OUTPUT SDU BECAUSE NEW SN (%03d) TO REASSEMBLY NOT CONTIGUOUS WITH LAST REASSEMBLIED SN (%03d) (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn, rlc_pP->last_reassemblied_sn, __FILE__, __LINE__); #endif rlc_um_clear_rx_sdu(ctxt_pP, rlc_pP); } rlc_pP->last_reassemblied_sn = sn; tb_ind_p = (struct mac_tb_ind *)(pdu_mem_p->data); if (rlc_pP->rx_sn_length == 10) { #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" TRY REASSEMBLY 10 PDU SN=%03d\n (%s:%u)", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn, __FILE__, __LINE__); #endif e = (((rlc_um_pdu_sn_10_t*)(tb_ind_p->data_ptr))->b1 & 0x04) >> 2; fi = (((rlc_um_pdu_sn_10_t*)(tb_ind_p->data_ptr))->b1 & 0x18) >> 3; e_li_p = (rlc_um_e_li_t*)((rlc_um_pdu_sn_10_t*)(tb_ind_p->data_ptr))->data; size = tb_ind_p->size - 2; data_p = &tb_ind_p->data_ptr[2]; } else { #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" TRY REASSEMBLY 5 PDU SN=%03d Byte 0=%02X (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn, ((rlc_um_pdu_sn_5_t*)(tb_ind_p->data_ptr))->b1, __FILE__, __LINE__); #endif e = 
(((rlc_um_pdu_sn_5_t*)(tb_ind_p->data_ptr))->b1 & 0x00000020) >> 5; fi = (((rlc_um_pdu_sn_5_t*)(tb_ind_p->data_ptr))->b1 & 0x000000C0) >> 6; e_li_p = (rlc_um_e_li_t*)((rlc_um_pdu_sn_5_t*)(tb_ind_p->data_ptr))->data; size = tb_ind_p->size - 1; data_p = &tb_ind_p->data_ptr[1]; #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" e=%01X fi=%01X\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), e, fi, __FILE__, __LINE__); #endif } //AssertFatal(size >= 0, "invalid size!"); //AssertFatal((e==0) || (e==1), "invalid e!"); //AssertFatal((fi >= 0) && (fi <= 3), "invalid fi!"); if((size < 0) || ((e!=0) && (e!=1)) || ((fi < 0) || (fi > 3))){ LOG_E(RLC, "invalid size %d, e %d, fi %d\n", size, e, fi); sn = (sn + 1) % rlc_pP->rx_sn_modulo; if ((sn == rlc_pP->vr_uh) || (sn == end_snP)) { continue_reassembly = 0; } continue; } if (e == RLC_E_FIXED_PART_DATA_FIELD_FOLLOW) { switch (fi) { case RLC_FI_1ST_BYTE_DATA_IS_1ST_BYTE_SDU_LAST_BYTE_DATA_IS_LAST_BYTE_SDU: #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" TRY REASSEMBLY PDU NO E_LI FI=11 (00) (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), __FILE__, __LINE__); #endif // one complete SDU //LGrlc_um_send_sdu(rlc_pP,ctxt_pP->frame,ctxt_pP->enb_flag); // may be not necessary rlc_um_clear_rx_sdu(ctxt_pP, rlc_pP); rlc_um_reassembly (ctxt_pP, rlc_pP, data_p, size); rlc_um_send_sdu(ctxt_pP, rlc_pP); rlc_pP->reassembly_missing_sn_detected = 0; break; case RLC_FI_1ST_BYTE_DATA_IS_1ST_BYTE_SDU_LAST_BYTE_DATA_IS_NOT_LAST_BYTE_SDU: #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" TRY REASSEMBLY PDU NO E_LI FI=10 (01) (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), __FILE__, __LINE__); #endif // one beginning segment of SDU in PDU //LG rlc_um_send_sdu(rlc_pP,ctxt_pP->frame,ctxt_pP->enb_flag); // may be not necessary rlc_um_clear_rx_sdu(ctxt_pP, rlc_pP); rlc_um_reassembly (ctxt_pP, rlc_pP, data_p, size); rlc_pP->reassembly_missing_sn_detected = 0; break; case RLC_FI_1ST_BYTE_DATA_IS_NOT_1ST_BYTE_SDU_LAST_BYTE_DATA_IS_LAST_BYTE_SDU: #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" TRY REASSEMBLY PDU NO E_LI FI=01 (10) (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), __FILE__, __LINE__); #endif // one last segment of SDU if (rlc_pP->reassembly_missing_sn_detected == 0) { rlc_um_reassembly (ctxt_pP, rlc_pP, data_p, size); rlc_um_send_sdu(ctxt_pP, rlc_pP); } else { //clear sdu already done rlc_pP->stat_rx_data_pdu_dropped += 1; rlc_pP->stat_rx_data_bytes_dropped += tb_ind_p->size; } rlc_pP->reassembly_missing_sn_detected = 0; break; case RLC_FI_1ST_BYTE_DATA_IS_NOT_1ST_BYTE_SDU_LAST_BYTE_DATA_IS_NOT_LAST_BYTE_SDU: #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" TRY REASSEMBLY PDU NO E_LI FI=00 (11) (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), __FILE__, __LINE__); #endif if (rlc_pP->reassembly_missing_sn_detected == 0) { // one whole segment of SDU in PDU rlc_um_reassembly (ctxt_pP, rlc_pP, data_p, size); } else { #if TRACE_RLC_UM_DAR LOG_W(RLC, PROTOCOL_RLC_UM_CTXT_FMT" TRY REASSEMBLY PDU NO E_LI FI=00 (11) MISSING SN DETECTED (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), __FILE__, __LINE__); #endif //LOG_D(RLC, "[MSC_NBOX][FRAME %05u][%s][RLC_UM][MOD %u/%u][RB %u][Missing SN detected][RLC_UM][MOD %u/%u][RB %u]\n", // ctxt_pP->frame, rlc_pP->module_id,rlc_pP->rb_id, rlc_pP->module_id,rlc_pP->rb_id); rlc_pP->reassembly_missing_sn_detected = 1; // not necessary but for readability of the code rlc_pP->stat_rx_data_pdu_dropped += 1; rlc_pP->stat_rx_data_bytes_dropped += tb_ind_p->size; #if 
RLC_STOP_ON_LOST_PDU AssertFatal( rlc_pP->reassembly_missing_sn_detected == 1, PROTOCOL_RLC_UM_CTXT_FMT" MISSING PDU DETECTED (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), __FILE__, __LINE__); #endif } break; default: //AssertFatal( 0 , PROTOCOL_RLC_UM_CTXT_FMT" fi=%d! TRY REASSEMBLY SHOULD NOT GO HERE (%s:%u)\n", // PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), // fi, // __FILE__, // __LINE__); LOG_E(RLC, PROTOCOL_RLC_UM_CTXT_FMT" fi=%d! TRY REASSEMBLY SHOULD NOT GO HERE (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), fi, __FILE__, __LINE__); } } else { if (rlc_um_read_length_indicators(&data_p, e_li_p, li_array, &num_li, &size ) >= 0) { switch (fi) { case RLC_FI_1ST_BYTE_DATA_IS_1ST_BYTE_SDU_LAST_BYTE_DATA_IS_LAST_BYTE_SDU: #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" TRY REASSEMBLY PDU FI=11 (00) Li=", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP)); for (i=0; i < num_li; i++) { LOG_D(RLC, "%d ",li_array[i]); } LOG_D(RLC, " remaining size %d\n",size); #endif // N complete SDUs //LGrlc_um_send_sdu(rlc_pP,ctxt_pP->frame,ctxt_pP->enb_flag); rlc_um_clear_rx_sdu(ctxt_pP, rlc_pP); for (i = 0; i < num_li; i++) { rlc_um_reassembly (ctxt_pP, rlc_pP, data_p, li_array[i]); rlc_um_send_sdu(ctxt_pP, rlc_pP); data_p = &data_p[li_array[i]]; } if (size > 0) { // normally should always be > 0 but just for help debug // data_p is already ok, done by last loop above rlc_um_reassembly (ctxt_pP, rlc_pP, data_p, size); rlc_um_send_sdu(ctxt_pP, rlc_pP); } rlc_pP->reassembly_missing_sn_detected = 0; break; case RLC_FI_1ST_BYTE_DATA_IS_1ST_BYTE_SDU_LAST_BYTE_DATA_IS_NOT_LAST_BYTE_SDU: #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" TRY REASSEMBLY PDU FI=10 (01) Li=", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP)); for (i=0; i < num_li; i++) { LOG_D(RLC, "%d ",li_array[i]); } LOG_D(RLC, " remaining size %d\n",size); #endif // N complete SDUs + one segment of SDU in PDU //LG rlc_um_send_sdu(rlc_pP,ctxt_pP->frame,ctxt_pP->enb_flag); rlc_um_clear_rx_sdu(ctxt_pP, rlc_pP); for (i = 0; i < num_li; i++) { rlc_um_reassembly (ctxt_pP, rlc_pP, data_p, li_array[i]); rlc_um_send_sdu(ctxt_pP, rlc_pP); data_p = &data_p[li_array[i]]; } if (size > 0) { // normally should always be > 0 but just for help debug // data_p is already ok, done by last loop above rlc_um_reassembly (ctxt_pP, rlc_pP, data_p, size); } rlc_pP->reassembly_missing_sn_detected = 0; break; case RLC_FI_1ST_BYTE_DATA_IS_NOT_1ST_BYTE_SDU_LAST_BYTE_DATA_IS_LAST_BYTE_SDU: #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" TRY REASSEMBLY PDU FI=01 (10) Li=", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP)); for (i=0; i < num_li; i++) { LOG_D(RLC, "%d ",li_array[i]); } LOG_D(RLC, " remaining size %d\n",size); #endif if (rlc_pP->reassembly_missing_sn_detected) { reassembly_start_index = 1; data_p = &data_p[li_array[0]]; //rlc_pP->stat_rx_data_pdu_dropped += 1; rlc_pP->stat_rx_data_bytes_dropped += li_array[0]; } else { reassembly_start_index = 0; } // one last segment of SDU + N complete SDUs in PDU for (i = reassembly_start_index; i < num_li; i++) { rlc_um_reassembly (ctxt_pP, rlc_pP, data_p, li_array[i]); rlc_um_send_sdu(ctxt_pP, rlc_pP); data_p = &data_p[li_array[i]]; } if (size > 0) { // normally should always be > 0 but just for help debug // data_p is already ok, done by last loop above rlc_um_reassembly (ctxt_pP, rlc_pP, data_p, size); rlc_um_send_sdu(ctxt_pP, rlc_pP); } rlc_pP->reassembly_missing_sn_detected = 0; break; case RLC_FI_1ST_BYTE_DATA_IS_NOT_1ST_BYTE_SDU_LAST_BYTE_DATA_IS_NOT_LAST_BYTE_SDU: #if TRACE_RLC_UM_DAR 
LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" TRY REASSEMBLY PDU FI=00 (11) Li=", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP)); for (i=0; i < num_li; i++) { LOG_D(RLC, "%d ",li_array[i]); } LOG_D(RLC, " remaining size %d\n",size); #endif if (rlc_pP->reassembly_missing_sn_detected) { #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" DISCARD FIRST LI %d (%s:%u)", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), li_array[0], __FILE__, __LINE__); #endif reassembly_start_index = 1; data_p = &data_p[li_array[0]]; //rlc_pP->stat_rx_data_pdu_dropped += 1; rlc_pP->stat_rx_data_bytes_dropped += li_array[0]; } else { reassembly_start_index = 0; } for (i = reassembly_start_index; i < num_li; i++) { rlc_um_reassembly (ctxt_pP, rlc_pP, data_p, li_array[i]); rlc_um_send_sdu(ctxt_pP, rlc_pP); data_p = &data_p[li_array[i]]; } if (size > 0) { // normally should always be > 0 but just for help debug // data_p is already ok, done by last loop above rlc_um_reassembly (ctxt_pP, rlc_pP, data_p, size); } else { //AssertFatal( 0 !=0, PROTOCOL_RLC_UM_CTXT_FMT" size=%d! SHOULD NOT GO HERE (%s:%u)\n", // PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), // size, // __FILE__, // __LINE__); LOG_E(RLC, PROTOCOL_RLC_UM_CTXT_FMT" size=%d! SHOULD NOT GO HERE (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), size, __FILE__, __LINE__); //rlc_pP->stat_rx_data_pdu_dropped += 1; rlc_pP->stat_rx_data_bytes_dropped += size; } rlc_pP->reassembly_missing_sn_detected = 0; break; default: #if TRACE_RLC_UM_DAR LOG_W(RLC, PROTOCOL_RLC_UM_CTXT_FMT" Missing SN detected (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), __FILE__, __LINE__); #endif rlc_pP->stat_rx_data_pdu_dropped += 1; rlc_pP->stat_rx_data_bytes_dropped += tb_ind_p->size; rlc_pP->reassembly_missing_sn_detected = 1; #if RLC_STOP_ON_LOST_PDU AssertFatal( rlc_pP->reassembly_missing_sn_detected == 1, PROTOCOL_RLC_UM_CTXT_FMT" MISSING PDU DETECTED (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), __FILE__, __LINE__); #endif } } else { rlc_pP->stat_rx_data_pdu_dropped += 1; rlc_pP->stat_rx_data_bytes_dropped += tb_ind_p->size; rlc_pP->reassembly_missing_sn_detected = 1; LOG_W(RLC, "[SN %d] Bad RLC header! 
Discard this RLC PDU (size=%d)\n", sn, size); } } #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" REMOVE PDU FROM DAR BUFFER SN=%03d (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn, __FILE__, __LINE__); #endif free_mem_block(rlc_pP->dar_buffer[sn], __func__); rlc_pP->dar_buffer[sn] = NULL; } else { rlc_pP->last_reassemblied_missing_sn = sn; #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" Missing SN %04d detected, clearing RX SDU (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn, __FILE__, __LINE__); #endif rlc_pP->reassembly_missing_sn_detected = 1; rlc_um_clear_rx_sdu(ctxt_pP, rlc_pP); #if RLC_STOP_ON_LOST_PDU AssertFatal( rlc_pP->reassembly_missing_sn_detected == 1, PROTOCOL_RLC_UM_CTXT_FMT" MISSING PDU DETECTED (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), __FILE__, __LINE__); #endif } sn = (sn + 1) % rlc_pP->rx_sn_modulo; if ((sn == rlc_pP->vr_uh) || (sn == end_snP)) { continue_reassembly = 0; } } #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" TRIED REASSEMBLY VR(UR)=%03d VR(UX)=%03d VR(UH)=%03d (%s:%u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), rlc_pP->vr_ur, rlc_pP->vr_ux, rlc_pP->vr_uh, __FILE__, __LINE__); #endif VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_RLC_UM_TRY_REASSEMBLY,VCD_FUNCTION_OUT); } //----------------------------------------------------------------------------- void rlc_um_stop_and_reset_timer_reordering( const protocol_ctxt_t* const ctxt_pP, rlc_um_entity_t * rlc_pP) { #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" [T-REORDERING] STOPPED AND RESET\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP)); #endif rlc_pP->t_reordering.running = 0; rlc_pP->t_reordering.ms_time_out = 0; rlc_pP->t_reordering.ms_start = 0; rlc_pP->t_reordering.timed_out = 0; } //----------------------------------------------------------------------------- void rlc_um_start_timer_reordering( const protocol_ctxt_t* const ctxt_pP, rlc_um_entity_t * rlc_pP) { rlc_pP->t_reordering.timed_out = 0; if (rlc_pP->t_reordering.ms_duration > 0) { rlc_pP->t_reordering.running = 1; rlc_pP->t_reordering.ms_time_out = PROTOCOL_CTXT_TIME_MILLI_SECONDS(ctxt_pP) + rlc_pP->t_reordering.ms_duration; rlc_pP->t_reordering.ms_start = PROTOCOL_CTXT_TIME_MILLI_SECONDS(ctxt_pP); #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" [T-REORDERING] STARTED (TIME-OUT = FRAME %05u)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), rlc_pP->t_reordering.ms_time_out); #endif } else { LOG_T(RLC, PROTOCOL_RLC_UM_CTXT_FMT"[T-REORDERING] NOT STARTED, CAUSE CONFIGURED 0 ms\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP,rlc_pP)); } } //----------------------------------------------------------------------------- void rlc_um_init_timer_reordering( const protocol_ctxt_t* const ctxt_pP, rlc_um_entity_t * const rlc_pP, const uint32_t ms_durationP) { rlc_pP->t_reordering.running = 0; rlc_pP->t_reordering.ms_time_out = 0; rlc_pP->t_reordering.ms_start = 0; rlc_pP->t_reordering.ms_duration = ms_durationP; rlc_pP->t_reordering.timed_out = 0; } //----------------------------------------------------------------------------- void rlc_um_check_timer_dar_time_out( const protocol_ctxt_t* const ctxt_pP, rlc_um_entity_t * const rlc_pP) { signed int in_window; rlc_usn_t old_vr_ur; VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_RLC_UM_CHECK_TIMER_DAR_TIME_OUT,VCD_FUNCTION_IN); if ((rlc_pP->t_reordering.running)) { if ( // CASE 1: start time out // +-----------+------------------+----------+ // | |******************| | // 
+-----------+------------------+----------+ //FRAME # 0 FRAME MAX ((rlc_pP->t_reordering.ms_start < rlc_pP->t_reordering.ms_time_out) && ((PROTOCOL_CTXT_TIME_MILLI_SECONDS(ctxt_pP) >= rlc_pP->t_reordering.ms_time_out) || (PROTOCOL_CTXT_TIME_MILLI_SECONDS(ctxt_pP) < rlc_pP->t_reordering.ms_start))) || // CASE 2: time out start // +-----------+------------------+----------+ // |***********| |**********| // +-----------+------------------+----------+ //FRAME # 0 FRAME MAX VALUE ((rlc_pP->t_reordering.ms_start > rlc_pP->t_reordering.ms_time_out) && (PROTOCOL_CTXT_TIME_MILLI_SECONDS(ctxt_pP) < rlc_pP->t_reordering.ms_start) && (PROTOCOL_CTXT_TIME_MILLI_SECONDS(ctxt_pP) >= rlc_pP->t_reordering.ms_time_out)) ) { //if ((uint32_t)((uint32_t)rlc_pP->timer_reordering + (uint32_t)rlc_pP->timer_reordering_init) <= ctxt_pP->frame) { // 5.1.2.2.4 Actions when t-Reordering expires // When t-Reordering expires, the receiving UM RLC entity shall: // -update VR(UR) to the SN of the first UMD PDU with SN >= VR(UX) that has not been received; // -reassemble RLC SDUs from any UMD PDUs with SN < updated VR(UR), remove RLC headers when doing so and deliver the reassembled RLC SDUs to upper layer in ascending order of the RLC SN if not delivered before; // -if VR(UH) > VR(UR): // -start t-Reordering; // -set VR(UX) to VR(UH). rlc_pP->stat_timer_reordering_timed_out += 1; #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT"*****************************************************\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP)); LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT"* T I M E - O U T *\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP)); LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT"*****************************************************\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP)); LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" TIMER t-Reordering expiration\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP)); LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" timer_reordering=%d frame=%d expire ms %d\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), rlc_pP->t_reordering.ms_duration, ctxt_pP->frame, rlc_pP->t_reordering.ms_time_out); LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" set VR(UR)=%03d to", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), rlc_pP->vr_ur); #endif if (pthread_mutex_trylock(&rlc_pP->lock_dar_buffer) == 0) { old_vr_ur = rlc_pP->vr_ur; rlc_pP->vr_ur = rlc_pP->vr_ux; while (rlc_um_get_pdu_from_dar_buffer(ctxt_pP, rlc_pP, rlc_pP->vr_ur)) { rlc_pP->vr_ur = (rlc_pP->vr_ur+1)%rlc_pP->rx_sn_modulo; } #if TRACE_RLC_UM_DAR LOG_D(RLC, " %d", rlc_pP->vr_ur); LOG_D(RLC, "\n"); #endif rlc_um_try_reassembly(ctxt_pP, rlc_pP ,old_vr_ur, rlc_pP->vr_ur); in_window = rlc_um_in_window(ctxt_pP, rlc_pP, rlc_pP->vr_ur, rlc_pP->vr_uh, rlc_pP->vr_uh); if (in_window == 2) { rlc_um_start_timer_reordering(ctxt_pP, rlc_pP); rlc_pP->vr_ux = rlc_pP->vr_uh; #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" restarting t-Reordering set VR(UX) to %d (VR(UH)>VR(UR))\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), rlc_pP->vr_ux); #endif } else { #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" STOP t-Reordering VR(UX) = %03d\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), rlc_pP->vr_ux); #endif rlc_um_stop_and_reset_timer_reordering(ctxt_pP, rlc_pP); } RLC_UM_MUTEX_UNLOCK(&rlc_pP->lock_dar_buffer); } } } VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_RLC_UM_CHECK_TIMER_DAR_TIME_OUT,VCD_FUNCTION_OUT); } //----------------------------------------------------------------------------- mem_block_t* rlc_um_remove_pdu_from_dar_buffer( const protocol_ctxt_t* 
const ctxt_pP, rlc_um_entity_t * const rlc_pP, rlc_usn_t snP) { mem_block_t * pdu_p = rlc_pP->dar_buffer[snP]; #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" REMOVE PDU FROM DAR BUFFER SN=%03d\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), snP); #endif rlc_pP->dar_buffer[snP] = NULL; return pdu_p; } //----------------------------------------------------------------------------- mem_block_t* rlc_um_get_pdu_from_dar_buffer(const protocol_ctxt_t* const ctxt_pP, rlc_um_entity_t * const rlc_pP, rlc_usn_t snP) { return rlc_pP->dar_buffer[snP]; } //----------------------------------------------------------------------------- void rlc_um_store_pdu_in_dar_buffer( const protocol_ctxt_t* const ctxt_pP, rlc_um_entity_t * const rlc_pP, mem_block_t *pdu_pP, rlc_usn_t snP) { #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" STORE PDU IN DAR BUFFER SN=%03d VR(UR)=%03d VR(UX)=%03d VR(UH)=%03d\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), snP, rlc_pP->vr_ur, rlc_pP->vr_ux, rlc_pP->vr_uh); #endif rlc_pP->dar_buffer[snP] = pdu_pP; } //----------------------------------------------------------------------------- // returns -2 if lower_bound > sn // returns -1 if higher_bound < sn // returns 0 if lower_bound < sn < higher_bound // returns 1 if lower_bound == sn // returns 2 if higher_bound == sn // returns 3 if higher_bound == sn == lower_bound signed int rlc_um_in_window( const protocol_ctxt_t* const ctxt_pP, rlc_um_entity_t * const rlc_pP, rlc_sn_t lower_boundP, rlc_sn_t snP, rlc_sn_t higher_boundP) { rlc_sn_t modulus = (rlc_sn_t)rlc_pP->vr_uh - rlc_pP->rx_um_window_size; #if TRACE_RLC_UM_RX rlc_sn_t lower_bound = lower_boundP; rlc_sn_t higher_bound = higher_boundP; rlc_sn_t sn = snP; #endif lower_boundP = (lower_boundP - modulus) % rlc_pP->rx_sn_modulo; higher_boundP = (higher_boundP - modulus) % rlc_pP->rx_sn_modulo; snP = (snP - modulus) % rlc_pP->rx_sn_modulo; if ( lower_boundP > snP) { #if TRACE_RLC_UM_RX LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" %d not in WINDOW[%03d:%03d] (SN<LOWER BOUND)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn, lower_bound, higher_bound); #endif return -2; } if ( higher_boundP < snP) { #if TRACE_RLC_UM_RX LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" %d not in WINDOW[%03d:%03d] (SN>HIGHER BOUND) <=> %d not in WINDOW[%03d:%03d]\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn, lower_bound, higher_bound, snP, lower_boundP, higher_boundP); #endif return -1; } if ( lower_boundP == snP) { if ( higher_boundP == snP) { #if TRACE_RLC_UM_RX LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" %d in WINDOW[%03d:%03d] (SN=HIGHER BOUND=LOWER BOUND)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn, lower_bound, higher_bound); #endif return 3; } #if TRACE_RLC_UM_RX LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" %d in WINDOW[%03d:%03d] (SN=LOWER BOUND)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn, lower_bound, higher_bound); #endif return 1; } if ( higher_boundP == snP) { #if TRACE_RLC_UM_RX LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" %d in WINDOW[%03d:%03d] (SN=HIGHER BOUND)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn, lower_bound, higher_bound); #endif return 2; } return 0; } //----------------------------------------------------------------------------- signed int rlc_um_in_reordering_window( const protocol_ctxt_t* const ctxt_pP, rlc_um_entity_t * const rlc_pP, const rlc_sn_t snP) { rlc_sn_t modulus = (signed int)rlc_pP->vr_uh - rlc_pP->rx_um_window_size; rlc_sn_t sn_mod = (snP - modulus) % rlc_pP->rx_sn_modulo; if ( 0 <= sn_mod) { if (sn_mod < rlc_pP->rx_um_window_size) { #if TRACE_RLC_UM_DAR 
LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" %d IN REORDERING WINDOW[%03d:%03d[ SN %d IN [%03d:%03d[ VR(UR)=%03d VR(UH)=%03d\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn_mod, 0, rlc_pP->rx_um_window_size, snP, (signed int)rlc_pP->vr_uh - rlc_pP->rx_um_window_size, rlc_pP->vr_uh, rlc_pP->vr_ur, rlc_pP->vr_uh); #endif return 0; } } #if TRACE_RLC_UM_DAR if (modulus < 0) { LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" %d NOT IN REORDERING WINDOW[%03d:%03d[ SN %d NOT IN [%03d:%03d[ VR(UR)=%03d VR(UH)=%03d\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn_mod, modulus + 1024, rlc_pP->rx_um_window_size, snP, modulus + 1024 , rlc_pP->vr_uh, rlc_pP->vr_ur, rlc_pP->vr_uh); } else { LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" %d NOT IN REORDERING WINDOW[%03d:%03d[ SN %d NOT IN [%03d:%03d[ VR(UR)=%03d VR(UH)=%03d\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn_mod, modulus, rlc_pP->rx_um_window_size, snP, modulus , rlc_pP->vr_uh, rlc_pP->vr_ur, rlc_pP->vr_uh); } #endif return -1; } //----------------------------------------------------------------------------- void rlc_um_receive_process_dar ( const protocol_ctxt_t* const ctxt_pP, rlc_um_entity_t * const rlc_pP, mem_block_t * pdu_mem_pP, rlc_um_pdu_sn_10_t * const pdu_pP, const sdu_size_t tb_sizeP) { // 36.322v9.3.0 section 5.1.2.2.1: // The receiving UM RLC entity shall maintain a reordering window according to state variable VR(UH) as follows: // -a SN falls within the reordering window if (VR(UH) – UM_Window_Size) <= SN < VR(UH); // -a SN falls outside of the reordering window otherwise. // When receiving an UMD PDU from lower layer, the receiving UM RLC entity shall: // -either discard the received UMD PDU or place it in the reception buffer (see sub clause 5.1.2.2.2); // -if the received UMD PDU was placed in the reception buffer: // -update state variables, reassemble and deliver RLC SDUs to upper layer and start/stop t-Reordering as needed (see sub clause 5.1.2.2.3); // When t-Reordering expires, the receiving UM RLC entity shall: // - update state variables, reassemble and deliver RLC SDUs to upper layer and start t-Reordering as needed (see sub clause 5.1.2.2.4). // When an UMD PDU with SN = x is received from lower layer, the receiving UM RLC entity shall: // -if VR(UR) < x < VR(UH) and the UMD PDU with SN = x has been received before; or // -if (VR(UH) – UM_Window_Size) <= x < VR(UR): // -discard the received UMD PDU; // -else: // -place the received UMD PDU in the reception buffer. 
rlc_sn_t sn = -1; signed int in_window; VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_RLC_UM_RECEIVE_PROCESS_DAR, VCD_FUNCTION_IN); if (rlc_pP->rx_sn_length == 10) { sn = ((pdu_pP->b1 & 0x00000003) << 8) + pdu_pP->b2; } else if (rlc_pP->rx_sn_length == 5) { sn = pdu_pP->b1 & 0x1F; } else { free_mem_block(pdu_mem_pP, __func__); } RLC_UM_MUTEX_LOCK(&rlc_pP->lock_dar_buffer, ctxt_pP, rlc_pP); in_window = rlc_um_in_window(ctxt_pP, rlc_pP, rlc_pP->vr_uh - rlc_pP->rx_um_window_size, sn, rlc_pP->vr_ur); #if TRACE_RLC_PAYLOAD rlc_util_print_hex_octets(RLC, &pdu_pP->b1, tb_sizeP); #endif // rlc_um_in_window() returns -2 if lower_bound > sn // rlc_um_in_window() returns -1 if higher_bound < sn // rlc_um_in_window() returns 0 if lower_bound < sn < higher_bound // rlc_um_in_window() returns 1 if lower_bound == sn // rlc_um_in_window() returns 2 if higher_bound == sn // rlc_um_in_window() returns 3 if higher_bound == sn == lower_bound if ((in_window == 1) || (in_window == 0)) { #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" RX PDU VR(UH) – UM_Window_Size) <= SN %d < VR(UR) -> GARBAGE\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn); #endif rlc_pP->stat_rx_data_pdu_out_of_window += 1; rlc_pP->stat_rx_data_bytes_out_of_window += tb_sizeP; free_mem_block(pdu_mem_pP, __func__); pdu_mem_pP = NULL; RLC_UM_MUTEX_UNLOCK(&rlc_pP->lock_dar_buffer); VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_RLC_UM_RECEIVE_PROCESS_DAR, VCD_FUNCTION_OUT); return; } if ((rlc_um_get_pdu_from_dar_buffer(ctxt_pP, rlc_pP, sn))) { in_window = rlc_um_in_window(ctxt_pP, rlc_pP, rlc_pP->vr_ur, sn, rlc_pP->vr_uh); if (in_window == 0) { #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" RX PDU VR(UR) < SN %d < VR(UH) and RECEIVED BEFORE-> GARBAGE\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn); #endif //discard the PDU rlc_pP->stat_rx_data_pdus_duplicate += 1; rlc_pP->stat_rx_data_bytes_duplicate += tb_sizeP; free_mem_block(pdu_mem_pP, __func__); pdu_mem_pP = NULL; RLC_UM_MUTEX_UNLOCK(&rlc_pP->lock_dar_buffer); VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_RLC_UM_RECEIVE_PROCESS_DAR, VCD_FUNCTION_OUT); return; } // 2 lines to avoid memory leaks rlc_pP->stat_rx_data_pdus_duplicate += 1; rlc_pP->stat_rx_data_bytes_duplicate += tb_sizeP; #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" RX PDU SN %03d REMOVE OLD PDU BEFORE STORING NEW PDU\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn); #endif mem_block_t *pdu = rlc_um_remove_pdu_from_dar_buffer(ctxt_pP, rlc_pP, sn); free_mem_block(pdu, __func__); } rlc_um_store_pdu_in_dar_buffer(ctxt_pP, rlc_pP, pdu_mem_pP, sn); // -if x falls outside of the reordering window: // -update VR(UH) to x + 1; // -reassemble RLC SDUs from any UMD PDUs with SN that falls outside of // the reordering window, remove RLC headers when doing so and deliver // the reassembled RLC SDUs to upper layer in ascending order of the // RLC SN if not delivered before; // // -if VR(UR) falls outside of the reordering window: // -set VR(UR) to (VR(UH) – UM_Window_Size); if (rlc_um_in_reordering_window(ctxt_pP, rlc_pP, sn) < 0) { #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" RX PDU SN %d OUTSIDE REORDERING WINDOW VR(UH)=%d UM_Window_Size=%d\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), sn, rlc_pP->vr_uh, rlc_pP->rx_um_window_size); #endif rlc_pP->vr_uh = (sn + 1) % rlc_pP->rx_sn_modulo; if (rlc_um_in_reordering_window(ctxt_pP, rlc_pP, rlc_pP->vr_ur) != 0) { in_window = rlc_pP->vr_uh - rlc_pP->rx_um_window_size; if 
(in_window < 0) { in_window = in_window + rlc_pP->rx_sn_modulo; } rlc_um_try_reassembly(ctxt_pP, rlc_pP, rlc_pP->vr_ur, in_window); } if (rlc_um_in_reordering_window(ctxt_pP, rlc_pP, rlc_pP->vr_ur) < 0) { #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" VR(UR) %d OUTSIDE REORDERING WINDOW SET TO VR(UH) – UM_Window_Size = %d\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), rlc_pP->vr_ur, in_window); #endif rlc_pP->vr_ur = in_window; } } // -if the reception buffer contains an UMD PDU with SN = VR(UR): // -update VR(UR) to the SN of the first UMD PDU with SN > current // VR(UR) that has not been received; // -reassemble RLC SDUs from any UMD PDUs with SN < updated VR(UR), // remove RLC headers when doing so and deliver the reassembled RLC // SDUs to upper layer in ascending order of the RLC SN if not // delivered before; if ((sn == rlc_pP->vr_ur) && rlc_um_get_pdu_from_dar_buffer(ctxt_pP, rlc_pP, rlc_pP->vr_ur)) { //sn_tmp = rlc_pP->vr_ur; do { rlc_pP->vr_ur = (rlc_pP->vr_ur+1) % rlc_pP->rx_sn_modulo; } while (rlc_um_get_pdu_from_dar_buffer(ctxt_pP, rlc_pP, rlc_pP->vr_ur) && (rlc_pP->vr_ur != rlc_pP->vr_uh)); rlc_um_try_reassembly(ctxt_pP, rlc_pP, sn, rlc_pP->vr_ur); } // -if t-Reordering is running: // -if VR(UX) <= VR(UR); or // -if VR(UX) falls outside of the reordering window and VR(UX) is not // equal to VR(UH):: // -stop and reset t-Reordering; if (rlc_pP->t_reordering.running) { if (rlc_pP->vr_uh != rlc_pP->vr_ux) { in_window = rlc_um_in_reordering_window(ctxt_pP, rlc_pP, rlc_pP->vr_ux); if (in_window < 0) { #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" STOP and RESET t-Reordering because VR(UX) falls outside of the reordering window and VR(UX)=%d is not equal to VR(UH)=%d -or- VR(UX) <= VR(UR)\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), rlc_pP->vr_ux, rlc_pP->vr_uh); #endif rlc_um_stop_and_reset_timer_reordering(ctxt_pP, rlc_pP); } } } if (rlc_pP->t_reordering.running) { in_window = rlc_um_in_window(ctxt_pP, rlc_pP, rlc_pP->vr_ur, rlc_pP->vr_ux, rlc_pP->vr_uh); if ((in_window == -2) || (in_window == 1)) { #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" STOP and RESET t-Reordering because VR(UX) falls outside of the reordering window and VR(UX)=%d is not equal to VR(UH)=%d\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), rlc_pP->vr_ux, rlc_pP->vr_uh); #endif rlc_um_stop_and_reset_timer_reordering(ctxt_pP, rlc_pP); } } // -if t-Reordering is not running (includes the case when t-Reordering is // stopped due to actions above): // -if VR(UH) > VR(UR): // -start t-Reordering; // -set VR(UX) to VR(UH). if (rlc_pP->t_reordering.running == 0) { in_window = rlc_um_in_window(ctxt_pP, rlc_pP, rlc_pP->vr_ur, rlc_pP->vr_uh, rlc_pP->vr_uh); if (in_window == 2) { rlc_um_start_timer_reordering(ctxt_pP, rlc_pP); rlc_pP->vr_ux = rlc_pP->vr_uh; #if TRACE_RLC_UM_DAR LOG_D(RLC, PROTOCOL_RLC_UM_CTXT_FMT" RESTART t-Reordering set VR(UX) to VR(UH) =%d\n", PROTOCOL_RLC_UM_CTXT_ARGS(ctxt_pP, rlc_pP), rlc_pP->vr_ux); #endif } } RLC_UM_MUTEX_UNLOCK(&rlc_pP->lock_dar_buffer); VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_RLC_UM_RECEIVE_PROCESS_DAR, VCD_FUNCTION_OUT); }
125834.c
/* Type object implementation */ #include "Python.h" #include "internal/pystate.h" #include "frameobject.h" #include "structmember.h" #include <ctype.h> /*[clinic input] class type "PyTypeObject *" "&PyType_Type" class object "PyObject *" "&PyBaseObject_Type" [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=4b94608d231c434b]*/ #include "clinic/typeobject.c.h" /* Support type attribute cache */ /* The cache can keep references to the names alive for longer than they normally would. This is why the maximum size is limited to MCACHE_MAX_ATTR_SIZE, since it might be a problem if very large strings are used as attribute names. */ #define MCACHE_MAX_ATTR_SIZE 100 #define MCACHE_SIZE_EXP 12 #define MCACHE_HASH(version, name_hash) \ (((unsigned int)(version) ^ (unsigned int)(name_hash)) \ & ((1 << MCACHE_SIZE_EXP) - 1)) #define MCACHE_HASH_METHOD(type, name) \ MCACHE_HASH((type)->tp_version_tag, \ ((PyASCIIObject *)(name))->hash) #define MCACHE_CACHEABLE_NAME(name) \ PyUnicode_CheckExact(name) && \ PyUnicode_IS_READY(name) && \ PyUnicode_GET_LENGTH(name) <= MCACHE_MAX_ATTR_SIZE struct method_cache_entry { unsigned int version; PyObject *name; /* reference to exactly a str or None */ PyObject *value; /* borrowed */ }; static struct method_cache_entry method_cache[1 << MCACHE_SIZE_EXP]; static unsigned int next_version_tag = 0; #define MCACHE_STATS 0 #if MCACHE_STATS static size_t method_cache_hits = 0; static size_t method_cache_misses = 0; static size_t method_cache_collisions = 0; #endif /* alphabetical order */ _Py_IDENTIFIER(__abstractmethods__); _Py_IDENTIFIER(__class__); _Py_IDENTIFIER(__class_getitem__); _Py_IDENTIFIER(__delitem__); _Py_IDENTIFIER(__dict__); _Py_IDENTIFIER(__doc__); _Py_IDENTIFIER(__getattribute__); _Py_IDENTIFIER(__getitem__); _Py_IDENTIFIER(__hash__); _Py_IDENTIFIER(__init_subclass__); _Py_IDENTIFIER(__len__); _Py_IDENTIFIER(__module__); _Py_IDENTIFIER(__name__); _Py_IDENTIFIER(__new__); _Py_IDENTIFIER(__set_name__); _Py_IDENTIFIER(__setitem__); _Py_IDENTIFIER(builtins); static PyObject * slot_tp_new(PyTypeObject *type, PyObject *args, PyObject *kwds); static void clear_slotdefs(void); static PyObject * lookup_maybe_method(PyObject *self, _Py_Identifier *attrid, int *unbound); /* * finds the beginning of the docstring's introspection signature. * if present, returns a pointer pointing to the first '('. * otherwise returns NULL. * * doesn't guarantee that the signature is valid, only that it * has a valid prefix. (the signature must also pass skip_signature.) */ static const char * find_signature(const char *name, const char *doc) { const char *dot; size_t length; if (!doc) return NULL; assert(name != NULL); /* for dotted names like classes, only use the last component */ dot = strrchr(name, '.'); if (dot) name = dot + 1; length = strlen(name); if (strncmp(doc, name, length)) return NULL; doc += length; if (*doc != '(') return NULL; return doc; } #define SIGNATURE_END_MARKER ")\n--\n\n" #define SIGNATURE_END_MARKER_LENGTH 6 /* * skips past the end of the docstring's instrospection signature. * (assumes doc starts with a valid signature prefix.) 
*/ static const char * skip_signature(const char *doc) { while (*doc) { if ((*doc == *SIGNATURE_END_MARKER) && !strncmp(doc, SIGNATURE_END_MARKER, SIGNATURE_END_MARKER_LENGTH)) return doc + SIGNATURE_END_MARKER_LENGTH; if ((*doc == '\n') && (doc[1] == '\n')) return NULL; doc++; } return NULL; } #ifndef NDEBUG static int _PyType_CheckConsistency(PyTypeObject *type) { if (!(type->tp_flags & Py_TPFLAGS_READY)) { /* don't check types before PyType_Ready() */ return 1; } assert(!(type->tp_flags & Py_TPFLAGS_READYING)); assert(type->tp_mro != NULL && PyTuple_Check(type->tp_mro)); assert(type->tp_dict != NULL); return 1; } #endif static const char * _PyType_DocWithoutSignature(const char *name, const char *internal_doc) { const char *doc = find_signature(name, internal_doc); if (doc) { doc = skip_signature(doc); if (doc) return doc; } return internal_doc; } PyObject * _PyType_GetDocFromInternalDoc(const char *name, const char *internal_doc) { const char *doc = _PyType_DocWithoutSignature(name, internal_doc); if (!doc || *doc == '\0') { Py_RETURN_NONE; } return PyUnicode_FromString(doc); } PyObject * _PyType_GetTextSignatureFromInternalDoc(const char *name, const char *internal_doc) { const char *start = find_signature(name, internal_doc); const char *end; if (start) end = skip_signature(start); else end = NULL; if (!end) { Py_RETURN_NONE; } /* back "end" up until it points just past the final ')' */ end -= SIGNATURE_END_MARKER_LENGTH - 1; assert((end - start) >= 2); /* should be "()" at least */ assert(end[-1] == ')'); assert(end[0] == '\n'); return PyUnicode_FromStringAndSize(start, end - start); } unsigned int PyType_ClearCache(void) { Py_ssize_t i; unsigned int cur_version_tag = next_version_tag - 1; #if MCACHE_STATS size_t total = method_cache_hits + method_cache_collisions + method_cache_misses; fprintf(stderr, "-- Method cache hits = %zd (%d%%)\n", method_cache_hits, (int) (100.0 * method_cache_hits / total)); fprintf(stderr, "-- Method cache true misses = %zd (%d%%)\n", method_cache_misses, (int) (100.0 * method_cache_misses / total)); fprintf(stderr, "-- Method cache collisions = %zd (%d%%)\n", method_cache_collisions, (int) (100.0 * method_cache_collisions / total)); fprintf(stderr, "-- Method cache size = %zd KiB\n", sizeof(method_cache) / 1024); #endif for (i = 0; i < (1 << MCACHE_SIZE_EXP); i++) { method_cache[i].version = 0; Py_CLEAR(method_cache[i].name); method_cache[i].value = NULL; } next_version_tag = 0; /* mark all version tags as invalid */ PyType_Modified(&PyBaseObject_Type); return cur_version_tag; } void _PyType_Fini(void) { PyType_ClearCache(); clear_slotdefs(); } void PyType_Modified(PyTypeObject *type) { /* Invalidate any cached data for the specified type and all subclasses. This function is called after the base classes, mro, or attributes of the type are altered. Invariants: - Py_TPFLAGS_VALID_VERSION_TAG is never set if Py_TPFLAGS_HAVE_VERSION_TAG is not set (e.g. on type objects coming from non-recompiled extension modules) - before Py_TPFLAGS_VALID_VERSION_TAG can be set on a type, it must first be set on all super types. This function clears the Py_TPFLAGS_VALID_VERSION_TAG of a type (so it must first clear it on all subclasses). The tp_version_tag value is meaningless unless this flag is set. We don't assign new version tags eagerly, but only as needed. 
*/ PyObject *raw, *ref; Py_ssize_t i; if (!PyType_HasFeature(type, Py_TPFLAGS_VALID_VERSION_TAG)) return; raw = type->tp_subclasses; if (raw != NULL) { assert(PyDict_CheckExact(raw)); i = 0; while (PyDict_Next(raw, &i, NULL, &ref)) { assert(PyWeakref_CheckRef(ref)); ref = PyWeakref_GET_OBJECT(ref); if (ref != Py_None) { PyType_Modified((PyTypeObject *)ref); } } } type->tp_flags &= ~Py_TPFLAGS_VALID_VERSION_TAG; } static void type_mro_modified(PyTypeObject *type, PyObject *bases) { /* Check that all base classes or elements of the MRO of type are able to be cached. This function is called after the base classes or mro of the type are altered. Unset HAVE_VERSION_TAG and VALID_VERSION_TAG if the type has a custom MRO that includes a type which is not officially super type, or if the type implements its own mro() method. Called from mro_internal, which will subsequently be called on each subclass when their mro is recursively updated. */ Py_ssize_t i, n; int custom = (Py_TYPE(type) != &PyType_Type); int unbound; PyObject *mro_meth = NULL; PyObject *type_mro_meth = NULL; if (!PyType_HasFeature(type, Py_TPFLAGS_HAVE_VERSION_TAG)) return; if (custom) { _Py_IDENTIFIER(mro); mro_meth = lookup_maybe_method( (PyObject *)type, &PyId_mro, &unbound); if (mro_meth == NULL) goto clear; type_mro_meth = lookup_maybe_method( (PyObject *)&PyType_Type, &PyId_mro, &unbound); if (type_mro_meth == NULL) goto clear; if (mro_meth != type_mro_meth) goto clear; Py_XDECREF(mro_meth); Py_XDECREF(type_mro_meth); } n = PyTuple_GET_SIZE(bases); for (i = 0; i < n; i++) { PyObject *b = PyTuple_GET_ITEM(bases, i); PyTypeObject *cls; assert(PyType_Check(b)); cls = (PyTypeObject *)b; if (!PyType_HasFeature(cls, Py_TPFLAGS_HAVE_VERSION_TAG) || !PyType_IsSubtype(type, cls)) { goto clear; } } return; clear: Py_XDECREF(mro_meth); Py_XDECREF(type_mro_meth); type->tp_flags &= ~(Py_TPFLAGS_HAVE_VERSION_TAG| Py_TPFLAGS_VALID_VERSION_TAG); } static int assign_version_tag(PyTypeObject *type) { /* Ensure that the tp_version_tag is valid and set Py_TPFLAGS_VALID_VERSION_TAG. To respect the invariant, this must first be done on all super classes. Return 0 if this cannot be done, 1 if Py_TPFLAGS_VALID_VERSION_TAG. */ Py_ssize_t i, n; PyObject *bases; if (PyType_HasFeature(type, Py_TPFLAGS_VALID_VERSION_TAG)) return 1; if (!PyType_HasFeature(type, Py_TPFLAGS_HAVE_VERSION_TAG)) return 0; if (!PyType_HasFeature(type, Py_TPFLAGS_READY)) return 0; type->tp_version_tag = next_version_tag++; /* for stress-testing: next_version_tag &= 0xFF; */ if (type->tp_version_tag == 0) { /* wrap-around or just starting Python - clear the whole cache by filling names with references to Py_None. 
Values are also set to NULL for added protection, as they are borrowed reference */ for (i = 0; i < (1 << MCACHE_SIZE_EXP); i++) { method_cache[i].value = NULL; Py_INCREF(Py_None); Py_XSETREF(method_cache[i].name, Py_None); } /* mark all version tags as invalid */ PyType_Modified(&PyBaseObject_Type); return 1; } bases = type->tp_bases; n = PyTuple_GET_SIZE(bases); for (i = 0; i < n; i++) { PyObject *b = PyTuple_GET_ITEM(bases, i); assert(PyType_Check(b)); if (!assign_version_tag((PyTypeObject *)b)) return 0; } type->tp_flags |= Py_TPFLAGS_VALID_VERSION_TAG; return 1; } static PyMemberDef type_members[] = { {"__basicsize__", T_PYSSIZET, offsetof(PyTypeObject,tp_basicsize),READONLY}, {"__itemsize__", T_PYSSIZET, offsetof(PyTypeObject, tp_itemsize), READONLY}, {"__flags__", T_ULONG, offsetof(PyTypeObject, tp_flags), READONLY}, {"__weakrefoffset__", T_PYSSIZET, offsetof(PyTypeObject, tp_weaklistoffset), READONLY}, {"__base__", T_OBJECT, offsetof(PyTypeObject, tp_base), READONLY}, {"__dictoffset__", T_PYSSIZET, offsetof(PyTypeObject, tp_dictoffset), READONLY}, {"__mro__", T_OBJECT, offsetof(PyTypeObject, tp_mro), READONLY}, {0} }; static int check_set_special_type_attr(PyTypeObject *type, PyObject *value, const char *name) { if (!(type->tp_flags & Py_TPFLAGS_HEAPTYPE)) { PyErr_Format(PyExc_TypeError, "can't set %s.%s", type->tp_name, name); return 0; } if (!value) { PyErr_Format(PyExc_TypeError, "can't delete %s.%s", type->tp_name, name); return 0; } return 1; } const char * _PyType_Name(PyTypeObject *type) { const char *s = strrchr(type->tp_name, '.'); if (s == NULL) { s = type->tp_name; } else { s++; } return s; } static PyObject * type_name(PyTypeObject *type, void *context) { if (type->tp_flags & Py_TPFLAGS_HEAPTYPE) { PyHeapTypeObject* et = (PyHeapTypeObject*)type; Py_INCREF(et->ht_name); return et->ht_name; } else { return PyUnicode_FromString(_PyType_Name(type)); } } static PyObject * type_qualname(PyTypeObject *type, void *context) { if (type->tp_flags & Py_TPFLAGS_HEAPTYPE) { PyHeapTypeObject* et = (PyHeapTypeObject*)type; Py_INCREF(et->ht_qualname); return et->ht_qualname; } else { return PyUnicode_FromString(_PyType_Name(type)); } } static int type_set_name(PyTypeObject *type, PyObject *value, void *context) { const char *tp_name; Py_ssize_t name_size; if (!check_set_special_type_attr(type, value, "__name__")) return -1; if (!PyUnicode_Check(value)) { PyErr_Format(PyExc_TypeError, "can only assign string to %s.__name__, not '%s'", type->tp_name, Py_TYPE(value)->tp_name); return -1; } tp_name = PyUnicode_AsUTF8AndSize(value, &name_size); if (tp_name == NULL) return -1; if (strlen(tp_name) != (size_t)name_size) { PyErr_SetString(PyExc_ValueError, "type name must not contain null characters"); return -1; } type->tp_name = tp_name; Py_INCREF(value); Py_SETREF(((PyHeapTypeObject*)type)->ht_name, value); return 0; } static int type_set_qualname(PyTypeObject *type, PyObject *value, void *context) { PyHeapTypeObject* et; if (!check_set_special_type_attr(type, value, "__qualname__")) return -1; if (!PyUnicode_Check(value)) { PyErr_Format(PyExc_TypeError, "can only assign string to %s.__qualname__, not '%s'", type->tp_name, Py_TYPE(value)->tp_name); return -1; } et = (PyHeapTypeObject*)type; Py_INCREF(value); Py_SETREF(et->ht_qualname, value); return 0; } static PyObject * type_module(PyTypeObject *type, void *context) { PyObject *mod; if (type->tp_flags & Py_TPFLAGS_HEAPTYPE) { mod = _PyDict_GetItemId(type->tp_dict, &PyId___module__); if (mod == NULL) { PyErr_Format(PyExc_AttributeError, 
"__module__"); return NULL; } Py_INCREF(mod); } else { const char *s = strrchr(type->tp_name, '.'); if (s != NULL) { mod = PyUnicode_FromStringAndSize( type->tp_name, (Py_ssize_t)(s - type->tp_name)); if (mod != NULL) PyUnicode_InternInPlace(&mod); } else { mod = _PyUnicode_FromId(&PyId_builtins); Py_XINCREF(mod); } } return mod; } static int type_set_module(PyTypeObject *type, PyObject *value, void *context) { if (!check_set_special_type_attr(type, value, "__module__")) return -1; PyType_Modified(type); return _PyDict_SetItemId(type->tp_dict, &PyId___module__, value); } static PyObject * type_abstractmethods(PyTypeObject *type, void *context) { PyObject *mod = NULL; /* type itself has an __abstractmethods__ descriptor (this). Don't return that. */ if (type != &PyType_Type) mod = _PyDict_GetItemId(type->tp_dict, &PyId___abstractmethods__); if (!mod) { PyObject *message = _PyUnicode_FromId(&PyId___abstractmethods__); if (message) PyErr_SetObject(PyExc_AttributeError, message); return NULL; } Py_INCREF(mod); return mod; } static int type_set_abstractmethods(PyTypeObject *type, PyObject *value, void *context) { /* __abstractmethods__ should only be set once on a type, in abc.ABCMeta.__new__, so this function doesn't do anything special to update subclasses. */ int abstract, res; if (value != NULL) { abstract = PyObject_IsTrue(value); if (abstract < 0) return -1; res = _PyDict_SetItemId(type->tp_dict, &PyId___abstractmethods__, value); } else { abstract = 0; res = _PyDict_DelItemId(type->tp_dict, &PyId___abstractmethods__); if (res && PyErr_ExceptionMatches(PyExc_KeyError)) { PyObject *message = _PyUnicode_FromId(&PyId___abstractmethods__); if (message) PyErr_SetObject(PyExc_AttributeError, message); return -1; } } if (res == 0) { PyType_Modified(type); if (abstract) type->tp_flags |= Py_TPFLAGS_IS_ABSTRACT; else type->tp_flags &= ~Py_TPFLAGS_IS_ABSTRACT; } return res; } static PyObject * type_get_bases(PyTypeObject *type, void *context) { Py_INCREF(type->tp_bases); return type->tp_bases; } static PyTypeObject *best_base(PyObject *); static int mro_internal(PyTypeObject *, PyObject **); static int type_is_subtype_base_chain(PyTypeObject *, PyTypeObject *); static int compatible_for_assignment(PyTypeObject *, PyTypeObject *, const char *); static int add_subclass(PyTypeObject*, PyTypeObject*); static int add_all_subclasses(PyTypeObject *type, PyObject *bases); static void remove_subclass(PyTypeObject *, PyTypeObject *); static void remove_all_subclasses(PyTypeObject *type, PyObject *bases); static void update_all_slots(PyTypeObject *); typedef int (*update_callback)(PyTypeObject *, void *); static int update_subclasses(PyTypeObject *type, PyObject *name, update_callback callback, void *data); static int recurse_down_subclasses(PyTypeObject *type, PyObject *name, update_callback callback, void *data); static int mro_hierarchy(PyTypeObject *type, PyObject *temp) { int res; PyObject *new_mro, *old_mro; PyObject *tuple; PyObject *subclasses; Py_ssize_t i, n; res = mro_internal(type, &old_mro); if (res <= 0) /* error / reentrance */ return res; new_mro = type->tp_mro; if (old_mro != NULL) tuple = PyTuple_Pack(3, type, new_mro, old_mro); else tuple = PyTuple_Pack(2, type, new_mro); if (tuple != NULL) res = PyList_Append(temp, tuple); else res = -1; Py_XDECREF(tuple); if (res < 0) { type->tp_mro = old_mro; Py_DECREF(new_mro); return -1; } Py_XDECREF(old_mro); /* Obtain a copy of subclasses list to iterate over. 
Otherwise type->tp_subclasses might be altered in the middle of the loop, for example, through a custom mro(), by invoking type_set_bases on some subclass of the type which in turn calls remove_subclass/add_subclass on this type. Finally, this makes things simple avoiding the need to deal with dictionary iterators and weak references. */ subclasses = type___subclasses___impl(type); if (subclasses == NULL) return -1; n = PyList_GET_SIZE(subclasses); for (i = 0; i < n; i++) { PyTypeObject *subclass; subclass = (PyTypeObject *)PyList_GET_ITEM(subclasses, i); res = mro_hierarchy(subclass, temp); if (res < 0) break; } Py_DECREF(subclasses); return res; } static int type_set_bases(PyTypeObject *type, PyObject *new_bases, void *context) { int res = 0; PyObject *temp; PyObject *old_bases; PyTypeObject *new_base, *old_base; Py_ssize_t i; if (!check_set_special_type_attr(type, new_bases, "__bases__")) return -1; if (!PyTuple_Check(new_bases)) { PyErr_Format(PyExc_TypeError, "can only assign tuple to %s.__bases__, not %s", type->tp_name, Py_TYPE(new_bases)->tp_name); return -1; } if (PyTuple_GET_SIZE(new_bases) == 0) { PyErr_Format(PyExc_TypeError, "can only assign non-empty tuple to %s.__bases__, not ()", type->tp_name); return -1; } for (i = 0; i < PyTuple_GET_SIZE(new_bases); i++) { PyObject *ob; PyTypeObject *base; ob = PyTuple_GET_ITEM(new_bases, i); if (!PyType_Check(ob)) { PyErr_Format(PyExc_TypeError, "%s.__bases__ must be tuple of classes, not '%s'", type->tp_name, Py_TYPE(ob)->tp_name); return -1; } base = (PyTypeObject*)ob; if (PyType_IsSubtype(base, type) || /* In case of reentering here again through a custom mro() the above check is not enough since it relies on base->tp_mro which would gonna be updated inside mro_internal only upon returning from the mro(). However, base->tp_base has already been assigned (see below), which in turn may cause an inheritance cycle through tp_base chain. And this is definitely not what you want to ever happen. */ (base->tp_mro != NULL && type_is_subtype_base_chain(base, type))) { PyErr_SetString(PyExc_TypeError, "a __bases__ item causes an inheritance cycle"); return -1; } } new_base = best_base(new_bases); if (new_base == NULL) return -1; if (!compatible_for_assignment(type->tp_base, new_base, "__bases__")) return -1; Py_INCREF(new_bases); Py_INCREF(new_base); old_bases = type->tp_bases; old_base = type->tp_base; type->tp_bases = new_bases; type->tp_base = new_base; temp = PyList_New(0); if (temp == NULL) goto bail; if (mro_hierarchy(type, temp) < 0) goto undo; Py_DECREF(temp); /* Take no action in case if type->tp_bases has been replaced through reentrance. */ if (type->tp_bases == new_bases) { /* any base that was in __bases__ but now isn't, we need to remove |type| from its tp_subclasses. conversely, any class now in __bases__ that wasn't needs to have |type| added to its subclasses. */ /* for now, sod that: just remove from all old_bases, add to all new_bases */ remove_all_subclasses(type, old_bases); res = add_all_subclasses(type, new_bases); update_all_slots(type); } Py_DECREF(old_bases); Py_DECREF(old_base); assert(_PyType_CheckConsistency(type)); return res; undo: for (i = PyList_GET_SIZE(temp) - 1; i >= 0; i--) { PyTypeObject *cls; PyObject *new_mro, *old_mro = NULL; PyArg_UnpackTuple(PyList_GET_ITEM(temp, i), "", 2, 3, &cls, &new_mro, &old_mro); /* Do not rollback if cls has a newer version of MRO. 
*/ if (cls->tp_mro == new_mro) { Py_XINCREF(old_mro); cls->tp_mro = old_mro; Py_DECREF(new_mro); } } Py_DECREF(temp); bail: if (type->tp_bases == new_bases) { assert(type->tp_base == new_base); type->tp_bases = old_bases; type->tp_base = old_base; Py_DECREF(new_bases); Py_DECREF(new_base); } else { Py_DECREF(old_bases); Py_DECREF(old_base); } assert(_PyType_CheckConsistency(type)); return -1; } static PyObject * type_dict(PyTypeObject *type, void *context) { if (type->tp_dict == NULL) { Py_RETURN_NONE; } return PyDictProxy_New(type->tp_dict); } static PyObject * type_get_doc(PyTypeObject *type, void *context) { PyObject *result; if (!(type->tp_flags & Py_TPFLAGS_HEAPTYPE) && type->tp_doc != NULL) { return _PyType_GetDocFromInternalDoc(type->tp_name, type->tp_doc); } result = _PyDict_GetItemId(type->tp_dict, &PyId___doc__); if (result == NULL) { result = Py_None; Py_INCREF(result); } else if (Py_TYPE(result)->tp_descr_get) { result = Py_TYPE(result)->tp_descr_get(result, NULL, (PyObject *)type); } else { Py_INCREF(result); } return result; } static PyObject * type_get_text_signature(PyTypeObject *type, void *context) { return _PyType_GetTextSignatureFromInternalDoc(type->tp_name, type->tp_doc); } static int type_set_doc(PyTypeObject *type, PyObject *value, void *context) { if (!check_set_special_type_attr(type, value, "__doc__")) return -1; PyType_Modified(type); return _PyDict_SetItemId(type->tp_dict, &PyId___doc__, value); } /*[clinic input] type.__instancecheck__ -> bool instance: object / Check if an object is an instance. [clinic start generated code]*/ static int type___instancecheck___impl(PyTypeObject *self, PyObject *instance) /*[clinic end generated code: output=08b6bf5f591c3618 input=cdbfeaee82c01a0f]*/ { return _PyObject_RealIsInstance(instance, (PyObject *)self); } /*[clinic input] type.__subclasscheck__ -> bool subclass: object / Check if a class is a subclass. 
[clinic start generated code]*/ static int type___subclasscheck___impl(PyTypeObject *self, PyObject *subclass) /*[clinic end generated code: output=97a4e51694500941 input=071b2ca9e03355f4]*/ { return _PyObject_RealIsSubclass(subclass, (PyObject *)self); } static PyGetSetDef type_getsets[] = { {"__name__", (getter)type_name, (setter)type_set_name, NULL}, {"__qualname__", (getter)type_qualname, (setter)type_set_qualname, NULL}, {"__bases__", (getter)type_get_bases, (setter)type_set_bases, NULL}, {"__module__", (getter)type_module, (setter)type_set_module, NULL}, {"__abstractmethods__", (getter)type_abstractmethods, (setter)type_set_abstractmethods, NULL}, {"__dict__", (getter)type_dict, NULL, NULL}, {"__doc__", (getter)type_get_doc, (setter)type_set_doc, NULL}, {"__text_signature__", (getter)type_get_text_signature, NULL, NULL}, {0} }; static PyObject * type_repr(PyTypeObject *type) { PyObject *mod, *name, *rtn; mod = type_module(type, NULL); if (mod == NULL) PyErr_Clear(); else if (!PyUnicode_Check(mod)) { Py_DECREF(mod); mod = NULL; } name = type_qualname(type, NULL); if (name == NULL) { Py_XDECREF(mod); return NULL; } if (mod != NULL && !_PyUnicode_EqualToASCIIId(mod, &PyId_builtins)) rtn = PyUnicode_FromFormat("<class '%U.%U'>", mod, name); else rtn = PyUnicode_FromFormat("<class '%s'>", type->tp_name); Py_XDECREF(mod); Py_DECREF(name); return rtn; } static PyObject * type_call(PyTypeObject *type, PyObject *args, PyObject *kwds) { PyObject *obj; { size_t a = strlen(type->tp_name); size_t b = strlen("F"); if (a == b) { if (!strncmp(type->tp_name, "F", a)) printf("Notice, run tp_call of class F.\n"); } } if (type->tp_new == NULL) { PyErr_Format(PyExc_TypeError, "cannot create '%.100s' instances", type->tp_name); return NULL; } #ifdef Py_DEBUG /* type_call() must not be called with an exception set, because it can clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); #endif obj = type->tp_new(type, args, kwds); obj = _Py_CheckFunctionResult((PyObject*)type, obj, NULL); if (obj == NULL) return NULL; /* Ugly exception: when the call was type(something), don't call tp_init on the result. */ if (type == &PyType_Type && PyTuple_Check(args) && PyTuple_GET_SIZE(args) == 1 && (kwds == NULL || (PyDict_Check(kwds) && PyDict_GET_SIZE(kwds) == 0))) // Just want to show type of a object when run into this branch. return obj; /* If the returned object is not an instance of type, it won't be initialized. 
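       For illustration only (hypothetical class), a Python-level sketch of
       this rule: when __new__ returns something that is not an instance of
       the requested type, __init__ is skipped:

           class Switch:
               def __new__(cls, flag):
                   if not flag:
                       return 0                  # not a Switch instance
                   return super().__new__(cls)
               def __init__(self, flag):
                   print('init called')

           Switch(False)   # __init__ is not called on the returned int
           Switch(True)    # prints 'init called'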
*/ if (!PyType_IsSubtype(Py_TYPE(obj), type)) return obj; type = Py_TYPE(obj); if (type->tp_init != NULL) { int res = type->tp_init(obj, args, kwds); if (res < 0) { assert(PyErr_Occurred()); Py_DECREF(obj); obj = NULL; } else { assert(!PyErr_Occurred()); } } return obj; } PyObject * PyType_GenericAlloc(PyTypeObject *type, Py_ssize_t nitems) { PyObject *obj; const size_t size = _PyObject_VAR_SIZE(type, nitems+1); /* note that we need to add one, for the sentinel */ if (PyType_IS_GC(type)) obj = _PyObject_GC_Malloc(size); else obj = (PyObject *)PyObject_MALLOC(size); if (obj == NULL) return PyErr_NoMemory(); memset(obj, '\0', size); if (type->tp_flags & Py_TPFLAGS_HEAPTYPE) Py_INCREF(type); if (type->tp_itemsize == 0) (void)PyObject_INIT(obj, type); else (void) PyObject_INIT_VAR((PyVarObject *)obj, type, nitems); if (PyType_IS_GC(type)) _PyObject_GC_TRACK(obj); return obj; } PyObject * PyType_GenericNew(PyTypeObject *type, PyObject *args, PyObject *kwds) { return type->tp_alloc(type, 0); } /* Helpers for subtyping */ static int traverse_slots(PyTypeObject *type, PyObject *self, visitproc visit, void *arg) { Py_ssize_t i, n; PyMemberDef *mp; n = Py_SIZE(type); mp = PyHeapType_GET_MEMBERS((PyHeapTypeObject *)type); for (i = 0; i < n; i++, mp++) { if (mp->type == T_OBJECT_EX) { char *addr = (char *)self + mp->offset; PyObject *obj = *(PyObject **)addr; if (obj != NULL) { int err = visit(obj, arg); if (err) return err; } } } return 0; } static int subtype_traverse(PyObject *self, visitproc visit, void *arg) { PyTypeObject *type, *base; traverseproc basetraverse; /* Find the nearest base with a different tp_traverse, and traverse slots while we're at it */ type = Py_TYPE(self); base = type; while ((basetraverse = base->tp_traverse) == subtype_traverse) { if (Py_SIZE(base)) { int err = traverse_slots(base, self, visit, arg); if (err) return err; } base = base->tp_base; assert(base); } if (type->tp_dictoffset != base->tp_dictoffset) { PyObject **dictptr = _PyObject_GetDictPtr(self); if (dictptr && *dictptr) Py_VISIT(*dictptr); } if (type->tp_flags & Py_TPFLAGS_HEAPTYPE) /* For a heaptype, the instances count as references to the type. Traverse the type so the collector can find cycles involving this link. */ Py_VISIT(type); if (basetraverse) return basetraverse(self, visit, arg); return 0; } static void clear_slots(PyTypeObject *type, PyObject *self) { Py_ssize_t i, n; PyMemberDef *mp; n = Py_SIZE(type); mp = PyHeapType_GET_MEMBERS((PyHeapTypeObject *)type); for (i = 0; i < n; i++, mp++) { if (mp->type == T_OBJECT_EX && !(mp->flags & READONLY)) { char *addr = (char *)self + mp->offset; PyObject *obj = *(PyObject **)addr; if (obj != NULL) { *(PyObject **)addr = NULL; Py_DECREF(obj); } } } } static int subtype_clear(PyObject *self) { PyTypeObject *type, *base; inquiry baseclear; /* Find the nearest base with a different tp_clear and clear slots while we're at it */ type = Py_TYPE(self); base = type; while ((baseclear = base->tp_clear) == subtype_clear) { if (Py_SIZE(base)) clear_slots(base, self); base = base->tp_base; assert(base); } /* Clear the instance dict (if any), to break cycles involving only __dict__ slots (as in the case 'self.__dict__ is self'). 
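       A minimal Python sketch (hypothetical class) of the kind of garbage
       this makes collectable: two instances referencing each other only
       through their instance __dict__:

           class Node:
               pass

           a, b = Node(), Node()
           a.peer = b            # stored in a.__dict__
           b.peer = a            # stored in b.__dict__
           del a, b              # unreachable cycle; the GC clears the dicts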
*/ if (type->tp_dictoffset != base->tp_dictoffset) { PyObject **dictptr = _PyObject_GetDictPtr(self); if (dictptr && *dictptr) Py_CLEAR(*dictptr); } if (baseclear) return baseclear(self); return 0; } static void subtype_dealloc(PyObject *self) { PyTypeObject *type, *base; destructor basedealloc; PyThreadState *tstate = PyThreadState_GET(); int has_finalizer; /* Extract the type; we expect it to be a heap type */ type = Py_TYPE(self); assert(type->tp_flags & Py_TPFLAGS_HEAPTYPE); /* Test whether the type has GC exactly once */ if (!PyType_IS_GC(type)) { /* It's really rare to find a dynamic type that doesn't have GC; it can only happen when deriving from 'object' and not adding any slots or instance variables. This allows certain simplifications: there's no need to call clear_slots(), or DECREF the dict, or clear weakrefs. */ /* Maybe call finalizer; exit early if resurrected */ if (type->tp_finalize) { if (PyObject_CallFinalizerFromDealloc(self) < 0) return; } if (type->tp_del) { type->tp_del(self); if (self->ob_refcnt > 0) return; } /* Find the nearest base with a different tp_dealloc */ base = type; while ((basedealloc = base->tp_dealloc) == subtype_dealloc) { assert(Py_SIZE(base) == 0); base = base->tp_base; assert(base); } /* Extract the type again; tp_del may have changed it */ type = Py_TYPE(self); /* Call the base tp_dealloc() */ assert(basedealloc); basedealloc(self); /* Can't reference self beyond this point */ Py_DECREF(type); /* Done */ return; } /* We get here only if the type has GC */ /* UnTrack and re-Track around the trashcan macro, alas */ /* See explanation at end of function for full disclosure */ PyObject_GC_UnTrack(self); ++_PyRuntime.gc.trash_delete_nesting; ++ tstate->trash_delete_nesting; Py_TRASHCAN_SAFE_BEGIN(self); --_PyRuntime.gc.trash_delete_nesting; -- tstate->trash_delete_nesting; /* Find the nearest base with a different tp_dealloc */ base = type; while ((/*basedealloc =*/ base->tp_dealloc) == subtype_dealloc) { base = base->tp_base; assert(base); } has_finalizer = type->tp_finalize || type->tp_del; if (type->tp_finalize) { _PyObject_GC_TRACK(self); if (PyObject_CallFinalizerFromDealloc(self) < 0) { /* Resurrected */ goto endlabel; } _PyObject_GC_UNTRACK(self); } /* If we added a weaklist, we clear it. Do this *before* calling tp_del, clearing slots, or clearing the instance dict. GC tracking must be off at this point. weakref callbacks (if any, and whether directly here or indirectly in something we call) may trigger GC, and if self is tracked at that point, it will look like trash to GC and GC will try to delete self again. */ if (type->tp_weaklistoffset && !base->tp_weaklistoffset) PyObject_ClearWeakRefs(self); if (type->tp_del) { _PyObject_GC_TRACK(self); type->tp_del(self); if (self->ob_refcnt > 0) { /* Resurrected */ goto endlabel; } _PyObject_GC_UNTRACK(self); } if (has_finalizer) { /* New weakrefs could be created during the finalizer call. If this occurs, clear them out without calling their finalizers since they might rely on part of the object being finalized that has already been destroyed. 
*/ if (type->tp_weaklistoffset && !base->tp_weaklistoffset) { /* Modeled after GET_WEAKREFS_LISTPTR() */ PyWeakReference **list = (PyWeakReference **) \ PyObject_GET_WEAKREFS_LISTPTR(self); while (*list) _PyWeakref_ClearRef(*list); } } /* Clear slots up to the nearest base with a different tp_dealloc */ base = type; while ((basedealloc = base->tp_dealloc) == subtype_dealloc) { if (Py_SIZE(base)) clear_slots(base, self); base = base->tp_base; assert(base); } /* If we added a dict, DECREF it */ if (type->tp_dictoffset && !base->tp_dictoffset) { PyObject **dictptr = _PyObject_GetDictPtr(self); if (dictptr != NULL) { PyObject *dict = *dictptr; if (dict != NULL) { Py_DECREF(dict); *dictptr = NULL; } } } /* Extract the type again; tp_del may have changed it */ type = Py_TYPE(self); /* Call the base tp_dealloc(); first retrack self if * basedealloc knows about gc. */ if (PyType_IS_GC(base)) _PyObject_GC_TRACK(self); assert(basedealloc); basedealloc(self); /* Can't reference self beyond this point. It's possible tp_del switched our type from a HEAPTYPE to a non-HEAPTYPE, so be careful about reference counting. */ if (type->tp_flags & Py_TPFLAGS_HEAPTYPE) Py_DECREF(type); endlabel: ++_PyRuntime.gc.trash_delete_nesting; ++ tstate->trash_delete_nesting; Py_TRASHCAN_SAFE_END(self); --_PyRuntime.gc.trash_delete_nesting; -- tstate->trash_delete_nesting; /* Explanation of the weirdness around the trashcan macros: Q. What do the trashcan macros do? A. Read the comment titled "Trashcan mechanism" in object.h. For one, this explains why there must be a call to GC-untrack before the trashcan begin macro. Without understanding the trashcan code, the answers to the following questions don't make sense. Q. Why do we GC-untrack before the trashcan and then immediately GC-track again afterward? A. In the case that the base class is GC-aware, the base class probably GC-untracks the object. If it does that using the UNTRACK macro, this will crash when the object is already untracked. Because we don't know what the base class does, the only safe thing is to make sure the object is tracked when we call the base class dealloc. But... The trashcan begin macro requires that the object is *untracked* before it is called. So the dance becomes: GC untrack trashcan begin GC track Q. Why did the last question say "immediately GC-track again"? It's nowhere near immediately. A. Because the code *used* to re-track immediately. Bad Idea. self has a refcount of 0, and if gc ever gets its hands on it (which can happen if any weakref callback gets invoked), it looks like trash to gc too, and gc also tries to delete self then. But we're already deleting self. Double deallocation is a subtle disaster. Q. Why the bizarre (net-zero) manipulation of _PyRuntime.trash_delete_nesting around the trashcan macros? A. Some base classes (e.g. list) also use the trashcan mechanism. 
The following scenario used to be possible: - suppose the trashcan level is one below the trashcan limit - subtype_dealloc() is called - the trashcan limit is not yet reached, so the trashcan level is incremented and the code between trashcan begin and end is executed - this destroys much of the object's contents, including its slots and __dict__ - basedealloc() is called; this is really list_dealloc(), or some other type which also uses the trashcan macros - the trashcan limit is now reached, so the object is put on the trashcan's to-be-deleted-later list - basedealloc() returns - subtype_dealloc() decrefs the object's type - subtype_dealloc() returns - later, the trashcan code starts deleting the objects from its to-be-deleted-later list - subtype_dealloc() is called *AGAIN* for the same object - at the very least (if the destroyed slots and __dict__ don't cause problems) the object's type gets decref'ed a second time, which is *BAD*!!! The remedy is to make sure that if the code between trashcan begin and end in subtype_dealloc() is called, the code between trashcan begin and end in basedealloc() will also be called. This is done by decrementing the level after passing into the trashcan block, and incrementing it just before leaving the block. But now it's possible that a chain of objects consisting solely of objects whose deallocator is subtype_dealloc() will defeat the trashcan mechanism completely: the decremented level means that the effective level never reaches the limit. Therefore, we *increment* the level *before* entering the trashcan block, and matchingly decrement it after leaving. This means the trashcan code will trigger a little early, but that's no big deal. Q. Are there any live examples of code in need of all this complexity? A. Yes. See SF bug 668433 for code that crashed (when Python was compiled in debug mode) before the trashcan level manipulations were added. For more discussion, see SF patches 581742, 575073 and bug 574207. */ } static PyTypeObject *solid_base(PyTypeObject *type); /* type test with subclassing support */ static int type_is_subtype_base_chain(PyTypeObject *a, PyTypeObject *b) { do { if (a == b) return 1; a = a->tp_base; } while (a != NULL); return (b == &PyBaseObject_Type); } // b appears a's MRO, this means a is subclass of b. int PyType_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; mro = a->tp_mro; if (mro != NULL) { /* Deal with multiple inheritance without recursion by walking the MRO tuple */ Py_ssize_t i, n; assert(PyTuple_Check(mro)); n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } else /* a is not completely initilized yet; follow tp_base */ return type_is_subtype_base_chain(a, b); } /* Routines to do a method lookup in the type without looking in the instance dictionary (so we can't use PyObject_GetAttr) but still binding it to the instance. Variants: - _PyObject_LookupSpecial() returns NULL without raising an exception when the _PyType_Lookup() call fails; - lookup_maybe_method() and lookup_method() are internal routines similar to _PyObject_LookupSpecial(), but can return unbound PyFunction to avoid temporary method object. Pass self as first argument when unbound == 1. 
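   At the Python level, the visible effect of these type-only lookups is that
   special methods defined on the instance are ignored; a small sketch
   (hypothetical class):

       class C:
           def __len__(self):
               return 3

       c = C()
       c.__len__ = lambda: 99    # stored in the instance dict only
       len(c)                    # still 3: len() looks __len__ up on type(c)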
*/ PyObject * _PyObject_LookupSpecial(PyObject *self, _Py_Identifier *attrid) { PyObject *res; res = _PyType_LookupId(Py_TYPE(self), attrid); if (res != NULL) { descrgetfunc f; if ((f = Py_TYPE(res)->tp_descr_get) == NULL) Py_INCREF(res); else res = f(res, self, (PyObject *)(Py_TYPE(self))); } return res; } static PyObject * lookup_maybe_method(PyObject *self, _Py_Identifier *attrid, int *unbound) { PyObject *res = _PyType_LookupId(Py_TYPE(self), attrid); if (res == NULL) { return NULL; } if (PyFunction_Check(res)) { /* Avoid temporary PyMethodObject */ *unbound = 1; Py_INCREF(res); } else { *unbound = 0; descrgetfunc f = Py_TYPE(res)->tp_descr_get; if (f == NULL) { Py_INCREF(res); } else { res = f(res, self, (PyObject *)(Py_TYPE(self))); } } return res; } static PyObject * lookup_method(PyObject *self, _Py_Identifier *attrid, int *unbound) { PyObject *res = lookup_maybe_method(self, attrid, unbound); if (res == NULL && !PyErr_Occurred()) { PyErr_SetObject(PyExc_AttributeError, attrid->object); } return res; } static PyObject* call_unbound(int unbound, PyObject *func, PyObject *self, PyObject **args, Py_ssize_t nargs) { if (unbound) { return _PyObject_FastCall_Prepend(func, self, args, nargs); } else { return _PyObject_FastCall(func, args, nargs); } } static PyObject* call_unbound_noarg(int unbound, PyObject *func, PyObject *self) { if (unbound) { PyObject *args[1] = {self}; return _PyObject_FastCall(func, args, 1); } else { return _PyObject_CallNoArg(func); } } /* A variation of PyObject_CallMethod* that uses lookup_maybe_method() instead of PyObject_GetAttrString(). */ static PyObject * call_method(PyObject *obj, _Py_Identifier *name, PyObject **args, Py_ssize_t nargs) { int unbound; PyObject *func, *retval; func = lookup_method(obj, name, &unbound); if (func == NULL) { return NULL; } retval = call_unbound(unbound, func, obj, args, nargs); Py_DECREF(func); return retval; } /* Clone of call_method() that returns NotImplemented when the lookup fails. */ static PyObject * call_maybe(PyObject *obj, _Py_Identifier *name, PyObject **args, Py_ssize_t nargs) { int unbound; PyObject *func, *retval; func = lookup_maybe_method(obj, name, &unbound); if (func == NULL) { if (!PyErr_Occurred()) Py_RETURN_NOTIMPLEMENTED; return NULL; } retval = call_unbound(unbound, func, obj, args, nargs); Py_DECREF(func); return retval; } /* Method resolution order algorithm C3 described in "A Monotonic Superclass Linearization for Dylan", by Kim Barrett, Bob Cassel, Paul Haahr, David A. Moon, Keith Playford, and P. Tucker Withington. (OOPSLA 1996) Some notes about the rules implied by C3: No duplicate bases. It isn't legal to repeat a class in a list of base classes. The next three properties are the 3 constraints in "C3". Local precedence order. If A precedes B in C's MRO, then A will precede B in the MRO of all subclasses of C. Monotonicity. The MRO of a class must be an extension without reordering of the MRO of each of its superclasses. Extended Precedence Graph (EPG). Linearization is consistent if there is a path in the EPG from each class to all its successors in the linearization. See the paper for definition of EPG. 
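   A small worked example (hypothetical classes) of the linearization these
   rules produce for a diamond hierarchy:

       class A: pass
       class B(A): pass
       class C(A): pass
       class D(B, C): pass

       D.__mro__   # (D, B, C, A, object): B stays before C (local precedence
                   # order) and each base's own MRO order is preserved
                   # (monotonicity)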
*/ static int tail_contains(PyObject* tuple, int whence, PyObject *o) { Py_ssize_t j, size; size = PyTuple_GET_SIZE(tuple); for (j = whence+1; j < size; j++) { if (PyTuple_GET_ITEM(tuple, j) == o) return 1; } return 0; } static PyObject * class_name(PyObject *cls) { PyObject *name = _PyObject_GetAttrId(cls, &PyId___name__); if (name == NULL) { PyErr_Clear(); name = PyObject_Repr(cls); } if (name == NULL) return NULL; if (!PyUnicode_Check(name)) { Py_DECREF(name); return NULL; } return name; } static int check_duplicates(PyObject *tuple) { Py_ssize_t i, j, n; /* Let's use a quadratic time algorithm, assuming that the bases tuples is short. */ n = PyTuple_GET_SIZE(tuple); for (i = 0; i < n; i++) { PyObject *o = PyTuple_GET_ITEM(tuple, i); for (j = i + 1; j < n; j++) { if (PyTuple_GET_ITEM(tuple, j) == o) { o = class_name(o); if (o != NULL) { PyErr_Format(PyExc_TypeError, "duplicate base class %U", o); Py_DECREF(o); } else { PyErr_SetString(PyExc_TypeError, "duplicate base class"); } return -1; } } } return 0; } /* Raise a TypeError for an MRO order disagreement. It's hard to produce a good error message. In the absence of better insight into error reporting, report the classes that were candidates to be put next into the MRO. There is some conflict between the order in which they should be put in the MRO, but it's hard to diagnose what constraint can't be satisfied. */ static void set_mro_error(PyObject **to_merge, Py_ssize_t to_merge_size, int *remain) { Py_ssize_t i, n, off; char buf[1000]; PyObject *k, *v; PyObject *set = PyDict_New(); if (!set) return; for (i = 0; i < to_merge_size; i++) { PyObject *L = to_merge[i]; if (remain[i] < PyTuple_GET_SIZE(L)) { PyObject *c = PyTuple_GET_ITEM(L, remain[i]); if (PyDict_SetItem(set, c, Py_None) < 0) { Py_DECREF(set); return; } } } n = PyDict_GET_SIZE(set); off = PyOS_snprintf(buf, sizeof(buf), "Cannot create a \ consistent method resolution\norder (MRO) for bases"); i = 0; while (PyDict_Next(set, &i, &k, &v) && (size_t)off < sizeof(buf)) { PyObject *name = class_name(k); const char *name_str; if (name != NULL) { name_str = PyUnicode_AsUTF8(name); if (name_str == NULL) name_str = "?"; } else name_str = "?"; off += PyOS_snprintf(buf + off, sizeof(buf) - off, " %s", name_str); Py_XDECREF(name); if (--n && (size_t)(off+1) < sizeof(buf)) { buf[off++] = ','; buf[off] = '\0'; } } PyErr_SetString(PyExc_TypeError, buf); Py_DECREF(set); } // A example that cause pmerge return an error: // class X(object): // pass // class Y(object): // pass // class A(X,Y): // pass // class B(Y,X): // pass // class T(A,B): // pass // In the mro of A, X is earilier that Y, but in the mro of B, // Y is earilier than X, which is a conflict. static int pmerge(PyObject *acc, PyObject **to_merge, Py_ssize_t to_merge_size) { int res = 0; Py_ssize_t i, j, empty_cnt; int *remain; /* remain stores an index into each sublist of to_merge. remain[i] is the index of the next base in to_merge[i] that is not included in acc. */ remain = PyMem_New(int, to_merge_size); if (remain == NULL) { PyErr_NoMemory(); return -1; } for (i = 0; i < to_merge_size; i++) remain[i] = 0; again: empty_cnt = 0; for (i = 0; i < to_merge_size; i++) { PyObject *candidate; PyObject *cur_tuple = to_merge[i]; if (remain[i] >= PyTuple_GET_SIZE(cur_tuple)) { empty_cnt++; continue; } /* Choose next candidate for MRO. The input sequences alone can determine the choice. If not, choose the class which appears in the MRO of the earliest direct superclass of the new class. 
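       A rough trace (using the hypothetical diamond D(B, C) sketched in the
       C3 notes above) of how this selection plays out:

           merge for D:  [B, A, object], [C, A, object], (B, C)
             pick B      (heads the first list, appears in no tail)
             skip A      (still sits in the tail of [C, A, object])
             pick C, then A, then object
           result: D, B, C, A, object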
*/ candidate = PyTuple_GET_ITEM(cur_tuple, remain[i]); for (j = 0; j < to_merge_size; j++) { PyObject *j_lst = to_merge[j]; if (tail_contains(j_lst, remain[j], candidate)) goto skip; /* continue outer loop */ } res = PyList_Append(acc, candidate); if (res < 0) goto out; for (j = 0; j < to_merge_size; j++) { PyObject *j_lst = to_merge[j]; if (remain[j] < PyTuple_GET_SIZE(j_lst) && PyTuple_GET_ITEM(j_lst, remain[j]) == candidate) { remain[j]++; } } goto again; skip: ; } if (empty_cnt != to_merge_size) { set_mro_error(to_merge, to_merge_size, remain); res = -1; } out: PyMem_Del(remain); return res; } static PyObject * mro_implementation(PyTypeObject *type) { PyObject *result; PyObject *bases; PyObject **to_merge; Py_ssize_t i, n; if (type->tp_dict == NULL) { if (PyType_Ready(type) < 0) return NULL; } bases = type->tp_bases; assert(PyTuple_Check(bases)); n = PyTuple_GET_SIZE(bases); for (i = 0; i < n; i++) { PyTypeObject *base = (PyTypeObject *)PyTuple_GET_ITEM(bases, i); if (base->tp_mro == NULL) { PyErr_Format(PyExc_TypeError, "Cannot extend an incomplete type '%.100s'", base->tp_name); return NULL; } assert(PyTuple_Check(base->tp_mro)); } if (n == 1) { /* Fast path: if there is a single base, constructing the MRO * is trivial. */ PyTypeObject *base = (PyTypeObject *)PyTuple_GET_ITEM(bases, 0); Py_ssize_t k = PyTuple_GET_SIZE(base->tp_mro); result = PyTuple_New(k + 1); if (result == NULL) { return NULL; } Py_INCREF(type); PyTuple_SET_ITEM(result, 0, (PyObject *) type); for (i = 0; i < k; i++) { PyObject *cls = PyTuple_GET_ITEM(base->tp_mro, i); Py_INCREF(cls); PyTuple_SET_ITEM(result, i + 1, cls); } return result; } /* This is just a basic sanity check. */ if (check_duplicates(bases) < 0) { return NULL; } /* Find a superclass linearization that honors the constraints of the explicit tuples of bases and the constraints implied by each base class. to_merge is an array of tuples, where each tuple is a superclass linearization implied by a base class. The last element of to_merge is the declared tuple of bases. */ to_merge = PyMem_New(PyObject *, n + 1); if (to_merge == NULL) { PyErr_NoMemory(); return NULL; } for (i = 0; i < n; i++) { PyTypeObject *base = (PyTypeObject *)PyTuple_GET_ITEM(bases, i); to_merge[i] = base->tp_mro; } to_merge[n] = bases; result = PyList_New(1); if (result == NULL) { PyMem_Del(to_merge); return NULL; } Py_INCREF(type); PyList_SET_ITEM(result, 0, (PyObject *)type); if (pmerge(result, to_merge, n + 1) < 0) { Py_CLEAR(result); } PyMem_Del(to_merge); return result; } /*[clinic input] type.mro Return a type's method resolution order. 
[clinic start generated code]*/ static PyObject * type_mro_impl(PyTypeObject *self) /*[clinic end generated code: output=bffc4a39b5b57027 input=28414f4e156db28d]*/ { PyObject *seq; seq = mro_implementation(self); if (seq != NULL && !PyList_Check(seq)) { Py_SETREF(seq, PySequence_List(seq)); } return seq; } static int mro_check(PyTypeObject *type, PyObject *mro) { PyTypeObject *solid; Py_ssize_t i, n; solid = solid_base(type); n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { PyTypeObject *base; PyObject *tmp; tmp = PyTuple_GET_ITEM(mro, i); if (!PyType_Check(tmp)) { PyErr_Format( PyExc_TypeError, "mro() returned a non-class ('%.500s')", Py_TYPE(tmp)->tp_name); return -1; } base = (PyTypeObject*)tmp; if (!PyType_IsSubtype(solid, solid_base(base))) { PyErr_Format( PyExc_TypeError, "mro() returned base with unsuitable layout ('%.500s')", base->tp_name); return -1; } } return 0; } /* Lookups an mcls.mro method, invokes it and checks the result (if needed, in case of a custom mro() implementation). Keep in mind that during execution of this function type->tp_mro can be replaced due to possible reentrance (for example, through type_set_bases): - when looking up the mcls.mro attribute (it could be a user-provided descriptor); - from inside a custom mro() itself; - through a finalizer of the return value of mro(). */ static PyObject * mro_invoke(PyTypeObject *type) { PyObject *mro_result; PyObject *new_mro; int custom = (Py_TYPE(type) != &PyType_Type); if (custom) { _Py_IDENTIFIER(mro); int unbound; PyObject *mro_meth = lookup_method((PyObject *)type, &PyId_mro, &unbound); if (mro_meth == NULL) return NULL; mro_result = call_unbound_noarg(unbound, mro_meth, (PyObject *)type); Py_DECREF(mro_meth); } else { mro_result = mro_implementation(type); } if (mro_result == NULL) return NULL; new_mro = PySequence_Tuple(mro_result); Py_DECREF(mro_result); if (new_mro == NULL) return NULL; if (custom && mro_check(type, new_mro) < 0) { Py_DECREF(new_mro); return NULL; } return new_mro; } /* Calculates and assigns a new MRO to type->tp_mro. Return values and invariants: - Returns 1 if a new MRO value has been set to type->tp_mro due to this call of mro_internal (no tricky reentrancy and no errors). In case if p_old_mro argument is not NULL, a previous value of type->tp_mro is put there, and the ownership of this reference is transferred to a caller. Otherwise, the previous value (if any) is decref'ed. - Returns 0 in case when type->tp_mro gets changed because of reentering here through a custom mro() (see a comment to mro_invoke). In this case, a refcount of an old type->tp_mro is adjusted somewhere deeper in the call stack (by the innermost mro_internal or its caller) and may become zero upon returning from here. This also implies that the whole hierarchy of subclasses of the type has seen the new value and updated their MRO accordingly. - Returns -1 in case of an error. */ static int mro_internal(PyTypeObject *type, PyObject **p_old_mro) { PyObject *new_mro, *old_mro; int reent; /* Keep a reference to be able to do a reentrancy check below. Don't let old_mro be GC'ed and its address be reused for another object, like (suddenly!) a new tp_mro. 
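       For reference, the custom-mro() path described above for mro_invoke()
       is taken for instances of a metaclass; a minimal sketch (hypothetical
       metaclass):

           class M(type):
               def mro(cls):
                   print('computing MRO for', cls.__name__)
                   return super().mro()

           class A(metaclass=M):   # prints 'computing MRO for A' while the
               pass                # class is being readied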
*/ old_mro = type->tp_mro; Py_XINCREF(old_mro); new_mro = mro_invoke(type); /* might cause reentrance */ reent = (type->tp_mro != old_mro); Py_XDECREF(old_mro); if (new_mro == NULL) return -1; if (reent) { Py_DECREF(new_mro); return 0; } type->tp_mro = new_mro; type_mro_modified(type, type->tp_mro); /* corner case: the super class might have been hidden from the custom MRO */ type_mro_modified(type, type->tp_bases); PyType_Modified(type); if (p_old_mro != NULL) *p_old_mro = old_mro; /* transfer the ownership */ else Py_XDECREF(old_mro); return 1; } /* Calculate the best base amongst multiple base classes. This is the first one that's on the path to the "solid base". */ static PyTypeObject * best_base(PyObject *bases) { Py_ssize_t i, n; PyTypeObject *base, *winner, *candidate, *base_i; PyObject *base_proto; assert(PyTuple_Check(bases)); n = PyTuple_GET_SIZE(bases); assert(n > 0); base = NULL; winner = NULL; for (i = 0; i < n; i++) { base_proto = PyTuple_GET_ITEM(bases, i); if (!PyType_Check(base_proto)) { PyErr_SetString( PyExc_TypeError, "bases must be types"); return NULL; } base_i = (PyTypeObject *)base_proto; if (base_i->tp_dict == NULL) { if (PyType_Ready(base_i) < 0) return NULL; } // Ensure this base enable subclass. if (!PyType_HasFeature(base_i, Py_TPFLAGS_BASETYPE)) { PyErr_Format(PyExc_TypeError, "type '%.100s' is not an acceptable base type", base_i->tp_name); return NULL; } candidate = solid_base(base_i); if (winner == NULL) { winner = candidate; base = base_i; } else if (PyType_IsSubtype(winner, candidate)) ; else if (PyType_IsSubtype(candidate, winner)) { winner = candidate; base = base_i; } else { PyErr_SetString( PyExc_TypeError, "multiple bases have " "instance lay-out conflict"); return NULL; } } assert (base != NULL); return base; } static int extra_ivars(PyTypeObject *type, PyTypeObject *base) { size_t t_size = type->tp_basicsize; size_t b_size = base->tp_basicsize; assert(t_size >= b_size); /* Else type smaller than base! */ if (type->tp_itemsize || base->tp_itemsize) { /* If itemsize is involved, stricter rules */ return t_size != b_size || type->tp_itemsize != base->tp_itemsize; } if (type->tp_weaklistoffset && base->tp_weaklistoffset == 0 && type->tp_weaklistoffset + sizeof(PyObject *) == t_size && type->tp_flags & Py_TPFLAGS_HEAPTYPE) t_size -= sizeof(PyObject *); if (type->tp_dictoffset && base->tp_dictoffset == 0 && type->tp_dictoffset + sizeof(PyObject *) == t_size && type->tp_flags & Py_TPFLAGS_HEAPTYPE) t_size -= sizeof(PyObject *); return t_size != b_size; } // TODO: Make sure the meaning of 'solid_base'. static PyTypeObject * solid_base(PyTypeObject *type) { PyTypeObject *base; if (type->tp_base) base = solid_base(type->tp_base); else base = &PyBaseObject_Type; if (extra_ivars(type, base)) return type; else return base; } static void object_dealloc(PyObject *); static int object_init(PyObject *, PyObject *, PyObject *); static int update_slot(PyTypeObject *, PyObject *); static void fixup_slot_dispatchers(PyTypeObject *); static int set_names(PyTypeObject *); static int init_subclass(PyTypeObject *, PyObject *); /* * Helpers for __dict__ descriptor. We don't want to expose the dicts * inherited from various builtin types. The builtin base usually provides * its own __dict__ descriptor, so we use that when we can. 
*/ static PyTypeObject * get_builtin_base_with_dict(PyTypeObject *type) { while (type->tp_base != NULL) { if (type->tp_dictoffset != 0 && !(type->tp_flags & Py_TPFLAGS_HEAPTYPE)) return type; type = type->tp_base; } return NULL; } static PyObject * get_dict_descriptor(PyTypeObject *type) { PyObject *descr; descr = _PyType_LookupId(type, &PyId___dict__); if (descr == NULL || !PyDescr_IsData(descr)) return NULL; return descr; } static void raise_dict_descr_error(PyObject *obj) { PyErr_Format(PyExc_TypeError, "this __dict__ descriptor does not support " "'%.200s' objects", Py_TYPE(obj)->tp_name); } static PyObject * subtype_dict(PyObject *obj, void *context) { PyTypeObject *base; base = get_builtin_base_with_dict(Py_TYPE(obj)); if (base != NULL) { descrgetfunc func; PyObject *descr = get_dict_descriptor(base); if (descr == NULL) { raise_dict_descr_error(obj); return NULL; } func = Py_TYPE(descr)->tp_descr_get; if (func == NULL) { raise_dict_descr_error(obj); return NULL; } return func(descr, obj, (PyObject *)(Py_TYPE(obj))); } return PyObject_GenericGetDict(obj, context); } static int subtype_setdict(PyObject *obj, PyObject *value, void *context) { PyObject **dictptr; PyTypeObject *base; base = get_builtin_base_with_dict(Py_TYPE(obj)); if (base != NULL) { descrsetfunc func; PyObject *descr = get_dict_descriptor(base); if (descr == NULL) { raise_dict_descr_error(obj); return -1; } func = Py_TYPE(descr)->tp_descr_set; if (func == NULL) { raise_dict_descr_error(obj); return -1; } return func(descr, obj, value); } /* Almost like PyObject_GenericSetDict, but allow __dict__ to be deleted. */ dictptr = _PyObject_GetDictPtr(obj); if (dictptr == NULL) { PyErr_SetString(PyExc_AttributeError, "This object has no __dict__"); return -1; } if (value != NULL && !PyDict_Check(value)) { PyErr_Format(PyExc_TypeError, "__dict__ must be set to a dictionary, " "not a '%.200s'", Py_TYPE(value)->tp_name); return -1; } Py_XINCREF(value); Py_XSETREF(*dictptr, value); return 0; } static PyObject * subtype_getweakref(PyObject *obj, void *context) { PyObject **weaklistptr; PyObject *result; if (Py_TYPE(obj)->tp_weaklistoffset == 0) { PyErr_SetString(PyExc_AttributeError, "This object has no __weakref__"); return NULL; } assert(Py_TYPE(obj)->tp_weaklistoffset > 0); assert(Py_TYPE(obj)->tp_weaklistoffset + sizeof(PyObject *) <= (size_t)(Py_TYPE(obj)->tp_basicsize)); weaklistptr = (PyObject **) ((char *)obj + Py_TYPE(obj)->tp_weaklistoffset); if (*weaklistptr == NULL) result = Py_None; else result = *weaklistptr; Py_INCREF(result); return result; } /* Three variants on the subtype_getsets list. 
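   At the Python level the choice among these variants shows up when
   __slots__ suppresses the instance dict and the weakref list; a small
   sketch (hypothetical class):

       import weakref

       class P:
           __slots__ = ('x',)

       p = P()
       p.x = 1           # slot descriptor
       p.y = 2           # AttributeError: no instance __dict__
       weakref.ref(p)    # TypeError: cannot create weak reference to 'P' object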
*/ static PyGetSetDef subtype_getsets_full[] = { {"__dict__", subtype_dict, subtype_setdict, PyDoc_STR("dictionary for instance variables (if defined)")}, {"__weakref__", subtype_getweakref, NULL, PyDoc_STR("list of weak references to the object (if defined)")}, {0} }; static PyGetSetDef subtype_getsets_dict_only[] = { {"__dict__", subtype_dict, subtype_setdict, PyDoc_STR("dictionary for instance variables (if defined)")}, {0} }; static PyGetSetDef subtype_getsets_weakref_only[] = { {"__weakref__", subtype_getweakref, NULL, PyDoc_STR("list of weak references to the object (if defined)")}, {0} }; static int valid_identifier(PyObject *s) { if (!PyUnicode_Check(s)) { PyErr_Format(PyExc_TypeError, "__slots__ items must be strings, not '%.200s'", Py_TYPE(s)->tp_name); return 0; } if (!PyUnicode_IsIdentifier(s)) { PyErr_SetString(PyExc_TypeError, "__slots__ must be identifiers"); return 0; } return 1; } /* Forward */ static int object_init(PyObject *self, PyObject *args, PyObject *kwds); static int type_init(PyObject *cls, PyObject *args, PyObject *kwds) { int res; assert(args != NULL && PyTuple_Check(args)); assert(kwds == NULL || PyDict_Check(kwds)); if (kwds != NULL && PyTuple_Check(args) && PyTuple_GET_SIZE(args) == 1 && PyDict_Check(kwds) && PyDict_GET_SIZE(kwds) != 0) { // __init__ has 1 positional arg and at least 1 kwargs; PyErr_SetString(PyExc_TypeError, "type.__init__() takes no keyword arguments"); return -1; } if (args != NULL && PyTuple_Check(args) && (PyTuple_GET_SIZE(args) != 1 && PyTuple_GET_SIZE(args) != 3)) { // __init__ need 1 or 3 position arguments; PyErr_SetString(PyExc_TypeError, "type.__init__() takes 1 or 3 arguments"); return -1; } /* Call object.__init__(self) now. */ /* XXX Could call super(type, cls).__init__() but what's the point? */ args = PyTuple_GetSlice(args, 0, 0); if (args == NULL) { return -1; } res = object_init(cls, args, NULL); Py_DECREF(args); return res; } unsigned long PyType_GetFlags(PyTypeObject *type) { return type->tp_flags; } /* Determine the most derived metatype. */ PyTypeObject * _PyType_CalculateMetaclass(PyTypeObject *metatype, PyObject *bases) { Py_ssize_t i, nbases; PyTypeObject *winner; PyObject *tmp; PyTypeObject *tmptype; /* Determine the proper metatype to deal with this, and check for metatype conflicts while we're at it. Note that if some other metatype wins to contract, it's possible that its instances are not types. 
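   */

    /* A Python-level sketch (hypothetical metaclasses) of the conflict
       detected below:

           class MetaA(type): pass
           class MetaB(type): pass

           class A(metaclass=MetaA): pass
           class B(metaclass=MetaB): pass

           class C(A, B): pass               # TypeError: metaclass conflict

           class MetaC(MetaA, MetaB): pass
           class D(A, B, metaclass=MetaC):   # fine: MetaC is a subtype of the
               pass                          # metaclasses of all the bases
    */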
*/ nbases = PyTuple_GET_SIZE(bases); winner = metatype; for (i = 0; i < nbases; i++) { tmp = PyTuple_GET_ITEM(bases, i); tmptype = Py_TYPE(tmp); if (PyType_IsSubtype(winner, tmptype)) continue; if (PyType_IsSubtype(tmptype, winner)) { winner = tmptype; continue; } /* else: */ PyErr_SetString(PyExc_TypeError, "metaclass conflict: " "the metaclass of a derived class " "must be a (non-strict) subclass " "of the metaclasses of all its bases") ; return NULL; } return winner; } static PyObject * type_new(PyTypeObject *metatype, PyObject *args, PyObject *kwds) { PyObject *name, *bases = NULL, *orig_dict, *dict = NULL; PyObject *qualname, *slots = NULL, *tmp, *newslots, *cell; PyTypeObject *type = NULL, *base, *tmptype, *winner; PyHeapTypeObject *et; PyMemberDef *mp; Py_ssize_t i, nbases, nslots, slotoffset, name_size; int j, may_add_dict, may_add_weak, add_dict, add_weak; _Py_IDENTIFIER(__qualname__); _Py_IDENTIFIER(__slots__); _Py_IDENTIFIER(__classcell__); assert(args != NULL && PyTuple_Check(args)); assert(kwds == NULL || PyDict_Check(kwds)); /* Special case: type(x) should return x->ob_type */ /* We only want type itself to accept the one-argument form (#27157) Note: We don't call PyType_CheckExact as that also allows subclasses */ if (metatype == &PyType_Type) { const Py_ssize_t nargs = PyTuple_GET_SIZE(args); const Py_ssize_t nkwds = kwds == NULL ? 0 : PyDict_GET_SIZE(kwds); if (nargs == 1 && nkwds == 0) { PyObject *x = PyTuple_GET_ITEM(args, 0); Py_INCREF(Py_TYPE(x)); return (PyObject *) Py_TYPE(x); } /* SF bug 475327 -- if that didn't trigger, we need 3 arguments. but PyArg_ParseTuple below may give a msg saying type() needs exactly 3. */ if (nargs != 3) { PyErr_SetString(PyExc_TypeError, "type() takes 1 or 3 arguments"); return NULL; } } /* Check arguments: (name, bases, dict) */ if (!PyArg_ParseTuple(args, "UO!O!:type.__new__", &name, &PyTuple_Type, &bases, &PyDict_Type, &orig_dict)) return NULL; /* Adjust for empty tuple bases */ nbases = PyTuple_GET_SIZE(bases); if (nbases == 0) { // Default base is 'object' in python code. base = &PyBaseObject_Type; bases = PyTuple_Pack(1, base); if (bases == NULL) return NULL; nbases = 1; } else { _Py_IDENTIFIER(__mro_entries__); for (i = 0; i < nbases; i++) { tmp = PyTuple_GET_ITEM(bases, i); if (PyType_Check(tmp)) { continue; } if (_PyObject_LookupAttrId(tmp, &PyId___mro_entries__, &tmp) < 0) { return NULL; } if (tmp != NULL) { PyErr_SetString(PyExc_TypeError, "type() doesn't support MRO entry resolution; " "use types.new_class()"); Py_DECREF(tmp); return NULL; } } /* Search the bases for the proper metatype to deal with this: */ // Notice that type of a type in almost all case is PyType_Type, so // in most time the winner is PyType_Type, and if a class (it's also // a type) has metaclass, the should be the metaclass in class define. winner = _PyType_CalculateMetaclass(metatype, bases); if (winner == NULL) { return NULL; } if (winner != metatype) { if (winner->tp_new != type_new) /* Pass it to the winner */ return winner->tp_new(winner, args, kwds); metatype = winner; } /* Calculate best base, and check that all bases are type objects */ base = best_base(bases); if (base == NULL) { return NULL; } Py_INCREF(bases); } /* Use "goto error" from this point on as we now own the reference to "bases". 
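       As an aside on the __mro_entries__ rejection a little further up: the
       class statement resolves such pseudo-bases (PEP 560), but calling
       type() directly with one is refused; a small sketch (hypothetical
       names):

           class Base: pass

           class Pseudo:
               def __mro_entries__(self, bases):
                   return (Base,)

           class Ok(Pseudo()):            # class statement: bases become (Base,)
               pass

           type('Bad', (Pseudo(),), {})   # TypeError: type() doesn't support
                                          # MRO entry resolution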
*/ dict = PyDict_Copy(orig_dict); if (dict == NULL) goto error; /* Check for a __slots__ sequence variable in dict, and count it */ slots = _PyDict_GetItemId(dict, &PyId___slots__); nslots = 0; add_dict = 0; add_weak = 0; may_add_dict = base->tp_dictoffset == 0; may_add_weak = base->tp_weaklistoffset == 0 && base->tp_itemsize == 0; if (slots == NULL) { if (may_add_dict) { add_dict++; } if (may_add_weak) { add_weak++; } } else { /* Have slots */ /* Make it into a tuple */ if (PyUnicode_Check(slots)) slots = PyTuple_Pack(1, slots); else slots = PySequence_Tuple(slots); if (slots == NULL) goto error; assert(PyTuple_Check(slots)); /* Are slots allowed? */ nslots = PyTuple_GET_SIZE(slots); if (nslots > 0 && base->tp_itemsize != 0) { PyErr_Format(PyExc_TypeError, "nonempty __slots__ " "not supported for subtype of '%s'", base->tp_name); goto error; } /* Check for valid slot names and two special cases */ // Code block A: the block of code seem be related to Code Block B. for (i = 0; i < nslots; i++) { PyObject *tmp = PyTuple_GET_ITEM(slots, i); if (!valid_identifier(tmp)) goto error; assert(PyUnicode_Check(tmp)); if (_PyUnicode_EqualToASCIIId(tmp, &PyId___dict__)) { if (!may_add_dict || add_dict) { // base type has __dict__. PyErr_SetString(PyExc_TypeError, "__dict__ slot disallowed: " "we already got one"); goto error; } add_dict++; } if (_PyUnicode_EqualToASCIIString(tmp, "__weakref__")) { if (!may_add_weak || add_weak) { // base type has __weakref__. PyErr_SetString(PyExc_TypeError, "__weakref__ slot disallowed: " "either we already got one, " "or __itemsize__ != 0"); goto error; } add_weak++; } } /* Copy slots into a list, mangle names and sort them. Sorted names are needed for __class__ assignment. Convert them back to tuple at the end. */ newslots = PyList_New(nslots - add_dict - add_weak); if (newslots == NULL) goto error; for (i = j = 0; i < nslots; i++) { tmp = PyTuple_GET_ITEM(slots, i); if ((add_dict && _PyUnicode_EqualToASCIIId(tmp, &PyId___dict__)) || (add_weak && _PyUnicode_EqualToASCIIString(tmp, "__weakref__"))) // Skip __dict__ and __weakref__. continue; tmp =_Py_Mangle(name, tmp); if (!tmp) { Py_DECREF(newslots); goto error; } PyList_SET_ITEM(newslots, j, tmp); if (PyDict_GetItem(dict, tmp)) { /* CPython inserts __qualname__ and __classcell__ (when needed) into the namespace when creating a class. They will be deleted below so won't act as class variables. */ if (!_PyUnicode_EqualToASCIIId(tmp, &PyId___qualname__) && !_PyUnicode_EqualToASCIIId(tmp, &PyId___classcell__)) { PyErr_Format(PyExc_ValueError, "%R in __slots__ conflicts with class variable", tmp); Py_DECREF(newslots); goto error; } } j++; } assert(j == nslots - add_dict - add_weak); nslots = j; Py_CLEAR(slots); if (PyList_Sort(newslots) == -1) { Py_DECREF(newslots); goto error; } slots = PyList_AsTuple(newslots); Py_DECREF(newslots); if (slots == NULL) goto error; /* Secondary bases may provide weakrefs or dict */ // Code Block B: this code block seems be related Code Block A. 
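    /* A Python-level sketch (hypothetical classes) of the __slots__ rules
       enforced above: entries must be identifiers, '__dict__' and
       '__weakref__' are allowed at most once and only when no base already
       provides them, private names get mangled, and a slot may not shadow a
       class variable:

           class Point:
               __slots__ = ('x', '__p')   # '__p' is stored as '_Point__p'

           class Bad:
               __slots__ = ('x',)
               x = 1    # ValueError: 'x' in __slots__ conflicts with class variable
    */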
if (nbases > 1 && ((may_add_dict && !add_dict) || (may_add_weak && !add_weak))) { for (i = 0; i < nbases; i++) { tmp = PyTuple_GET_ITEM(bases, i); if (tmp == (PyObject *)base) continue; /* Skip primary base */ assert(PyType_Check(tmp)); tmptype = (PyTypeObject *)tmp; if (may_add_dict && !add_dict && tmptype->tp_dictoffset != 0) add_dict++; if (may_add_weak && !add_weak && tmptype->tp_weaklistoffset != 0) add_weak++; if (may_add_dict && !add_dict) continue; if (may_add_weak && !add_weak) continue; /* Nothing more to check */ break; } } } /* Allocate the type object */ type = (PyTypeObject *)metatype->tp_alloc(metatype, nslots); if (type == NULL) goto error; /* Keep name and slots alive in the extended type object */ et = (PyHeapTypeObject *)type; Py_INCREF(name); et->ht_name = name; et->ht_slots = slots; slots = NULL; /* Initialize tp_flags */ type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_FINALIZE; if (base->tp_flags & Py_TPFLAGS_HAVE_GC) type->tp_flags |= Py_TPFLAGS_HAVE_GC; /* Initialize essential fields */ type->tp_as_async = &et->as_async; type->tp_as_number = &et->as_number; type->tp_as_sequence = &et->as_sequence; type->tp_as_mapping = &et->as_mapping; type->tp_as_buffer = &et->as_buffer; type->tp_name = PyUnicode_AsUTF8AndSize(name, &name_size); if (!type->tp_name) goto error; if (strlen(type->tp_name) != (size_t)name_size) { PyErr_SetString(PyExc_ValueError, "type name must not contain null characters"); goto error; } /* Set tp_base and tp_bases */ type->tp_bases = bases; bases = NULL; Py_INCREF(base); type->tp_base = base; /* Initialize tp_dict from passed-in dict */ Py_INCREF(dict); type->tp_dict = dict; /* Set __module__ in the dict */ if (_PyDict_GetItemId(dict, &PyId___module__) == NULL) { tmp = PyEval_GetGlobals(); if (tmp != NULL) { tmp = _PyDict_GetItemId(tmp, &PyId___name__); if (tmp != NULL) { // Set class's __module__ to __name__ of outer namespace. if (_PyDict_SetItemId(dict, &PyId___module__, tmp) < 0) goto error; } } } /* Set ht_qualname to dict['__qualname__'] if available, else to __name__. The __qualname__ accessor will look for ht_qualname. */ qualname = _PyDict_GetItemId(dict, &PyId___qualname__); if (qualname != NULL) { if (!PyUnicode_Check(qualname)) { PyErr_Format(PyExc_TypeError, "type __qualname__ must be a str, not %s", Py_TYPE(qualname)->tp_name); goto error; } } et->ht_qualname = qualname ? qualname : et->ht_name; Py_INCREF(et->ht_qualname); if (qualname != NULL && _PyDict_DelItemId(dict, &PyId___qualname__) < 0) goto error; /* Set tp_doc to a copy of dict['__doc__'], if the latter is there and is a string. The __doc__ accessor will first look for tp_doc; if that fails, it will still look into __dict__. */ { PyObject *doc = _PyDict_GetItemId(dict, &PyId___doc__); if (doc != NULL && PyUnicode_Check(doc)) { Py_ssize_t len; const char *doc_str; char *tp_doc; doc_str = PyUnicode_AsUTF8(doc); if (doc_str == NULL) goto error; /* Silently truncate the docstring if it contains null bytes. */ len = strlen(doc_str); tp_doc = (char *)PyObject_MALLOC(len + 1); if (tp_doc == NULL) { PyErr_NoMemory(); goto error; } // Copy doc string from __dict__ to tp_doc field. 
memcpy(tp_doc, doc_str, len + 1); type->tp_doc = tp_doc; } } /* Special-case __new__: if it's a plain function, make it a static function */ tmp = _PyDict_GetItemId(dict, &PyId___new__); if (tmp != NULL && PyFunction_Check(tmp)) { tmp = PyStaticMethod_New(tmp); if (tmp == NULL) goto error; if (_PyDict_SetItemId(dict, &PyId___new__, tmp) < 0) { Py_DECREF(tmp); goto error; } Py_DECREF(tmp); } /* Special-case __init_subclass__ and __class_getitem__: if they are plain functions, make them classmethods */ // See PEP 487 for detail content for __init__subclass__. tmp = _PyDict_GetItemId(dict, &PyId___init_subclass__); if (tmp != NULL && PyFunction_Check(tmp)) { tmp = PyClassMethod_New(tmp); if (tmp == NULL) goto error; if (_PyDict_SetItemId(dict, &PyId___init_subclass__, tmp) < 0) { Py_DECREF(tmp); goto error; } Py_DECREF(tmp); } // Handle __class__getitem__. tmp = _PyDict_GetItemId(dict, &PyId___class_getitem__); if (tmp != NULL && PyFunction_Check(tmp)) { tmp = PyClassMethod_New(tmp); if (tmp == NULL) goto error; if (_PyDict_SetItemId(dict, &PyId___class_getitem__, tmp) < 0) { Py_DECREF(tmp); goto error; } Py_DECREF(tmp); } /* Add descriptors for custom slots from __slots__, or for __dict__ */ mp = PyHeapType_GET_MEMBERS(et); slotoffset = base->tp_basicsize; if (et->ht_slots != NULL) { for (i = 0; i < nslots; i++, mp++) { mp->name = PyUnicode_AsUTF8( PyTuple_GET_ITEM(et->ht_slots, i)); if (mp->name == NULL) goto error; mp->type = T_OBJECT_EX; mp->offset = slotoffset; /* __dict__ and __weakref__ are already filtered out */ assert(strcmp(mp->name, "__dict__") != 0); assert(strcmp(mp->name, "__weakref__") != 0); slotoffset += sizeof(PyObject *); } } if (add_dict) { if (base->tp_itemsize) // When tp_dictoffset is negative value, this means that // the dictionary is at very end of the type structure. type->tp_dictoffset = -(long)sizeof(PyObject *); else type->tp_dictoffset = slotoffset; slotoffset += sizeof(PyObject *); } if (add_weak) { assert(!base->tp_itemsize); type->tp_weaklistoffset = slotoffset; slotoffset += sizeof(PyObject *); } type->tp_basicsize = slotoffset; type->tp_itemsize = base->tp_itemsize; type->tp_members = PyHeapType_GET_MEMBERS(et); if (type->tp_weaklistoffset && type->tp_dictoffset) type->tp_getset = subtype_getsets_full; else if (type->tp_weaklistoffset && !type->tp_dictoffset) type->tp_getset = subtype_getsets_weakref_only; else if (!type->tp_weaklistoffset && type->tp_dictoffset) type->tp_getset = subtype_getsets_dict_only; else type->tp_getset = NULL; /* Special case some slots */ if (type->tp_dictoffset != 0 || nslots > 0) { if (base->tp_getattr == NULL && base->tp_getattro == NULL) type->tp_getattro = PyObject_GenericGetAttr; if (base->tp_setattr == NULL && base->tp_setattro == NULL) type->tp_setattro = PyObject_GenericSetAttr; } type->tp_dealloc = subtype_dealloc; /* Enable GC unless this class is not adding new instance variables and the base class did not use GC. 
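       As an aside on the __init_subclass__ / __class_getitem__ special-casing
       a bit further up: both are implicitly wrapped as classmethods, so they
       can be written as plain functions in the class body (PEP 487); a
       minimal sketch (hypothetical classes):

           class Base:
               def __init_subclass__(cls, **kwargs):
                   super().__init_subclass__(**kwargs)
                   print('registered', cls.__name__)

           class Child(Base):   # prints 'registered Child' at class creation
               pass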
 */
    if ((base->tp_flags & Py_TPFLAGS_HAVE_GC) ||
        type->tp_basicsize > base->tp_basicsize)
        type->tp_flags |= Py_TPFLAGS_HAVE_GC;

    /* Always override allocation strategy to use regular heap */
    type->tp_alloc = PyType_GenericAlloc;
    if (type->tp_flags & Py_TPFLAGS_HAVE_GC) {
        type->tp_free = PyObject_GC_Del;
        type->tp_traverse = subtype_traverse;
        type->tp_clear = subtype_clear;
    }
    else
        type->tp_free = PyObject_Del;

    /* store type in class' cell if one is supplied */
    cell = _PyDict_GetItemId(dict, &PyId___classcell__);
    if (cell != NULL) {
        /* At least one method requires a reference to its defining class */
        if (!PyCell_Check(cell)) {
            PyErr_Format(PyExc_TypeError,
                         "__classcell__ must be a nonlocal cell, not %.200R",
                         Py_TYPE(cell));
            goto error;
        }
        PyCell_Set(cell, (PyObject *) type);
        _PyDict_DelItemId(dict, &PyId___classcell__);
        PyErr_Clear();
    }

    if (!strcmp(type->tp_name, "E")) {
        printf("Notice: constructing class E.\n");
    }

    /* Initialize the rest */
    if (PyType_Ready(type) < 0)
        goto error;

    /* Put the proper slots in place */
    // Replace the default slot functions of this type with dispatchers for
    // the methods the class body defines, for example:
    //     class A(list):
    //         def __repr__(self):
    //             print('XXX')
    // During construction of this class, type A's initial tp_repr
    // ('__repr__' at the Python level) is list_repr; fixup_slot_dispatchers
    // replaces it with a dispatcher that looks up '__repr__' in the type's
    // dict.  Note that builtin (static) types do not need this step; only
    // heap types created here (e.g. classes defined in Python code) go
    // through it.
    fixup_slot_dispatchers(type);
    // Also note the difference between three related functions:
    // 'inherit_special', 'inherit_slots' and 'fixup_slot_dispatchers'.
    // 'inherit_special' copies a few plain fields (not slot functions) from
    // the best base to the type, 'inherit_slots' copies slot functions from
    // the bases to the type in limited cases, and 'fixup_slot_dispatchers'
    // handles the case where the user defines methods such as __repr__ on
    // the type while the type also inherits descriptors from its bases.
if (type->tp_dictoffset) { et->ht_cached_keys = _PyDict_NewKeysForClass(); } if (set_names(type) < 0) goto error; if (init_subclass(type, kwds) < 0) goto error; Py_DECREF(dict); return (PyObject *)type; error: Py_XDECREF(dict); Py_XDECREF(bases); Py_XDECREF(slots); Py_XDECREF(type); return NULL; } static const short slotoffsets[] = { -1, /* invalid slot */ #include "typeslots.inc" }; PyObject * PyType_FromSpecWithBases(PyType_Spec *spec, PyObject *bases) { PyHeapTypeObject *res = (PyHeapTypeObject*)PyType_GenericAlloc(&PyType_Type, 0); PyTypeObject *type, *base; PyObject *modname; char *s; char *res_start = (char*)res; PyType_Slot *slot; if (res == NULL) return NULL; if (spec->name == NULL) { PyErr_SetString(PyExc_SystemError, "Type spec does not define the name field."); goto fail; } /* Set the type name and qualname */ s = strrchr(spec->name, '.'); if (s == NULL) s = (char*)spec->name; else s++; type = &res->ht_type; /* The flags must be initialized early, before the GC traverses us */ type->tp_flags = spec->flags | Py_TPFLAGS_HEAPTYPE; res->ht_name = PyUnicode_FromString(s); if (!res->ht_name) goto fail; res->ht_qualname = res->ht_name; Py_INCREF(res->ht_qualname); type->tp_name = spec->name; /* Adjust for empty tuple bases */ if (!bases) { base = &PyBaseObject_Type; /* See whether Py_tp_base(s) was specified */ for (slot = spec->slots; slot->slot; slot++) { if (slot->slot == Py_tp_base) base = slot->pfunc; else if (slot->slot == Py_tp_bases) { bases = slot->pfunc; Py_INCREF(bases); } } if (!bases) bases = PyTuple_Pack(1, base); if (!bases) goto fail; } else Py_INCREF(bases); /* Calculate best base, and check that all bases are type objects */ base = best_base(bases); if (base == NULL) { goto fail; } if (!PyType_HasFeature(base, Py_TPFLAGS_BASETYPE)) { PyErr_Format(PyExc_TypeError, "type '%.100s' is not an acceptable base type", base->tp_name); goto fail; } /* Initialize essential fields */ type->tp_as_async = &res->as_async; type->tp_as_number = &res->as_number; type->tp_as_sequence = &res->as_sequence; type->tp_as_mapping = &res->as_mapping; type->tp_as_buffer = &res->as_buffer; /* Set tp_base and tp_bases */ type->tp_bases = bases; bases = NULL; Py_INCREF(base); type->tp_base = base; type->tp_basicsize = spec->basicsize; type->tp_itemsize = spec->itemsize; for (slot = spec->slots; slot->slot; slot++) { if (slot->slot < 0 || (size_t)slot->slot >= Py_ARRAY_LENGTH(slotoffsets)) { PyErr_SetString(PyExc_RuntimeError, "invalid slot offset"); goto fail; } if (slot->slot == Py_tp_base || slot->slot == Py_tp_bases) /* Processed above */ continue; *(void**)(res_start + slotoffsets[slot->slot]) = slot->pfunc; /* need to make a copy of the docstring slot, which usually points to a static string literal */ if (slot->slot == Py_tp_doc) { const char *old_doc = _PyType_DocWithoutSignature(type->tp_name, slot->pfunc); size_t len = strlen(old_doc)+1; char *tp_doc = PyObject_MALLOC(len); if (tp_doc == NULL) { type->tp_doc = NULL; PyErr_NoMemory(); goto fail; } memcpy(tp_doc, old_doc, len); type->tp_doc = tp_doc; } } if (type->tp_dealloc == NULL) { /* It's a heap type, so needs the heap types' dealloc. subtype_dealloc will call the base type's tp_dealloc, if necessary. 
*/ type->tp_dealloc = subtype_dealloc; } if (PyType_Ready(type) < 0) goto fail; if (type->tp_dictoffset) { res->ht_cached_keys = _PyDict_NewKeysForClass(); } /* Set type.__module__ */ s = strrchr(spec->name, '.'); if (s != NULL) { int err; modname = PyUnicode_FromStringAndSize( spec->name, (Py_ssize_t)(s - spec->name)); if (modname == NULL) { goto fail; } err = _PyDict_SetItemId(type->tp_dict, &PyId___module__, modname); Py_DECREF(modname); if (err != 0) goto fail; } else { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "builtin type %.200s has no __module__ attribute", spec->name)) goto fail; } return (PyObject*)res; fail: Py_DECREF(res); return NULL; } PyObject * PyType_FromSpec(PyType_Spec *spec) { return PyType_FromSpecWithBases(spec, NULL); } void * PyType_GetSlot(PyTypeObject *type, int slot) { if (!PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE) || slot < 0) { PyErr_BadInternalCall(); return NULL; } if ((size_t)slot >= Py_ARRAY_LENGTH(slotoffsets)) { /* Extension module requesting slot from a future version */ return NULL; } return *(void**)(((char*)type) + slotoffsets[slot]); } /* Internal API to look for a name through the MRO, bypassing the method cache. This returns a borrowed reference, and might set an exception. 'error' is set to: -1: error with exception; 1: error without exception; 0: ok */ static PyObject * find_name_in_mro(PyTypeObject *type, PyObject *name, int *error) { Py_ssize_t i, n; PyObject *mro, *res, *base, *dict; Py_hash_t hash; if (!PyUnicode_CheckExact(name) || (hash = ((PyASCIIObject *) name)->hash) == -1) { hash = PyObject_Hash(name); if (hash == -1) { *error = -1; return NULL; } } /* Look in tp_dict of types in MRO */ mro = type->tp_mro; if (mro == NULL) { if ((type->tp_flags & Py_TPFLAGS_READYING) == 0) { if (PyType_Ready(type) < 0) { *error = -1; return NULL; } mro = type->tp_mro; } if (mro == NULL) { *error = 1; return NULL; } } res = NULL; /* Keep a strong reference to mro because type->tp_mro can be replaced during dict lookup, e.g. when comparing to non-string keys. */ Py_INCREF(mro); assert(PyTuple_Check(mro)); n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { base = PyTuple_GET_ITEM(mro, i); assert(PyType_Check(base)); dict = ((PyTypeObject *)base)->tp_dict; assert(dict && PyDict_Check(dict)); res = _PyDict_GetItem_KnownHash(dict, name, hash); if (res != NULL) break; if (PyErr_Occurred()) { *error = -1; goto done; } } *error = 0; done: Py_DECREF(mro); return res; } /* Internal API to look for a name through the MRO. This returns a borrowed reference, and doesn't set an exception! */ PyObject * _PyType_Lookup(PyTypeObject *type, PyObject *name) { PyObject *res; int error; unsigned int h; if (MCACHE_CACHEABLE_NAME(name) && PyType_HasFeature(type, Py_TPFLAGS_VALID_VERSION_TAG)) { /* fast path */ h = MCACHE_HASH_METHOD(type, name); if (method_cache[h].version == type->tp_version_tag && method_cache[h].name == name) { #if MCACHE_STATS method_cache_hits++; #endif return method_cache[h].value; } } /* We may end up clearing live exceptions below, so make sure it's ours. */ assert(!PyErr_Occurred()); res = find_name_in_mro(type, name, &error); /* Only put NULL results into cache if there was no error. */ if (error) { /* It's not ideal to clear the error condition, but this function is documented as not setting an exception, and I don't want to change that. E.g., when PyType_Ready() can't proceed, it won't set the "ready" flag, so future attempts to ready the same type will call it again -- hopefully in a context that propagates the exception out. 
*/ if (error == -1) { PyErr_Clear(); } return NULL; } if (MCACHE_CACHEABLE_NAME(name) && assign_version_tag(type)) { h = MCACHE_HASH_METHOD(type, name); method_cache[h].version = type->tp_version_tag; method_cache[h].value = res; /* borrowed */ Py_INCREF(name); assert(((PyASCIIObject *)(name))->hash != -1); #if MCACHE_STATS if (method_cache[h].name != Py_None && method_cache[h].name != name) method_cache_collisions++; else method_cache_misses++; #endif Py_SETREF(method_cache[h].name, name); } return res; } PyObject * _PyType_LookupId(PyTypeObject *type, struct _Py_Identifier *name) { PyObject *oname; oname = _PyUnicode_FromId(name); /* borrowed */ if (oname == NULL) return NULL; return _PyType_Lookup(type, oname); } /* This is similar to PyObject_GenericGetAttr(), but uses _PyType_Lookup() instead of just looking in type->tp_dict. */ static PyObject * type_getattro(PyTypeObject *type, PyObject *name) { PyTypeObject *metatype = Py_TYPE(type); PyObject *meta_attribute, *attribute; descrgetfunc meta_get; PyObject* res; if (!PyUnicode_Check(name)) { PyErr_Format(PyExc_TypeError, "attribute name must be string, not '%.200s'", name->ob_type->tp_name); return NULL; } /* Initialize this type (we'll assume the metatype is initialized) */ if (type->tp_dict == NULL) { if (PyType_Ready(type) < 0) return NULL; } /* No readable descriptor found yet */ meta_get = NULL; /* Look for the attribute in the metatype */ meta_attribute = _PyType_Lookup(metatype, name); if (meta_attribute != NULL) { Py_INCREF(meta_attribute); meta_get = Py_TYPE(meta_attribute)->tp_descr_get; if (meta_get != NULL && PyDescr_IsData(meta_attribute)) { /* Data descriptors implement tp_descr_set to intercept * writes. Assume the attribute is not overridden in * type's tp_dict (and bases): call the descriptor now. */ res = meta_get(meta_attribute, (PyObject *)type, (PyObject *)metatype); Py_DECREF(meta_attribute); return res; } } /* No data descriptor found on metatype. 
Look in tp_dict of this * type and its bases */ attribute = _PyType_Lookup(type, name); if (attribute != NULL) { /* Implement descriptor functionality, if any */ Py_INCREF(attribute); descrgetfunc local_get = Py_TYPE(attribute)->tp_descr_get; Py_XDECREF(meta_attribute); if (local_get != NULL) { /* NULL 2nd argument indicates the descriptor was * found on the target object itself (or a base) */ res = local_get(attribute, (PyObject *)NULL, (PyObject *)type); Py_DECREF(attribute); return res; } return attribute; } /* No attribute found in local __dict__ (or bases): use the * descriptor from the metatype, if any */ if (meta_get != NULL) { PyObject *res; res = meta_get(meta_attribute, (PyObject *)type, (PyObject *)metatype); Py_DECREF(meta_attribute); return res; } /* If an ordinary attribute was found on the metatype, return it now */ if (meta_attribute != NULL) { return meta_attribute; } /* Give up */ PyErr_Format(PyExc_AttributeError, "type object '%.50s' has no attribute '%U'", type->tp_name, name); return NULL; } static int type_setattro(PyTypeObject *type, PyObject *name, PyObject *value) { int res; if (!(type->tp_flags & Py_TPFLAGS_HEAPTYPE)) { PyErr_Format( PyExc_TypeError, "can't set attributes of built-in/extension type '%s'", type->tp_name); return -1; } if (PyUnicode_Check(name)) { if (PyUnicode_CheckExact(name)) { if (PyUnicode_READY(name) == -1) return -1; Py_INCREF(name); } else { name = _PyUnicode_Copy(name); if (name == NULL) return -1; } PyUnicode_InternInPlace(&name); if (!PyUnicode_CHECK_INTERNED(name)) { PyErr_SetString(PyExc_MemoryError, "Out of memory interning an attribute name"); Py_DECREF(name); return -1; } } else { /* Will fail in _PyObject_GenericSetAttrWithDict. */ Py_INCREF(name); } res = _PyObject_GenericSetAttrWithDict((PyObject *)type, name, value, NULL); if (res == 0) { res = update_slot(type, name); assert(_PyType_CheckConsistency(type)); } Py_DECREF(name); return res; } extern void _PyDictKeys_DecRef(PyDictKeysObject *keys); static void type_dealloc(PyTypeObject *type) { PyHeapTypeObject *et; PyObject *tp, *val, *tb; /* Assert this is a heap-allocated type object */ assert(type->tp_flags & Py_TPFLAGS_HEAPTYPE); _PyObject_GC_UNTRACK(type); PyErr_Fetch(&tp, &val, &tb); remove_all_subclasses(type, type->tp_bases); PyErr_Restore(tp, val, tb); PyObject_ClearWeakRefs((PyObject *)type); et = (PyHeapTypeObject *)type; Py_XDECREF(type->tp_base); Py_XDECREF(type->tp_dict); Py_XDECREF(type->tp_bases); Py_XDECREF(type->tp_mro); Py_XDECREF(type->tp_cache); Py_XDECREF(type->tp_subclasses); /* A type's tp_doc is heap allocated, unlike the tp_doc slots * of most other objects. It's okay to cast it to char *. */ PyObject_Free((char *)type->tp_doc); Py_XDECREF(et->ht_name); Py_XDECREF(et->ht_qualname); Py_XDECREF(et->ht_slots); if (et->ht_cached_keys) _PyDictKeys_DecRef(et->ht_cached_keys); Py_TYPE(type)->tp_free((PyObject *)type); } /*[clinic input] type.__subclasses__ Return a list of immediate subclasses. 
[clinic start generated code]*/ static PyObject * type___subclasses___impl(PyTypeObject *self) /*[clinic end generated code: output=eb5eb54485942819 input=5af66132436f9a7b]*/ { PyObject *list, *raw, *ref; Py_ssize_t i; list = PyList_New(0); if (list == NULL) return NULL; raw = self->tp_subclasses; if (raw == NULL) return list; assert(PyDict_CheckExact(raw)); i = 0; while (PyDict_Next(raw, &i, NULL, &ref)) { assert(PyWeakref_CheckRef(ref)); ref = PyWeakref_GET_OBJECT(ref); if (ref != Py_None) { if (PyList_Append(list, ref) < 0) { Py_DECREF(list); return NULL; } } } return list; } static PyObject * type_prepare(PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { return PyDict_New(); } /* Merge the __dict__ of aclass into dict, and recursively also all the __dict__s of aclass's base classes. The order of merging isn't defined, as it's expected that only the final set of dict keys is interesting. Return 0 on success, -1 on error. */ static int merge_class_dict(PyObject *dict, PyObject *aclass) { PyObject *classdict; PyObject *bases; _Py_IDENTIFIER(__bases__); assert(PyDict_Check(dict)); assert(aclass); /* Merge in the type's dict (if any). */ classdict = _PyObject_GetAttrId(aclass, &PyId___dict__); if (classdict == NULL) PyErr_Clear(); else { int status = PyDict_Update(dict, classdict); Py_DECREF(classdict); if (status < 0) return -1; } /* Recursively merge in the base types' (if any) dicts. */ bases = _PyObject_GetAttrId(aclass, &PyId___bases__); if (bases == NULL) PyErr_Clear(); else { /* We have no guarantee that bases is a real tuple */ Py_ssize_t i, n; n = PySequence_Size(bases); /* This better be right */ if (n < 0) PyErr_Clear(); else { for (i = 0; i < n; i++) { int status; PyObject *base = PySequence_GetItem(bases, i); if (base == NULL) { Py_DECREF(bases); return -1; } status = merge_class_dict(dict, base); Py_DECREF(base); if (status < 0) { Py_DECREF(bases); return -1; } } } Py_DECREF(bases); } return 0; } /* __dir__ for type objects: returns __dict__ and __bases__. We deliberately don't suck up its __class__, as methods belonging to the metaclass would probably be more confusing than helpful. */ /*[clinic input] type.__dir__ Specialized __dir__ implementation for types. [clinic start generated code]*/ static PyObject * type___dir___impl(PyTypeObject *self) /*[clinic end generated code: output=69d02fe92c0f15fa input=7733befbec645968]*/ { PyObject *result = NULL; PyObject *dict = PyDict_New(); if (dict != NULL && merge_class_dict(dict, (PyObject *)self) == 0) result = PyDict_Keys(dict); Py_XDECREF(dict); return result; } /*[clinic input] type.__sizeof__ Return memory consumption of the type object. 
[clinic start generated code]*/ static PyObject * type___sizeof___impl(PyTypeObject *self) /*[clinic end generated code: output=766f4f16cd3b1854 input=99398f24b9cf45d6]*/ { Py_ssize_t size; if (self->tp_flags & Py_TPFLAGS_HEAPTYPE) { PyHeapTypeObject* et = (PyHeapTypeObject*)self; size = sizeof(PyHeapTypeObject); if (et->ht_cached_keys) size += _PyDict_KeysSize(et->ht_cached_keys); } else size = sizeof(PyTypeObject); return PyLong_FromSsize_t(size); } static PyMethodDef type_methods[] = { TYPE_MRO_METHODDEF TYPE___SUBCLASSES___METHODDEF {"__prepare__", (PyCFunction)type_prepare, METH_FASTCALL | METH_KEYWORDS | METH_CLASS, PyDoc_STR("__prepare__() -> dict\n" "used to create the namespace for the class statement")}, TYPE___INSTANCECHECK___METHODDEF TYPE___SUBCLASSCHECK___METHODDEF TYPE___DIR___METHODDEF TYPE___SIZEOF___METHODDEF {0} }; PyDoc_STRVAR(type_doc, /* this text signature cannot be accurate yet. will fix. --larry */ "type(object_or_name, bases, dict)\n" "type(object) -> the object's type\n" "type(name, bases, dict) -> a new type"); static int type_traverse(PyTypeObject *type, visitproc visit, void *arg) { /* Because of type_is_gc(), the collector only calls this for heaptypes. */ if (!(type->tp_flags & Py_TPFLAGS_HEAPTYPE)) { char msg[200]; sprintf(msg, "type_traverse() called for non-heap type '%.100s'", type->tp_name); Py_FatalError(msg); } Py_VISIT(type->tp_dict); Py_VISIT(type->tp_cache); Py_VISIT(type->tp_mro); Py_VISIT(type->tp_bases); Py_VISIT(type->tp_base); /* There's no need to visit type->tp_subclasses or ((PyHeapTypeObject *)type)->ht_slots, because they can't be involved in cycles; tp_subclasses is a list of weak references, and slots is a tuple of strings. */ return 0; } static int type_clear(PyTypeObject *type) { PyDictKeysObject *cached_keys; /* Because of type_is_gc(), the collector only calls this for heaptypes. */ assert(type->tp_flags & Py_TPFLAGS_HEAPTYPE); /* We need to invalidate the method cache carefully before clearing the dict, so that other objects caught in a reference cycle don't start calling destroyed methods. Otherwise, the only field we need to clear is tp_mro, which is part of a hard cycle (its first element is the class itself) that won't be broken otherwise (it's a tuple and tuples don't have a tp_clear handler). None of the other fields need to be cleared, and here's why: tp_cache: Not used; if it were, it would be a dict. tp_bases, tp_base: If these are involved in a cycle, there must be at least one other, mutable object in the cycle, e.g. a base class's dict; the cycle will be broken that way. tp_subclasses: A dict of weak references can't be part of a cycle; and dicts have their own tp_clear. slots (in PyHeapTypeObject): A tuple of strings can't be part of a cycle. 
*/ PyType_Modified(type); cached_keys = ((PyHeapTypeObject *)type)->ht_cached_keys; if (cached_keys != NULL) { ((PyHeapTypeObject *)type)->ht_cached_keys = NULL; _PyDictKeys_DecRef(cached_keys); } if (type->tp_dict) PyDict_Clear(type->tp_dict); Py_CLEAR(type->tp_mro); return 0; } static int type_is_gc(PyTypeObject *type) { return type->tp_flags & Py_TPFLAGS_HEAPTYPE; } PyTypeObject PyType_Type = { PyVarObject_HEAD_INIT(&PyType_Type, 0) "type", /* tp_name */ sizeof(PyHeapTypeObject), /* tp_basicsize */ sizeof(PyMemberDef), /* tp_itemsize */ (destructor)type_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_reserved */ (reprfunc)type_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ (ternaryfunc)type_call, /* tp_call */ 0, /* tp_str */ (getattrofunc)type_getattro, /* tp_getattro */ (setattrofunc)type_setattro, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_TYPE_SUBCLASS, /* tp_flags */ type_doc, /* tp_doc */ (traverseproc)type_traverse, /* tp_traverse */ (inquiry)type_clear, /* tp_clear */ 0, /* tp_richcompare */ offsetof(PyTypeObject, tp_weaklist), /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ type_methods, /* tp_methods */ type_members, /* tp_members */ type_getsets, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ offsetof(PyTypeObject, tp_dict), /* tp_dictoffset */ type_init, /* tp_init */ 0, /* tp_alloc */ type_new, /* tp_new */ PyObject_GC_Del, /* tp_free */ (inquiry)type_is_gc, /* tp_is_gc */ }; /* The base type of all types (eventually)... except itself. */ /* You may wonder why object.__new__() only complains about arguments when object.__init__() is not overridden, and vice versa. Consider the use cases: 1. When neither is overridden, we want to hear complaints about excess (i.e., any) arguments, since their presence could indicate there's a bug. 2. When defining an Immutable type, we are likely to override only __new__(), since __init__() is called too late to initialize an Immutable object. Since __new__() defines the signature for the type, it would be a pain to have to override __init__() just to stop it from complaining about excess arguments. 3. When defining a Mutable type, we are likely to override only __init__(). So here the converse reasoning applies: we don't want to have to override __new__() just to stop it from complaining. 4. When __init__() is overridden, and the subclass __init__() calls object.__init__(), the latter should complain about excess arguments; ditto for __new__(). Use cases 2 and 3 make it unattractive to unconditionally check for excess arguments. The best solution that addresses all four use cases is as follows: __init__() complains about excess arguments unless __new__() is overridden and __init__() is not overridden (IOW, if __init__() is overridden or __new__() is not overridden); symmetrically, __new__() complains about excess arguments unless __init__() is overridden and __new__() is not overridden (IOW, if __new__() is overridden or __init__() is not overridden). However, for backwards compatibility, this breaks too much code. Therefore, in 2.6, we'll *warn* about excess arguments when both methods are overridden; for all other cases we'll use the above rules. 
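   A concrete illustration of these rules (the class names are made up):

       class A: pass                     # overrides neither
       A(1)                              # TypeError: A() takes no arguments

       class B:                          # overrides __init__ only
           def __init__(self, x):
               self.x = x
       B(1)                              # ok: object.__new__ stays silent

       class C(tuple): pass              # tuple overrides __new__ only
       C((1, 2))                         # ok: object.__init__ stays silent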
*/ /* Forward */ static PyObject * object_new(PyTypeObject *type, PyObject *args, PyObject *kwds); static int excess_args(PyObject *args, PyObject *kwds) { return PyTuple_GET_SIZE(args) || (kwds && PyDict_Check(kwds) && PyDict_GET_SIZE(kwds)); } static int object_init(PyObject *self, PyObject *args, PyObject *kwds) { PyTypeObject *type = Py_TYPE(self); if (excess_args(args, kwds)) { if (type->tp_init != object_init) { PyErr_SetString(PyExc_TypeError, "object.__init__() takes exactly one argument (the instance to initialize)"); return -1; } if (type->tp_new == object_new) { PyErr_Format(PyExc_TypeError, "%.200s.__init__() takes exactly one argument (the instance to initialize)", type->tp_name); return -1; } } return 0; } static PyObject * object_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { if (excess_args(args, kwds)) { if (type->tp_new != object_new) { PyErr_SetString(PyExc_TypeError, "object.__new__() takes exactly one argument (the type to instantiate)"); return NULL; } if (type->tp_init == object_init) { PyErr_Format(PyExc_TypeError, "%.200s() takes no arguments", type->tp_name); return NULL; } } if (type->tp_flags & Py_TPFLAGS_IS_ABSTRACT) { PyObject *abstract_methods = NULL; PyObject *builtins; PyObject *sorted; PyObject *sorted_methods = NULL; PyObject *joined = NULL; PyObject *comma; _Py_static_string(comma_id, ", "); _Py_IDENTIFIER(sorted); /* Compute ", ".join(sorted(type.__abstractmethods__)) into joined. */ abstract_methods = type_abstractmethods(type, NULL); if (abstract_methods == NULL) goto error; builtins = PyEval_GetBuiltins(); if (builtins == NULL) goto error; sorted = _PyDict_GetItemId(builtins, &PyId_sorted); if (sorted == NULL) goto error; sorted_methods = PyObject_CallFunctionObjArgs(sorted, abstract_methods, NULL); if (sorted_methods == NULL) goto error; comma = _PyUnicode_FromId(&comma_id); if (comma == NULL) goto error; joined = PyUnicode_Join(comma, sorted_methods); if (joined == NULL) goto error; PyErr_Format(PyExc_TypeError, "Can't instantiate abstract class %s " "with abstract methods %U", type->tp_name, joined); error: Py_XDECREF(joined); Py_XDECREF(sorted_methods); Py_XDECREF(abstract_methods); return NULL; } return type->tp_alloc(type, 0); } static void object_dealloc(PyObject *self) { Py_TYPE(self)->tp_free(self); } static PyObject * object_repr(PyObject *self) { PyTypeObject *type; PyObject *mod, *name, *rtn; type = Py_TYPE(self); mod = type_module(type, NULL); if (mod == NULL) PyErr_Clear(); else if (!PyUnicode_Check(mod)) { Py_DECREF(mod); mod = NULL; } name = type_qualname(type, NULL); if (name == NULL) { Py_XDECREF(mod); return NULL; } if (mod != NULL && !_PyUnicode_EqualToASCIIId(mod, &PyId_builtins)) rtn = PyUnicode_FromFormat("<%U.%U object at %p>", mod, name, self); else rtn = PyUnicode_FromFormat("<%s object at %p>", type->tp_name, self); Py_XDECREF(mod); Py_DECREF(name); return rtn; } static PyObject * object_str(PyObject *self) { unaryfunc f; f = Py_TYPE(self)->tp_repr; if (f == NULL) f = object_repr; return f(self); } static PyObject * object_richcompare(PyObject *self, PyObject *other, int op) { PyObject *res; switch (op) { case Py_EQ: /* Return NotImplemented instead of False, so if two objects are compared, both get a chance at the comparison. See issue #1393. */ res = (self == other) ? Py_True : Py_NotImplemented; Py_INCREF(res); break; case Py_NE: /* By default, __ne__() delegates to __eq__() and inverts the result, unless the latter returns NotImplemented. 
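           For example, a class that defines only __eq__ gets a consistent
           __ne__ from this default (illustrative class):

               class Point:
                   def __init__(self, x):
                       self.x = x
                   def __eq__(self, other):
                       return isinstance(other, Point) and self.x == other.x

               Point(1) != Point(2)     # True, computed as not (a == b)
               Point(1) != Point(1)     # False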
*/ if (self->ob_type->tp_richcompare == NULL) { res = Py_NotImplemented; Py_INCREF(res); break; } res = (*self->ob_type->tp_richcompare)(self, other, Py_EQ); if (res != NULL && res != Py_NotImplemented) { int ok = PyObject_IsTrue(res); Py_DECREF(res); if (ok < 0) res = NULL; else { if (ok) res = Py_False; else res = Py_True; Py_INCREF(res); } } break; default: res = Py_NotImplemented; Py_INCREF(res); break; } return res; } static PyObject * object_get_class(PyObject *self, void *closure) { Py_INCREF(Py_TYPE(self)); return (PyObject *)(Py_TYPE(self)); } static int compatible_with_tp_base(PyTypeObject *child) { PyTypeObject *parent = child->tp_base; return (parent != NULL && child->tp_basicsize == parent->tp_basicsize && child->tp_itemsize == parent->tp_itemsize && child->tp_dictoffset == parent->tp_dictoffset && child->tp_weaklistoffset == parent->tp_weaklistoffset && ((child->tp_flags & Py_TPFLAGS_HAVE_GC) == (parent->tp_flags & Py_TPFLAGS_HAVE_GC)) && (child->tp_dealloc == subtype_dealloc || child->tp_dealloc == parent->tp_dealloc)); } static int same_slots_added(PyTypeObject *a, PyTypeObject *b) { PyTypeObject *base = a->tp_base; Py_ssize_t size; PyObject *slots_a, *slots_b; assert(base == b->tp_base); size = base->tp_basicsize; if (a->tp_dictoffset == size && b->tp_dictoffset == size) size += sizeof(PyObject *); if (a->tp_weaklistoffset == size && b->tp_weaklistoffset == size) size += sizeof(PyObject *); /* Check slots compliance */ if (!(a->tp_flags & Py_TPFLAGS_HEAPTYPE) || !(b->tp_flags & Py_TPFLAGS_HEAPTYPE)) { return 0; } slots_a = ((PyHeapTypeObject *)a)->ht_slots; slots_b = ((PyHeapTypeObject *)b)->ht_slots; if (slots_a && slots_b) { if (PyObject_RichCompareBool(slots_a, slots_b, Py_EQ) != 1) return 0; size += sizeof(PyObject *) * PyTuple_GET_SIZE(slots_a); } return size == a->tp_basicsize && size == b->tp_basicsize; } static int compatible_for_assignment(PyTypeObject* oldto, PyTypeObject* newto, const char* attr) { PyTypeObject *newbase, *oldbase; if (newto->tp_free != oldto->tp_free) { PyErr_Format(PyExc_TypeError, "%s assignment: " "'%s' deallocator differs from '%s'", attr, newto->tp_name, oldto->tp_name); return 0; } /* It's tricky to tell if two arbitrary types are sufficiently compatible as to be interchangeable; e.g., even if they have the same tp_basicsize, they might have totally different struct fields. It's much easier to tell if a type and its supertype are compatible; e.g., if they have the same tp_basicsize, then that means they have identical fields. So to check whether two arbitrary types are compatible, we first find the highest supertype that each is compatible with, and then if those supertypes are compatible then the original types must also be compatible. 
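   A Python-level illustration: two classes that add nothing to the same
   layout reduce to the same compatible supertype, so their instances can be
   re-classed at runtime (made-up classes):

       class A: pass
       class B: pass

       obj = A()
       obj.__class__ = B        # accepted: identical size and offsets

   whereas assigning a class that adds __slots__, or one deriving from a
   differently sized built-in, fails with the TypeError raised below.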
*/ newbase = newto; oldbase = oldto; while (compatible_with_tp_base(newbase)) newbase = newbase->tp_base; while (compatible_with_tp_base(oldbase)) oldbase = oldbase->tp_base; if (newbase != oldbase && (newbase->tp_base != oldbase->tp_base || !same_slots_added(newbase, oldbase))) { PyErr_Format(PyExc_TypeError, "%s assignment: " "'%s' object layout differs from '%s'", attr, newto->tp_name, oldto->tp_name); return 0; } return 1; } static int object_set_class(PyObject *self, PyObject *value, void *closure) { PyTypeObject *oldto = Py_TYPE(self); PyTypeObject *newto; if (value == NULL) { PyErr_SetString(PyExc_TypeError, "can't delete __class__ attribute"); return -1; } if (!PyType_Check(value)) { PyErr_Format(PyExc_TypeError, "__class__ must be set to a class, not '%s' object", Py_TYPE(value)->tp_name); return -1; } newto = (PyTypeObject *)value; /* In versions of CPython prior to 3.5, the code in compatible_for_assignment was not set up to correctly check for memory layout / slot / etc. compatibility for non-HEAPTYPE classes, so we just disallowed __class__ assignment in any case that wasn't HEAPTYPE -> HEAPTYPE. During the 3.5 development cycle, we fixed the code in compatible_for_assignment to correctly check compatibility between arbitrary types, and started allowing __class__ assignment in all cases where the old and new types did in fact have compatible slots and memory layout (regardless of whether they were implemented as HEAPTYPEs or not). Just before 3.5 was released, though, we discovered that this led to problems with immutable types like int, where the interpreter assumes they are immutable and interns some values. Formerly this wasn't a problem, because they really were immutable -- in particular, all the types where the interpreter applied this interning trick happened to also be statically allocated, so the old HEAPTYPE rules were "accidentally" stopping them from allowing __class__ assignment. But with the changes to __class__ assignment, we started allowing code like class MyInt(int): ... # Modifies the type of *all* instances of 1 in the whole program, # including future instances (!), because the 1 object is interned. (1).__class__ = MyInt (see https://bugs.python.org/issue24912). In theory the proper fix would be to identify which classes rely on this invariant and somehow disallow __class__ assignment only for them, perhaps via some mechanism like a new Py_TPFLAGS_IMMUTABLE flag (a "blacklisting" approach). But in practice, since this problem wasn't noticed late in the 3.5 RC cycle, we're taking the conservative approach and reinstating the same HEAPTYPE->HEAPTYPE check that we used to have, plus a "whitelist". For now, the whitelist consists only of ModuleType subtypes, since those are the cases that motivated the patch in the first place -- see https://bugs.python.org/issue22986 -- and since module objects are mutable we can be sure that they are definitely not being interned. So now we allow HEAPTYPE->HEAPTYPE *or* ModuleType subtype -> ModuleType subtype. So far as we know, all the code beyond the following 'if' statement will correctly handle non-HEAPTYPE classes, and the HEAPTYPE check is needed only to protect that subset of non-HEAPTYPE classes for which the interpreter has baked in the assumption that all instances are truly immutable. 
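   The whitelisted module case looks like this in practice (hypothetical
   module code):

       import sys, types

       class VerboseModule(types.ModuleType):
           def __repr__(self):
               return 'Verbose ' + self.__name__

       sys.modules[__name__].__class__ = VerboseModule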
*/ if (!(PyType_IsSubtype(newto, &PyModule_Type) && PyType_IsSubtype(oldto, &PyModule_Type)) && (!(newto->tp_flags & Py_TPFLAGS_HEAPTYPE) || !(oldto->tp_flags & Py_TPFLAGS_HEAPTYPE))) { PyErr_Format(PyExc_TypeError, "__class__ assignment only supported for heap types " "or ModuleType subclasses"); return -1; } if (compatible_for_assignment(oldto, newto, "__class__")) { if (newto->tp_flags & Py_TPFLAGS_HEAPTYPE) Py_INCREF(newto); Py_TYPE(self) = newto; if (oldto->tp_flags & Py_TPFLAGS_HEAPTYPE) Py_DECREF(oldto); return 0; } else { return -1; } } static PyGetSetDef object_getsets[] = { {"__class__", object_get_class, object_set_class, PyDoc_STR("the object's class")}, {0} }; /* Stuff to implement __reduce_ex__ for pickle protocols >= 2. We fall back to helpers in copyreg for: - pickle protocols < 2 - calculating the list of slot names (done only once per class) - the __newobj__ function (which is used as a token but never called) */ static PyObject * import_copyreg(void) { PyObject *copyreg_str; PyObject *copyreg_module; _Py_IDENTIFIER(copyreg); copyreg_str = _PyUnicode_FromId(&PyId_copyreg); if (copyreg_str == NULL) { return NULL; } /* Try to fetch cached copy of copyreg from sys.modules first in an attempt to avoid the import overhead. Previously this was implemented by storing a reference to the cached module in a static variable, but this broke when multiple embedded interpreters were in use (see issue #17408 and #19088). */ copyreg_module = PyImport_GetModule(copyreg_str); if (copyreg_module != NULL) { return copyreg_module; } if (PyErr_Occurred()) { return NULL; } return PyImport_Import(copyreg_str); } static PyObject * _PyType_GetSlotNames(PyTypeObject *cls) { PyObject *copyreg; PyObject *slotnames; _Py_IDENTIFIER(__slotnames__); _Py_IDENTIFIER(_slotnames); assert(PyType_Check(cls)); /* Get the slot names from the cache in the class if possible. */ slotnames = _PyDict_GetItemIdWithError(cls->tp_dict, &PyId___slotnames__); if (slotnames != NULL) { if (slotnames != Py_None && !PyList_Check(slotnames)) { PyErr_Format(PyExc_TypeError, "%.200s.__slotnames__ should be a list or None, " "not %.200s", cls->tp_name, Py_TYPE(slotnames)->tp_name); return NULL; } Py_INCREF(slotnames); return slotnames; } else { if (PyErr_Occurred()) { return NULL; } /* The class does not have the slot names cached yet. */ } copyreg = import_copyreg(); if (copyreg == NULL) return NULL; /* Use _slotnames function from the copyreg module to find the slots by this class and its bases. This function will cache the result in __slotnames__. */ slotnames = _PyObject_CallMethodIdObjArgs(copyreg, &PyId__slotnames, cls, NULL); Py_DECREF(copyreg); if (slotnames == NULL) return NULL; if (slotnames != Py_None && !PyList_Check(slotnames)) { PyErr_SetString(PyExc_TypeError, "copyreg._slotnames didn't return a list or None"); Py_DECREF(slotnames); return NULL; } return slotnames; } static PyObject * _PyObject_GetState(PyObject *obj, int required) { PyObject *state; PyObject *getstate; _Py_IDENTIFIER(__getstate__); if (_PyObject_LookupAttrId(obj, &PyId___getstate__, &getstate) < 0) { return NULL; } if (getstate == NULL) { PyObject *slotnames; if (required && obj->ob_type->tp_itemsize) { PyErr_Format(PyExc_TypeError, "can't pickle %.200s objects", Py_TYPE(obj)->tp_name); return NULL; } { PyObject **dict; dict = _PyObject_GetDictPtr(obj); /* It is possible that the object's dict is not initialized yet. In this case, we will return None for the state. 
We also return None if the dict is empty to make the behavior consistent regardless whether the dict was initialized or not. This make unit testing easier. */ if (dict != NULL && *dict != NULL && PyDict_GET_SIZE(*dict)) { state = *dict; } else { state = Py_None; } Py_INCREF(state); } slotnames = _PyType_GetSlotNames(Py_TYPE(obj)); if (slotnames == NULL) { Py_DECREF(state); return NULL; } assert(slotnames == Py_None || PyList_Check(slotnames)); if (required) { Py_ssize_t basicsize = PyBaseObject_Type.tp_basicsize; if (obj->ob_type->tp_dictoffset) basicsize += sizeof(PyObject *); if (obj->ob_type->tp_weaklistoffset) basicsize += sizeof(PyObject *); if (slotnames != Py_None) basicsize += sizeof(PyObject *) * PyList_GET_SIZE(slotnames); if (obj->ob_type->tp_basicsize > basicsize) { Py_DECREF(slotnames); Py_DECREF(state); PyErr_Format(PyExc_TypeError, "can't pickle %.200s objects", Py_TYPE(obj)->tp_name); return NULL; } } if (slotnames != Py_None && PyList_GET_SIZE(slotnames) > 0) { PyObject *slots; Py_ssize_t slotnames_size, i; slots = PyDict_New(); if (slots == NULL) { Py_DECREF(slotnames); Py_DECREF(state); return NULL; } slotnames_size = PyList_GET_SIZE(slotnames); for (i = 0; i < slotnames_size; i++) { PyObject *name, *value; name = PyList_GET_ITEM(slotnames, i); Py_INCREF(name); if (_PyObject_LookupAttr(obj, name, &value) < 0) { goto error; } if (value == NULL) { Py_DECREF(name); /* It is not an error if the attribute is not present. */ } else { int err = PyDict_SetItem(slots, name, value); Py_DECREF(name); Py_DECREF(value); if (err) { goto error; } } /* The list is stored on the class so it may mutate while we iterate over it */ if (slotnames_size != PyList_GET_SIZE(slotnames)) { PyErr_Format(PyExc_RuntimeError, "__slotsname__ changed size during iteration"); goto error; } /* We handle errors within the loop here. */ if (0) { error: Py_DECREF(slotnames); Py_DECREF(slots); Py_DECREF(state); return NULL; } } /* If we found some slot attributes, pack them in a tuple along the original attribute dictionary. */ if (PyDict_GET_SIZE(slots) > 0) { PyObject *state2; state2 = PyTuple_Pack(2, state, slots); Py_DECREF(state); if (state2 == NULL) { Py_DECREF(slotnames); Py_DECREF(slots); return NULL; } state = state2; } Py_DECREF(slots); } Py_DECREF(slotnames); } else { /* getstate != NULL */ state = _PyObject_CallNoArg(getstate); Py_DECREF(getstate); if (state == NULL) return NULL; } return state; } static int _PyObject_GetNewArguments(PyObject *obj, PyObject **args, PyObject **kwargs) { PyObject *getnewargs, *getnewargs_ex; _Py_IDENTIFIER(__getnewargs_ex__); _Py_IDENTIFIER(__getnewargs__); if (args == NULL || kwargs == NULL) { PyErr_BadInternalCall(); return -1; } /* We first attempt to fetch the arguments for __new__ by calling __getnewargs_ex__ on the object. 
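       A conforming __getnewargs_ex__ returns a 2-tuple (args, kwargs), which
       is exactly the shape the checks below enforce; hypothetical example:

           class Frame:
               def __init__(self, width, *, depth=1):
                   self.width = width
                   self.depth = depth
               def __getnewargs_ex__(self):
                   return (self.width,), {'depth': self.depth}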
*/ getnewargs_ex = _PyObject_LookupSpecial(obj, &PyId___getnewargs_ex__); if (getnewargs_ex != NULL) { PyObject *newargs = _PyObject_CallNoArg(getnewargs_ex); Py_DECREF(getnewargs_ex); if (newargs == NULL) { return -1; } if (!PyTuple_Check(newargs)) { PyErr_Format(PyExc_TypeError, "__getnewargs_ex__ should return a tuple, " "not '%.200s'", Py_TYPE(newargs)->tp_name); Py_DECREF(newargs); return -1; } if (PyTuple_GET_SIZE(newargs) != 2) { PyErr_Format(PyExc_ValueError, "__getnewargs_ex__ should return a tuple of " "length 2, not %zd", PyTuple_GET_SIZE(newargs)); Py_DECREF(newargs); return -1; } *args = PyTuple_GET_ITEM(newargs, 0); Py_INCREF(*args); *kwargs = PyTuple_GET_ITEM(newargs, 1); Py_INCREF(*kwargs); Py_DECREF(newargs); /* XXX We should perhaps allow None to be passed here. */ if (!PyTuple_Check(*args)) { PyErr_Format(PyExc_TypeError, "first item of the tuple returned by " "__getnewargs_ex__ must be a tuple, not '%.200s'", Py_TYPE(*args)->tp_name); Py_CLEAR(*args); Py_CLEAR(*kwargs); return -1; } if (!PyDict_Check(*kwargs)) { PyErr_Format(PyExc_TypeError, "second item of the tuple returned by " "__getnewargs_ex__ must be a dict, not '%.200s'", Py_TYPE(*kwargs)->tp_name); Py_CLEAR(*args); Py_CLEAR(*kwargs); return -1; } return 0; } else if (PyErr_Occurred()) { return -1; } /* The object does not have __getnewargs_ex__ so we fallback on using __getnewargs__ instead. */ getnewargs = _PyObject_LookupSpecial(obj, &PyId___getnewargs__); if (getnewargs != NULL) { *args = _PyObject_CallNoArg(getnewargs); Py_DECREF(getnewargs); if (*args == NULL) { return -1; } if (!PyTuple_Check(*args)) { PyErr_Format(PyExc_TypeError, "__getnewargs__ should return a tuple, " "not '%.200s'", Py_TYPE(*args)->tp_name); Py_CLEAR(*args); return -1; } *kwargs = NULL; return 0; } else if (PyErr_Occurred()) { return -1; } /* The object does not have __getnewargs_ex__ and __getnewargs__. This may mean __new__ does not takes any arguments on this object, or that the object does not implement the reduce protocol for pickling or copying. 
*/ *args = NULL; *kwargs = NULL; return 0; } static int _PyObject_GetItemsIter(PyObject *obj, PyObject **listitems, PyObject **dictitems) { if (listitems == NULL || dictitems == NULL) { PyErr_BadInternalCall(); return -1; } if (!PyList_Check(obj)) { *listitems = Py_None; Py_INCREF(*listitems); } else { *listitems = PyObject_GetIter(obj); if (*listitems == NULL) return -1; } if (!PyDict_Check(obj)) { *dictitems = Py_None; Py_INCREF(*dictitems); } else { PyObject *items; _Py_IDENTIFIER(items); items = _PyObject_CallMethodIdObjArgs(obj, &PyId_items, NULL); if (items == NULL) { Py_CLEAR(*listitems); return -1; } *dictitems = PyObject_GetIter(items); Py_DECREF(items); if (*dictitems == NULL) { Py_CLEAR(*listitems); return -1; } } assert(*listitems != NULL && *dictitems != NULL); return 0; } static PyObject * reduce_newobj(PyObject *obj) { PyObject *args = NULL, *kwargs = NULL; PyObject *copyreg; PyObject *newobj, *newargs, *state, *listitems, *dictitems; PyObject *result; int hasargs; if (Py_TYPE(obj)->tp_new == NULL) { PyErr_Format(PyExc_TypeError, "can't pickle %.200s objects", Py_TYPE(obj)->tp_name); return NULL; } if (_PyObject_GetNewArguments(obj, &args, &kwargs) < 0) return NULL; copyreg = import_copyreg(); if (copyreg == NULL) { Py_XDECREF(args); Py_XDECREF(kwargs); return NULL; } hasargs = (args != NULL); if (kwargs == NULL || PyDict_GET_SIZE(kwargs) == 0) { _Py_IDENTIFIER(__newobj__); PyObject *cls; Py_ssize_t i, n; Py_XDECREF(kwargs); newobj = _PyObject_GetAttrId(copyreg, &PyId___newobj__); Py_DECREF(copyreg); if (newobj == NULL) { Py_XDECREF(args); return NULL; } n = args ? PyTuple_GET_SIZE(args) : 0; newargs = PyTuple_New(n+1); if (newargs == NULL) { Py_XDECREF(args); Py_DECREF(newobj); return NULL; } cls = (PyObject *) Py_TYPE(obj); Py_INCREF(cls); PyTuple_SET_ITEM(newargs, 0, cls); for (i = 0; i < n; i++) { PyObject *v = PyTuple_GET_ITEM(args, i); Py_INCREF(v); PyTuple_SET_ITEM(newargs, i+1, v); } Py_XDECREF(args); } else if (args != NULL) { _Py_IDENTIFIER(__newobj_ex__); newobj = _PyObject_GetAttrId(copyreg, &PyId___newobj_ex__); Py_DECREF(copyreg); if (newobj == NULL) { Py_DECREF(args); Py_DECREF(kwargs); return NULL; } newargs = PyTuple_Pack(3, Py_TYPE(obj), args, kwargs); Py_DECREF(args); Py_DECREF(kwargs); if (newargs == NULL) { Py_DECREF(newobj); return NULL; } } else { /* args == NULL */ Py_DECREF(kwargs); PyErr_BadInternalCall(); return NULL; } state = _PyObject_GetState(obj, !hasargs && !PyList_Check(obj) && !PyDict_Check(obj)); if (state == NULL) { Py_DECREF(newobj); Py_DECREF(newargs); return NULL; } if (_PyObject_GetItemsIter(obj, &listitems, &dictitems) < 0) { Py_DECREF(newobj); Py_DECREF(newargs); Py_DECREF(state); return NULL; } result = PyTuple_Pack(5, newobj, newargs, state, listitems, dictitems); Py_DECREF(newobj); Py_DECREF(newargs); Py_DECREF(state); Py_DECREF(listitems); Py_DECREF(dictitems); return result; } /* * There were two problems when object.__reduce__ and object.__reduce_ex__ * were implemented in the same function: * - trying to pickle an object with a custom __reduce__ method that * fell back to object.__reduce__ in certain circumstances led to * infinite recursion at Python level and eventual RecursionError. * - Pickling objects that lied about their type by overwriting the * __class__ descriptor could lead to infinite recursion at C level * and eventual segfault. * * Because of backwards compatibility, the two methods still have to * behave in the same way, even if this is not required by the pickle * protocol. 
This common functionality was moved to the _common_reduce * function. */ static PyObject * _common_reduce(PyObject *self, int proto) { PyObject *copyreg, *res; if (proto >= 2) return reduce_newobj(self); copyreg = import_copyreg(); if (!copyreg) return NULL; res = PyObject_CallMethod(copyreg, "_reduce_ex", "Oi", self, proto); Py_DECREF(copyreg); return res; } /*[clinic input] object.__reduce__ Helper for pickle. [clinic start generated code]*/ static PyObject * object___reduce___impl(PyObject *self) /*[clinic end generated code: output=d4ca691f891c6e2f input=11562e663947e18b]*/ { return _common_reduce(self, 0); } /*[clinic input] object.__reduce_ex__ protocol: int / Helper for pickle. [clinic start generated code]*/ static PyObject * object___reduce_ex___impl(PyObject *self, int protocol) /*[clinic end generated code: output=2e157766f6b50094 input=f326b43fb8a4c5ff]*/ { static PyObject *objreduce; PyObject *reduce, *res; _Py_IDENTIFIER(__reduce__); if (objreduce == NULL) { objreduce = _PyDict_GetItemId(PyBaseObject_Type.tp_dict, &PyId___reduce__); if (objreduce == NULL) return NULL; } reduce = _PyObject_GetAttrId(self, &PyId___reduce__); if (reduce == NULL) PyErr_Clear(); else { PyObject *cls, *clsreduce; int override; cls = (PyObject *) Py_TYPE(self); clsreduce = _PyObject_GetAttrId(cls, &PyId___reduce__); if (clsreduce == NULL) { Py_DECREF(reduce); return NULL; } override = (clsreduce != objreduce); Py_DECREF(clsreduce); if (override) { res = _PyObject_CallNoArg(reduce); Py_DECREF(reduce); return res; } else Py_DECREF(reduce); } return _common_reduce(self, protocol); } static PyObject * object_subclasshook(PyObject *cls, PyObject *args) { Py_RETURN_NOTIMPLEMENTED; } PyDoc_STRVAR(object_subclasshook_doc, "Abstract classes can override this to customize issubclass().\n" "\n" "This is invoked early on by abc.ABCMeta.__subclasscheck__().\n" "It should return True, False or NotImplemented. If it returns\n" "NotImplemented, the normal algorithm is used. Otherwise, it\n" "overrides the normal algorithm (and the outcome is cached).\n"); static PyObject * object_init_subclass(PyObject *cls, PyObject *arg) { Py_RETURN_NONE; } PyDoc_STRVAR(object_init_subclass_doc, "This method is called when a class is subclassed.\n" "\n" "The default implementation does nothing. It may be\n" "overridden to extend subclasses.\n"); /*[clinic input] object.__format__ format_spec: unicode / Default object formatter. [clinic start generated code]*/ static PyObject * object___format___impl(PyObject *self, PyObject *format_spec) /*[clinic end generated code: output=34897efb543a974b input=7c3b3bc53a6fb7fa]*/ { /* Issue 7994: If we're converting to a string, we should reject format specifications */ if (PyUnicode_GET_LENGTH(format_spec) > 0) { PyErr_Format(PyExc_TypeError, "unsupported format string passed to %.200s.__format__", self->ob_type->tp_name); return NULL; } return PyObject_Str(self); } /*[clinic input] object.__sizeof__ Size of object in memory, in bytes. [clinic start generated code]*/ static PyObject * object___sizeof___impl(PyObject *self) /*[clinic end generated code: output=73edab332f97d550 input=1200ff3dfe485306]*/ { Py_ssize_t res, isize; res = 0; isize = self->ob_type->tp_itemsize; if (isize > 0) res = Py_SIZE(self) * isize; res += self->ob_type->tp_basicsize; return PyLong_FromSsize_t(res); } /* __dir__ for generic objects: returns __dict__, __class__, and recursively up the __class__.__bases__ chain. */ /*[clinic input] object.__dir__ Default dir() implementation. 
[clinic start generated code]*/ static PyObject * object___dir___impl(PyObject *self) /*[clinic end generated code: output=66dd48ea62f26c90 input=0a89305bec669b10]*/ { PyObject *result = NULL; PyObject *dict = NULL; PyObject *itsclass = NULL; /* Get __dict__ (which may or may not be a real dict...) */ dict = _PyObject_GetAttrId(self, &PyId___dict__); if (dict == NULL) { PyErr_Clear(); dict = PyDict_New(); } else if (!PyDict_Check(dict)) { Py_DECREF(dict); dict = PyDict_New(); } else { /* Copy __dict__ to avoid mutating it. */ PyObject *temp = PyDict_Copy(dict); Py_DECREF(dict); dict = temp; } if (dict == NULL) goto error; /* Merge in attrs reachable from its class. */ itsclass = _PyObject_GetAttrId(self, &PyId___class__); if (itsclass == NULL) /* XXX(tomer): Perhaps fall back to obj->ob_type if no __class__ exists? */ PyErr_Clear(); else if (merge_class_dict(dict, itsclass) != 0) goto error; result = PyDict_Keys(dict); /* fall through */ error: Py_XDECREF(itsclass); Py_XDECREF(dict); return result; } static PyMethodDef object_methods[] = { OBJECT___REDUCE_EX___METHODDEF OBJECT___REDUCE___METHODDEF {"__subclasshook__", object_subclasshook, METH_CLASS | METH_VARARGS, object_subclasshook_doc}, {"__init_subclass__", object_init_subclass, METH_CLASS | METH_NOARGS, object_init_subclass_doc}, OBJECT___FORMAT___METHODDEF OBJECT___SIZEOF___METHODDEF OBJECT___DIR___METHODDEF {0} }; PyTypeObject PyBaseObject_Type = { PyVarObject_HEAD_INIT(&PyType_Type, 0) "object", /* tp_name */ sizeof(PyObject), /* tp_basicsize */ 0, /* tp_itemsize */ object_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_reserved */ object_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ (hashfunc)_Py_HashPointer, /* tp_hash */ 0, /* tp_call */ object_str, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ PyDoc_STR("object()\n--\n\nThe most base type"), /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ object_richcompare, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ object_methods, /* tp_methods */ 0, /* tp_members */ object_getsets, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ object_init, /* tp_init */ PyType_GenericAlloc, /* tp_alloc */ object_new, /* tp_new */ PyObject_Del, /* tp_free */ }; /* Add the methods from tp_methods to the __dict__ in a type object */ static int add_methods(PyTypeObject *type, PyMethodDef *meth) { PyObject *dict = type->tp_dict; for (; meth->ml_name != NULL; meth++) { PyObject *descr; int err; int isdescr = 1; if (PyDict_GetItemString(dict, meth->ml_name) && !(meth->ml_flags & METH_COEXIST)) continue; if (meth->ml_flags & METH_CLASS) { // The method is a class methond. if (meth->ml_flags & METH_STATIC) { PyErr_SetString(PyExc_ValueError, "method cannot be both class and static"); return -1; } descr = PyDescr_NewClassMethod(type, meth); } else if (meth->ml_flags & METH_STATIC) { // The method is a static method. 
PyObject *cfunc = PyCFunction_NewEx(meth, (PyObject*)type, NULL); if (cfunc == NULL) return -1; descr = PyStaticMethod_New(cfunc); isdescr = 0; // PyStaticMethod is not PyDescrObject Py_DECREF(cfunc); } else { descr = PyDescr_NewMethod(type, meth); } if (descr == NULL) return -1; if (isdescr) { err = PyDict_SetItem(dict, PyDescr_NAME(descr), descr); } else { err = PyDict_SetItemString(dict, meth->ml_name, descr); } Py_DECREF(descr); if (err < 0) return -1; } return 0; } static int add_members(PyTypeObject *type, PyMemberDef *memb) { PyObject *dict = type->tp_dict; for (; memb->name != NULL; memb++) { PyObject *descr; if (PyDict_GetItemString(dict, memb->name)) continue; descr = PyDescr_NewMember(type, memb); if (descr == NULL) return -1; if (PyDict_SetItem(dict, PyDescr_NAME(descr), descr) < 0) { Py_DECREF(descr); return -1; } Py_DECREF(descr); } return 0; } static int add_getset(PyTypeObject *type, PyGetSetDef *gsp) { PyObject *dict = type->tp_dict; for (; gsp->name != NULL; gsp++) { PyObject *descr; if (PyDict_GetItemString(dict, gsp->name)) continue; descr = PyDescr_NewGetSet(type, gsp); if (descr == NULL) return -1; if (PyDict_SetItem(dict, PyDescr_NAME(descr), descr) < 0) { Py_DECREF(descr); return -1; } Py_DECREF(descr); } return 0; } static void inherit_special(PyTypeObject *type, PyTypeObject *base) { /* Copying basicsize is connected to the GC flags */ if (!(type->tp_flags & Py_TPFLAGS_HAVE_GC) && (base->tp_flags & Py_TPFLAGS_HAVE_GC) && (!type->tp_traverse && !type->tp_clear)) { type->tp_flags |= Py_TPFLAGS_HAVE_GC; if (type->tp_traverse == NULL) type->tp_traverse = base->tp_traverse; if (type->tp_clear == NULL) type->tp_clear = base->tp_clear; } { /* The condition below could use some explanation. It appears that tp_new is not inherited for static types whose base class is 'object'; this seems to be a precaution so that old extension types don't suddenly become callable (object.__new__ wouldn't insure the invariants that the extension type's own factory function ensures). Heap types, of course, are under our control, so they do inherit tp_new; static extension types that specify some other built-in type as the default also inherit object.__new__. 
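           A concrete consequence, sketched with a hypothetical static
           extension type: a type that leaves both tp_base and tp_new at 0
           implicitly gets 'object' as its base here, does not inherit
           object.__new__, and therefore cannot be instantiated from Python:

               static PyTypeObject Opaque_Type = {
                   PyVarObject_HEAD_INIT(NULL, 0)
                   "ext.Opaque",
                   sizeof(PyObject),
               };
               (only tp_name and tp_basicsize are set; the rest stays 0)

           After PyType_Ready(&Opaque_Type), tp_new is still NULL, so calling
           ext.Opaque() from Python raises TypeError.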
*/
        if (base != &PyBaseObject_Type ||
            (type->tp_flags & Py_TPFLAGS_HEAPTYPE)) {
            if (type->tp_new == NULL)
                type->tp_new = base->tp_new;
        }
    }
    if (type->tp_basicsize == 0)
        type->tp_basicsize = base->tp_basicsize;

    /* Copy other non-function slots */

#undef COPYVAL
#define COPYVAL(SLOT) \
    if (type->SLOT == 0) type->SLOT = base->SLOT

    COPYVAL(tp_itemsize);
    COPYVAL(tp_weaklistoffset);
    COPYVAL(tp_dictoffset);

    /* Setup fast subclass flags */
    if (PyType_IsSubtype(base, (PyTypeObject*)PyExc_BaseException))
        type->tp_flags |= Py_TPFLAGS_BASE_EXC_SUBCLASS;
    else if (PyType_IsSubtype(base, &PyType_Type))
        type->tp_flags |= Py_TPFLAGS_TYPE_SUBCLASS;
    else if (PyType_IsSubtype(base, &PyLong_Type))
        type->tp_flags |= Py_TPFLAGS_LONG_SUBCLASS;
    else if (PyType_IsSubtype(base, &PyBytes_Type))
        type->tp_flags |= Py_TPFLAGS_BYTES_SUBCLASS;
    else if (PyType_IsSubtype(base, &PyUnicode_Type))
        type->tp_flags |= Py_TPFLAGS_UNICODE_SUBCLASS;
    else if (PyType_IsSubtype(base, &PyTuple_Type))
        type->tp_flags |= Py_TPFLAGS_TUPLE_SUBCLASS;
    else if (PyType_IsSubtype(base, &PyList_Type))
        type->tp_flags |= Py_TPFLAGS_LIST_SUBCLASS;
    else if (PyType_IsSubtype(base, &PyDict_Type))
        type->tp_flags |= Py_TPFLAGS_DICT_SUBCLASS;
}

static int
overrides_hash(PyTypeObject *type)
{
    PyObject *dict = type->tp_dict;
    _Py_IDENTIFIER(__eq__);

    assert(dict != NULL);
    if (_PyDict_GetItemId(dict, &PyId___eq__) != NULL)
        return 1;
    if (_PyDict_GetItemId(dict, &PyId___hash__) != NULL)
        return 1;
    return 0;
}

static void
inherit_slots(PyTypeObject *type, PyTypeObject *base)
{
    PyTypeObject *basebase;

#undef SLOTDEFINED
#undef COPYSLOT
#undef COPYNUM
#undef COPYSEQ
#undef COPYMAP
#undef COPYBUF

    // SLOTDEFINED(SLOT) is true only when 'base' actually defines the slot,
    // i.e. its value differs from the one 'base' itself inherited.  With
    //     class A(int): pass
    //     class C(A): pass
    // A's nb_add is the same pointer as int's, so while building C the slot
    // is copied only on the iteration where 'type' is C and 'base' is int.
#define SLOTDEFINED(SLOT) \
    (base->SLOT != 0 && \
     (basebase == NULL || base->SLOT != basebase->SLOT))

    // COPYSLOT copies nothing once 'type->SLOT' is already set, and is
    // further restricted by SLOTDEFINED above.
#define COPYSLOT(SLOT) \
    if (!type->SLOT && SLOTDEFINED(SLOT)) type->SLOT = base->SLOT

#define COPYASYNC(SLOT) COPYSLOT(tp_as_async->SLOT)
#define COPYNUM(SLOT) COPYSLOT(tp_as_number->SLOT)
#define COPYSEQ(SLOT) COPYSLOT(tp_as_sequence->SLOT)
#define COPYMAP(SLOT) COPYSLOT(tp_as_mapping->SLOT)
#define COPYBUF(SLOT) COPYSLOT(tp_as_buffer->SLOT)

    // Python code used as the running example in the comments of this
    // function:
    //     class A(int): pass
    //     class C(A): pass
    // Debug aid: trace slot inheritance while the example class 'C' is built.
    int debug = 0;
    if (!strcmp(type->tp_name, "C")) {
        debug = 1;
    }

    /* This won't inherit indirect slots (from tp_as_number etc.)
       if type doesn't provide the space. */

    if (type->tp_as_number != NULL && base->tp_as_number != NULL) {
        basebase = base->tp_base;
        if (basebase->tp_as_number == NULL)
            basebase = NULL;
        // For example, bool's tp_base is int (PyLong_Type), so bool picks up
        // its number protocol through these copies.
if (debug) { printf("[type '%s'] nb_add of base '%s' is 0x%lx.\n", type->tp_name, base->tp_name, (uint64_t)(base->tp_as_number->nb_add)); } COPYNUM(nb_add); COPYNUM(nb_subtract); COPYNUM(nb_multiply); COPYNUM(nb_remainder); COPYNUM(nb_divmod); COPYNUM(nb_power); COPYNUM(nb_negative); COPYNUM(nb_positive); COPYNUM(nb_absolute); COPYNUM(nb_bool); COPYNUM(nb_invert); COPYNUM(nb_lshift); COPYNUM(nb_rshift); COPYNUM(nb_and); COPYNUM(nb_xor); COPYNUM(nb_or); COPYNUM(nb_int); COPYNUM(nb_float); COPYNUM(nb_inplace_add); COPYNUM(nb_inplace_subtract); COPYNUM(nb_inplace_multiply); COPYNUM(nb_inplace_remainder); COPYNUM(nb_inplace_power); COPYNUM(nb_inplace_lshift); COPYNUM(nb_inplace_rshift); COPYNUM(nb_inplace_and); COPYNUM(nb_inplace_xor); COPYNUM(nb_inplace_or); COPYNUM(nb_true_divide); COPYNUM(nb_floor_divide); COPYNUM(nb_inplace_true_divide); COPYNUM(nb_inplace_floor_divide); COPYNUM(nb_index); COPYNUM(nb_matrix_multiply); COPYNUM(nb_inplace_matrix_multiply); } if (type->tp_as_async != NULL && base->tp_as_async != NULL) { basebase = base->tp_base; if (basebase->tp_as_async == NULL) basebase = NULL; COPYASYNC(am_await); COPYASYNC(am_aiter); COPYASYNC(am_anext); } if (type->tp_as_sequence != NULL && base->tp_as_sequence != NULL) { basebase = base->tp_base; if (basebase->tp_as_sequence == NULL) basebase = NULL; COPYSEQ(sq_length); COPYSEQ(sq_concat); COPYSEQ(sq_repeat); COPYSEQ(sq_item); COPYSEQ(sq_ass_item); COPYSEQ(sq_contains); COPYSEQ(sq_inplace_concat); COPYSEQ(sq_inplace_repeat); } if (type->tp_as_mapping != NULL && base->tp_as_mapping != NULL) { basebase = base->tp_base; if (basebase->tp_as_mapping == NULL) basebase = NULL; COPYMAP(mp_length); COPYMAP(mp_subscript); COPYMAP(mp_ass_subscript); } if (type->tp_as_buffer != NULL && base->tp_as_buffer != NULL) { basebase = base->tp_base; if (basebase->tp_as_buffer == NULL) basebase = NULL; COPYBUF(bf_getbuffer); COPYBUF(bf_releasebuffer); } basebase = base->tp_base; COPYSLOT(tp_dealloc); if (type->tp_getattr == NULL && type->tp_getattro == NULL) { type->tp_getattr = base->tp_getattr; type->tp_getattro = base->tp_getattro; } if (type->tp_setattr == NULL && type->tp_setattro == NULL) { type->tp_setattr = base->tp_setattr; type->tp_setattro = base->tp_setattro; } /* tp_reserved is ignored */ COPYSLOT(tp_repr); /* tp_hash see tp_richcompare */ COPYSLOT(tp_call); COPYSLOT(tp_str); { /* Copy comparison-related slots only when not overriding them anywhere */ if (type->tp_richcompare == NULL && type->tp_hash == NULL && !overrides_hash(type)) { type->tp_richcompare = base->tp_richcompare; type->tp_hash = base->tp_hash; } } { COPYSLOT(tp_iter); COPYSLOT(tp_iternext); } { COPYSLOT(tp_descr_get); COPYSLOT(tp_descr_set); COPYSLOT(tp_dictoffset); COPYSLOT(tp_init); COPYSLOT(tp_alloc); COPYSLOT(tp_is_gc); if ((type->tp_flags & Py_TPFLAGS_HAVE_FINALIZE) && (base->tp_flags & Py_TPFLAGS_HAVE_FINALIZE)) { COPYSLOT(tp_finalize); } if ((type->tp_flags & Py_TPFLAGS_HAVE_GC) == (base->tp_flags & Py_TPFLAGS_HAVE_GC)) { /* They agree about gc. */ COPYSLOT(tp_free); } else if ((type->tp_flags & Py_TPFLAGS_HAVE_GC) && type->tp_free == NULL && base->tp_free == PyObject_Free) { /* A bit of magic to plug in the correct default * tp_free function when a derived class adds gc, * didn't define tp_free, and the base uses the * default non-gc tp_free. */ type->tp_free = PyObject_GC_Del; } /* else they didn't agree about gc, and there isn't something * obvious to be done -- the type is on its own. 
         */
    }
}

static int add_operators(PyTypeObject *);

int
PyType_Ready(PyTypeObject *type)
{
    PyObject *dict, *bases;
    PyTypeObject *base;
    Py_ssize_t i, n;

    if (type->tp_flags & Py_TPFLAGS_READY) {
        assert(_PyType_CheckConsistency(type));
        return 0;
    }
    assert((type->tp_flags & Py_TPFLAGS_READYING) == 0);

    type->tp_flags |= Py_TPFLAGS_READYING;

#ifdef Py_TRACE_REFS
    /* PyType_Ready is the closest thing we have to a choke point
     * for type objects, so is the best place I can think of to try
     * to get type objects into the doubly-linked list of all objects.
     * Still, not all type objects go through PyType_Ready.
     */
    _Py_AddToAllObjects((PyObject *)type, 0);
#endif

    if (type->tp_name == NULL) {
        PyErr_Format(PyExc_SystemError,
                     "Type does not define the tp_name field.");
        goto error;
    }

    /* Initialize tp_base (defaults to BaseObject unless that's us) */
    base = type->tp_base;
    if (base == NULL && type != &PyBaseObject_Type) {
        base = type->tp_base = &PyBaseObject_Type;
        Py_INCREF(base);
    }

    /* Now the only way base can still be NULL is if type is
     * &PyBaseObject_Type.
     */

    /* Initialize the base class */
    if (base != NULL && base->tp_dict == NULL) {
        if (PyType_Ready(base) < 0)
            goto error;
    }

    /* Initialize ob_type if NULL.  This means extensions that want to be
       compilable separately on Windows can call PyType_Ready() instead of
       initializing the ob_type field of their type objects. */
    /* The test for base != NULL is really unnecessary, since base is only
       NULL when type is &PyBaseObject_Type, and we know its ob_type is
       not NULL (it's initialized to &PyType_Type).  But coverity doesn't
       know that. */
    if (Py_TYPE(type) == NULL && base != NULL)
        // Initialize ob_type field.
        Py_TYPE(type) = Py_TYPE(base);

    /* Initialize tp_bases */
    bases = type->tp_bases;
    if (bases == NULL) {
        if (base == NULL)
            bases = PyTuple_New(0);
        else
            bases = PyTuple_Pack(1, base);
        if (bases == NULL)
            goto error;
        type->tp_bases = bases;
    }

    /* Initialize tp_dict */
    dict = type->tp_dict;
    if (dict == NULL) {
        dict = PyDict_New();
        if (dict == NULL)
            goto error;
        type->tp_dict = dict;
    }

    /* Add type-specific descriptors to tp_dict */
    // add_operators(), add_methods(), add_members() and add_getset() wrap the
    // entries of the type's C-level tables (slots, tp_methods, tp_members,
    // tp_getset) in descriptor objects and store them in tp_dict.  For a
    // class defined in Python these tables are empty, so the calls below do
    // almost nothing; they matter mainly for built-in and extension types.
    if (add_operators(type) < 0)
        goto error;
    if (type->tp_methods != NULL) {
        if (add_methods(type, type->tp_methods) < 0)
            goto error;
    }
    if (type->tp_members != NULL) {
        if (add_members(type, type->tp_members) < 0)
            goto error;
    }
    if (type->tp_getset != NULL) {
        if (add_getset(type, type->tp_getset) < 0)
            goto error;
    }

    /* Calculate method resolution order */
    if (mro_internal(type, NULL) < 0)
        goto error;

    /* Inherit special flags from dominant base */
    if (type->tp_base != NULL)
        inherit_special(type, type->tp_base);

    /* Initialize tp_dict properly */
    bases = type->tp_mro;
    assert(bases != NULL);
    assert(PyTuple_Check(bases));
    n = PyTuple_GET_SIZE(bases);

    // Note on this loop: inherit_slots() fills in fields of 'type' (e.g.
    // nb_add in tp_as_number) from each base in the MRO, so at first glance
    // the same field might appear to be overwritten on every iteration.  It
    // is not: COPYSLOT skips slots that are already set on 'type', and
    // SLOTDEFINED skips values that a base merely inherited, so each slot is
    // copied at most once, from the first base in the MRO that actually
    // defines it.  For the running example
    //     class A(int): pass
    //     class C(A): pass
    // the loop below visits A, int and object, and nb_add is copied exactly
    // once, from int.

    // Debug aid for the running example above.
    if (!strcmp(type->tp_name, "A")) {
        printf("PyType_Ready: constructing the example type A.\n");
    }

    // Inherit slot functions from the bases in the MRO (skipping the type
    // itself at index 0).
    for (i = 1; i < n; i++) {
        PyObject *b = PyTuple_GET_ITEM(bases, i);
        if (PyType_Check(b))
            inherit_slots(type, (PyTypeObject *)b);
    }

    /* All bases of statically allocated type should be statically allocated */
    if (!(type->tp_flags & Py_TPFLAGS_HEAPTYPE))
        for (i = 0; i < n; i++) {
            PyObject *b = PyTuple_GET_ITEM(bases, i);
            if (PyType_Check(b) &&
                (((PyTypeObject *)b)->tp_flags & Py_TPFLAGS_HEAPTYPE)) {
                PyErr_Format(PyExc_TypeError,
                             "type '%.100s' is not dynamically allocated but "
                             "its base type '%.100s' is dynamically allocated",
                             type->tp_name, ((PyTypeObject *)b)->tp_name);
                goto error;
            }
        }

    /* Sanity check for tp_free. */
    if (PyType_IS_GC(type) && (type->tp_flags & Py_TPFLAGS_BASETYPE) &&
        (type->tp_free == NULL || type->tp_free == PyObject_Del)) {
        /* This base class needs to call tp_free, but doesn't have
         * one, or its tp_free is for non-gc'ed objects.
         */
        PyErr_Format(PyExc_TypeError, "type '%.100s' participates in "
                     "gc and is a base type but has inappropriate "
                     "tp_free slot",
                     type->tp_name);
        goto error;
    }

    /* if the type dictionary doesn't contain a __doc__, set it from
       the tp_doc slot.
     */
    if (_PyDict_GetItemId(type->tp_dict, &PyId___doc__) == NULL) {
        if (type->tp_doc != NULL) {
            const char *old_doc = _PyType_DocWithoutSignature(type->tp_name,
                type->tp_doc);
            PyObject *doc = PyUnicode_FromString(old_doc);
            if (doc == NULL)
                goto error;
            if (_PyDict_SetItemId(type->tp_dict, &PyId___doc__, doc) < 0) {
                Py_DECREF(doc);
                goto error;
            }
            Py_DECREF(doc);
        }
        else {
            if (_PyDict_SetItemId(type->tp_dict,
                                  &PyId___doc__, Py_None) < 0)
                goto error;
        }
    }

    /* Hack for tp_hash and __hash__.
       If after all that, tp_hash is still NULL, and __hash__ is not in
       tp_dict, set tp_hash to PyObject_HashNotImplemented and
       tp_dict['__hash__'] equal to None.
       This signals that __hash__ is not inherited.
     */
    if (type->tp_hash == NULL) {
        if (_PyDict_GetItemId(type->tp_dict, &PyId___hash__) == NULL) {
            if (_PyDict_SetItemId(type->tp_dict, &PyId___hash__, Py_None) < 0)
                goto error;
            type->tp_hash = PyObject_HashNotImplemented;
        }
    }

    /* Some more special stuff */
    base = type->tp_base;
    if (base != NULL) {
        // Unlike inherit_slots(), which copies individual function pointers
        // into the type's own sub-structures, the assignments below share the
        // base's whole sub-structure pointers (tp_as_number etc.) whenever
        // the type does not provide its own.
if (type->tp_as_async == NULL) type->tp_as_async = base->tp_as_async; if (type->tp_as_number == NULL) type->tp_as_number = base->tp_as_number; if (type->tp_as_sequence == NULL) type->tp_as_sequence = base->tp_as_sequence; if (type->tp_as_mapping == NULL) type->tp_as_mapping = base->tp_as_mapping; if (type->tp_as_buffer == NULL) type->tp_as_buffer = base->tp_as_buffer; } /* Link into each base class's list of subclasses */ bases = type->tp_bases; n = PyTuple_GET_SIZE(bases); for (i = 0; i < n; i++) { PyObject *b = PyTuple_GET_ITEM(bases, i); if (PyType_Check(b) && add_subclass((PyTypeObject *)b, type) < 0) goto error; } /* All done -- set the ready flag */ type->tp_flags = (type->tp_flags & ~Py_TPFLAGS_READYING) | Py_TPFLAGS_READY; assert(_PyType_CheckConsistency(type)); return 0; error: type->tp_flags &= ~Py_TPFLAGS_READYING; return -1; } static int add_subclass(PyTypeObject *base, PyTypeObject *type) { int result = -1; PyObject *dict, *key, *newobj; dict = base->tp_subclasses; if (dict == NULL) { base->tp_subclasses = dict = PyDict_New(); if (dict == NULL) return -1; } assert(PyDict_CheckExact(dict)); key = PyLong_FromVoidPtr((void *) type); if (key == NULL) return -1; newobj = PyWeakref_NewRef((PyObject *)type, NULL); if (newobj != NULL) { result = PyDict_SetItem(dict, key, newobj); Py_DECREF(newobj); } Py_DECREF(key); return result; } static int add_all_subclasses(PyTypeObject *type, PyObject *bases) { int res = 0; if (bases) { Py_ssize_t i; for (i = 0; i < PyTuple_GET_SIZE(bases); i++) { PyObject *base = PyTuple_GET_ITEM(bases, i); if (PyType_Check(base) && add_subclass((PyTypeObject*)base, type) < 0) res = -1; } } return res; } static void remove_subclass(PyTypeObject *base, PyTypeObject *type) { PyObject *dict, *key; dict = base->tp_subclasses; if (dict == NULL) { return; } assert(PyDict_CheckExact(dict)); key = PyLong_FromVoidPtr((void *) type); if (key == NULL || PyDict_DelItem(dict, key)) { /* This can happen if the type initialization errored out before the base subclasses were updated (e.g. a non-str __qualname__ was passed in the type dict). */ PyErr_Clear(); } Py_XDECREF(key); } static void remove_all_subclasses(PyTypeObject *type, PyObject *bases) { if (bases) { Py_ssize_t i; for (i = 0; i < PyTuple_GET_SIZE(bases); i++) { PyObject *base = PyTuple_GET_ITEM(bases, i); if (PyType_Check(base)) remove_subclass((PyTypeObject*) base, type); } } } static int check_num_args(PyObject *ob, int n) { if (!PyTuple_CheckExact(ob)) { PyErr_SetString(PyExc_SystemError, "PyArg_UnpackTuple() argument list is not a tuple"); return 0; } if (n == PyTuple_GET_SIZE(ob)) return 1; PyErr_Format( PyExc_TypeError, "expected %d arguments, got %zd", n, PyTuple_GET_SIZE(ob)); return 0; } /* Generic wrappers for overloadable 'operators' such as __getitem__ */ /* There's a wrapper *function* for each distinct function typedef used for type object slots (e.g. binaryfunc, ternaryfunc, etc.). There's a wrapper *table* for each distinct operation (e.g. __len__, __add__). Most tables have only one entry; the tables for binary operators have two entries, one regular and one with reversed arguments. 
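   For example, the single C slot nb_add backs two such entries: __add__ is
   wrapped by wrap_binaryfunc_l, which calls the slot as func(self, other),
   while __radd__ is wrapped by wrap_binaryfunc_r, which swaps the operands
   and calls func(other, self).  Both wrappers are defined just below.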
*/ static PyObject * wrap_lenfunc(PyObject *self, PyObject *args, void *wrapped) { lenfunc func = (lenfunc)wrapped; Py_ssize_t res; if (!check_num_args(args, 0)) return NULL; res = (*func)(self); if (res == -1 && PyErr_Occurred()) return NULL; return PyLong_FromSsize_t(res); } static PyObject * wrap_inquirypred(PyObject *self, PyObject *args, void *wrapped) { inquiry func = (inquiry)wrapped; int res; if (!check_num_args(args, 0)) return NULL; res = (*func)(self); if (res == -1 && PyErr_Occurred()) return NULL; return PyBool_FromLong((long)res); } static PyObject * wrap_binaryfunc(PyObject *self, PyObject *args, void *wrapped) { binaryfunc func = (binaryfunc)wrapped; PyObject *other; if (!check_num_args(args, 1)) return NULL; other = PyTuple_GET_ITEM(args, 0); return (*func)(self, other); } static PyObject * wrap_binaryfunc_l(PyObject *self, PyObject *args, void *wrapped) { binaryfunc func = (binaryfunc)wrapped; PyObject *other; if (!check_num_args(args, 1)) return NULL; other = PyTuple_GET_ITEM(args, 0); return (*func)(self, other); } static PyObject * wrap_binaryfunc_r(PyObject *self, PyObject *args, void *wrapped) { binaryfunc func = (binaryfunc)wrapped; PyObject *other; if (!check_num_args(args, 1)) return NULL; other = PyTuple_GET_ITEM(args, 0); return (*func)(other, self); } static PyObject * wrap_ternaryfunc(PyObject *self, PyObject *args, void *wrapped) { ternaryfunc func = (ternaryfunc)wrapped; PyObject *other; PyObject *third = Py_None; /* Note: This wrapper only works for __pow__() */ if (!PyArg_UnpackTuple(args, "", 1, 2, &other, &third)) return NULL; return (*func)(self, other, third); } static PyObject * wrap_ternaryfunc_r(PyObject *self, PyObject *args, void *wrapped) { ternaryfunc func = (ternaryfunc)wrapped; PyObject *other; PyObject *third = Py_None; /* Note: This wrapper only works for __pow__() */ if (!PyArg_UnpackTuple(args, "", 1, 2, &other, &third)) return NULL; return (*func)(other, self, third); } static PyObject * wrap_unaryfunc(PyObject *self, PyObject *args, void *wrapped) { unaryfunc func = (unaryfunc)wrapped; if (!check_num_args(args, 0)) return NULL; return (*func)(self); } static PyObject * wrap_indexargfunc(PyObject *self, PyObject *args, void *wrapped) { ssizeargfunc func = (ssizeargfunc)wrapped; PyObject* o; Py_ssize_t i; if (!PyArg_UnpackTuple(args, "", 1, 1, &o)) return NULL; i = PyNumber_AsSsize_t(o, PyExc_OverflowError); if (i == -1 && PyErr_Occurred()) return NULL; return (*func)(self, i); } static Py_ssize_t getindex(PyObject *self, PyObject *arg) { Py_ssize_t i; i = PyNumber_AsSsize_t(arg, PyExc_OverflowError); if (i == -1 && PyErr_Occurred()) return -1; if (i < 0) { PySequenceMethods *sq = Py_TYPE(self)->tp_as_sequence; if (sq && sq->sq_length) { Py_ssize_t n = (*sq->sq_length)(self); if (n < 0) { assert(PyErr_Occurred()); return -1; } i += n; } } return i; } static PyObject * wrap_sq_item(PyObject *self, PyObject *args, void *wrapped) { ssizeargfunc func = (ssizeargfunc)wrapped; PyObject *arg; Py_ssize_t i; if (PyTuple_GET_SIZE(args) == 1) { arg = PyTuple_GET_ITEM(args, 0); i = getindex(self, arg); if (i == -1 && PyErr_Occurred()) return NULL; return (*func)(self, i); } check_num_args(args, 1); assert(PyErr_Occurred()); return NULL; } static PyObject * wrap_sq_setitem(PyObject *self, PyObject *args, void *wrapped) { ssizeobjargproc func = (ssizeobjargproc)wrapped; Py_ssize_t i; int res; PyObject *arg, *value; if (!PyArg_UnpackTuple(args, "", 2, 2, &arg, &value)) return NULL; i = getindex(self, arg); if (i == -1 && PyErr_Occurred()) return NULL; 
res = (*func)(self, i, value); if (res == -1 && PyErr_Occurred()) return NULL; Py_RETURN_NONE; } static PyObject * wrap_sq_delitem(PyObject *self, PyObject *args, void *wrapped) { ssizeobjargproc func = (ssizeobjargproc)wrapped; Py_ssize_t i; int res; PyObject *arg; if (!check_num_args(args, 1)) return NULL; arg = PyTuple_GET_ITEM(args, 0); i = getindex(self, arg); if (i == -1 && PyErr_Occurred()) return NULL; res = (*func)(self, i, NULL); if (res == -1 && PyErr_Occurred()) return NULL; Py_RETURN_NONE; } /* XXX objobjproc is a misnomer; should be objargpred */ static PyObject * wrap_objobjproc(PyObject *self, PyObject *args, void *wrapped) { objobjproc func = (objobjproc)wrapped; int res; PyObject *value; if (!check_num_args(args, 1)) return NULL; value = PyTuple_GET_ITEM(args, 0); res = (*func)(self, value); if (res == -1 && PyErr_Occurred()) return NULL; else return PyBool_FromLong(res); } static PyObject * wrap_objobjargproc(PyObject *self, PyObject *args, void *wrapped) { objobjargproc func = (objobjargproc)wrapped; int res; PyObject *key, *value; if (!PyArg_UnpackTuple(args, "", 2, 2, &key, &value)) return NULL; res = (*func)(self, key, value); if (res == -1 && PyErr_Occurred()) return NULL; Py_RETURN_NONE; } static PyObject * wrap_delitem(PyObject *self, PyObject *args, void *wrapped) { objobjargproc func = (objobjargproc)wrapped; int res; PyObject *key; if (!check_num_args(args, 1)) return NULL; key = PyTuple_GET_ITEM(args, 0); res = (*func)(self, key, NULL); if (res == -1 && PyErr_Occurred()) return NULL; Py_RETURN_NONE; } /* Helper to check for object.__setattr__ or __delattr__ applied to a type. This is called the Carlo Verre hack after its discoverer. */ static int hackcheck(PyObject *self, setattrofunc func, const char *what) { PyTypeObject *type = Py_TYPE(self); while (type && type->tp_flags & Py_TPFLAGS_HEAPTYPE) type = type->tp_base; /* If type is NULL now, this is a really weird type. In the spirit of backwards compatibility (?), just shut up. 
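   Example of what this prevents (my note): object.__setattr__(int, "foo", 1)
   would bypass type.__setattr__ and mutate a built-in type; since the static
   type of 'int' is 'type', whose tp_setattro differs from the wrapped object
   slot, the check below raises TypeError instead.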
*/ if (type && type->tp_setattro != func) { PyErr_Format(PyExc_TypeError, "can't apply this %s to %s object", what, type->tp_name); return 0; } return 1; } static PyObject * wrap_setattr(PyObject *self, PyObject *args, void *wrapped) { setattrofunc func = (setattrofunc)wrapped; int res; PyObject *name, *value; if (!PyArg_UnpackTuple(args, "", 2, 2, &name, &value)) return NULL; if (!hackcheck(self, func, "__setattr__")) return NULL; res = (*func)(self, name, value); if (res < 0) return NULL; Py_RETURN_NONE; } static PyObject * wrap_delattr(PyObject *self, PyObject *args, void *wrapped) { setattrofunc func = (setattrofunc)wrapped; int res; PyObject *name; if (!check_num_args(args, 1)) return NULL; name = PyTuple_GET_ITEM(args, 0); if (!hackcheck(self, func, "__delattr__")) return NULL; res = (*func)(self, name, NULL); if (res < 0) return NULL; Py_RETURN_NONE; } static PyObject * wrap_hashfunc(PyObject *self, PyObject *args, void *wrapped) { hashfunc func = (hashfunc)wrapped; Py_hash_t res; if (!check_num_args(args, 0)) return NULL; res = (*func)(self); if (res == -1 && PyErr_Occurred()) return NULL; return PyLong_FromSsize_t(res); } static PyObject * wrap_call(PyObject *self, PyObject *args, void *wrapped, PyObject *kwds) { ternaryfunc func = (ternaryfunc)wrapped; return (*func)(self, args, kwds); } static PyObject * wrap_del(PyObject *self, PyObject *args, void *wrapped) { destructor func = (destructor)wrapped; if (!check_num_args(args, 0)) return NULL; (*func)(self); Py_RETURN_NONE; } static PyObject * wrap_richcmpfunc(PyObject *self, PyObject *args, void *wrapped, int op) { richcmpfunc func = (richcmpfunc)wrapped; PyObject *other; if (!check_num_args(args, 1)) return NULL; other = PyTuple_GET_ITEM(args, 0); return (*func)(self, other, op); } #undef RICHCMP_WRAPPER #define RICHCMP_WRAPPER(NAME, OP) \ static PyObject * \ richcmp_##NAME(PyObject *self, PyObject *args, void *wrapped) \ { \ return wrap_richcmpfunc(self, args, wrapped, OP); \ } RICHCMP_WRAPPER(lt, Py_LT) RICHCMP_WRAPPER(le, Py_LE) RICHCMP_WRAPPER(eq, Py_EQ) RICHCMP_WRAPPER(ne, Py_NE) RICHCMP_WRAPPER(gt, Py_GT) RICHCMP_WRAPPER(ge, Py_GE) static PyObject * wrap_next(PyObject *self, PyObject *args, void *wrapped) { unaryfunc func = (unaryfunc)wrapped; PyObject *res; if (!check_num_args(args, 0)) return NULL; res = (*func)(self); if (res == NULL && !PyErr_Occurred()) PyErr_SetNone(PyExc_StopIteration); return res; } static PyObject * wrap_descr_get(PyObject *self, PyObject *args, void *wrapped) { descrgetfunc func = (descrgetfunc)wrapped; PyObject *obj; PyObject *type = NULL; if (!PyArg_UnpackTuple(args, "", 1, 2, &obj, &type)) return NULL; if (obj == Py_None) obj = NULL; if (type == Py_None) type = NULL; if (type == NULL &&obj == NULL) { PyErr_SetString(PyExc_TypeError, "__get__(None, None) is invalid"); return NULL; } return (*func)(self, obj, type); } static PyObject * wrap_descr_set(PyObject *self, PyObject *args, void *wrapped) { descrsetfunc func = (descrsetfunc)wrapped; PyObject *obj, *value; int ret; if (!PyArg_UnpackTuple(args, "", 2, 2, &obj, &value)) return NULL; ret = (*func)(self, obj, value); if (ret < 0) return NULL; Py_RETURN_NONE; } static PyObject * wrap_descr_delete(PyObject *self, PyObject *args, void *wrapped) { descrsetfunc func = (descrsetfunc)wrapped; PyObject *obj; int ret; if (!check_num_args(args, 1)) return NULL; obj = PyTuple_GET_ITEM(args, 0); ret = (*func)(self, obj, NULL); if (ret < 0) return NULL; Py_RETURN_NONE; } static PyObject * wrap_init(PyObject *self, PyObject *args, void *wrapped, PyObject 
*kwds) { initproc func = (initproc)wrapped; if (func(self, args, kwds) < 0) return NULL; Py_RETURN_NONE; } static PyObject * tp_new_wrapper(PyObject *self, PyObject *args, PyObject *kwds) { PyTypeObject *type, *subtype, *staticbase; PyObject *arg0, *res; if (self == NULL || !PyType_Check(self)) Py_FatalError("__new__() called with non-type 'self'"); type = (PyTypeObject *)self; if (!PyTuple_Check(args) || PyTuple_GET_SIZE(args) < 1) { PyErr_Format(PyExc_TypeError, "%s.__new__(): not enough arguments", type->tp_name); return NULL; } arg0 = PyTuple_GET_ITEM(args, 0); if (!PyType_Check(arg0)) { PyErr_Format(PyExc_TypeError, "%s.__new__(X): X is not a type object (%s)", type->tp_name, Py_TYPE(arg0)->tp_name); return NULL; } subtype = (PyTypeObject *)arg0; if (!PyType_IsSubtype(subtype, type)) { PyErr_Format(PyExc_TypeError, "%s.__new__(%s): %s is not a subtype of %s", type->tp_name, subtype->tp_name, subtype->tp_name, type->tp_name); return NULL; } /* Check that the use doesn't do something silly and unsafe like object.__new__(dict). To do this, we check that the most derived base that's not a heap type is this type. */ staticbase = subtype; while (staticbase && (staticbase->tp_new == slot_tp_new)) staticbase = staticbase->tp_base; /* If staticbase is NULL now, it is a really weird type. In the spirit of backwards compatibility (?), just shut up. */ if (staticbase && staticbase->tp_new != type->tp_new) { PyErr_Format(PyExc_TypeError, "%s.__new__(%s) is not safe, use %s.__new__()", type->tp_name, subtype->tp_name, staticbase->tp_name); return NULL; } args = PyTuple_GetSlice(args, 1, PyTuple_GET_SIZE(args)); if (args == NULL) return NULL; res = type->tp_new(subtype, args, kwds); Py_DECREF(args); return res; } static struct PyMethodDef tp_new_methoddef[] = { {"__new__", (PyCFunction)tp_new_wrapper, METH_VARARGS|METH_KEYWORDS, PyDoc_STR("__new__($type, *args, **kwargs)\n--\n\n" "Create and return a new object. " "See help(type) for accurate signature.")}, {0} }; static int add_tp_new_wrapper(PyTypeObject *type) { PyObject *func; if (_PyDict_GetItemId(type->tp_dict, &PyId___new__) != NULL) return 0; func = PyCFunction_NewEx(tp_new_methoddef, (PyObject *)type, NULL); if (func == NULL) return -1; if (_PyDict_SetItemId(type->tp_dict, &PyId___new__, func)) { Py_DECREF(func); return -1; } Py_DECREF(func); return 0; } /* Slot wrappers that call the corresponding __foo__ slot. See comments below at override_slots() for more explanation. */ #define SLOT0(FUNCNAME, OPSTR) \ static PyObject * \ FUNCNAME(PyObject *self) \ { \ _Py_static_string(id, OPSTR); \ return call_method(self, &id, NULL, 0); \ } #define SLOT1(FUNCNAME, OPSTR, ARG1TYPE) \ static PyObject * \ FUNCNAME(PyObject *self, ARG1TYPE arg1) \ { \ PyObject* stack[1] = {arg1}; \ _Py_static_string(id, OPSTR); \ return call_method(self, &id, stack, 1); \ } /* Boolean helper for SLOT1BINFULL(). right.__class__ is a nontrivial subclass of left.__class__. 
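   Example (my note): in 'a + b' where type(b) is a proper subclass of type(a)
   and defines __radd__ differently from type(a), SLOT1BINFULL below tries
   b.__radd__(a) before a.__add__(b); this helper is what decides whether the
   subclass really overrode the reflected method.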
*/ static int method_is_overloaded(PyObject *left, PyObject *right, struct _Py_Identifier *name) { PyObject *a, *b; int ok; b = _PyObject_GetAttrId((PyObject *)(Py_TYPE(right)), name); if (b == NULL) { PyErr_Clear(); /* If right doesn't have it, it's not overloaded */ return 0; } a = _PyObject_GetAttrId((PyObject *)(Py_TYPE(left)), name); if (a == NULL) { PyErr_Clear(); Py_DECREF(b); /* If right has it but left doesn't, it's overloaded */ return 1; } ok = PyObject_RichCompareBool(a, b, Py_NE); Py_DECREF(a); Py_DECREF(b); if (ok < 0) { PyErr_Clear(); return 0; } return ok; } #define SLOT1BINFULL(FUNCNAME, TESTFUNC, SLOTNAME, OPSTR, ROPSTR) \ static PyObject * \ FUNCNAME(PyObject *self, PyObject *other) \ { \ PyObject* stack[1]; \ _Py_static_string(op_id, OPSTR); \ _Py_static_string(rop_id, ROPSTR); \ int do_other = Py_TYPE(self) != Py_TYPE(other) && \ Py_TYPE(other)->tp_as_number != NULL && \ Py_TYPE(other)->tp_as_number->SLOTNAME == TESTFUNC; \ if (Py_TYPE(self)->tp_as_number != NULL && \ Py_TYPE(self)->tp_as_number->SLOTNAME == TESTFUNC) { \ PyObject *r; \ if (do_other && \ PyType_IsSubtype(Py_TYPE(other), Py_TYPE(self)) && \ method_is_overloaded(self, other, &rop_id)) { \ stack[0] = self; \ r = call_maybe(other, &rop_id, stack, 1); \ if (r != Py_NotImplemented) \ return r; \ Py_DECREF(r); \ do_other = 0; \ } \ stack[0] = other; \ r = call_maybe(self, &op_id, stack, 1); \ if (r != Py_NotImplemented || \ Py_TYPE(other) == Py_TYPE(self)) \ return r; \ Py_DECREF(r); \ } \ if (do_other) { \ stack[0] = self; \ return call_maybe(other, &rop_id, stack, 1); \ } \ Py_RETURN_NOTIMPLEMENTED; \ } #define SLOT1BIN(FUNCNAME, SLOTNAME, OPSTR, ROPSTR) \ SLOT1BINFULL(FUNCNAME, FUNCNAME, SLOTNAME, OPSTR, ROPSTR) static Py_ssize_t slot_sq_length(PyObject *self) { PyObject *res = call_method(self, &PyId___len__, NULL, 0); Py_ssize_t len; if (res == NULL) return -1; Py_SETREF(res, PyNumber_Index(res)); if (res == NULL) return -1; assert(PyLong_Check(res)); if (Py_SIZE(res) < 0) { Py_DECREF(res); PyErr_SetString(PyExc_ValueError, "__len__() should return >= 0"); return -1; } len = PyNumber_AsSsize_t(res, PyExc_OverflowError); assert(len >= 0 || PyErr_ExceptionMatches(PyExc_OverflowError)); Py_DECREF(res); return len; } static PyObject * slot_sq_item(PyObject *self, Py_ssize_t i) { PyObject *retval; PyObject *args[1]; PyObject *ival = PyLong_FromSsize_t(i); if (ival == NULL) { return NULL; } args[0] = ival; retval = call_method(self, &PyId___getitem__, args, 1); Py_DECREF(ival); return retval; } static int slot_sq_ass_item(PyObject *self, Py_ssize_t index, PyObject *value) { PyObject *stack[2]; PyObject *res; PyObject *index_obj; index_obj = PyLong_FromSsize_t(index); if (index_obj == NULL) { return -1; } stack[0] = index_obj; if (value == NULL) { res = call_method(self, &PyId___delitem__, stack, 1); } else { stack[1] = value; res = call_method(self, &PyId___setitem__, stack, 2); } Py_DECREF(index_obj); if (res == NULL) { return -1; } Py_DECREF(res); return 0; } static int slot_sq_contains(PyObject *self, PyObject *value) { PyObject *func, *res; int result = -1, unbound; _Py_IDENTIFIER(__contains__); func = lookup_maybe_method(self, &PyId___contains__, &unbound); if (func == Py_None) { Py_DECREF(func); PyErr_Format(PyExc_TypeError, "'%.200s' object is not a container", Py_TYPE(self)->tp_name); return -1; } if (func != NULL) { PyObject *args[1] = {value}; res = call_unbound(unbound, func, self, args, 1); Py_DECREF(func); if (res != NULL) { result = PyObject_IsTrue(res); Py_DECREF(res); } } else if (! 
PyErr_Occurred()) { /* Possible results: -1 and 1 */ result = (int)_PySequence_IterSearch(self, value, PY_ITERSEARCH_CONTAINS); } return result; } #define slot_mp_length slot_sq_length SLOT1(slot_mp_subscript, "__getitem__", PyObject *) static int slot_mp_ass_subscript(PyObject *self, PyObject *key, PyObject *value) { PyObject *stack[2]; PyObject *res; stack[0] = key; if (value == NULL) { res = call_method(self, &PyId___delitem__, stack, 1); } else { stack[1] = value; res = call_method(self, &PyId___setitem__, stack, 2); } if (res == NULL) return -1; Py_DECREF(res); return 0; } SLOT1BIN(slot_nb_add, nb_add, "__add__", "__radd__") SLOT1BIN(slot_nb_subtract, nb_subtract, "__sub__", "__rsub__") SLOT1BIN(slot_nb_multiply, nb_multiply, "__mul__", "__rmul__") SLOT1BIN(slot_nb_matrix_multiply, nb_matrix_multiply, "__matmul__", "__rmatmul__") SLOT1BIN(slot_nb_remainder, nb_remainder, "__mod__", "__rmod__") SLOT1BIN(slot_nb_divmod, nb_divmod, "__divmod__", "__rdivmod__") static PyObject *slot_nb_power(PyObject *, PyObject *, PyObject *); SLOT1BINFULL(slot_nb_power_binary, slot_nb_power, nb_power, "__pow__", "__rpow__") static PyObject * slot_nb_power(PyObject *self, PyObject *other, PyObject *modulus) { _Py_IDENTIFIER(__pow__); if (modulus == Py_None) return slot_nb_power_binary(self, other); /* Three-arg power doesn't use __rpow__. But ternary_op can call this when the second argument's type uses slot_nb_power, so check before calling self.__pow__. */ if (Py_TYPE(self)->tp_as_number != NULL && Py_TYPE(self)->tp_as_number->nb_power == slot_nb_power) { PyObject* stack[2] = {other, modulus}; return call_method(self, &PyId___pow__, stack, 2); } Py_RETURN_NOTIMPLEMENTED; } SLOT0(slot_nb_negative, "__neg__") SLOT0(slot_nb_positive, "__pos__") SLOT0(slot_nb_absolute, "__abs__") static int slot_nb_bool(PyObject *self) { PyObject *func, *value; int result, unbound; int using_len = 0; _Py_IDENTIFIER(__bool__); func = lookup_maybe_method(self, &PyId___bool__, &unbound); if (func == NULL) { if (PyErr_Occurred()) { return -1; } func = lookup_maybe_method(self, &PyId___len__, &unbound); if (func == NULL) { if (PyErr_Occurred()) { return -1; } return 1; } using_len = 1; } value = call_unbound_noarg(unbound, func, self); if (value == NULL) { goto error; } if (using_len) { /* bool type enforced by slot_nb_len */ result = PyObject_IsTrue(value); } else if (PyBool_Check(value)) { result = PyObject_IsTrue(value); } else { PyErr_Format(PyExc_TypeError, "__bool__ should return " "bool, returned %s", Py_TYPE(value)->tp_name); result = -1; } Py_DECREF(value); Py_DECREF(func); return result; error: Py_DECREF(func); return -1; } static PyObject * slot_nb_index(PyObject *self) { _Py_IDENTIFIER(__index__); return call_method(self, &PyId___index__, NULL, 0); } SLOT0(slot_nb_invert, "__invert__") SLOT1BIN(slot_nb_lshift, nb_lshift, "__lshift__", "__rlshift__") SLOT1BIN(slot_nb_rshift, nb_rshift, "__rshift__", "__rrshift__") SLOT1BIN(slot_nb_and, nb_and, "__and__", "__rand__") SLOT1BIN(slot_nb_xor, nb_xor, "__xor__", "__rxor__") SLOT1BIN(slot_nb_or, nb_or, "__or__", "__ror__") SLOT0(slot_nb_int, "__int__") SLOT0(slot_nb_float, "__float__") SLOT1(slot_nb_inplace_add, "__iadd__", PyObject *) SLOT1(slot_nb_inplace_subtract, "__isub__", PyObject *) SLOT1(slot_nb_inplace_multiply, "__imul__", PyObject *) SLOT1(slot_nb_inplace_matrix_multiply, "__imatmul__", PyObject *) SLOT1(slot_nb_inplace_remainder, "__imod__", PyObject *) /* Can't use SLOT1 here, because nb_inplace_power is ternary */ static PyObject * 
slot_nb_inplace_power(PyObject *self, PyObject * arg1, PyObject *arg2) { PyObject *stack[1] = {arg1}; _Py_IDENTIFIER(__ipow__); return call_method(self, &PyId___ipow__, stack, 1); } SLOT1(slot_nb_inplace_lshift, "__ilshift__", PyObject *) SLOT1(slot_nb_inplace_rshift, "__irshift__", PyObject *) SLOT1(slot_nb_inplace_and, "__iand__", PyObject *) SLOT1(slot_nb_inplace_xor, "__ixor__", PyObject *) SLOT1(slot_nb_inplace_or, "__ior__", PyObject *) SLOT1BIN(slot_nb_floor_divide, nb_floor_divide, "__floordiv__", "__rfloordiv__") SLOT1BIN(slot_nb_true_divide, nb_true_divide, "__truediv__", "__rtruediv__") SLOT1(slot_nb_inplace_floor_divide, "__ifloordiv__", PyObject *) SLOT1(slot_nb_inplace_true_divide, "__itruediv__", PyObject *) static PyObject * slot_tp_repr(PyObject *self) { PyObject *func, *res; _Py_IDENTIFIER(__repr__); int unbound; func = lookup_maybe_method(self, &PyId___repr__, &unbound); if (func != NULL) { res = call_unbound_noarg(unbound, func, self); Py_DECREF(func); return res; } PyErr_Clear(); return PyUnicode_FromFormat("<%s object at %p>", Py_TYPE(self)->tp_name, self); } SLOT0(slot_tp_str, "__str__") static Py_hash_t slot_tp_hash(PyObject *self) { PyObject *func, *res; Py_ssize_t h; int unbound; func = lookup_maybe_method(self, &PyId___hash__, &unbound); if (func == Py_None) { Py_DECREF(func); func = NULL; } if (func == NULL) { return PyObject_HashNotImplemented(self); } res = call_unbound_noarg(unbound, func, self); Py_DECREF(func); if (res == NULL) return -1; if (!PyLong_Check(res)) { PyErr_SetString(PyExc_TypeError, "__hash__ method should return an integer"); return -1; } /* Transform the PyLong `res` to a Py_hash_t `h`. For an existing hashable Python object x, hash(x) will always lie within the range of Py_hash_t. Therefore our transformation must preserve values that already lie within this range, to ensure that if x.__hash__() returns hash(y) then hash(x) == hash(y). */ h = PyLong_AsSsize_t(res); if (h == -1 && PyErr_Occurred()) { /* res was not within the range of a Py_hash_t, so we're free to use any sufficiently bit-mixing transformation; long.__hash__ will do nicely. */ PyErr_Clear(); h = PyLong_Type.tp_hash(res); } /* -1 is reserved for errors. */ if (h == -1) h = -2; Py_DECREF(res); return h; } static PyObject * slot_tp_call(PyObject *self, PyObject *args, PyObject *kwds) { _Py_IDENTIFIER(__call__); int unbound; PyObject *meth = lookup_method(self, &PyId___call__, &unbound); PyObject *res; if (meth == NULL) return NULL; if (unbound) { res = _PyObject_Call_Prepend(meth, self, args, kwds); } else { res = PyObject_Call(meth, args, kwds); } Py_DECREF(meth); return res; } /* There are two slot dispatch functions for tp_getattro. - slot_tp_getattro() is used when __getattribute__ is overridden but no __getattr__ hook is present; - slot_tp_getattr_hook() is used when a __getattr__ hook is present. The code in update_one_slot() always installs slot_tp_getattr_hook(); this detects the absence of __getattr__ and then installs the simpler slot if necessary. 
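   Example (my note):
       class A:
           def __getattr__(self, name): return name.upper()
   A gets slot_tp_getattr_hook installed; a class that only overrides
   __getattribute__ also starts with the hook, but the first lookup notices
   that no __getattr__ exists and patches tp_getattro down to the simpler
   slot_tp_getattro.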
*/ static PyObject * slot_tp_getattro(PyObject *self, PyObject *name) { PyObject *stack[1] = {name}; return call_method(self, &PyId___getattribute__, stack, 1); } static PyObject * call_attribute(PyObject *self, PyObject *attr, PyObject *name) { PyObject *res, *descr = NULL; descrgetfunc f = Py_TYPE(attr)->tp_descr_get; if (f != NULL) { descr = f(attr, self, (PyObject *)(Py_TYPE(self))); if (descr == NULL) return NULL; else attr = descr; } res = PyObject_CallFunctionObjArgs(attr, name, NULL); Py_XDECREF(descr); return res; } static PyObject * slot_tp_getattr_hook(PyObject *self, PyObject *name) { PyTypeObject *tp = Py_TYPE(self); PyObject *getattr, *getattribute, *res; _Py_IDENTIFIER(__getattr__); /* speed hack: we could use lookup_maybe, but that would resolve the method fully for each attribute lookup for classes with __getattr__, even when the attribute is present. So we use _PyType_Lookup and create the method only when needed, with call_attribute. */ getattr = _PyType_LookupId(tp, &PyId___getattr__); if (getattr == NULL) { /* No __getattr__ hook: use a simpler dispatcher */ tp->tp_getattro = slot_tp_getattro; return slot_tp_getattro(self, name); } Py_INCREF(getattr); /* speed hack: we could use lookup_maybe, but that would resolve the method fully for each attribute lookup for classes with __getattr__, even when self has the default __getattribute__ method. So we use _PyType_Lookup and create the method only when needed, with call_attribute. */ getattribute = _PyType_LookupId(tp, &PyId___getattribute__); if (getattribute == NULL || (Py_TYPE(getattribute) == &PyWrapperDescr_Type && ((PyWrapperDescrObject *)getattribute)->d_wrapped == (void *)PyObject_GenericGetAttr)) res = PyObject_GenericGetAttr(self, name); else { Py_INCREF(getattribute); res = call_attribute(self, getattribute, name); Py_DECREF(getattribute); } if (res == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); res = call_attribute(self, getattr, name); } Py_DECREF(getattr); return res; } static int slot_tp_setattro(PyObject *self, PyObject *name, PyObject *value) { PyObject *stack[2]; PyObject *res; _Py_IDENTIFIER(__delattr__); _Py_IDENTIFIER(__setattr__); stack[0] = name; if (value == NULL) { res = call_method(self, &PyId___delattr__, stack, 1); } else { stack[1] = value; res = call_method(self, &PyId___setattr__, stack, 2); } if (res == NULL) return -1; Py_DECREF(res); return 0; } static _Py_Identifier name_op[] = { {0, "__lt__", 0}, {0, "__le__", 0}, {0, "__eq__", 0}, {0, "__ne__", 0}, {0, "__gt__", 0}, {0, "__ge__", 0} }; static PyObject * slot_tp_richcompare(PyObject *self, PyObject *other, int op) { int unbound; PyObject *func, *res; func = lookup_maybe_method(self, &name_op[op], &unbound); if (func == NULL) { PyErr_Clear(); Py_RETURN_NOTIMPLEMENTED; } PyObject *args[1] = {other}; res = call_unbound(unbound, func, self, args, 1); Py_DECREF(func); return res; } static PyObject * slot_tp_iter(PyObject *self) { int unbound; PyObject *func, *res; _Py_IDENTIFIER(__iter__); func = lookup_maybe_method(self, &PyId___iter__, &unbound); if (func == Py_None) { Py_DECREF(func); PyErr_Format(PyExc_TypeError, "'%.200s' object is not iterable", Py_TYPE(self)->tp_name); return NULL; } if (func != NULL) { res = call_unbound_noarg(unbound, func, self); Py_DECREF(func); return res; } PyErr_Clear(); func = lookup_maybe_method(self, &PyId___getitem__, &unbound); if (func == NULL) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not iterable", Py_TYPE(self)->tp_name); return NULL; } Py_DECREF(func); return 
PySeqIter_New(self); } static PyObject * slot_tp_iternext(PyObject *self) { _Py_IDENTIFIER(__next__); return call_method(self, &PyId___next__, NULL, 0); } static PyObject * slot_tp_descr_get(PyObject *self, PyObject *obj, PyObject *type) { PyTypeObject *tp = Py_TYPE(self); PyObject *get; _Py_IDENTIFIER(__get__); get = _PyType_LookupId(tp, &PyId___get__); if (get == NULL) { /* Avoid further slowdowns */ if (tp->tp_descr_get == slot_tp_descr_get) tp->tp_descr_get = NULL; Py_INCREF(self); return self; } if (obj == NULL) obj = Py_None; if (type == NULL) type = Py_None; return PyObject_CallFunctionObjArgs(get, self, obj, type, NULL); } static int slot_tp_descr_set(PyObject *self, PyObject *target, PyObject *value) { PyObject* stack[2]; PyObject *res; _Py_IDENTIFIER(__delete__); _Py_IDENTIFIER(__set__); stack[0] = target; if (value == NULL) { res = call_method(self, &PyId___delete__, stack, 1); } else { stack[1] = value; res = call_method(self, &PyId___set__, stack, 2); } if (res == NULL) return -1; Py_DECREF(res); return 0; } static int slot_tp_init(PyObject *self, PyObject *args, PyObject *kwds) { _Py_IDENTIFIER(__init__); int unbound; PyObject *meth = lookup_method(self, &PyId___init__, &unbound); PyObject *res; if (meth == NULL) return -1; if (unbound) { res = _PyObject_Call_Prepend(meth, self, args, kwds); } else { res = PyObject_Call(meth, args, kwds); } Py_DECREF(meth); if (res == NULL) return -1; if (res != Py_None) { PyErr_Format(PyExc_TypeError, "__init__() should return None, not '%.200s'", Py_TYPE(res)->tp_name); Py_DECREF(res); return -1; } Py_DECREF(res); return 0; } static PyObject * slot_tp_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { PyObject *func, *result; func = _PyObject_GetAttrId((PyObject *)type, &PyId___new__); if (func == NULL) { return NULL; } result = _PyObject_Call_Prepend(func, (PyObject *)type, args, kwds); Py_DECREF(func); return result; } static void slot_tp_finalize(PyObject *self) { _Py_IDENTIFIER(__del__); int unbound; PyObject *del, *res; PyObject *error_type, *error_value, *error_traceback; /* Save the current exception, if any. */ PyErr_Fetch(&error_type, &error_value, &error_traceback); /* Execute __del__ method, if any. */ del = lookup_maybe_method(self, &PyId___del__, &unbound); if (del != NULL) { res = call_unbound_noarg(unbound, del, self); if (res == NULL) PyErr_WriteUnraisable(del); else Py_DECREF(res); Py_DECREF(del); } /* Restore the saved exception. 
*/ PyErr_Restore(error_type, error_value, error_traceback); } static PyObject * slot_am_await(PyObject *self) { int unbound; PyObject *func, *res; _Py_IDENTIFIER(__await__); func = lookup_maybe_method(self, &PyId___await__, &unbound); if (func != NULL) { res = call_unbound_noarg(unbound, func, self); Py_DECREF(func); return res; } PyErr_Format(PyExc_AttributeError, "object %.50s does not have __await__ method", Py_TYPE(self)->tp_name); return NULL; } static PyObject * slot_am_aiter(PyObject *self) { int unbound; PyObject *func, *res; _Py_IDENTIFIER(__aiter__); func = lookup_maybe_method(self, &PyId___aiter__, &unbound); if (func != NULL) { res = call_unbound_noarg(unbound, func, self); Py_DECREF(func); return res; } PyErr_Format(PyExc_AttributeError, "object %.50s does not have __aiter__ method", Py_TYPE(self)->tp_name); return NULL; } static PyObject * slot_am_anext(PyObject *self) { int unbound; PyObject *func, *res; _Py_IDENTIFIER(__anext__); func = lookup_maybe_method(self, &PyId___anext__, &unbound); if (func != NULL) { res = call_unbound_noarg(unbound, func, self); Py_DECREF(func); return res; } PyErr_Format(PyExc_AttributeError, "object %.50s does not have __anext__ method", Py_TYPE(self)->tp_name); return NULL; } /* Table mapping __foo__ names to tp_foo offsets and slot_tp_foo wrapper functions. The table is ordered by offsets relative to the 'PyHeapTypeObject' structure, which incorporates the additional structures used for numbers, sequences and mappings. Note that multiple names may map to the same slot (e.g. __eq__, __ne__ etc. all map to tp_richcompare) and one name may map to multiple slots (e.g. __str__ affects tp_str as well as tp_repr). The table is terminated with an all-zero entry. (This table is further initialized in init_slotdefs().) 
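   Example (my note): in the table below __len__ appears twice, once for
   mp_length (MPSLOT) and once for sq_length (SQSLOT), while __add__ maps both
   to nb_add (BINSLOT) and to sq_concat (SQSLOT); conversely __lt__ through
   __ge__ all map to the single tp_richcompare slot.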
*/ typedef struct wrapperbase slotdef; #undef TPSLOT #undef FLSLOT #undef AMSLOT #undef ETSLOT #undef SQSLOT #undef MPSLOT #undef NBSLOT #undef UNSLOT #undef IBSLOT #undef BINSLOT #undef RBINSLOT #define TPSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ {NAME, offsetof(PyTypeObject, SLOT), (void *)(FUNCTION), WRAPPER, \ PyDoc_STR(DOC)} #define FLSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC, FLAGS) \ {NAME, offsetof(PyTypeObject, SLOT), (void *)(FUNCTION), WRAPPER, \ PyDoc_STR(DOC), FLAGS} #define ETSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ {NAME, offsetof(PyHeapTypeObject, SLOT), (void *)(FUNCTION), WRAPPER, \ PyDoc_STR(DOC)} #define AMSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ ETSLOT(NAME, as_async.SLOT, FUNCTION, WRAPPER, DOC) #define SQSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ ETSLOT(NAME, as_sequence.SLOT, FUNCTION, WRAPPER, DOC) #define MPSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ ETSLOT(NAME, as_mapping.SLOT, FUNCTION, WRAPPER, DOC) #define NBSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ ETSLOT(NAME, as_number.SLOT, FUNCTION, WRAPPER, DOC) #define UNSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ ETSLOT(NAME, as_number.SLOT, FUNCTION, WRAPPER, \ NAME "($self, /)\n--\n\n" DOC) #define IBSLOT(NAME, SLOT, FUNCTION, WRAPPER, DOC) \ ETSLOT(NAME, as_number.SLOT, FUNCTION, WRAPPER, \ NAME "($self, value, /)\n--\n\nReturn self" DOC "value.") #define BINSLOT(NAME, SLOT, FUNCTION, DOC) \ ETSLOT(NAME, as_number.SLOT, FUNCTION, wrap_binaryfunc_l, \ NAME "($self, value, /)\n--\n\nReturn self" DOC "value.") #define RBINSLOT(NAME, SLOT, FUNCTION, DOC) \ ETSLOT(NAME, as_number.SLOT, FUNCTION, wrap_binaryfunc_r, \ NAME "($self, value, /)\n--\n\nReturn value" DOC "self.") #define BINSLOTNOTINFIX(NAME, SLOT, FUNCTION, DOC) \ ETSLOT(NAME, as_number.SLOT, FUNCTION, wrap_binaryfunc_l, \ NAME "($self, value, /)\n--\n\n" DOC) #define RBINSLOTNOTINFIX(NAME, SLOT, FUNCTION, DOC) \ ETSLOT(NAME, as_number.SLOT, FUNCTION, wrap_binaryfunc_r, \ NAME "($self, value, /)\n--\n\n" DOC) static slotdef slotdefs[] = { TPSLOT("__getattribute__", tp_getattr, NULL, NULL, ""), TPSLOT("__getattr__", tp_getattr, NULL, NULL, ""), TPSLOT("__setattr__", tp_setattr, NULL, NULL, ""), TPSLOT("__delattr__", tp_setattr, NULL, NULL, ""), TPSLOT("__repr__", tp_repr, slot_tp_repr, wrap_unaryfunc, "__repr__($self, /)\n--\n\nReturn repr(self)."), TPSLOT("__hash__", tp_hash, slot_tp_hash, wrap_hashfunc, "__hash__($self, /)\n--\n\nReturn hash(self)."), FLSLOT("__call__", tp_call, slot_tp_call, (wrapperfunc)(void(*)(void))wrap_call, "__call__($self, /, *args, **kwargs)\n--\n\nCall self as a function.", PyWrapperFlag_KEYWORDS), TPSLOT("__str__", tp_str, slot_tp_str, wrap_unaryfunc, "__str__($self, /)\n--\n\nReturn str(self)."), TPSLOT("__getattribute__", tp_getattro, slot_tp_getattr_hook, wrap_binaryfunc, "__getattribute__($self, name, /)\n--\n\nReturn getattr(self, name)."), TPSLOT("__getattr__", tp_getattro, slot_tp_getattr_hook, NULL, ""), TPSLOT("__setattr__", tp_setattro, slot_tp_setattro, wrap_setattr, "__setattr__($self, name, value, /)\n--\n\nImplement setattr(self, name, value)."), TPSLOT("__delattr__", tp_setattro, slot_tp_setattro, wrap_delattr, "__delattr__($self, name, /)\n--\n\nImplement delattr(self, name)."), TPSLOT("__lt__", tp_richcompare, slot_tp_richcompare, richcmp_lt, "__lt__($self, value, /)\n--\n\nReturn self<value."), TPSLOT("__le__", tp_richcompare, slot_tp_richcompare, richcmp_le, "__le__($self, value, /)\n--\n\nReturn self<=value."), TPSLOT("__eq__", tp_richcompare, slot_tp_richcompare, richcmp_eq, "__eq__($self, value, 
/)\n--\n\nReturn self==value."), TPSLOT("__ne__", tp_richcompare, slot_tp_richcompare, richcmp_ne, "__ne__($self, value, /)\n--\n\nReturn self!=value."), TPSLOT("__gt__", tp_richcompare, slot_tp_richcompare, richcmp_gt, "__gt__($self, value, /)\n--\n\nReturn self>value."), TPSLOT("__ge__", tp_richcompare, slot_tp_richcompare, richcmp_ge, "__ge__($self, value, /)\n--\n\nReturn self>=value."), TPSLOT("__iter__", tp_iter, slot_tp_iter, wrap_unaryfunc, "__iter__($self, /)\n--\n\nImplement iter(self)."), TPSLOT("__next__", tp_iternext, slot_tp_iternext, wrap_next, "__next__($self, /)\n--\n\nImplement next(self)."), TPSLOT("__get__", tp_descr_get, slot_tp_descr_get, wrap_descr_get, "__get__($self, instance, owner, /)\n--\n\nReturn an attribute of instance, which is of type owner."), TPSLOT("__set__", tp_descr_set, slot_tp_descr_set, wrap_descr_set, "__set__($self, instance, value, /)\n--\n\nSet an attribute of instance to value."), TPSLOT("__delete__", tp_descr_set, slot_tp_descr_set, wrap_descr_delete, "__delete__($self, instance, /)\n--\n\nDelete an attribute of instance."), FLSLOT("__init__", tp_init, slot_tp_init, (wrapperfunc)(void(*)(void))wrap_init, "__init__($self, /, *args, **kwargs)\n--\n\n" "Initialize self. See help(type(self)) for accurate signature.", PyWrapperFlag_KEYWORDS), TPSLOT("__new__", tp_new, slot_tp_new, NULL, "__new__(type, /, *args, **kwargs)\n--\n\n" "Create and return new object. See help(type) for accurate signature."), TPSLOT("__del__", tp_finalize, slot_tp_finalize, (wrapperfunc)wrap_del, ""), AMSLOT("__await__", am_await, slot_am_await, wrap_unaryfunc, "__await__($self, /)\n--\n\nReturn an iterator to be used in await expression."), AMSLOT("__aiter__", am_aiter, slot_am_aiter, wrap_unaryfunc, "__aiter__($self, /)\n--\n\nReturn an awaitable, that resolves in asynchronous iterator."), AMSLOT("__anext__", am_anext, slot_am_anext, wrap_unaryfunc, "__anext__($self, /)\n--\n\nReturn a value or raise StopAsyncIteration."), BINSLOT("__add__", nb_add, slot_nb_add, "+"), RBINSLOT("__radd__", nb_add, slot_nb_add, "+"), BINSLOT("__sub__", nb_subtract, slot_nb_subtract, "-"), RBINSLOT("__rsub__", nb_subtract, slot_nb_subtract, "-"), BINSLOT("__mul__", nb_multiply, slot_nb_multiply, "*"), RBINSLOT("__rmul__", nb_multiply, slot_nb_multiply, "*"), BINSLOT("__mod__", nb_remainder, slot_nb_remainder, "%"), RBINSLOT("__rmod__", nb_remainder, slot_nb_remainder, "%"), BINSLOTNOTINFIX("__divmod__", nb_divmod, slot_nb_divmod, "Return divmod(self, value)."), RBINSLOTNOTINFIX("__rdivmod__", nb_divmod, slot_nb_divmod, "Return divmod(value, self)."), NBSLOT("__pow__", nb_power, slot_nb_power, wrap_ternaryfunc, "__pow__($self, value, mod=None, /)\n--\n\nReturn pow(self, value, mod)."), NBSLOT("__rpow__", nb_power, slot_nb_power, wrap_ternaryfunc_r, "__rpow__($self, value, mod=None, /)\n--\n\nReturn pow(value, self, mod)."), UNSLOT("__neg__", nb_negative, slot_nb_negative, wrap_unaryfunc, "-self"), UNSLOT("__pos__", nb_positive, slot_nb_positive, wrap_unaryfunc, "+self"), UNSLOT("__abs__", nb_absolute, slot_nb_absolute, wrap_unaryfunc, "abs(self)"), UNSLOT("__bool__", nb_bool, slot_nb_bool, wrap_inquirypred, "self != 0"), UNSLOT("__invert__", nb_invert, slot_nb_invert, wrap_unaryfunc, "~self"), BINSLOT("__lshift__", nb_lshift, slot_nb_lshift, "<<"), RBINSLOT("__rlshift__", nb_lshift, slot_nb_lshift, "<<"), BINSLOT("__rshift__", nb_rshift, slot_nb_rshift, ">>"), RBINSLOT("__rrshift__", nb_rshift, slot_nb_rshift, ">>"), BINSLOT("__and__", nb_and, slot_nb_and, "&"), RBINSLOT("__rand__", nb_and, 
slot_nb_and, "&"), BINSLOT("__xor__", nb_xor, slot_nb_xor, "^"), RBINSLOT("__rxor__", nb_xor, slot_nb_xor, "^"), BINSLOT("__or__", nb_or, slot_nb_or, "|"), RBINSLOT("__ror__", nb_or, slot_nb_or, "|"), UNSLOT("__int__", nb_int, slot_nb_int, wrap_unaryfunc, "int(self)"), UNSLOT("__float__", nb_float, slot_nb_float, wrap_unaryfunc, "float(self)"), IBSLOT("__iadd__", nb_inplace_add, slot_nb_inplace_add, wrap_binaryfunc, "+="), IBSLOT("__isub__", nb_inplace_subtract, slot_nb_inplace_subtract, wrap_binaryfunc, "-="), IBSLOT("__imul__", nb_inplace_multiply, slot_nb_inplace_multiply, wrap_binaryfunc, "*="), IBSLOT("__imod__", nb_inplace_remainder, slot_nb_inplace_remainder, wrap_binaryfunc, "%="), IBSLOT("__ipow__", nb_inplace_power, slot_nb_inplace_power, wrap_binaryfunc, "**="), IBSLOT("__ilshift__", nb_inplace_lshift, slot_nb_inplace_lshift, wrap_binaryfunc, "<<="), IBSLOT("__irshift__", nb_inplace_rshift, slot_nb_inplace_rshift, wrap_binaryfunc, ">>="), IBSLOT("__iand__", nb_inplace_and, slot_nb_inplace_and, wrap_binaryfunc, "&="), IBSLOT("__ixor__", nb_inplace_xor, slot_nb_inplace_xor, wrap_binaryfunc, "^="), IBSLOT("__ior__", nb_inplace_or, slot_nb_inplace_or, wrap_binaryfunc, "|="), BINSLOT("__floordiv__", nb_floor_divide, slot_nb_floor_divide, "//"), RBINSLOT("__rfloordiv__", nb_floor_divide, slot_nb_floor_divide, "//"), BINSLOT("__truediv__", nb_true_divide, slot_nb_true_divide, "/"), RBINSLOT("__rtruediv__", nb_true_divide, slot_nb_true_divide, "/"), IBSLOT("__ifloordiv__", nb_inplace_floor_divide, slot_nb_inplace_floor_divide, wrap_binaryfunc, "//="), IBSLOT("__itruediv__", nb_inplace_true_divide, slot_nb_inplace_true_divide, wrap_binaryfunc, "/="), NBSLOT("__index__", nb_index, slot_nb_index, wrap_unaryfunc, "__index__($self, /)\n--\n\n" "Return self converted to an integer, if self is suitable " "for use as an index into a list."), BINSLOT("__matmul__", nb_matrix_multiply, slot_nb_matrix_multiply, "@"), RBINSLOT("__rmatmul__", nb_matrix_multiply, slot_nb_matrix_multiply, "@"), IBSLOT("__imatmul__", nb_inplace_matrix_multiply, slot_nb_inplace_matrix_multiply, wrap_binaryfunc, "@="), MPSLOT("__len__", mp_length, slot_mp_length, wrap_lenfunc, "__len__($self, /)\n--\n\nReturn len(self)."), MPSLOT("__getitem__", mp_subscript, slot_mp_subscript, wrap_binaryfunc, "__getitem__($self, key, /)\n--\n\nReturn self[key]."), MPSLOT("__setitem__", mp_ass_subscript, slot_mp_ass_subscript, wrap_objobjargproc, "__setitem__($self, key, value, /)\n--\n\nSet self[key] to value."), MPSLOT("__delitem__", mp_ass_subscript, slot_mp_ass_subscript, wrap_delitem, "__delitem__($self, key, /)\n--\n\nDelete self[key]."), SQSLOT("__len__", sq_length, slot_sq_length, wrap_lenfunc, "__len__($self, /)\n--\n\nReturn len(self)."), /* Heap types defining __add__/__mul__ have sq_concat/sq_repeat == NULL. The logic in abstract.c always falls back to nb_add/nb_multiply in this case. 
Defining both the nb_* and the sq_* slots to call the user-defined methods has unexpected side-effects, as shown by test_descr.notimplemented() */ SQSLOT("__add__", sq_concat, NULL, wrap_binaryfunc, "__add__($self, value, /)\n--\n\nReturn self+value."), SQSLOT("__mul__", sq_repeat, NULL, wrap_indexargfunc, "__mul__($self, value, /)\n--\n\nReturn self*value."), SQSLOT("__rmul__", sq_repeat, NULL, wrap_indexargfunc, "__rmul__($self, value, /)\n--\n\nReturn value*self."), SQSLOT("__getitem__", sq_item, slot_sq_item, wrap_sq_item, "__getitem__($self, key, /)\n--\n\nReturn self[key]."), SQSLOT("__setitem__", sq_ass_item, slot_sq_ass_item, wrap_sq_setitem, "__setitem__($self, key, value, /)\n--\n\nSet self[key] to value."), SQSLOT("__delitem__", sq_ass_item, slot_sq_ass_item, wrap_sq_delitem, "__delitem__($self, key, /)\n--\n\nDelete self[key]."), SQSLOT("__contains__", sq_contains, slot_sq_contains, wrap_objobjproc, "__contains__($self, key, /)\n--\n\nReturn key in self."), SQSLOT("__iadd__", sq_inplace_concat, NULL, wrap_binaryfunc, "__iadd__($self, value, /)\n--\n\nImplement self+=value."), SQSLOT("__imul__", sq_inplace_repeat, NULL, wrap_indexargfunc, "__imul__($self, value, /)\n--\n\nImplement self*=value."), {NULL} }; /* Given a type pointer and an offset gotten from a slotdef entry, return a pointer to the actual slot. This is not quite the same as simply adding the offset to the type pointer, since it takes care to indirect through the proper indirection pointer (as_buffer, etc.); it returns NULL if the indirection pointer is NULL. */ static void ** slotptr(PyTypeObject *type, int ioffset) { char *ptr; long offset = ioffset; /* Note: this depends on the order of the members of PyHeapTypeObject! */ assert(offset >= 0); assert((size_t)offset < offsetof(PyHeapTypeObject, as_buffer)); if ((size_t)offset >= offsetof(PyHeapTypeObject, as_sequence)) { ptr = (char *)type->tp_as_sequence; offset -= offsetof(PyHeapTypeObject, as_sequence); } else if ((size_t)offset >= offsetof(PyHeapTypeObject, as_mapping)) { ptr = (char *)type->tp_as_mapping; offset -= offsetof(PyHeapTypeObject, as_mapping); } else if ((size_t)offset >= offsetof(PyHeapTypeObject, as_number)) { ptr = (char *)type->tp_as_number; offset -= offsetof(PyHeapTypeObject, as_number); } else if ((size_t)offset >= offsetof(PyHeapTypeObject, as_async)) { ptr = (char *)type->tp_as_async; offset -= offsetof(PyHeapTypeObject, as_async); } else { ptr = (char *)type; } if (ptr != NULL) ptr += offset; return (void **)ptr; } /* Length of array of slotdef pointers used to store slots with the same __name__. There should be at most MAX_EQUIV-1 slotdef entries with the same __name__, for any __name__. Since that's a static property, it is appropriate to declare fixed-size arrays for this. */ #define MAX_EQUIV 10 /* Return a slot pointer for a given name, but ONLY if the attribute has exactly one slot function. The name must be an interned string. */ static void ** resolve_slotdups(PyTypeObject *type, PyObject *name) { /* XXX Maybe this could be optimized more -- but is it worth it? */ /* pname and ptrs act as a little cache */ static PyObject *pname; static slotdef *ptrs[MAX_EQUIV]; slotdef *p, **pp; void **res, **ptr; if (pname != name) { /* Collect all slotdefs that match name into ptrs. */ pname = name; pp = ptrs; for (p = slotdefs; p->name_strobj; p++) { if (p->name_strobj == name) *pp++ = p; } *pp = NULL; } /* Look in all matching slots of the type; if exactly one of these has a filled-in slot, return its value. Otherwise return NULL. 
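   Example (my note): for name == '__len__' the candidates are mp_length and
   sq_length; if the type fills in only one of them, that slot's address is
   returned, but if both are filled the answer is ambiguous and NULL is
   returned, which makes update_one_slot() consider the generic dispatcher.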
*/
    res = NULL;
    for (pp = ptrs; *pp; pp++) {
        ptr = slotptr(type, (*pp)->offset);
        // Terminology (my note): fields such as 'tp_as_number' and
        // 'tp_as_async' in a type object are slot groups, and each member of
        // a slot group, e.g. 'am_await' in 'PyAsyncMethods', is an individual
        // slot.
        // ptr == NULL means the slot group this slot belongs to is NULL in
        // this type; *ptr == NULL means this particular slot is NULL.
        if (ptr == NULL || *ptr == NULL)
            continue;
        // res != NULL means the type fills in at least two slots that share
        // this name, so there is no unique answer.
        if (res != NULL)
            return NULL;
        res = ptr;
    }
    return res;
}

/* Common code for update_slots_callback() and fixup_slot_dispatchers().

   This does some incredibly complex thinking and then sticks something into
   the slot.  (It sees if the adjacent slotdefs for the same slot have
   conflicting interests, and then stores a generic wrapper or a specific
   function into the slot.)  Return a pointer to the next slotdef with a
   different offset, because that's convenient for fixup_slot_dispatchers(). */
static slotdef *
update_one_slot(PyTypeObject *type, slotdef *p)
{
    PyObject *descr;
    PyWrapperDescrObject *d;
    void *generic = NULL, *specific = NULL;
    int use_generic = 0;
    int offset = p->offset;
    int error;
    void **ptr = slotptr(type, offset);

    if (ptr == NULL) {
        do {
            ++p;
        } while (p->offset == offset);
        return p;
    }
    /* We may end up clearing live exceptions below, so make sure it's ours. */
    assert(!PyErr_Occurred());
    do {
        /* Use faster uncached lookup as we won't get any cache hits during
           type setup. */
        descr = find_name_in_mro(type, p->name_strobj, &error);
        if (descr == NULL) {
            if (error == -1) {
                /* It is unlikely but not impossible that there has been an
                   exception during lookup. Since this function originally
                   expected no errors, we ignore them here in order to keep up
                   the interface. */
                PyErr_Clear();
            }
            if (ptr == (void**)&type->tp_iternext) {
                specific = (void *)_PyObject_NextNotImplemented;
            }
            continue;
        }
        if (Py_TYPE(descr) == &PyWrapperDescr_Type &&
            ((PyWrapperDescrObject *)descr)->d_base->name_strobj == p->name_strobj) {
            void **tptr = resolve_slotdups(type, p->name_strobj);
            if (tptr == NULL || tptr == ptr)
                // Tentatively plan to install the generic slot_* dispatcher
                // for this slot.
                generic = p->function;
            d = (PyWrapperDescrObject *)descr;
            // The wrapper descriptor must have been created for an ancestor
            // of this type and must use the same wrapper function as this
            // slotdef entry (think about why the two wrappers have to match).
            if (d->d_base->wrapper == p->wrapper &&
                PyType_IsSubtype(type, PyDescr_TYPE(d))) {
                if (specific == NULL || specific == d->d_wrapped)
                    // Use the wrapped C slot function taken from the ancestor
                    // type; this overrides the tentative choice of the
                    // generic dispatcher above.
                    specific = d->d_wrapped;
                else
                    use_generic = 1;
            }
        }
        else if (Py_TYPE(descr) == &PyCFunction_Type &&
                 PyCFunction_GET_FUNCTION(descr) ==
                 (PyCFunction)tp_new_wrapper &&
                 ptr == (void**)&type->tp_new)
        {
            /* The __new__ wrapper is not a wrapper descriptor, so must be
               special-cased differently. If we don't do this, creating an
               instance will always use slot_tp_new which will look up __new__
               in the MRO which will call tp_new_wrapper which will look
               through the base classes looking for a static base and call its
               tp_new (usually PyType_GenericNew), after performing various
               sanity checks and constructing a new argument list. Cut all
               that nonsense short -- this speeds up instance creation
               tremendously. */
            specific = (void *)type->tp_new;
            /* XXX I'm not 100% sure that there isn't a hole in this reasoning
               that requires additional sanity checks.
I'll buy the first person to point out a bug in this reasoning a beer. */ } else if (descr == Py_None && ptr == (void**)&type->tp_hash) { /* We specifically allow __hash__ to be set to None to prevent inheritance of the default implementation from object.__hash__ */ specific = (void *)PyObject_HashNotImplemented; } else { use_generic = 1; generic = p->function; } } while ((++p)->offset == offset); if (specific && !use_generic) *ptr = specific; else *ptr = generic; return p; } /* In the type, update the slots whose slotdefs are gathered in the pp array. This is a callback for update_subclasses(). */ static int update_slots_callback(PyTypeObject *type, void *data) { slotdef **pp = (slotdef **)data; for (; *pp; pp++) update_one_slot(type, *pp); return 0; } static int slotdefs_initialized = 0; /* Initialize the slotdefs table by adding interned string objects for the names. */ static void init_slotdefs(void) { slotdef *p; if (slotdefs_initialized) return; for (p = slotdefs; p->name; p++) { /* Slots must be ordered by their offset in the PyHeapTypeObject. */ assert(!p[1].name || p->offset <= p[1].offset); p->name_strobj = PyUnicode_InternFromString(p->name); if (!p->name_strobj || !PyUnicode_CHECK_INTERNED(p->name_strobj)) Py_FatalError("Out of memory interning slotdef names"); } slotdefs_initialized = 1; } /* Undo init_slotdefs, releasing the interned strings. */ static void clear_slotdefs(void) { slotdef *p; for (p = slotdefs; p->name; p++) { Py_CLEAR(p->name_strobj); } slotdefs_initialized = 0; } /* Update the slots after assignment to a class (type) attribute. */ static int update_slot(PyTypeObject *type, PyObject *name) { slotdef *ptrs[MAX_EQUIV]; slotdef *p; slotdef **pp; int offset; assert(PyUnicode_CheckExact(name)); assert(PyUnicode_CHECK_INTERNED(name)); /* Clear the VALID_VERSION flag of 'type' and all its subclasses. This could possibly be unified with the update_subclasses() recursion below, but carefully: they each have their own conditions on which to stop recursing into subclasses. */ PyType_Modified(type); init_slotdefs(); pp = ptrs; for (p = slotdefs; p->name; p++) { if (p->name_strobj == name) *pp++ = p; } *pp = NULL; for (pp = ptrs; *pp; pp++) { p = *pp; offset = p->offset; while (p > slotdefs && (p-1)->offset == offset) --p; *pp = p; } if (ptrs[0] == NULL) return 0; /* Not an attribute that affects any slots */ return update_subclasses(type, name, update_slots_callback, (void *)ptrs); } /* Store the proper functions in the slot dispatches at class (type) definition time, based upon which operations the class overrides in its dict. 
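   Example (my note):
       class L(list):
           def __len__(self): return 0
   update_one_slot() finds the plain Python function __len__ through L's MRO,
   so it stores the generic dispatchers slot_sq_length / slot_mp_length into
   L's sq_length / mp_length, and len(L()) goes through the Python-level
   __len__.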
*/ static void fixup_slot_dispatchers(PyTypeObject *type) { slotdef *p; init_slotdefs(); for (p = slotdefs; p->name; ) p = update_one_slot(type, p); } static void update_all_slots(PyTypeObject* type) { slotdef *p; init_slotdefs(); for (p = slotdefs; p->name; p++) { /* update_slot returns int but can't actually fail */ update_slot(type, p->name_strobj); } } /* Call __set_name__ on all descriptors in a newly generated type */ static int set_names(PyTypeObject *type) { PyObject *names_to_set, *key, *value, *set_name, *tmp; Py_ssize_t i = 0; names_to_set = PyDict_Copy(type->tp_dict); if (names_to_set == NULL) return -1; while (PyDict_Next(names_to_set, &i, &key, &value)) { set_name = _PyObject_LookupSpecial(value, &PyId___set_name__); if (set_name != NULL) { tmp = PyObject_CallFunctionObjArgs(set_name, type, key, NULL); Py_DECREF(set_name); if (tmp == NULL) { _PyErr_FormatFromCause(PyExc_RuntimeError, "Error calling __set_name__ on '%.100s' instance %R " "in '%.100s'", value->ob_type->tp_name, key, type->tp_name); Py_DECREF(names_to_set); return -1; } else Py_DECREF(tmp); } else if (PyErr_Occurred()) { Py_DECREF(names_to_set); return -1; } } Py_DECREF(names_to_set); return 0; } /* Call __init_subclass__ on the parent of a newly generated type */ static int init_subclass(PyTypeObject *type, PyObject *kwds) { PyObject *super, *func, *result; PyObject *args[2] = {(PyObject *)type, (PyObject *)type}; super = _PyObject_FastCall((PyObject *)&PySuper_Type, args, 2); if (super == NULL) { return -1; } func = _PyObject_GetAttrId(super, &PyId___init_subclass__); Py_DECREF(super); if (func == NULL) { return -1; } result = _PyObject_FastCallDict(func, NULL, 0, kwds); Py_DECREF(func); if (result == NULL) { return -1; } Py_DECREF(result); return 0; } /* recurse_down_subclasses() and update_subclasses() are mutually recursive functions to call a callback for all subclasses, but refraining from recursing into subclasses that define 'name'. */ static int update_subclasses(PyTypeObject *type, PyObject *name, update_callback callback, void *data) { if (callback(type, data) < 0) return -1; return recurse_down_subclasses(type, name, callback, data); } static int recurse_down_subclasses(PyTypeObject *type, PyObject *name, update_callback callback, void *data) { PyTypeObject *subclass; PyObject *ref, *subclasses, *dict; Py_ssize_t i; subclasses = type->tp_subclasses; if (subclasses == NULL) return 0; assert(PyDict_CheckExact(subclasses)); i = 0; while (PyDict_Next(subclasses, &i, NULL, &ref)) { assert(PyWeakref_CheckRef(ref)); subclass = (PyTypeObject *)PyWeakref_GET_OBJECT(ref); assert(subclass != NULL); if ((PyObject *)subclass == Py_None) continue; assert(PyType_Check(subclass)); /* Avoid recursing down into unaffected classes */ dict = subclass->tp_dict; if (dict != NULL && PyDict_Check(dict) && PyDict_GetItem(dict, name) != NULL) continue; if (update_subclasses(subclass, name, callback, data) < 0) return -1; } return 0; } /* This function is called by PyType_Ready() to populate the type's dictionary with method descriptors for function slots. For each function slot (like tp_repr) that's defined in the type, one or more corresponding descriptors are added in the type's tp_dict dictionary under the appropriate name (like __repr__). Some function slots cause more than one descriptor to be added (for example, the nb_add slot adds both __add__ and __radd__ descriptors) and some function slots compete for the same descriptor (for example both sq_item and mp_subscript generate a __getitem__ descriptor). 
In the latter case, the first slotdef entry encountered wins.  Since slotdef
   entries are sorted by the offset of the slot in the PyHeapTypeObject, this
   gives us some control over disambiguating between competing slots: the
   members of PyHeapTypeObject are listed from most general to least general,
   so the most general slot is preferred.  In particular, because as_mapping
   comes before as_sequence, for a type that defines both mp_subscript and
   sq_item, mp_subscript wins.

   This only adds new descriptors and doesn't overwrite entries in tp_dict
   that were previously defined.  The descriptors contain a reference to the C
   function they must call, so that it's safe if they are copied into a
   subtype's __dict__ and the subtype has a different C function in its slot
   -- calling the method defined by the descriptor will call the C function
   that was used to create it, rather than the C function present in the slot
   when it is called.  (This is important because a subtype may have a C
   function in the slot that calls the method from the dictionary, and we want
   to avoid infinite recursion here.) */

static int
add_operators(PyTypeObject *type)
{
    PyObject *dict = type->tp_dict;
    slotdef *p;
    PyObject *descr;
    void **ptr;

    init_slotdefs();
    for (p = slotdefs; p->name; p++) {
        if (p->wrapper == NULL)
            continue;
        // When 'ptr' is NULL, this type does not provide the whole slot group
        // (I think of tp_as_number, tp_as_mapping and tp_as_sequence as slot
        // groups, since each of them contains more than one slot); for
        // example, 'tp_as_number' of the list type is NULL.  When *ptr is
        // non-NULL, the type has its own implementation of that particular
        // slot, so a descriptor for it should be exposed.
        ptr = slotptr(type, p->offset);
        if (!ptr || !*ptr)
            continue;
        if (PyDict_GetItem(dict, p->name_strobj))
            continue;
        if (*ptr == (void *)PyObject_HashNotImplemented) {
            /* Classes may prevent the inheritance of the tp_hash slot by
               storing PyObject_HashNotImplemented in it. Make it visible as a
               None value for the __hash__ attribute. */
            if (PyDict_SetItem(dict, p->name_strobj, Py_None) < 0)
                return -1;
        }
        else {
            // 'type' is the type being readied, 'p' is the slotdef entry, and
            // *ptr is the C slot function to wrap.
            descr = PyDescr_NewWrapper(type, p, *ptr);
            if (descr == NULL)
                return -1;
            if (PyDict_SetItem(dict, p->name_strobj, descr) < 0) {
                Py_DECREF(descr);
                return -1;
            }
            Py_DECREF(descr);
        }
    }
    if (type->tp_new != NULL) {
        if (add_tp_new_wrapper(type) < 0)
            return -1;
    }
    return 0;
}


/* Cooperative 'super' */

typedef struct {
    PyObject_HEAD
    PyTypeObject *type;
    PyObject *obj;
    PyTypeObject *obj_type;
} superobject;

static PyMemberDef super_members[] = {
    {"__thisclass__", T_OBJECT, offsetof(superobject, type), READONLY,
     "the class invoking super()"},
    {"__self__", T_OBJECT, offsetof(superobject, obj), READONLY,
     "the instance invoking super(); may be None"},
    {"__self_class__", T_OBJECT, offsetof(superobject, obj_type), READONLY,
     "the type of the instance invoking super(); may be None"},
    {0}
};

static void
super_dealloc(PyObject *self)
{
    superobject *su = (superobject *)self;

    _PyObject_GC_UNTRACK(self);
    Py_XDECREF(su->obj);
    Py_XDECREF(su->type);
    Py_XDECREF(su->obj_type);
    Py_TYPE(self)->tp_free(self);
}

static PyObject *
super_repr(PyObject *self)
{
    superobject *su = (superobject *)self;

    if (su->obj_type)
        return PyUnicode_FromFormat(
            "<super: <class '%s'>, <%s object>>",
            su->type ? su->type->tp_name : "NULL",
            su->obj_type->tp_name);
    else
        return PyUnicode_FromFormat(
            "<super: <class '%s'>, NULL>",
            su->type ?
su->type->tp_name : "NULL"); } static PyObject * super_getattro(PyObject *self, PyObject *name) { superobject *su = (superobject *)self; PyTypeObject *starttype; PyObject *mro; Py_ssize_t i, n; starttype = su->obj_type; if (starttype == NULL) goto skip; /* We want __class__ to return the class of the super object (i.e. super, or a subclass), not the class of su->obj. */ if (PyUnicode_Check(name) && PyUnicode_GET_LENGTH(name) == 9 && _PyUnicode_EqualToASCIIId(name, &PyId___class__)) goto skip; mro = starttype->tp_mro; if (mro == NULL) goto skip; assert(PyTuple_Check(mro)); n = PyTuple_GET_SIZE(mro); /* No need to check the last one: it's gonna be skipped anyway. */ for (i = 0; i+1 < n; i++) { if ((PyObject *)(su->type) == PyTuple_GET_ITEM(mro, i)) break; } i++; /* skip su->type (if any) */ if (i >= n) goto skip; /* keep a strong reference to mro because starttype->tp_mro can be replaced during PyDict_GetItem(dict, name) */ Py_INCREF(mro); do { PyObject *res, *tmp, *dict; descrgetfunc f; tmp = PyTuple_GET_ITEM(mro, i); assert(PyType_Check(tmp)); dict = ((PyTypeObject *)tmp)->tp_dict; assert(dict != NULL && PyDict_Check(dict)); res = PyDict_GetItem(dict, name); if (res != NULL) { Py_INCREF(res); f = Py_TYPE(res)->tp_descr_get; if (f != NULL) { tmp = f(res, /* Only pass 'obj' param if this is instance-mode super (See SF ID #743627) */ (su->obj == (PyObject *)starttype) ? NULL : su->obj, (PyObject *)starttype); Py_DECREF(res); res = tmp; } Py_DECREF(mro); return res; } i++; } while (i < n); Py_DECREF(mro); skip: return PyObject_GenericGetAttr(self, name); } static PyTypeObject * supercheck(PyTypeObject *type, PyObject *obj) { /* Check that a super() call makes sense. Return a type object. obj can be a class, or an instance of one: - If it is a class, it must be a subclass of 'type'. This case is used for class methods; the return value is obj. - If it is an instance, it must be an instance of 'type'. This is the normal case; the return value is obj.__class__. But... when obj is an instance, we want to allow for the case where Py_TYPE(obj) is not a subclass of type, but obj.__class__ is! This will allow using super() with a proxy for obj. 
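   Example (my note): an object whose __class__ attribute reports the wrapped
   instance's class -- e.g. a weakref.proxy around an instance of the class --
   is still accepted as the second argument to super(), because the slow path
   below consults obj.__class__ when Py_TYPE(obj) itself is not a subclass.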
*/ /* Check for first bullet above (special case) */ if (PyType_Check(obj) && PyType_IsSubtype((PyTypeObject *)obj, type)) { Py_INCREF(obj); return (PyTypeObject *)obj; } /* Normal case */ if (PyType_IsSubtype(Py_TYPE(obj), type)) { Py_INCREF(Py_TYPE(obj)); return Py_TYPE(obj); } else { /* Try the slow way */ PyObject *class_attr; class_attr = _PyObject_GetAttrId(obj, &PyId___class__); if (class_attr != NULL && PyType_Check(class_attr) && (PyTypeObject *)class_attr != Py_TYPE(obj)) { int ok = PyType_IsSubtype( (PyTypeObject *)class_attr, type); if (ok) return (PyTypeObject *)class_attr; } if (class_attr == NULL) PyErr_Clear(); else Py_DECREF(class_attr); } PyErr_SetString(PyExc_TypeError, "super(type, obj): " "obj must be an instance or subtype of type"); return NULL; } static PyObject * super_descr_get(PyObject *self, PyObject *obj, PyObject *type) { superobject *su = (superobject *)self; superobject *newobj; if (obj == NULL || obj == Py_None || su->obj != NULL) { /* Not binding to an object, or already bound */ Py_INCREF(self); return self; } if (Py_TYPE(su) != &PySuper_Type) /* If su is an instance of a (strict) subclass of super, call its type */ return PyObject_CallFunctionObjArgs((PyObject *)Py_TYPE(su), su->type, obj, NULL); else { /* Inline the common case */ PyTypeObject *obj_type = supercheck(su->type, obj); if (obj_type == NULL) return NULL; newobj = (superobject *)PySuper_Type.tp_new(&PySuper_Type, NULL, NULL); if (newobj == NULL) return NULL; Py_INCREF(su->type); Py_INCREF(obj); newobj->type = su->type; newobj->obj = obj; newobj->obj_type = obj_type; return (PyObject *)newobj; } } static int super_init(PyObject *self, PyObject *args, PyObject *kwds) { superobject *su = (superobject *)self; PyTypeObject *type = NULL; PyObject *obj = NULL; PyTypeObject *obj_type = NULL; if (!_PyArg_NoKeywords("super", kwds)) return -1; if (!PyArg_ParseTuple(args, "|O!O:super", &PyType_Type, &type, &obj)) return -1; if (type == NULL) { /* Call super(), without args -- fill in from __class__ and first local variable on the stack. */ PyFrameObject *f; PyCodeObject *co; Py_ssize_t i, n; f = PyThreadState_GET()->frame; if (f == NULL) { PyErr_SetString(PyExc_RuntimeError, "super(): no current frame"); return -1; } co = f->f_code; if (co == NULL) { PyErr_SetString(PyExc_RuntimeError, "super(): no code object"); return -1; } if (co->co_argcount == 0) { PyErr_SetString(PyExc_RuntimeError, "super(): no arguments"); return -1; } obj = f->f_localsplus[0]; if (obj == NULL && co->co_cell2arg) { /* The first argument might be a cell. 
*/ n = PyTuple_GET_SIZE(co->co_cellvars); for (i = 0; i < n; i++) { if (co->co_cell2arg[i] == 0) { PyObject *cell = f->f_localsplus[co->co_nlocals + i]; assert(PyCell_Check(cell)); obj = PyCell_GET(cell); break; } } } if (obj == NULL) { PyErr_SetString(PyExc_RuntimeError, "super(): arg[0] deleted"); return -1; } if (co->co_freevars == NULL) n = 0; else { assert(PyTuple_Check(co->co_freevars)); n = PyTuple_GET_SIZE(co->co_freevars); } for (i = 0; i < n; i++) { PyObject *name = PyTuple_GET_ITEM(co->co_freevars, i); assert(PyUnicode_Check(name)); if (_PyUnicode_EqualToASCIIId(name, &PyId___class__)) { Py_ssize_t index = co->co_nlocals + PyTuple_GET_SIZE(co->co_cellvars) + i; PyObject *cell = f->f_localsplus[index]; if (cell == NULL || !PyCell_Check(cell)) { PyErr_SetString(PyExc_RuntimeError, "super(): bad __class__ cell"); return -1; } type = (PyTypeObject *) PyCell_GET(cell); if (type == NULL) { PyErr_SetString(PyExc_RuntimeError, "super(): empty __class__ cell"); return -1; } if (!PyType_Check(type)) { PyErr_Format(PyExc_RuntimeError, "super(): __class__ is not a type (%s)", Py_TYPE(type)->tp_name); return -1; } break; } } if (type == NULL) { PyErr_SetString(PyExc_RuntimeError, "super(): __class__ cell not found"); return -1; } } if (obj == Py_None) obj = NULL; if (obj != NULL) { obj_type = supercheck(type, obj); if (obj_type == NULL) return -1; Py_INCREF(obj); } Py_INCREF(type); Py_XSETREF(su->type, type); Py_XSETREF(su->obj, obj); Py_XSETREF(su->obj_type, obj_type); return 0; } PyDoc_STRVAR(super_doc, "super() -> same as super(__class__, <first argument>)\n" "super(type) -> unbound super object\n" "super(type, obj) -> bound super object; requires isinstance(obj, type)\n" "super(type, type2) -> bound super object; requires issubclass(type2, type)\n" "Typical use to call a cooperative superclass method:\n" "class C(B):\n" " def meth(self, arg):\n" " super().meth(arg)\n" "This works for class methods too:\n" "class C(B):\n" " @classmethod\n" " def cmeth(cls, arg):\n" " super().cmeth(arg)\n"); static int super_traverse(PyObject *self, visitproc visit, void *arg) { superobject *su = (superobject *)self; Py_VISIT(su->obj); Py_VISIT(su->type); Py_VISIT(su->obj_type); return 0; } PyTypeObject PySuper_Type = { PyVarObject_HEAD_INIT(&PyType_Type, 0) "super", /* tp_name */ sizeof(superobject), /* tp_basicsize */ 0, /* tp_itemsize */ /* methods */ super_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_reserved */ super_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ super_getattro, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, /* tp_flags */ super_doc, /* tp_doc */ super_traverse, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ 0, /* tp_methods */ super_members, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ super_descr_get, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ super_init, /* tp_init */ PyType_GenericAlloc, /* tp_alloc */ PyType_GenericNew, /* tp_new */ PyObject_GC_Del, /* tp_free */ };
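/*
 * Hedged illustration, not part of the original source above: a minimal
 * embedding program showing the behaviour implemented by super_init() and
 * super_getattro() -- zero-argument super() is filled in from the __class__
 * cell and the first local variable of the calling frame. The embedded
 * script and the class names are assumptions chosen only for demonstration.
 */
#include <Python.h>

int main(void)
{
    Py_Initialize();
    PyRun_SimpleString(
        "class B:\n"
        "    def meth(self):\n"
        "        return 'B.meth'\n"
        "class C(B):\n"
        "    def meth(self):\n"
        "        # zero-argument form: type and obj come from the frame\n"
        "        return 'C -> ' + super().meth()\n"
        "print(C().meth())\n");
    return Py_FinalizeEx();
}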
3636.c
/* gsl_histogram2d_oper.c * Copyright (C) 2000 Simone Piccardi * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ /*************************************************************** * * File gsl_histogram2d_oper.c: * Routine to make operation on 2D histograms. * Need GSL library and header. * Contains the routines: * gsl_histogram2d_same_binning check if two histograms have the same binning * gsl_histogram2d_add add two histogram * gsl_histogram2d_sub subctract two histogram * gsl_histogram2d_mult multiply two histogram * gsl_histogram2d_div divide two histogram * gsl_histogram2d_scale scale histogram contents * * Author: S. Piccardi * Jan. 2000 * ***************************************************************/ #include <config.h> #include <stdlib.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_histogram2d.h> /* * gsl_histogram2d_same_binning: * control if two histogram have the * same binning */ int gsl_histogram2d_equal_bins_p (const gsl_histogram2d * h1, const gsl_histogram2d * h2) { if ((h1->nx != h2->nx) || (h1->ny != h2->ny)) { return 0; } { size_t i; /* init ranges */ for (i = 0; i <= (h1->nx); i++) { if (h1->xrange[i] != h2->xrange[i]) { return 0; } } for (i = 0; i <= (h1->ny); i++) { if (h1->yrange[i] != h2->yrange[i]) { return 0; } } } return 1; } /* * gsl_histogram2d_add: * add two histogram */ int gsl_histogram2d_add (gsl_histogram2d * h1, const gsl_histogram2d * h2) { size_t i; if (!gsl_histogram2d_equal_bins_p (h1, h2)) { GSL_ERROR ("histograms have different binning", GSL_EINVAL); } for (i = 0; i < (h1->nx) * (h1->ny); i++) { h1->bin[i] += h2->bin[i]; } return GSL_SUCCESS; } /* * gsl_histogram2d_sub: * subtract two histogram */ int gsl_histogram2d_sub (gsl_histogram2d * h1, const gsl_histogram2d * h2) { size_t i; if (!gsl_histogram2d_equal_bins_p (h1, h2)) { GSL_ERROR ("histograms have different binning", GSL_EINVAL); } for (i = 0; i < (h1->nx) * (h1->ny); i++) { h1->bin[i] -= h2->bin[i]; } return GSL_SUCCESS; } /* * gsl_histogram2d_mult: * multiply two histogram */ int gsl_histogram2d_mul (gsl_histogram2d * h1, const gsl_histogram2d * h2) { size_t i; if (!gsl_histogram2d_equal_bins_p (h1, h2)) { GSL_ERROR ("histograms have different binning", GSL_EINVAL); } for (i = 0; i < (h1->nx) * (h1->ny); i++) { h1->bin[i] *= h2->bin[i]; } return GSL_SUCCESS; } /* * gsl_histogram2d_div: * divide two histogram */ int gsl_histogram2d_div (gsl_histogram2d * h1, const gsl_histogram2d * h2) { size_t i; if (!gsl_histogram2d_equal_bins_p (h1, h2)) { GSL_ERROR ("histograms have different binning", GSL_EINVAL); } for (i = 0; i < (h1->nx) * (h1->ny); i++) { h1->bin[i] /= h2->bin[i]; } return GSL_SUCCESS; } /* * gsl_histogram2d_scale: * scale a histogram by a numeric factor */ int gsl_histogram2d_scale (gsl_histogram2d * h, double scale) { size_t i; for (i = 0; i < (h->nx) * (h->ny); i++) { h->bin[i] *= scale; } return GSL_SUCCESS; } /* * 
gsl_histogram2d_shift: * shift a histogram by a numeric offset */ int gsl_histogram2d_shift (gsl_histogram2d * h, double shift) { size_t i; for (i = 0; i < (h->nx) * (h->ny); i++) { h->bin[i] += shift; } return GSL_SUCCESS; }
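/*
 * Hedged usage sketch, not part of gsl_histogram2d_oper.c: shows how the
 * element-wise operations above require identical binning in both operands.
 * The bin counts, ranges and sample points are arbitrary assumptions.
 */
#include <stdio.h>
#include <gsl/gsl_histogram2d.h>

int main(void)
{
    gsl_histogram2d *a = gsl_histogram2d_alloc(10, 10);
    gsl_histogram2d *b = gsl_histogram2d_alloc(10, 10);

    /* Identical ranges, so gsl_histogram2d_equal_bins_p() succeeds. */
    gsl_histogram2d_set_ranges_uniform(a, 0.0, 1.0, 0.0, 1.0);
    gsl_histogram2d_set_ranges_uniform(b, 0.0, 1.0, 0.0, 1.0);

    gsl_histogram2d_increment(a, 0.25, 0.75);
    gsl_histogram2d_increment(b, 0.25, 0.75);

    gsl_histogram2d_add(a, b);      /* a(i,j) += b(i,j)            */
    gsl_histogram2d_scale(a, 0.5);  /* every bin multiplied by 0.5 */

    printf("total content: %g\n", gsl_histogram2d_sum(a));

    gsl_histogram2d_free(a);
    gsl_histogram2d_free(b);
    return 0;
}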
223717.c
/* * This file is part of LibCSS * Licensed under the MIT License, * http://www.opensource.org/licenses/mit-license.php * Copyright 2009 John-Mark Bell <[email protected]> */ #include "bytecode/bytecode.h" #include "bytecode/opcodes.h" #include "select/propset.h" #include "select/propget.h" #include "utils/utils.h" #include "select/properties/properties.h" #include "select/properties/helpers.h" css_error css__cascade_widows(uint32_t opv, css_style *style, css_select_state *state) { return css__cascade_number(opv, style, state, set_widows); } css_error css__set_widows_from_hint(const css_hint *hint, css_computed_style *style) { return set_widows(style, hint->status, hint->data.integer); } css_error css__initial_widows(css_select_state *state) { return set_widows(state->computed, CSS_WIDOWS_SET, 2); } css_error css__compose_widows(const css_computed_style *parent, const css_computed_style *child, css_computed_style *result) { int32_t count = 0; uint8_t type = get_widows(child, &count); if (type == CSS_WIDOWS_INHERIT) { type = get_widows(parent, &count); } return set_widows(result, type, count); }
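/*
 * Hedged, self-contained sketch, not LibCSS API: mirrors the compose pattern
 * used by css__compose_widows() above, where a child value of "inherit" is
 * resolved by falling back to the parent's computed value. The enum, struct
 * and function names below are invented for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

enum { DEMO_WIDOWS_INHERIT = 0, DEMO_WIDOWS_SET = 1 };

struct demo_style {
    uint8_t widows_type;
    int32_t widows_count;
};

static void demo_compose_widows(const struct demo_style *parent,
                                const struct demo_style *child,
                                struct demo_style *result)
{
    uint8_t type = child->widows_type;
    int32_t count = child->widows_count;

    if (type == DEMO_WIDOWS_INHERIT) {  /* fall back to the parent value */
        type = parent->widows_type;
        count = parent->widows_count;
    }

    result->widows_type = type;
    result->widows_count = count;
}

int main(void)
{
    struct demo_style parent = { DEMO_WIDOWS_SET, 3 };
    struct demo_style child  = { DEMO_WIDOWS_INHERIT, 0 };
    struct demo_style out;

    demo_compose_widows(&parent, &child, &out);
    printf("composed widows = %d\n", (int)out.widows_count); /* prints 3 */
    return 0;
}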
204051.c
/***************************************************************************** * Test cases for libxlsxwriter. * * Test to compare output against Excel files. * * Copyright 2014-2019, John McNamara, [email protected] * */ #include "xlsxwriter.h" int main() { lxw_workbook *workbook = workbook_new("test_protect03.xlsx"); lxw_worksheet *worksheet = workbook_add_worksheet(workbook, NULL); lxw_format *unlocked = workbook_add_format(workbook); format_set_unlocked(unlocked); lxw_format *hidden = workbook_add_format(workbook); format_set_unlocked(hidden); format_set_hidden(hidden); worksheet_protect(worksheet, "password", NULL); worksheet_write_number(worksheet, CELL("A1"), 1 , NULL); worksheet_write_number(worksheet, CELL("A2"), 2, unlocked); worksheet_write_number(worksheet, CELL("A3"), 3, hidden); return workbook_close(workbook); }
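/*
 * Hedged variant, not one of the upstream test cases: the same protection
 * flow but passing an explicit lxw_protection options struct instead of
 * NULL, so selected actions remain allowed on the locked sheet. The output
 * file name and the chosen options are assumptions for illustration.
 */
#include "xlsxwriter.h"

int main() {
    lxw_workbook  *workbook  = workbook_new("demo_protect_options.xlsx");
    lxw_worksheet *worksheet = workbook_add_worksheet(workbook, NULL);

    lxw_format *unlocked = workbook_add_format(workbook);
    format_set_unlocked(unlocked);

    /* Keep cell formatting and row insertion available despite the lock. */
    lxw_protection options = {0};
    options.format_cells = 1;
    options.insert_rows  = 1;

    worksheet_protect(worksheet, "password", &options);

    worksheet_write_number(worksheet, CELL("A1"), 1, NULL);
    worksheet_write_number(worksheet, CELL("A2"), 2, unlocked);

    return workbook_close(workbook);
}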
425069.c
/* * The Clear BSD License * Copyright (c) 2017, NXP Semiconductors, Inc. * All rights reserved. * * * Redistribution and use in source and binary forms, with or without modification, * are permitted (subject to the limitations in the disclaimer below) provided * that the following conditions are met: * * o Redistributions of source code must retain the above copyright notice, this list * of conditions and the following disclaimer. * * o Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or * other materials provided with the distribution. * * o Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "fsl_csi.h" /******************************************************************************* * Definitions ******************************************************************************/ /* Component ID definition, used by tools. */ #ifndef FSL_COMPONENT_ID #define FSL_COMPONENT_ID "platform.drivers.csi" #endif /* Two frame buffer loaded to CSI register at most. */ #define CSI_MAX_ACTIVE_FRAME_NUM 2 /******************************************************************************* * Prototypes ******************************************************************************/ /*! * @brief Get the instance from the base address * * @param base CSI peripheral base address * * @return The CSI module instance */ static uint32_t CSI_GetInstance(CSI_Type *base); /*! * @brief Get the delta value of two index in queue. * * @param startIdx Start index. * @param endIdx End index. * * @return The delta between startIdx and endIdx in queue. */ static uint32_t CSI_TransferGetQueueDelta(uint32_t startIdx, uint32_t endIdx); /*! * @brief Increase a index value in queue. * * This function increases the index value in the queue, if the index is out of * the queue range, it is reset to 0. * * @param idx The index value to increase. * * @return The index value after increase. */ static uint32_t CSI_TransferIncreaseQueueIdx(uint32_t idx); /*! * @brief Get the empty frame buffer count in queue. * * @param base CSI peripheral base address * @param handle Pointer to CSI driver handle. * * @return Number of the empty frame buffer count in queue. */ static uint32_t CSI_TransferGetEmptyBufferCount(CSI_Type *base, csi_handle_t *handle); /*! * @brief Load one empty frame buffer in queue to CSI module. * * Load one empty frame in queue to CSI module, this function could only be called * when there is empty frame buffer in queue. 
* * @param base CSI peripheral base address * @param handle Pointer to CSI driver handle. */ static void CSI_TransferLoadBufferToDevice(CSI_Type *base, csi_handle_t *handle); /* Typedef for interrupt handler. */ typedef void (*csi_isr_t)(CSI_Type *base, csi_handle_t *handle); /******************************************************************************* * Variables ******************************************************************************/ /*! @brief Pointers to CSI bases for each instance. */ static CSI_Type *const s_csiBases[] = CSI_BASE_PTRS; #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) /*! @brief Pointers to CSI clocks for each CSI submodule. */ static const clock_ip_name_t s_csiClocks[] = CSI_CLOCKS; #endif /* FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL */ /* Array for the CSI driver handle. */ static csi_handle_t *s_csiHandle[ARRAY_SIZE(s_csiBases)]; /* Array of CSI IRQ number. */ static const IRQn_Type s_csiIRQ[] = CSI_IRQS; /* CSI ISR for transactional APIs. */ static csi_isr_t s_csiIsr; /******************************************************************************* * Code ******************************************************************************/ static uint32_t CSI_GetInstance(CSI_Type *base) { uint32_t instance; /* Find the instance index from base address mappings. */ for (instance = 0; instance < ARRAY_SIZE(s_csiBases); instance++) { if (s_csiBases[instance] == base) { break; } } assert(instance < ARRAY_SIZE(s_csiBases)); return instance; } static uint32_t CSI_TransferGetQueueDelta(uint32_t startIdx, uint32_t endIdx) { if (endIdx >= startIdx) { return endIdx - startIdx; } else { return startIdx + CSI_DRIVER_ACTUAL_QUEUE_SIZE - endIdx; } } static uint32_t CSI_TransferIncreaseQueueIdx(uint32_t idx) { uint32_t ret; /* * Here not use the method: * ret = (idx+1) % CSI_DRIVER_ACTUAL_QUEUE_SIZE; * * Because the mod function might be slow. */ ret = idx + 1; if (ret >= CSI_DRIVER_ACTUAL_QUEUE_SIZE) { ret = 0; } return ret; } static uint32_t CSI_TransferGetEmptyBufferCount(CSI_Type *base, csi_handle_t *handle) { return CSI_TransferGetQueueDelta(handle->queueDrvReadIdx, handle->queueUserWriteIdx); } static void CSI_TransferLoadBufferToDevice(CSI_Type *base, csi_handle_t *handle) { /* Load the frame buffer address to CSI register. */ CSI_SetRxBufferAddr(base, handle->nextBufferIdx, handle->frameBufferQueue[handle->queueDrvReadIdx]); handle->queueDrvReadIdx = CSI_TransferIncreaseQueueIdx(handle->queueDrvReadIdx); handle->activeBufferNum++; /* There are two CSI buffers, so could use XOR to get the next index. */ handle->nextBufferIdx ^= 1U; } status_t CSI_Init(CSI_Type *base, const csi_config_t *config) { assert(config); uint32_t reg; uint32_t imgWidth_Bytes; imgWidth_Bytes = config->width * config->bytesPerPixel; /* The image width and frame buffer pitch should be multiple of 8-bytes. */ if ((imgWidth_Bytes & 0x07) | ((uint32_t)config->linePitch_Bytes & 0x07)) { return kStatus_InvalidArgument; } #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) uint32_t instance = CSI_GetInstance(base); CLOCK_EnableClock(s_csiClocks[instance]); #endif CSI_Reset(base); /* Configure CSICR1. CSICR1 has been reset to the default value, so could write it directly. */ reg = ((uint32_t)config->workMode) | config->polarityFlags | CSI_CSICR1_FCC_MASK; if (config->useExtVsync) { reg |= CSI_CSICR1_EXT_VSYNC_MASK; } base->CSICR1 = reg; /* * Generally, CSIIMAG_PARA[IMAGE_WIDTH] indicates how many data bus cycles per line. 
* One special case is when receiving 24-bit pixels through 8-bit data bus, and * CSICR3[ZERO_PACK_EN] is enabled, in this case, the CSIIMAG_PARA[IMAGE_WIDTH] * should be set to the pixel number per line. * * Currently the CSI driver only support 8-bit data bus, so generally the * CSIIMAG_PARA[IMAGE_WIDTH] is bytes number per line. When the CSICR3[ZERO_PACK_EN] * is enabled, CSIIMAG_PARA[IMAGE_WIDTH] is pixel number per line. * * NOTE: The CSIIMAG_PARA[IMAGE_WIDTH] setting code should be updated if the * driver is upgraded to support other data bus width. */ if (4U == config->bytesPerPixel) { /* Enable zero pack. */ base->CSICR3 |= CSI_CSICR3_ZERO_PACK_EN_MASK; /* Image parameter. */ base->CSIIMAG_PARA = ((uint32_t)(config->width) << CSI_CSIIMAG_PARA_IMAGE_WIDTH_SHIFT) | ((uint32_t)(config->height) << CSI_CSIIMAG_PARA_IMAGE_HEIGHT_SHIFT); } else { /* Image parameter. */ base->CSIIMAG_PARA = ((uint32_t)(imgWidth_Bytes) << CSI_CSIIMAG_PARA_IMAGE_WIDTH_SHIFT) | ((uint32_t)(config->height) << CSI_CSIIMAG_PARA_IMAGE_HEIGHT_SHIFT); } /* The CSI frame buffer bus is 8-byte width. */ base->CSIFBUF_PARA = (uint32_t)((config->linePitch_Bytes - imgWidth_Bytes) / 8U) << CSI_CSIFBUF_PARA_FBUF_STRIDE_SHIFT; /* Enable auto ECC. */ base->CSICR3 |= CSI_CSICR3_ECC_AUTO_EN_MASK; /* * For better performance. * The DMA burst size could be set to 16 * 8 byte, 8 * 8 byte, or 4 * 8 byte, * choose the best burst size based on bytes per line. */ if (!(imgWidth_Bytes % (8 * 16))) { base->CSICR2 = CSI_CSICR2_DMA_BURST_TYPE_RFF(3U); base->CSICR3 = (CSI->CSICR3 & ~CSI_CSICR3_RxFF_LEVEL_MASK) | ((2U << CSI_CSICR3_RxFF_LEVEL_SHIFT)); } else if (!(imgWidth_Bytes % (8 * 8))) { base->CSICR2 = CSI_CSICR2_DMA_BURST_TYPE_RFF(2U); base->CSICR3 = (CSI->CSICR3 & ~CSI_CSICR3_RxFF_LEVEL_MASK) | ((1U << CSI_CSICR3_RxFF_LEVEL_SHIFT)); } else { base->CSICR2 = CSI_CSICR2_DMA_BURST_TYPE_RFF(1U); base->CSICR3 = (CSI->CSICR3 & ~CSI_CSICR3_RxFF_LEVEL_MASK) | ((0U << CSI_CSICR3_RxFF_LEVEL_SHIFT)); } CSI_ReflashFifoDma(base, kCSI_RxFifo); return kStatus_Success; } void CSI_Deinit(CSI_Type *base) { /* Disable transfer first. */ CSI_Stop(base); #if !(defined(FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) && FSL_SDK_DISABLE_DRIVER_CLOCK_CONTROL) uint32_t instance = CSI_GetInstance(base); CLOCK_DisableClock(s_csiClocks[instance]); #endif } void CSI_Reset(CSI_Type *base) { uint32_t csisr; /* Disable transfer first. */ CSI_Stop(base); /* Disable DMA request. */ base->CSICR3 = 0U; /* Reset the fame count. */ base->CSICR3 |= CSI_CSICR3_FRMCNT_RST_MASK; while (base->CSICR3 & CSI_CSICR3_FRMCNT_RST_MASK) { } /* Clear the RX FIFO. */ CSI_ClearFifo(base, kCSI_AllFifo); /* Reflash DMA. */ CSI_ReflashFifoDma(base, kCSI_AllFifo); /* Clear the status. */ csisr = base->CSISR; base->CSISR = csisr; /* Set the control registers to default value. 
*/ base->CSICR1 = CSI_CSICR1_HSYNC_POL_MASK | CSI_CSICR1_EXT_VSYNC_MASK; base->CSICR2 = 0U; base->CSICR3 = 0U; #if defined(CSI_CSICR18_CSI_LCDIF_BUFFER_LINES) base->CSICR18 = CSI_CSICR18_AHB_HPROT(0x0DU) | CSI_CSICR18_CSI_LCDIF_BUFFER_LINES(0x02U); #else base->CSICR18 = CSI_CSICR18_AHB_HPROT(0x0DU); #endif base->CSIFBUF_PARA = 0U; base->CSIIMAG_PARA = 0U; } void CSI_GetDefaultConfig(csi_config_t *config) { assert(config); config->width = 320U; config->height = 240U; config->polarityFlags = kCSI_HsyncActiveHigh | kCSI_DataLatchOnRisingEdge; config->bytesPerPixel = 2U; config->linePitch_Bytes = 320U * 2U; config->workMode = kCSI_GatedClockMode; config->dataBus = kCSI_DataBus8Bit; config->useExtVsync = true; } void CSI_SetRxBufferAddr(CSI_Type *base, uint8_t index, uint32_t addr) { if (index) { base->CSIDMASA_FB2 = addr; } else { base->CSIDMASA_FB1 = addr; } } void CSI_ClearFifo(CSI_Type *base, csi_fifo_t fifo) { uint32_t cr1; uint32_t mask = 0U; /* The FIFO could only be cleared when CSICR1[FCC] = 0, so first clear the FCC. */ cr1 = base->CSICR1; base->CSICR1 = (cr1 & ~CSI_CSICR1_FCC_MASK); if ((uint32_t)fifo & (uint32_t)kCSI_RxFifo) { mask |= CSI_CSICR1_CLR_RXFIFO_MASK; } if ((uint32_t)fifo & (uint32_t)kCSI_StatFifo) { mask |= CSI_CSICR1_CLR_STATFIFO_MASK; } base->CSICR1 = (cr1 & ~CSI_CSICR1_FCC_MASK) | mask; /* Wait clear completed. */ while (base->CSICR1 & mask) { } /* Recover the FCC. */ base->CSICR1 = cr1; } void CSI_ReflashFifoDma(CSI_Type *base, csi_fifo_t fifo) { uint32_t cr3 = 0U; if ((uint32_t)fifo & (uint32_t)kCSI_RxFifo) { cr3 |= CSI_CSICR3_DMA_REFLASH_RFF_MASK; } if ((uint32_t)fifo & (uint32_t)kCSI_StatFifo) { cr3 |= CSI_CSICR3_DMA_REFLASH_SFF_MASK; } base->CSICR3 |= cr3; /* Wait clear completed. */ while (base->CSICR3 & cr3) { } } void CSI_EnableFifoDmaRequest(CSI_Type *base, csi_fifo_t fifo, bool enable) { uint32_t cr3 = 0U; if ((uint32_t)fifo & (uint32_t)kCSI_RxFifo) { cr3 |= CSI_CSICR3_DMA_REQ_EN_RFF_MASK; } if ((uint32_t)fifo & (uint32_t)kCSI_StatFifo) { cr3 |= CSI_CSICR3_DMA_REQ_EN_SFF_MASK; } if (enable) { base->CSICR3 |= cr3; } else { base->CSICR3 &= ~cr3; } } void CSI_EnableInterrupts(CSI_Type *base, uint32_t mask) { base->CSICR1 |= (mask & CSI_CSICR1_INT_EN_MASK); base->CSICR3 |= (mask & CSI_CSICR3_INT_EN_MASK); base->CSICR18 |= ((mask & CSI_CSICR18_INT_EN_MASK) >> 6U); } void CSI_DisableInterrupts(CSI_Type *base, uint32_t mask) { base->CSICR1 &= ~(mask & CSI_CSICR1_INT_EN_MASK); base->CSICR3 &= ~(mask & CSI_CSICR3_INT_EN_MASK); base->CSICR18 &= ~((mask & CSI_CSICR18_INT_EN_MASK) >> 6U); } status_t CSI_TransferCreateHandle(CSI_Type *base, csi_handle_t *handle, csi_transfer_callback_t callback, void *userData) { assert(handle); uint32_t instance; memset(handle, 0, sizeof(*handle)); /* Set the callback and user data. */ handle->callback = callback; handle->userData = userData; /* Get instance from peripheral base address. */ instance = CSI_GetInstance(base); /* Save the handle in global variables to support the double weak mechanism. */ s_csiHandle[instance] = handle; s_csiIsr = CSI_TransferHandleIRQ; /* Enable interrupt. */ EnableIRQ(s_csiIRQ[instance]); return kStatus_Success; } status_t CSI_TransferStart(CSI_Type *base, csi_handle_t *handle) { assert(handle); uint32_t emptyBufferCount; emptyBufferCount = CSI_TransferGetEmptyBufferCount(base, handle); if (emptyBufferCount < 2U) { return kStatus_CSI_NoEmptyBuffer; } handle->nextBufferIdx = 0U; handle->activeBufferNum = 0U; /* Write to memory from second completed frame. 
*/ base->CSICR18 = (base->CSICR18 & ~CSI_CSICR18_MASK_OPTION_MASK) | CSI_CSICR18_MASK_OPTION(2); /* Load the frame buffer to CSI register, there are at least two empty buffers. */ CSI_TransferLoadBufferToDevice(base, handle); CSI_TransferLoadBufferToDevice(base, handle); /* After reflash DMA, the CSI saves frame to frame buffer 0. */ CSI_ReflashFifoDma(base, kCSI_RxFifo); handle->transferStarted = true; handle->transferOnGoing = true; CSI_EnableInterrupts(base, kCSI_RxBuffer1DmaDoneInterruptEnable | kCSI_RxBuffer0DmaDoneInterruptEnable); CSI_Start(base); return kStatus_Success; } status_t CSI_TransferStop(CSI_Type *base, csi_handle_t *handle) { assert(handle); CSI_Stop(base); CSI_DisableInterrupts(base, kCSI_RxBuffer1DmaDoneInterruptEnable | kCSI_RxBuffer0DmaDoneInterruptEnable); handle->transferStarted = false; handle->transferOnGoing = false; /* Stoped, reset the state flags. */ handle->queueDrvReadIdx = handle->queueDrvWriteIdx; handle->activeBufferNum = 0U; return kStatus_Success; } status_t CSI_TransferSubmitEmptyBuffer(CSI_Type *base, csi_handle_t *handle, uint32_t frameBuffer) { uint32_t csicr1; if (CSI_DRIVER_QUEUE_SIZE == CSI_TransferGetQueueDelta(handle->queueUserReadIdx, handle->queueUserWriteIdx)) { return kStatus_CSI_QueueFull; } /* Disable the interrupt to protect the index information in handle. */ csicr1 = base->CSICR1; base->CSICR1 = (csicr1 & ~(CSI_CSICR1_FB2_DMA_DONE_INTEN_MASK | CSI_CSICR1_FB1_DMA_DONE_INTEN_MASK)); /* Save the empty frame buffer address to queue. */ handle->frameBufferQueue[handle->queueUserWriteIdx] = frameBuffer; handle->queueUserWriteIdx = CSI_TransferIncreaseQueueIdx(handle->queueUserWriteIdx); base->CSICR1 = csicr1; if (handle->transferStarted) { /* * If user has started transfer using @ref CSI_TransferStart, and the CSI is * stopped due to no empty frame buffer in queue, then start the CSI. */ if ((!handle->transferOnGoing) && (CSI_TransferGetEmptyBufferCount(base, handle) >= 2U)) { handle->transferOnGoing = true; handle->nextBufferIdx = 0U; /* Load the frame buffers to CSI module. */ CSI_TransferLoadBufferToDevice(base, handle); CSI_TransferLoadBufferToDevice(base, handle); CSI_ReflashFifoDma(base, kCSI_RxFifo); CSI_Start(base); } } return kStatus_Success; } status_t CSI_TransferGetFullBuffer(CSI_Type *base, csi_handle_t *handle, uint32_t *frameBuffer) { uint32_t csicr1; /* No full frame buffer. */ if (handle->queueUserReadIdx == handle->queueDrvWriteIdx) { return kStatus_CSI_NoFullBuffer; } /* Disable the interrupt to protect the index information in handle. */ csicr1 = base->CSICR1; base->CSICR1 = (csicr1 & ~(CSI_CSICR1_FB2_DMA_DONE_INTEN_MASK | CSI_CSICR1_FB1_DMA_DONE_INTEN_MASK)); *frameBuffer = handle->frameBufferQueue[handle->queueUserReadIdx]; handle->queueUserReadIdx = CSI_TransferIncreaseQueueIdx(handle->queueUserReadIdx); base->CSICR1 = csicr1; return kStatus_Success; } void CSI_TransferHandleIRQ(CSI_Type *base, csi_handle_t *handle) { uint32_t queueDrvWriteIdx; uint32_t csisr = base->CSISR; /* Clear the error flags. */ base->CSISR = csisr; /* * If both frame buffer 0 and frame buffer 1 flags assert, driver does not * know which frame buffer ready just now, so reset the CSI transfer to * start from frame buffer 0. */ if ((csisr & (CSI_CSISR_DMA_TSF_DONE_FB2_MASK | CSI_CSISR_DMA_TSF_DONE_FB1_MASK)) == (CSI_CSISR_DMA_TSF_DONE_FB2_MASK | CSI_CSISR_DMA_TSF_DONE_FB1_MASK)) { CSI_Stop(base); /* Reset the active buffers. 
*/ if (1 <= handle->activeBufferNum) { queueDrvWriteIdx = handle->queueDrvWriteIdx; base->CSIDMASA_FB1 = handle->frameBufferQueue[queueDrvWriteIdx]; if (2U == handle->activeBufferNum) { queueDrvWriteIdx = CSI_TransferIncreaseQueueIdx(queueDrvWriteIdx); base->CSIDMASA_FB2 = handle->frameBufferQueue[queueDrvWriteIdx]; handle->nextBufferIdx = 0U; } else { handle->nextBufferIdx = 1U; } } CSI_ReflashFifoDma(base, kCSI_RxFifo); CSI_Start(base); } else if (csisr & (CSI_CSISR_DMA_TSF_DONE_FB2_MASK | CSI_CSISR_DMA_TSF_DONE_FB1_MASK)) { handle->queueDrvWriteIdx = CSI_TransferIncreaseQueueIdx(handle->queueDrvWriteIdx); handle->activeBufferNum--; if (handle->callback) { handle->callback(base, handle, kStatus_CSI_FrameDone, handle->userData); } /* No frame buffer to save incoming data, then stop the CSI module. */ if (!(handle->activeBufferNum)) { CSI_Stop(base); handle->transferOnGoing = false; } else { if (CSI_TransferGetEmptyBufferCount(base, handle)) { CSI_TransferLoadBufferToDevice(base, handle); } } } else { } } #if defined(CSI) void CSI_DriverIRQHandler(void) { s_csiIsr(CSI, s_csiHandle[0]); /* Add for ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping exception return operation might vector to incorrect interrupt */ #if defined __CORTEX_M && (__CORTEX_M == 4U) __DSB(); #endif } #endif #if defined(CSI0) void CSI0_DriverIRQHandler(void) { s_csiIsr(CSI, s_csiHandle[0]); /* Add for ARM errata 838869, affects Cortex-M4, Cortex-M4F Store immediate overlapping exception return operation might vector to incorrect interrupt */ #if defined __CORTEX_M && (__CORTEX_M == 4U) __DSB(); #endif } #endif
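/*
 * Hedged usage sketch, not part of fsl_csi.c: outlines the transactional
 * flow built on the empty/full frame-buffer queue implemented above. It
 * assumes the device header defines a single instance named CSI; the
 * DEMO_* names, buffer placement and RGB565 format are assumptions, and a
 * real application would also configure pins, clocks and the camera sensor.
 */
#include "fsl_csi.h"

#define DEMO_WIDTH  320U
#define DEMO_HEIGHT 240U

static csi_handle_t s_demoHandle;
/* Two RGB565 frame buffers; placement and alignment requirements are board
   specific and not shown here. */
static uint16_t s_frameBuffer[2][DEMO_HEIGHT][DEMO_WIDTH];

static void demo_csi_callback(CSI_Type *base, csi_handle_t *handle,
                              status_t status, void *userData)
{
    /* Invoked from CSI_TransferHandleIRQ() when a frame completes. */
    (void)base; (void)handle; (void)status; (void)userData;
}

void demo_csi_capture(void)
{
    csi_config_t config;
    uint32_t fullBuffer;

    CSI_GetDefaultConfig(&config);
    config.width           = DEMO_WIDTH;
    config.height          = DEMO_HEIGHT;
    config.bytesPerPixel   = 2U;
    config.linePitch_Bytes = DEMO_WIDTH * 2U;

    (void)CSI_Init(CSI, &config);
    (void)CSI_TransferCreateHandle(CSI, &s_demoHandle, demo_csi_callback, NULL);

    /* CSI_TransferStart() needs at least two empty buffers in the queue. */
    (void)CSI_TransferSubmitEmptyBuffer(CSI, &s_demoHandle,
                                        (uint32_t)s_frameBuffer[0]);
    (void)CSI_TransferSubmitEmptyBuffer(CSI, &s_demoHandle,
                                        (uint32_t)s_frameBuffer[1]);
    (void)CSI_TransferStart(CSI, &s_demoHandle);

    /* Poll for a completed frame, then recycle the buffer as empty again. */
    while (kStatus_Success != CSI_TransferGetFullBuffer(CSI, &s_demoHandle,
                                                        &fullBuffer))
    {
    }
    (void)CSI_TransferSubmitEmptyBuffer(CSI, &s_demoHandle, fullBuffer);
}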
997357.c
// SPDX-License-Identifier: GPL-2.0 /* * This file contains work-arounds for many known PCI hardware * bugs. Devices present only on certain architectures (host * bridges et cetera) should be handled in arch-specific code. * * Note: any quirks for hotpluggable devices must _NOT_ be declared __init. * * Copyright (c) 1999 Martin Mares <[email protected]> * * Init/reset quirks for USB host controllers should be in the * USB quirks file, where their drivers can access reuse it. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/acpi.h> #include <linux/kallsyms.h> #include <linux/dmi.h> #include <linux/pci-aspm.h> #include <linux/ioport.h> #include <linux/sched.h> #include <linux/ktime.h> #include <linux/mm.h> #include <linux/platform_data/x86/apple.h> #include <asm/dma.h> /* isa_dma_bridge_buggy */ #include "pci.h" /* * Decoding should be disabled for a PCI device during BAR sizing to avoid * conflict. But doing so may cause problems on host bridge and perhaps other * key system devices. For devices that need to have mmio decoding always-on, * we need to set the dev->mmio_always_on bit. */ static void quirk_mmio_always_on(struct pci_dev *dev) { dev->mmio_always_on = 1; } DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on); /* The Mellanox Tavor device gives false positive parity errors * Mark this device with a broken_parity_status, to allow * PCI scanning code to "skip" this now blacklisted device. */ static void quirk_mellanox_tavor(struct pci_dev *dev) { dev->broken_parity_status = 1; /* This device gives false positives */ } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor); /* Deal with broken BIOSes that neglect to enable passive release, which can cause problems in combination with the 82441FX/PPro MTRRs */ static void quirk_passive_release(struct pci_dev *dev) { struct pci_dev *d = NULL; unsigned char dlc; /* We have to make sure a particular bit is set in the PIIX3 ISA bridge, so we have to go out and find it. */ while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) { pci_read_config_byte(d, 0x82, &dlc); if (!(dlc & 1<<1)) { dev_info(&d->dev, "PIIX3: Enabling Passive Release\n"); dlc |= 1<<1; pci_write_config_byte(d, 0x82, dlc); } } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release); /* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround but VIA don't answer queries. If you happen to have good contacts at VIA ask them for me please -- Alan This appears to be BIOS not version dependent. So presumably there is a chipset level fix */ static void quirk_isa_dma_hangs(struct pci_dev *dev) { if (!isa_dma_bridge_buggy) { isa_dma_bridge_buggy = 1; dev_info(&dev->dev, "Activating ISA DMA hang workarounds\n"); } } /* * Its not totally clear which chipsets are the problematic ones * We know 82C586 and 82C596 variants are affected. 
*/ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); /* * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear * for some HT machines to use C4 w/o hanging. */ static void quirk_tigerpoint_bm_sts(struct pci_dev *dev) { u32 pmbase; u16 pm1a; pci_read_config_dword(dev, 0x40, &pmbase); pmbase = pmbase & 0xff80; pm1a = inw(pmbase); if (pm1a & 0x10) { dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n"); outw(0x10, pmbase); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts); /* * Chipsets where PCI->PCI transfers vanish or hang */ static void quirk_nopcipci(struct pci_dev *dev) { if ((pci_pci_problems & PCIPCI_FAIL) == 0) { dev_info(&dev->dev, "Disabling direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_FAIL; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci); static void quirk_nopciamd(struct pci_dev *dev) { u8 rev; pci_read_config_byte(dev, 0x08, &rev); if (rev == 0x13) { /* Erratum 24 */ dev_info(&dev->dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n"); pci_pci_problems |= PCIAGP_FAIL; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd); /* * Triton requires workarounds to be used by the drivers */ static void quirk_triton(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_TRITON) == 0) { dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_TRITON; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton); /* * VIA Apollo KT133 needs PCI latency patch * Made according to a windows driver based patch by George E. Breese * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for * the info on which Mr Breese based his work. * * Updated based on further information from the site and also on * information provided by VIA */ static void quirk_vialatency(struct pci_dev *dev) { struct pci_dev *p; u8 busarb; /* Ok we have a potential problem chipset here. 
Now see if we have a buggy southbridge */ p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL); if (p != NULL) { /* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */ /* Check for buggy part revisions */ if (p->revision < 0x40 || p->revision > 0x42) goto exit; } else { p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL); if (p == NULL) /* No problem parts */ goto exit; /* Check for buggy part revisions */ if (p->revision < 0x10 || p->revision > 0x12) goto exit; } /* * Ok we have the problem. Now set the PCI master grant to * occur every master grant. The apparent bug is that under high * PCI load (quite common in Linux of course) you can get data * loss when the CPU is held off the bus for 3 bus master requests * This happens to include the IDE controllers.... * * VIA only apply this fix when an SB Live! is present but under * both Linux and Windows this isn't enough, and we have seen * corruption without SB Live! but with things like 3 UDMA IDE * controllers. So we ignore that bit of the VIA recommendation.. */ pci_read_config_byte(dev, 0x76, &busarb); /* Set bit 4 and bi 5 of byte 76 to 0x01 "Master priority rotation on every PCI master grant */ busarb &= ~(1<<5); busarb |= (1<<4); pci_write_config_byte(dev, 0x76, busarb); dev_info(&dev->dev, "Applying VIA southbridge workaround\n"); exit: pci_dev_put(p); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency); /* Must restore this on a resume from RAM */ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency); /* * VIA Apollo VP3 needs ETBF on BT848/878 */ static void quirk_viaetbf(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) { dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_VIAETBF; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf); static void quirk_vsfx(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_VSFX) == 0) { dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_VSFX; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx); /* * Ali Magik requires workarounds to be used by the drivers * that DMA to AGP space. 
Latency must be set to 0xA and triton * workaround applied too * [Info kindly provided by ALi] */ static void quirk_alimagik(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) { dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik); /* * Natoma has some interesting boundary conditions with Zoran stuff * at least */ static void quirk_natoma(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_NATOMA) == 0) { dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_NATOMA; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma); /* * This chip can cause PCI parity errors if config register 0xA0 is read * while DMAs are occurring. */ static void quirk_citrine(struct pci_dev *dev) { dev->cfg_size = 0xA0; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine); /* * This chip can cause bus lockups if config addresses above 0x600 * are read or written. */ static void quirk_nfp6000(struct pci_dev *dev) { dev->cfg_size = 0x600; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000); /* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */ static void quirk_extend_bar_to_page(struct pci_dev *dev) { int i; for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { struct resource *r = &dev->resource[i]; if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) { r->end = PAGE_SIZE - 1; r->start = 0; r->flags |= IORESOURCE_UNSET; dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n", i, r); } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page); /* * S3 868 and 968 chips report region size equal to 32M, but they decode 64M. * If it's needed, re-allocate the region. 
*/ static void quirk_s3_64M(struct pci_dev *dev) { struct resource *r = &dev->resource[0]; if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) { r->flags |= IORESOURCE_UNSET; r->start = 0; r->end = 0x3ffffff; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); static void quirk_io(struct pci_dev *dev, int pos, unsigned size, const char *name) { u32 region; struct pci_bus_region bus_region; struct resource *res = dev->resource + pos; pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region); if (!region) return; res->name = pci_name(dev); res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK; res->flags |= (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN); region &= ~(size - 1); /* Convert from PCI bus to resource space */ bus_region.start = region; bus_region.end = region + size - 1; pcibios_bus_to_resource(dev->bus, res, &bus_region); dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n", name, PCI_BASE_ADDRESS_0 + (pos << 2), res); } /* * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS * ver. 1.33 20070103) don't set the correct ISA PCI region header info. * BAR0 should be 8 bytes; instead, it may be set to something like 8k * (which conflicts w/ BAR1's memory range). * * CS553x's ISA PCI BARs may also be read-only (ref: * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward). */ static void quirk_cs5536_vsa(struct pci_dev *dev) { static char *name = "CS5536 ISA bridge"; if (pci_resource_len(dev, 0) != 8) { quirk_io(dev, 0, 8, name); /* SMB */ quirk_io(dev, 1, 256, name); /* GPIO */ quirk_io(dev, 2, 64, name); /* MFGPT */ dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n", name); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); static void quirk_io_region(struct pci_dev *dev, int port, unsigned size, int nr, const char *name) { u16 region; struct pci_bus_region bus_region; struct resource *res = dev->resource + nr; pci_read_config_word(dev, port, &region); region &= ~(size - 1); if (!region) return; res->name = pci_name(dev); res->flags = IORESOURCE_IO; /* Convert from PCI bus to resource space */ bus_region.start = region; bus_region.end = region + size - 1; pcibios_bus_to_resource(dev->bus, res, &bus_region); if (!pci_claim_resource(dev, nr)) dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name); } /* * ATI Northbridge setups MCE the processor if you even * read somewhere between 0x3b0->0x3bb or read 0x3d3 */ static void quirk_ati_exploding_mce(struct pci_dev *dev) { dev_info(&dev->dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n"); /* Mae rhaid i ni beidio ag edrych ar y lleoliadiau I/O hyn */ request_region(0x3b0, 0x0C, "RadeonIGP"); request_region(0x3d3, 0x01, "RadeonIGP"); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce); /* * In the AMD NL platform, this device ([1022:7912]) has a class code of * PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will * claim it. * But the dwc3 driver is a more specific driver for this device, and we'd * prefer to use it instead of xhci. To prevent xhci from claiming the * device, change the class code to 0x0c03fe, which the PCI r3.0 spec * defines as "USB device (not host controller)". The dwc3 driver can then * claim it based on its Vendor and Device ID. 
*/ static void quirk_amd_nl_class(struct pci_dev *pdev) { u32 class = pdev->class; /* Use "USB Device (not host controller)" class */ pdev->class = PCI_CLASS_SERIAL_USB_DEVICE; dev_info(&pdev->dev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n", class, pdev->class); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB, quirk_amd_nl_class); /* * Let's make the southbridge information explicit instead * of having to worry about people probing the ACPI areas, * for example.. (Yes, it happens, and if you read the wrong * ACPI register it will put the machine to sleep with no * way of waking it up again. Bummer). * * ALI M7101: Two IO regions pointed to by words at * 0xE0 (64 bytes of ACPI registers) * 0xE2 (32 bytes of SMB registers) */ static void quirk_ali7101_acpi(struct pci_dev *dev) { quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI"); quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi); static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable) { u32 devres; u32 mask, size, base; pci_read_config_dword(dev, port, &devres); if ((devres & enable) != enable) return; mask = (devres >> 16) & 15; base = devres & 0xffff; size = 16; for (;;) { unsigned bit = size >> 1; if ((bit & mask) == bit) break; size = bit; } /* * For now we only print it out. Eventually we'll want to * reserve it (at least if it's in the 0x1000+ range), but * let's get enough confirmation reports first. */ base &= -size; dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1); } static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable) { u32 devres; u32 mask, size, base; pci_read_config_dword(dev, port, &devres); if ((devres & enable) != enable) return; base = devres & 0xffff0000; mask = (devres & 0x3f) << 16; size = 128 << 16; for (;;) { unsigned bit = size >> 1; if ((bit & mask) == bit) break; size = bit; } /* * For now we only print it out. Eventually we'll want to * reserve it, but let's get enough confirmation reports first. */ base &= -size; dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1); } /* * PIIX4 ACPI: Two IO regions pointed to by longwords at * 0x40 (64 bytes of ACPI registers) * 0x90 (16 bytes of SMB registers) * and a few strange programmable PIIX4 device resources. */ static void quirk_piix4_acpi(struct pci_dev *dev) { u32 res_a; quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI"); quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB"); /* Device resource A has enables for some of the other ones */ pci_read_config_dword(dev, 0x5c, &res_a); piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21); piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21); /* Device resource D is just bitfields for static resources */ /* Device 12 enabled? */ if (res_a & (1 << 29)) { piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20); piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7); } /* Device 13 enabled? 
*/ if (res_a & (1 << 30)) { piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20); piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7); } piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20); piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi); #define ICH_PMBASE 0x40 #define ICH_ACPI_CNTL 0x44 #define ICH4_ACPI_EN 0x10 #define ICH6_ACPI_EN 0x80 #define ICH4_GPIOBASE 0x58 #define ICH4_GPIO_CNTL 0x5c #define ICH4_GPIO_EN 0x10 #define ICH6_GPIOBASE 0x48 #define ICH6_GPIO_CNTL 0x4c #define ICH6_GPIO_EN 0x10 /* * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at * 0x40 (128 bytes of ACPI, GPIO & TCO registers) * 0x58 (64 bytes of GPIO I/O space) */ static void quirk_ich4_lpc_acpi(struct pci_dev *dev) { u8 enable; /* * The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict * with low legacy (and fixed) ports. We don't know the decoding * priority and can't tell whether the legacy device or the one created * here is really at that address. This happens on boards with broken * BIOSes. */ pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); if (enable & ICH4_ACPI_EN) quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO"); pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable); if (enable & ICH4_GPIO_EN) quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi); static void ich6_lpc_acpi_gpio(struct pci_dev *dev) { u8 enable; pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); if (enable & ICH6_ACPI_EN) quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO"); pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable); if (enable & ICH6_GPIO_EN) quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); } static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize) { u32 val; u32 size, base; pci_read_config_dword(dev, reg, &val); /* Enabled? */ if (!(val & 1)) return; base = val & 0xfffc; if (dynsize) { /* * This is not correct. It is 16, 32 or 64 bytes depending on * register D31:F0:ADh bits 5:4. * * But this gets us at least _part_ of it. */ size = 16; } else { size = 128; } base &= ~(size-1); /* Just print it out for now. 
We should reserve it after more debugging */ dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base+size-1); } static void quirk_ich6_lpc(struct pci_dev *dev) { /* Shared ACPI/GPIO decode with all ICH6+ */ ich6_lpc_acpi_gpio(dev); /* ICH6-specific generic IO decode */ ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0); ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc); static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name) { u32 val; u32 mask, base; pci_read_config_dword(dev, reg, &val); /* Enabled? */ if (!(val & 1)) return; /* * IO base in bits 15:2, mask in bits 23:18, both * are dword-based */ base = val & 0xfffc; mask = (val >> 16) & 0xfc; mask |= 3; /* Just print it out for now. We should reserve it after more debugging */ dev_info(&dev->dev, "%s PIO at %04x (mask %04x)\n", name, base, mask); } /* ICH7-10 has the same common LPC generic IO decode registers */ static void quirk_ich7_lpc(struct pci_dev *dev) { /* We share the common ACPI/GPIO decode with ICH6 */ ich6_lpc_acpi_gpio(dev); /* And have 4 ICH7+ generic decodes */ ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1"); ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2"); ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3"); ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc); /* * VIA ACPI: One IO region pointed to by longword at * 0x48 or 0x20 (256 bytes of ACPI registers) */ static void quirk_vt82c586_acpi(struct pci_dev *dev) { if (dev->revision & 0x10) quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES, "vt82c586 ACPI"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi); /* * VIA VT82C686 ACPI: Three IO region pointed to by (long)words at * 0x48 (256 bytes of ACPI registers) * 0x70 (128 bytes of hardware monitoring register) * 0x90 (16 bytes of SMB registers) */ static void quirk_vt82c686_acpi(struct pci_dev *dev) { quirk_vt82c586_acpi(dev); quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1, "vt82c686 HW-mon"); quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB"); } 
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi); /* * VIA VT8235 ISA Bridge: Two IO regions pointed to by words at * 0x88 (128 bytes of power management registers) * 0xd0 (16 bytes of SMB registers) */ static void quirk_vt8235_acpi(struct pci_dev *dev) { quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM"); quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi); /* * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back: * Disable fast back-to-back on the secondary bus segment */ static void quirk_xio2000a(struct pci_dev *dev) { struct pci_dev *pdev; u16 command; dev_warn(&dev->dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n"); list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) { pci_read_config_word(pdev, PCI_COMMAND, &command); if (command & PCI_COMMAND_FAST_BACK) pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A, quirk_xio2000a); #ifdef CONFIG_X86_IO_APIC #include <asm/io_apic.h> /* * VIA 686A/B: If an IO-APIC is active, we need to route all on-chip * devices to the external APIC. * * TODO: When we have device-specific interrupt routers, * this code will go away from quirks. */ static void quirk_via_ioapic(struct pci_dev *dev) { u8 tmp; if (nr_ioapics < 1) tmp = 0; /* nothing routed to external APIC */ else tmp = 0x1f; /* all known bits (4-0) routed to external APIC */ dev_info(&dev->dev, "%sbling VIA external APIC routing\n", tmp == 0 ? "Disa" : "Ena"); /* Offset 0x58: External APIC IRQ output control */ pci_write_config_byte(dev, 0x58, tmp); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); /* * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' Bit. * This leads to doubled level interrupt rates. * Set this bit to get rid of cycle wastage. * Otherwise uncritical. */ static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev) { u8 misc_control2; #define BYPASS_APIC_DEASSERT 8 pci_read_config_byte(dev, 0x5B, &misc_control2); if (!(misc_control2 & BYPASS_APIC_DEASSERT)) { dev_info(&dev->dev, "Bypassing VIA 8237 APIC De-Assert Message\n"); pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); /* * The AMD io apic can hang the box when an apic irq is masked. * We check all revs >= B0 (yet not in the pre production!) as the bug * is currently marked NoFix * * We have multiple reports of hangs with this chipset that went away with * noapic specified. For the moment we assume it's the erratum. We may be wrong * of course. However the advice is demonstrably good even if so.. */ static void quirk_amd_ioapic(struct pci_dev *dev) { if (dev->revision >= 0x02) { dev_warn(&dev->dev, "I/O APIC: AMD Erratum #22 may be present. 
In the event of instability try\n"); dev_warn(&dev->dev, " : booting with the \"noapic\" option\n"); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic); #endif /* CONFIG_X86_IO_APIC */ #if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS) static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev) { /* Fix for improper SRIOV configuration on Cavium cn88xx RNM device */ if (dev->subsystem_device == 0xa118) dev->sriov->link = dev->devfn; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link); #endif /* * Some settings of MMRBC can lead to data corruption so block changes. * See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide */ static void quirk_amd_8131_mmrbc(struct pci_dev *dev) { if (dev->subordinate && dev->revision <= 0x12) { dev_info(&dev->dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n", dev->revision); dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc); /* * FIXME: it is questionable that quirk_via_acpi * is needed. It shows up as an ISA bridge, and does not * support the PCI_INTERRUPT_LINE register at all. Therefore * it seems like setting the pci_dev's 'irq' to the * value of the ACPI SCI interrupt is only done for convenience. * -jgarzik */ static void quirk_via_acpi(struct pci_dev *d) { /* * VIA ACPI device: SCI IRQ line in PCI config byte 0x42 */ u8 irq; pci_read_config_byte(d, 0x42, &irq); irq &= 0xf; if (irq && (irq != 2)) d->irq = irq; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi); /* * VIA bridges which have VLink */ static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18; static void quirk_via_bridge(struct pci_dev *dev) { /* See what bridge we have and find the device ranges */ switch (dev->device) { case PCI_DEVICE_ID_VIA_82C686: /* The VT82C686 is special, it attaches to PCI and can have any device number. All its subdevices are functions of that single device. */ via_vlink_dev_lo = PCI_SLOT(dev->devfn); via_vlink_dev_hi = PCI_SLOT(dev->devfn); break; case PCI_DEVICE_ID_VIA_8237: case PCI_DEVICE_ID_VIA_8237A: via_vlink_dev_lo = 15; break; case PCI_DEVICE_ID_VIA_8235: via_vlink_dev_lo = 16; break; case PCI_DEVICE_ID_VIA_8231: case PCI_DEVICE_ID_VIA_8233_0: case PCI_DEVICE_ID_VIA_8233A: case PCI_DEVICE_ID_VIA_8233C_0: via_vlink_dev_lo = 17; break; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_0, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233A, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233C_0, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge); /** * quirk_via_vlink - VIA VLink IRQ number update * @dev: PCI device * * If the device we are dealing with is on a PIC IRQ we need to * ensure that the IRQ line register which usually is not relevant * for PCI cards, is actually written so that interrupts get sent * to the right place. 
* We only do this on systems where a VIA south bridge was detected, * and only for VIA devices on the motherboard (see quirk_via_bridge * above). */ static void quirk_via_vlink(struct pci_dev *dev) { u8 irq, new_irq; /* Check if we have VLink at all */ if (via_vlink_dev_lo == -1) return; new_irq = dev->irq; /* Don't quirk interrupts outside the legacy IRQ range */ if (!new_irq || new_irq > 15) return; /* Internal device ? */ if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi || PCI_SLOT(dev->devfn) < via_vlink_dev_lo) return; /* This is an internal VLink device on a PIC interrupt. The BIOS ought to have set this but may not have, so we redo it */ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); if (new_irq != irq) { dev_info(&dev->dev, "VIA VLink IRQ fixup, from %d to %d\n", irq, new_irq); udelay(15); /* unknown if delay really needed */ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq); } } DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink); /* * VIA VT82C598 has its device ID settable and many BIOSes * set it to the ID of VT82C597 for backward compatibility. * We need to switch it off to be able to recognize the real * type of the chip. */ static void quirk_vt82c598_id(struct pci_dev *dev) { pci_write_config_byte(dev, 0xfc, 0); pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id); /* * CardBus controllers have a legacy base address that enables them * to respond as i82365 pcmcia controllers. We don't want them to * do this even if the Linux CardBus driver is not loaded, because * the Linux i82365 driver does not (and should not) handle CardBus. */ static void quirk_cardbus_legacy(struct pci_dev *dev) { pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0); } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy); DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy); /* * Following the PCI ordering rules is optional on the AMD762. I'm not * sure what the designers were smoking but let's not inhale... * * To be fair to AMD, it follows the spec by default, its BIOS people * who turn it off! */ static void quirk_amd_ordering(struct pci_dev *dev) { u32 pcic; pci_read_config_dword(dev, 0x4C, &pcic); if ((pcic & 6) != 6) { pcic |= 6; dev_warn(&dev->dev, "BIOS failed to enable PCI standards compliance; fixing this error\n"); pci_write_config_dword(dev, 0x4C, pcic); pci_read_config_dword(dev, 0x84, &pcic); pcic |= (1 << 23); /* Required in this mode */ pci_write_config_dword(dev, 0x84, pcic); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); /* * DreamWorks provided workaround for Dunord I-3000 problem * * This card decodes and responds to addresses not apparently * assigned to it. We force a larger allocation to ensure that * nothing gets put too close to it. */ static void quirk_dunord(struct pci_dev *dev) { struct resource *r = &dev->resource[1]; r->flags |= IORESOURCE_UNSET; r->start = 0; r->end = 0xffffff; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord); /* * i82380FB mobile docking controller: its PCI-to-PCI bridge * is subtractive decoding (transparent), and does indicate this * in the ProgIf. 
Unfortunately, the ProgIf value is wrong - 0x80 * instead of 0x01. */ static void quirk_transparent_bridge(struct pci_dev *dev) { dev->transparent = 1; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge); /* * Common misconfiguration of the MediaGX/Geode PCI master that will * reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1 * datasheets found at http://www.national.com/analog for info on what * these bits do. <[email protected]> */ static void quirk_mediagx_master(struct pci_dev *dev) { u8 reg; pci_read_config_byte(dev, 0x41, &reg); if (reg & 2) { reg &= ~2; dev_info(&dev->dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n", reg); pci_write_config_byte(dev, 0x41, reg); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master); /* * Ensure C0 rev restreaming is off. This is normally done by * the BIOS but in the odd case it is not the results are corruption * hence the presence of a Linux check */ static void quirk_disable_pxb(struct pci_dev *pdev) { u16 config; if (pdev->revision != 0x04) /* Only C0 requires this */ return; pci_read_config_word(pdev, 0x40, &config); if (config & (1<<6)) { config &= ~(1<<6); pci_write_config_word(pdev, 0x40, config); dev_info(&pdev->dev, "C0 revision 450NX. Disabling PCI restreaming\n"); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); static void quirk_amd_ide_mode(struct pci_dev *pdev) { /* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */ u8 tmp; pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp); if (tmp == 0x01) { pci_read_config_byte(pdev, 0x40, &tmp); pci_write_config_byte(pdev, 0x40, tmp|1); pci_write_config_byte(pdev, 0x9, 1); pci_write_config_byte(pdev, 0xa, 6); pci_write_config_byte(pdev, 0x40, tmp); pdev->class = PCI_CLASS_STORAGE_SATA_AHCI; dev_info(&pdev->dev, "set SATA to AHCI mode\n"); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode); /* * Serverworks CSB5 IDE does not fully support native mode */ static void quirk_svwks_csb5ide(struct pci_dev *pdev) { u8 prog; pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); if (prog & 5) { prog &= ~5; pdev->class &= ~5; pci_write_config_byte(pdev, PCI_CLASS_PROG, prog); /* PCI layer will sort out resources */ } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide); /* * Intel 82801CAM ICH3-M datasheet says IDE modes must be the same */ static void quirk_ide_samemode(struct pci_dev *pdev) { u8 
prog; pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) { dev_info(&pdev->dev, "IDE mode mismatch; forcing legacy mode\n"); prog &= ~5; pdev->class &= ~5; pci_write_config_byte(pdev, PCI_CLASS_PROG, prog); } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode); /* * Some ATA devices break if put into D3 */ static void quirk_no_ata_d3(struct pci_dev *pdev) { pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; } /* Quirk the legacy ATA devices only. The AHCI ones are ok */ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3); /* ALi loses some register settings that we cannot then restore */ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3); /* VIA comes back fine but we need to keep it alive or ACPI GTM failures occur when mode detecting */ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3); /* This was originally an Alpha specific thing, but it really fits here. * The i82375 PCI/EISA bridge appears as non-classified. Fix that. */ static void quirk_eisa_bridge(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_EISA << 8; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge); /* * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge * is not activated. The myth is that Asus said that they do not want the * users to be irritated by just another PCI Device in the Win98 device * manager. (see the file prog/hotplug/README.p4b in the lm_sensors * package 2.7.0 for details) * * The SMBus PCI Device can be activated by setting a bit in the ICH LPC * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it * becomes necessary to do this tweak in two steps -- the chosen trigger * is either the Host bridge (preferred) or on-board VGA controller. * * Note that we used to unhide the SMBus that way on Toshiba laptops * (Satellite A40 and Tecra M2) but then found that the thermal management * was done by SMM code, which could cause unsynchronized concurrent * accesses to the SMBus registers, with potentially bad effects. Thus you * should be very careful when adding new entries: if SMM is accessing the * Intel SMBus, this is a very good reason to leave it hidden. * * Likewise, many recent laptops use ACPI for thermal management. If the * ACPI DSDT code accesses the SMBus, then Linux should not access it * natively, and keeping the SMBus hidden is the right thing to do. If you * are about to add an entry in the table below, please first disassemble * the DSDT and double-check that there is no code accessing the SMBus. 
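 * (A typical way to do that check: dump the ACPI tables with
 *  "acpidump > acpi.out", extract them with "acpixtract -a acpi.out",
 *  disassemble with "iasl -d dsdt.dat", and then search the resulting
 *  dsdt.dsl for accesses to the ICH SMBus I/O range.)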
*/ static int asus_hides_smbus; static void asus_hides_smbus_hostbridge(struct pci_dev *dev) { if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) { if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB) switch (dev->subsystem_device) { case 0x8025: /* P4B-LX */ case 0x8070: /* P4B */ case 0x8088: /* P4B533 */ case 0x1626: /* L3C notebook */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) switch (dev->subsystem_device) { case 0x80b1: /* P4GE-V */ case 0x80b2: /* P4PE */ case 0x8093: /* P4B533-V */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB) switch (dev->subsystem_device) { case 0x8030: /* P4T533 */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0) switch (dev->subsystem_device) { case 0x8070: /* P4G8X Deluxe */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH) switch (dev->subsystem_device) { case 0x80c9: /* PU-DLS */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) switch (dev->subsystem_device) { case 0x1751: /* M2N notebook */ case 0x1821: /* M5N notebook */ case 0x1897: /* A6L notebook */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) switch (dev->subsystem_device) { case 0x184b: /* W1N notebook */ case 0x186a: /* M6Ne notebook */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB) switch (dev->subsystem_device) { case 0x80f2: /* P4P800-X */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) switch (dev->subsystem_device) { case 0x1882: /* M6V notebook */ case 0x1977: /* A6VA notebook */ asus_hides_smbus = 1; } } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) { if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) switch (dev->subsystem_device) { case 0x088C: /* HP Compaq nc8000 */ case 0x0890: /* HP Compaq nc6000 */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB) switch (dev->subsystem_device) { case 0x12bc: /* HP D330L */ case 0x12bd: /* HP D530 */ case 0x006a: /* HP Compaq nx9500 */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB) switch (dev->subsystem_device) { case 0x12bf: /* HP xw4100 */ asus_hides_smbus = 1; } } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) { if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) switch (dev->subsystem_device) { case 0xC00C: /* Samsung P35 notebook */ asus_hides_smbus = 1; } } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) { if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) switch (dev->subsystem_device) { case 0x0058: /* Compaq Evo N620c */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3) switch (dev->subsystem_device) { case 0xB16C: /* Compaq Deskpro EP 401963-001 (PCA# 010174) */ /* Motherboard doesn't have Host bridge * subvendor/subdevice IDs, therefore checking * its on-board VGA controller */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2) switch (dev->subsystem_device) { case 0x00b8: /* Compaq Evo D510 CMT */ case 0x00b9: /* Compaq Evo D510 SFF */ case 0x00ba: /* Compaq Evo D510 USDT */ /* Motherboard doesn't have Host bridge * subvendor/subdevice IDs and on-board VGA * controller is disabled if an AGP card is * inserted, therefore checking USB UHCI * Controller #1 */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC) switch (dev->subsystem_device) { case 0x001A: /* Compaq Deskpro EN SSF P667 815E */ /* 
Motherboard doesn't have host bridge * subvendor/subdevice IDs, therefore checking * its on-board VGA controller */ asus_hides_smbus = 1; } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge); static void asus_hides_smbus_lpc(struct pci_dev *dev) { u16 val; if (likely(!asus_hides_smbus)) return; pci_read_config_word(dev, 0xF2, &val); if (val & 0x8) { pci_write_config_word(dev, 0xF2, val & (~0x8)); pci_read_config_word(dev, 0xF2, &val); if (val & 0x8) dev_info(&dev->dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n", val); else dev_info(&dev->dev, "Enabled i801 SMBus device\n"); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); /* It appears we just have one such device. 
If not, we have a warning */ static void __iomem *asus_rcba_base; static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev) { u32 rcba; if (likely(!asus_hides_smbus)) return; WARN_ON(asus_rcba_base); pci_read_config_dword(dev, 0xF0, &rcba); /* use bits 31:14, 16 kB aligned */ asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000); if (asus_rcba_base == NULL) return; } static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev) { u32 val; if (likely(!asus_hides_smbus || !asus_rcba_base)) return; /* read the Function Disable register, dword mode only */ val = readl(asus_rcba_base + 0x3418); writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); /* enable the SMBus device */ } static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev) { if (likely(!asus_hides_smbus || !asus_rcba_base)) return; iounmap(asus_rcba_base); asus_rcba_base = NULL; dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n"); } static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev) { asus_hides_smbus_lpc_ich6_suspend(dev); asus_hides_smbus_lpc_ich6_resume_early(dev); asus_hides_smbus_lpc_ich6_resume(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6); DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early); /* * SiS 96x south bridge: BIOS typically hides SMBus device... */ static void quirk_sis_96x_smbus(struct pci_dev *dev) { u8 val = 0; pci_read_config_byte(dev, 0x77, &val); if (val & 0x10) { dev_info(&dev->dev, "Enabling SiS 96x SMBus\n"); pci_write_config_byte(dev, 0x77, val & ~0x10); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); /* * ... This is further complicated by the fact that some SiS96x south * bridges pretend to be 85C503/5513 instead. In that case see if we * spotted a compatible north bridge to make sure. * (pci_find_device doesn't work yet) * * We can also enable the sis96x bit in the discovery register.. */ #define SIS_DETECT_REGISTER 0x40 static void quirk_sis_503(struct pci_dev *dev) { u8 reg; u16 devid; pci_read_config_byte(dev, SIS_DETECT_REGISTER, &reg); pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6)); pci_read_config_word(dev, PCI_DEVICE_ID, &devid); if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) { pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg); return; } /* * Ok, it now shows up as a 96x.. run the 96x quirk by * hand in case it has already been processed. 
* (depends on link order, which is apparently not guaranteed) */ dev->device = devid; quirk_sis_96x_smbus(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); /* * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller * and MC97 modem controller are disabled when a second PCI soundcard is * present. This patch, tweaking the VT8237 ISA bridge, enables them. * -- bjd */ static void asus_hides_ac97_lpc(struct pci_dev *dev) { u8 val; int asus_hides_ac97 = 0; if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) { if (dev->device == PCI_DEVICE_ID_VIA_8237) asus_hides_ac97 = 1; } if (!asus_hides_ac97) return; pci_read_config_byte(dev, 0x50, &val); if (val & 0xc0) { pci_write_config_byte(dev, 0x50, val & (~0xc0)); pci_read_config_byte(dev, 0x50, &val); if (val & 0xc0) dev_info(&dev->dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n", val); else dev_info(&dev->dev, "Enabled onboard AC97/MC97 devices\n"); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); #if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE) /* * If we are using libata we can drive this chip properly but must * do this early on to make the additional device appear during * the PCI scanning. */ static void quirk_jmicron_ata(struct pci_dev *pdev) { u32 conf1, conf5, class; u8 hdr; /* Only poke fn 0 */ if (PCI_FUNC(pdev->devfn)) return; pci_read_config_dword(pdev, 0x40, &conf1); pci_read_config_dword(pdev, 0x80, &conf5); conf1 &= ~0x00CFF302; /* Clear bit 1, 8, 9, 12-19, 22, 23 */ conf5 &= ~(1 << 24); /* Clear bit 24 */ switch (pdev->device) { case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */ case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */ case PCI_DEVICE_ID_JMICRON_JMB364: /* SATA dual ports */ /* The controller should be in single function ahci mode */ conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */ break; case PCI_DEVICE_ID_JMICRON_JMB365: case PCI_DEVICE_ID_JMICRON_JMB366: /* Redirect IDE second PATA port to the right spot */ conf5 |= (1 << 24); /* Fall through */ case PCI_DEVICE_ID_JMICRON_JMB361: case PCI_DEVICE_ID_JMICRON_JMB363: case PCI_DEVICE_ID_JMICRON_JMB369: /* Enable dual function mode, AHCI on fn 0, IDE fn1 */ /* Set the class codes correctly and then direct IDE 0 */ conf1 |= 0x00C2A1B3; /* Set 0, 1, 4, 5, 7, 8, 13, 15, 17, 22, 23 */ break; case PCI_DEVICE_ID_JMICRON_JMB368: /* The controller should be in single function IDE mode */ conf1 |= 0x00C00000; /* Set 22, 23 */ break; } pci_write_config_dword(pdev, 0x40, conf1); pci_write_config_dword(pdev, 0x80, conf5); /* Update pdev accordingly */ pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr); pdev->hdr_type = hdr & 0x7f; pdev->multifunction = !!(hdr & 0x80); pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class); pdev->class = class >> 8; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata); 
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata); #endif static void quirk_jmicron_async_suspend(struct pci_dev *dev) { if (dev->multifunction) { device_disable_async_suspend(&dev->dev); dev_info(&dev->dev, "async suspend disabled to avoid multi-function power-on ordering issue\n"); } } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend); DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend); #ifdef CONFIG_X86_IO_APIC static void quirk_alder_ioapic(struct pci_dev *pdev) { int i; if ((pdev->class >> 8) != 0xff00) return; /* the first BAR is the location of the IO APIC...we must * not touch this (and it's already covered by the fixmap), so * forcibly insert it into the resource tree */ if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0)) insert_resource(&iomem_resource, &pdev->resource[0]); /* The next five BARs all seem to be rubbish, so just clean * them out */ for (i = 1; i < 6; i++) memset(&pdev->resource[i], 0, sizeof(pdev->resource[i])); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic); #endif static void quirk_pcie_mch(struct pci_dev *pdev) { pdev->no_msi = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch); DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch); /* * It's possible for the MSI to get corrupted if shpc and acpi * are used together on certain PXH-based systems. 
*/ static void quirk_pcie_pxh(struct pci_dev *dev) { dev->no_msi = 1; dev_warn(&dev->dev, "PXH quirk detected; SHPC device MSI disabled\n"); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh); /* * Some Intel PCI Express chipsets have trouble with downstream * device power management. */ static void quirk_intel_pcie_pm(struct pci_dev *dev) { pci_pm_d3_delay = 120; dev->no_d1d2 = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2601, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2602, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2603, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2604, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2605, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2606, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2607, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2608, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); static void quirk_radeon_pm(struct pci_dev *dev) { if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE && dev->subsystem_device == 0x00e2) { if (dev->d3_delay < 20) { dev->d3_delay = 20; dev_info(&dev->dev, "extending delay after power-on from D3 to %d msec\n", dev->d3_delay); } } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm); #ifdef CONFIG_X86_IO_APIC static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) { noioapicreroute = 1; pr_info("%s detected: disable boot interrupt reroute\n", d->ident); return 0; } static const struct dmi_system_id boot_interrupt_dmi_table[] = { /* * Systems to exclude from boot interrupt reroute quirks */ { .callback = dmi_disable_ioapicreroute, .ident = "ASUSTek Computer INC. M2N-LR", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer INC."), DMI_MATCH(DMI_PRODUCT_NAME, "M2N-LR"), }, }, {} }; /* * Boot interrupts on some chipsets cannot be turned off. For these chipsets, * remap the original interrupt in the linux kernel to the boot interrupt, so * that a PCI device's interrupt handler is installed on the boot interrupt * line instead. 
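 * (For reference: a "boot interrupt" is the legacy, PIC-routed INTx that
 *  such chipsets generate when the corresponding IO-APIC redirection entry
 *  is masked, so that interrupts still reach the CPU on systems that have
 *  not yet programmed the IO-APIC; hence the name.)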
*/ static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev) { dmi_check_system(boot_interrupt_dmi_table); if (noioapicquirk || noioapicreroute) return; dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT; dev_info(&dev->dev, "rerouting interrupts for [%04x:%04x]\n", dev->vendor, dev->device); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel); /* * On some chipsets we can disable the generation of legacy INTx boot * interrupts. */ /* * IO-APIC1 on 6300ESB generates boot interrupts, see intel order no * 300641-004US, section 5.7.3. 
*/ #define INTEL_6300_IOAPIC_ABAR 0x40 #define INTEL_6300_DISABLE_BOOT_IRQ (1<<14) static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev) { u16 pci_config_word; if (noioapicquirk) return; pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word); pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); /* * disable boot interrupts on HT-1000 */ #define BC_HT1000_FEATURE_REG 0x64 #define BC_HT1000_PIC_REGS_ENABLE (1<<0) #define BC_HT1000_MAP_IDX 0xC00 #define BC_HT1000_MAP_DATA 0xC01 static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev) { u32 pci_config_dword; u8 irq; if (noioapicquirk) return; pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword); pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword | BC_HT1000_PIC_REGS_ENABLE); for (irq = 0x10; irq < 0x10 + 32; irq++) { outb(irq, BC_HT1000_MAP_IDX); outb(0x00, BC_HT1000_MAP_DATA); } pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword); dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); /* * disable boot interrupts on AMD and ATI chipsets */ /* * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131 * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode * (due to an erratum). 
*/ #define AMD_813X_MISC 0x40 #define AMD_813X_NOIOAMODE (1<<0) #define AMD_813X_REV_B1 0x12 #define AMD_813X_REV_B2 0x13 static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev) { u32 pci_config_dword; if (noioapicquirk) return; if ((dev->revision == AMD_813X_REV_B1) || (dev->revision == AMD_813X_REV_B2)) return; pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword); pci_config_dword &= ~AMD_813X_NOIOAMODE; pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword); dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); #define AMD_8111_PCI_IRQ_ROUTING 0x56 static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev) { u16 pci_config_word; if (noioapicquirk) return; pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word); if (!pci_config_word) { dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] already disabled\n", dev->vendor, dev->device); return; } pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0); dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); #endif /* CONFIG_X86_IO_APIC */ /* * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size * but the PIO transfers won't work if BAR0 falls at the odd 8 bytes. * Re-allocate the region if needed... */ static void quirk_tc86c001_ide(struct pci_dev *dev) { struct resource *r = &dev->resource[0]; if (r->start & 0x8) { r->flags |= IORESOURCE_UNSET; r->start = 0; r->end = 0xf; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE, quirk_tc86c001_ide); /* * PLX PCI 9050 PCI Target bridge controller has an errata that prevents the * local configuration registers accessible via BAR0 (memory) or BAR1 (i/o) * being read correctly if bit 7 of the base address is set. * The BAR0 or BAR1 region may be disabled (size 0) or enabled (size 128). * Re-allocate the regions to a 256-byte boundary if necessary. */ static void quirk_plx_pci9050(struct pci_dev *dev) { unsigned int bar; /* Fixed in revision 2 (PCI 9052). */ if (dev->revision >= 2) return; for (bar = 0; bar <= 1; bar++) if (pci_resource_len(dev, bar) == 0x80 && (pci_resource_start(dev, bar) & 0x80)) { struct resource *r = &dev->resource[bar]; dev_info(&dev->dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n", bar); r->flags |= IORESOURCE_UNSET; r->start = 0; r->end = 0xff; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, quirk_plx_pci9050); /* * The following Meilhaus (vendor ID 0x1402) device IDs (amongst others) * may be using the PLX PCI 9050: 0x0630, 0x0940, 0x0950, 0x0960, 0x100b, * 0x1400, 0x140a, 0x140b, 0x14e0, 0x14ea, 0x14eb, 0x1604, 0x1608, 0x160c, * 0x168f, 0x2000, 0x2600, 0x3000, 0x810a, 0x810b. 
* * Currently, device IDs 0x2000 and 0x2600 are used by the Comedi "me_daq" * driver. */ DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050); DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050); static void quirk_netmos(struct pci_dev *dev) { unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4; unsigned int num_serial = dev->subsystem_device & 0xf; /* * These Netmos parts are multiport serial devices with optional * parallel ports. Even when parallel ports are present, they * are identified as class SERIAL, which means the serial driver * will claim them. To prevent this, mark them as class OTHER. * These combo devices should be claimed by parport_serial. * * The subdevice ID is of the form 0x00PS, where <P> is the number * of parallel ports and <S> is the number of serial ports. */ switch (dev->device) { case PCI_DEVICE_ID_NETMOS_9835: /* Well, this rule doesn't hold for the following 9835 device */ if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && dev->subsystem_device == 0x0299) return; case PCI_DEVICE_ID_NETMOS_9735: case PCI_DEVICE_ID_NETMOS_9745: case PCI_DEVICE_ID_NETMOS_9845: case PCI_DEVICE_ID_NETMOS_9855: if (num_parallel) { dev_info(&dev->dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n", dev->device, num_parallel, num_serial); dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) | (dev->class & 0xff); } } } DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos); /* * Quirk non-zero PCI functions to route VPD access through function 0 for * devices that share VPD resources between functions. The functions are * expected to be identical devices. */ static void quirk_f0_vpd_link(struct pci_dev *dev) { struct pci_dev *f0; if (!PCI_FUNC(dev->devfn)) return; f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); if (!f0) return; if (f0->vpd && dev->class == f0->class && dev->vendor == f0->vendor && dev->device == f0->device) dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0; pci_dev_put(f0); } DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link); static void quirk_e100_interrupt(struct pci_dev *dev) { u16 command, pmcsr; u8 __iomem *csr; u8 cmd_hi; switch (dev->device) { /* PCI IDs taken from drivers/net/e100.c */ case 0x1029: case 0x1030 ... 0x1034: case 0x1038 ... 0x103E: case 0x1050 ... 0x1057: case 0x1059: case 0x1064 ... 0x106B: case 0x1091 ... 0x1095: case 0x1209: case 0x1229: case 0x2449: case 0x2459: case 0x245D: case 0x27DC: break; default: return; } /* * Some firmware hands off the e100 with interrupts enabled, * which can cause a flood of interrupts if packets are * received before the driver attaches to the device. So * disable all e100 interrupts here. The driver will * re-enable them when it's ready. */ pci_read_config_word(dev, PCI_COMMAND, &command); if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0)) return; /* * Check that the device is in the D0 power state. If it's not, * there is no point to look any further. */ if (dev->pm_cap) { pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) return; } /* Convert from PCI bus to resource space. 
*/ csr = ioremap(pci_resource_start(dev, 0), 8); if (!csr) { dev_warn(&dev->dev, "Can't map e100 registers\n"); return; } cmd_hi = readb(csr + 3); if (cmd_hi == 0) { dev_warn(&dev->dev, "Firmware left e100 interrupts enabled; disabling\n"); writeb(1, csr + 3); } iounmap(csr); } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET, 8, quirk_e100_interrupt); /* * The 82575 and 82598 may experience data corruption issues when transitioning * out of L0S. To prevent this we need to disable L0S on the PCIe link. */ static void quirk_disable_aspm_l0s(struct pci_dev *dev) { dev_info(&dev->dev, "Disabling L0s\n"); pci_disable_link_state(dev, PCIE_LINK_STATE_L0S); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); static void fixup_rev1_53c810(struct pci_dev *dev) { u32 class = dev->class; /* * rev 1 ncr53c810 chips don't set the class at all which means * they don't get their resources remapped. Fix that here. */ if (class) return; dev->class = PCI_CLASS_STORAGE_SCSI << 8; dev_info(&dev->dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n", class, dev->class); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810); /* Enable 1k I/O space granularity on the Intel P64H2 */ static void quirk_p64h2_1k_io(struct pci_dev *dev) { u16 en1k; pci_read_config_word(dev, 0x40, &en1k); if (en1k & 0x200) { dev_info(&dev->dev, "Enable I/O Space to 1KB granularity\n"); dev->io_window_1k = 1; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io); /* Under some circumstances, AER is not linked with extended capabilities. * Force it to be linked by setting the corresponding control bit in the * config space. */ static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev) { uint8_t b; if (pci_read_config_byte(dev, 0xf41, &b) == 0) { if (!(b & 0x20)) { pci_write_config_byte(dev, 0xf41, b | 0x20); dev_info(&dev->dev, "Linking AER extended capability\n"); } } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, quirk_nvidia_ck804_pcie_aer_ext_cap); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, quirk_nvidia_ck804_pcie_aer_ext_cap); static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) { /* * Disable PCI Bus Parking and PCI Master read caching on CX700 * which causes unspecified timing errors with a VT6212L on the PCI * bus leading to USB2.0 packet loss. 
* * This quirk is only enabled if a second (on the external PCI bus) * VT6212L is found -- the CX700 core itself also contains a USB * host controller with the same PCI ID as the VT6212L. */ /* Count VT6212L instances */ struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, NULL); uint8_t b; /* p should contain the first (internal) VT6212L -- see if we have an external one by searching again */ p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p); if (!p) return; pci_dev_put(p); if (pci_read_config_byte(dev, 0x76, &b) == 0) { if (b & 0x40) { /* Turn off PCI Bus Parking */ pci_write_config_byte(dev, 0x76, b ^ 0x40); dev_info(&dev->dev, "Disabling VIA CX700 PCI parking\n"); } } if (pci_read_config_byte(dev, 0x72, &b) == 0) { if (b != 0) { /* Turn off PCI Master read caching */ pci_write_config_byte(dev, 0x72, 0x0); /* Set PCI Master Bus time-out to "1x16 PCLK" */ pci_write_config_byte(dev, 0x75, 0x1); /* Disable "Read FIFO Timer" */ pci_write_config_byte(dev, 0x77, 0x0); dev_info(&dev->dev, "Disabling VIA CX700 PCI caching\n"); } } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching); /* * If a device follows the VPD format spec, the PCI core will not read or * write past the VPD End Tag. But some vendors do not follow the VPD * format spec, so we can't tell how much data is safe to access. Devices * may behave unpredictably if we access too much. Blacklist these devices * so we don't touch VPD at all. */ static void quirk_blacklist_vpd(struct pci_dev *dev) { if (dev->vpd) { dev->vpd->len = 0; dev_warn(&dev->dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n"); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd); /* * For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the * VPD end tag will hang the device. This problem was initially * observed when a vpd entry was created in sysfs * ('/sys/bus/pci/devices/<id>/vpd'). A read to this sysfs entry * will dump 32k of data. Reading a full 32k will cause an access * beyond the VPD end tag causing the device to hang. Once the device * is hung, the bnx2 driver will not be able to reset the device. * We believe that it is legal to read beyond the end tag and * therefore the solution is to limit the read/write length. */ static void quirk_brcm_570x_limit_vpd(struct pci_dev *dev) { /* * Only disable the VPD capability for 5706, 5706S, 5708, * 5708S and 5709 rev. 
A */ if ((dev->device == PCI_DEVICE_ID_NX2_5706) || (dev->device == PCI_DEVICE_ID_NX2_5706S) || (dev->device == PCI_DEVICE_ID_NX2_5708) || (dev->device == PCI_DEVICE_ID_NX2_5708S) || ((dev->device == PCI_DEVICE_ID_NX2_5709) && (dev->revision & 0xf0) == 0x0)) { if (dev->vpd) dev->vpd->len = 0x80; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706, quirk_brcm_570x_limit_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S, quirk_brcm_570x_limit_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708, quirk_brcm_570x_limit_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S, quirk_brcm_570x_limit_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709, quirk_brcm_570x_limit_vpd); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S, quirk_brcm_570x_limit_vpd); static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev) { u32 rev; pci_read_config_dword(dev, 0xf4, &rev); /* Only CAP the MRRS if the device is a 5719 A0 */ if (rev == 0x05719000) { int readrq = pcie_get_readrq(dev); if (readrq > 2048) pcie_set_readrq(dev, 2048); } } DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5719, quirk_brcm_5719_limit_mrrs); #ifdef CONFIG_PCIE_IPROC_PLATFORM static void quirk_paxc_bridge(struct pci_dev *pdev) { /* The PCI config space is shared with the PAXC root port and the first * Ethernet device. So, we need to workaround this by telling the PCI * code that the bridge is not an Ethernet device. */ if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) pdev->class = PCI_CLASS_BRIDGE_PCI << 8; /* MPSS is not being set properly (as it is currently 0). This is * because that area of the PCI config space is hard coded to zero, and * is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS) * so that the MPS can be set to the real max value. */ pdev->pcie_mpss = 2; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge); #endif /* Originally in EDAC sources for i82875P: * Intel tells BIOS developers to hide device 6 which * configures the overflow device access containing * the DRBs - this is where we expose device 6. * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm */ static void quirk_unhide_mch_dev6(struct pci_dev *dev) { u8 reg; if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) { dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n"); pci_write_config_byte(dev, 0xF4, reg | 0x02); } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, quirk_unhide_mch_dev6); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, quirk_unhide_mch_dev6); #ifdef CONFIG_TILEPRO /* * The Tilera TILEmpower tilepro platform needs to set the link speed * to 2.5GT(Giga-Transfers)/s (Gen 1). The default link speed * setting is 5GT/s (Gen 2). 0x98 is the Link Control2 PCIe * capability register of the PEX8624 PCIe switch. The switch * supports link speed auto negotiation, but falsely sets * the link speed to 5GT/s. */ static void quirk_tile_plx_gen1(struct pci_dev *dev) { if (tile_plx_gen1) { pci_write_config_dword(dev, 0x98, 0x1); mdelay(50); } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1); #endif /* CONFIG_TILEPRO */ #ifdef CONFIG_PCI_MSI /* Some chipsets do not support MSI. 
We cannot easily rely on setting * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually * some other buses controlled by the chipset even if Linux is not * aware of it. Instead of setting the flag on all buses in the * machine, simply disable MSI globally. */ static void quirk_disable_all_msi(struct pci_dev *dev) { pci_no_msi(); dev_warn(&dev->dev, "MSI quirk detected; MSI disabled\n"); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi); /* Disable MSI on chipsets that are known to not support it */ static void quirk_disable_msi(struct pci_dev *dev) { if (dev->subordinate) { dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n"); dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi); /* * The APC bridge device in AMD 780 family northbridges has some random * OEM subsystem ID in its vendor ID register (erratum 18), so instead * we use the possible vendor/device IDs of the host bridge for the * declared quirk, and search for the APC bridge by slot number. */ static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge) { struct pci_dev *apc_bridge; apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0)); if (apc_bridge) { if (apc_bridge->device == 0x9602) quirk_disable_msi(apc_bridge); pci_dev_put(apc_bridge); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi); /* Go through the list of Hypertransport capabilities and * return 1 if a HT MSI capability is found and enabled */ static int msi_ht_cap_enabled(struct pci_dev *dev) { int pos, ttl = PCI_FIND_CAP_TTL; pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); while (pos && ttl--) { u8 flags; if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { dev_info(&dev->dev, "Found %s HT MSI Mapping\n", flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled"); return (flags & HT_MSI_FLAGS_ENABLE) != 0; } pos = pci_find_next_ht_capability(dev, pos, HT_CAPTYPE_MSI_MAPPING); } return 0; } /* Check the hypertransport MSI mapping to know whether MSI is enabled or not */ static void quirk_msi_ht_cap(struct pci_dev *dev) { if (dev->subordinate && !msi_ht_cap_enabled(dev)) { dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n"); dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE, quirk_msi_ht_cap); /* The nVidia CK804 chipset may have 2 HT MSI mappings. * MSI are supported if the MSI capability set in any of these mappings. 
*/ static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev) { struct pci_dev *pdev; if (!dev->subordinate) return; /* check HT MSI cap on this chipset and the root one. * a single one having MSI is enough to be sure that MSI are supported. */ pdev = pci_get_slot(dev->bus, 0); if (!pdev) return; if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) { dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n"); dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; } pci_dev_put(pdev); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, quirk_nvidia_ck804_msi_ht_cap); /* Force enable MSI mapping capability on HT bridges */ static void ht_enable_msi_mapping(struct pci_dev *dev) { int pos, ttl = PCI_FIND_CAP_TTL; pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); while (pos && ttl--) { u8 flags; if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { dev_info(&dev->dev, "Enabling HT MSI Mapping\n"); pci_write_config_byte(dev, pos + HT_MSI_FLAGS, flags | HT_MSI_FLAGS_ENABLE); } pos = pci_find_next_ht_capability(dev, pos, HT_CAPTYPE_MSI_MAPPING); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, ht_enable_msi_mapping); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, ht_enable_msi_mapping); /* The P5N32-SLI motherboards from Asus have a problem with msi * for the MCP55 NIC. It is not yet determined whether the msi problem * also affects other devices. As for now, turn off msi for this device. */ static void nvenet_msi_disable(struct pci_dev *dev) { const char *board_name = dmi_get_system_info(DMI_BOARD_NAME); if (board_name && (strstr(board_name, "P5N32-SLI PREMIUM") || strstr(board_name, "P5N32-E SLI"))) { dev_info(&dev->dev, "Disabling msi for MCP55 NIC on P5N32-SLI\n"); dev->no_msi = 1; } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15, nvenet_msi_disable); /* * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing * config register. This register controls the routing of legacy * interrupts from devices that route through the MCP55. If this register * is misprogrammed, interrupts are only sent to the BSP, unlike * conventional systems where the IRQ is broadcast to all online CPUs. Not * having this register set properly prevents kdump from booting up * properly, so let's make sure that we have it set correctly. * Note that this is an undocumented register. 
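 * (A hedged explanation of the failure mode: the kdump kernel may come up on whichever CPU was running when the crash happened, and if legacy interrupts are steered only to the BSP, that CPU never sees them and the dump kernel appears to hang.)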
*/ static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev) { u32 cfg; if (!pci_find_capability(dev, PCI_CAP_ID_HT)) return; pci_read_config_dword(dev, 0x74, &cfg); if (cfg & ((1 << 2) | (1 << 15))) { printk(KERN_INFO "Rewriting irq routing register on MCP55\n"); cfg &= ~((1 << 2) | (1 << 15)); pci_write_config_dword(dev, 0x74, cfg); } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0, nvbridge_check_legacy_irq_routing); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4, nvbridge_check_legacy_irq_routing); static int ht_check_msi_mapping(struct pci_dev *dev) { int pos, ttl = PCI_FIND_CAP_TTL; int found = 0; /* check if there is HT MSI cap or enabled on this device */ pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); while (pos && ttl--) { u8 flags; if (found < 1) found = 1; if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { if (flags & HT_MSI_FLAGS_ENABLE) { if (found < 2) { found = 2; break; } } } pos = pci_find_next_ht_capability(dev, pos, HT_CAPTYPE_MSI_MAPPING); } return found; } static int host_bridge_with_leaf(struct pci_dev *host_bridge) { struct pci_dev *dev; int pos; int i, dev_no; int found = 0; dev_no = host_bridge->devfn >> 3; for (i = dev_no + 1; i < 0x20; i++) { dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0)); if (!dev) continue; /* found next host bridge ?*/ pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE); if (pos != 0) { pci_dev_put(dev); break; } if (ht_check_msi_mapping(dev)) { found = 1; pci_dev_put(dev); break; } pci_dev_put(dev); } return found; } #define PCI_HT_CAP_SLAVE_CTRL0 4 /* link control */ #define PCI_HT_CAP_SLAVE_CTRL1 8 /* link control to */ static int is_end_of_ht_chain(struct pci_dev *dev) { int pos, ctrl_off; int end = 0; u16 flags, ctrl; pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE); if (!pos) goto out; pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags); ctrl_off = ((flags >> 10) & 1) ? PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1; pci_read_config_word(dev, pos + ctrl_off, &ctrl); if (ctrl & (1 << 6)) end = 1; out: return end; } static void nv_ht_enable_msi_mapping(struct pci_dev *dev) { struct pci_dev *host_bridge; int pos; int i, dev_no; int found = 0; dev_no = dev->devfn >> 3; for (i = dev_no; i >= 0; i--) { host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0)); if (!host_bridge) continue; pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE); if (pos != 0) { found = 1; break; } pci_dev_put(host_bridge); } if (!found) return; /* don't enable end_device/host_bridge with leaf directly here */ if (host_bridge == dev && is_end_of_ht_chain(host_bridge) && host_bridge_with_leaf(host_bridge)) goto out; /* root did that ! 
*/ if (msi_ht_cap_enabled(host_bridge)) goto out; ht_enable_msi_mapping(dev); out: pci_dev_put(host_bridge); } static void ht_disable_msi_mapping(struct pci_dev *dev) { int pos, ttl = PCI_FIND_CAP_TTL; pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); while (pos && ttl--) { u8 flags; if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { dev_info(&dev->dev, "Disabling HT MSI Mapping\n"); pci_write_config_byte(dev, pos + HT_MSI_FLAGS, flags & ~HT_MSI_FLAGS_ENABLE); } pos = pci_find_next_ht_capability(dev, pos, HT_CAPTYPE_MSI_MAPPING); } } static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all) { struct pci_dev *host_bridge; int pos; int found; if (!pci_msi_enabled()) return; /* check if there is HT MSI cap or enabled on this device */ found = ht_check_msi_mapping(dev); /* no HT MSI CAP */ if (found == 0) return; /* * HT MSI mapping should be disabled on devices that are below * a non-Hypertransport host bridge. Locate the host bridge... */ host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); if (host_bridge == NULL) { dev_warn(&dev->dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n"); return; } pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE); if (pos != 0) { /* Host bridge is to HT */ if (found == 1) { /* it is not enabled, try to enable it */ if (all) ht_enable_msi_mapping(dev); else nv_ht_enable_msi_mapping(dev); } goto out; } /* HT MSI is not enabled */ if (found == 1) goto out; /* Host bridge is not to HT, disable HT MSI mapping on this device */ ht_disable_msi_mapping(dev); out: pci_dev_put(host_bridge); } static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev) { return __nv_msi_ht_cap_quirk(dev, 1); } static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev) { return __nv_msi_ht_cap_quirk(dev, 0); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all); static void quirk_msi_intx_disable_bug(struct pci_dev *dev) { dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; } static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev) { struct pci_dev *p; /* SB700 MSI issue will be fixed at HW level from revision A21, * we need check PCI REVISION ID of SMBus controller to get SB700 * revision. 
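 * (The SBx00 SMBus controller is present on all of these southbridges, so its PCI revision ID serves as a stand-in for the SB700 silicon revision; that is what the lookup below does.)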
p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL); if (!p) return; if ((p->revision < 0x3B) && (p->revision >= 0x30)) dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; pci_dev_put(p); } static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev) { /* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */ if (dev->revision < 0x18) { dev_info(&dev->dev, "set MSI_INTX_DISABLE_BUG flag\n"); dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390, quirk_msi_intx_disable_ati_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391, quirk_msi_intx_disable_ati_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392, quirk_msi_intx_disable_ati_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393, quirk_msi_intx_disable_ati_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394, quirk_msi_intx_disable_ati_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090, quirk_msi_intx_disable_qca_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091, quirk_msi_intx_disable_qca_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0, quirk_msi_intx_disable_qca_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1, quirk_msi_intx_disable_qca_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091, quirk_msi_intx_disable_qca_bug); #endif /* CONFIG_PCI_MSI */ /* Allow manual resource allocation for PCI hotplug bridges * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6), * the kernel fails to allocate resources when a hotplug device is * inserted and the PCI bus is rescanned. */ static void quirk_hotplug_bridge(struct pci_dev *dev) { dev->is_hotplug_bridge = 1; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge); /* * This is a quirk for the Ricoh MMC controller found as a part of * some multifunction chips. * This is very similar to, and based on, the ricoh_mmc driver written by * Philip Langdale. Thank you for these magic sequences.
* * These chips implement the four main memory card controllers (SD, MMC, MS, xD) * and one or both of cardbus or firewire. * * It happens that they implement SD and MMC * support as separate controllers (and PCI functions). The Linux SDHCI * driver supports MMC cards but the chip detects MMC cards in hardware * and directs them to the MMC controller - so the SDHCI driver never sees * them. * * To get around this, we must disable the useless MMC controller. * At that point, the SDHCI controller will start seeing them. It seems to be the case that the relevant PCI registers to deactivate the * MMC controller live on PCI function 0, which might be the cardbus controller * or the firewire controller, depending on the particular chip in question. * * This has to be done early, because as soon as we disable the MMC controller * other PCI functions shift up one level, e.g. function #2 becomes function * #1, and this will confuse the PCI core. */ #ifdef CONFIG_MMC_RICOH_MMC static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev) { /* disable via cardbus interface */ u8 write_enable; u8 write_target; u8 disable; /* disable must be done via function #0 */ if (PCI_FUNC(dev->devfn)) return; pci_read_config_byte(dev, 0xB7, &disable); if (disable & 0x02) return; pci_read_config_byte(dev, 0x8E, &write_enable); pci_write_config_byte(dev, 0x8E, 0xAA); pci_read_config_byte(dev, 0x8D, &write_target); pci_write_config_byte(dev, 0x8D, 0xB7); pci_write_config_byte(dev, 0xB7, disable | 0x02); pci_write_config_byte(dev, 0x8E, write_enable); pci_write_config_byte(dev, 0x8D, write_target); dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via cardbus function)\n"); dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n"); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476); static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) { /* disable via firewire interface */ u8 write_enable; u8 disable; /* disable must be done via function #0 */ if (PCI_FUNC(dev->devfn)) return; /* * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize * certain types of SD/MMC cards. Lowering the SD base * clock frequency from 200MHz to 50MHz fixes this issue.
* * 0x150 - SD2.0 mode enable for changing base clock * frequency to 50MHz * 0xe1 - Base clock frequency * 0x32 - 50MHz new clock frequency * 0xf9 - Key register for 0x150 * 0xfc - key register for 0xe1 */ if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 || dev->device == PCI_DEVICE_ID_RICOH_R5CE823) { pci_write_config_byte(dev, 0xf9, 0xfc); pci_write_config_byte(dev, 0x150, 0x10); pci_write_config_byte(dev, 0xf9, 0x00); pci_write_config_byte(dev, 0xfc, 0x01); pci_write_config_byte(dev, 0xe1, 0x32); pci_write_config_byte(dev, 0xfc, 0x00); dev_notice(&dev->dev, "MMC controller base frequency changed to 50MHz.\n"); } pci_read_config_byte(dev, 0xCB, &disable); if (disable & 0x02) return; pci_read_config_byte(dev, 0xCA, &write_enable); pci_write_config_byte(dev, 0xCA, 0x57); pci_write_config_byte(dev, 0xCB, disable | 0x02); pci_write_config_byte(dev, 0xCA, write_enable); dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n"); dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n"); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); #endif /*CONFIG_MMC_RICOH_MMC*/ #ifdef CONFIG_DMAR_TABLE #define VTUNCERRMSK_REG 0x1ac #define VTD_MSK_SPEC_ERRORS (1 << 31) /* * This is a quirk for masking vt-d spec defined errors to platform error * handling logic. Without this, platforms using Intel 7500, 5500 chipsets * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based * on the RAS config settings of the platform) when a vt-d fault happens. * The resulting SMI causes the system to hang. * * VT-d spec related errors are already handled by the VT-d OS code, so no * need to report the same error through other channels. */ static void vtd_mask_spec_errors(struct pci_dev *dev) { u32 word; pci_read_config_dword(dev, VTUNCERRMSK_REG, &word); pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors); #endif static void fixup_ti816x_class(struct pci_dev *dev) { u32 class = dev->class; /* TI 816x devices do not have class code set when in PCIe boot mode */ dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8; dev_info(&dev->dev, "PCI class overridden (%#08x -> %#08x)\n", class, dev->class); } DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800, PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class); /* Some PCIe devices do not work reliably with the claimed maximum * payload size supported.
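 * (pcie_mpss stores the supported payload size as a power-of-two exponent above 128 bytes, so the value 1 written below corresponds to a 256 byte maximum payload.)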
*/ static void fixup_mpss_256(struct pci_dev *dev) { dev->pcie_mpss = 1; /* 256 bytes */ } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE, PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256); /* Intel 5000 and 5100 Memory controllers have an errata with read completion * coalescing (which is enabled by default on some BIOSes) and MPS of 256B. * Since there is no way of knowing what the PCIE MPS on each fabric will be * until all of the devices are discovered and buses walked, read completion * coalescing must be disabled. Unfortunately, it cannot be re-enabled because * it is possible to hotplug a device with MPS of 256B. */ static void quirk_intel_mc_errata(struct pci_dev *dev) { int err; u16 rcc; if (pcie_bus_config == PCIE_BUS_TUNE_OFF || pcie_bus_config == PCIE_BUS_DEFAULT) return; /* Intel errata specifies bits to change but does not say what they are. * Keeping them magical until such time as the registers and values can * be explained. */ err = pci_read_config_word(dev, 0x48, &rcc); if (err) { dev_err(&dev->dev, "Error attempting to read the read completion coalescing register\n"); return; } if (!(rcc & (1 << 10))) return; rcc &= ~(1 << 10); err = pci_write_config_word(dev, 0x48, rcc); if (err) { dev_err(&dev->dev, "Error attempting to write the read completion coalescing register\n"); return; } pr_info_once("Read completion coalescing disabled due to hardware errata relating to 256B MPS\n"); } /* Intel 5000 series memory controllers and ports 2-7 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata); /* Intel 5100 series memory controllers and ports 2-7 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata); 
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata); /* * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum. To * work around this, query the device for the size the BAR should be configured to and * modify the resource end to correspond to this new size. */ static void quirk_intel_ntb(struct pci_dev *dev) { int rc; u8 val; rc = pci_read_config_byte(dev, 0x00D0, &val); if (rc) return; dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1; rc = pci_read_config_byte(dev, 0x00D1, &val); if (rc) return; dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb); static ktime_t fixup_debug_start(struct pci_dev *dev, void (*fn)(struct pci_dev *dev)) { ktime_t calltime = 0; dev_dbg(&dev->dev, "calling %pF\n", fn); if (initcall_debug) { pr_debug("calling %pF @ %i for %s\n", fn, task_pid_nr(current), dev_name(&dev->dev)); calltime = ktime_get(); } return calltime; } static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime, void (*fn)(struct pci_dev *dev)) { ktime_t delta, rettime; unsigned long long duration; if (initcall_debug) { rettime = ktime_get(); delta = ktime_sub(rettime, calltime); duration = (unsigned long long) ktime_to_ns(delta) >> 10; pr_debug("pci fixup %pF returned after %lld usecs for %s\n", fn, duration, dev_name(&dev->dev)); } } /* * Some BIOS implementations leave the Intel GPU interrupts enabled, * even though no one is handling them (e.g., the i915 driver is never loaded). * Additionally the interrupt destination is not set up properly * and the interrupt ends up -somewhere-. * * These spurious interrupts are "sticky" and the kernel disables * the (shared) interrupt line after 100,000+ generated interrupts. * * Fix it by disabling the still enabled interrupts. * This resolves crashes often seen on monitor unplug. */ #define I915_DEIER_REG 0x4400c static void disable_igfx_irq(struct pci_dev *dev) { void __iomem *regs = pci_iomap(dev, 0, 0); if (regs == NULL) { dev_warn(&dev->dev, "igfx quirk: Can't iomap PCI device\n"); return; } /* Check if any interrupt line is still enabled */ if (readl(regs + I915_DEIER_REG) != 0) { dev_warn(&dev->dev, "BIOS left Intel GPU interrupts enabled; disabling\n"); writel(0, regs + I915_DEIER_REG); } pci_iounmap(dev, regs); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq); /* * PCI devices which are on Intel chips can skip the 10ms delay * before entering D3 mode.
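 * (d3_delay is the per-device override for the delay the PCI core inserts around D3hot transitions; the PCI PM spec calls for 10 ms, and setting it to 0 below removes the wait entirely.)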
*/ static void quirk_remove_d3_delay(struct pci_dev *dev) { dev->d3_delay = 0; } /* C600 Series devices do not need 10ms d3_delay */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay); /* Lynxpoint-H PCH devices do not need 10ms d3_delay */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay); /* Intel Cherrytrail devices do not need 10ms d3_delay */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3_delay); /* * Some devices may pass our check in pci_intx_mask_supported() if * PCI_COMMAND_INTX_DISABLE works though they actually do not properly * support this feature. */ static void quirk_broken_intx_masking(struct pci_dev *dev) { dev->broken_intx_masking = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */ quirk_broken_intx_masking); /* * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10) * Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC * * RTL8110SC - Fails under PCI device assignment using DisINTx masking. */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169, quirk_broken_intx_masking); /* * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking, * DisINTx can be set but the interrupt status bit is non-functional. 
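 * (DisINTx is the Interrupt Disable bit in the PCI Command register; pci_intx_mask_supported() only verifies that the bit can be toggled, while these devices additionally fail to report pending interrupts in the Interrupt Status bit, which is what actually breaks masking.)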
*/ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2, quirk_broken_intx_masking); static u16 mellanox_broken_intx_devs[] = { PCI_DEVICE_ID_MELLANOX_HERMON_SDR, PCI_DEVICE_ID_MELLANOX_HERMON_DDR, PCI_DEVICE_ID_MELLANOX_HERMON_QDR, PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2, PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2, PCI_DEVICE_ID_MELLANOX_HERMON_EN, PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2, PCI_DEVICE_ID_MELLANOX_CONNECTX_EN, PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2, PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2, PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2, PCI_DEVICE_ID_MELLANOX_CONNECTX2, PCI_DEVICE_ID_MELLANOX_CONNECTX3, PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO, }; #define CONNECTX_4_CURR_MAX_MINOR 99 #define CONNECTX_4_INTX_SUPPORT_MINOR 14 /* * Check ConnectX-4/LX FW version to see if it supports legacy interrupts. * If so, don't mark it as broken. * FW minor > 99 means older FW version format and no INTx masking support. * FW minor < 14 means new FW version format and no INTx masking support. */ static void mellanox_check_broken_intx_masking(struct pci_dev *pdev) { __be32 __iomem *fw_ver; u16 fw_major; u16 fw_minor; u16 fw_subminor; u32 fw_maj_min; u32 fw_sub_min; int i; for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) { if (pdev->device == mellanox_broken_intx_devs[i]) { pdev->broken_intx_masking = 1; return; } } /* Getting here means Connect-IB cards and up. Connect-IB has no INTx * support so shouldn't be checked further */ if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB) return; if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 && pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) return; /* For ConnectX-4 and ConnectX-4LX, need to check FW support */ if (pci_enable_device_mem(pdev)) { dev_warn(&pdev->dev, "Can't enable device memory\n"); return; } fw_ver = ioremap(pci_resource_start(pdev, 0), 4); if (!fw_ver) { dev_warn(&pdev->dev, "Can't map ConnectX-4 initialization segment\n"); goto out; } /* Reading from resource space should be 32b aligned */ fw_maj_min = ioread32be(fw_ver); fw_sub_min = ioread32be(fw_ver + 1); fw_major = fw_maj_min & 0xffff; fw_minor = fw_maj_min >> 16; fw_subminor = fw_sub_min & 0xffff; if (fw_minor > CONNECTX_4_CURR_MAX_MINOR || fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) { dev_warn(&pdev->dev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. 
Please upgrade FW to %d.14.1100 and up for INTx support\n", fw_major, fw_minor, fw_subminor, pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14); pdev->broken_intx_masking = 1; } iounmap(fw_ver); out: pci_disable_device(pdev); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID, mellanox_check_broken_intx_masking); static void quirk_no_bus_reset(struct pci_dev *dev) { dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; } /* * Some Atheros AR9xxx and QCA988x chips do not behave correctly after a bus reset. * The device will throw a Link Down error on AER-capable systems and, * regardless of AER, config space of the device is never accessible again; * attempting to access it typically causes the system to hang or reset. * http://www.spinics.net/lists/linux-pci/msg34797.html */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset); static void quirk_no_pm_reset(struct pci_dev *dev) { /* * We can't do a bus reset on root bus devices, but an ineffective * PM reset may be better than nothing. */ if (!pci_is_root_bus(dev->bus)) dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET; } /* * Some AMD/ATI GPUs (HD8570 - Oland) report that a D3hot->D0 transition * causes a reset (i.e., they advertise NoSoftRst-). This transition seems * to have no effect on the device: it retains the framebuffer contents and * monitor sync. Advertising this support makes other layers, like VFIO, * assume pci_reset_function() is viable for this device. Mark it as * unavailable to skip it when testing reset methods. */ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset); /* * Thunderbolt controllers with broken MSI hotplug signaling: * Entire 1st generation (Light Ridge, Eagle Ridge, Light Peak) and part * of the 2nd generation (Cactus Ridge 4C up to revision 1, Port Ridge). */ static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev) { if (pdev->is_hotplug_bridge && (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C || pdev->revision <= 1)) pdev->no_msi = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE, quirk_thunderbolt_hotplug_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE, quirk_thunderbolt_hotplug_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK, quirk_thunderbolt_hotplug_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, quirk_thunderbolt_hotplug_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE, quirk_thunderbolt_hotplug_msi); static void quirk_chelsio_extend_vpd(struct pci_dev *dev) { int chip = (dev->device & 0xf000) >> 12; int func = (dev->device & 0x0f00) >> 8; int prod = (dev->device & 0x00ff) >> 0; /* * If this is a T3-based adapter, there's a 1KB VPD area at offset * 0xc00 which contains the preferred VPD values. If this is a T4 or * later based adapter, the special VPD is at offset 0x400 for the * Physical Functions (the SR-IOV Virtual Functions have no VPD * Capabilities). The PCI VPD Access core routines will normally * compute the size of the VPD by parsing the VPD Data Structure at * offset 0x000. This will result in silent failures when attempting * to access these other VPD areas which are beyond those computed * limits.
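 * (Chelsio encodes the chip generation in the top nibble of the PCI Device ID, e.g. 0x4xxx for T4-based and 0x5xxx for T5-based parts, which is what the chip/func/prod decoding above relies on.)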
*/ if (chip == 0x0 && prod >= 0x20) pci_set_vpd_size(dev, 8192); else if (chip >= 0x4 && func < 0x8) pci_set_vpd_size(dev, 2048); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, quirk_chelsio_extend_vpd); #ifdef CONFIG_ACPI /* * Apple: Shutdown Cactus Ridge Thunderbolt controller. * * On Apple hardware the Cactus Ridge Thunderbolt controller needs to be * shutdown before suspend. Otherwise the native host interface (NHI) will not * be present after resume if a device was plugged in before suspend. * * The thunderbolt controller consists of a pcie switch with downstream * bridges leading to the NHI and to the tunnel pci bridges. * * This quirk cuts power to the whole chip. Therefore we have to apply it * during suspend_noirq of the upstream bridge. * * Power is automagically restored before resume. No action is needed. */ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev) { acpi_handle bridge, SXIO, SXFP, SXLV; if (!x86_apple_machine) return; if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM) return; bridge = ACPI_HANDLE(&dev->dev); if (!bridge) return; /* * SXIO and SXLV are present only on machines requiring this quirk. * TB bridges in external devices might have the same device id as those * on the host, but they will not have the associated ACPI methods. This * implicitly checks that we are at the right bridge. */ if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO)) || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP)) || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV))) return; dev_info(&dev->dev, "quirk: cutting power to thunderbolt controller...\n"); /* magic sequence */ acpi_execute_simple_method(SXIO, NULL, 1); acpi_execute_simple_method(SXFP, NULL, 0); msleep(300); acpi_execute_simple_method(SXLV, NULL, 0); acpi_execute_simple_method(SXIO, NULL, 0); acpi_execute_simple_method(SXLV, NULL, 0); } DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, quirk_apple_poweroff_thunderbolt); /* * Apple: Wait for the thunderbolt controller to reestablish pci tunnels. * * During suspend the thunderbolt controller is reset and all pci * tunnels are lost. The NHI driver will try to reestablish all tunnels * during resume. We have to manually wait for the NHI since there is * no parent child relationship between the NHI and the tunneled * bridges. */ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev) { struct pci_dev *sibling = NULL; struct pci_dev *nhi = NULL; if (!x86_apple_machine) return; if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM) return; /* * Find the NHI and confirm that we are a bridge on the tb host * controller and not on a tb endpoint. 
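 * (device_pm_wait_for_dev(), used below, simply makes resume of this bridge wait until the NHI has finished its own resume, standing in for the parent/child ordering the PM core would otherwise provide.)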
*/ sibling = pci_get_slot(dev->bus, 0x0); if (sibling == dev) goto out; /* we are the downstream bridge to the NHI */ if (!sibling || !sibling->subordinate) goto out; nhi = pci_get_slot(sibling->subordinate, 0x0); if (!nhi) goto out; if (nhi->vendor != PCI_VENDOR_ID_INTEL || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE && nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C && nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI && nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI) || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8) goto out; dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n"); device_pm_wait_for_dev(&dev->dev, &nhi->dev); out: pci_dev_put(nhi); pci_dev_put(sibling); } DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE, quirk_apple_wait_for_thunderbolt); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, quirk_apple_wait_for_thunderbolt); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE, quirk_apple_wait_for_thunderbolt); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE, quirk_apple_wait_for_thunderbolt); #endif static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) { ktime_t calltime; for (; f < end; f++) if ((f->class == (u32) (dev->class >> f->class_shift) || f->class == (u32) PCI_ANY_ID) && (f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) && (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { calltime = fixup_debug_start(dev, f->hook); f->hook(dev); fixup_debug_report(dev, calltime, f->hook); } } extern struct pci_fixup __start_pci_fixups_early[]; extern struct pci_fixup __end_pci_fixups_early[]; extern struct pci_fixup __start_pci_fixups_header[]; extern struct pci_fixup __end_pci_fixups_header[]; extern struct pci_fixup __start_pci_fixups_final[]; extern struct pci_fixup __end_pci_fixups_final[]; extern struct pci_fixup __start_pci_fixups_enable[]; extern struct pci_fixup __end_pci_fixups_enable[]; extern struct pci_fixup __start_pci_fixups_resume[]; extern struct pci_fixup __end_pci_fixups_resume[]; extern struct pci_fixup __start_pci_fixups_resume_early[]; extern struct pci_fixup __end_pci_fixups_resume_early[]; extern struct pci_fixup __start_pci_fixups_suspend[]; extern struct pci_fixup __end_pci_fixups_suspend[]; extern struct pci_fixup __start_pci_fixups_suspend_late[]; extern struct pci_fixup __end_pci_fixups_suspend_late[]; static bool pci_apply_fixup_final_quirks; void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) { struct pci_fixup *start, *end; switch (pass) { case pci_fixup_early: start = __start_pci_fixups_early; end = __end_pci_fixups_early; break; case pci_fixup_header: start = __start_pci_fixups_header; end = __end_pci_fixups_header; break; case pci_fixup_final: if (!pci_apply_fixup_final_quirks) return; start = __start_pci_fixups_final; end = __end_pci_fixups_final; break; case pci_fixup_enable: start = __start_pci_fixups_enable; end = __end_pci_fixups_enable; break; case pci_fixup_resume: start = __start_pci_fixups_resume; end = __end_pci_fixups_resume; break; case pci_fixup_resume_early: start = __start_pci_fixups_resume_early; end = __end_pci_fixups_resume_early; break; case pci_fixup_suspend: start = __start_pci_fixups_suspend; end = __end_pci_fixups_suspend; break; case pci_fixup_suspend_late: start = __start_pci_fixups_suspend_late; end = __end_pci_fixups_suspend_late; 
break; default: /* stupid compiler warning, you would think with an enum... */ return; } pci_do_fixups(dev, start, end); } EXPORT_SYMBOL(pci_fixup_device); static int __init pci_apply_final_quirks(void) { struct pci_dev *dev = NULL; u8 cls = 0; u8 tmp; if (pci_cache_line_size) printk(KERN_DEBUG "PCI: CLS %u bytes\n", pci_cache_line_size << 2); pci_apply_fixup_final_quirks = true; for_each_pci_dev(dev) { pci_fixup_device(pci_fixup_final, dev); /* * If arch hasn't set it explicitly yet, use the CLS * value shared by all PCI devices. If there's a * mismatch, fall back to the default value. */ if (!pci_cache_line_size) { pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp); if (!cls) cls = tmp; if (!tmp || cls == tmp) continue; printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n", cls << 2, tmp << 2, pci_dfl_cache_line_size << 2); pci_cache_line_size = pci_dfl_cache_line_size; } } if (!pci_cache_line_size) { printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n", cls << 2, pci_dfl_cache_line_size << 2); pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size; } return 0; } fs_initcall_sync(pci_apply_final_quirks); /* * Following are device-specific reset methods which can be used to * reset a single function if other methods (e.g. FLR, PM D0->D3) are * not available. */ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe) { /* * http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf * * The 82599 supports FLR on VFs, but FLR support is reported only * in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5). * Thus we must call pcie_flr() directly without first checking if it is * supported. */ if (!probe) pcie_flr(dev); return 0; } #define SOUTH_CHICKEN2 0xc2004 #define PCH_PP_STATUS 0xc7200 #define PCH_PP_CONTROL 0xc7204 #define MSG_CTL 0x45010 #define NSDE_PWR_STATE 0xd0100 #define IGD_OPERATION_TIMEOUT 10000 /* set timeout 10 seconds */ static int reset_ivb_igd(struct pci_dev *dev, int probe) { void __iomem *mmio_base; unsigned long timeout; u32 val; if (probe) return 0; mmio_base = pci_iomap(dev, 0, 0); if (!mmio_base) return -ENOMEM; iowrite32(0x00000002, mmio_base + MSG_CTL); /* * Clobbering SOUTH_CHICKEN2 register is fine only if the next * driver loaded sets the right bits. However, this's a reset and * the bits have been set by i915 previously, so we clobber * SOUTH_CHICKEN2 register directly here. */ iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2); val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe; iowrite32(val, mmio_base + PCH_PP_CONTROL); timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT); do { val = ioread32(mmio_base + PCH_PP_STATUS); if ((val & 0xb0000000) == 0) goto reset_complete; msleep(10); } while (time_before(jiffies, timeout)); dev_warn(&dev->dev, "timeout during reset\n"); reset_complete: iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE); pci_iounmap(dev, mmio_base); return 0; } /* * Device-specific reset method for Chelsio T4-based adapters. */ static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe) { u16 old_command; u16 msix_flags; /* * If this isn't a Chelsio T4-based device, return -ENOTTY indicating * that we have no device-specific reset method. */ if ((dev->device & 0xf000) != 0x4000) return -ENOTTY; /* * If this is the "probe" phase, return 0 indicating that we can * reset this device. */ if (probe) return 0; /* * T4 can wedge if there are DMAs in flight within the chip and Bus * Master has been disabled. 
We need to have it on till the Function * Level Reset completes. (BUS_MASTER is disabled in * pci_reset_function()). */ pci_read_config_word(dev, PCI_COMMAND, &old_command); pci_write_config_word(dev, PCI_COMMAND, old_command | PCI_COMMAND_MASTER); /* * Perform the actual device function reset, saving and restoring * configuration information around the reset. */ pci_save_state(dev); /* * T4 also suffers a Head-Of-Line blocking problem if MSI-X interrupts * are disabled when an MSI-X interrupt message needs to be delivered. * So we briefly re-enable MSI-X interrupts for the duration of the * FLR. The pci_restore_state() below will restore the original * MSI-X state. */ pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags); if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0) pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, msix_flags | PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); pcie_flr(dev); /* * Restore the configuration information (BAR values, etc.) including * the original PCI Configuration Space Command word, and return * success. */ pci_restore_state(dev); pci_write_config_word(dev, PCI_COMMAND, old_command); return 0; } #define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed #define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156 #define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166 static const struct pci_dev_reset_methods pci_dev_reset_methods[] = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF, reset_intel_82599_sfp_virtfn }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA, reset_ivb_igd }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA, reset_ivb_igd }, { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, reset_chelsio_generic_dev }, { 0 } }; /* * These device-specific reset methods are here rather than in a driver * because when a host assigns a device to a guest VM, the host may need * to reset the device but probably doesn't have a driver for it. */ int pci_dev_specific_reset(struct pci_dev *dev, int probe) { const struct pci_dev_reset_methods *i; for (i = pci_dev_reset_methods; i->reset; i++) { if ((i->vendor == dev->vendor || i->vendor == (u16)PCI_ANY_ID) && (i->device == dev->device || i->device == (u16)PCI_ANY_ID)) return i->reset(dev, probe); } return -ENOTTY; } static void quirk_dma_func0_alias(struct pci_dev *dev) { if (PCI_FUNC(dev->devfn) != 0) pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); } /* * https://bugzilla.redhat.com/show_bug.cgi?id=605888 * * Some Ricoh devices use function 0 as the PCIe requester ID for DMA. */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias); static void quirk_dma_func1_alias(struct pci_dev *dev) { if (PCI_FUNC(dev->devfn) != 1) pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1)); } /* * Marvell 88SE9123 uses function 1 as the requester ID for DMA. In some * SKUs function 1 is present and is a legacy IDE controller, in other * SKUs this function is not present, making this a ghost requester. 
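 * (pci_add_dma_alias() records the extra requester ID so that IOMMU mappings are created for it as well; for these parts the alias is simply function 1 of the same slot, i.e. pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1)) as done in quirk_dma_func1_alias() above.)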
* https://bugzilla.kernel.org/show_bug.cgi?id=42679 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c59 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c78 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645, quirk_dma_func1_alias); /* https://bugs.gentoo.org/show_bug.cgi?id=497630 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB388_ESD, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c117 */ DECLARE_PCI_FIXUP_HEADER(0x1c28, /* Lite-On */ 0x0122, /* Plextor M6E (Marvell 88SS9183)*/ quirk_dma_func1_alias); /* * Some devices DMA with the wrong devfn, not just the wrong function. * quirk_fixed_dma_alias() uses this table to create fixed aliases, where * the alias is "fixed" and independent of the device devfn. * * For example, the Adaptec 3405 is a PCIe card with an Intel 80333 I/O * processor. To software, this appears as a PCIe-to-PCI/X bridge with a * single device on the secondary bus. In reality, the single exposed * device at 0e.0 is the Address Translation Unit (ATU) of the controller * that provides a bridge to the internal bus of the I/O processor. The * controller supports private devices, which can be hidden from PCI config * space. In the case of the Adaptec 3405, a private device at 01.0 * appears to be the DMA engine, which therefore needs to become a DMA * alias for the device. */ static const struct pci_device_id fixed_dma_alias_tbl[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285, PCI_VENDOR_ID_ADAPTEC2, 0x02bb), /* Adaptec 3405 */ .driver_data = PCI_DEVFN(1, 0) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285, PCI_VENDOR_ID_ADAPTEC2, 0x02bc), /* Adaptec 3805 */ .driver_data = PCI_DEVFN(1, 0) }, { 0 } }; static void quirk_fixed_dma_alias(struct pci_dev *dev) { const struct pci_device_id *id; id = pci_match_id(fixed_dma_alias_tbl, dev); if (id) pci_add_dma_alias(dev, id->driver_data); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias); /* * A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in * using the wrong DMA alias for the device. Some of these devices can be * used as either forward or reverse bridges, so we need to test whether the * device is operating in the correct mode. 
We could probably apply this * quirk to PCI_ANY_ID, but for now we'll just use known offenders. The test * is for a non-root, non-PCIe bridge where the upstream device is PCIe and * is not a PCIe-to-PCI bridge, then @pdev is actually a PCIe-to-PCI bridge. */ static void quirk_use_pcie_bridge_dma_alias(struct pci_dev *pdev) { if (!pci_is_root_bus(pdev->bus) && pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE && !pci_is_pcie(pdev) && pci_is_pcie(pdev->bus->self) && pci_pcie_type(pdev->bus->self) != PCI_EXP_TYPE_PCI_BRIDGE) pdev->dev_flags |= PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS; } /* ASM1083/1085, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c46 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_use_pcie_bridge_dma_alias); /* Tundra 8113, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c43 */ DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias); /* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */ DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias); /* ITE 8893 has the same problem as the 8892 */ DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8893, quirk_use_pcie_bridge_dma_alias); /* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */ DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias); /* * MIC x200 NTB forwards PCIe traffic using multiple alien RIDs. They have to * be added as aliases to the DMA device in order to allow buffer access * when IOMMU is enabled. Following devfns have to match RIT-LUT table * programmed in the EEPROM. */ static void quirk_mic_x200_dma_alias(struct pci_dev *pdev) { pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0)); pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0)); pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3)); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias); /* * The IOMMU and interrupt controller on Broadcom Vulcan/Cavium ThunderX2 are * associated not at the root bus, but at a bridge below. This quirk avoids * generating invalid DMA aliases. */ static void quirk_bridge_cavm_thrx2_pcie_root(struct pci_dev *pdev) { pdev->dev_flags |= PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000, quirk_bridge_cavm_thrx2_pcie_root); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084, quirk_bridge_cavm_thrx2_pcie_root); /* * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero) * class code. Fix it. */ static void quirk_tw686x_class(struct pci_dev *pdev) { u32 class = pdev->class; /* Use "Multimedia controller" class */ pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01; dev_info(&pdev->dev, "TW686x PCI class overridden (%#08x -> %#08x)\n", class, pdev->class); } DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8, quirk_tw686x_class); DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6865, PCI_CLASS_NOT_DEFINED, 8, quirk_tw686x_class); DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8, quirk_tw686x_class); DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8, quirk_tw686x_class); /* * Some devices have problems with Transaction Layer Packets with the Relaxed * Ordering Attribute set. Such devices should mark themselves and other * Device Drivers should check before sending TLPs with RO set. 
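 * (Concretely, the quirk below sets PCI_DEV_FLAGS_NO_RELAXED_ORDERING in dev_flags; drivers that want to use Relaxed Ordering are expected to check for that flag first.)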
*/ static void quirk_relaxedordering_disable(struct pci_dev *dev) { dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING; dev_info(&dev->dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n"); } /* * Intel Xeon processors based on Broadwell/Haswell microarchitecture Root * Complex has a Flow Control Credit issue which can cause performance * problems with Upstream Transaction Layer Packets with Relaxed Ordering set. */ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, 
PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); /* * The AMD ARM A1100 (AKA "SEATTLE") SoC has a bug in its PCIe Root Complex * where Upstream Transaction Layer Packets with the Relaxed Ordering * Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering * set. This is a violation of the PCIe 3.0 Transaction Ordering Rules * outlined in Section 2.4.1 (PCI Express(r) Base Specification Revision 3.0 * November 10, 2010). As a result, on this platform we can't use Relaxed * Ordering for Upstream TLPs. */ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); /* * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same * values for the Attribute as were supplied in the header of the * corresponding Request, except as explicitly allowed when IDO is used." * * If a non-compliant device generates a completion with a different * attribute than the request, the receiver may accept it (which itself * seems non-compliant based on sec 2.3.2), or it may handle it as a * Malformed TLP or an Unexpected Completion, which will probably lead to a * device access timeout. * * If the non-compliant device generates completions with zero attributes * (instead of copying the attributes from the request), we can work around * this by disabling the "Relaxed Ordering" and "No Snoop" attributes in * upstream devices so they always generate requests with zero attributes. * * This affects other devices under the same Root Port, but since these * attributes are performance hints, there should be no functional problem. * * Note that Configuration Space accesses are never supposed to have TLP * Attributes, so we're safe waiting till after any Configuration Space * accesses to do the Root Port fixup. */ static void quirk_disable_root_port_attributes(struct pci_dev *pdev) { struct pci_dev *root_port = pci_find_pcie_root_port(pdev); if (!root_port) { dev_warn(&pdev->dev, "PCIe Completion erratum may cause device errors\n"); return; } dev_info(&root_port->dev, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n", dev_name(&pdev->dev)); pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0); } /* * The Chelsio T5 chip fails to copy TLP Attributes from a Request to the * Completion it generates. */ static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev) { /* * This mask/compare operation selects for Physical Function 4 on a * T5. We only need to fix up the Root Port once for any of the * PFs. PF[0..3] have PCI Device IDs of 0x50xx, but PF4 is uniquely * 0x54xx so we use that one, */ if ((pdev->device & 0xff00) == 0x5400) quirk_disable_root_port_attributes(pdev); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, quirk_chelsio_T5_disable_root_port_attributes); /* * AMD has indicated that the devices below do not support peer-to-peer * in any system where they are found in the southbridge with an AMD * IOMMU in the system. Multifunction devices that do not support * peer-to-peer between functions can claim to support a subset of ACS. 
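 * (ACS is the PCIe Access Control Services capability; the IOMMU grouping code consults it to decide whether functions can be isolated from one another, so reporting an equivalent subset of ACS here allows these functions to be treated as isolated.)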
 * Such devices effectively enable request redirect (RR) and completion
 * redirect (CR) since all transactions are redirected to the upstream
 * root complex.
 *
 * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94086
 * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94102
 * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/99402
 *
 * 1002:4385 SBx00 SMBus Controller
 * 1002:439c SB7x0/SB8x0/SB9x0 IDE Controller
 * 1002:4383 SBx00 Azalia (Intel HDA)
 * 1002:439d SB7x0/SB8x0/SB9x0 LPC host controller
 * 1002:4384 SBx00 PCI to PCI Bridge
 * 1002:4399 SB7x0/SB8x0/SB9x0 USB OHCI2 Controller
 *
 * https://bugzilla.kernel.org/show_bug.cgi?id=81841#c15
 *
 * 1022:780f [AMD] FCH PCI Bridge
 * 1022:7809 [AMD] FCH USB OHCI Controller
 */
static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
{
#ifdef CONFIG_ACPI
	struct acpi_table_header *header = NULL;
	acpi_status status;

	/* Targeting multifunction devices on the SB (appears on root bus) */
	if (!dev->multifunction || !pci_is_root_bus(dev->bus))
		return -ENODEV;

	/* The IVRS table describes the AMD IOMMU */
	status = acpi_get_table("IVRS", 0, &header);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	/* Filter out flags not applicable to multifunction */
	acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);

	return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
#else
	return -ENODEV;
#endif
}

static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
{
	/*
	 * Effectively selects all downstream ports for the whole ThunderX 1
	 * family by the 0xf800 mask (which represents 8 SoCs), while the
	 * lower bits of the device ID indicate which subdevice is used
	 * within the SoC.
	 */
	return (pci_is_pcie(dev) &&
		(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) &&
		((dev->device & 0xf800) == 0xa000));
}

static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
	/*
	 * Cavium root ports don't advertise an ACS capability.  However,
	 * the RTL internally implements similar protection as if ACS had
	 * Request Redirection, Completion Redirection, Source Validation,
	 * and Upstream Forwarding features enabled.  Assert that the
	 * hardware implements and enables equivalent ACS functionality for
	 * these flags.
	 */
	acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);

	if (!pci_quirk_cavium_acs_match(dev))
		return -ENOTTY;

	return acs_flags ? 0 : 1;
}

static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
{
	/*
	 * X-Gene Root Ports matching this quirk do not allow peer-to-peer
	 * transactions with others, allowing masking out these bits as if
	 * they were unimplemented in the ACS capability.
	 */
	acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);

	return acs_flags ? 0 : 1;
}

/*
 * Many Intel PCH root ports do provide ACS-like features to disable peer
 * transactions and validate bus numbers in requests, but do not provide an
 * actual PCIe ACS capability.  This is the list of device IDs known to fall
 * into that category as provided by Intel in Red Hat bugzilla 1037684.
*/ static const u16 pci_quirk_intel_pch_acs_ids[] = { /* Ibexpeak PCH */ 0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49, 0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51, /* Cougarpoint PCH */ 0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17, 0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f, /* Pantherpoint PCH */ 0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17, 0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f, /* Lynxpoint-H PCH */ 0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17, 0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f, /* Lynxpoint-LP PCH */ 0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17, 0x9c18, 0x9c19, 0x9c1a, 0x9c1b, /* Wildcat PCH */ 0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97, 0x9c98, 0x9c99, 0x9c9a, 0x9c9b, /* Patsburg (X79) PCH */ 0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e, /* Wellsburg (X99) PCH */ 0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17, 0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e, /* Lynx Point (9 series) PCH */ 0x8c90, 0x8c92, 0x8c94, 0x8c96, 0x8c98, 0x8c9a, 0x8c9c, 0x8c9e, }; static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev) { int i; /* Filter out a few obvious non-matches first */ if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) return false; for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++) if (pci_quirk_intel_pch_acs_ids[i] == dev->device) return true; return false; } #define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV) static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags) { u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ? INTEL_PCH_ACS_FLAGS : 0; if (!pci_quirk_intel_pch_acs_match(dev)) return -ENOTTY; return acs_flags & ~flags ? 0 : 1; } /* * These QCOM root ports do provide ACS-like features to disable peer * transactions and validate bus numbers in requests, but do not provide an * actual PCIe ACS capability. Hardware supports source validation but it * will report the issue as Completer Abort instead of ACS Violation. * Hardware doesn't support peer-to-peer and each root port is a root * complex with unique segment numbers. It is not possible for one root * port to pass traffic to another root port. All PCIe transactions are * terminated inside the root port. */ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags) { u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV); int ret = acs_flags & ~flags ? 0 : 1; dev_info(&dev->dev, "Using QCOM ACS Quirk (%d)\n", ret); return ret; } /* * Sunrise Point PCH root ports implement ACS, but unfortunately as shown in * the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2, * 12.1.46, 12.1.47)[1] this chipset uses dwords for the ACS capability and * control registers whereas the PCIe spec packs them into words (Rev 3.0, * 7.16 ACS Extended Capability). The bit definitions are correct, but the * control register is at offset 8 instead of 6 and we should probably use * dword accesses to them. This applies to the following PCI Device IDs, as * found in volume 1 of the datasheet[2]: * * 0xa110-0xa11f Sunrise Point-H PCI Express Root Port #{0-16} * 0xa167-0xa16a Sunrise Point-H PCI Express Root Port #{17-20} * * N.B. This doesn't fix what lspci shows. * * The 100 series chipset specification update includes this as errata #23[3]. 
* * The 200 series chipset (Union Point) has the same bug according to the * specification update (Intel 200 Series Chipset Family Platform Controller * Hub, Specification Update, January 2017, Revision 001, Document# 335194-001, * Errata 22)[4]. Per the datasheet[5], root port PCI Device IDs for this * chipset include: * * 0xa290-0xa29f PCI Express Root port #{0-16} * 0xa2e7-0xa2ee PCI Express Root port #{17-24} * * Mobile chipsets are also affected, 7th & 8th Generation * Specification update confirms ACS errata 22, status no fix: (7th Generation * Intel Processor Family I/O for U/Y Platforms and 8th Generation Intel * Processor Family I/O for U Quad Core Platforms Specification Update, * August 2017, Revision 002, Document#: 334660-002)[6] * Device IDs from I/O datasheet: (7th Generation Intel Processor Family I/O * for U/Y Platforms and 8th Generation Intel ® Processor Family I/O for U * Quad Core Platforms, Vol 1 of 2, August 2017, Document#: 334658-003)[7] * * 0x9d10-0x9d1b PCI Express Root port #{1-12} * * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html */ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev) { if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) return false; switch (dev->device) { case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */ case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */ case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */ return true; } return false; } #define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4) static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags) { int pos; u32 cap, ctrl; if (!pci_quirk_intel_spt_pch_acs_match(dev)) return -ENOTTY; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); if (!pos) return -ENOTTY; /* see pci_acs_flags_enabled() */ pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap); acs_flags &= (cap | PCI_ACS_EC); pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl); return acs_flags & ~ctrl ? 0 : 1; } static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) { /* * SV, TB, and UF are not relevant to multifunction endpoints. * * Multifunction devices are only required to implement RR, CR, and DT * in their ACS capability if they support peer-to-peer transactions. * Devices matching this quirk have been verified by the vendor to not * perform peer-to-peer with other functions, allowing us to mask out * these bits as if they were unimplemented in the ACS capability. */ acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); return acs_flags ? 
0 : 1; } static const struct pci_dev_acs_enabled { u16 vendor; u16 device; int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags); } pci_dev_acs_enabled[] = { { PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_AMD, 0x780f, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_SOLARFLARE, 0x0A03, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs }, /* 82580 */ { PCI_VENDOR_ID_INTEL, 0x1509, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x150E, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x150F, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1510, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1511, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1516, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1527, pci_quirk_mf_endpoint_acs }, /* 82576 */ { PCI_VENDOR_ID_INTEL, 0x10C9, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10E6, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10E7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10E8, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x150A, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x150D, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1518, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1526, pci_quirk_mf_endpoint_acs }, /* 82575 */ { PCI_VENDOR_ID_INTEL, 0x10A7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10A9, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10D6, pci_quirk_mf_endpoint_acs }, /* I350 */ { PCI_VENDOR_ID_INTEL, 0x1521, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1522, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1523, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1524, pci_quirk_mf_endpoint_acs }, /* 82571 (Quads omitted due to non-ACS switch) */ { PCI_VENDOR_ID_INTEL, 0x105E, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x105F, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1060, 
pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10D9, pci_quirk_mf_endpoint_acs }, /* I219 */ { PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs }, /* QCOM QDF2xxx root ports */ { 0x17cb, 0x400, pci_quirk_qcom_rp_acs }, { 0x17cb, 0x401, pci_quirk_qcom_rp_acs }, /* Intel PCH root ports */ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs }, { 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */ { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */ /* Cavium ThunderX */ { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs }, /* APM X-Gene */ { PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs }, { 0 } }; int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags) { const struct pci_dev_acs_enabled *i; int ret; /* * Allow devices that do not expose standard PCIe ACS capabilities * or control to indicate their support here. Multi-function express * devices which do not allow internal peer-to-peer between functions, * but do not implement PCIe ACS may wish to return true here. */ for (i = pci_dev_acs_enabled; i->acs_enabled; i++) { if ((i->vendor == dev->vendor || i->vendor == (u16)PCI_ANY_ID) && (i->device == dev->device || i->device == (u16)PCI_ANY_ID)) { ret = i->acs_enabled(dev, acs_flags); if (ret >= 0) return ret; } } return -ENOTTY; } /* Config space offset of Root Complex Base Address register */ #define INTEL_LPC_RCBA_REG 0xf0 /* 31:14 RCBA address */ #define INTEL_LPC_RCBA_MASK 0xffffc000 /* RCBA Enable */ #define INTEL_LPC_RCBA_ENABLE (1 << 0) /* Backbone Scratch Pad Register */ #define INTEL_BSPR_REG 0x1104 /* Backbone Peer Non-Posted Disable */ #define INTEL_BSPR_REG_BPNPD (1 << 8) /* Backbone Peer Posted Disable */ #define INTEL_BSPR_REG_BPPD (1 << 9) /* Upstream Peer Decode Configuration Register */ #define INTEL_UPDCR_REG 0x1114 /* 5:0 Peer Decode Enable bits */ #define INTEL_UPDCR_REG_MASK 0x3f static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev) { u32 rcba, bspr, updcr; void __iomem *rcba_mem; /* * Read the RCBA register from the LPC (D31:F0). PCH root ports * are D28:F* and therefore get probed before LPC, thus we can't * use pci_get_slot/pci_read_config_dword here. */ pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0), INTEL_LPC_RCBA_REG, &rcba); if (!(rcba & INTEL_LPC_RCBA_ENABLE)) return -EINVAL; rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK, PAGE_ALIGN(INTEL_UPDCR_REG)); if (!rcba_mem) return -ENOMEM; /* * The BSPR can disallow peer cycles, but it's set by soft strap and * therefore read-only. If both posted and non-posted peer cycles are * disallowed, we're ok. If either are allowed, then we need to use * the UPDCR to disable peer decodes for each port. 
This provides the * PCIe ACS equivalent of PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF */ bspr = readl(rcba_mem + INTEL_BSPR_REG); bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD; if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) { updcr = readl(rcba_mem + INTEL_UPDCR_REG); if (updcr & INTEL_UPDCR_REG_MASK) { dev_info(&dev->dev, "Disabling UPDCR peer decodes\n"); updcr &= ~INTEL_UPDCR_REG_MASK; writel(updcr, rcba_mem + INTEL_UPDCR_REG); } } iounmap(rcba_mem); return 0; } /* Miscellaneous Port Configuration register */ #define INTEL_MPC_REG 0xd8 /* MPC: Invalid Receive Bus Number Check Enable */ #define INTEL_MPC_REG_IRBNCE (1 << 26) static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev) { u32 mpc; /* * When enabled, the IRBNCE bit of the MPC register enables the * equivalent of PCI ACS Source Validation (PCI_ACS_SV), which * ensures that requester IDs fall within the bus number range * of the bridge. Enable if not already. */ pci_read_config_dword(dev, INTEL_MPC_REG, &mpc); if (!(mpc & INTEL_MPC_REG_IRBNCE)) { dev_info(&dev->dev, "Enabling MPC IRBNCE\n"); mpc |= INTEL_MPC_REG_IRBNCE; pci_write_config_word(dev, INTEL_MPC_REG, mpc); } } static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev) { if (!pci_quirk_intel_pch_acs_match(dev)) return -ENOTTY; if (pci_quirk_enable_intel_lpc_acs(dev)) { dev_warn(&dev->dev, "Failed to enable Intel PCH ACS quirk\n"); return 0; } pci_quirk_enable_intel_rp_mpc_acs(dev); dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK; dev_info(&dev->dev, "Intel PCH root port ACS workaround enabled\n"); return 0; } static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev) { int pos; u32 cap, ctrl; if (!pci_quirk_intel_spt_pch_acs_match(dev)) return -ENOTTY; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); if (!pos) return -ENOTTY; pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap); pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl); ctrl |= (cap & PCI_ACS_SV); ctrl |= (cap & PCI_ACS_RR); ctrl |= (cap & PCI_ACS_CR); ctrl |= (cap & PCI_ACS_UF); pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl); dev_info(&dev->dev, "Intel SPT PCH root port ACS workaround enabled\n"); return 0; } static const struct pci_dev_enable_acs { u16 vendor; u16 device; int (*enable_acs)(struct pci_dev *dev); } pci_dev_enable_acs[] = { { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs }, { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_spt_pch_acs }, { 0 } }; int pci_dev_specific_enable_acs(struct pci_dev *dev) { const struct pci_dev_enable_acs *i; int ret; for (i = pci_dev_enable_acs; i->enable_acs; i++) { if ((i->vendor == dev->vendor || i->vendor == (u16)PCI_ANY_ID) && (i->device == dev->device || i->device == (u16)PCI_ANY_ID)) { ret = i->enable_acs(dev); if (ret >= 0) return ret; } } return -ENOTTY; } /* * The PCI capabilities list for Intel DH895xCC VFs (device id 0x0443) with * QuickAssist Technology (QAT) is prematurely terminated in hardware. The * Next Capability pointer in the MSI Capability Structure should point to * the PCIe Capability Structure but is incorrectly hardwired as 0 terminating * the list. 
*/ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev) { int pos, i = 0; u8 next_cap; u16 reg16, *cap; struct pci_cap_saved_state *state; /* Bail if the hardware bug is fixed */ if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP)) return; /* Bail if MSI Capability Structure is not found for some reason */ pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); if (!pos) return; /* * Bail if Next Capability pointer in the MSI Capability Structure * is not the expected incorrect 0x00. */ pci_read_config_byte(pdev, pos + 1, &next_cap); if (next_cap) return; /* * PCIe Capability Structure is expected to be at 0x50 and should * terminate the list (Next Capability pointer is 0x00). Verify * Capability Id and Next Capability pointer is as expected. * Open-code some of set_pcie_port_type() and pci_cfg_space_size_ext() * to correctly set kernel data structures which have already been * set incorrectly due to the hardware bug. */ pos = 0x50; pci_read_config_word(pdev, pos, &reg16); if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) { u32 status; #ifndef PCI_EXP_SAVE_REGS #define PCI_EXP_SAVE_REGS 7 #endif int size = PCI_EXP_SAVE_REGS * sizeof(u16); pdev->pcie_cap = pos; pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); pdev->pcie_flags_reg = reg16; pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16); pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE; if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) != PCIBIOS_SUCCESSFUL || (status == 0xffffffff)) pdev->cfg_size = PCI_CFG_SPACE_SIZE; if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP)) return; /* * Save PCIE cap */ state = kzalloc(sizeof(*state) + size, GFP_KERNEL); if (!state) return; state->cap.cap_nr = PCI_CAP_ID_EXP; state->cap.cap_extended = 0; state->cap.size = size; cap = (u16 *)&state->cap.data[0]; pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]); pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]); pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]); pcie_capability_read_word(pdev, PCI_EXP_RTCTL, &cap[i++]); pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]); pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]); pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]); hlist_add_head(&state->next, &pdev->saved_cap_space); } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap); /* FLR may cause some 82579 devices to hang. 
 */
static void quirk_intel_no_flr(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr);

static void quirk_no_ext_tags(struct pci_dev *pdev)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);

	if (!bridge)
		return;

	bridge->no_ext_tags = 1;
	dev_info(&pdev->dev, "disabling Extended Tags (this device can't handle them)\n");
	pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);

#ifdef CONFIG_PCI_ATS
/*
 * Some devices have a broken ATS implementation causing IOMMU stalls.
 * Don't use ATS for those devices.
 */
static void quirk_no_ats(struct pci_dev *pdev)
{
	dev_info(&pdev->dev, "disabling ATS (broken on this device)\n");
	pdev->ats_cap = 0;
}

/* AMD Stoney platform GPU */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
#endif /* CONFIG_PCI_ATS */
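For illustration only: the table walk that pci_dev_specific_acs_enabled() performs above (first matching vendor/device entry wins, with PCI_ANY_ID acting as a wildcard) can be exercised outside the kernel. The following standalone sketch uses hypothetical names (acs_quirk, quirk_lookup, ANY_ID, always_enabled) and plain stdio instead of the real PCI core; it is a minimal model of the lookup logic under those assumptions, not kernel code.

#include <stdio.h>
#include <stdint.h>

#define ANY_ID 0xffffu	/* stand-in for the kernel's PCI_ANY_ID wildcard */

struct acs_quirk {
	uint16_t vendor;
	uint16_t device;
	int (*acs_enabled)(uint16_t vendor, uint16_t device);
};

/* Hypothetical handler: report ACS-equivalent isolation unconditionally. */
static int always_enabled(uint16_t vendor, uint16_t device)
{
	(void)vendor;
	(void)device;
	return 1;
}

/* Zero-terminated table, mirroring the shape of pci_dev_acs_enabled[]. */
static const struct acs_quirk table[] = {
	{ 0x8086, 0x10c6, always_enabled },	/* exact vendor/device match */
	{ 0x177d, ANY_ID, always_enabled },	/* vendor-wide wildcard match */
	{ 0, 0, NULL }
};

/* First matching entry whose handler returns >= 0 decides the answer. */
static int quirk_lookup(uint16_t vendor, uint16_t device)
{
	const struct acs_quirk *i;
	int ret;

	for (i = table; i->acs_enabled; i++) {
		if ((i->vendor == vendor || i->vendor == ANY_ID) &&
		    (i->device == device || i->device == ANY_ID)) {
			ret = i->acs_enabled(vendor, device);
			if (ret >= 0)
				return ret;
		}
	}
	return -1;	/* no quirk; a real caller falls back to the ACS capability */
}

int main(void)
{
	printf("8086:10c6 -> %d\n", quirk_lookup(0x8086, 0x10c6));	/* 1 */
	printf("177d:a008 -> %d\n", quirk_lookup(0x177d, 0xa008));	/* 1 (wildcard) */
	printf("10de:1234 -> %d\n", quirk_lookup(0x10de, 0x1234));	/* -1 (no quirk) */
	return 0;
}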
976599.c
/* $OpenBSD: sftp-server.c,v 1.129 2021/08/09 23:47:44 djm Exp $ */ /* * Copyright (c) 2000-2004 Markus Friedl. All rights reserved. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "includes.h" #include <sys/types.h> #include <sys/stat.h> #include <sys/resource.h> #ifdef HAVE_SYS_TIME_H # include <sys/time.h> #endif #ifdef HAVE_SYS_MOUNT_H #include <sys/mount.h> #endif #ifdef HAVE_SYS_STATVFS_H #include <sys/statvfs.h> #endif #include <dirent.h> #include <errno.h> #include <fcntl.h> #include <pwd.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <unistd.h> #include <stdarg.h> #include "xmalloc.h" #include "sshbuf.h" #include "ssherr.h" #include "log.h" #include "misc.h" #include "match.h" #include "uidswap.h" #include "sftp.h" #include "sftp-common.h" char *sftp_realpath(const char *, char *); /* sftp-realpath.c */ /* Maximum data read that we are willing to accept */ #define SFTP_MAX_READ_LENGTH (SFTP_MAX_MSG_LENGTH - 1024) /* Our verbosity */ static LogLevel log_level = SYSLOG_LEVEL_ERROR; /* Our client */ static struct passwd *pw = NULL; static char *client_addr = NULL; /* input and output queue */ struct sshbuf *iqueue; struct sshbuf *oqueue; /* Version of client */ static u_int version; /* SSH2_FXP_INIT received */ static int init_done; /* Disable writes */ static int readonly; /* Requests that are allowed/denied */ static char *request_allowlist, *request_denylist; /* portable attributes, etc. 
 */
typedef struct Stat Stat;

struct Stat {
	char *name;
	char *long_name;
	Attrib attrib;
};

/* Packet handlers */
static void process_open(u_int32_t id);
static void process_close(u_int32_t id);
static void process_read(u_int32_t id);
static void process_write(u_int32_t id);
static void process_stat(u_int32_t id);
static void process_lstat(u_int32_t id);
static void process_fstat(u_int32_t id);
static void process_setstat(u_int32_t id);
static void process_fsetstat(u_int32_t id);
static void process_opendir(u_int32_t id);
static void process_readdir(u_int32_t id);
static void process_remove(u_int32_t id);
static void process_mkdir(u_int32_t id);
static void process_rmdir(u_int32_t id);
static void process_realpath(u_int32_t id);
static void process_rename(u_int32_t id);
static void process_readlink(u_int32_t id);
static void process_symlink(u_int32_t id);
static void process_extended_posix_rename(u_int32_t id);
static void process_extended_statvfs(u_int32_t id);
static void process_extended_fstatvfs(u_int32_t id);
static void process_extended_hardlink(u_int32_t id);
static void process_extended_fsync(u_int32_t id);
static void process_extended_lsetstat(u_int32_t id);
static void process_extended_limits(u_int32_t id);
static void process_extended_expand(u_int32_t id);
static void process_extended(u_int32_t id);

struct sftp_handler {
	const char *name;	/* user-visible name for fine-grained perms */
	const char *ext_name;	/* extended request name */
	u_int type;		/* packet type, for non extended packets */
	void (*handler)(u_int32_t);
	int does_write;		/* if nonzero, banned for readonly mode */
};

static const struct sftp_handler handlers[] = {
	/* NB. SSH2_FXP_OPEN does the readonly check in the handler itself */
	{ "open", NULL, SSH2_FXP_OPEN, process_open, 0 },
	{ "close", NULL, SSH2_FXP_CLOSE, process_close, 0 },
	{ "read", NULL, SSH2_FXP_READ, process_read, 0 },
	{ "write", NULL, SSH2_FXP_WRITE, process_write, 1 },
	{ "lstat", NULL, SSH2_FXP_LSTAT, process_lstat, 0 },
	{ "fstat", NULL, SSH2_FXP_FSTAT, process_fstat, 0 },
	{ "setstat", NULL, SSH2_FXP_SETSTAT, process_setstat, 1 },
	{ "fsetstat", NULL, SSH2_FXP_FSETSTAT, process_fsetstat, 1 },
	{ "opendir", NULL, SSH2_FXP_OPENDIR, process_opendir, 0 },
	{ "readdir", NULL, SSH2_FXP_READDIR, process_readdir, 0 },
	{ "remove", NULL, SSH2_FXP_REMOVE, process_remove, 1 },
	{ "mkdir", NULL, SSH2_FXP_MKDIR, process_mkdir, 1 },
	{ "rmdir", NULL, SSH2_FXP_RMDIR, process_rmdir, 1 },
	{ "realpath", NULL, SSH2_FXP_REALPATH, process_realpath, 0 },
	{ "stat", NULL, SSH2_FXP_STAT, process_stat, 0 },
	{ "rename", NULL, SSH2_FXP_RENAME, process_rename, 1 },
	{ "readlink", NULL, SSH2_FXP_READLINK, process_readlink, 0 },
	{ "symlink", NULL, SSH2_FXP_SYMLINK, process_symlink, 1 },
	{ NULL, NULL, 0, NULL, 0 }
};

/* SSH2_FXP_EXTENDED submessages */
static const struct sftp_handler extended_handlers[] = {
	{ "posix-rename", "posix-rename@openssh.com", 0,
	    process_extended_posix_rename, 1 },
	{ "statvfs", "statvfs@openssh.com", 0, process_extended_statvfs, 0 },
	{ "fstatvfs", "fstatvfs@openssh.com", 0, process_extended_fstatvfs, 0 },
	{ "hardlink", "hardlink@openssh.com", 0, process_extended_hardlink, 1 },
	{ "fsync", "fsync@openssh.com", 0, process_extended_fsync, 1 },
	{ "lsetstat", "lsetstat@openssh.com", 0, process_extended_lsetstat, 1 },
	{ "limits", "limits@openssh.com", 0, process_extended_limits, 0 },
	{ "expand-path", "expand-path@openssh.com", 0,
	    process_extended_expand, 0 },
	{ NULL, NULL, 0, NULL, 0 }
};

static const struct sftp_handler *
extended_handler_byname(const char *name)
{
	int i;

	for (i = 0;
extended_handlers[i].handler != NULL; i++) { if (strcmp(name, extended_handlers[i].ext_name) == 0) return &extended_handlers[i]; } return NULL; } static int request_permitted(const struct sftp_handler *h) { char *result; if (readonly && h->does_write) { verbose("Refusing %s request in read-only mode", h->name); return 0; } if (request_denylist != NULL && ((result = match_list(h->name, request_denylist, NULL))) != NULL) { free(result); verbose("Refusing denylisted %s request", h->name); return 0; } if (request_allowlist != NULL && ((result = match_list(h->name, request_allowlist, NULL))) != NULL) { free(result); debug2("Permitting allowlisted %s request", h->name); return 1; } if (request_allowlist != NULL) { verbose("Refusing non-allowlisted %s request", h->name); return 0; } return 1; } static int errno_to_portable(int unixerrno) { int ret = 0; switch (unixerrno) { case 0: ret = SSH2_FX_OK; break; case ENOENT: case ENOTDIR: case EBADF: case ELOOP: ret = SSH2_FX_NO_SUCH_FILE; break; case EPERM: case EACCES: case EFAULT: ret = SSH2_FX_PERMISSION_DENIED; break; case ENAMETOOLONG: case EINVAL: ret = SSH2_FX_BAD_MESSAGE; break; case ENOSYS: ret = SSH2_FX_OP_UNSUPPORTED; break; default: ret = SSH2_FX_FAILURE; break; } return ret; } static int flags_from_portable(int pflags) { int flags = 0; if ((pflags & SSH2_FXF_READ) && (pflags & SSH2_FXF_WRITE)) { flags = O_RDWR; } else if (pflags & SSH2_FXF_READ) { flags = O_RDONLY; } else if (pflags & SSH2_FXF_WRITE) { flags = O_WRONLY; } if (pflags & SSH2_FXF_APPEND) flags |= O_APPEND; if (pflags & SSH2_FXF_CREAT) flags |= O_CREAT; if (pflags & SSH2_FXF_TRUNC) flags |= O_TRUNC; if (pflags & SSH2_FXF_EXCL) flags |= O_EXCL; return flags; } static const char * string_from_portable(int pflags) { static char ret[128]; *ret = '\0'; #define PAPPEND(str) { \ if (*ret != '\0') \ strlcat(ret, ",", sizeof(ret)); \ strlcat(ret, str, sizeof(ret)); \ } if (pflags & SSH2_FXF_READ) PAPPEND("READ") if (pflags & SSH2_FXF_WRITE) PAPPEND("WRITE") if (pflags & SSH2_FXF_APPEND) PAPPEND("APPEND") if (pflags & SSH2_FXF_CREAT) PAPPEND("CREATE") if (pflags & SSH2_FXF_TRUNC) PAPPEND("TRUNCATE") if (pflags & SSH2_FXF_EXCL) PAPPEND("EXCL") return ret; } /* handle handles */ typedef struct Handle Handle; struct Handle { int use; DIR *dirp; int fd; int flags; char *name; u_int64_t bytes_read, bytes_write; int next_unused; }; enum { HANDLE_UNUSED, HANDLE_DIR, HANDLE_FILE }; static Handle *handles = NULL; static u_int num_handles = 0; static int first_unused_handle = -1; static void handle_unused(int i) { handles[i].use = HANDLE_UNUSED; handles[i].next_unused = first_unused_handle; first_unused_handle = i; } static int handle_new(int use, const char *name, int fd, int flags, DIR *dirp) { int i; if (first_unused_handle == -1) { if (num_handles + 1 <= num_handles) return -1; num_handles++; handles = xreallocarray(handles, num_handles, sizeof(Handle)); handle_unused(num_handles - 1); } i = first_unused_handle; first_unused_handle = handles[i].next_unused; handles[i].use = use; handles[i].dirp = dirp; handles[i].fd = fd; handles[i].flags = flags; handles[i].name = xstrdup(name); handles[i].bytes_read = handles[i].bytes_write = 0; return i; } static int handle_is_ok(int i, int type) { return i >= 0 && (u_int)i < num_handles && handles[i].use == type; } static int handle_to_string(int handle, u_char **stringp, int *hlenp) { if (stringp == NULL || hlenp == NULL) return -1; *stringp = xmalloc(sizeof(int32_t)); put_u32(*stringp, handle); *hlenp = sizeof(int32_t); return 0; } static int 
handle_from_string(const u_char *handle, u_int hlen) { int val; if (hlen != sizeof(int32_t)) return -1; val = get_u32(handle); if (handle_is_ok(val, HANDLE_FILE) || handle_is_ok(val, HANDLE_DIR)) return val; return -1; } static char * handle_to_name(int handle) { if (handle_is_ok(handle, HANDLE_DIR)|| handle_is_ok(handle, HANDLE_FILE)) return handles[handle].name; return NULL; } static DIR * handle_to_dir(int handle) { if (handle_is_ok(handle, HANDLE_DIR)) return handles[handle].dirp; return NULL; } static int handle_to_fd(int handle) { if (handle_is_ok(handle, HANDLE_FILE)) return handles[handle].fd; return -1; } static int handle_to_flags(int handle) { if (handle_is_ok(handle, HANDLE_FILE)) return handles[handle].flags; return 0; } static void handle_update_read(int handle, ssize_t bytes) { if (handle_is_ok(handle, HANDLE_FILE) && bytes > 0) handles[handle].bytes_read += bytes; } static void handle_update_write(int handle, ssize_t bytes) { if (handle_is_ok(handle, HANDLE_FILE) && bytes > 0) handles[handle].bytes_write += bytes; } static u_int64_t handle_bytes_read(int handle) { if (handle_is_ok(handle, HANDLE_FILE)) return (handles[handle].bytes_read); return 0; } static u_int64_t handle_bytes_write(int handle) { if (handle_is_ok(handle, HANDLE_FILE)) return (handles[handle].bytes_write); return 0; } static int handle_close(int handle) { int ret = -1; if (handle_is_ok(handle, HANDLE_FILE)) { ret = close(handles[handle].fd); free(handles[handle].name); handle_unused(handle); } else if (handle_is_ok(handle, HANDLE_DIR)) { ret = closedir(handles[handle].dirp); free(handles[handle].name); handle_unused(handle); } else { errno = ENOENT; } return ret; } static void handle_log_close(int handle, char *emsg) { if (handle_is_ok(handle, HANDLE_FILE)) { logit("%s%sclose \"%s\" bytes read %llu written %llu", emsg == NULL ? "" : emsg, emsg == NULL ? "" : " ", handle_to_name(handle), (unsigned long long)handle_bytes_read(handle), (unsigned long long)handle_bytes_write(handle)); } else { logit("%s%sclosedir \"%s\"", emsg == NULL ? "" : emsg, emsg == NULL ? 
"" : " ", handle_to_name(handle)); } } static void handle_log_exit(void) { u_int i; for (i = 0; i < num_handles; i++) if (handles[i].use != HANDLE_UNUSED) handle_log_close(i, "forced"); } static int get_handle(struct sshbuf *queue, int *hp) { u_char *handle; int r; size_t hlen; *hp = -1; if ((r = sshbuf_get_string(queue, &handle, &hlen)) != 0) return r; if (hlen < 256) *hp = handle_from_string(handle, hlen); free(handle); return 0; } /* send replies */ static void send_msg(struct sshbuf *m) { int r; if ((r = sshbuf_put_stringb(oqueue, m)) != 0) fatal_fr(r, "enqueue"); sshbuf_reset(m); } static const char * status_to_message(u_int32_t status) { const char *status_messages[] = { "Success", /* SSH_FX_OK */ "End of file", /* SSH_FX_EOF */ "No such file", /* SSH_FX_NO_SUCH_FILE */ "Permission denied", /* SSH_FX_PERMISSION_DENIED */ "Failure", /* SSH_FX_FAILURE */ "Bad message", /* SSH_FX_BAD_MESSAGE */ "No connection", /* SSH_FX_NO_CONNECTION */ "Connection lost", /* SSH_FX_CONNECTION_LOST */ "Operation unsupported", /* SSH_FX_OP_UNSUPPORTED */ "Unknown error" /* Others */ }; return (status_messages[MINIMUM(status,SSH2_FX_MAX)]); } static void send_status(u_int32_t id, u_int32_t status) { struct sshbuf *msg; int r; debug3("request %u: sent status %u", id, status); if (log_level > SYSLOG_LEVEL_VERBOSE || (status != SSH2_FX_OK && status != SSH2_FX_EOF)) logit("sent status %s", status_to_message(status)); if ((msg = sshbuf_new()) == NULL) fatal_f("sshbuf_new failed"); if ((r = sshbuf_put_u8(msg, SSH2_FXP_STATUS)) != 0 || (r = sshbuf_put_u32(msg, id)) != 0 || (r = sshbuf_put_u32(msg, status)) != 0) fatal_fr(r, "compose"); if (version >= 3) { if ((r = sshbuf_put_cstring(msg, status_to_message(status))) != 0 || (r = sshbuf_put_cstring(msg, "")) != 0) fatal_fr(r, "compose message"); } send_msg(msg); sshbuf_free(msg); } static void send_data_or_handle(char type, u_int32_t id, const u_char *data, int dlen) { struct sshbuf *msg; int r; if ((msg = sshbuf_new()) == NULL) fatal_f("sshbuf_new failed"); if ((r = sshbuf_put_u8(msg, type)) != 0 || (r = sshbuf_put_u32(msg, id)) != 0 || (r = sshbuf_put_string(msg, data, dlen)) != 0) fatal_fr(r, "compose"); send_msg(msg); sshbuf_free(msg); } static void send_data(u_int32_t id, const u_char *data, int dlen) { debug("request %u: sent data len %d", id, dlen); send_data_or_handle(SSH2_FXP_DATA, id, data, dlen); } static void send_handle(u_int32_t id, int handle) { u_char *string; int hlen; handle_to_string(handle, &string, &hlen); debug("request %u: sent handle handle %d", id, handle); send_data_or_handle(SSH2_FXP_HANDLE, id, string, hlen); free(string); } static void send_names(u_int32_t id, int count, const Stat *stats) { struct sshbuf *msg; int i, r; if ((msg = sshbuf_new()) == NULL) fatal_f("sshbuf_new failed"); if ((r = sshbuf_put_u8(msg, SSH2_FXP_NAME)) != 0 || (r = sshbuf_put_u32(msg, id)) != 0 || (r = sshbuf_put_u32(msg, count)) != 0) fatal_fr(r, "compose"); debug("request %u: sent names count %d", id, count); for (i = 0; i < count; i++) { if ((r = sshbuf_put_cstring(msg, stats[i].name)) != 0 || (r = sshbuf_put_cstring(msg, stats[i].long_name)) != 0 || (r = encode_attrib(msg, &stats[i].attrib)) != 0) fatal_fr(r, "compose filenames/attrib"); } send_msg(msg); sshbuf_free(msg); } static void send_attrib(u_int32_t id, const Attrib *a) { struct sshbuf *msg; int r; debug("request %u: sent attrib have 0x%x", id, a->flags); if ((msg = sshbuf_new()) == NULL) fatal_f("sshbuf_new failed"); if ((r = sshbuf_put_u8(msg, SSH2_FXP_ATTRS)) != 0 || (r = sshbuf_put_u32(msg, id)) 
	    != 0 ||
	    (r = encode_attrib(msg, a)) != 0)
		fatal_fr(r, "compose");
	send_msg(msg);
	sshbuf_free(msg);
}

static void
send_statvfs(u_int32_t id, struct statvfs *st)
{
	struct sshbuf *msg;
	u_int64_t flag;
	int r;

	flag = (st->f_flag & ST_RDONLY) ? SSH2_FXE_STATVFS_ST_RDONLY : 0;
	flag |= (st->f_flag & ST_NOSUID) ? SSH2_FXE_STATVFS_ST_NOSUID : 0;

	if ((msg = sshbuf_new()) == NULL)
		fatal_f("sshbuf_new failed");
	if ((r = sshbuf_put_u8(msg, SSH2_FXP_EXTENDED_REPLY)) != 0 ||
	    (r = sshbuf_put_u32(msg, id)) != 0 ||
	    (r = sshbuf_put_u64(msg, st->f_bsize)) != 0 ||
	    (r = sshbuf_put_u64(msg, st->f_frsize)) != 0 ||
	    (r = sshbuf_put_u64(msg, st->f_blocks)) != 0 ||
	    (r = sshbuf_put_u64(msg, st->f_bfree)) != 0 ||
	    (r = sshbuf_put_u64(msg, st->f_bavail)) != 0 ||
	    (r = sshbuf_put_u64(msg, st->f_files)) != 0 ||
	    (r = sshbuf_put_u64(msg, st->f_ffree)) != 0 ||
	    (r = sshbuf_put_u64(msg, st->f_favail)) != 0 ||
	    (r = sshbuf_put_u64(msg, FSID_TO_ULONG(st->f_fsid))) != 0 ||
	    (r = sshbuf_put_u64(msg, flag)) != 0 ||
	    (r = sshbuf_put_u64(msg, st->f_namemax)) != 0)
		fatal_fr(r, "compose");
	send_msg(msg);
	sshbuf_free(msg);
}

/*
 * Prepare SSH2_FXP_VERSION extension advertisement for a single extension.
 * The extension is checked for permission prior to advertisement.
 */
static int
compose_extension(struct sshbuf *msg, const char *name, const char *ver)
{
	int r;
	const struct sftp_handler *exthnd;

	if ((exthnd = extended_handler_byname(name)) == NULL)
		fatal_f("internal error: no handler for %s", name);
	if (!request_permitted(exthnd)) {
		debug2_f("refusing to advertise disallowed extension %s", name);
		return 0;
	}
	if ((r = sshbuf_put_cstring(msg, name)) != 0 ||
	    (r = sshbuf_put_cstring(msg, ver)) != 0)
		fatal_fr(r, "compose %s", name);
	return 0;
}

/* parse incoming */
static void
process_init(void)
{
	struct sshbuf *msg;
	int r;

	if ((r = sshbuf_get_u32(iqueue, &version)) != 0)
		fatal_fr(r, "parse");
	verbose("received client version %u", version);
	if ((msg = sshbuf_new()) == NULL)
		fatal_f("sshbuf_new failed");
	if ((r = sshbuf_put_u8(msg, SSH2_FXP_VERSION)) != 0 ||
	    (r = sshbuf_put_u32(msg, SSH2_FILEXFER_VERSION)) != 0)
		fatal_fr(r, "compose");

	/* extension advertisements */
	compose_extension(msg, "posix-rename@openssh.com", "1");
	compose_extension(msg, "statvfs@openssh.com", "2");
	compose_extension(msg, "fstatvfs@openssh.com", "2");
	compose_extension(msg, "hardlink@openssh.com", "1");
	compose_extension(msg, "fsync@openssh.com", "1");
	compose_extension(msg, "lsetstat@openssh.com", "1");
	compose_extension(msg, "limits@openssh.com", "1");
	compose_extension(msg, "expand-path@openssh.com", "1");

	send_msg(msg);
	sshbuf_free(msg);
}

static void
process_open(u_int32_t id)
{
	u_int32_t pflags;
	Attrib a;
	char *name;
	int r, handle, fd, flags, mode, status = SSH2_FX_FAILURE;

	if ((r = sshbuf_get_cstring(iqueue, &name, NULL)) != 0 ||
	    (r = sshbuf_get_u32(iqueue, &pflags)) != 0 || /* portable flags */
	    (r = decode_attrib(iqueue, &a)) != 0)
		fatal_fr(r, "parse");
	debug3("request %u: open flags %d", id, pflags);
	flags = flags_from_portable(pflags);
	mode = (a.flags & SSH2_FILEXFER_ATTR_PERMISSIONS) ?
a.perm : 0666; logit("open \"%s\" flags %s mode 0%o", name, string_from_portable(pflags), mode); if (readonly && ((flags & O_ACCMODE) != O_RDONLY || (flags & (O_CREAT|O_TRUNC)) != 0)) { verbose("Refusing open request in read-only mode"); status = SSH2_FX_PERMISSION_DENIED; } else { fd = open(name, flags, mode); if (fd == -1) { status = errno_to_portable(errno); } else { handle = handle_new(HANDLE_FILE, name, fd, flags, NULL); if (handle < 0) { close(fd); } else { send_handle(id, handle); status = SSH2_FX_OK; } } } if (status != SSH2_FX_OK) send_status(id, status); free(name); } static void process_close(u_int32_t id) { int r, handle, ret, status = SSH2_FX_FAILURE; if ((r = get_handle(iqueue, &handle)) != 0) fatal_fr(r, "parse"); debug3("request %u: close handle %u", id, handle); handle_log_close(handle, NULL); ret = handle_close(handle); status = (ret == -1) ? errno_to_portable(errno) : SSH2_FX_OK; send_status(id, status); } static void process_read(u_int32_t id) { static u_char *buf; static size_t buflen; u_int32_t len; int r, handle, fd, ret, status = SSH2_FX_FAILURE; u_int64_t off; if ((r = get_handle(iqueue, &handle)) != 0 || (r = sshbuf_get_u64(iqueue, &off)) != 0 || (r = sshbuf_get_u32(iqueue, &len)) != 0) fatal_fr(r, "parse"); debug("request %u: read \"%s\" (handle %d) off %llu len %u", id, handle_to_name(handle), handle, (unsigned long long)off, len); if ((fd = handle_to_fd(handle)) == -1) goto out; if (len > SFTP_MAX_READ_LENGTH) { debug2("read change len %u to %u", len, SFTP_MAX_READ_LENGTH); len = SFTP_MAX_READ_LENGTH; } if (len > buflen) { debug3_f("allocate %zu => %u", buflen, len); if ((buf = realloc(NULL, len)) == NULL) fatal_f("realloc failed"); buflen = len; } if (lseek(fd, off, SEEK_SET) == -1) { status = errno_to_portable(errno); error_f("seek \"%.100s\": %s", handle_to_name(handle), strerror(errno)); goto out; } if (len == 0) { /* weird, but not strictly disallowed */ ret = 0; } else if ((ret = read(fd, buf, len)) == -1) { status = errno_to_portable(errno); error_f("read \"%.100s\": %s", handle_to_name(handle), strerror(errno)); goto out; } else if (ret == 0) { status = SSH2_FX_EOF; goto out; } send_data(id, buf, ret); handle_update_read(handle, ret); /* success */ status = SSH2_FX_OK; out: if (status != SSH2_FX_OK) send_status(id, status); } static void process_write(u_int32_t id) { u_int64_t off; size_t len; int r, handle, fd, ret, status; u_char *data; if ((r = get_handle(iqueue, &handle)) != 0 || (r = sshbuf_get_u64(iqueue, &off)) != 0 || (r = sshbuf_get_string(iqueue, &data, &len)) != 0) fatal_fr(r, "parse"); debug("request %u: write \"%s\" (handle %d) off %llu len %zu", id, handle_to_name(handle), handle, (unsigned long long)off, len); fd = handle_to_fd(handle); if (fd < 0) status = SSH2_FX_FAILURE; else { if (!(handle_to_flags(handle) & O_APPEND) && lseek(fd, off, SEEK_SET) == -1) { status = errno_to_portable(errno); error_f("seek \"%.100s\": %s", handle_to_name(handle), strerror(errno)); } else { /* XXX ATOMICIO ? 
*/ ret = write(fd, data, len); if (ret == -1) { status = errno_to_portable(errno); error_f("write \"%.100s\": %s", handle_to_name(handle), strerror(errno)); } else if ((size_t)ret == len) { status = SSH2_FX_OK; handle_update_write(handle, ret); } else { debug2_f("nothing at all written"); status = SSH2_FX_FAILURE; } } } send_status(id, status); free(data); } static void process_do_stat(u_int32_t id, int do_lstat) { Attrib a; struct stat st; char *name; int r, status = SSH2_FX_FAILURE; if ((r = sshbuf_get_cstring(iqueue, &name, NULL)) != 0) fatal_fr(r, "parse"); debug3("request %u: %sstat", id, do_lstat ? "l" : ""); verbose("%sstat name \"%s\"", do_lstat ? "l" : "", name); r = do_lstat ? lstat(name, &st) : stat(name, &st); if (r == -1) { status = errno_to_portable(errno); } else { stat_to_attrib(&st, &a); send_attrib(id, &a); status = SSH2_FX_OK; } if (status != SSH2_FX_OK) send_status(id, status); free(name); } static void process_stat(u_int32_t id) { process_do_stat(id, 0); } static void process_lstat(u_int32_t id) { process_do_stat(id, 1); } static void process_fstat(u_int32_t id) { Attrib a; struct stat st; int fd, r, handle, status = SSH2_FX_FAILURE; if ((r = get_handle(iqueue, &handle)) != 0) fatal_fr(r, "parse"); debug("request %u: fstat \"%s\" (handle %u)", id, handle_to_name(handle), handle); fd = handle_to_fd(handle); if (fd >= 0) { r = fstat(fd, &st); if (r == -1) { status = errno_to_portable(errno); } else { stat_to_attrib(&st, &a); send_attrib(id, &a); status = SSH2_FX_OK; } } if (status != SSH2_FX_OK) send_status(id, status); } static struct timeval * attrib_to_tv(const Attrib *a) { static struct timeval tv[2]; tv[0].tv_sec = a->atime; tv[0].tv_usec = 0; tv[1].tv_sec = a->mtime; tv[1].tv_usec = 0; return tv; } static struct timespec * attrib_to_ts(const Attrib *a) { static struct timespec ts[2]; ts[0].tv_sec = a->atime; ts[0].tv_nsec = 0; ts[1].tv_sec = a->mtime; ts[1].tv_nsec = 0; return ts; } static void process_setstat(u_int32_t id) { Attrib a; char *name; int r, status = SSH2_FX_OK; if ((r = sshbuf_get_cstring(iqueue, &name, NULL)) != 0 || (r = decode_attrib(iqueue, &a)) != 0) fatal_fr(r, "parse"); debug("request %u: setstat name \"%s\"", id, name); if (a.flags & SSH2_FILEXFER_ATTR_SIZE) { logit("set \"%s\" size %llu", name, (unsigned long long)a.size); r = truncate(name, a.size); if (r == -1) status = errno_to_portable(errno); } if (a.flags & SSH2_FILEXFER_ATTR_PERMISSIONS) { logit("set \"%s\" mode %04o", name, a.perm); r = chmod(name, a.perm & 07777); if (r == -1) status = errno_to_portable(errno); } if (a.flags & SSH2_FILEXFER_ATTR_ACMODTIME) { char buf[64]; time_t t = a.mtime; strftime(buf, sizeof(buf), "%Y%m%d-%H:%M:%S", localtime(&t)); logit("set \"%s\" modtime %s", name, buf); r = utimes(name, attrib_to_tv(&a)); if (r == -1) status = errno_to_portable(errno); } if (a.flags & SSH2_FILEXFER_ATTR_UIDGID) { logit("set \"%s\" owner %lu group %lu", name, (u_long)a.uid, (u_long)a.gid); r = chown(name, a.uid, a.gid); if (r == -1) status = errno_to_portable(errno); } send_status(id, status); free(name); } static void process_fsetstat(u_int32_t id) { Attrib a; int handle, fd, r; int status = SSH2_FX_OK; if ((r = get_handle(iqueue, &handle)) != 0 || (r = decode_attrib(iqueue, &a)) != 0) fatal_fr(r, "parse"); debug("request %u: fsetstat handle %d", id, handle); fd = handle_to_fd(handle); if (fd < 0) status = SSH2_FX_FAILURE; else { char *name = handle_to_name(handle); if (a.flags & SSH2_FILEXFER_ATTR_SIZE) { logit("set \"%s\" size %llu", name, (unsigned long long)a.size); r = 
ftruncate(fd, a.size); if (r == -1) status = errno_to_portable(errno); } if (a.flags & SSH2_FILEXFER_ATTR_PERMISSIONS) { logit("set \"%s\" mode %04o", name, a.perm); #ifdef HAVE_FCHMOD r = fchmod(fd, a.perm & 07777); #else r = chmod(name, a.perm & 07777); #endif if (r == -1) status = errno_to_portable(errno); } if (a.flags & SSH2_FILEXFER_ATTR_ACMODTIME) { char buf[64]; time_t t = a.mtime; strftime(buf, sizeof(buf), "%Y%m%d-%H:%M:%S", localtime(&t)); logit("set \"%s\" modtime %s", name, buf); #ifdef HAVE_FUTIMES r = futimes(fd, attrib_to_tv(&a)); #else r = utimes(name, attrib_to_tv(&a)); #endif if (r == -1) status = errno_to_portable(errno); } if (a.flags & SSH2_FILEXFER_ATTR_UIDGID) { logit("set \"%s\" owner %lu group %lu", name, (u_long)a.uid, (u_long)a.gid); #ifdef HAVE_FCHOWN r = fchown(fd, a.uid, a.gid); #else r = chown(name, a.uid, a.gid); #endif if (r == -1) status = errno_to_portable(errno); } } send_status(id, status); } static void process_opendir(u_int32_t id) { DIR *dirp = NULL; char *path; int r, handle, status = SSH2_FX_FAILURE; if ((r = sshbuf_get_cstring(iqueue, &path, NULL)) != 0) fatal_fr(r, "parse"); debug3("request %u: opendir", id); logit("opendir \"%s\"", path); dirp = opendir(path); if (dirp == NULL) { status = errno_to_portable(errno); } else { handle = handle_new(HANDLE_DIR, path, 0, 0, dirp); if (handle < 0) { closedir(dirp); } else { send_handle(id, handle); status = SSH2_FX_OK; } } if (status != SSH2_FX_OK) send_status(id, status); free(path); } static void process_readdir(u_int32_t id) { DIR *dirp; struct dirent *dp; char *path; int r, handle; if ((r = get_handle(iqueue, &handle)) != 0) fatal_fr(r, "parse"); debug("request %u: readdir \"%s\" (handle %d)", id, handle_to_name(handle), handle); dirp = handle_to_dir(handle); path = handle_to_name(handle); if (dirp == NULL || path == NULL) { send_status(id, SSH2_FX_FAILURE); } else { struct stat st; char pathname[PATH_MAX]; Stat *stats; int nstats = 10, count = 0, i; stats = xcalloc(nstats, sizeof(Stat)); while ((dp = readdir(dirp)) != NULL) { if (count >= nstats) { nstats *= 2; stats = xreallocarray(stats, nstats, sizeof(Stat)); } /* XXX OVERFLOW ? */ snprintf(pathname, sizeof pathname, "%s%s%s", path, strcmp(path, "/") ? "/" : "", dp->d_name); if (lstat(pathname, &st) == -1) continue; stat_to_attrib(&st, &(stats[count].attrib)); stats[count].name = xstrdup(dp->d_name); stats[count].long_name = ls_file(dp->d_name, &st, 0, 0); count++; /* send up to 100 entries in one message */ /* XXX check packet size instead */ if (count == 100) break; } if (count > 0) { send_names(id, count, stats); for (i = 0; i < count; i++) { free(stats[i].name); free(stats[i].long_name); } } else { send_status(id, SSH2_FX_EOF); } free(stats); } } static void process_remove(u_int32_t id) { char *name; int r, status = SSH2_FX_FAILURE; if ((r = sshbuf_get_cstring(iqueue, &name, NULL)) != 0) fatal_fr(r, "parse"); debug3("request %u: remove", id); logit("remove name \"%s\"", name); r = unlink(name); status = (r == -1) ? errno_to_portable(errno) : SSH2_FX_OK; send_status(id, status); free(name); } static void process_mkdir(u_int32_t id) { Attrib a; char *name; int r, mode, status = SSH2_FX_FAILURE; if ((r = sshbuf_get_cstring(iqueue, &name, NULL)) != 0 || (r = decode_attrib(iqueue, &a)) != 0) fatal_fr(r, "parse"); mode = (a.flags & SSH2_FILEXFER_ATTR_PERMISSIONS) ? a.perm & 07777 : 0777; debug3("request %u: mkdir", id); logit("mkdir name \"%s\" mode 0%o", name, mode); r = mkdir(name, mode); status = (r == -1) ? 
errno_to_portable(errno) : SSH2_FX_OK; send_status(id, status); free(name); } static void process_rmdir(u_int32_t id) { char *name; int r, status; if ((r = sshbuf_get_cstring(iqueue, &name, NULL)) != 0) fatal_fr(r, "parse"); debug3("request %u: rmdir", id); logit("rmdir name \"%s\"", name); r = rmdir(name); status = (r == -1) ? errno_to_portable(errno) : SSH2_FX_OK; send_status(id, status); free(name); } static void process_realpath(u_int32_t id) { char resolvedname[PATH_MAX]; char *path; int r; if ((r = sshbuf_get_cstring(iqueue, &path, NULL)) != 0) fatal_fr(r, "parse"); if (path[0] == '\0') { free(path); path = xstrdup("."); } debug3("request %u: realpath", id); verbose("realpath \"%s\"", path); if (sftp_realpath(path, resolvedname) == NULL) { send_status(id, errno_to_portable(errno)); } else { Stat s; attrib_clear(&s.attrib); s.name = s.long_name = resolvedname; send_names(id, 1, &s); } free(path); } static void process_rename(u_int32_t id) { char *oldpath, *newpath; int r, status; struct stat sb; if ((r = sshbuf_get_cstring(iqueue, &oldpath, NULL)) != 0 || (r = sshbuf_get_cstring(iqueue, &newpath, NULL)) != 0) fatal_fr(r, "parse"); debug3("request %u: rename", id); logit("rename old \"%s\" new \"%s\"", oldpath, newpath); status = SSH2_FX_FAILURE; if (lstat(oldpath, &sb) == -1) status = errno_to_portable(errno); else if (S_ISREG(sb.st_mode)) { /* Race-free rename of regular files */ if (link(oldpath, newpath) == -1) { if (errno == EOPNOTSUPP || errno == ENOSYS #ifdef EXDEV || errno == EXDEV #endif #ifdef LINK_OPNOTSUPP_ERRNO || errno == LINK_OPNOTSUPP_ERRNO #endif ) { struct stat st; /* * fs doesn't support links, so fall back to * stat+rename. This is racy. */ if (stat(newpath, &st) == -1) { if (rename(oldpath, newpath) == -1) status = errno_to_portable(errno); else status = SSH2_FX_OK; } } else { status = errno_to_portable(errno); } } else if (unlink(oldpath) == -1) { status = errno_to_portable(errno); /* clean spare link */ unlink(newpath); } else status = SSH2_FX_OK; } else if (stat(newpath, &sb) == -1) { if (rename(oldpath, newpath) == -1) status = errno_to_portable(errno); else status = SSH2_FX_OK; } send_status(id, status); free(oldpath); free(newpath); } static void process_readlink(u_int32_t id) { int r, len; char buf[PATH_MAX]; char *path; if ((r = sshbuf_get_cstring(iqueue, &path, NULL)) != 0) fatal_fr(r, "parse"); debug3("request %u: readlink", id); verbose("readlink \"%s\"", path); if ((len = readlink(path, buf, sizeof(buf) - 1)) == -1) send_status(id, errno_to_portable(errno)); else { Stat s; buf[len] = '\0'; attrib_clear(&s.attrib); s.name = s.long_name = buf; send_names(id, 1, &s); } free(path); } static void process_symlink(u_int32_t id) { char *oldpath, *newpath; int r, status; if ((r = sshbuf_get_cstring(iqueue, &oldpath, NULL)) != 0 || (r = sshbuf_get_cstring(iqueue, &newpath, NULL)) != 0) fatal_fr(r, "parse"); debug3("request %u: symlink", id); logit("symlink old \"%s\" new \"%s\"", oldpath, newpath); /* this will fail if 'newpath' exists */ r = symlink(oldpath, newpath); status = (r == -1) ? 
errno_to_portable(errno) : SSH2_FX_OK; send_status(id, status); free(oldpath); free(newpath); } static void process_extended_posix_rename(u_int32_t id) { char *oldpath, *newpath; int r, status; if ((r = sshbuf_get_cstring(iqueue, &oldpath, NULL)) != 0 || (r = sshbuf_get_cstring(iqueue, &newpath, NULL)) != 0) fatal_fr(r, "parse"); debug3("request %u: posix-rename", id); logit("posix-rename old \"%s\" new \"%s\"", oldpath, newpath); r = rename(oldpath, newpath); status = (r == -1) ? errno_to_portable(errno) : SSH2_FX_OK; send_status(id, status); free(oldpath); free(newpath); } static void process_extended_statvfs(u_int32_t id) { char *path; struct statvfs st; int r; if ((r = sshbuf_get_cstring(iqueue, &path, NULL)) != 0) fatal_fr(r, "parse"); debug3("request %u: statvfs", id); logit("statvfs \"%s\"", path); if (statvfs(path, &st) != 0) send_status(id, errno_to_portable(errno)); else send_statvfs(id, &st); free(path); } static void process_extended_fstatvfs(u_int32_t id) { int r, handle, fd; struct statvfs st; if ((r = get_handle(iqueue, &handle)) != 0) fatal_fr(r, "parse"); debug("request %u: fstatvfs \"%s\" (handle %u)", id, handle_to_name(handle), handle); if ((fd = handle_to_fd(handle)) < 0) { send_status(id, SSH2_FX_FAILURE); return; } if (fstatvfs(fd, &st) != 0) send_status(id, errno_to_portable(errno)); else send_statvfs(id, &st); } static void process_extended_hardlink(u_int32_t id) { char *oldpath, *newpath; int r, status; if ((r = sshbuf_get_cstring(iqueue, &oldpath, NULL)) != 0 || (r = sshbuf_get_cstring(iqueue, &newpath, NULL)) != 0) fatal_fr(r, "parse"); debug3("request %u: hardlink", id); logit("hardlink old \"%s\" new \"%s\"", oldpath, newpath); r = link(oldpath, newpath); status = (r == -1) ? errno_to_portable(errno) : SSH2_FX_OK; send_status(id, status); free(oldpath); free(newpath); } static void process_extended_fsync(u_int32_t id) { int handle, fd, r, status = SSH2_FX_OP_UNSUPPORTED; if ((r = get_handle(iqueue, &handle)) != 0) fatal_fr(r, "parse"); debug3("request %u: fsync (handle %u)", id, handle); verbose("fsync \"%s\"", handle_to_name(handle)); if ((fd = handle_to_fd(handle)) < 0) status = SSH2_FX_NO_SUCH_FILE; else if (handle_is_ok(handle, HANDLE_FILE)) { r = fsync(fd); status = (r == -1) ? 
errno_to_portable(errno) : SSH2_FX_OK; } send_status(id, status); } static void process_extended_lsetstat(u_int32_t id) { Attrib a; char *name; int r, status = SSH2_FX_OK; if ((r = sshbuf_get_cstring(iqueue, &name, NULL)) != 0 || (r = decode_attrib(iqueue, &a)) != 0) fatal_fr(r, "parse"); debug("request %u: lsetstat name \"%s\"", id, name); if (a.flags & SSH2_FILEXFER_ATTR_SIZE) { /* nonsensical for links */ status = SSH2_FX_BAD_MESSAGE; goto out; } if (a.flags & SSH2_FILEXFER_ATTR_PERMISSIONS) { logit("set \"%s\" mode %04o", name, a.perm); r = fchmodat(AT_FDCWD, name, a.perm & 07777, AT_SYMLINK_NOFOLLOW); if (r == -1) status = errno_to_portable(errno); } if (a.flags & SSH2_FILEXFER_ATTR_ACMODTIME) { char buf[64]; time_t t = a.mtime; strftime(buf, sizeof(buf), "%Y%m%d-%H:%M:%S", localtime(&t)); logit("set \"%s\" modtime %s", name, buf); r = utimensat(AT_FDCWD, name, attrib_to_ts(&a), AT_SYMLINK_NOFOLLOW); if (r == -1) status = errno_to_portable(errno); } if (a.flags & SSH2_FILEXFER_ATTR_UIDGID) { logit("set \"%s\" owner %lu group %lu", name, (u_long)a.uid, (u_long)a.gid); r = fchownat(AT_FDCWD, name, a.uid, a.gid, AT_SYMLINK_NOFOLLOW); if (r == -1) status = errno_to_portable(errno); } out: send_status(id, status); free(name); } static void process_extended_limits(u_int32_t id) { struct sshbuf *msg; int r; uint64_t nfiles = 0; #if defined(HAVE_GETRLIMIT) && defined(RLIMIT_NOFILE) struct rlimit rlim; #endif debug("request %u: limits", id); #if defined(HAVE_GETRLIMIT) && defined(RLIMIT_NOFILE) if (getrlimit(RLIMIT_NOFILE, &rlim) != -1 && rlim.rlim_cur > 5) nfiles = rlim.rlim_cur - 5; /* stdio(3) + syslog + spare */ #endif if ((msg = sshbuf_new()) == NULL) fatal_f("sshbuf_new failed"); if ((r = sshbuf_put_u8(msg, SSH2_FXP_EXTENDED_REPLY)) != 0 || (r = sshbuf_put_u32(msg, id)) != 0 || /* max-packet-length */ (r = sshbuf_put_u64(msg, SFTP_MAX_MSG_LENGTH)) != 0 || /* max-read-length */ (r = sshbuf_put_u64(msg, SFTP_MAX_READ_LENGTH)) != 0 || /* max-write-length */ (r = sshbuf_put_u64(msg, SFTP_MAX_MSG_LENGTH - 1024)) != 0 || /* max-open-handles */ (r = sshbuf_put_u64(msg, nfiles)) != 0) fatal_fr(r, "compose"); send_msg(msg); sshbuf_free(msg); } static void process_extended_expand(u_int32_t id) { char cwd[PATH_MAX], resolvedname[PATH_MAX]; char *path, *npath; int r; Stat s; if ((r = sshbuf_get_cstring(iqueue, &path, NULL)) != 0) fatal_fr(r, "parse"); if (getcwd(cwd, sizeof(cwd)) == NULL) { send_status(id, errno_to_portable(errno)); goto out; } debug3("request %u: expand, original \"%s\"", id, path); if (path[0] == '\0') { /* empty path */ free(path); path = xstrdup("."); } else if (*path == '~') { /* ~ expand path */ /* Special-case for "~" and "~/" to respect homedir flag */ if (strcmp(path, "~") == 0) { free(path); path = xstrdup(cwd); } else if (strncmp(path, "~/", 2) == 0) { npath = xstrdup(path + 2); free(path); xasprintf(&path, "%s/%s", cwd, npath); } else { /* ~user expansions */ if (tilde_expand(path, pw->pw_uid, &npath) != 0) { send_status(id, errno_to_portable(EINVAL)); goto out; } free(path); path = npath; } } else if (*path != '/') { /* relative path */ xasprintf(&npath, "%s/%s", cwd, path); free(path); path = npath; } verbose("expand \"%s\"", path); if (sftp_realpath(path, resolvedname) == NULL) { send_status(id, errno_to_portable(errno)); goto out; } attrib_clear(&s.attrib); s.name = s.long_name = resolvedname; send_names(id, 1, &s); out: free(path); } static void process_extended(u_int32_t id) { char *request; int r; const struct sftp_handler *exthand; if ((r = 
sshbuf_get_cstring(iqueue, &request, NULL)) != 0) fatal_fr(r, "parse"); if ((exthand = extended_handler_byname(request)) == NULL) { error("Unknown extended request \"%.100s\"", request); send_status(id, SSH2_FX_OP_UNSUPPORTED); /* MUST */ } else { if (!request_permitted(exthand)) send_status(id, SSH2_FX_PERMISSION_DENIED); else exthand->handler(id); } free(request); } /* stolen from ssh-agent */ static void process(void) { u_int msg_len; u_int buf_len; u_int consumed; u_char type; const u_char *cp; int i, r; u_int32_t id; buf_len = sshbuf_len(iqueue); if (buf_len < 5) return; /* Incomplete message. */ cp = sshbuf_ptr(iqueue); msg_len = get_u32(cp); if (msg_len > SFTP_MAX_MSG_LENGTH) { error("bad message from %s local user %s", client_addr, pw->pw_name); sftp_server_cleanup_exit(11); } if (buf_len < msg_len + 4) return; if ((r = sshbuf_consume(iqueue, 4)) != 0) fatal_fr(r, "consume"); buf_len -= 4; if ((r = sshbuf_get_u8(iqueue, &type)) != 0) fatal_fr(r, "parse type"); switch (type) { case SSH2_FXP_INIT: process_init(); init_done = 1; break; case SSH2_FXP_EXTENDED: if (!init_done) fatal("Received extended request before init"); if ((r = sshbuf_get_u32(iqueue, &id)) != 0) fatal_fr(r, "parse extended ID"); process_extended(id); break; default: if (!init_done) fatal("Received %u request before init", type); if ((r = sshbuf_get_u32(iqueue, &id)) != 0) fatal_fr(r, "parse ID"); for (i = 0; handlers[i].handler != NULL; i++) { if (type == handlers[i].type) { if (!request_permitted(&handlers[i])) { send_status(id, SSH2_FX_PERMISSION_DENIED); } else { handlers[i].handler(id); } break; } } if (handlers[i].handler == NULL) error("Unknown message %u", type); } /* discard the remaining bytes from the current packet */ if (buf_len < sshbuf_len(iqueue)) { error("iqueue grew unexpectedly"); sftp_server_cleanup_exit(255); } consumed = buf_len - sshbuf_len(iqueue); if (msg_len < consumed) { error("msg_len %u < consumed %u", msg_len, consumed); sftp_server_cleanup_exit(255); } if (msg_len > consumed && (r = sshbuf_consume(iqueue, msg_len - consumed)) != 0) fatal_fr(r, "consume"); } /* Cleanup handler that logs active handles upon normal exit */ void sftp_server_cleanup_exit(int i) { if (pw != NULL && client_addr != NULL) { handle_log_exit(); logit("session closed for local user %s from [%s]", pw->pw_name, client_addr); } _exit(i); } static void sftp_server_usage(void) { extern char *__progname; fprintf(stderr, "usage: %s [-ehR] [-d start_directory] [-f log_facility] " "[-l log_level]\n\t[-P denied_requests] " "[-p allowed_requests] [-u umask]\n" " %s -Q protocol_feature\n", __progname, __progname); exit(1); } int sftp_server_main(int argc, char **argv, struct passwd *user_pw) { fd_set *rset, *wset; int i, r, in, out, max, ch, skipargs = 0, log_stderr = 0; ssize_t len, olen, set_size; SyslogFacility log_facility = SYSLOG_FACILITY_AUTH; char *cp, *homedir = NULL, uidstr[32], buf[4*4096]; long mask; extern char *optarg; extern char *__progname; __progname = ssh_get_progname(argv[0]); log_init(__progname, log_level, log_facility, log_stderr); pw = pwcopy(user_pw); while (!skipargs && (ch = getopt(argc, argv, "d:f:l:P:p:Q:u:cehR")) != -1) { switch (ch) { case 'Q': if (strcasecmp(optarg, "requests") != 0) { fprintf(stderr, "Invalid query type\n"); exit(1); } for (i = 0; handlers[i].handler != NULL; i++) printf("%s\n", handlers[i].name); for (i = 0; extended_handlers[i].handler != NULL; i++) printf("%s\n", extended_handlers[i].name); exit(0); break; case 'R': readonly = 1; break; case 'c': /* * Ignore all arguments 
if we are invoked as a * shell using "sftp-server -c command" */ skipargs = 1; break; case 'e': log_stderr = 1; break; case 'l': log_level = log_level_number(optarg); if (log_level == SYSLOG_LEVEL_NOT_SET) error("Invalid log level \"%s\"", optarg); break; case 'f': log_facility = log_facility_number(optarg); if (log_facility == SYSLOG_FACILITY_NOT_SET) error("Invalid log facility \"%s\"", optarg); break; case 'd': cp = tilde_expand_filename(optarg, user_pw->pw_uid); snprintf(uidstr, sizeof(uidstr), "%llu", (unsigned long long)pw->pw_uid); homedir = percent_expand(cp, "d", user_pw->pw_dir, "u", user_pw->pw_name, "U", uidstr, (char *)NULL); free(cp); break; case 'p': if (request_allowlist != NULL) fatal("Permitted requests already set"); request_allowlist = xstrdup(optarg); break; case 'P': if (request_denylist != NULL) fatal("Refused requests already set"); request_denylist = xstrdup(optarg); break; case 'u': errno = 0; mask = strtol(optarg, &cp, 8); if (mask < 0 || mask > 0777 || *cp != '\0' || cp == optarg || (mask == 0 && errno != 0)) fatal("Invalid umask \"%s\"", optarg); (void)umask((mode_t)mask); break; case 'h': default: sftp_server_usage(); } } log_init(__progname, log_level, log_facility, log_stderr); /* * On platforms where we can, avoid making /proc/self/{mem,maps} * available to the user so that sftp access doesn't automatically * imply arbitrary code execution access that will break * restricted configurations. */ platform_disable_tracing(1); /* strict */ /* Drop any fine-grained privileges we don't need */ platform_pledge_sftp_server(); if ((cp = getenv("SSH_CONNECTION")) != NULL) { client_addr = xstrdup(cp); if ((cp = strchr(client_addr, ' ')) == NULL) { error("Malformed SSH_CONNECTION variable: \"%s\"", getenv("SSH_CONNECTION")); sftp_server_cleanup_exit(255); } *cp = '\0'; } else client_addr = xstrdup("UNKNOWN"); logit("session opened for local user %s from [%s]", pw->pw_name, client_addr); in = STDIN_FILENO; out = STDOUT_FILENO; #ifdef HAVE_CYGWIN setmode(in, O_BINARY); setmode(out, O_BINARY); #endif max = 0; if (in > max) max = in; if (out > max) max = out; if ((iqueue = sshbuf_new()) == NULL) fatal_f("sshbuf_new failed"); if ((oqueue = sshbuf_new()) == NULL) fatal_f("sshbuf_new failed"); rset = xcalloc(howmany(max + 1, NFDBITS), sizeof(fd_mask)); wset = xcalloc(howmany(max + 1, NFDBITS), sizeof(fd_mask)); if (homedir != NULL) { if (chdir(homedir) != 0) { error("chdir to \"%s\" failed: %s", homedir, strerror(errno)); } } set_size = howmany(max + 1, NFDBITS) * sizeof(fd_mask); for (;;) { memset(rset, 0, set_size); memset(wset, 0, set_size); /* * Ensure that we can read a full buffer and handle * the worst-case length packet it can generate, * otherwise apply backpressure by stopping reads. 
*/ if ((r = sshbuf_check_reserve(iqueue, sizeof(buf))) == 0 && (r = sshbuf_check_reserve(oqueue, SFTP_MAX_MSG_LENGTH)) == 0) FD_SET(in, rset); else if (r != SSH_ERR_NO_BUFFER_SPACE) fatal_fr(r, "reserve"); olen = sshbuf_len(oqueue); if (olen > 0) FD_SET(out, wset); if (select(max+1, rset, wset, NULL, NULL) == -1) { if (errno == EINTR) continue; error("select: %s", strerror(errno)); sftp_server_cleanup_exit(2); } /* copy stdin to iqueue */ if (FD_ISSET(in, rset)) { len = read(in, buf, sizeof buf); if (len == 0) { debug("read eof"); sftp_server_cleanup_exit(0); } else if (len == -1) { error("read: %s", strerror(errno)); sftp_server_cleanup_exit(1); } else if ((r = sshbuf_put(iqueue, buf, len)) != 0) fatal_fr(r, "sshbuf_put"); } /* send oqueue to stdout */ if (FD_ISSET(out, wset)) { len = write(out, sshbuf_ptr(oqueue), olen); if (len == -1) { error("write: %s", strerror(errno)); sftp_server_cleanup_exit(1); } else if ((r = sshbuf_consume(oqueue, len)) != 0) fatal_fr(r, "consume"); } /* * Process requests from client if we can fit the results * into the output buffer, otherwise stop processing input * and let the output queue drain. */ r = sshbuf_check_reserve(oqueue, SFTP_MAX_MSG_LENGTH); if (r == 0) process(); else if (r != SSH_ERR_NO_BUFFER_SPACE) fatal_fr(r, "reserve"); } }
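/*
 * Illustrative sketch, not part of the original file: the framing rule that
 * process() above applies to iqueue -- a packet is complete once the 32-bit
 * big-endian length prefix plus that many payload bytes are buffered, and the
 * advertised length may not exceed SFTP_MAX_MSG_LENGTH. The helper name is
 * made up for this sketch.
 */
static int
example_packet_is_complete(const u_char *buf, size_t buf_len)
{
	u_int msg_len;

	if (buf_len < 5)
		return 0;	/* need the length prefix and the type byte */
	msg_len = get_u32(buf);
	if (msg_len > SFTP_MAX_MSG_LENGTH)
		return -1;	/* protocol violation; caller should disconnect */
	return buf_len >= msg_len + 4;
}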
793644.c
#include <stdio.h> #include <stdlib.h> #include <regex.h> #include <string.h> #include <unistd.h> #include <math.h> #include "block.h" typedef struct { long total; long idle; } Cpuinfo; const char *FILE_STAT = "/proc/stat"; const char *REGEX_CPU = "^cpu[0-9]"; const int THRESHOLD_YELLOW = 50; const int THRESHOLD_RED = 75; void read_cpuinfo(Cpuinfo *cpuinfo) { FILE *file = fopen(FILE_STAT, "r"); if (file != NULL) { char *line = NULL; size_t len = 0; ssize_t read; long total = 0; long idle = 0; regex_t regex; regcomp(&regex, REGEX_CPU, 0); while ((read = getline(&line, &len, file)) != -1) { if (!regexec(&regex, line, 0, NULL, 0)) { int column = 0; char *token = strtok(line, " "); while (token) { long value = strtol(token, NULL, 10); if (column == 4) idle += value; total += value; token = strtok(NULL, " "); column++; } } } cpuinfo->total = total; cpuinfo->idle = idle; free(line); fclose(file); } } void print_cpuinfo(Cpuinfo *cpuinfoone, Cpuinfo *cpuinfotwo) { char color[8]; long idle = cpuinfotwo->idle - cpuinfoone->idle; long total = cpuinfotwo->total - cpuinfoone->total; double percent = (double)(1000.0 * (total - idle) / total + 5.0) / 10.0; set_color(color, percent, THRESHOLD_RED, THRESHOLD_YELLOW); printf("%.2f%%\n", percent); printf("%.2f%%\n", percent); printf("%s\n", color); } int main() { Cpuinfo cpuinfoone; Cpuinfo cpuinfotwo; cpuinfoone.total = 0; cpuinfoone.idle = 0; cpuinfotwo.total = 0; cpuinfotwo.idle = 0; if (access(FILE_STAT, F_OK) != -1) { read_cpuinfo(&cpuinfoone); sleep(1); read_cpuinfo(&cpuinfotwo); } print_cpuinfo(&cpuinfoone, &cpuinfotwo); return 0; }
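/*
 * Illustrative sketch, not part of the original file: the delta/percentage
 * arithmetic used by print_cpuinfo() above, factored into a helper and guarded
 * against a zero total delta (for example when /proc/stat was unreadable and
 * both samples stayed zeroed, which would otherwise divide by zero). The
 * helper name is made up for this sketch.
 */
static double example_cpu_percent(const Cpuinfo *before, const Cpuinfo *after)
{
	long idle = after->idle - before->idle;
	long total = after->total - before->total;

	if (total <= 0)
		return 0.0;

	/* same expression as print_cpuinfo(): per-mille usage, plus 5, over 10 */
	return (double)(1000.0 * (total - idle) / total + 5.0) / 10.0;
}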
83320.c
$NetBSD: patch-src_cm.c,v 1.1 2013/04/21 15:39:59 joerg Exp $ --- src/cm.c.orig 1994-10-21 04:19:53.000000000 +0000 +++ src/cm.c @@ -282,7 +282,7 @@ losecursor () #define USELL 2 #define USECR 3 -cmgoto (row, col) +void cmgoto (int row, int col) { int homecost, crcost,
861728.c
/* * Copyright 2015-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ // Based on Eclipse Paho. /******************************************************************************* * Copyright (c) 2014 IBM Corp. * * All rights reserved. This program and the accompanying materials * are made available under the terms of the Eclipse Public License v1.0 * and Eclipse Distribution License v1.0 which accompany this distribution. * * The Eclipse Public License is available at * http://www.eclipse.org/legal/epl-v10.html * and the Eclipse Distribution License is available at * http://www.eclipse.org/org/documents/edl-v10.php. * * Contributors: * Ian Craggs - initial API and implementation and/or initial documentation * Sergio R. Caprile - non-blocking packet read functions for stream transport *******************************************************************************/ /** * @file aws_iot_mqtt_client_common_internal.c * @brief MQTT client internal API definitions */ #ifdef __cplusplus extern "C" { #endif #include <aws_iot_mqtt_client.h> #include "aws_iot_mqtt_client_common_internal.h" /* Max length of packet header */ #define MAX_NO_OF_REMAINING_LENGTH_BYTES 4 /** * Encodes the message length according to the MQTT algorithm * @param buf the buffer into which the encoded data is written * @param length the length to be encoded * @return the number of bytes written to buffer */ size_t aws_iot_mqtt_internal_write_len_to_buffer(unsigned char *buf, uint32_t length) { size_t outLen = 0; unsigned char encodedByte; FUNC_ENTRY; do { encodedByte = (unsigned char) (length % 128); length /= 128; /* if there are more digits to encode, set the top bit of this digit */ if(length > 0) { encodedByte |= 0x80; } buf[outLen++] = encodedByte; } while(length > 0); FUNC_EXIT_RC(outLen); } /** * Decodes the message length according to the MQTT algorithm * @param the buffer containing the message * @param value the decoded length returned * @return the number of bytes read from the socket */ IoT_Error_t aws_iot_mqtt_internal_decode_remaining_length_from_buffer(unsigned char *buf, uint32_t *decodedLen, uint32_t *readBytesLen) { unsigned char encodedByte; uint32_t multiplier, len; FUNC_ENTRY; multiplier = 1; len = 0; *decodedLen = 0; do { if(++len > MAX_NO_OF_REMAINING_LENGTH_BYTES) { /* bad data */ FUNC_EXIT_RC(MQTT_DECODE_REMAINING_LENGTH_ERROR); } encodedByte = *buf; buf++; *decodedLen += (encodedByte & 127) * multiplier; multiplier *= 128; } while((encodedByte & 128) != 0); *readBytesLen = len; FUNC_EXIT_RC(SUCCESS); } uint32_t aws_iot_mqtt_internal_get_final_packet_length_from_remaining_length(uint32_t rem_len) { rem_len += 1; /* header byte */ /* now remaining_length field (MQTT 3.1.1 - 2.2.3)*/ if(rem_len < 128) { rem_len += 1; } else if(rem_len < 16384) { rem_len += 2; } else if(rem_len < 2097152) { rem_len += 3; } else { rem_len += 4; } return rem_len; } /** * Calculates uint16 packet id from two bytes read from the input buffer * Checks Endianness at runtime * * @param pptr pointer to the input buffer - incremented by the 
number of bytes used & returned * @return the value calculated */ uint16_t aws_iot_mqtt_internal_read_uint16_t(unsigned char **pptr) { unsigned char *ptr = *pptr; uint16_t len = 0; uint8_t firstByte = (uint8_t) (*ptr); uint8_t secondByte = (uint8_t) (*(ptr + 1)); len = (uint16_t) (secondByte + (256 * firstByte)); *pptr += 2; return len; } /** * Writes an integer as 2 bytes to an output buffer. * @param pptr pointer to the output buffer - incremented by the number of bytes used & returned * @param anInt the integer to write */ void aws_iot_mqtt_internal_write_uint_16(unsigned char **pptr, uint16_t anInt) { **pptr = (unsigned char) (anInt / 256); (*pptr)++; **pptr = (unsigned char) (anInt % 256); (*pptr)++; } /** * Reads one character from the input buffer. * @param pptr pointer to the input buffer - incremented by the number of bytes used & returned * @return the character read */ unsigned char aws_iot_mqtt_internal_read_char(unsigned char **pptr) { unsigned char c = **pptr; (*pptr)++; return c; } /** * Writes one character to an output buffer. * @param pptr pointer to the output buffer - incremented by the number of bytes used & returned * @param c the character to write */ void aws_iot_mqtt_internal_write_char(unsigned char **pptr, unsigned char c) { **pptr = c; (*pptr)++; } void aws_iot_mqtt_internal_write_utf8_string(unsigned char **pptr, const char *string, uint16_t stringLen) { /* Nothing that calls this function will have a stringLen with a size larger than 2 bytes (MQTT 3.1.1 - 1.5.3) */ aws_iot_mqtt_internal_write_uint_16(pptr, stringLen); if(stringLen > 0) { memcpy(*pptr, string, stringLen); *pptr += stringLen; } } /** * Initialize the MQTTHeader structure. Used to ensure that Header bits are * always initialized using the proper mappings. No Endianness issues here since * the individual fields are all less than a byte. Also generates no warnings since * all fields are initialized using hex constants */ IoT_Error_t aws_iot_mqtt_internal_init_header(MQTTHeader *pHeader, MessageTypes message_type, QoS qos, uint8_t dup, uint8_t retained) { FUNC_ENTRY; if(NULL == pHeader) { FUNC_EXIT_RC(NULL_VALUE_ERROR); } /* Set all bits to zero */ pHeader->byte = 0; uint8_t type = 0; switch(message_type) { case UNKNOWN: /* Should never happen */ return FAILURE; case CONNECT: type = 0x01; break; case CONNACK: type = 0x02; break; case PUBLISH: type = 0x03; break; case PUBACK: type = 0x04; break; case PUBREC: type = 0x05; break; case PUBREL: type = 0x06; break; case PUBCOMP: type = 0x07; break; case SUBSCRIBE: type = 0x08; break; case SUBACK: type = 0x09; break; case UNSUBSCRIBE: type = 0x0A; break; case UNSUBACK: type = 0x0B; break; case PINGREQ: type = 0x0C; break; case PINGRESP: type = 0x0D; break; case DISCONNECT: type = 0x0E; break; default: /* Should never happen */ FUNC_EXIT_RC(FAILURE); } pHeader->byte = type << 4; pHeader->byte |= dup << 3; switch(qos) { case QOS0: break; case QOS1: pHeader->byte |= 1 << 1; break; default: /* Using QOS0 as default */ break; } pHeader->byte |= (1 == retained) ? 
0x01 : 0x00; FUNC_EXIT_RC(SUCCESS); } IoT_Error_t aws_iot_mqtt_internal_send_packet(AWS_IoT_Client *pClient, size_t length, Timer *pTimer) { size_t sentLen, sent; IoT_Error_t rc; FUNC_ENTRY; if(NULL == pClient || NULL == pTimer) { FUNC_EXIT_RC(NULL_VALUE_ERROR); } if(length >= pClient->clientData.writeBufSize) { FUNC_EXIT_RC(MQTT_TX_BUFFER_TOO_SHORT_ERROR); } #ifdef _ENABLE_THREAD_SUPPORT_ rc = aws_iot_mqtt_client_lock_mutex(pClient, &(pClient->clientData.tls_write_mutex)); if(SUCCESS != rc) { FUNC_EXIT_RC(rc); } #endif sentLen = 0; sent = 0; while(sent < length && !has_timer_expired(pTimer)) { rc = pClient->networkStack.write(&(pClient->networkStack), &pClient->clientData.writeBuf[sent], (length - sent), pTimer, &sentLen); if(SUCCESS != rc) { /* there was an error writing the data */ break; } sent += sentLen; } #ifdef _ENABLE_THREAD_SUPPORT_ rc = aws_iot_mqtt_client_unlock_mutex(pClient, &(pClient->clientData.tls_write_mutex)); if(SUCCESS != rc) { FUNC_EXIT_RC(rc); } #endif if(sent == length) { /* record the fact that we have successfully sent the packet */ //countdown_sec(&c->pingTimer, c->clientData.keepAliveInterval); FUNC_EXIT_RC(SUCCESS); } FUNC_EXIT_RC(rc); } static IoT_Error_t _aws_iot_mqtt_internal_readWrapper( AWS_IoT_Client *pClient, size_t offset, size_t size, Timer *pTimer, size_t * read_len ) { IoT_Error_t rc; int byteToRead; size_t byteRead = 0; byteToRead = ( offset + size ) - pClient->clientData.readBufIndex; if ( byteToRead > 0 ) { rc = pClient->networkStack.read( &( pClient->networkStack ), pClient->clientData.readBuf + pClient->clientData.readBufIndex, (size_t)byteToRead, pTimer, &byteRead ); pClient->clientData.readBufIndex += byteRead; /* refresh byte to read */ byteToRead = ( offset + size ) - ((int)pClient->clientData.readBufIndex); *read_len = size - (size_t)byteToRead; } else { *read_len = size; rc = SUCCESS; } return rc; } static IoT_Error_t _aws_iot_mqtt_internal_decode_packet_remaining_len(AWS_IoT_Client *pClient, size_t * offset, size_t *rem_len, Timer *pTimer) { size_t multiplier, len; IoT_Error_t rc; size_t read_len; FUNC_ENTRY; multiplier = 1; len = 0; *rem_len = 0; do { if(++len > MAX_NO_OF_REMAINING_LENGTH_BYTES) { /* bad data */ FUNC_EXIT_RC(MQTT_DECODE_REMAINING_LENGTH_ERROR); } rc = _aws_iot_mqtt_internal_readWrapper( pClient, len, 1, pTimer, &read_len ); if(SUCCESS != rc) { FUNC_EXIT_RC(rc); } *rem_len += (( pClient->clientData.readBuf[len] & 127) * multiplier); multiplier *= 128; } while(( pClient->clientData.readBuf[len] & 128) != 0); *offset = len + 1; FUNC_EXIT_RC(rc); } static IoT_Error_t _aws_iot_mqtt_internal_read_packet(AWS_IoT_Client *pClient, Timer *pTimer, uint8_t *pPacketType) { size_t rem_len, total_bytes_read, bytes_to_be_read, read_len; IoT_Error_t rc; size_t offset = 0; MQTTHeader header = {0}; Timer packetTimer; init_timer(&packetTimer); countdown_ms(&packetTimer, pClient->clientData.packetTimeoutMs); rem_len = 0; total_bytes_read = 0; bytes_to_be_read = 0; read_len = 0; rc = _aws_iot_mqtt_internal_readWrapper( pClient, offset, 1, pTimer, &read_len ); /* 1. read the header byte. This has the packet type in it */ if(NETWORK_SSL_NOTHING_TO_READ == rc) { return MQTT_NOTHING_TO_READ; } else if(SUCCESS != rc) { return rc; } /* 2. read the remaining length. 
This is variable in itself */ rc = _aws_iot_mqtt_internal_decode_packet_remaining_len(pClient, &offset, &rem_len, pTimer); if(SUCCESS != rc) { return rc; } /* if the buffer is too short then the message will be dropped silently */ if((rem_len + offset) >= pClient->clientData.readBufSize) { bytes_to_be_read = pClient->clientData.readBufSize; do { rc = pClient->networkStack.read(&(pClient->networkStack), pClient->clientData.readBuf, bytes_to_be_read, pTimer, &read_len); if(SUCCESS == rc) { total_bytes_read += read_len; if((rem_len - total_bytes_read) >= pClient->clientData.readBufSize) { bytes_to_be_read = pClient->clientData.readBufSize; } else { bytes_to_be_read = rem_len - total_bytes_read; } } } while(total_bytes_read < rem_len && SUCCESS == rc); /* Check buffer was correctly emptied, otherwise, return error message. */ if ( total_bytes_read == rem_len ) { aws_iot_mqtt_internal_flushBuffers( pClient ); return MQTT_RX_BUFFER_TOO_SHORT_ERROR; } else { return rc; } } /* 3. read the rest of the buffer using a callback to supply the rest of the data */ if(rem_len > 0) { rc = _aws_iot_mqtt_internal_readWrapper( pClient, offset, rem_len, pTimer, &read_len ); if(SUCCESS != rc || read_len != rem_len) { return FAILURE; } } /* Pack has been received, we can flush the buffers for next call. */ aws_iot_mqtt_internal_flushBuffers( pClient ); header.byte = pClient->clientData.readBuf[0]; *pPacketType = MQTT_HEADER_FIELD_TYPE(header.byte); FUNC_EXIT_RC(rc); } // assume topic filter and name is in correct format // # can only be at end // + and # can only be next to separator static bool _aws_iot_mqtt_internal_is_topic_matched(char *pTopicFilter, char *pTopicName, uint16_t topicNameLen) { char *curf, *curn, *curn_end; if(NULL == pTopicFilter || NULL == pTopicName) { return false; } curf = pTopicFilter; curn = pTopicName; curn_end = curn + topicNameLen; while(*curf && (curn < curn_end)) { if(*curn == '/' && *curf != '/') { break; } if(*curf != '+' && *curf != '#' && *curf != *curn) { break; } if(*curf == '+') { /* skip until we meet the next separator, or end of string */ char *nextpos = curn + 1; while(nextpos < curn_end && *nextpos != '/') nextpos = ++curn + 1; } else if(*curf == '#') { /* skip until end of string */ curn = curn_end - 1; } curf++; curn++; }; return (curn == curn_end) && (*curf == '\0'); } static IoT_Error_t _aws_iot_mqtt_internal_deliver_message(AWS_IoT_Client *pClient, char *pTopicName, uint16_t topicNameLen, IoT_Publish_Message_Params *pMessageParams) { uint32_t itr; IoT_Error_t rc; ClientState clientState; FUNC_ENTRY; if(NULL == pTopicName) { FUNC_EXIT_RC(NULL_VALUE_ERROR); } /* This function can be called from all MQTT APIs * But while callback return is in progress, Yield should not be called. 
* The state for CB_RETURN accomplishes that, as yield cannot be called while in that state */ clientState = aws_iot_mqtt_get_client_state(pClient); aws_iot_mqtt_set_client_state(pClient, clientState, CLIENT_STATE_CONNECTED_WAIT_FOR_CB_RETURN); /* Find the right message handler - indexed by topic */ for(itr = 0; itr < AWS_IOT_MQTT_NUM_SUBSCRIBE_HANDLERS; ++itr) { if(NULL != pClient->clientData.messageHandlers[itr].topicName) { if(((topicNameLen == pClient->clientData.messageHandlers[itr].topicNameLen) && (strncmp(pTopicName, (char *) pClient->clientData.messageHandlers[itr].topicName, topicNameLen) == 0)) || _aws_iot_mqtt_internal_is_topic_matched((char *) pClient->clientData.messageHandlers[itr].topicName, pTopicName, topicNameLen)) { if(NULL != pClient->clientData.messageHandlers[itr].pApplicationHandler) { pClient->clientData.messageHandlers[itr].pApplicationHandler(pClient, pTopicName, topicNameLen, pMessageParams, pClient->clientData.messageHandlers[itr].pApplicationHandlerData); } } } } rc = aws_iot_mqtt_set_client_state(pClient, CLIENT_STATE_CONNECTED_WAIT_FOR_CB_RETURN, clientState); FUNC_EXIT_RC(rc); } static IoT_Error_t _aws_iot_mqtt_internal_handle_publish(AWS_IoT_Client *pClient, Timer *pTimer) { char *topicName; uint16_t topicNameLen; uint32_t len; IoT_Error_t rc; IoT_Publish_Message_Params msg; FUNC_ENTRY; topicName = NULL; topicNameLen = 0; len = 0; rc = aws_iot_mqtt_internal_deserialize_publish(&msg.isDup, &msg.qos, &msg.isRetained, &msg.id, &topicName, &topicNameLen, (unsigned char **) &msg.payload, &msg.payloadLen, pClient->clientData.readBuf, pClient->clientData.readBufSize); if(SUCCESS != rc) { FUNC_EXIT_RC(rc); } rc = _aws_iot_mqtt_internal_deliver_message(pClient, topicName, topicNameLen, &msg); if(SUCCESS != rc) { FUNC_EXIT_RC(rc); } if(QOS0 == msg.qos) { /* No further processing required for QoS0 */ FUNC_EXIT_RC(SUCCESS); } /* Message assumed to be QoS1 since we do not support QoS2 at this time */ rc = aws_iot_mqtt_internal_serialize_ack(pClient->clientData.writeBuf, pClient->clientData.writeBufSize, PUBACK, 0, msg.id, &len); if(SUCCESS != rc) { FUNC_EXIT_RC(rc); } rc = aws_iot_mqtt_internal_send_packet(pClient, len, pTimer); if(SUCCESS != rc) { FUNC_EXIT_RC(rc); } FUNC_EXIT_RC(SUCCESS); } IoT_Error_t aws_iot_mqtt_internal_cycle_read(AWS_IoT_Client *pClient, Timer *pTimer, uint8_t *pPacketType) { IoT_Error_t rc; #ifdef _ENABLE_THREAD_SUPPORT_ IoT_Error_t threadRc; #endif if(NULL == pClient || NULL == pTimer) { return NULL_VALUE_ERROR; } #ifdef _ENABLE_THREAD_SUPPORT_ threadRc = aws_iot_mqtt_client_lock_mutex(pClient, &(pClient->clientData.tls_read_mutex)); if(SUCCESS != threadRc) { FUNC_EXIT_RC(threadRc); } #endif /* read the socket, see what work is due */ rc = _aws_iot_mqtt_internal_read_packet(pClient, pTimer, pPacketType); #ifdef _ENABLE_THREAD_SUPPORT_ threadRc = aws_iot_mqtt_client_unlock_mutex(pClient, &(pClient->clientData.tls_read_mutex)); if(SUCCESS != threadRc && (MQTT_NOTHING_TO_READ == rc || SUCCESS == rc)) { return threadRc; } #endif if(MQTT_NOTHING_TO_READ == rc) { /* Nothing to read, not a cycle failure */ return SUCCESS; } else if(SUCCESS != rc) { return rc; } switch(*pPacketType) { case CONNACK: case PUBACK: case SUBACK: case UNSUBACK: /* SDK is blocking, these responses will be forwarded to calling function to process */ break; case PUBLISH: { rc = _aws_iot_mqtt_internal_handle_publish(pClient, pTimer); break; } case PUBREC: case PUBCOMP: /* QoS2 not supported at this time */ break; case PINGRESP: { pClient->clientStatus.isPingOutstanding = 0; 
countdown_sec(&pClient->pingTimer, pClient->clientData.keepAliveInterval); break; } default: { /* Either unknown packet type or Failure occurred * Should not happen */ rc = MQTT_RX_MESSAGE_PACKET_TYPE_INVALID_ERROR; break; } } return rc; } IoT_Error_t aws_iot_mqtt_internal_flushBuffers( AWS_IoT_Client *pClient ) { pClient->clientData.readBufIndex = 0; return SUCCESS; } /* only used in single-threaded mode where one command at a time is in process */ IoT_Error_t aws_iot_mqtt_internal_wait_for_read(AWS_IoT_Client *pClient, uint8_t packetType, Timer *pTimer) { IoT_Error_t rc; uint8_t read_packet_type; FUNC_ENTRY; if(NULL == pClient || NULL == pTimer) { FUNC_EXIT_RC(NULL_VALUE_ERROR); } read_packet_type = 0; do { if(has_timer_expired(pTimer)) { /* we timed out */ rc = MQTT_REQUEST_TIMEOUT_ERROR; break; } rc = aws_iot_mqtt_internal_cycle_read(pClient, pTimer, &read_packet_type); } while(((SUCCESS == rc) || (MQTT_NOTHING_TO_READ == rc)) && (read_packet_type != packetType)); /* If rc is SUCCESS, we have received the expected * MQTT packet. Otherwise rc tells the error. */ FUNC_EXIT_RC(rc); } /** * Serializes a 0-length packet into the supplied buffer, ready for writing to a socket * @param buf the buffer into which the packet will be serialized * @param buflen the length in bytes of the supplied buffer, to avoid overruns * @param packettype the message type * @param serialized length * @return IoT_Error_t indicating function execution status */ IoT_Error_t aws_iot_mqtt_internal_serialize_zero(unsigned char *pTxBuf, size_t txBufLen, MessageTypes packetType, size_t *pSerializedLength) { unsigned char *ptr; IoT_Error_t rc; MQTTHeader header = {0}; FUNC_ENTRY; if(NULL == pTxBuf || NULL == pSerializedLength) { FUNC_EXIT_RC(NULL_VALUE_ERROR); } /* Buffer should have at least 2 bytes for the header */ if(4 > txBufLen) { FUNC_EXIT_RC(MQTT_TX_BUFFER_TOO_SHORT_ERROR); } ptr = pTxBuf; rc = aws_iot_mqtt_internal_init_header(&header, packetType, QOS0, 0, 0); if(SUCCESS != rc) { FUNC_EXIT_RC(rc); } /* write header */ aws_iot_mqtt_internal_write_char(&ptr, header.byte); /* write remaining length */ ptr += aws_iot_mqtt_internal_write_len_to_buffer(ptr, 0); *pSerializedLength = (uint32_t) (ptr - pTxBuf); FUNC_EXIT_RC(SUCCESS); } #ifdef __cplusplus } #endif
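/*
 * Illustrative sketch appended after the original file content: round-tripping
 * the MQTT "remaining length" encoding with the two helpers defined earlier in
 * this file. The buffer size matches MAX_NO_OF_REMAINING_LENGTH_BYTES; the
 * function name is made up for this sketch.
 */
static uint32_t example_remaining_length_round_trip(void) {
	unsigned char buf[MAX_NO_OF_REMAINING_LENGTH_BYTES];
	uint32_t decoded = 0, readBytes = 0;

	/* 321 = 2 * 128 + 65, so it encodes to the two bytes 0xC1 0x02 */
	(void) aws_iot_mqtt_internal_write_len_to_buffer(buf, 321);
	(void) aws_iot_mqtt_internal_decode_remaining_length_from_buffer(buf, &decoded, &readBytes);

	/* readBytes is 2 here and decoded is back to 321 */
	return decoded;
}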
989193.c
/* * led.c * * Created on: Oct 11, 2020 * Author: yagmu */ #include<stdint.h> #include "led.h" void delay(uint32_t count) { for(uint32_t i = 0 ; i < count ; i++); } void led_init_all(void) { uint32_t *pRccAhb1enr = (uint32_t*)0x40023830; uint32_t *pGpiodModeReg = (uint32_t*)0x40020C00; *pRccAhb1enr |= ( 1 << 3); //configure LED_GREEN *pGpiodModeReg |= ( 1 << (2 * LED_GREEN)); *pGpiodModeReg |= ( 1 << (2 * LED_ORANGE)); *pGpiodModeReg |= ( 1 << (2 * LED_RED)); *pGpiodModeReg |= ( 1 << (2 * LED_BLUE)); #if 0 //configure the outputtype *pGpioOpTypeReg |= ( 1 << (2 * LED_GREEN)); *pGpioOpTypeReg |= ( 1 << (2 * LED_ORANGE)); *pGpioOpTypeReg |= ( 1 << (2 * LED_RED)); *pGpioOpTypeReg |= ( 1 << (2 * LED_BLUE)); #endif led_off(LED_GREEN); led_off(LED_ORANGE); led_off(LED_RED); led_off(LED_BLUE); } void led_on(uint8_t led_no) { uint32_t *pGpiodDataReg = (uint32_t*)0x40020C14; *pGpiodDataReg |= ( 1 << led_no); } void led_off(uint8_t led_no) { uint32_t *pGpiodDataReg = (uint32_t*)0x40020C14; *pGpiodDataReg &= ~( 1 << led_no); }
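/*
 * Illustrative usage sketch, not part of the original file: drive the four
 * discovery-board LEDs with the helpers above. LED_GREEN/LED_ORANGE/LED_RED/
 * LED_BLUE come from led.h; the delay count is an arbitrary value chosen for
 * this sketch, and the function name is made up (the real entry point lives in
 * the project's main.c).
 */
static void example_blink_all(void)
{
	led_init_all();

	while(1)
	{
		led_on(LED_GREEN);
		led_on(LED_ORANGE);
		led_on(LED_RED);
		led_on(LED_BLUE);
		delay(500000);

		led_off(LED_GREEN);
		led_off(LED_ORANGE);
		led_off(LED_RED);
		led_off(LED_BLUE);
		delay(500000);
	}
}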
612935.c
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "../../AnyOS/entrypoints.h" typedef uint16_t UChar; // Include System.Globalization.Native headers #include "pal_calendarData.h" #include "pal_casing.h" #include "pal_collation.h" #include "pal_locale.h" #include "pal_localeNumberData.h" #include "pal_localeStringData.h" #include "pal_icushim.h" #include "pal_idna.h" #include "pal_normalization.h" #include "pal_timeZoneInfo.h" static const Entry s_globalizationNative[] = { DllImportEntry(GlobalizationNative_ChangeCase) DllImportEntry(GlobalizationNative_ChangeCaseInvariant) DllImportEntry(GlobalizationNative_ChangeCaseTurkish) DllImportEntry(GlobalizationNative_CloseSortHandle) DllImportEntry(GlobalizationNative_CompareString) DllImportEntry(GlobalizationNative_EndsWith) DllImportEntry(GlobalizationNative_EnumCalendarInfo) DllImportEntry(GlobalizationNative_GetCalendarInfo) DllImportEntry(GlobalizationNative_GetCalendars) DllImportEntry(GlobalizationNative_GetDefaultLocaleName) DllImportEntry(GlobalizationNative_GetICUVersion) DllImportEntry(GlobalizationNative_GetJapaneseEraStartDate) DllImportEntry(GlobalizationNative_GetLatestJapaneseEra) DllImportEntry(GlobalizationNative_GetLocaleInfoGroupingSizes) DllImportEntry(GlobalizationNative_GetLocaleInfoInt) DllImportEntry(GlobalizationNative_GetLocaleInfoString) DllImportEntry(GlobalizationNative_GetLocaleName) DllImportEntry(GlobalizationNative_GetLocales) DllImportEntry(GlobalizationNative_GetLocaleTimeFormat) DllImportEntry(GlobalizationNative_GetSortHandle) DllImportEntry(GlobalizationNative_GetSortKey) DllImportEntry(GlobalizationNative_GetSortVersion) DllImportEntry(GlobalizationNative_GetTimeZoneDisplayName) DllImportEntry(GlobalizationNative_IanaIdToWindowsId) DllImportEntry(GlobalizationNative_IndexOf) DllImportEntry(GlobalizationNative_InitICUFunctions) DllImportEntry(GlobalizationNative_InitOrdinalCasingPage) DllImportEntry(GlobalizationNative_IsNormalized) DllImportEntry(GlobalizationNative_IsPredefinedLocale) DllImportEntry(GlobalizationNative_LastIndexOf) DllImportEntry(GlobalizationNative_LoadICU) DllImportEntry(GlobalizationNative_NormalizeString) DllImportEntry(GlobalizationNative_StartsWith) DllImportEntry(GlobalizationNative_ToAscii) DllImportEntry(GlobalizationNative_ToUnicode) DllImportEntry(GlobalizationNative_WindowsIdToIanaId) }; EXTERN_C const void* GlobalizationResolveDllImport(const char* name); EXTERN_C const void* GlobalizationResolveDllImport(const char* name) { return ResolveDllImport(s_globalizationNative, lengthof(s_globalizationNative), name); }
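/*
 * Illustrative sketch, not part of the original file: resolving one of the
 * table entries above by name. The function-pointer typedef is an assumption
 * made only for this sketch; the real prototype of
 * GlobalizationNative_GetICUVersion is declared in pal_icushim.h.
 */
static int32_t example_lookup_icu_version(void)
{
    typedef int32_t (*GetICUVersionFn)(void);
    GetICUVersionFn fn = (GetICUVersionFn)GlobalizationResolveDllImport("GlobalizationNative_GetICUVersion");

    /* Returns 0 if the name is not present in the entry-point table. */
    return fn != NULL ? fn() : 0;
}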
754444.c
/* * Copyright 2005-2017 ECMWF. * * This software is licensed under the terms of the Apache Licence Version 2.0 * which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. * * In applying this licence, ECMWF does not waive the privileges and immunities granted to it by * virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction. */ #include "grib_api_internal.h" /* This is used by make_class.pl START_CLASS_DEF CLASS = accessor SUPER = grib_accessor_class_double IMPLEMENTS = pack_double;unpack_string IMPLEMENTS = init;dump IMPLEMENTS = unpack_double MEMBERS=const char* laf MEMBERS=const char* lof MEMBERS=const char* lal MEMBERS=const char* lol MEMBERS=const char* div END_CLASS_DEF */ /* START_CLASS_IMP */ /* Don't edit anything between START_CLASS_IMP and END_CLASS_IMP Instead edit values between START_CLASS_DEF and END_CLASS_DEF or edit "accessor.class" and rerun ./make_class.pl */ static int pack_double(grib_accessor*, const double* val,size_t *len); static int unpack_double(grib_accessor*, double* val,size_t *len); static int unpack_string (grib_accessor*, char*, size_t *len); static void dump(grib_accessor*, grib_dumper*); static void init(grib_accessor*,const long, grib_arguments* ); static void init_class(grib_accessor_class*); typedef struct grib_accessor_g1area { grib_accessor att; /* Members defined in gen */ /* Members defined in double */ /* Members defined in g1area */ const char* laf; const char* lof; const char* lal; const char* lol; const char* div; } grib_accessor_g1area; extern grib_accessor_class* grib_accessor_class_double; static grib_accessor_class _grib_accessor_class_g1area = { &grib_accessor_class_double, /* super */ "g1area", /* name */ sizeof(grib_accessor_g1area), /* size */ 0, /* inited */ &init_class, /* init_class */ &init, /* init */ 0, /* post_init */ 0, /* free mem */ &dump, /* describes himself */ 0, /* get length of section */ 0, /* get length of string */ 0, /* get number of values */ 0, /* get number of bytes */ 0, /* get offset to bytes */ 0, /* get native type */ 0, /* get sub_section */ 0, /* grib_pack procedures long */ 0, /* grib_pack procedures long */ 0, /* grib_pack procedures long */ 0, /* grib_unpack procedures long */ &pack_double, /* grib_pack procedures double */ &unpack_double, /* grib_unpack procedures double */ 0, /* grib_pack procedures string */ &unpack_string, /* grib_unpack procedures string */ 0, /* grib_pack array procedures string */ 0, /* grib_unpack array procedures string */ 0, /* grib_pack procedures bytes */ 0, /* grib_unpack procedures bytes */ 0, /* pack_expression */ 0, /* notify_change */ 0, /* update_size */ 0, /* preferred_size */ 0, /* resize */ 0, /* nearest_smaller_value */ 0, /* next accessor */ 0, /* compare vs. 
another accessor */ 0, /* unpack only ith value */ 0, /* unpack a subarray */ 0, /* clear */ 0, /* clone accessor */ }; grib_accessor_class* grib_accessor_class_g1area = &_grib_accessor_class_g1area; static void init_class(grib_accessor_class* c) { c->next_offset = (*(c->super))->next_offset; c->string_length = (*(c->super))->string_length; c->value_count = (*(c->super))->value_count; c->byte_count = (*(c->super))->byte_count; c->byte_offset = (*(c->super))->byte_offset; c->get_native_type = (*(c->super))->get_native_type; c->sub_section = (*(c->super))->sub_section; c->pack_missing = (*(c->super))->pack_missing; c->is_missing = (*(c->super))->is_missing; c->pack_long = (*(c->super))->pack_long; c->unpack_long = (*(c->super))->unpack_long; c->pack_string = (*(c->super))->pack_string; c->pack_string_array = (*(c->super))->pack_string_array; c->unpack_string_array = (*(c->super))->unpack_string_array; c->pack_bytes = (*(c->super))->pack_bytes; c->unpack_bytes = (*(c->super))->unpack_bytes; c->pack_expression = (*(c->super))->pack_expression; c->notify_change = (*(c->super))->notify_change; c->update_size = (*(c->super))->update_size; c->preferred_size = (*(c->super))->preferred_size; c->resize = (*(c->super))->resize; c->nearest_smaller_value = (*(c->super))->nearest_smaller_value; c->next = (*(c->super))->next; c->compare = (*(c->super))->compare; c->unpack_double_element = (*(c->super))->unpack_double_element; c->unpack_double_subarray = (*(c->super))->unpack_double_subarray; c->clear = (*(c->super))->clear; c->make_clone = (*(c->super))->make_clone; } /* END_CLASS_IMP */ static void init(grib_accessor* a,const long l, grib_arguments* c) { grib_accessor_g1area* self = (grib_accessor_g1area*)a; int n = 0; self->laf = grib_arguments_get_name(grib_handle_of_accessor(a),c,n++); self->lof = grib_arguments_get_name(grib_handle_of_accessor(a),c,n++); self->lal = grib_arguments_get_name(grib_handle_of_accessor(a),c,n++); self->lol = grib_arguments_get_name(grib_handle_of_accessor(a),c,n++); } static int pack_double (grib_accessor* a, const double* val, size_t *len) { grib_accessor_g1area* self = (grib_accessor_g1area*)a; int ret = 0; ret = grib_set_double_internal(grib_handle_of_accessor(a), self->laf,val[0]); if(ret ) return ret; ret = grib_set_double_internal(grib_handle_of_accessor(a), self->lof,val[1]); if(ret ) return ret; ret = grib_set_double_internal(grib_handle_of_accessor(a), self->lal,val[2]); if(ret) return ret; ret = grib_set_double_internal(grib_handle_of_accessor(a), self->lol,val[3]); if(ret ) return ret; if (ret == GRIB_SUCCESS) *len = 4; return ret; } static int unpack_double (grib_accessor* a, double* val, size_t *len) { grib_accessor_g1area* self = (grib_accessor_g1area*)a; int ret = 0; if(*len < 4){ *len = 4; return GRIB_BUFFER_TOO_SMALL; } ret = grib_get_double_internal(grib_handle_of_accessor(a), self->laf,val++); if(ret) return ret; ret = grib_get_double_internal(grib_handle_of_accessor(a), self->lof,val++); if(ret) return ret; ret = grib_get_double_internal(grib_handle_of_accessor(a), self->lal,val++); if(ret) return ret; ret = grib_get_double_internal(grib_handle_of_accessor(a), self->lol,val); if(ret ) return ret; if (ret == GRIB_SUCCESS) *len = 4; return ret; } static void dump(grib_accessor* a, grib_dumper* dumper) { grib_dump_string(dumper,a,NULL); } static int unpack_string(grib_accessor* a, char* val, size_t *len) { grib_accessor_g1area* self = (grib_accessor_g1area*)a; int ret = 0; double laf,lof,lal,lol; ret = grib_get_double_internal(grib_handle_of_accessor(a), 
self->laf,&laf); if(ret) return ret; ret = grib_get_double_internal(grib_handle_of_accessor(a), self->lof,&lof); if(ret) return ret; ret = grib_get_double_internal(grib_handle_of_accessor(a), self->lal,&lal); if(ret) return ret; ret = grib_get_double_internal(grib_handle_of_accessor(a), self->lol,&lol); if(ret) return ret; if(*len < 60) { grib_context_log(a->context, GRIB_LOG_ERROR, " Buffer too small for %s (%lu) ", a->name, (unsigned long)*len); *len = 0; return GRIB_BUFFER_TOO_SMALL; } sprintf(val,"N:%3.5f W:%3.5f S:%3.5f E:%3.5f",((float)laf),((float)lof),((float)lal),((float)lol)); len[0] = strlen(val); return GRIB_SUCCESS; }
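/*
 * Illustrative sketch, not part of the original file: pack_double() above
 * consumes the four corners in the order of the keys bound in init()
 * (laf, lof, lal, lol), which unpack_string() above labels N, W, S, E.
 * The key name "g1area" and the helper name below are assumptions for this
 * sketch; the accessor answers to whatever name the GRIB1 definition files
 * bind it to.
 */
static int example_set_area(grib_handle* h)
{
    double corners[4] = {60.0, -10.0, 40.0, 10.0}; /* N, W, S, E in degrees */
    return grib_set_double_array(h, "g1area", corners, 4);
}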
773302.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * altera-ci.c * * CI driver in conjunction with NetUp Dual DVB-T/C RF CI card * * Copyright (C) 2010,2011 NetUP Inc. * Copyright (C) 2010,2011 Igor M. Liplianin <[email protected]> */ /* * currently cx23885 GPIO's used. * GPIO-0 ~INT in * GPIO-1 TMS out * GPIO-2 ~reset chips out * GPIO-3 to GPIO-10 data/addr for CA in/out * GPIO-11 ~CS out * GPIO-12 AD_RG out * GPIO-13 ~WR out * GPIO-14 ~RD out * GPIO-15 ~RDY in * GPIO-16 TCK out * GPIO-17 TDO in * GPIO-18 TDI out */ /* * Bit definitions for MC417_RWD and MC417_OEN registers * bits 31-16 * +-----------+ * | Reserved | * +-----------+ * bit 15 bit 14 bit 13 bit 12 bit 11 bit 10 bit 9 bit 8 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | TDI | TDO | TCK | RDY# | #RD | #WR | AD_RG | #CS | * +-------+-------+-------+-------+-------+-------+-------+-------+ * bit 7 bit 6 bit 5 bit 4 bit 3 bit 2 bit 1 bit 0 * +-------+-------+-------+-------+-------+-------+-------+-------+ * | DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0| * +-------+-------+-------+-------+-------+-------+-------+-------+ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <media/dvb_demux.h> #include <media/dvb_frontend.h> #include "altera-ci.h" #include <media/dvb_ca_en50221.h> /* FPGA regs */ #define NETUP_CI_INT_CTRL 0x00 #define NETUP_CI_BUSCTRL2 0x01 #define NETUP_CI_ADDR0 0x04 #define NETUP_CI_ADDR1 0x05 #define NETUP_CI_DATA 0x06 #define NETUP_CI_BUSCTRL 0x07 #define NETUP_CI_PID_ADDR0 0x08 #define NETUP_CI_PID_ADDR1 0x09 #define NETUP_CI_PID_DATA 0x0a #define NETUP_CI_TSA_DIV 0x0c #define NETUP_CI_TSB_DIV 0x0d #define NETUP_CI_REVISION 0x0f /* const for ci op */ #define NETUP_CI_FLG_CTL 1 #define NETUP_CI_FLG_RD 1 #define NETUP_CI_FLG_AD 1 static unsigned int ci_dbg; module_param(ci_dbg, int, 0644); MODULE_PARM_DESC(ci_dbg, "Enable CI debugging"); static unsigned int pid_dbg; module_param(pid_dbg, int, 0644); MODULE_PARM_DESC(pid_dbg, "Enable PID filtering debugging"); MODULE_DESCRIPTION("altera FPGA CI module"); MODULE_AUTHOR("Igor M. Liplianin <[email protected]>"); MODULE_LICENSE("GPL"); #define ci_dbg_print(fmt, args...) \ do { \ if (ci_dbg) \ printk(KERN_DEBUG pr_fmt("%s: " fmt), \ __func__, ##args); \ } while (0) #define pid_dbg_print(fmt, args...) 
\ do { \ if (pid_dbg) \ printk(KERN_DEBUG pr_fmt("%s: " fmt), \ __func__, ##args); \ } while (0) struct altera_ci_state; struct netup_hw_pid_filter; struct fpga_internal { void *dev; struct mutex fpga_mutex;/* two CI's on the same fpga */ struct netup_hw_pid_filter *pid_filt[2]; struct altera_ci_state *state[2]; struct work_struct work; int (*fpga_rw) (void *dev, int flag, int data, int rw); int cis_used; int filts_used; int strt_wrk; }; /* stores all private variables for communication with CI */ struct altera_ci_state { struct fpga_internal *internal; struct dvb_ca_en50221 ca; int status; int nr; }; /* stores all private variables for hardware pid filtering */ struct netup_hw_pid_filter { struct fpga_internal *internal; struct dvb_demux *demux; /* save old functions */ int (*start_feed)(struct dvb_demux_feed *feed); int (*stop_feed)(struct dvb_demux_feed *feed); int status; int nr; }; /* internal params node */ struct fpga_inode { /* pointer for internal params, one for each pair of CI's */ struct fpga_internal *internal; struct fpga_inode *next_inode; }; /* first internal params */ static struct fpga_inode *fpga_first_inode; /* find chip by dev */ static struct fpga_inode *find_inode(void *dev) { struct fpga_inode *temp_chip = fpga_first_inode; if (temp_chip == NULL) return temp_chip; /* Search for the last fpga CI chip or find it by dev */ while ((temp_chip != NULL) && (temp_chip->internal->dev != dev)) temp_chip = temp_chip->next_inode; return temp_chip; } /* check demux */ static struct fpga_internal *check_filter(struct fpga_internal *temp_int, void *demux_dev, int filt_nr) { if (temp_int == NULL) return NULL; if ((temp_int->pid_filt[filt_nr]) == NULL) return NULL; if (temp_int->pid_filt[filt_nr]->demux == demux_dev) return temp_int; return NULL; } /* find chip by demux */ static struct fpga_inode *find_dinode(void *demux_dev) { struct fpga_inode *temp_chip = fpga_first_inode; struct fpga_internal *temp_int; /* * Search of the last fpga CI chip or * find it by demux */ while (temp_chip != NULL) { if (temp_chip->internal != NULL) { temp_int = temp_chip->internal; if (check_filter(temp_int, demux_dev, 0)) break; if (check_filter(temp_int, demux_dev, 1)) break; } temp_chip = temp_chip->next_inode; } return temp_chip; } /* deallocating chip */ static void remove_inode(struct fpga_internal *internal) { struct fpga_inode *prev_node = fpga_first_inode; struct fpga_inode *del_node = find_inode(internal->dev); if (del_node != NULL) { if (del_node == fpga_first_inode) { fpga_first_inode = del_node->next_inode; } else { while (prev_node->next_inode != del_node) prev_node = prev_node->next_inode; if (del_node->next_inode == NULL) prev_node->next_inode = NULL; else prev_node->next_inode = prev_node->next_inode->next_inode; } kfree(del_node); } } /* allocating new chip */ static struct fpga_inode *append_internal(struct fpga_internal *internal) { struct fpga_inode *new_node = fpga_first_inode; if (new_node == NULL) { new_node = kmalloc(sizeof(struct fpga_inode), GFP_KERNEL); fpga_first_inode = new_node; } else { while (new_node->next_inode != NULL) new_node = new_node->next_inode; new_node->next_inode = kmalloc(sizeof(struct fpga_inode), GFP_KERNEL); if (new_node->next_inode != NULL) new_node = new_node->next_inode; else new_node = NULL; } if (new_node != NULL) { new_node->internal = internal; new_node->next_inode = NULL; } return new_node; } static int netup_fpga_op_rw(struct fpga_internal *inter, int addr, u8 val, u8 read) { inter->fpga_rw(inter->dev, NETUP_CI_FLG_AD, addr, 0); return 
inter->fpga_rw(inter->dev, 0, val, read); } /* flag - mem/io, read - read/write */ static int altera_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot, u8 flag, u8 read, int addr, u8 val) { struct altera_ci_state *state = en50221->data; struct fpga_internal *inter = state->internal; u8 store; int mem = 0; if (0 != slot) return -EINVAL; mutex_lock(&inter->fpga_mutex); netup_fpga_op_rw(inter, NETUP_CI_ADDR0, ((addr << 1) & 0xfe), 0); netup_fpga_op_rw(inter, NETUP_CI_ADDR1, ((addr >> 7) & 0x7f), 0); store = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD); store &= 0x0f; store |= ((state->nr << 7) | (flag << 6)); netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, store, 0); mem = netup_fpga_op_rw(inter, NETUP_CI_DATA, val, read); mutex_unlock(&inter->fpga_mutex); ci_dbg_print("%s: %s: addr=[0x%02x], %s=%x\n", __func__, (read) ? "read" : "write", addr, (flag == NETUP_CI_FLG_CTL) ? "ctl" : "mem", (read) ? mem : val); return mem; } static int altera_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221, int slot, int addr) { return altera_ci_op_cam(en50221, slot, 0, NETUP_CI_FLG_RD, addr, 0); } static int altera_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221, int slot, int addr, u8 data) { return altera_ci_op_cam(en50221, slot, 0, 0, addr, data); } static int altera_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr) { return altera_ci_op_cam(en50221, slot, NETUP_CI_FLG_CTL, NETUP_CI_FLG_RD, addr, 0); } static int altera_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr, u8 data) { return altera_ci_op_cam(en50221, slot, NETUP_CI_FLG_CTL, 0, addr, data); } static int altera_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot) { struct altera_ci_state *state = en50221->data; struct fpga_internal *inter = state->internal; /* reasonable timeout for CI reset is 10 seconds */ unsigned long t_out = jiffies + msecs_to_jiffies(9999); int ret; ci_dbg_print("%s\n", __func__); if (0 != slot) return -EINVAL; mutex_lock(&inter->fpga_mutex); ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD); netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, (ret & 0xcf) | (1 << (5 - state->nr)), 0); mutex_unlock(&inter->fpga_mutex); for (;;) { msleep(50); mutex_lock(&inter->fpga_mutex); ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD); mutex_unlock(&inter->fpga_mutex); if ((ret & (1 << (5 - state->nr))) == 0) break; if (time_after(jiffies, t_out)) break; } ci_dbg_print("%s: %d msecs\n", __func__, jiffies_to_msecs(jiffies + msecs_to_jiffies(9999) - t_out)); return 0; } static int altera_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot) { /* not implemented */ return 0; } static int altera_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot) { struct altera_ci_state *state = en50221->data; struct fpga_internal *inter = state->internal; int ret; ci_dbg_print("%s\n", __func__); if (0 != slot) return -EINVAL; mutex_lock(&inter->fpga_mutex); ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD); netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, (ret & 0x0f) | (1 << (3 - state->nr)), 0); mutex_unlock(&inter->fpga_mutex); return 0; } /* work handler */ static void netup_read_ci_status(struct work_struct *work) { struct fpga_internal *inter = container_of(work, struct fpga_internal, work); int ret; ci_dbg_print("%s\n", __func__); mutex_lock(&inter->fpga_mutex); /* ack' irq */ ret = netup_fpga_op_rw(inter, NETUP_CI_INT_CTRL, 0, NETUP_CI_FLG_RD); ret = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL, 0, NETUP_CI_FLG_RD); 
mutex_unlock(&inter->fpga_mutex); if (inter->state[1] != NULL) { inter->state[1]->status = ((ret & 1) == 0 ? DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY : 0); ci_dbg_print("%s: setting CI[1] status = 0x%x\n", __func__, inter->state[1]->status); } if (inter->state[0] != NULL) { inter->state[0]->status = ((ret & 2) == 0 ? DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY : 0); ci_dbg_print("%s: setting CI[0] status = 0x%x\n", __func__, inter->state[0]->status); } } /* CI irq handler */ int altera_ci_irq(void *dev) { struct fpga_inode *temp_int = NULL; struct fpga_internal *inter = NULL; ci_dbg_print("%s\n", __func__); if (dev != NULL) { temp_int = find_inode(dev); if (temp_int != NULL) { inter = temp_int->internal; schedule_work(&inter->work); } } return 1; } EXPORT_SYMBOL(altera_ci_irq); static int altera_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open) { struct altera_ci_state *state = en50221->data; if (0 != slot) return -EINVAL; return state->status; } static void altera_hw_filt_release(void *main_dev, int filt_nr) { struct fpga_inode *temp_int = find_inode(main_dev); struct netup_hw_pid_filter *pid_filt = NULL; ci_dbg_print("%s\n", __func__); if (temp_int != NULL) { pid_filt = temp_int->internal->pid_filt[filt_nr - 1]; /* stored old feed controls */ pid_filt->demux->start_feed = pid_filt->start_feed; pid_filt->demux->stop_feed = pid_filt->stop_feed; if (((--(temp_int->internal->filts_used)) <= 0) && ((temp_int->internal->cis_used) <= 0)) { ci_dbg_print("%s: Actually removing\n", __func__); remove_inode(temp_int->internal); kfree(pid_filt->internal); } kfree(pid_filt); } } void altera_ci_release(void *dev, int ci_nr) { struct fpga_inode *temp_int = find_inode(dev); struct altera_ci_state *state = NULL; ci_dbg_print("%s\n", __func__); if (temp_int != NULL) { state = temp_int->internal->state[ci_nr - 1]; altera_hw_filt_release(dev, ci_nr); if (((temp_int->internal->filts_used) <= 0) && ((--(temp_int->internal->cis_used)) <= 0)) { ci_dbg_print("%s: Actually removing\n", __func__); remove_inode(temp_int->internal); kfree(state->internal); } if (state != NULL) { if (state->ca.data != NULL) dvb_ca_en50221_release(&state->ca); kfree(state); } } } EXPORT_SYMBOL(altera_ci_release); static void altera_pid_control(struct netup_hw_pid_filter *pid_filt, u16 pid, int onoff) { struct fpga_internal *inter = pid_filt->internal; u8 store = 0; /* pid 0-0x1f always enabled, don't touch them */ if ((pid == 0x2000) || (pid < 0x20)) return; mutex_lock(&inter->fpga_mutex); netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR0, (pid >> 3) & 0xff, 0); netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR1, ((pid >> 11) & 0x03) | (pid_filt->nr << 2), 0); store = netup_fpga_op_rw(inter, NETUP_CI_PID_DATA, 0, NETUP_CI_FLG_RD); if (onoff)/* 0 - on, 1 - off */ store |= (1 << (pid & 7)); else store &= ~(1 << (pid & 7)); netup_fpga_op_rw(inter, NETUP_CI_PID_DATA, store, 0); mutex_unlock(&inter->fpga_mutex); pid_dbg_print("%s: (%d) set pid: %5d 0x%04x '%s'\n", __func__, pid_filt->nr, pid, pid, onoff ? "off" : "on"); } static void altera_toggle_fullts_streaming(struct netup_hw_pid_filter *pid_filt, int filt_nr, int onoff) { struct fpga_internal *inter = pid_filt->internal; u8 store = 0; int i; pid_dbg_print("%s: pid_filt->nr[%d] now %s\n", __func__, pid_filt->nr, onoff ? 
"off" : "on"); if (onoff)/* 0 - on, 1 - off */ store = 0xff;/* ignore pid */ else store = 0;/* enable pid */ mutex_lock(&inter->fpga_mutex); for (i = 0; i < 1024; i++) { netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR0, i & 0xff, 0); netup_fpga_op_rw(inter, NETUP_CI_PID_ADDR1, ((i >> 8) & 0x03) | (pid_filt->nr << 2), 0); /* pid 0-0x1f always enabled */ netup_fpga_op_rw(inter, NETUP_CI_PID_DATA, (i > 3 ? store : 0), 0); } mutex_unlock(&inter->fpga_mutex); } static int altera_pid_feed_control(void *demux_dev, int filt_nr, struct dvb_demux_feed *feed, int onoff) { struct fpga_inode *temp_int = find_dinode(demux_dev); struct fpga_internal *inter = temp_int->internal; struct netup_hw_pid_filter *pid_filt = inter->pid_filt[filt_nr - 1]; altera_pid_control(pid_filt, feed->pid, onoff ? 0 : 1); /* call old feed proc's */ if (onoff) pid_filt->start_feed(feed); else pid_filt->stop_feed(feed); if (feed->pid == 0x2000) altera_toggle_fullts_streaming(pid_filt, filt_nr, onoff ? 0 : 1); return 0; } static int altera_ci_start_feed(struct dvb_demux_feed *feed, int num) { altera_pid_feed_control(feed->demux, num, feed, 1); return 0; } static int altera_ci_stop_feed(struct dvb_demux_feed *feed, int num) { altera_pid_feed_control(feed->demux, num, feed, 0); return 0; } static int altera_ci_start_feed_1(struct dvb_demux_feed *feed) { return altera_ci_start_feed(feed, 1); } static int altera_ci_stop_feed_1(struct dvb_demux_feed *feed) { return altera_ci_stop_feed(feed, 1); } static int altera_ci_start_feed_2(struct dvb_demux_feed *feed) { return altera_ci_start_feed(feed, 2); } static int altera_ci_stop_feed_2(struct dvb_demux_feed *feed) { return altera_ci_stop_feed(feed, 2); } static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr) { struct netup_hw_pid_filter *pid_filt = NULL; struct fpga_inode *temp_int = find_inode(config->dev); struct fpga_internal *inter = NULL; int ret = 0; pid_filt = kzalloc(sizeof(struct netup_hw_pid_filter), GFP_KERNEL); ci_dbg_print("%s\n", __func__); if (!pid_filt) { ret = -ENOMEM; goto err; } if (temp_int != NULL) { inter = temp_int->internal; (inter->filts_used)++; ci_dbg_print("%s: Find Internal Structure!\n", __func__); } else { inter = kzalloc(sizeof(struct fpga_internal), GFP_KERNEL); if (!inter) { ret = -ENOMEM; goto err; } temp_int = append_internal(inter); if (!temp_int) { ret = -ENOMEM; goto err; } inter->filts_used = 1; inter->dev = config->dev; inter->fpga_rw = config->fpga_rw; mutex_init(&inter->fpga_mutex); inter->strt_wrk = 1; ci_dbg_print("%s: Create New Internal Structure!\n", __func__); } ci_dbg_print("%s: setting hw pid filter = %p for ci = %d\n", __func__, pid_filt, hw_filt_nr - 1); inter->pid_filt[hw_filt_nr - 1] = pid_filt; pid_filt->demux = config->demux; pid_filt->internal = inter; pid_filt->nr = hw_filt_nr - 1; /* store old feed controls */ pid_filt->start_feed = config->demux->start_feed; pid_filt->stop_feed = config->demux->stop_feed; /* replace with new feed controls */ if (hw_filt_nr == 1) { pid_filt->demux->start_feed = altera_ci_start_feed_1; pid_filt->demux->stop_feed = altera_ci_stop_feed_1; } else if (hw_filt_nr == 2) { pid_filt->demux->start_feed = altera_ci_start_feed_2; pid_filt->demux->stop_feed = altera_ci_stop_feed_2; } altera_toggle_fullts_streaming(pid_filt, 0, 1); return 0; err: ci_dbg_print("%s: Can't init hardware filter: Error %d\n", __func__, ret); kfree(pid_filt); kfree(inter); return ret; } int altera_ci_init(struct altera_ci_config *config, int ci_nr) { struct altera_ci_state *state; struct fpga_inode *temp_int = 
find_inode(config->dev); struct fpga_internal *inter = NULL; int ret = 0; u8 store = 0; state = kzalloc(sizeof(struct altera_ci_state), GFP_KERNEL); ci_dbg_print("%s\n", __func__); if (!state) { ret = -ENOMEM; goto err; } if (temp_int != NULL) { inter = temp_int->internal; (inter->cis_used)++; inter->fpga_rw = config->fpga_rw; ci_dbg_print("%s: Find Internal Structure!\n", __func__); } else { inter = kzalloc(sizeof(struct fpga_internal), GFP_KERNEL); if (!inter) { ret = -ENOMEM; goto err; } temp_int = append_internal(inter); if (!temp_int) { ret = -ENOMEM; goto err; } inter->cis_used = 1; inter->dev = config->dev; inter->fpga_rw = config->fpga_rw; mutex_init(&inter->fpga_mutex); inter->strt_wrk = 1; ci_dbg_print("%s: Create New Internal Structure!\n", __func__); } ci_dbg_print("%s: setting state = %p for ci = %d\n", __func__, state, ci_nr - 1); state->internal = inter; state->nr = ci_nr - 1; state->ca.owner = THIS_MODULE; state->ca.read_attribute_mem = altera_ci_read_attribute_mem; state->ca.write_attribute_mem = altera_ci_write_attribute_mem; state->ca.read_cam_control = altera_ci_read_cam_ctl; state->ca.write_cam_control = altera_ci_write_cam_ctl; state->ca.slot_reset = altera_ci_slot_reset; state->ca.slot_shutdown = altera_ci_slot_shutdown; state->ca.slot_ts_enable = altera_ci_slot_ts_ctl; state->ca.poll_slot_status = altera_poll_ci_slot_status; state->ca.data = state; ret = dvb_ca_en50221_init(config->adapter, &state->ca, /* flags */ 0, /* n_slots */ 1); if (0 != ret) goto err; inter->state[ci_nr - 1] = state; altera_hw_filt_init(config, ci_nr); if (inter->strt_wrk) { INIT_WORK(&inter->work, netup_read_ci_status); inter->strt_wrk = 0; } ci_dbg_print("%s: CI initialized!\n", __func__); mutex_lock(&inter->fpga_mutex); /* Enable div */ netup_fpga_op_rw(inter, NETUP_CI_TSA_DIV, 0x0, 0); netup_fpga_op_rw(inter, NETUP_CI_TSB_DIV, 0x0, 0); /* enable TS out */ store = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, 0, NETUP_CI_FLG_RD); store |= (3 << 4); netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, store, 0); ret = netup_fpga_op_rw(inter, NETUP_CI_REVISION, 0, NETUP_CI_FLG_RD); /* enable irq */ netup_fpga_op_rw(inter, NETUP_CI_INT_CTRL, 0x44, 0); mutex_unlock(&inter->fpga_mutex); ci_dbg_print("%s: NetUP CI Revision = 0x%x\n", __func__, ret); schedule_work(&inter->work); return 0; err: ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret); kfree(state); kfree(inter); return ret; } EXPORT_SYMBOL(altera_ci_init); int altera_ci_tuner_reset(void *dev, int ci_nr) { struct fpga_inode *temp_int = find_inode(dev); struct fpga_internal *inter = NULL; u8 store; ci_dbg_print("%s\n", __func__); if (temp_int == NULL) return -1; if (temp_int->internal == NULL) return -1; inter = temp_int->internal; mutex_lock(&inter->fpga_mutex); store = netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, 0, NETUP_CI_FLG_RD); store &= ~(4 << (2 - ci_nr)); netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, store, 0); msleep(100); store |= (4 << (2 - ci_nr)); netup_fpga_op_rw(inter, NETUP_CI_BUSCTRL2, store, 0); mutex_unlock(&inter->fpga_mutex); return 0; } EXPORT_SYMBOL(altera_ci_tuner_reset);
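/*
 * Illustrative sketch, not part of the original file: how a bridge driver
 * might fill struct altera_ci_config (declared in altera-ci.h) and attach the
 * first CI slot. The bridge handle, adapter, demux and the netup-style
 * fpga_rw callback are placeholders supplied by the caller; only the fields
 * already used by altera_ci_init() above are set here.
 */
static int __maybe_unused example_attach_ci(void *bridge_dev,
					    void *dvb_adapter,
					    struct dvb_demux *demux,
					    int (*fpga_rw)(void *dev, int flag,
							   int data, int rw))
{
	struct altera_ci_config cfg = {
		.dev = bridge_dev,	/* passed back as the first fpga_rw() argument */
		.adapter = dvb_adapter,
		.demux = demux,
		.fpga_rw = fpga_rw,
	};

	/* CI slots are numbered from 1 in this driver. */
	return altera_ci_init(&cfg, 1);
}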
533150.c
/* ST Microelectronics IIS2DLPC 3-axis accelerometer driver * * Copyright (c) 2020 STMicroelectronics * * SPDX-License-Identifier: Apache-2.0 * * Datasheet: * https://www.st.com/resource/en/datasheet/iis2dlpc.pdf */ #define DT_DRV_COMPAT st_iis2dlpc #include <init.h> #include <sys/__assert.h> #include <sys/byteorder.h> #include <logging/log.h> #include <drivers/sensor.h> #if DT_ANY_INST_ON_BUS_STATUS_OKAY(spi) #include <drivers/spi.h> #elif DT_ANY_INST_ON_BUS_STATUS_OKAY(i2c) #include <drivers/i2c.h> #endif #include "iis2dlpc.h" LOG_MODULE_REGISTER(IIS2DLPC, CONFIG_SENSOR_LOG_LEVEL); /** * iis2dlpc_set_range - set full scale range for acc * @dev: Pointer to instance of struct device (I2C or SPI) * @range: Full scale range (2, 4, 8 and 16 G) */ static int iis2dlpc_set_range(struct device *dev, u16_t range) { int err; struct iis2dlpc_data *iis2dlpc = dev->driver_data; const struct iis2dlpc_device_config *cfg = dev->config_info; u8_t shift_gain = 0U; u8_t fs = IIS2DLPC_FS_TO_REG(range); err = iis2dlpc_full_scale_set(iis2dlpc->ctx, fs); if (cfg->pm == IIS2DLPC_CONT_LOW_PWR_12bit) { shift_gain = IIS2DLPC_SHFT_GAIN_NOLP1; } if (!err) { /* save internally gain for optimization */ iis2dlpc->gain = IIS2DLPC_FS_TO_GAIN(IIS2DLPC_FS_TO_REG(range), shift_gain); } return err; } /** * iis2dlpc_set_odr - set new sampling frequency * @dev: Pointer to instance of struct device (I2C or SPI) * @odr: Output data rate */ static int iis2dlpc_set_odr(struct device *dev, u16_t odr) { struct iis2dlpc_data *iis2dlpc = dev->driver_data; u8_t val; /* check if power off */ if (odr == 0U) { return iis2dlpc_data_rate_set(iis2dlpc->ctx, IIS2DLPC_XL_ODR_OFF); } val = IIS2DLPC_ODR_TO_REG(odr); if (val > IIS2DLPC_XL_ODR_1k6Hz) { LOG_ERR("ODR too high"); return -ENOTSUP; } return iis2dlpc_data_rate_set(iis2dlpc->ctx, val); } static inline void iis2dlpc_convert(struct sensor_value *val, int raw_val, float gain) { s64_t dval; /* Gain is in ug/LSB */ /* Convert to m/s^2 */ dval = ((s64_t)raw_val * gain * SENSOR_G) / 1000000LL; val->val1 = dval / 1000000LL; val->val2 = dval % 1000000LL; } static inline void iis2dlpc_channel_get_acc(struct device *dev, enum sensor_channel chan, struct sensor_value *val) { int i; u8_t ofs_start, ofs_stop; struct iis2dlpc_data *iis2dlpc = dev->driver_data; struct sensor_value *pval = val; switch (chan) { case SENSOR_CHAN_ACCEL_X: ofs_start = ofs_stop = 0U; break; case SENSOR_CHAN_ACCEL_Y: ofs_start = ofs_stop = 1U; break; case SENSOR_CHAN_ACCEL_Z: ofs_start = ofs_stop = 2U; break; default: ofs_start = 0U; ofs_stop = 2U; break; } for (i = ofs_start; i <= ofs_stop ; i++) { iis2dlpc_convert(pval++, iis2dlpc->acc[i], iis2dlpc->gain); } } static int iis2dlpc_channel_get(struct device *dev, enum sensor_channel chan, struct sensor_value *val) { switch (chan) { case SENSOR_CHAN_ACCEL_X: case SENSOR_CHAN_ACCEL_Y: case SENSOR_CHAN_ACCEL_Z: case SENSOR_CHAN_ACCEL_XYZ: iis2dlpc_channel_get_acc(dev, chan, val); return 0; default: LOG_DBG("Channel not supported"); break; } return -ENOTSUP; } static int iis2dlpc_config(struct device *dev, enum sensor_channel chan, enum sensor_attribute attr, const struct sensor_value *val) { switch (attr) { case SENSOR_ATTR_FULL_SCALE: return iis2dlpc_set_range(dev, sensor_ms2_to_g(val)); case SENSOR_ATTR_SAMPLING_FREQUENCY: return iis2dlpc_set_odr(dev, val->val1); default: LOG_DBG("Acc attribute not supported"); break; } return -ENOTSUP; } static int iis2dlpc_attr_set(struct device *dev, enum sensor_channel chan, enum sensor_attribute attr, const struct sensor_value *val) { 
switch (chan) { case SENSOR_CHAN_ACCEL_X: case SENSOR_CHAN_ACCEL_Y: case SENSOR_CHAN_ACCEL_Z: case SENSOR_CHAN_ACCEL_XYZ: return iis2dlpc_config(dev, chan, attr, val); default: LOG_DBG("Attr not supported on %d channel", chan); break; } return -ENOTSUP; } static int iis2dlpc_sample_fetch(struct device *dev, enum sensor_channel chan) { struct iis2dlpc_data *iis2dlpc = dev->driver_data; const struct iis2dlpc_device_config *cfg = dev->config_info; u8_t shift; union axis3bit16_t buf; /* fetch raw data sample */ if (iis2dlpc_acceleration_raw_get(iis2dlpc->ctx, buf.u8bit) < 0) { LOG_DBG("Failed to fetch raw data sample"); return -EIO; } /* adjust to resolution */ if (cfg->pm == IIS2DLPC_CONT_LOW_PWR_12bit) { shift = IIS2DLPC_SHIFT_PM1; } else { shift = IIS2DLPC_SHIFT_PMOTHER; } iis2dlpc->acc[0] = sys_le16_to_cpu(buf.i16bit[0]) >> shift; iis2dlpc->acc[1] = sys_le16_to_cpu(buf.i16bit[1]) >> shift; iis2dlpc->acc[2] = sys_le16_to_cpu(buf.i16bit[2]) >> shift; return 0; } static const struct sensor_driver_api iis2dlpc_driver_api = { .attr_set = iis2dlpc_attr_set, #if CONFIG_IIS2DLPC_TRIGGER .trigger_set = iis2dlpc_trigger_set, #endif /* CONFIG_IIS2DLPC_TRIGGER */ .sample_fetch = iis2dlpc_sample_fetch, .channel_get = iis2dlpc_channel_get, }; static int iis2dlpc_init_interface(struct device *dev) { struct iis2dlpc_data *iis2dlpc = dev->driver_data; const struct iis2dlpc_device_config *cfg = dev->config_info; iis2dlpc->bus = device_get_binding(cfg->bus_name); if (!iis2dlpc->bus) { LOG_DBG("master bus not found: %s", cfg->bus_name); return -EINVAL; } #if DT_ANY_INST_ON_BUS_STATUS_OKAY(spi) iis2dlpc_spi_init(dev); #elif DT_ANY_INST_ON_BUS_STATUS_OKAY(i2c) iis2dlpc_i2c_init(dev); #else #error "BUS MACRO NOT DEFINED IN DTS" #endif return 0; } static int iis2dlpc_set_power_mode(struct iis2dlpc_data *iis2dlpc, iis2dlpc_mode_t pm) { u8_t regval = IIS2DLPC_CONT_LOW_PWR_12bit; switch (pm) { case IIS2DLPC_CONT_LOW_PWR_2: case IIS2DLPC_CONT_LOW_PWR_3: case IIS2DLPC_CONT_LOW_PWR_4: case IIS2DLPC_HIGH_PERFORMANCE: regval = pm; break; default: LOG_DBG("Apply default Power Mode"); break; } return iis2dlpc_write_reg(iis2dlpc->ctx, IIS2DLPC_CTRL1, &regval, 1); } static int iis2dlpc_init(struct device *dev) { struct iis2dlpc_data *iis2dlpc = dev->driver_data; const struct iis2dlpc_device_config *cfg = dev->config_info; u8_t wai; if (iis2dlpc_init_interface(dev)) { return -EINVAL; } /* check chip ID */ if (iis2dlpc_device_id_get(iis2dlpc->ctx, &wai) < 0) { return -EIO; } if (wai != IIS2DLPC_ID) { LOG_ERR("Invalid chip ID"); return -EINVAL; } /* reset device */ if (iis2dlpc_reset_set(iis2dlpc->ctx, PROPERTY_ENABLE) < 0) { return -EIO; } k_busy_wait(100); if (iis2dlpc_block_data_update_set(iis2dlpc->ctx, PROPERTY_ENABLE) < 0) { return -EIO; } /* set power mode */ if (iis2dlpc_set_power_mode(iis2dlpc, CONFIG_IIS2DLPC_POWER_MODE)) { return -EIO; } /* set default odr and full scale for acc */ if (iis2dlpc_data_rate_set(iis2dlpc->ctx, IIS2DLPC_DEFAULT_ODR) < 0) { return -EIO; } if (iis2dlpc_full_scale_set(iis2dlpc->ctx, IIS2DLPC_ACC_FS) < 0) { return -EIO; } iis2dlpc->gain = IIS2DLPC_FS_TO_GAIN(IIS2DLPC_ACC_FS, cfg->pm == IIS2DLPC_CONT_LOW_PWR_12bit ? 
IIS2DLPC_SHFT_GAIN_NOLP1 : 0); #ifdef CONFIG_IIS2DLPC_TRIGGER if (iis2dlpc_init_interrupt(dev) < 0) { LOG_ERR("Failed to initialize interrupts"); return -EIO; } #ifdef CONFIG_IIS2DLPC_PULSE if (iis2dlpc_tap_mode_set(iis2dlpc->ctx, cfg->pulse_trigger) < 0) { LOG_ERR("Failed to select pulse trigger mode"); return -EIO; } if (iis2dlpc_tap_threshold_x_set(iis2dlpc->ctx, cfg->pulse_ths[0]) < 0) { LOG_ERR("Failed to set tap X axis threshold"); return -EIO; } if (iis2dlpc_tap_threshold_y_set(iis2dlpc->ctx, cfg->pulse_ths[1]) < 0) { LOG_ERR("Failed to set tap Y axis threshold"); return -EIO; } if (iis2dlpc_tap_threshold_z_set(iis2dlpc->ctx, cfg->pulse_ths[2]) < 0) { LOG_ERR("Failed to set tap Z axis threshold"); return -EIO; } if (iis2dlpc_tap_detection_on_x_set(iis2dlpc->ctx, CONFIG_IIS2DLPC_PULSE_X) < 0) { LOG_ERR("Failed to set tap detection on X axis"); return -EIO; } if (iis2dlpc_tap_detection_on_y_set(iis2dlpc->ctx, CONFIG_IIS2DLPC_PULSE_Y) < 0) { LOG_ERR("Failed to set tap detection on Y axis"); return -EIO; } if (iis2dlpc_tap_detection_on_z_set(iis2dlpc->ctx, CONFIG_IIS2DLPC_PULSE_Z) < 0) { LOG_ERR("Failed to set tap detection on Z axis"); return -EIO; } if (iis2dlpc_tap_shock_set(iis2dlpc->ctx, cfg->pulse_shock) < 0) { LOG_ERR("Failed to set tap shock duration"); return -EIO; } if (iis2dlpc_tap_dur_set(iis2dlpc->ctx, cfg->pulse_ltncy) < 0) { LOG_ERR("Failed to set tap latency"); return -EIO; } if (iis2dlpc_tap_quiet_set(iis2dlpc->ctx, cfg->pulse_quiet) < 0) { LOG_ERR("Failed to set tap quiet time"); return -EIO; } #endif /* CONFIG_IIS2DLPC_PULSE */ #endif /* CONFIG_IIS2DLPC_TRIGGER */ return 0; } const struct iis2dlpc_device_config iis2dlpc_cfg = { .bus_name = DT_INST_BUS_LABEL(0), .pm = CONFIG_IIS2DLPC_POWER_MODE, #ifdef CONFIG_IIS2DLPC_TRIGGER .int_gpio_port = DT_INST_GPIO_LABEL(0, drdy_gpios), .int_gpio_pin = DT_INST_GPIO_PIN(0, drdy_gpios), .int_gpio_flags = DT_INST_GPIO_FLAGS(0, drdy_gpios), #if defined(CONFIG_IIS2DLPC_INT_PIN_1) .int_pin = 1, #elif defined(CONFIG_IIS2DLPC_INT_PIN_2) .int_pin = 2, #endif /* CONFIG_IIS2DLPC_INT_PIN_* */ #ifdef CONFIG_IIS2DLPC_PULSE #if defined(CONFIG_IIS2DLPC_ONLY_SINGLE) .pulse_trigger = IIS2DLPC_ONLY_SINGLE, #elif defined(CONFIG_IIS2DLPC_SINGLE_DOUBLE) .pulse_trigger = IIS2DLPC_BOTH_SINGLE_DOUBLE, #endif .pulse_ths[0] = CONFIG_IIS2DLPC_PULSE_THSX, .pulse_ths[1] = CONFIG_IIS2DLPC_PULSE_THSY, .pulse_ths[2] = CONFIG_IIS2DLPC_PULSE_THSZ, .pulse_shock = CONFIG_IIS2DLPC_PULSE_SHOCK, .pulse_ltncy = CONFIG_IIS2DLPC_PULSE_LTNCY, .pulse_quiet = CONFIG_IIS2DLPC_PULSE_QUIET, #endif /* CONFIG_IIS2DLPC_PULSE */ #endif /* CONFIG_IIS2DLPC_TRIGGER */ }; struct iis2dlpc_data iis2dlpc_data; DEVICE_AND_API_INIT(iis2dlpc, DT_INST_LABEL(0), iis2dlpc_init, &iis2dlpc_data, &iis2dlpc_cfg, POST_KERNEL, CONFIG_SENSOR_INIT_PRIORITY, &iis2dlpc_driver_api);
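/*
 * Minimal application-side sketch (not from the original source): reading the
 * accelerometer through Zephyr's generic sensor API as registered by
 * DEVICE_AND_API_INIT() above.  The binding name "IIS2DLPC" is an assumption
 * standing in for DT_INST_LABEL(0); adjust it to the actual devicetree label.
 */
#include <zephyr.h>
#include <device.h>
#include <drivers/sensor.h>

void iis2dlpc_sample_once(void)
{
	struct device *dev = device_get_binding("IIS2DLPC"); /* assumed DT label */
	struct sensor_value accel[3];

	if (dev == NULL) {
		return;
	}
	if (sensor_sample_fetch(dev) == 0 &&
	    sensor_channel_get(dev, SENSOR_CHAN_ACCEL_XYZ, accel) == 0) {
		/* accel[0..2] now hold X/Y/Z in m/s^2 (val1 + val2 / 1000000) */
	}
}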
44120.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE191_Integer_Underflow__int64_t_rand_predec_17.c Label Definition File: CWE191_Integer_Underflow.label.xml Template File: sources-sinks-17.tmpl.c */ /* * @description * CWE: 191 Integer Underflow * BadSource: rand Set data to result of rand() * GoodSource: Set data to a small, non-zero number (negative two) * Sinks: decrement * GoodSink: Ensure there will not be an underflow before decrementing data * BadSink : Decrement data, which can cause an Underflow * Flow Variant: 17 Control flow: for loops * * */ #include "std_testcase.h" #ifndef OMITBAD void CWE191_Integer_Underflow__int64_t_rand_predec_17_bad() { int i,j; int64_t data; data = 0LL; for(i = 0; i < 1; i++) { /* POTENTIAL FLAW: Use a random value */ data = (int64_t)RAND64(); } for(j = 0; j < 1; j++) { { /* POTENTIAL FLAW: Decrementing data could cause an underflow */ --data; int64_t result = data; printLongLongLine(result); } } } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodB2G() - use badsource and goodsink in the for statements */ static void goodB2G() { int i,k; int64_t data; data = 0LL; for(i = 0; i < 1; i++) { /* POTENTIAL FLAW: Use a random value */ data = (int64_t)RAND64(); } for(k = 0; k < 1; k++) { /* FIX: Add a check to prevent an underflow from occurring */ if (data > LLONG_MIN) { --data; int64_t result = data; printLongLongLine(result); } else { printLine("data value is too large to perform arithmetic safely."); } } } /* goodG2B() - use goodsource and badsink in the for statements */ static void goodG2B() { int h,j; int64_t data; data = 0LL; for(h = 0; h < 1; h++) { /* FIX: Use a small, non-zero value that will not cause an underflow in the sinks */ data = -2; } for(j = 0; j < 1; j++) { { /* POTENTIAL FLAW: Decrementing data could cause an underflow */ --data; int64_t result = data; printLongLongLine(result); } } } void CWE191_Integer_Underflow__int64_t_rand_predec_17_good() { goodB2G(); goodG2B(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE191_Integer_Underflow__int64_t_rand_predec_17_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE191_Integer_Underflow__int64_t_rand_predec_17_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
63666.c
/**************************************************************************//** * @file system_ARMCM23.c * @brief CMSIS Device System Source File for * ARMCM23 Device Series * @version V5.00 * @date 21. October 2016 ******************************************************************************/ /* * Copyright (c) 2009-2016 ARM Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the License); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an AS IS BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #if defined (ARMCM23) #include "ARMCM23.h" #elif defined (ARMCM23_TZ) #include "ARMCM23_TZ.h" #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) #include "partition_ARMCM23.h" #endif #else #error device not specified! #endif /*---------------------------------------------------------------------------- Define clocks *----------------------------------------------------------------------------*/ #define XTAL ( 5000000UL) /* Oscillator frequency */ #define SYSTEM_CLOCK (5U * XTAL) /*---------------------------------------------------------------------------- Externals *----------------------------------------------------------------------------*/ #if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) extern uint32_t __Vectors; #endif /*---------------------------------------------------------------------------- System Core Clock Variable *----------------------------------------------------------------------------*/ uint32_t SystemCoreClock = SYSTEM_CLOCK; /*---------------------------------------------------------------------------- System Core Clock update function *----------------------------------------------------------------------------*/ void SystemCoreClockUpdate (void) { SystemCoreClock = SYSTEM_CLOCK; } /*---------------------------------------------------------------------------- System initialization function *----------------------------------------------------------------------------*/ void SystemInit (void) { #if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) SCB->VTOR = (uint32_t) &__Vectors; #endif #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) TZ_SAU_Setup(); #endif SystemCoreClock = SYSTEM_CLOCK; }
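/*
 * Hedged usage sketch (not part of the CMSIS template above): after any clock
 * reconfiguration an application typically calls SystemCoreClockUpdate() and
 * derives timer reloads from SystemCoreClock, e.g. a 1 ms SysTick.  This
 * assumes the device exposes the standard CMSIS SysTick_Config() helper.
 */
void configure_1ms_tick(void)
{
  SystemCoreClockUpdate();                 /* refresh SystemCoreClock        */
  SysTick_Config(SystemCoreClock / 1000U); /* CMSIS core helper, 1 kHz tick  */
}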
514616.c
/******************************************************************************************************** * @file crc.c * * @brief for TLSR chips * * @author telink * * @par Copyright (c) Telink Semiconductor (Shanghai) Co., Ltd. * All rights reserved. * * The information contained herein is confidential and proprietary property of Telink * Semiconductor (Shanghai) Co., Ltd. and is available under the terms * of Commercial License Agreement between Telink Semiconductor (Shanghai) * Co., Ltd. and the licensee in separate contract or the terms described here-in. * This heading MUST NOT be removed from this file. * * Licensees are granted free, non-transferable use of the information in this * file under Mutual Non-Disclosure Agreement. NO WARRENTY of ANY KIND is provided. * *******************************************************************************************************/ unsigned short crc16 (unsigned char *pD, int len) { static unsigned short poly[2]={0, 0xa001}; //0x8005 <==> 0xa001 unsigned short crc = 0xffff; int i,j; for(j=len; j>0; j--) { unsigned char ds = *pD++; for(i=0; i<8; i++) { crc = (crc >> 1) ^ poly[(crc ^ ds ) & 1]; ds = ds >> 1; } } return crc; }
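/*
 * Illustrative usage of crc16() above (not part of the original file).  The
 * table encodes the reflected 0x8005 polynomial (0xa001) with an initial
 * value of 0xffff, as in the code above; the message bytes here are arbitrary.
 */
#include <stdio.h>

int crc16_demo(void)
{
	unsigned char msg[] = { 0x01, 0x02, 0x03, 0x04 };
	unsigned short crc = crc16(msg, sizeof(msg));

	printf("crc16 = 0x%04x\n", crc);
	return 0;
}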
495967.c
#include "siori.h" #include "libmy/sram.h" #include "script.h" #include "bgm.h" #include "img.h" #include "text.h" #include "anime.h" #include "history.h" // SRAM内部構造 // 0x00 - "SZ"(2バイト) + バージョン(2バイト) + しおりサイズ(2バイト) // 0x10 - 共通フラグ(4バイト) // 0x20 - しおり(計9個) //--------------------------------------------------------------------------- // 例外参照 extern ST_SCRIPT Script; extern ST_BGM Bgm; extern ST_IMG Img; //--------------------------------------------------------------------------- ST_SIORI Siori; //--------------------------------------------------------------------------- EWRAM_CODE void SioriInit(void) { _Memset(&Siori, 0x00, sizeof(ST_SIORI)); Siori.size = 2 + TEXT_SIORI_CX * 2 + sizeof(ST_SCRIPT) + sizeof(ST_BGM) + sizeof(ST_IMG); TRACEOUT("[しおりサイズ: 0x%x]\n", Siori.size); } //--------------------------------------------------------------------------- EWRAM_CODE void SioriSaveSig(void) { TRACEOUT("[シグネチャ書き込み]\n"); SramSeek(0); SramWrite8('S'); SramWrite8('Z'); SramWrite16(100); SramWrite16(Siori.size); } //--------------------------------------------------------------------------- EWRAM_CODE void SioriSaveFlag(void) { TRACEOUT("[フラグ書き込み]\n"); SramSeek(0x10); u8* p = (u8*)&Script.flag; SramWrite8(*(p+0)); SramWrite8(*(p+1)); SramWrite8(*(p+7)); SramWrite8(*(p+8)); } //--------------------------------------------------------------------------- EWRAM_CODE void SioriLoadFlag(void) { if(SioriIsSram() == FALSE) { SioriSaveSig(); SioriSaveFlag(); TRACEOUT("[初回起動]\n"); return; } TRACEOUT("[フラグ読み込み]\n"); SramSeek(0x10); u8* pS = SramGetPointer(); u8* pD = (u8*)&Script.flag; pD[0] = *pS++; pD[1] = *pS++; pD[7] = *pS++; pD[8] = *pS++; } //--------------------------------------------------------------------------- EWRAM_CODE void SioriSave(u16 no) { _ASSERT(no < SIORI_MAX_CNT); TRACEOUT("[しおり セーブ: %d]\n", no); u16 i; u8* p; SioriSaveSig(); SioriSaveFlag(); SramSeek(0x20 + Siori.size * no); SramWrite8('S'); SramWrite8('V'); p = (u8*)TextGetSioriStr(); for(i=0; i<TEXT_SIORI_CX; i++) { SramWrite8(*p++); SramWrite8(*p++); } p = (u8*)&Script; for(i=0; i<sizeof(ST_SCRIPT); i++) { SramWrite8(*p++); } p = (u8*)&Bgm; for(i=0; i<sizeof(ST_BGM); i++) { SramWrite8(*p++); } p = (u8*)&Img; for(i=0; i<sizeof(ST_IMG); i++) { SramWrite8(*p++); } } //--------------------------------------------------------------------------- EWRAM_CODE bool SioriLoad(u16 no) { TRACEOUT("[しおり ロード: %d]\n", no); if(SioriIsSram() == FALSE) { return FALSE; } SramSeek(0x20 + Siori.size * no); u8* pS = SramGetPointer(); u8* pD; u16 i; if(*(pS+0) != 'S' || *(pS+1) != 'V') { return FALSE; } pS += 2 + (TEXT_SIORI_CX * 2); pD = (u8*)&Script; for(i=0; i<sizeof(ST_SCRIPT); i++) { *pD++ = *pS++; } pD = (u8*)&Bgm; for(i=0; i<sizeof(ST_BGM); i++) { *pD++ = *pS++; } pD = (u8*)&Img; for(i=0; i<sizeof(ST_IMG); i++) { *pD++ = *pS++; } // 復帰処理をします // 共有のフラグ SioriLoadFlag(); // スクリプト if(Script.act == SCRIPT_ACT_EVENT || Script.actTmp == SCRIPT_ACT_SELECT) { Script.act = SCRIPT_ACT_EVENT; Script.pEventCur = Script.pEventOld; } else { Script.act = SCRIPT_ACT_MSG; Script.pMsgCur = Script.pMsgOld; } // BGM BgmPlayRestart(); // 画面 TextSetClearNoBuf(); ImgSetEffectClear(IMG_EFFECT_FADE_BLACK); ImgSetEffectUpdate(IMG_EFFECT_FADE_BLACK); HistoryInit(); TextInit(); return TRUE; } //--------------------------------------------------------------------------- EWRAM_CODE void SioriSavePrev(void) { SioriSave(SIORI_MAX_CNT - 1); } //--------------------------------------------------------------------------- EWRAM_CODE void SioriLoadPrev(void) { SioriLoad(SIORI_MAX_CNT - 
1); } //--------------------------------------------------------------------------- EWRAM_CODE char* SioriGetName(u16 no) { SramSeek(0x20 + Siori.size * no); u8* p = SramGetPointer(); if(*(p+0) != 'S' || *(p+1) != 'V') { return BLANK_SIORI_NAME; } return (char*)p + 2; } //--------------------------------------------------------------------------- EWRAM_CODE bool SioriIsSram(void) { SramSeek(0); if(SramRead8() != 'S') return FALSE; if(SramRead8() != 'Z') return FALSE; return TRUE; }
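/*
 * Hedged usage sketch (not in the original source): saving the current state
 * into bookmark slot 0 and restoring it later.  The slot numbering and the
 * SIORI_MAX_CNT bound come from the functions above; the call order here is
 * illustrative only.
 */
EWRAM_CODE void SioriDemo(void)
{
	SioriInit();      /* compute the per-slot size                      */
	SioriSave(0);     /* write slot 0 (also writes signature and flags) */

	if (SioriLoad(0)) {
		/* state restored: script position, BGM and image re-applied */
	}
}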
312168.c
/* * nrf24.c * * Created: 07.06.2019 23:14:39 * Author: ThePetrovich */ #include "nrf24.h" void nrf24_pinSetup(){ NRF_CE_DDR |= (1 << NRF_CE); NRF_CSN_DDR |= (1 << NRF_CSN); } void nrf24_setupTx(){ spi_busSetup(SPI_PRESCALER_4, MSBFIRST, SPI_MODE0, SPI_1X); NRF_CE_PORT |= (1 << NRF_CE); _delay_ms(10); NRF_CE_PORT &= ~(1 << NRF_CE); NRF_CSN_PORT &= ~(1 << NRF_CSN); spi_write(NRF_FLUSH); spi_writeRegister(NRF_STATUS_REG, 0x30, 0x20, 1); uint8_t buffer[5]; buffer[0] = 0xE7; buffer[1] = 0xE7; buffer[2] = 0xE7; buffer[3] = 0xE7; buffer[4] = 0x01; spi_transfer(SPI_WRITE, NRF_TX_REG, buffer, 5, 0x20); buffer[0] = 0xE7; buffer[1] = 0xE7; buffer[2] = 0xE7; buffer[3] = 0xE7; buffer[4] = 0x00; spi_transfer(SPI_WRITE, NRF_RX_REG_P0, buffer, 5, 0x20); spi_writeRegister(NRF_AA_REG, NRF24_AA_EN, 0x20, 1); spi_writeRegister(NRF_RXADDR_REG, NRF24_RXADDR, 0x20, 1); spi_writeRegister(NRF_CH_REG, NRF24_CHANNEL, 0x20, 1); spi_writeRegister(NRF_RFSET_REG, NRF24_RFSET, 0x20, 1); spi_writeRegister(NRF_CONFIG_REG, NRF24_CONFIG, 0x20, 1); NRF_CSN_PORT |= (1 << NRF_CSN); spi_busStop(); } void nrf24_transmit(char * data, uint8_t size){ spi_busSetup(SPI_PRESCALER_4, MSBFIRST, SPI_MODE0, SPI_1X); NRF_CSN_PORT &= ~(1 << NRF_CSN); spi_transfer(SPI_WRITE, NRF_TX_UPLOAD, (uint8_t *)data, size-1, 0x00); _delay_ms(2); NRF_CE_PORT |= (1 << NRF_CE); _delay_ms(1); NRF_CE_PORT &= ~(1 << NRF_CE); NRF_CSN_PORT |= (1 << NRF_CSN); spi_busStop(); }
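/*
 * Hedged usage sketch (not part of the original file): bringing the radio up
 * as a transmitter and sending a short payload with the functions above.  The
 * payload contents and the surrounding initialisation order are assumptions.
 */
void nrf24_demo(void)
{
	char msg[] = "ping";

	nrf24_pinSetup();                  /* CE/CSN pins as outputs          */
	nrf24_setupTx();                   /* addresses, channel, config regs */
	nrf24_transmit(msg, sizeof(msg));  /* upload payload and pulse CE     */
}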
593904.c
/** * Copyright (c) 2015 - present LibDriver All rights reserved * * The MIT License (MIT) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * @file driver_dht11_read_test.c * @brief driver dht11 read test source file * @version 2.0.0 * @author Shifeng Li * @date 2021-03-12 * * <h3>history</h3> * <table> * <tr><th>Date <th>Version <th>Author <th>Description * <tr><td>2021/03/12 <td>2.0 <td>Shifeng Li <td>format the code * <tr><td>2020/11/19 <td>1.0 <td>Shifeng Li <td>first upload * </table> */ #include "driver_dht11_read_test.h" static dht11_handle_t gs_handle; /**< dht11 handle */ /** * @brief read test * @param[in] times is the test times * @return status code * - 0 success * - 1 read failed * @note none */ uint8_t dht11_read_test(uint32_t times) { volatile uint8_t res; volatile uint32_t i; volatile uint16_t temperature_raw; volatile uint16_t humidity_raw; volatile float temperature; volatile uint8_t humidity; dht11_info_t info; /* link interface function */ DRIVER_DHT11_LINK_INIT(&gs_handle, dht11_handle_t); DRIVER_DHT11_LINK_BUS_INIT(&gs_handle, dht11_interface_init); DRIVER_DHT11_LINK_BUS_DEINIT(&gs_handle, dht11_interface_deinit); DRIVER_DHT11_LINK_BUS_READ(&gs_handle, dht11_interface_read); DRIVER_DHT11_LINK_BUS_WRITE(&gs_handle, dht11_interface_write); DRIVER_DHT11_LINK_DELAY_MS(&gs_handle, dht11_interface_delay_ms); DRIVER_DHT11_LINK_DELAY_US(&gs_handle, dht11_interface_delay_us); DRIVER_DHT11_LINK_ENABLE_IRQ(&gs_handle, dht11_interface_enable_irq); DRIVER_DHT11_LINK_DISABLE_IRQ(&gs_handle, dht11_interface_disable_irq); DRIVER_DHT11_LINK_DEBUG_PRINT(&gs_handle, dht11_interface_debug_print); /* get dht11 information */ res = dht11_info(&info); if (res) { dht11_interface_debug_print("dht11: get info failed.\n"); return 1; } else { /* print dht11 information */ dht11_interface_debug_print("dht11: chip is %s.\n", info.chip_name); dht11_interface_debug_print("dht11: manufacturer is %s.\n", info.manufacturer_name); dht11_interface_debug_print("dht11: interface is %s.\n", info.interface); dht11_interface_debug_print("dht11: driver version is %d.%d.\n", info.driver_version/1000, (info.driver_version%1000)/100); dht11_interface_debug_print("dht11: min supply voltage is %0.1fV.\n", info.supply_voltage_min_v); dht11_interface_debug_print("dht11: max supply voltage is %0.1fV.\n", info.supply_voltage_max_v); dht11_interface_debug_print("dht11: max current is %0.2fmA.\n", info.max_current_ma); dht11_interface_debug_print("dht11: max temperature is %0.1fC.\n", info.temperature_max); 
dht11_interface_debug_print("dht11: min temperature is %0.1fC.\n", info.temperature_min); } /* start basic read test */ dht11_interface_debug_print("dht11: start read test.\n"); /* dht11 init */ res = dht11_init(&gs_handle); if (res) { dht11_interface_debug_print("dht11: init failed.\n"); return 1; } /* delay 2000 ms for read */ dht11_interface_delay_ms(2000); for (i=0; i<times; i++) { /* read temperature and humidity */ res = dht11_read_temperature_humidity(&gs_handle, (uint16_t *)&temperature_raw, (float *)&temperature, (uint16_t *)&humidity_raw, (uint8_t *)&humidity); if (res) { dht11_interface_debug_print("dth11: read failed.\n"); dht11_deinit(&gs_handle); return 1; } /* print result */ dht11_interface_debug_print("dth11: temperature: %.01fC.\n", temperature); dht11_interface_debug_print("dth11: humidity: %d%%.\n", humidity); /* delay 2000 ms*/ dht11_interface_delay_ms(2000); } /* finish basic read test and exit */ dht11_interface_debug_print("dht11: finish read test.\n"); dht11_deinit(&gs_handle); return 0; }
675361.c
// Copyright lowRISC contributors. // Licensed under the Apache License, Version 2.0, see LICENSE for details. // SPDX-License-Identifier: Apache-2.0 #include "sw/device/lib/dif/dif_otbn.h" #include "sw/device/lib/base/bitfield.h" #include "otbn_regs.h" // Generated. /** * Data width of big number subset, in bytes. */ const int kDifOtbnWlenBytes = 256 / 8; /** * Convert from a `dif_otbn_interrupt_t` to the appropriate bit index. * * INTR_STATE, INTR_ENABLE, and INTR_TEST registers have the same bit offset. */ static bool irq_bit_index_get(dif_otbn_interrupt_t irq_type, uint8_t *offset_out) { ptrdiff_t offset; switch (irq_type) { case kDifOtbnInterruptDone: offset = OTBN_INTR_COMMON_DONE_BIT; break; default: return false; } *offset_out = offset; return true; } dif_otbn_result_t dif_otbn_init(const dif_otbn_config_t *config, dif_otbn_t *otbn) { if (config == NULL || otbn == NULL) { return kDifOtbnBadArg; } otbn->base_addr = config->base_addr; dif_otbn_reset(otbn); return kDifOtbnOk; } dif_otbn_result_t dif_otbn_reset(const dif_otbn_t *otbn) { if (otbn == NULL) { return kDifOtbnBadArg; } mmio_region_write32(otbn->base_addr, OTBN_INTR_ENABLE_REG_OFFSET, 0); // Clear all pending interrupts. mmio_region_write32(otbn->base_addr, OTBN_INTR_STATE_REG_OFFSET, 0xFFFFFFFF); return kDifOtbnOk; } dif_otbn_result_t dif_otbn_irq_state_get(const dif_otbn_t *otbn, dif_otbn_interrupt_t irq_type, dif_otbn_enable_t *state) { if (otbn == NULL || state == NULL) { return kDifOtbnBadArg; } uint8_t bit_index; if (!irq_bit_index_get(irq_type, &bit_index)) { return kDifOtbnError; } bool enabled = bitfield_bit32_read( mmio_region_read32(otbn->base_addr, OTBN_INTR_STATE_REG_OFFSET), bit_index); *state = (enabled ? kDifOtbnEnable : kDifOtbnDisable); return kDifOtbnOk; } dif_otbn_result_t dif_otbn_irq_state_clear(const dif_otbn_t *otbn, dif_otbn_interrupt_t irq_type) { if (otbn == NULL) { return kDifOtbnBadArg; } uint8_t bit_index; if (!irq_bit_index_get(irq_type, &bit_index)) { return kDifOtbnError; } uint32_t register_value = 0x0u; register_value = bitfield_bit32_write(register_value, bit_index, true); mmio_region_write32(otbn->base_addr, OTBN_INTR_STATE_REG_OFFSET, register_value); return kDifOtbnOk; } dif_otbn_result_t dif_otbn_irqs_disable(const dif_otbn_t *otbn, uint32_t *state) { if (otbn == NULL) { return kDifOtbnBadArg; } // Pass the interrupt state back to the caller. if (state != NULL) { *state = mmio_region_read32(otbn->base_addr, OTBN_INTR_ENABLE_REG_OFFSET); } // Disable all interrupts. mmio_region_write32(otbn->base_addr, OTBN_INTR_ENABLE_REG_OFFSET, 0u); return kDifOtbnOk; } dif_otbn_result_t dif_otbn_irqs_restore(const dif_otbn_t *otbn, uint32_t state) { if (otbn == NULL) { return kDifOtbnBadArg; } // Restore interrupt state. mmio_region_write32(otbn->base_addr, OTBN_INTR_ENABLE_REG_OFFSET, state); return kDifOtbnOk; } dif_otbn_result_t dif_otbn_irq_control(const dif_otbn_t *otbn, dif_otbn_interrupt_t irq_type, dif_otbn_enable_t enable) { if (otbn == NULL) { return kDifOtbnBadArg; } uint8_t bit_index; if (!irq_bit_index_get(irq_type, &bit_index)) { return kDifOtbnError; } // Enable/Disable interrupt. 
uint32_t register_value = mmio_region_read32(otbn->base_addr, OTBN_INTR_ENABLE_REG_OFFSET); register_value = bitfield_bit32_write(register_value, bit_index, (enable == kDifOtbnEnable)); mmio_region_write32(otbn->base_addr, OTBN_INTR_ENABLE_REG_OFFSET, register_value); return kDifOtbnOk; } dif_otbn_result_t dif_otbn_irq_force(const dif_otbn_t *otbn, dif_otbn_interrupt_t irq_type) { if (otbn == NULL) { return kDifOtbnBadArg; } uint8_t bit_index; if (!irq_bit_index_get(irq_type, &bit_index)) { return kDifOtbnError; } // Force the requested interrupt. uint32_t register_value = mmio_region_read32(otbn->base_addr, OTBN_INTR_TEST_REG_OFFSET); register_value = bitfield_bit32_write(register_value, bit_index, true); mmio_region_write32(otbn->base_addr, OTBN_INTR_TEST_REG_OFFSET, register_value); return kDifOtbnOk; } dif_otbn_result_t dif_otbn_start(const dif_otbn_t *otbn, unsigned int start_addr) { if (otbn == NULL || start_addr % 4 != 0 || start_addr >= OTBN_IMEM_SIZE_BYTES) { return kDifOtbnBadArg; } mmio_region_write32(otbn->base_addr, OTBN_START_ADDR_REG_OFFSET, start_addr); uint32_t cmd_reg_val = 0x0u; cmd_reg_val = bitfield_bit32_write(cmd_reg_val, OTBN_CMD_START_BIT, true); mmio_region_write32(otbn->base_addr, OTBN_CMD_REG_OFFSET, cmd_reg_val); return kDifOtbnOk; } dif_otbn_result_t dif_otbn_is_busy(const dif_otbn_t *otbn, bool *busy) { if (otbn == NULL || busy == NULL) { return kDifOtbnBadArg; } uint32_t status = mmio_region_read32(otbn->base_addr, OTBN_STATUS_REG_OFFSET); *busy = bitfield_field32_read(status, (bitfield_field32_t){ .mask = 1, .index = OTBN_STATUS_BUSY_BIT, }); return kDifOtbnOk; } dif_otbn_result_t dif_otbn_get_err_code(const dif_otbn_t *otbn, dif_otbn_err_code_t *err_code) { if (otbn == NULL || err_code == NULL) { return kDifOtbnBadArg; } uint32_t err_code_raw = mmio_region_read32(otbn->base_addr, OTBN_ERR_CODE_REG_OFFSET); // Ensure that all values returned from hardware match explicitly defined // values in the DIF. switch (err_code_raw) { case kDifOtbnErrCodeNoError: case kDifOtbnErrCodeBadDataAddr: case kDifOtbnErrCodeCallStack: *err_code = (dif_otbn_err_code_t)err_code_raw; return kDifOtbnOk; default: return kDifOtbnUnexpectedData; } } dif_otbn_result_t dif_otbn_imem_write(const dif_otbn_t *otbn, uint32_t offset_bytes, const void *src, size_t len_bytes) { // Only 32b-aligned 32b word accesses are allowed. if (otbn == NULL || src == NULL || len_bytes % 4 != 0 || offset_bytes % 4 != 0 || offset_bytes + len_bytes > OTBN_IMEM_SIZE_BYTES) { return kDifOtbnBadArg; } mmio_region_memcpy_to_mmio32( otbn->base_addr, OTBN_IMEM_REG_OFFSET + offset_bytes, src, len_bytes); return kDifOtbnOk; } dif_otbn_result_t dif_otbn_imem_read(const dif_otbn_t *otbn, uint32_t offset_bytes, void *dest, size_t len_bytes) { // Only 32b-aligned 32b word accesses are allowed. if (otbn == NULL || dest == NULL || len_bytes % 4 != 0 || offset_bytes % 4 != 0 || offset_bytes + len_bytes > OTBN_IMEM_SIZE_BYTES) { return kDifOtbnBadArg; } mmio_region_memcpy_from_mmio32( otbn->base_addr, OTBN_IMEM_REG_OFFSET + offset_bytes, dest, len_bytes); return kDifOtbnOk; } dif_otbn_result_t dif_otbn_dmem_write(const dif_otbn_t *otbn, uint32_t offset_bytes, const void *src, size_t len_bytes) { // Only 32b-aligned 32b word accesses are allowed. 
if (otbn == NULL || src == NULL || len_bytes % 4 != 0 || offset_bytes % 4 != 0 || offset_bytes + len_bytes > OTBN_DMEM_SIZE_BYTES) { return kDifOtbnBadArg; } mmio_region_memcpy_to_mmio32( otbn->base_addr, OTBN_DMEM_REG_OFFSET + offset_bytes, src, len_bytes); return kDifOtbnOk; } dif_otbn_result_t dif_otbn_dmem_read(const dif_otbn_t *otbn, uint32_t offset_bytes, void *dest, size_t len_bytes) { // Only 32b-aligned 32b word accesses are allowed. if (otbn == NULL || dest == NULL || len_bytes % 4 != 0 || offset_bytes % 4 != 0 || offset_bytes + len_bytes > OTBN_DMEM_SIZE_BYTES) { return kDifOtbnBadArg; } mmio_region_memcpy_from_mmio32( otbn->base_addr, OTBN_DMEM_REG_OFFSET + offset_bytes, dest, len_bytes); return kDifOtbnOk; } size_t dif_otbn_get_dmem_size_bytes(const dif_otbn_t *otbn) { return OTBN_DMEM_SIZE_BYTES; } size_t dif_otbn_get_imem_size_bytes(const dif_otbn_t *otbn) { return OTBN_IMEM_SIZE_BYTES; }
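/*
 * Hedged usage sketch (not part of the DIF itself): load a program image into
 * IMEM, start execution at address 0 and poll for completion.  The base
 * address and the program buffer are placeholders supplied by the caller;
 * only the dif_otbn_* calls defined above are used.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include "sw/device/lib/dif/dif_otbn.h"

bool otbn_run_program(mmio_region_t base, const uint32_t *imem_image,
                      size_t imem_len_bytes) {
  dif_otbn_t otbn;
  dif_otbn_config_t config = {.base_addr = base};

  if (dif_otbn_init(&config, &otbn) != kDifOtbnOk) {
    return false;
  }
  if (dif_otbn_imem_write(&otbn, 0, imem_image, imem_len_bytes) != kDifOtbnOk) {
    return false;
  }
  if (dif_otbn_start(&otbn, 0) != kDifOtbnOk) {
    return false;
  }

  // Busy-wait until the STATUS register reports the run has finished.
  bool busy = true;
  while (busy) {
    if (dif_otbn_is_busy(&otbn, &busy) != kDifOtbnOk) {
      return false;
    }
  }
  return true;
}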
941385.c
//============================================================================== // // hcl_uart.c - Seiko Epson Hardware Control Library // // This layer of indirection is added to allow the sample code to call generic // UART functions to work on multiple hardware platforms. This is the Linux // TERMIOS specific implementation. // // // THE SOFTWARE IS RELEASED INTO THE PUBLIC DOMAIN. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // NONINFRINGEMENT, SECURITY, SATISFACTORY QUALITY, AND FITNESS FOR A // PARTICULAR PURPOSE. IN NO EVENT SHALL EPSON BE LIABLE FOR ANY LOSS, DAMAGE // OR CLAIM, ARISING FROM OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF THE // SOFTWARE. // //============================================================================== #include <assert.h> #include <errno.h> // Error number definitions #include <fcntl.h> // File control definitions #include <stdint.h> #include <stdio.h> #include <string.h> #include <sys/ioctl.h> // Needed for ioctl library functions #include <termios.h> // terminal io (serial port) interface #include <unistd.h> #include "hcl.h" #include "hcl_uart.h" #include "accel_epsonCommon.h" extern const char* ACCLSERIAL; extern int fd_serial; typedef int ComPortHandle; typedef unsigned char Byte; ComPortHandle openComPort(const char* fd_serialPath, speed_t baudRate); void closeComPort(ComPortHandle fd_serial); /***************************************************************************** ** Function name: uartInit ** Description: Initialize the COM port with the settings for ** communicating with the connected Epson ACCL. ** Call this function instead of openComPort(). ** Parameters: Pointer to device name, baudrate ** Return value: COM port handle if successful, -1=fail *****************************************************************************/ int uartInit(const char* fd_serialPath, int baudrate) { printf("Attempting to open port...%s\n", fd_serialPath); speed_t baudRate; if (baudrate == 460800) { baudRate = B460800; } else if (baudrate == 230400) { baudRate = B230400; } else { printf("Invalid baudrate\n"); return -1; } fd_serial = openComPort(fd_serialPath, baudRate); return fd_serial; } /***************************************************************************** ** Function name: uartRelease ** Description: Release the COM port (close) after a 100msec delay ** and closing the com port to the Epson ACCL. ** Call this function instead of closeComPort(). 
** Parameters: COM port handle ** Return value: SUCCESS *****************************************************************************/ int uartRelease(ComPortHandle fd_serial) { seDelayMicroSecs( 100000); // Provide 100msec delay for any pending transfer to complete closeComPort(fd_serial); return SUCCESS; } /***************************************************************************** ** Function name: readComPort ** Description: Read the specified number of bytes from the COM port ** Parameters: COM port handle, pointer to output char array, # of *bytes to read ** Return value: # of bytes returned by COM port, or -1=fail *****************************************************************************/ int readComPort(ComPortHandle fd_serial, unsigned char* bytesToRead, int size) { return read(fd_serial, bytesToRead, size); } /***************************************************************************** ** Function name: writeComPort ** Description: Write specified number of bytes to the COM port ** Parameters: COM port handle, pointer to input char array, # of bytes *to send ** Return value: # of bytes sent, or -1=fail *****************************************************************************/ int writeComPort(ComPortHandle fd_serial, unsigned char* bytesToWrite, int size) { return write(fd_serial, bytesToWrite, size); } /***************************************************************************** ** Function name: numBytesReadComPort ** Description: Returns number of bytes in COM port read buffer ** Purpose is to check if data is available ** Parameters: COM port handle ** Return value: # of bytes in the COM port receive buffer *****************************************************************************/ int numBytesReadComPort(ComPortHandle fd_serial) { int numBytes; ioctl(fd_serial, FIONREAD, &numBytes); return numBytes; } /***************************************************************************** ** Function name: purgeComPort ** Description: Clears the com port's read and write buffers ** Parameters: COM port handle ** Return value: SUCCESS or FAIL *****************************************************************************/ int purgeComPort(ComPortHandle fd_serial) { if (tcflush(fd_serial, TCIOFLUSH) == -1) { printf("flush failed\n"); return FAIL; } return SUCCESS; } /***************************************************************************** ** Function name: openComPort ** Description: Com port is opened in raw mode and ** will timeout on reads after 2 second. ** This will return a fail if the port could not open or ** the port options could not be set. ** This is not intended to be called directly, but is ** called from uartInit() ** Parameters: Pointer to device name, Baudrate ** Return value: COM port handle if successful, -1=fail *****************************************************************************/ ComPortHandle openComPort(const char* fd_serialPath, speed_t baudRate) { // Read/Write, Not Controlling Terminal int port = open(fd_serialPath, O_RDWR | O_NOCTTY); if (port < 0) // Opening of port failed { printf("Unable to open com Port %s\n Errno = %i\n", fd_serialPath, errno); return -1; } // Get the current options for the port... 
struct termios options; tcgetattr(port, &options); // Set the baud rate to 460800 cfsetospeed(&options, baudRate); cfsetispeed(&options, baudRate); // Turn off character processing // Clear current char size mask // Force 8 bit input options.c_cflag &= ~CSIZE; // Mask the character size bits options.c_cflag |= CS8; // Set the number of stop bits to 1 options.c_cflag &= ~CSTOPB; // Set parity to None options.c_cflag &= ~PARENB; // Set for no input processing options.c_iflag = 0; // From https://en.wikibooks.org/wiki/Serial_Programming/termios // Input flags - Turn off input processing // convert break to null byte, no CR to NL translation, // no NL to CR translation, don't mark parity errors or breaks // no input parity check, don't strip high bit off, // no XON/XOFF software flow control // options.c_iflag &= ~(IGNPAR | IGNBRK | BRKINT | IGNCR | ICRNL | // INLCR | PARMRK | INPCK | ISTRIP | IXON | IXOFF | IXANY); // Output flags - Turn off output processing // From http://www.cmrr.umn.edu/~strupp/serial.html // options.c_oflag &= ~OPOST; options.c_oflag = 0; // raw output // No line processing // echo off, echo newline off, canonical mode off, // extended input processing off, signal chars off // From http://www.cmrr.umn.edu/~strupp/serial.html // options.c_lflag &= ~(ICANON | ECHO | ECHOE | ISIG);options.c_lflag &= // ~(ICANON | ECHO | ECHOE | ISIG); options.c_lflag = 0; // raw input // From http://www.cmrr.umn.edu/~strupp/serial.html // Timeouts are ignored in canonical input mode or when the NDELAY option is // set on the file via open or fcntl. VMIN specifies the minimum number of // characters to read. 1) If VMIN is set to 0, then the VTIME value specifies // the time to wait for every characters to be read. The read call will return // even if less than specified from the read request. 2) If VMIN is non-zero, // VTIME specifies the time to wait for the first character. If first // character is received within the specified VTIME, then it won't return // until VMIN number of characters are received. So any read call can return // either 0 characters or N-specified characters, but nothing inbetween. It // will block forever if RX characters are not in multiples of VMIN. 3) VTIME // specifies the amount of time to wait for incoming characters in tenths of // seconds. If VTIME is set to 0 (the default), reads will block (wait) // indefinitely unless the NDELAY option is set on the port with open or fcntl // Setting VTIME = 0, makes UART reads blocking, try experimenting with value // to prevent hanging waiting for reads // Current setting below: Non-blocking reads with first character recv timeout // of 2 seconds options.c_cc[VMIN] = 4; // block reading until VMIN 4 of characters are read. options.c_cc[VTIME] = 20; // Inter-Character Timer -- i.e. timeout= x*.1 s = 2 seconds // Set local mode and enable the receiver options.c_cflag |= (CLOCAL | CREAD); // Set the new options for the port... 
int status = tcsetattr(port, TCSANOW, &options); if (status != 0) // For error message { printf("Configuring comport failed\n"); closeComPort(port); return status; } // Purge serial port buffers purgeComPort(port); return port; } /***************************************************************************** ** Function name: closeComPort ** Description: Closes a Com port (previously opened with OpenComPort) ** This is not intended to be called directly, but is ** called from uartRelease() ** Parameters: COM port handle ** Return value: None *****************************************************************************/ void closeComPort(ComPortHandle fd_serial) { close(fd_serial); }
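/*
 * Hedged usage sketch (not part of the original file): open the serial port,
 * poll for incoming bytes and close it again using only the wrappers above.
 * The device path and baud rate are assumptions chosen for illustration.
 */
void uart_demo(void)
{
    unsigned char rx[64];

    int fd = uartInit("/dev/ttyUSB0", 460800);   /* assumed device path */
    if (fd < 0)
        return;

    if (numBytesReadComPort(fd) > 0) {
        int n = readComPort(fd, rx, sizeof(rx));
        (void)n;                                 /* process n bytes here */
    }

    uartRelease(fd);
}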
652427.c
/* * Copyright 2017-2020 AVSystem <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <avs_commons_init.h> #ifdef AVS_COMMONS_WITH_AVS_HTTP # include <avsystem/commons/avs_memory.h> # include <avsystem/commons/avs_stream_net.h> # include "../avs_body_receivers.h" # include "../avs_http_log.h" VISIBILITY_SOURCE_BEGIN typedef struct { const avs_stream_v_table_t *const vtable; avs_stream_t *backend; } dumb_proxy_receiver_t; static avs_error_t dumb_proxy_read(avs_stream_t *stream, size_t *out_bytes_read, bool *out_message_finished, void *buffer, size_t buffer_length) { return avs_stream_read(((dumb_proxy_receiver_t *) stream)->backend, out_bytes_read, out_message_finished, buffer, buffer_length); } static bool dumb_proxy_nonblock_read_ready(avs_stream_t *stream) { return avs_stream_nonblock_read_ready( ((dumb_proxy_receiver_t *) stream)->backend); } static avs_error_t dumb_proxy_peek(avs_stream_t *stream, size_t offset, char *out_value) { return avs_stream_peek(((dumb_proxy_receiver_t *) stream)->backend, offset, out_value); } static avs_error_t dumb_close(avs_stream_t *stream_) { dumb_proxy_receiver_t *stream = (dumb_proxy_receiver_t *) stream_; avs_stream_net_setsock(stream->backend, NULL); /* don't close the socket */ return avs_stream_cleanup(&stream->backend); } static const avs_stream_v_table_t dumb_body_receiver_vtable = { .read = dumb_proxy_read, .peek = dumb_proxy_peek, .close = dumb_close, &(avs_stream_v_table_extension_t[]){ { AVS_STREAM_V_TABLE_EXTENSION_NONBLOCK, &(avs_stream_v_table_extension_nonblock_t[]) { { .read_ready = dumb_proxy_nonblock_read_ready } }[0] }, AVS_STREAM_V_TABLE_EXTENSION_NULL }[0] }; avs_stream_t *_avs_http_body_receiver_dumb_create(avs_stream_t *backend) { dumb_proxy_receiver_t *retval = (dumb_proxy_receiver_t *) avs_malloc(sizeof(*retval)); LOG(TRACE, _("create_dumb_body_receiver")); if (retval) { *(const avs_stream_v_table_t **) (intptr_t) &retval->vtable = &dumb_body_receiver_vtable; retval->backend = backend; } return (avs_stream_t *) retval; } #endif // AVS_COMMONS_WITH_AVS_HTTP
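/*
 * Hedged usage sketch of this internal helper (not part of the original
 * file): wrap an already-connected backend stream in the dumb body receiver
 * and drain it.  Creation of `backend` is out of scope here and assumed to
 * have happened elsewhere; only calls visible in the file above are used.
 */
static void drain_dumb_receiver(avs_stream_t *backend) {
    avs_stream_t *body = _avs_http_body_receiver_dumb_create(backend);
    if (!body) {
        return;
    }

    char buf[256];
    size_t bytes_read;
    bool finished = false;
    while (!finished
           && avs_is_ok(avs_stream_read(body, &bytes_read, &finished, buf,
                                        sizeof(buf)))) {
        /* consume bytes_read bytes from buf */
    }
    avs_stream_cleanup(&body); /* detaches the socket and frees the wrapper */
}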
412675.c
/* infback.c -- inflate using a call-back interface * Copyright (C) 1995-2005 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* This code is largely copied from inflate.c. Normally either infback.o or inflate.o would be linked into an application--not both. The interface with inffast.c is retained so that optimized assembler-coded versions of inflate_fast() can be used with either inflate.c or infback.c. */ #include "zutil.h" #include "inftrees.h" #include "inflate.h" #include "inffast.h" /* function prototypes */ local void fixedtables OF((struct inflate_state FAR *state)); /* strm provides memory allocation functions in zalloc and zfree, or Z_NULL to use the library memory allocation functions. windowBits is in the range 8..15, and window is a user-supplied window and output buffer that is 2**windowBits bytes. */ int ZEXPORT inflateBackInit_(strm, windowBits, window, version, stream_size) z_streamp strm; int windowBits; unsigned char FAR *window; const char *version; int stream_size; { struct inflate_state FAR *state; if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || stream_size != (int)(sizeof(z_stream))) return Z_VERSION_ERROR; if (strm == Z_NULL || window == Z_NULL || windowBits < 8 || windowBits > 15) return Z_STREAM_ERROR; strm->msg = Z_NULL; /* in case we return an error */ if (strm->zalloc == (alloc_func)0) { strm->zalloc = zcalloc; strm->opaque = (voidpf)0; } if (strm->zfree == (free_func)0) strm->zfree = zcfree; state = (struct inflate_state FAR *)ZALLOC(strm, 1, sizeof(struct inflate_state)); if (state == Z_NULL) return Z_MEM_ERROR; Tracev((stderr, "inflate: allocated\n")); strm->state = (struct internal_state FAR *)state; state->dmax = 32768U; state->wbits = windowBits; state->wsize = 1U << windowBits; state->window = window; state->write = 0; state->whave = 0; return Z_OK; } /* Return state with length and distance decoding tables and index sizes set to fixed code decoding. Normally this returns fixed tables from inffixed.h. If BUILDFIXED is defined, then instead this routine builds the tables the first time it's called, and returns those tables the first time and thereafter. This reduces the size of the code by about 2K bytes, in exchange for a little execution time. However, BUILDFIXED should not be used for threaded applications, since the rewriting of the tables and virgin may not be thread-safe. 
*/ local void fixedtables(state) struct inflate_state FAR *state; { #ifdef BUILDFIXED static int virgin = 1; static code *lenfix, *distfix; static code fixed[544]; /* build fixed huffman tables if first call (may not be thread safe) */ if (virgin) { unsigned sym, bits; static code *next; /* literal/length table */ sym = 0; while (sym < 144) state->lens[sym++] = 8; while (sym < 256) state->lens[sym++] = 9; while (sym < 280) state->lens[sym++] = 7; while (sym < 288) state->lens[sym++] = 8; next = fixed; lenfix = next; bits = 9; inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work); /* distance table */ sym = 0; while (sym < 32) state->lens[sym++] = 5; distfix = next; bits = 5; inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work); /* do this just once */ virgin = 0; } #else /* !BUILDFIXED */ # include "inffixed.h" #endif /* BUILDFIXED */ state->lencode = lenfix; state->lenbits = 9; state->distcode = distfix; state->distbits = 5; } /* Macros for inflateBack(): */ /* Load returned state from inflate_fast() */ #define LOAD() \ do { \ put = strm->next_out; \ left = strm->avail_out; \ next = strm->next_in; \ have = strm->avail_in; \ hold = state->hold; \ bits = state->bits; \ } while (0) /* Set state from registers for inflate_fast() */ #define RESTORE() \ do { \ strm->next_out = put; \ strm->avail_out = left; \ strm->next_in = next; \ strm->avail_in = have; \ state->hold = hold; \ state->bits = bits; \ } while (0) /* Clear the input bit accumulator */ #define INITBITS() \ do { \ hold = 0; \ bits = 0; \ } while (0) /* Assure that some input is available. If input is requested, but denied, then return a Z_BUF_ERROR from inflateBack(). */ #define PULL() \ do { \ if (have == 0) { \ have = in(in_desc, &next); \ if (have == 0) { \ next = Z_NULL; \ ret = Z_BUF_ERROR; \ goto inf_leave; \ } \ } \ } while (0) /* Get a byte of input into the bit accumulator, or return from inflateBack() with an error if there is no input available. */ #define PULLBYTE() \ do { \ PULL(); \ have--; \ hold += (unsigned long)(*next++) << bits; \ bits += 8; \ } while (0) /* Assure that there are at least n bits in the bit accumulator. If there is not enough available input to do that, then return from inflateBack() with an error. */ #define NEEDBITS(n) \ do { \ while (bits < (unsigned)(n)) \ PULLBYTE(); \ } while (0) /* Return the low n bits of the bit accumulator (n < 16) */ #define BITS(n) \ ((unsigned)hold & ((1U << (n)) - 1)) /* Remove n bits from the bit accumulator */ #define DROPBITS(n) \ do { \ hold >>= (n); \ bits -= (unsigned)(n); \ } while (0) /* Remove zero to seven bits as needed to go to a byte boundary */ #define BYTEBITS() \ do { \ hold >>= bits & 7; \ bits -= bits & 7; \ } while (0) /* Assure that some output space is available, by writing out the window if it's full. If the write fails, return from inflateBack() with a Z_BUF_ERROR. */ #define ROOM() \ do { \ if (left == 0) { \ put = state->window; \ left = state->wsize; \ state->whave = left; \ if (out(out_desc, put, left)) { \ ret = Z_BUF_ERROR; \ goto inf_leave; \ } \ } \ } while (0) /* strm provides the memory allocation functions and window buffer on input, and provides information on the unused input on return. For Z_DATA_ERROR returns, strm will also provide an error message. in() and out() are the call-back input and output functions. When inflateBack() needs more input, it calls in(). When inflateBack() has filled the window with output, or when it completes with data in the window, it calls out() to write out the data. 
The application must not change the provided input until in() is called again or inflateBack() returns. The application must not change the window/output buffer until inflateBack() returns. in() and out() are called with a descriptor parameter provided in the inflateBack() call. This parameter can be a structure that provides the information required to do the read or write, as well as accumulated information on the input and output such as totals and check values. in() should return zero on failure. out() should return non-zero on failure. If either in() or out() fails, than inflateBack() returns a Z_BUF_ERROR. strm->next_in can be checked for Z_NULL to see whether it was in() or out() that caused in the error. Otherwise, inflateBack() returns Z_STREAM_END on success, Z_DATA_ERROR for an deflate format error, or Z_MEM_ERROR if it could not allocate memory for the state. inflateBack() can also return Z_STREAM_ERROR if the input parameters are not correct, i.e. strm is Z_NULL or the state was not initialized. */ int ZEXPORT inflateBack(strm, in, in_desc, out, out_desc) z_streamp strm; in_func in; void FAR *in_desc; out_func out; void FAR *out_desc; { struct inflate_state FAR *state; unsigned char FAR *next; /* next input */ unsigned char FAR *put; /* next output */ unsigned have, left; /* available input and output */ unsigned long hold; /* bit buffer */ unsigned bits; /* bits in bit buffer */ unsigned copy; /* number of stored or match bytes to copy */ unsigned char FAR *from; /* where to copy match bytes from */ code this; /* current decoding table entry */ code last; /* parent table entry */ unsigned len; /* length to copy for repeats, bits to drop */ int ret; /* return code */ static const unsigned short order[19] = /* permutation of code lengths */ {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; /* Check that the strm exists and that the state was initialized */ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; /* Reset the state */ strm->msg = Z_NULL; state->mode = TYPE; state->last = 0; state->whave = 0; next = strm->next_in; have = next != Z_NULL ? strm->avail_in : 0; hold = 0; bits = 0; put = state->window; left = state->wsize; /* Inflate until end of block marked as last */ for (;;) switch (state->mode) { case TYPE: /* determine and dispatch block type */ if (state->last) { BYTEBITS(); state->mode = DONE; break; } NEEDBITS(3); state->last = BITS(1); DROPBITS(1); switch (BITS(2)) { case 0: /* stored block */ Tracev((stderr, "inflate: stored block%s\n", state->last ? " (last)" : "")); state->mode = STORED; break; case 1: /* fixed block */ fixedtables(state); Tracev((stderr, "inflate: fixed codes block%s\n", state->last ? " (last)" : "")); state->mode = LEN; /* decode codes */ break; case 2: /* dynamic block */ Tracev((stderr, "inflate: dynamic codes block%s\n", state->last ? 
" (last)" : "")); state->mode = TABLE; break; case 3: strm->msg = (char *)"invalid block type"; state->mode = BAD; } DROPBITS(2); break; case STORED: /* get and verify stored block length */ BYTEBITS(); /* go to byte boundary */ NEEDBITS(32); if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { strm->msg = (char *)"invalid stored block lengths"; state->mode = BAD; break; } state->length = (unsigned)hold & 0xffff; Tracev((stderr, "inflate: stored length %u\n", state->length)); INITBITS(); /* copy stored block from input to output */ while (state->length != 0) { copy = state->length; PULL(); ROOM(); if (copy > have) copy = have; if (copy > left) copy = left; zmemcpy(put, next, copy); have -= copy; next += copy; left -= copy; put += copy; state->length -= copy; } Tracev((stderr, "inflate: stored end\n")); state->mode = TYPE; break; case TABLE: /* get dynamic table entries descriptor */ NEEDBITS(14); state->nlen = BITS(5) + 257; DROPBITS(5); state->ndist = BITS(5) + 1; DROPBITS(5); state->ncode = BITS(4) + 4; DROPBITS(4); #ifndef PKZIP_BUG_WORKAROUND if (state->nlen > 286 || state->ndist > 30) { strm->msg = (char *)"too many length or distance symbols"; state->mode = BAD; break; } #endif Tracev((stderr, "inflate: table sizes ok\n")); /* get code length code lengths (not a typo) */ state->have = 0; while (state->have < state->ncode) { NEEDBITS(3); state->lens[order[state->have++]] = (unsigned short)BITS(3); DROPBITS(3); } while (state->have < 19) state->lens[order[state->have++]] = 0; state->next = state->codes; state->lencode = (code const FAR *)(state->next); state->lenbits = 7; ret = inflate_table(CODES, state->lens, 19, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid code lengths set"; state->mode = BAD; break; } Tracev((stderr, "inflate: code lengths ok\n")); /* get length and distance code code lengths */ state->have = 0; while (state->have < state->nlen + state->ndist) { for (;;) { this = state->lencode[BITS(state->lenbits)]; if ((unsigned)(this.bits) <= bits) break; PULLBYTE(); } if (this.val < 16) { NEEDBITS(this.bits); DROPBITS(this.bits); state->lens[state->have++] = this.val; } else { if (this.val == 16) { NEEDBITS(this.bits + 2); DROPBITS(this.bits); if (state->have == 0) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } len = (unsigned)(state->lens[state->have - 1]); copy = 3 + BITS(2); DROPBITS(2); } else if (this.val == 17) { NEEDBITS(this.bits + 3); DROPBITS(this.bits); len = 0; copy = 3 + BITS(3); DROPBITS(3); } else { NEEDBITS(this.bits + 7); DROPBITS(this.bits); len = 0; copy = 11 + BITS(7); DROPBITS(7); } if (state->have + copy > state->nlen + state->ndist) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } while (copy--) state->lens[state->have++] = (unsigned short)len; } } /* handle error breaks in while */ if (state->mode == BAD) break; /* build code tables */ state->next = state->codes; state->lencode = (code const FAR *)(state->next); state->lenbits = 9; ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid literal/lengths set"; state->mode = BAD; break; } state->distcode = (code const FAR *)(state->next); state->distbits = 6; ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, &(state->next), &(state->distbits), state->work); if (ret) { strm->msg = (char *)"invalid distances set"; state->mode = BAD; break; } Tracev((stderr, "inflate: codes ok\n")); state->mode = LEN; 
/*-fallthrough*/ case LEN: /* use inflate_fast() if we have enough input and output */ if (have >= 6 && left >= 258) { RESTORE(); if (state->whave < state->wsize) state->whave = state->wsize - left; inflate_fast(strm, state->wsize); LOAD(); break; } /* get a literal, length, or end-of-block code */ for (;;) { this = state->lencode[BITS(state->lenbits)]; if ((unsigned)(this.bits) <= bits) break; PULLBYTE(); } if (this.op && (this.op & 0xf0) == 0) { last = this; for (;;) { this = state->lencode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + this.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } DROPBITS(this.bits); state->length = (unsigned)this.val; /* process literal */ if (this.op == 0) { Tracevv((stderr, this.val >= 0x20 && this.val < 0x7f ? "inflate: literal '%c'\n" : "inflate: literal 0x%02x\n", this.val)); ROOM(); *put++ = (unsigned char)(state->length); left--; state->mode = LEN; break; } /* process end of block */ if (this.op & 32) { Tracevv((stderr, "inflate: end of block\n")); state->mode = TYPE; break; } /* invalid code */ if (this.op & 64) { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } /* length code -- get extra bits, if any */ state->extra = (unsigned)(this.op) & 15; if (state->extra != 0) { NEEDBITS(state->extra); state->length += BITS(state->extra); DROPBITS(state->extra); } Tracevv((stderr, "inflate: length %u\n", state->length)); /* get distance code */ for (;;) { this = state->distcode[BITS(state->distbits)]; if ((unsigned)(this.bits) <= bits) break; PULLBYTE(); } if ((this.op & 0xf0) == 0) { last = this; for (;;) { this = state->distcode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + this.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } DROPBITS(this.bits); if (this.op & 64) { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } state->offset = (unsigned)this.val; /* get distance extra bits, if any */ state->extra = (unsigned)(this.op) & 15; if (state->extra != 0) { NEEDBITS(state->extra); state->offset += BITS(state->extra); DROPBITS(state->extra); } if (state->offset > state->wsize - (state->whave < state->wsize ? left : 0)) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } Tracevv((stderr, "inflate: distance %u\n", state->offset)); /* copy match from window to output */ do { ROOM(); copy = state->wsize - state->offset; if (copy < left) { from = put + copy; copy = left - copy; } else { from = put - state->offset; copy = left; } if (copy > state->length) copy = state->length; state->length -= copy; left -= copy; do { *put++ = *from++; } while (--copy); } while (state->length != 0); break; case DONE: /* inflate stream terminated properly -- write leftover output */ ret = Z_STREAM_END; if (left < state->wsize) { if (out(out_desc, state->window, state->wsize - left)) ret = Z_BUF_ERROR; } goto inf_leave; case BAD: ret = Z_DATA_ERROR; goto inf_leave; default: /* can't happen, but makes compilers happy */ ret = Z_STREAM_ERROR; goto inf_leave; } /* Return unused input */ inf_leave: strm->next_in = next; strm->avail_in = have; return ret; } int ZEXPORT inflateBackEnd(strm) z_streamp strm; { if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0) return Z_STREAM_ERROR; ZFREE(strm, strm->state); strm->state = Z_NULL; Tracev((stderr, "inflate: end\n")); return Z_OK; }
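/* A minimal, hypothetical driver for inflateBack() illustrating the in()/out()
 * callback contract documented above: in() refills the input buffer and returns
 * 0 on failure, out() consumes output and returns non-zero on failure, and the
 * descriptor arguments carry whatever state the callbacks need (plain FILE
 * handles here). Helper names, buffer sizes, and the raw-deflate assumption are
 * illustrative, not part of zlib; callback signatures match the zlib vintage of
 * the code above. */
#include <stdio.h>
#include "zlib.h"

static unsigned char inbuf[16384];

static unsigned file_in(void *desc, unsigned char **buf)
{
    *buf = inbuf;
    return (unsigned)fread(inbuf, 1, sizeof(inbuf), (FILE *)desc);
}

static int file_out(void *desc, unsigned char *buf, unsigned len)
{
    return fwrite(buf, 1, len, (FILE *)desc) != len;
}

int inflate_raw_file(FILE *src, FILE *dst)
{
    z_stream strm;
    unsigned char window[32768];    /* 32K window required for windowBits = 15 */
    int ret;

    strm.zalloc = Z_NULL;
    strm.zfree = Z_NULL;
    strm.opaque = Z_NULL;
    ret = inflateBackInit(&strm, 15, window);
    if (ret != Z_OK)
        return ret;

    strm.next_in = Z_NULL;          /* no input yet: forces the first in() call */
    strm.avail_in = 0;
    ret = inflateBack(&strm, file_in, src, file_out, dst);
    inflateBackEnd(&strm);
    return ret;                     /* Z_STREAM_END on success */
}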
435133.c
/* Taxonomy Classification: 0000000000000162000200 */ /* * WRITE/READ 0 write * WHICH BOUND 0 upper * DATA TYPE 0 char * MEMORY LOCATION 0 stack * SCOPE 0 same * CONTAINER 0 no * POINTER 0 no * INDEX COMPLEXITY 0 constant * ADDRESS COMPLEXITY 0 constant * LENGTH COMPLEXITY 0 N/A * ADDRESS ALIAS 0 none * INDEX ALIAS 0 none * LOCAL CONTROL FLOW 0 none * SECONDARY CONTROL FLOW 1 if * LOOP STRUCTURE 6 non-standard while * LOOP COMPLEXITY 2 one * ASYNCHRONY 0 no * TAINT 0 no * RUNTIME ENV. DEPENDENCE 0 no * MAGNITUDE 2 8 bytes * CONTINUOUS/DISCRETE 0 discrete * SIGNEDNESS 0 no */ /* Copyright 2005 Massachusetts Institute of Technology All rights reserved. Redistribution and use of software in source and binary forms, with or without modification, are permitted provided that the following conditions are met. - Redistributions of source code must retain the above copyright notice, this set of conditions and the disclaimer below. - Redistributions in binary form must reproduce the copyright notice, this set of conditions, and the disclaimer below in the documentation and/or other materials provided with the distribution. - Neither the name of the Massachusetts Institute of Technology nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS". ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ int main(int argc, char *argv[]) { int test_value; int loop_counter; char buf[10]; test_value = 17; loop_counter = 0; while(1) { /* BAD */ buf[17] = 'A'; loop_counter++; if (loop_counter > test_value) break; } return 0; }
849078.c
/* BFD support for the Motorola 68HC11 processor Copyright 1999, 2000, 2001 Free Software Foundation, Inc. This file is part of BFD, the Binary File Descriptor library. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "bfd.h" #include "sysdep.h" #include "libbfd.h" const bfd_arch_info_type bfd_m68hc11_arch = { 16, /* 16 bits in a word */ 16, /* 16 bits in an address */ 8, /* 8 bits in a byte */ bfd_arch_m68hc11, 0, "m68hc11", "m68hc11", 4, /* section alignment power */ true, bfd_default_compatible, bfd_default_scan, 0, };
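/* A minimal, hypothetical lookup showing how a BFD client reaches this table
   entry by name.  bfd_scan_arch() and the bfd_arch_info_type fields are standard
   BFD, but the surrounding program is only a sketch and assumes a library build
   that includes this target. */
#include <stdio.h>
#include "bfd.h"

int
main (void)
{
  const bfd_arch_info_type *info = bfd_scan_arch ("m68hc11");

  if (info != NULL)
    printf ("%s: %d-bit words, %d-bit addresses\n",
            info->printable_name, info->bits_per_word, info->bits_per_address);
  return 0;
}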
714956.c
/** Quiz Button App This application is designed for a simple quiz button system, where only one button can be active at once. The app has four states: Ready - LED off, Waiting for a button press or message from another Wixel Arm_Active - LED off, broadcasting request to go active Active - LED on, waiting for timer to expire Locked - LED off, waiting for locked timer to expire The master button button is indicated by setting param_master=1. The master controls the state for the whole network. == Parameters == radio_channel: See description in radio_link.h. lockout_ms: Duration (in ms) to lockout when a button press is detected. packets format: byte 0 Length 1 Command 2-5 Source 6-7 Time Remaining Master sends 0000 for source, other nodes send their serial number as source. Currently, 'C' is the only command, which indicates that buttons should transition to Locked state. Messages from the master include a Winner, which is the ID of the node whose message was received first. Copyright 2015 Jeff Crow. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /** Dependencies **************************************************************/ #include <cc2511_map.h> #include <gpio.h> #include <board.h> #include <random.h> #include <time.h> #include <usb.h> #include <usb_com.h> #include <radio_queue.h> #include <stdio.h> #include <string.h> #include <ctype.h> /** Parameters ****************************************************************/ uint32 CODE param_lockout_ms = 2000; uint32 CODE param_master = 0; /** Debug Definitions *********************************************************/ //#define DEBUG //#define PACKET_DEBUG #define USE_USB #ifdef DEBUG #define DEBUG_PRINTF(...) printf( __VA_ARGS__) #else #define DEBUG_PRINTF(...) 
#endif /** Definitions ***************************************************************/ typedef enum { READY, ARM_ACTIVE, ACTIVE, LOCKED } state_t ; #define WIN_LED 10 #define BUTTON_PIN 16 #define MASTER_RESENDS 10 /** Variables *****************************************************************/ static uint32 unlockTime = 0; /** Functions *****************************************************************/ void printPacket(uint8 XDATA * pkt); void updateLeds(state_t state) { // Time for LED toggle static uint32 flashTime = 0; // On/Off state of LEDs static BIT ledState = 0; // On/Off ratio for LED const uint32 dutyCycle[2] = {200, 300}; static uint32 blip = 0; usbShowStatusWithGreenLed(); switch(state) { case ACTIVE: LED_YELLOW(0); if (getMs() > flashTime) { ledState = !ledState; flashTime = getMs() + dutyCycle[ledState]; } LED_RED(ledState); setDigitalOutput(WIN_LED, ledState); break; case ARM_ACTIVE: LED_YELLOW(1); LED_RED(1); setDigitalOutput(WIN_LED, 0); break; case LOCKED: LED_YELLOW(1); LED_RED(0); setDigitalOutput(WIN_LED, 0); break; default: LED_YELLOW(0); LED_RED(0); // Flash Win LED as power indication setDigitalOutput(WIN_LED, blip <= 2); ledState = 0; if (blip++ == 200000) { blip = 0; } break; } } uint8 sendActive(uint8 cmd, uint8 *addr, uint32 timeLeft) { uint8 XDATA * packet = radioQueueTxCurrentPacket(); uint8 queued = radioQueueTxQueued(); // NOTE: We don't want to queue up packets since they may be stale when actually sent if (!queued && packet != 0) { packet[0] = 7; packet[1] = cmd; packet[2] = addr[0]; packet[3] = addr[1]; packet[4] = addr[2]; packet[5] = addr[3]; packet[6] = (uint8)((timeLeft & 0xFF00) >> 8); packet[7] = (uint8)(timeLeft & 0xFF); #ifdef PACKET_DEBUG if (usbComTxAvailable() >= 20) { DEBUG_PRINTF("t: %02x%02x %lu\r\n", packet[6], packet[7], getMs()); } #endif radioQueueTxSendPacket(); return 1; } return 0; } BIT receiveActive(void) { uint8 XDATA * packet = radioQueueRxCurrentPacket(); BIT retval = 0; if (packet != 0) { if (packet[0] == 2 && packet[1] == 'C' && packet[2] == 'C') { retval = 1; } } return retval; } uint32 getNextSendTime() { uint8 m = param_master ? 
60 : 120; uint32 next = getMs() + (uint32)(randomNumber() % m) + 20; // Dont resend if 250ms from unlock if ( next > (unlockTime - 250) ) { next = ~0; } return next; } static uint8 active_addr[4]; #define setActive( a ) { \ active_addr[0] = a[0];\ active_addr[1] = a[1];\ active_addr[2] = a[2];\ active_addr[3] = a[3];\ } state_t updateState(const state_t state) { state_t newState = state; state_t oldState = state; uint8 XDATA *packet = radioQueueRxCurrentPacket(); uint8 XDATA *src = NULL; int gotLock = 0; static nextSendTime; static BIT prevButtonPress = 0; uint32 remoteTime = 0; if (packet) { #ifdef PACKET_DEBUG if (usbComTxAvailable() >= 20) { DEBUG_PRINTF("%d RECV %d %c %02x%02x-%02x%02x %02x%02x\r\n", state, packet[0], packet[1], packet[2], packet[3], packet[4], packet[5], packet[6], packet[7]); } #endif // Check for a transition packet if ((param_master && packet[0] == 7 && packet[1] == 'C') || (!param_master && packet[0] == 7 && packet[1] == 'A')) { gotLock = 1; src = &packet[2]; remoteTime = (uint32)packet[6] << 8 | (uint32)packet[7]; } radioQueueRxDoneWithPacket(); } switch(state) { case READY: if (gotLock && memcmp(src, serialNumber, 4) != 0) { if (param_master) { setActive(src); if (sendActive('A', active_addr, remoteTime)) nextSendTime = getNextSendTime(); else nextSendTime = getMs(); } newState = LOCKED; unlockTime = getMs() + remoteTime; } else if (isPinHigh(BUTTON_PIN) == LOW && !prevButtonPress) { if (param_master) { newState = ACTIVE; unlockTime = getMs() + param_lockout_ms; setActive(serialNumber); if (sendActive('A', serialNumber, param_lockout_ms)) nextSendTime = getNextSendTime(); else nextSendTime = getMs(); } else { newState = ARM_ACTIVE; unlockTime = getMs() + param_lockout_ms; if (sendActive('C', serialNumber, param_lockout_ms)) nextSendTime = getNextSendTime(); else nextSendTime = getMs(); } } break; case ARM_ACTIVE: // Only slaves can get into this state if (getMs() > unlockTime) { // Didn't hear back from the master so fail newState = READY; } else if (gotLock) { if (memcmp(src, serialNumber, 4) == 0) { newState = ACTIVE; } else // Locked on someone else { newState = LOCKED; unlockTime = remoteTime; } } else if (getMs() > nextSendTime) // resend { sendActive('C', serialNumber, unlockTime - getMs()); nextSendTime = getNextSendTime(); } break; case ACTIVE: if (getMs() > unlockTime) { newState = READY; } else if (param_master && getMs() > nextSendTime) { if (sendActive('A', serialNumber, unlockTime - getMs())) nextSendTime = getNextSendTime(); else nextSendTime = getMs(); } break; case LOCKED: if (getMs() > unlockTime) { newState = READY; } else if (param_master && getMs() > nextSendTime) { if (sendActive('A', serialNumber, unlockTime - getMs())) nextSendTime = getNextSendTime(); else nextSendTime = getMs(); } break; } prevButtonPress = (isPinHigh(BUTTON_PIN) == LOW); return newState; } // This is called by printf and printPacket. void putchar(char c) { usbComTxSendByte(c); } void main() { state_t g_state = READY; systemInit(); #ifdef USE_USB usbInit(); #endif radioQueueInit(); setDigitalOutput(WIN_LED, 0); randomSeedFromSerialNumber(); while(1) { boardService(); updateLeds(g_state); #ifdef USE_USB usbComService(); #endif g_state = updateState(g_state); } }
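/* Illustrative view of the 8-byte payload that sendActive() packs and
 * updateState() unpacks above: byte 0 is the length (7), byte 1 the command
 * ('C' request from a button, 'A' grant from the master), bytes 2-5 the source
 * serial number (0000 from the master), bytes 6-7 the big-endian lockout time
 * remaining.  The struct and helper below are hypothetical; the app itself
 * works directly on the raw byte array. */
typedef struct {
    uint8 length;       /* byte 0: payload length, always 7 here */
    uint8 command;      /* byte 1: 'C' from a button, 'A' from the master */
    uint8 source[4];    /* bytes 2-5: sender serial number (0000 from master) */
    uint8 timeHi;       /* byte 6: time remaining, high byte */
    uint8 timeLo;       /* byte 7: time remaining, low byte */
} quiz_packet_t;

/* Rebuild the 16-bit time-remaining field the same way updateState() does. */
static uint32 packetTimeLeft(const uint8 XDATA *pkt)
{
    return ((uint32)pkt[6] << 8) | (uint32)pkt[7];
}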
215283.c
/* ======================================================================== */ /* ========================= LICENSING & COPYRIGHT ======================== */ /* ======================================================================== */ /* * MUSASHI * Version 3.32 * * A portable Motorola M680x0 processor emulation engine. * Copyright Karl Stenerud. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include <math.h> #include <stdio.h> #include <stdarg.h> #include "m68kcpu.h" extern void exit(int); static void fatalerror(char *format, ...) { va_list ap; va_start(ap,format); vfprintf(stderr,format,ap); // JFF: fixed. Was using fprintf and arguments were wrong va_end(ap); exit(1); } #define FPCC_N 0x08000000 #define FPCC_Z 0x04000000 #define FPCC_I 0x02000000 #define FPCC_NAN 0x01000000 #define DOUBLE_INFINITY (unsigned long long)(0x7ff0000000000000) #define DOUBLE_EXPONENT (unsigned long long)(0x7ff0000000000000) #define DOUBLE_MANTISSA (unsigned long long)(0x000fffffffffffff) extern flag floatx80_is_nan( floatx80 a ); // masks for packed dwords, positive k-factor static uint32 pkmask2[18] = { 0xffffffff, 0, 0xf0000000, 0xff000000, 0xfff00000, 0xffff0000, 0xfffff000, 0xffffff00, 0xfffffff0, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }; static uint32 pkmask3[18] = { 0xffffffff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xf0000000, 0xff000000, 0xfff00000, 0xffff0000, 0xfffff000, 0xffffff00, 0xfffffff0, 0xffffffff, }; static inline double fx80_to_double(floatx80 fx) { uint64 d; double *foo; foo = (double *)&d; d = floatx80_to_float64(fx); return *foo; } static inline floatx80 double_to_fx80(double in) { uint64 *d; d = (uint64 *)&in; return float64_to_floatx80(*d); } static inline floatx80 load_extended_float80(uint32 ea) { uint32 d1,d2; uint16 d3; floatx80 fp; d3 = m68ki_read_16(ea); d1 = m68ki_read_32(ea+4); d2 = m68ki_read_32(ea+8); fp.high = d3; fp.low = ((uint64)d1<<32) | (d2 & 0xffffffff); return fp; } static inline void store_extended_float80(uint32 ea, floatx80 fpr) { m68ki_write_16(ea+0, fpr.high); m68ki_write_16(ea+2, 0); m68ki_write_32(ea+4, (fpr.low>>32)&0xffffffff); m68ki_write_32(ea+8, fpr.low&0xffffffff); } static inline floatx80 load_pack_float80(uint32 ea) { uint32 dw1, dw2, dw3; floatx80 result; double tmp; char str[128], *ch; dw1 = m68ki_read_32(ea); dw2 = m68ki_read_32(ea+4); dw3 = m68ki_read_32(ea+8); ch = &str[0]; if (dw1 & 0x80000000) // mantissa sign { *ch++ = '-'; } *ch++ = (char)((dw1 & 0xf) + '0'); *ch++ = '.'; 
*ch++ = (char)(((dw2 >> 28) & 0xf) + '0'); *ch++ = (char)(((dw2 >> 24) & 0xf) + '0'); *ch++ = (char)(((dw2 >> 20) & 0xf) + '0'); *ch++ = (char)(((dw2 >> 16) & 0xf) + '0'); *ch++ = (char)(((dw2 >> 12) & 0xf) + '0'); *ch++ = (char)(((dw2 >> 8) & 0xf) + '0'); *ch++ = (char)(((dw2 >> 4) & 0xf) + '0'); *ch++ = (char)(((dw2 >> 0) & 0xf) + '0'); *ch++ = (char)(((dw3 >> 28) & 0xf) + '0'); *ch++ = (char)(((dw3 >> 24) & 0xf) + '0'); *ch++ = (char)(((dw3 >> 20) & 0xf) + '0'); *ch++ = (char)(((dw3 >> 16) & 0xf) + '0'); *ch++ = (char)(((dw3 >> 12) & 0xf) + '0'); *ch++ = (char)(((dw3 >> 8) & 0xf) + '0'); *ch++ = (char)(((dw3 >> 4) & 0xf) + '0'); *ch++ = (char)(((dw3 >> 0) & 0xf) + '0'); *ch++ = 'E'; if (dw1 & 0x40000000) // exponent sign { *ch++ = '-'; } *ch++ = (char)(((dw1 >> 24) & 0xf) + '0'); *ch++ = (char)(((dw1 >> 20) & 0xf) + '0'); *ch++ = (char)(((dw1 >> 16) & 0xf) + '0'); *ch = '\0'; sscanf(str, "%le", &tmp); result = double_to_fx80(tmp); return result; } static inline void store_pack_float80(uint32 ea, int k, floatx80 fpr) { uint32 dw1, dw2, dw3; char str[128], *ch; int i, j, exp; dw1 = dw2 = dw3 = 0; ch = &str[0]; sprintf(str, "%.16e", fx80_to_double(fpr)); if (*ch == '-') { ch++; dw1 = 0x80000000; } if (*ch == '+') { ch++; } dw1 |= (*ch++ - '0'); if (*ch == '.') { ch++; } // handle negative k-factor here if ((k <= 0) && (k >= -13)) { exp = 0; for (i = 0; i < 3; i++) { if (ch[18+i] >= '0' && ch[18+i] <= '9') { exp = (exp << 4) | (ch[18+i] - '0'); } } if (ch[17] == '-') { exp = -exp; } k = -k; // last digit is (k + exponent - 1) k += (exp - 1); // round up the last significant mantissa digit if (ch[k+1] >= '5') { ch[k]++; } // zero out the rest of the mantissa digits for (j = (k+1); j < 16; j++) { ch[j] = '0'; } // now zero out K to avoid tripping the positive K detection below k = 0; } // crack 8 digits of the mantissa for (i = 0; i < 8; i++) { dw2 <<= 4; if (*ch >= '0' && *ch <= '9') { dw2 |= *ch++ - '0'; } } // next 8 digits of the mantissa for (i = 0; i < 8; i++) { dw3 <<= 4; if (*ch >= '0' && *ch <= '9') dw3 |= *ch++ - '0'; } // handle masking if k is positive if (k >= 1) { if (k <= 17) { dw2 &= pkmask2[k]; dw3 &= pkmask3[k]; } else { dw2 &= pkmask2[17]; dw3 &= pkmask3[17]; // m68ki_cpu.fpcr |= (need to set OPERR bit) } } // finally, crack the exponent if (*ch == 'e' || *ch == 'E') { ch++; if (*ch == '-') { ch++; dw1 |= 0x40000000; } if (*ch == '+') { ch++; } j = 0; for (i = 0; i < 3; i++) { if (*ch >= '0' && *ch <= '9') { j = (j << 4) | (*ch++ - '0'); } } dw1 |= (j << 16); } m68ki_write_32(ea, dw1); m68ki_write_32(ea+4, dw2); m68ki_write_32(ea+8, dw3); } static inline void SET_CONDITION_CODES(floatx80 reg) { REG_FPSR &= ~(FPCC_N|FPCC_Z|FPCC_I|FPCC_NAN); // sign flag if (reg.high & 0x8000) { REG_FPSR |= FPCC_N; } // zero flag if (((reg.high & 0x7fff) == 0) && ((reg.low<<1) == 0)) { REG_FPSR |= FPCC_Z; } // infinity flag if (((reg.high & 0x7fff) == 0x7fff) && ((reg.low<<1) == 0)) { REG_FPSR |= FPCC_I; } // NaN flag if (floatx80_is_nan(reg)) { REG_FPSR |= FPCC_NAN; } } static inline int TEST_CONDITION(int condition) { int n = (REG_FPSR & FPCC_N) != 0; int z = (REG_FPSR & FPCC_Z) != 0; int nan = (REG_FPSR & FPCC_NAN) != 0; int r = 0; switch (condition) { case 0x10: case 0x00: return 0; // False case 0x11: case 0x01: return (z); // Equal case 0x12: case 0x02: return (!(nan || z || n)); // Greater Than case 0x13: case 0x03: return (z || !(nan || n)); // Greater or Equal case 0x14: case 0x04: return (n && !(nan || z)); // Less Than case 0x15: case 0x05: return (z || (n && !nan)); // Less Than 
or Equal case 0x16: case 0x06: return !nan && !z; case 0x17: case 0x07: return !nan; case 0x18: case 0x08: return nan; case 0x19: case 0x09: return nan || z; case 0x1a: case 0x0a: return (nan || !(n || z)); // Not Less Than or Equal case 0x1b: case 0x0b: return (nan || z || !n); // Not Less Than case 0x1c: case 0x0c: return (nan || (n && !z)); // Not Greater or Equal Than case 0x1d: case 0x0d: return (nan || z || n); // Not Greater Than case 0x1e: case 0x0e: return (!z); // Not Equal case 0x1f: case 0x0f: return 1; // True default: fatalerror("M68kFPU: test_condition: unhandled condition %02X\n", condition); } return r; } static uint8 READ_EA_8(int ea) { int mode = (ea >> 3) & 0x7; int reg = (ea & 0x7); switch (mode) { case 0: // Dn { return REG_D[reg]; } case 2: // (An) { uint32 ea = REG_A[reg]; return m68ki_read_8(ea); } case 3: // (An)+ { uint32 ea = EA_AY_PI_8(); return m68ki_read_8(ea); } case 4: // -(An) { uint32 ea = EA_AY_PD_8(); return m68ki_read_8(ea); } case 5: // (d16, An) { uint32 ea = EA_AY_DI_8(); return m68ki_read_8(ea); } case 6: // (An) + (Xn) + d8 { uint32 ea = EA_AY_IX_8(); return m68ki_read_8(ea); } case 7: { switch (reg) { case 0: // (xxx).W { uint32 ea = (uint32)OPER_I_16(); return m68ki_read_8(ea); } case 1: // (xxx).L { uint32 d1 = OPER_I_16(); uint32 d2 = OPER_I_16(); uint32 ea = (d1 << 16) | d2; return m68ki_read_8(ea); } case 4: // #<data> { return OPER_I_8(); } default: fatalerror("M68kFPU: READ_EA_8: unhandled mode %d, reg %d at %08X\n", mode, reg, REG_PC); } break; } default: fatalerror("M68kFPU: READ_EA_8: unhandled mode %d, reg %d at %08X\n", mode, reg, REG_PC); } return 0; } static uint16 READ_EA_16(int ea) { int mode = (ea >> 3) & 0x7; int reg = (ea & 0x7); switch (mode) { case 0: // Dn { return (uint16)(REG_D[reg]); } case 2: // (An) { uint32 ea = REG_A[reg]; return m68ki_read_16(ea); } case 3: // (An)+ { uint32 ea = EA_AY_PI_16(); return m68ki_read_16(ea); } case 4: // -(An) { uint32 ea = EA_AY_PD_16(); return m68ki_read_16(ea); } case 5: // (d16, An) { uint32 ea = EA_AY_DI_16(); return m68ki_read_16(ea); } case 6: // (An) + (Xn) + d8 { uint32 ea = EA_AY_IX_16(); return m68ki_read_16(ea); } case 7: { switch (reg) { case 0: // (xxx).W { uint32 ea = (uint32)OPER_I_16(); return m68ki_read_16(ea); } case 1: // (xxx).L { uint32 d1 = OPER_I_16(); uint32 d2 = OPER_I_16(); uint32 ea = (d1 << 16) | d2; return m68ki_read_16(ea); } case 4: // #<data> { return OPER_I_16(); } default: fatalerror("M68kFPU: READ_EA_16: unhandled mode %d, reg %d at %08X\n", mode, reg, REG_PC); } break; } default: fatalerror("M68kFPU: READ_EA_16: unhandled mode %d, reg %d at %08X\n", mode, reg, REG_PC); } return 0; } static uint32 READ_EA_32(int ea) { int mode = (ea >> 3) & 0x7; int reg = (ea & 0x7); switch (mode) { case 0: // Dn { return REG_D[reg]; } case 2: // (An) { uint32 ea = REG_A[reg]; return m68ki_read_32(ea); } case 3: // (An)+ { uint32 ea = EA_AY_PI_32(); return m68ki_read_32(ea); } case 4: // -(An) { uint32 ea = EA_AY_PD_32(); return m68ki_read_32(ea); } case 5: // (d16, An) { uint32 ea = EA_AY_DI_32(); return m68ki_read_32(ea); } case 6: // (An) + (Xn) + d8 { uint32 ea = EA_AY_IX_32(); return m68ki_read_32(ea); } case 7: { switch (reg) { case 0: // (xxx).W { uint32 ea = (uint32)OPER_I_16(); return m68ki_read_32(ea); } case 1: // (xxx).L { uint32 d1 = OPER_I_16(); uint32 d2 = OPER_I_16(); uint32 ea = (d1 << 16) | d2; return m68ki_read_32(ea); } case 2: // (d16, PC) { uint32 ea = EA_PCDI_32(); return m68ki_read_32(ea); } case 4: // #<data> { return OPER_I_32(); } default: 
fatalerror("M68kFPU: READ_EA_32: unhandled mode %d, reg %d at %08X\n", mode, reg, REG_PC); } break; } default: fatalerror("M68kFPU: READ_EA_32: unhandled mode %d, reg %d at %08X\n", mode, reg, REG_PC); } return 0; } static uint64 READ_EA_64(int ea) { int mode = (ea >> 3) & 0x7; int reg = (ea & 0x7); uint32 h1, h2; switch (mode) { case 2: // (An) { uint32 ea = REG_A[reg]; h1 = m68ki_read_32(ea+0); h2 = m68ki_read_32(ea+4); return (uint64)(h1) << 32 | (uint64)(h2); } case 3: // (An)+ { uint32 ea = REG_A[reg]; REG_A[reg] += 8; h1 = m68ki_read_32(ea+0); h2 = m68ki_read_32(ea+4); return (uint64)(h1) << 32 | (uint64)(h2); } case 4: // -(An) { REG_A[reg] -= 8; uint32 ea = REG_A[reg]; h1 = m68ki_read_32(ea+0); h2 = m68ki_read_32(ea+4); return (uint64)(h1) << 32 | (uint64)(h2); } case 5: // (d16, An) { uint32 ea = EA_AY_DI_32(); h1 = m68ki_read_32(ea+0); h2 = m68ki_read_32(ea+4); return (uint64)(h1) << 32 | (uint64)(h2); } case 6: // (An) + (Xn) + d8 { uint32 ea = EA_AY_IX_16(); h1 = m68ki_read_32(ea+0); h2 = m68ki_read_32(ea+4); return (uint64)(h1) << 32 | (uint64)(h2); } case 7: { switch (reg) { case 1: // (xxx).L { uint32 d1 = OPER_I_16(); uint32 d2 = OPER_I_16(); uint32 ea = (d1 << 16) | d2; h1 = m68ki_read_32(ea+0); h2 = m68ki_read_32(ea+4); return (uint64)(h1) << 32 | (uint64)(h2); } case 4: // #<data> { h1 = OPER_I_32(); h2 = OPER_I_32(); return (uint64)(h1) << 32 | (uint64)(h2); } case 2: // (d16, PC) { uint32 ea = EA_PCDI_32(); h1 = m68ki_read_32(ea+0); h2 = m68ki_read_32(ea+4); return (uint64)(h1) << 32 | (uint64)(h2); } default: fatalerror("M68kFPU: READ_EA_64: unhandled mode %d, reg %d at %08X\n", mode, reg, REG_PC); } break; } default: fatalerror("M68kFPU: READ_EA_64: unhandled mode %d, reg %d at %08X\n", mode, reg, REG_PC); } return 0; } static floatx80 READ_EA_FPE(int mode, int reg, uint32 di_mode_ea) { floatx80 fpr; switch (mode) { case 2: // (An) { uint32 ea = REG_A[reg]; fpr = load_extended_float80(ea); break; } case 3: // (An)+ { uint32 ea = REG_A[reg]; REG_A[reg] += 12; fpr = load_extended_float80(ea); break; } case 4: // -(An) { REG_A[reg] -= 12; uint32 ea = REG_A[reg]; fpr = load_extended_float80(ea); break; } case 5: // (d16, An) (added by JFF) { fpr = load_extended_float80(di_mode_ea); break; } case 6: // (An) + (Xn) + d8 { uint32 ea = EA_AY_IX_16(); fpr = load_extended_float80(ea); break; } case 7: // extended modes { switch (reg) { case 1: // (xxx).L { uint32 d1 = OPER_I_16(); uint32 d2 = OPER_I_16(); fpr = load_extended_float80((d1 << 16) | d2); } break; case 2: // (d16, PC) { uint32 ea = EA_PCDI_32(); fpr = load_extended_float80(ea); } break; case 3: // (d16,PC,Dx.w) { uint32 ea = EA_PCIX_32(); fpr = load_extended_float80(ea); } break; case 4: // immediate (JFF) { uint32 ea = REG_PC; fpr = load_extended_float80(ea); REG_PC += 12; } break; default: fatalerror("M68kFPU: READ_EA_FPE: unhandled mode %d, reg %d, at %08X\n", mode, reg, REG_PC); break; } } break; default: fatalerror("M68kFPU: READ_EA_FPE: unhandled mode %d, reg %d, at %08X\n", mode, reg, REG_PC); break; } return fpr; } static floatx80 READ_EA_PACK(int ea) { floatx80 fpr; int mode = (ea >> 3) & 0x7; int reg = (ea & 0x7); switch (mode) { case 2: // (An) { uint32 ea = REG_A[reg]; fpr = load_pack_float80(ea); break; } case 3: // (An)+ { uint32 ea = REG_A[reg]; REG_A[reg] += 12; fpr = load_pack_float80(ea); break; } case 4: // -(An) { REG_A[reg] -= 12; uint32 ea = REG_A[reg]; fpr = load_pack_float80(ea); break; } case 7: // extended modes { switch (reg) { case 3: // (d16,PC,Dx.w) { uint32 ea = EA_PCIX_32(); fpr = 
load_pack_float80(ea); } break; default: fatalerror("M68kFPU: READ_EA_PACK: unhandled mode %d, reg %d, at %08X\n", mode, reg, REG_PC); break; } } break; default: fatalerror("M68kFPU: READ_EA_PACK: unhandled mode %d, reg %d, at %08X\n", mode, reg, REG_PC); break; } return fpr; } static void WRITE_EA_8(int ea, uint8 data) { int mode = (ea >> 3) & 0x7; int reg = (ea & 0x7); switch (mode) { case 0: // Dn { REG_D[reg] = data; break; } case 2: // (An) { uint32 ea = REG_A[reg]; m68ki_write_8(ea, data); break; } case 3: // (An)+ { uint32 ea = EA_AY_PI_8(); m68ki_write_8(ea, data); break; } case 4: // -(An) { uint32 ea = EA_AY_PD_8(); m68ki_write_8(ea, data); break; } case 5: // (d16, An) { uint32 ea = EA_AY_DI_8(); m68ki_write_8(ea, data); break; } case 6: // (An) + (Xn) + d8 { uint32 ea = EA_AY_IX_8(); m68ki_write_8(ea, data); break; } case 7: { switch (reg) { case 1: // (xxx).B { uint32 d1 = OPER_I_16(); uint32 d2 = OPER_I_16(); uint32 ea = (d1 << 16) | d2; m68ki_write_8(ea, data); break; } case 2: // (d16, PC) { uint32 ea = EA_PCDI_16(); m68ki_write_8(ea, data); break; } default: fatalerror("M68kFPU: WRITE_EA_8: unhandled mode %d, reg %d at %08X\n", mode, reg, REG_PC); } break; } default: fatalerror("M68kFPU: WRITE_EA_8: unhandled mode %d, reg %d, data %08X at %08X\n", mode, reg, data, REG_PC); } } static void WRITE_EA_16(int ea, uint16 data) { int mode = (ea >> 3) & 0x7; int reg = (ea & 0x7); switch (mode) { case 0: // Dn { REG_D[reg] = data; break; } case 2: // (An) { uint32 ea = REG_A[reg]; m68ki_write_16(ea, data); break; } case 3: // (An)+ { uint32 ea = EA_AY_PI_16(); m68ki_write_16(ea, data); break; } case 4: // -(An) { uint32 ea = EA_AY_PD_16(); m68ki_write_16(ea, data); break; } case 5: // (d16, An) { uint32 ea = EA_AY_DI_16(); m68ki_write_16(ea, data); break; } case 6: // (An) + (Xn) + d8 { uint32 ea = EA_AY_IX_16(); m68ki_write_16(ea, data); break; } case 7: { switch (reg) { case 1: // (xxx).W { uint32 d1 = OPER_I_16(); uint32 d2 = OPER_I_16(); uint32 ea = (d1 << 16) | d2; m68ki_write_16(ea, data); break; } case 2: // (d16, PC) { uint32 ea = EA_PCDI_16(); m68ki_write_16(ea, data); break; } default: fatalerror("M68kFPU: WRITE_EA_16: unhandled mode %d, reg %d at %08X\n", mode, reg, REG_PC); } break; } default: fatalerror("M68kFPU: WRITE_EA_16: unhandled mode %d, reg %d, data %08X at %08X\n", mode, reg, data, REG_PC); } } static void WRITE_EA_32(int ea, uint32 data) { int mode = (ea >> 3) & 0x7; int reg = (ea & 0x7); switch (mode) { case 0: // Dn { REG_D[reg] = data; break; } case 1: // An { REG_A[reg] = data; break; } case 2: // (An) { uint32 ea = REG_A[reg]; m68ki_write_32(ea, data); break; } case 3: // (An)+ { uint32 ea = EA_AY_PI_32(); m68ki_write_32(ea, data); break; } case 4: // -(An) { uint32 ea = EA_AY_PD_32(); m68ki_write_32(ea, data); break; } case 5: // (d16, An) { uint32 ea = EA_AY_DI_32(); m68ki_write_32(ea, data); break; } case 6: // (An) + (Xn) + d8 { uint32 ea = EA_AY_IX_32(); m68ki_write_32(ea, data); break; } case 7: { switch (reg) { case 1: // (xxx).L { uint32 d1 = OPER_I_16(); uint32 d2 = OPER_I_16(); uint32 ea = (d1 << 16) | d2; m68ki_write_32(ea, data); break; } case 2: // (d16, PC) { uint32 ea = EA_PCDI_32(); m68ki_write_32(ea, data); break; } default: fatalerror("M68kFPU: WRITE_EA_32: unhandled mode %d, reg %d at %08X\n", mode, reg, REG_PC); } break; } default: fatalerror("M68kFPU: WRITE_EA_32: unhandled mode %d, reg %d, data %08X at %08X\n", mode, reg, data, REG_PC); } } static void WRITE_EA_64(int ea, uint64 data) { int mode = (ea >> 3) & 0x7; int reg = (ea & 
0x7); switch (mode) { case 2: // (An) { uint32 ea = REG_A[reg]; m68ki_write_32(ea, (uint32)(data >> 32)); m68ki_write_32(ea+4, (uint32)(data)); break; } case 3: // (An)+ { uint32 ea; ea = REG_A[reg]; REG_A[reg] += 8; m68ki_write_32(ea+0, (uint32)(data >> 32)); m68ki_write_32(ea+4, (uint32)(data)); break; } case 4: // -(An) { uint32 ea; REG_A[reg] -= 8; ea = REG_A[reg]; m68ki_write_32(ea+0, (uint32)(data >> 32)); m68ki_write_32(ea+4, (uint32)(data)); break; } case 5: // (d16, An) { uint32 ea = EA_AY_DI_32(); m68ki_write_32(ea+0, (uint32)(data >> 32)); m68ki_write_32(ea+4, (uint32)(data)); break; } case 6: // (An) + (Xn) + d8 { uint32 ea = EA_AY_IX_16(); m68ki_write_32(ea+0, (uint32)(data >> 32)); m68ki_write_32(ea+4, (uint32)(data)); break; } case 7: { switch (reg) { case 1: // (xxx).L { uint32 d1 = OPER_I_16(); uint32 d2 = OPER_I_16(); uint32 ea = (d1 << 16) | d2; m68ki_write_32(ea+0, (uint32)(data >> 32)); m68ki_write_32(ea+4, (uint32)(data)); break; } case 2: // (d16, PC) { uint32 ea = EA_PCDI_32(); m68ki_write_32(ea+0, (uint32)(data >> 32)); m68ki_write_32(ea+4, (uint32)(data)); break; } default: fatalerror("M68kFPU: WRITE_EA_64: unhandled mode %d, data %08X%08X at %08X\n", mode, reg, (uint32)(data >> 32), (uint32)(data), REG_PC); } break; } default: fatalerror("M68kFPU: WRITE_EA_64: unhandled mode %d, reg %d, data %08X%08X at %08X\n", mode, reg, (uint32)(data >> 32), (uint32)(data), REG_PC); } } static void WRITE_EA_FPE(int mode, int reg, floatx80 fpr, uint32 di_mode_ea) { switch (mode) { case 2: // (An) { uint32 ea; ea = REG_A[reg]; store_extended_float80(ea, fpr); break; } case 3: // (An)+ { uint32 ea; ea = REG_A[reg]; store_extended_float80(ea, fpr); REG_A[reg] += 12; break; } case 4: // -(An) { uint32 ea; REG_A[reg] -= 12; ea = REG_A[reg]; store_extended_float80(ea, fpr); break; } case 5: // (d16, An) (added by JFF) { // EA_AY_DI_32() should not be done here because fmovem would increase // PC each time, reading incorrect displacement & advancing PC too much // uint32 ea = EA_AY_DI_32(); store_extended_float80(di_mode_ea, fpr); break; } case 7: { switch (reg) { case 1: // (xxx).L { uint32 d1 = OPER_I_16(); uint32 d2 = OPER_I_16(); uint32 ea = (d1 << 16) | d2; store_extended_float80(ea, fpr); break; } default: fatalerror("M68kFPU: WRITE_EA_FPE: unhandled mode %d, reg %d, at %08X\n", mode, reg, REG_PC); } break; } default: fatalerror("M68kFPU: WRITE_EA_FPE: unhandled mode %d, reg %d, at %08X\n", mode, reg, REG_PC); } } static void WRITE_EA_PACK(int ea, int k, floatx80 fpr) { int mode = (ea >> 3) & 0x7; int reg = (ea & 0x7); switch (mode) { case 2: // (An) { uint32 ea; ea = REG_A[reg]; store_pack_float80(ea, k, fpr); break; } case 3: // (An)+ { uint32 ea; ea = REG_A[reg]; store_pack_float80(ea, k, fpr); REG_A[reg] += 12; break; } case 4: // -(An) { uint32 ea; REG_A[reg] -= 12; ea = REG_A[reg]; store_pack_float80(ea, k, fpr); break; } case 7: { switch (reg) { default: fatalerror("M68kFPU: WRITE_EA_PACK: unhandled mode %d, reg %d, at %08X\n", mode, reg, REG_PC); } } break; default: fatalerror("M68kFPU: WRITE_EA_PACK: unhandled mode %d, reg %d, at %08X\n", mode, reg, REG_PC); } } static inline int is_inf(floatx80 reg) { if (((reg.high & 0x7fff) == 0x7fff) && ((reg.low<<1) == 0)) return reg.high & 0x8000 ? 
-1 : 1; return 0; } static void fpgen_rm_reg(uint16 w2) { int ea = REG_IR & 0x3f; int rm = (w2 >> 14) & 0x1; int src = (w2 >> 10) & 0x7; int dst = (w2 >> 7) & 0x7; int opmode = w2 & 0x7f; floatx80 source; // fmovecr #$f, fp0 f200 5c0f if (rm) { switch (src) { case 0: // Long-Word Integer { sint32 d = READ_EA_32(ea); source = int32_to_floatx80(d); break; } case 1: // Single-precision Real { uint32 d = READ_EA_32(ea); source = float32_to_floatx80(d); break; } case 2: // Extended-precision Real { int imode = (ea >> 3) & 0x7; int reg = (ea & 0x7); uint32 di_mode_ea = imode == 5 ? (REG_A[reg]+MAKE_INT_16(m68ki_read_imm_16())) : 0; source = READ_EA_FPE(imode,reg,di_mode_ea); break; } case 3: // Packed-decimal Real { source = READ_EA_PACK(ea); break; } case 4: // Word Integer { sint16 d = READ_EA_16(ea); source = int32_to_floatx80((sint32)d); break; } case 5: // Double-precision Real { uint64 d = READ_EA_64(ea); source = float64_to_floatx80(d); break; } case 6: // Byte Integer { sint8 d = READ_EA_8(ea); source = int32_to_floatx80((sint32)d); break; } case 7: // FMOVECR load from constant ROM { switch (w2 & 0x7f) { case 0x0: // Pi source.high = 0x4000; source.low = U64(0xc90fdaa22168c235); break; case 0xb: // log10(2) source.high = 0x3ffd; source.low = U64(0x9a209a84fbcff798); break; case 0xc: // e source.high = 0x4000; source.low = U64(0xadf85458a2bb4a9b); break; case 0xd: // log2(e) source.high = 0x3fff; source.low = U64(0xb8aa3b295c17f0bc); break; case 0xe: // log10(e) source.high = 0x3ffd; source.low = U64(0xde5bd8a937287195); break; case 0xf: // 0.0 source = int32_to_floatx80((sint32)0); break; case 0x30: // ln(2) source.high = 0x3ffe; source.low = U64(0xb17217f7d1cf79ac); break; case 0x31: // ln(10) source.high = 0x4000; source.low = U64(0x935d8dddaaa8ac17); break; case 0x32: // 1 (or 100? 
manuals are unclear, but 1 would make more sense) source = int32_to_floatx80((sint32)1); break; case 0x33: // 10^1 source = int32_to_floatx80((sint32)10); break; case 0x34: // 10^2 source = int32_to_floatx80((sint32)10*10); break; case 0x35: // 10^4 source = int32_to_floatx80((sint32)10000); break; case 0x36: // 10^8 source = double_to_fx80(1e8); break; case 0x37: // 10^16 source = double_to_fx80(1e16); break; case 0x38: // 10^32 source = double_to_fx80(1e32); break; case 0x39: // 10^64 source = double_to_fx80(1e64); break; case 0x3a: // 10^128 source = double_to_fx80(1e128); break; case 0x3b: // 10^256 source = double_to_fx80(1e256); break; case 0x3c: // 10^512 source = double_to_fx80(1e256); source = floatx80_mul(source, source); break; case 0x3d: // 10^1024 source = double_to_fx80(1e256); source = floatx80_mul(source, source); source = floatx80_mul(source, source); break; case 0x3e: // 10^2048 source = double_to_fx80(1e256); source = floatx80_mul(source, source); source = floatx80_mul(source, source); source = floatx80_mul(source, source); break; case 0x3f: // 10^4096 source = double_to_fx80(1e256); source = floatx80_mul(source, source); source = floatx80_mul(source, source); source = floatx80_mul(source, source); source = floatx80_mul(source, source); break; default: source = int32_to_floatx80((sint32)0); break; } // handle it right here, the usual opmode bits aren't valid in the FMOVECR case REG_FP[dst] = source; SET_CONDITION_CODES(REG_FP[dst]); // JFF when destination is a register, we HAVE to update FPCR USE_CYCLES(4); return; } default: fatalerror("fmove_rm_reg: invalid source specifier %x at %08X\n", src, REG_PC-4); } } else { source = REG_FP[src]; } switch (opmode) { case 0x44: // FDMOVED - maybe add rounding? case 0x00: // FMOVE { REG_FP[dst] = source; SET_CONDITION_CODES(REG_FP[dst]); // JFF needs update condition codes USE_CYCLES(4); break; } case 0x01: // Fsint { sint32 temp; temp = floatx80_to_int32(source); REG_FP[dst] = int32_to_floatx80(temp); SET_CONDITION_CODES(REG_FP[dst]); // JFF needs update condition codes break; } case 0x03: // FsintRZ { sint32 temp; temp = floatx80_to_int32_round_to_zero(source); REG_FP[dst] = int32_to_floatx80(temp); SET_CONDITION_CODES(REG_FP[dst]); // JFF needs update condition codes break; } case 0x04: // FSQRT { REG_FP[dst] = floatx80_sqrt(source); SET_CONDITION_CODES(REG_FP[dst]); USE_CYCLES(109); break; } case 0x18: // FABS { REG_FP[dst] = source; REG_FP[dst].high &= 0x7fff; SET_CONDITION_CODES(REG_FP[dst]); USE_CYCLES(3); break; } case 0x1a: // FNEG { REG_FP[dst] = source; REG_FP[dst].high ^= 0x8000; SET_CONDITION_CODES(REG_FP[dst]); USE_CYCLES(3); break; } case 0x1e: // FGETEXP { sint16 temp; temp = source.high; // get the exponent temp -= 0x3fff; // take off the bias REG_FP[dst] = double_to_fx80((double)temp); SET_CONDITION_CODES(REG_FP[dst]); USE_CYCLES(6); break; } case 0x60: // FSDIVS (JFF) (source has already been converted to floatx80) case 0x64: // FDDIV - maybe add rounding? case 0x20: // FDIV { REG_FP[dst] = floatx80_div(REG_FP[dst], source); SET_CONDITION_CODES(REG_FP[dst]); // JFF USE_CYCLES(43); break; } case 0x24: // FSGLDIV { REG_FP[dst] = double_to_fx80((float)fx80_to_double(floatx80_div(REG_FP[dst], source))); SET_CONDITION_CODES(REG_FP[dst]); // JFF USE_CYCLES(43); break; } case 0x22: // FADD { REG_FP[dst] = floatx80_add(REG_FP[dst], source); SET_CONDITION_CODES(REG_FP[dst]); USE_CYCLES(9); break; } case 0x63: // FSMULS (JFF) (source has already been converted to floatx80) case 0x67: // FDMUL - maybe add rounding? 
case 0x23: // FMUL { REG_FP[dst] = floatx80_mul(REG_FP[dst], source); SET_CONDITION_CODES(REG_FP[dst]); USE_CYCLES(11); break; } case 0x27: // FSGLMUL { REG_FP[dst] = double_to_fx80((float)fx80_to_double(floatx80_mul(REG_FP[dst], source))); SET_CONDITION_CODES(REG_FP[dst]); USE_CYCLES(11); break; } case 0x25: // FREM { REG_FP[dst] = floatx80_rem(REG_FP[dst], source); SET_CONDITION_CODES(REG_FP[dst]); USE_CYCLES(43); // guess break; } case 0x28: // FSUB { REG_FP[dst] = floatx80_sub(REG_FP[dst], source); SET_CONDITION_CODES(REG_FP[dst]); USE_CYCLES(9); break; } case 0x38: // FCMP { floatx80 res; // handle inf in comparison if there is no nan. int d = is_inf(REG_FP[dst]); int s = is_inf(source); if (!floatx80_is_nan(REG_FP[dst]) && !floatx80_is_nan(source) && (d || s)) { REG_FPSR &= ~(FPCC_N|FPCC_Z|FPCC_I|FPCC_NAN); if (s < 0) { if (d < 0) REG_FPSR |= FPCC_N | FPCC_Z; } else if (s > 0) { if (d > 0) REG_FPSR |= FPCC_Z; else REG_FPSR |= FPCC_N; } else if (d < 0) REG_FPSR |= FPCC_N; } else { res = floatx80_sub(REG_FP[dst], source); SET_CONDITION_CODES(res); } USE_CYCLES(7); break; } case 0x3a: // FTST { floatx80 res; res = source; SET_CONDITION_CODES(res); USE_CYCLES(7); break; } default: fatalerror("fpgen_rm_reg: unimplemented opmode %02X at %08X\n", opmode, REG_PC-4); } } static void fmove_reg_mem(uint16 w2) { int ea = REG_IR & 0x3f; int src = (w2 >> 7) & 0x7; int dst = (w2 >> 10) & 0x7; int k = (w2 & 0x7f); switch (dst) { case 0: // Long-Word Integer { sint32 d = (sint32)floatx80_to_int32(REG_FP[src]); WRITE_EA_32(ea, d); break; } case 1: // Single-precision Real { uint32 d = floatx80_to_float32(REG_FP[src]); WRITE_EA_32(ea, d); break; } case 2: // Extended-precision Real { int mode = (ea >> 3) & 0x7; int reg = (ea & 0x7); uint32 di_mode_ea = mode == 5 ? (REG_A[reg]+MAKE_INT_16(m68ki_read_imm_16())) : 0; WRITE_EA_FPE(mode, reg, REG_FP[src], di_mode_ea); break; } case 3: // Packed-decimal Real with Static K-factor { // sign-extend k k = (k & 0x40) ? (k | 0xffffff80) : (k & 0x7f); WRITE_EA_PACK(ea, k, REG_FP[src]); break; } case 4: // Word Integer { WRITE_EA_16(ea, (sint16)floatx80_to_int32(REG_FP[src])); break; } case 5: // Double-precision Real { uint64 d; d = floatx80_to_float64(REG_FP[src]); WRITE_EA_64(ea, d); break; } case 6: // Byte Integer { WRITE_EA_8(ea, (sint8)floatx80_to_int32(REG_FP[src])); break; } case 7: // Packed-decimal Real with Dynamic K-factor { WRITE_EA_PACK(ea, REG_D[k>>4], REG_FP[src]); break; } } USE_CYCLES(12); } static void fmove_fpcr(uint16 w2) { int ea = REG_IR & 0x3f; int dir = (w2 >> 13) & 0x1; int reg = (w2 >> 10) & 0x7; if (dir) // From system control reg to <ea> { if (reg & 4) WRITE_EA_32(ea, REG_FPCR); if (reg & 2) WRITE_EA_32(ea, REG_FPSR); if (reg & 1) WRITE_EA_32(ea, REG_FPIAR); } else // From <ea> to system control reg { if (reg & 4) { REG_FPCR = READ_EA_32(ea); // JFF: need to update rounding mode from softfloat module float_rounding_mode = (REG_FPCR >> 4) & 0x3; } if (reg & 2) REG_FPSR = READ_EA_32(ea); if (reg & 1) REG_FPIAR = READ_EA_32(ea); } USE_CYCLES(10); } static void fmovem(uint16 w2) { int i; int ea = REG_IR & 0x3f; int dir = (w2 >> 13) & 0x1; int mode = (w2 >> 11) & 0x3; int reglist = w2 & 0xff; if (dir) // From FP regs to mem { switch (mode) { case 2: // (JFF): Static register list, postincrement or control addressing mode. { int imode = (ea >> 3) & 0x7; int reg = (ea & 0x7); int di_mode = imode == 5; uint32 di_mode_ea = di_mode ? 
(REG_A[reg]+MAKE_INT_16(m68ki_read_imm_16())) : 0; for (i=0; i < 8; i++) { if (reglist & (1 << i)) { WRITE_EA_FPE(imode,reg, REG_FP[7-i],di_mode_ea); USE_CYCLES(2); if (di_mode) { di_mode_ea += 12; } } } break; } case 0: // Static register list, predecrement addressing mode { int imode = (ea >> 3) & 0x7; int reg = (ea & 0x7); // the "di_mode_ea" parameter kludge is required here else WRITE_EA_FPE would have // to call EA_AY_DI_32() (that advances PC & reads displacement) each time // when the proper behaviour is 1) read once, 2) increment ea for each matching register // this forces to pre-read the mode (named "imode") so we can decide to read displacement, only once int di_mode = imode == 5; uint32 di_mode_ea = di_mode ? (REG_A[reg]+MAKE_INT_16(m68ki_read_imm_16())) : 0; for (i=0; i < 8; i++) { if (reglist & (1 << i)) { WRITE_EA_FPE(imode,reg, REG_FP[i],di_mode_ea); USE_CYCLES(2); if (di_mode) { di_mode_ea += 12; } } } break; } default: fatalerror("040fpu0: FMOVEM: mode %d unimplemented at %08X\n", mode, REG_PC-4); } } else // From mem to FP regs { switch (mode) { case 2: // Static register list, postincrement addressing mode { int imode = (ea >> 3) & 0x7; int reg = (ea & 0x7); int di_mode = imode == 5; uint32 di_mode_ea = di_mode ? (REG_A[reg]+MAKE_INT_16(m68ki_read_imm_16())) : 0; for (i=0; i < 8; i++) { if (reglist & (1 << i)) { REG_FP[7-i] = READ_EA_FPE(imode,reg,di_mode_ea); USE_CYCLES(2); if (di_mode) { di_mode_ea += 12; } } } break; } default: fatalerror("040fpu0: FMOVEM: mode %d unimplemented at %08X\n", mode, REG_PC-4); } } } static void fscc() { // added by JFF, this seems to work properly now int condition = OPER_I_16() & 0x3f; int cc = TEST_CONDITION(condition); int mode = (REG_IR & 0x38) >> 3; int v = (cc ? 0xff : 0x00); switch (mode) { case 0: // fscc Dx { // If the specified floating-point condition is true, sets the byte integer operand at // the destination to TRUE (all ones); otherwise, sets the byte to FALSE (all zeros). REG_D[REG_IR & 7] = (REG_D[REG_IR & 7] & 0xFFFFFF00) | v; break; } case 5: // (disp,Ax) { int reg = REG_IR & 7; uint32 ea = REG_A[reg]+MAKE_INT_16(m68ki_read_imm_16()); m68ki_write_8(ea,v); break; } default: { // unimplemented see fpu_uae.cpp around line 1300 fatalerror("040fpu0: fscc: mode %d not implemented at %08X\n", mode, REG_PC-4); } } USE_CYCLES(7); // JFF unsure of the number of cycles!! } static void fbcc16(void) { sint32 offset; int condition = REG_IR & 0x3f; offset = (sint16)(OPER_I_16()); // TODO: condition and jump!!! if (TEST_CONDITION(condition)) { m68ki_trace_t0(); /* auto-disable (see m68kcpu.h) */ m68ki_branch_16(offset-2); } USE_CYCLES(7); } static void fbcc32(void) { sint32 offset; int condition = REG_IR & 0x3f; offset = OPER_I_32(); // TODO: condition and jump!!! 
if (TEST_CONDITION(condition)) { m68ki_trace_t0(); /* auto-disable (see m68kcpu.h) */ m68ki_branch_32(offset-4); } USE_CYCLES(7); } void m68040_fpu_op0() { m68ki_cpu.fpu_just_reset = 0; switch ((REG_IR >> 6) & 0x3) { case 0: { uint16 w2 = OPER_I_16(); switch ((w2 >> 13) & 0x7) { case 0x0: // FPU ALU FP, FP case 0x2: // FPU ALU ea, FP { fpgen_rm_reg(w2); break; } case 0x3: // FMOVE FP, ea { fmove_reg_mem(w2); break; } case 0x4: // FMOVEM ea, FPCR case 0x5: // FMOVEM FPCR, ea { fmove_fpcr(w2); break; } case 0x6: // FMOVEM ea, list case 0x7: // FMOVEM list, ea { fmovem(w2); break; } default: fatalerror("M68kFPU: unimplemented subop %d at %08X\n", (w2 >> 13) & 0x7, REG_PC-4); } break; } case 1: // FScc (JFF) { fscc(); break; } case 2: // FBcc disp16 { fbcc16(); break; } case 3: // FBcc disp32 { fbcc32(); break; } default: fatalerror("M68kFPU: unimplemented main op %d at %08X\n", (m68ki_cpu.ir >> 6) & 0x3, REG_PC-4); } } static void perform_fsave(uint32 addr, int inc) { if (inc) { // 68881 IDLE, version 0x1f m68ki_write_32(addr, 0x1f180000); m68ki_write_32(addr+4, 0); m68ki_write_32(addr+8, 0); m68ki_write_32(addr+12, 0); m68ki_write_32(addr+16, 0); m68ki_write_32(addr+20, 0); m68ki_write_32(addr+24, 0x70000000); } else { m68ki_write_32(addr, 0x70000000); m68ki_write_32(addr-4, 0); m68ki_write_32(addr-8, 0); m68ki_write_32(addr-12, 0); m68ki_write_32(addr-16, 0); m68ki_write_32(addr-20, 0); m68ki_write_32(addr-24, 0x1f180000); } } // FRESTORE on a NULL frame reboots the FPU - all registers to NaN, the 3 status regs to 0 static void do_frestore_null() { int i; REG_FPCR = 0; REG_FPSR = 0; REG_FPIAR = 0; for (i = 0; i < 8; i++) { REG_FP[i].high = 0x7fff; REG_FP[i].low = U64(0xffffffffffffffff); } // Mac IIci at 408458e6 wants an FSAVE of a just-restored NULL frame to also be NULL // The PRM says it's possible to generate a NULL frame, but not how/when/why. (need the 68881/68882 manual!) m68ki_cpu.fpu_just_reset = 1; } void m68040_fpu_op1() { int ea = REG_IR & 0x3f; int mode = (ea >> 3) & 0x7; int reg = (ea & 0x7); uint32 addr, temp; switch ((REG_IR >> 6) & 0x3) { case 0: // FSAVE <ea> { switch (mode) { case 3: // (An)+ addr = EA_AY_PI_32(); if (m68ki_cpu.fpu_just_reset) { m68ki_write_32(addr, 0); } else { // we normally generate an IDLE frame REG_A[reg] += 6*4; perform_fsave(addr, 1); } break; case 4: // -(An) addr = EA_AY_PD_32(); if (m68ki_cpu.fpu_just_reset) { m68ki_write_32(addr, 0); } else { // we normally generate an IDLE frame REG_A[reg] -= 6*4; perform_fsave(addr, 0); } break; default: fatalerror("M68kFPU: FSAVE unhandled mode %d reg %d at %x\n", mode, reg, REG_PC); } break; } break; case 1: // FRESTORE <ea> { switch (mode) { case 2: // (An) addr = REG_A[reg]; temp = m68ki_read_32(addr); // check for NULL frame if (temp & 0xff000000) { // we don't handle non-NULL frames and there's no pre/post inc/dec to do here m68ki_cpu.fpu_just_reset = 0; } else { do_frestore_null(); } break; case 3: // (An)+ addr = EA_AY_PI_32(); temp = m68ki_read_32(addr); // check for NULL frame if (temp & 0xff000000) { m68ki_cpu.fpu_just_reset = 0; // how about an IDLE frame? 
if ((temp & 0x00ff0000) == 0x00180000) { REG_A[reg] += 6*4; } // check UNIMP else if ((temp & 0x00ff0000) == 0x00380000) { REG_A[reg] += 14*4; } // check BUSY else if ((temp & 0x00ff0000) == 0x00b40000) { REG_A[reg] += 45*4; } } else { do_frestore_null(); } break; default: fatalerror("M68kFPU: FRESTORE unhandled mode %d reg %d at %x\n", mode, reg, REG_PC); } break; } break; default: fatalerror("m68040_fpu_op1: unimplemented op %d at %08X\n", (REG_IR >> 6) & 0x3, REG_PC-2); } }
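/* The extended-precision load/store helpers above assume the 68881's 96-bit
 * (three-longword) memory image: a 16-bit sign/exponent word, 16 bits of
 * padding, then the 64-bit mantissa.  The standalone sketch below restates that
 * packing; the helper names are illustrative and not part of Musashi. */
static inline void fx80_to_mem96(floatx80 fp, uint32 out[3])
{
	out[0] = ((uint32)fp.high << 16);           /* sign + 15-bit exponent, then pad */
	out[1] = (uint32)(fp.low >> 32);            /* mantissa bits 63..32 */
	out[2] = (uint32)(fp.low & 0xffffffff);     /* mantissa bits 31..0 */
}

static inline floatx80 mem96_to_fx80(const uint32 in[3])
{
	floatx80 fp;
	fp.high = (uint16)(in[0] >> 16);
	fp.low = ((uint64)in[1] << 32) | (in[2] & 0xffffffff);
	return fp;
}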
695627.c
#include <stdio.h>

extern int remove_file(const char *path);

/* Thin libc-style shim: forward the standard remove() entry point to the
   platform's remove_file() implementation. */
int remove (const char *path)
{
  return remove_file(path);
}
986185.c
#include <assert.h> #include <bcon.h> #include <mongoc.h> #include <stdio.h> static void bulk3 (mongoc_collection_t *collection) { mongoc_bulk_operation_t *bulk; bson_error_t error; bson_t *query; bson_t *doc; bson_t reply; char *str; bool ret; /* false indicates unordered */ bulk = mongoc_collection_create_bulk_operation (collection, false, NULL); /* Add a document */ doc = BCON_NEW ("_id", BCON_INT32 (1)); mongoc_bulk_operation_insert (bulk, doc); bson_destroy (doc); /* remove {_id: 2} */ query = BCON_NEW ("_id", BCON_INT32 (2)); mongoc_bulk_operation_remove_one (bulk, query); bson_destroy (query); /* insert {_id: 3} */ doc = BCON_NEW ("_id", BCON_INT32 (3)); mongoc_bulk_operation_insert (bulk, doc); bson_destroy (doc); /* replace {_id:4} {'i': 1} */ query = BCON_NEW ("_id", BCON_INT32 (4)); doc = BCON_NEW ("i", BCON_INT32 (1)); mongoc_bulk_operation_replace_one (bulk, query, doc, false); bson_destroy (query); bson_destroy (doc); ret = mongoc_bulk_operation_execute (bulk, &reply, &error); str = bson_as_json (&reply, NULL); printf ("%s\n", str); bson_free (str); if (!ret) { printf ("Error: %s\n", error.message); } bson_destroy (&reply); mongoc_bulk_operation_destroy (bulk); } int main (int argc, char *argv[]) { mongoc_client_t *client; mongoc_collection_t *collection; mongoc_init (); client = mongoc_client_new ("mongodb://localhost/?appname=bulk3-example"); mongoc_client_set_error_api (client, 2); collection = mongoc_client_get_collection (client, "test", "test"); bulk3 (collection); mongoc_collection_destroy (collection); mongoc_client_destroy (client); mongoc_cleanup (); return 0; }
891334.c
/*! */ /*! ********************************************************************************* ************************************************************************************* * Include ************************************************************************************* ********************************************************************************** */ #include "ieee802p15p4_wrapper.h" /* Drv */ #include "LED.h" #include "Keyboard.h" /* Fwk */ #include "SerialManager.h" #include "TimersManager.h" #include "FunctionLib.h" #include "MemManager.h" #include "SecLib.h" /* KSDK */ #include "board.h" #include "fsl_os_abstraction.h" /************************************************************************************ ************************************************************************************* * Private macros ************************************************************************************* ************************************************************************************/ enum { stateInit, waitConnectionResponse, stateConnected }; #define mDefaultValueOfDataLen_c 20 #define gMessageMarkCR_c 0x0D /* Events */ #define gAppEvtDummyEvent_c (1 << 0) #define gAppEvtRxFromComm_c (1 << 1) #define gAppEvtMacManagement_c (1 << 2) #define gAppEvtMacData_c (1 << 3) #define gAppEvtButton_c (1 << 4) enum { errorNoError, errorWrongConfirm, errorNotSuccessful, errorNoMessage, errorAllocFailed, errorInvalidParameter, errorNoScanResults }; /************************************************************************************ ************************************************************************************* * Private prototypes ************************************************************************************* ************************************************************************************/ static void App_CommRxCallBack(void*); static void App_HandleKeys(key_event_t events); void App_init( void ); void AppThread (uint32_t argument); /************************************************************************************ ************************************************************************************* * Private memory declarations ************************************************************************************* ************************************************************************************/ static uint16_t mShortAddress = 0xFFFF; static uint16_t mPanId = 0xFFFF; static uint16_t mDestinationAddress = 0xFFFF; static uint8_t mChannel = 0xFF; static osaEventId_t mAppEvent; static bool_t node_connected = FALSE; static bool_t node_is_coordinator = FALSE; static uint8_t mInterfaceId; /* The current state of the applications state machine */ static uint8_t gState; static int mlme_event = 0; static int mcps_event = 0; static char received_data[128] = {0}; static uint16_t received_data_src = 0xFFFF; static uint8_t received_data_len = 0; static uint8_t button_event = 0; uint8_t mac_address[8] = {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01}; static uint8_t maCommDataBuffer[64] = {0}; /************************************************************************************ ************************************************************************************* * Public functions ************************************************************************************* ************************************************************************************/ /*! ********************************************************************************* * \brief This is the first task created by the OS. 
This task will initialize * the system * * \param[in] argument * * \return None. * * \pre * * \post * * \remarks * ********************************************************************************** */ void main_task(uint32_t param) { static uint8_t initialized = FALSE; if( !initialized ) { initialized = TRUE; hardware_init(); MEM_Init(); TMR_Init(); LED_Init(); SecLib_Init(); SerialManager_Init(); App_init(); } /* Call application task */ AppThread( param ); } /***************************************************************************** * Initialization function for the App Task. This is called during * initialization and should contain any application specific initialization * (ie. hardware initialization/setup, table initialization, power up * notification. * * Interface assumptions: None * * Return value: None * *****************************************************************************/ void App_init(void) { mAppEvent = OSA_EventCreate(TRUE); /* The initial application state */ gState = stateInit; /* Register keyboard callback function */ KBD_Init(App_HandleKeys); /* Initialize the serial terminal interface so that we can print out status messages */ Serial_InitInterface(&mInterfaceId, APP_SERIAL_INTERFACE_TYPE, APP_SERIAL_INTERFACE_INSTANCE); Serial_SetBaudRate(mInterfaceId, gUARTBaudRate57600_c); Serial_SetRxCallBack(mInterfaceId, App_CommRxCallBack, NULL); /*signal app ready*/ LED_StartSerialFlash(LED1); Serial_Print(mInterfaceId,"\n\rPress SW2 on the CR20 board to change the MAC address, SW1 to start the node initialization and connection\n\r", gAllowToBlock_d); } /***************************************************************************** * Handler for events from the MAC wrapper -- THIS IS A CALLBACK FUNCTION -- *****************************************************************************/ void mac_events_handler(void* evt_par) { mac_event_data_t* mac_evt_data = (mac_event_data_t* ) evt_par; switch(mac_evt_data->mac_event_type) { case mac_management_event_c: switch(mac_evt_data->evt_data.management_event_data->msgType){ case gMlmeAssociateCnf_c: /* Network found, started as End Device */ mShortAddress = mac_evt_data->evt_data.management_event_data->msgData.associateCnf.assocShortAddress; node_connected = TRUE; break; case gMlmeStartCnf_c: /* No network found started a new network as Coordinator */ mShortAddress = 0; node_connected = TRUE; node_is_coordinator = TRUE; break; default: mlme_event = mac_evt_data->evt_data.management_event_data->msgType; break; } OSA_EventSet(mAppEvent, gAppEvtMacManagement_c); break; case mac_data_event_c: switch(mac_evt_data->evt_data.data_event_data->msgType){ case gMcpsDataInd_c: FLib_MemSet(received_data, 0, sizeof(received_data)); FLib_MemCpy( received_data, mac_evt_data->evt_data.data_event_data->msgData.dataInd.pMsdu, mac_evt_data->evt_data.data_event_data->msgData.dataInd.msduLength); received_data_len = mac_evt_data->evt_data.data_event_data->msgData.dataInd.msduLength; FLib_MemCpy(&received_data_src, &(mac_evt_data->evt_data.data_event_data->msgData.dataInd.srcAddr), 2); break; default: received_data_len = 0; received_data_src = 0xFFFF; mcps_event = mac_evt_data->evt_data.data_event_data->msgType; break; } OSA_EventSet(mAppEvent, gAppEvtMacData_c); break; default: break; } } /***************************************************************************** * The AppTask(event_t events) function is the applicantion main loop and * will process any incoming event. Events include timers, messages and any * other user defined events. 
* * Interface assumptions: * None * * Return value: * None *****************************************************************************/ void AppThread(uint32_t argument) { osaEventFlags_t ev; /* Stores the error/success code returned by some functions. */ static uint8_t mCounter = 0; while(1) { OSA_EventWait(mAppEvent, osaEventFlagsAll_c, FALSE, osaWaitForever_c, &ev); switch(gState) { case stateInit: if(ev & gAppEvtButton_c){ if(button_event == gKBD_EventSW3_c) { Serial_Print(mInterfaceId,"MAC address: ", gAllowToBlock_d); for(int i = 0; i<8; i++) { mac_address[i]++; Serial_PrintHex(mInterfaceId,(uint8_t*)&mac_address[i], 1, 0); } Serial_Print(mInterfaceId,"\r\n", gAllowToBlock_d); } if(button_event == gKBD_EventSW4_c) { /*Initialize the MAC Wrapper*/ LED_StopFlashingAllLeds(); Serial_Print(mInterfaceId,"Initializing MAC.\n\r", gAllowToBlock_d); mac_init(mac_address); Serial_Print(mInterfaceId,"Node is initialized and ready.\n\r", gAllowToBlock_d); /* Goto Energy Detection state. */ mPanId = 0xC0C0; mChannel = gLogicalChannel11_c; Serial_Print(mInterfaceId,"Starting connection, this can take several seconds.\n\r", gAllowToBlock_d); mac_connect(mChannel, mPanId, mac_events_handler); gState = waitConnectionResponse; } } break; case waitConnectionResponse: /* Handle connection response */ if(node_connected){ Serial_Print(mInterfaceId," Node Connected as ", gAllowToBlock_d); if(node_is_coordinator){ Serial_Print(mInterfaceId,"Coordinator with short address: ", gAllowToBlock_d); } else { Serial_Print(mInterfaceId,"End device with short address: ", gAllowToBlock_d); } Serial_PrintHex(mInterfaceId,(uint8_t*)&mShortAddress, 2, 0); Serial_Print(mInterfaceId," Pan Id: ", gAllowToBlock_d); Serial_PrintHex(mInterfaceId,(uint8_t*)&mPanId, 2, 0); Serial_Print(mInterfaceId," Channel: ", gAllowToBlock_d); Serial_PrintHex(mInterfaceId,(uint8_t*)&mChannel, 1, 0); Serial_Print(mInterfaceId,"\r\n", gAllowToBlock_d); gState = stateConnected; OSA_EventSet(mAppEvent, gAppEvtDummyEvent_c); } break; case stateConnected: /* Handle events from the UART */ if (ev & gAppEvtRxFromComm_c) { uint16_t count; unsigned char received_byte = 0; (void)Serial_GetByteFromRxBuffer(mInterfaceId, &received_byte, &count); if((received_byte >= ' ') && (received_byte <= '~')) { maCommDataBuffer[mCounter++] = received_byte; } if((mCounter >= 64) || (received_byte == '\r')){ mac_transmit(mDestinationAddress, maCommDataBuffer, mCounter); FLib_MemSet(maCommDataBuffer, 0, 64); mCounter = 0; } } /* Handle MAC management events */ if(ev & gAppEvtMacManagement_c){ Serial_Print(mInterfaceId,"Network management event: ", gAllowToBlock_d); Serial_PrintHex(mInterfaceId,(uint8_t*)&mlme_event, 4, 0); Serial_Print(mInterfaceId,"\r\n", gAllowToBlock_d); } /* Handle MAC data events */ if(ev & gAppEvtMacData_c){ if(received_data_len){ Serial_Print(mInterfaceId,"Message from ", gAllowToBlock_d); Serial_PrintHex(mInterfaceId,(uint8_t*)&received_data_src, 2, 0); Serial_Print(mInterfaceId," : ", gAllowToBlock_d); Serial_Print(mInterfaceId, received_data, gAllowToBlock_d); Serial_Print(mInterfaceId,"\r\n", gAllowToBlock_d); } else { Serial_Print(mInterfaceId,"Network data event: ", gAllowToBlock_d); Serial_PrintHex(mInterfaceId,(uint8_t*)&mcps_event, 4, 0); Serial_Print(mInterfaceId,"\r\n", gAllowToBlock_d); } } /* Handle button events */ if(ev & gAppEvtButton_c){ if(button_event == gKBD_EventSW3_c) { Serial_Print(mInterfaceId,"Destination address: ", gAllowToBlock_d); mDestinationAddress++; Serial_PrintHex(mInterfaceId,(uint8_t*)&mDestinationAddress, 2, 
0); Serial_Print(mInterfaceId,"\r\n", gAllowToBlock_d); } if(button_event == gKBD_EventSW4_c) { Serial_Print(mInterfaceId,"Destination address: ", gAllowToBlock_d); mDestinationAddress--; Serial_PrintHex(mInterfaceId,(uint8_t*)&mDestinationAddress, 2, 0); Serial_Print(mInterfaceId,"\r\n", gAllowToBlock_d); } } break; } /* end switch*/ } } /************************************************************************************ ************************************************************************************* * Private functions ************************************************************************************* ************************************************************************************/ /***************************************************************************** * App_CommRxCallBack * * This callback is triggered when a new byte is received over the serial terminal interface * *****************************************************************************/ static void App_CommRxCallBack(void *pData) { uint8_t pressedKey; uint16_t count; (void)pData; if(stateConnected == gState) { OSA_EventSet(mAppEvent, gAppEvtRxFromComm_c); } else { do{ (void)Serial_GetByteFromRxBuffer(mInterfaceId, &pressedKey, &count); }while(count); } } /***************************************************************************** * The App_HandleKeys(key_event_t events) function can handle different * key events. It waits for user to push a button in order to start * the application. * * Interface assumptions: * None * * Return value: * None *****************************************************************************/ static void App_HandleKeys( key_event_t events ) { button_event = events; OSA_EventSet(mAppEvent, gAppEvtButton_c); switch ( button_event ) { case gKBD_EventSW1_c: case gKBD_EventSW2_c: case gKBD_EventSW3_c: case gKBD_EventSW4_c: case gKBD_EventLongSW1_c: case gKBD_EventLongSW2_c: case gKBD_EventLongSW3_c: case gKBD_EventLongSW4_c: default: break; } }
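/*
 * Condensed usage sketch (illustration only, not part of the original demo):
 * the minimal wrapper-API call sequence that the state machine above is built
 * around. The OSA event plumbing, LED/serial feedback and error handling are
 * omitted; the functions, constants and callback used here are the ones that
 * appear in the application code above.
 */
#if 0
static void example_minimal_node(void)
{
    static uint8_t addr[8] = {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01};
    uint8_t payload[] = "hello";

    /* Bring up the MAC with an extended address. */
    mac_init(addr);

    /* Join (or start) PAN 0xC0C0 on channel 11; results are delivered
     * asynchronously through mac_events_handler(). */
    mac_connect(gLogicalChannel11_c, 0xC0C0, mac_events_handler);

    /* After the callback reports gMlmeAssociateCnf_c or gMlmeStartCnf_c,
     * data can be sent to a peer's short address. */
    mac_transmit(0x0000, payload, sizeof(payload) - 1);
}
#endif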
782496.c
/** ****************************************************************************** * @file stm8s_gpio.gpio_init.c * @author MCD Application Team * @version V2.2.0 * @date 30-September-2014 * @brief This file contains all the functions for the GPIO peripheral. ****************************************************************************** * @attention * * <h2><center>&copy; COPYRIGHT 2014 STMicroelectronics</center></h2> * * Licensed under MCD-ST Liberty SW License Agreement V2, (the "License"); * You may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.st.com/software_license_agreement_liberty_v2 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ #include "stm8s_gpio.h" /** @addtogroup STM8S_StdPeriph_Driver * @{ */ /* Private typedef -----------------------------------------------------------*/ /* Private define ------------------------------------------------------------*/ /* Private macro -------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /* Private function prototypes -----------------------------------------------*/ /* Private functions ---------------------------------------------------------*/ /* Public functions ----------------------------------------------------------*/ /** * @addtogroup GPIO_Public_Functions * @{ */ /** * @brief Initializes the GPIOx according to the specified parameters. * @param GPIOx : Select the GPIO peripheral number (x = A to I). * @param GPIO_Pin : This parameter contains the pin number, it can be any value * of the @ref GPIO_Pin_TypeDef enumeration. * @param GPIO_Mode : This parameter can be a value of the * @Ref GPIO_Mode_TypeDef enumeration. 
* @retval None */ void GPIO_Init(GPIO_TypeDef* GPIOx, GPIO_Pin_TypeDef GPIO_Pin, GPIO_Mode_TypeDef GPIO_Mode) { /*----------------------*/ /* Check the parameters */ /*----------------------*/ assert_param(IS_GPIO_MODE_OK(GPIO_Mode)); assert_param(IS_GPIO_PIN_OK(GPIO_Pin)); /* Reset corresponding bit to GPIO_Pin in CR2 register */ GPIOx->CR2 &= (uint8_t)(~(GPIO_Pin)); /*-----------------------------*/ /* Input/Output mode selection */ /*-----------------------------*/ if ((((uint8_t)(GPIO_Mode)) & (uint8_t)0x80) != (uint8_t)0x00) /* Output mode */ { if ((((uint8_t)(GPIO_Mode)) & (uint8_t)0x10) != (uint8_t)0x00) /* High level */ { GPIOx->ODR |= (uint8_t)GPIO_Pin; } else /* Low level */ { GPIOx->ODR &= (uint8_t)(~(GPIO_Pin)); } /* Set Output mode */ GPIOx->DDR |= (uint8_t)GPIO_Pin; } else /* Input mode */ { /* Set Input mode */ GPIOx->DDR &= (uint8_t)(~(GPIO_Pin)); } /*------------------------------------------------------------------------*/ /* Pull-Up/Float (Input) or Push-Pull/Open-Drain (Output) modes selection */ /*------------------------------------------------------------------------*/ if ((((uint8_t)(GPIO_Mode)) & (uint8_t)0x40) != (uint8_t)0x00) /* Pull-Up or Push-Pull */ { GPIOx->CR1 |= (uint8_t)GPIO_Pin; } else /* Float or Open-Drain */ { GPIOx->CR1 &= (uint8_t)(~(GPIO_Pin)); } /*-----------------------------------------------------*/ /* Interrupt (Input) or Slope (Output) modes selection */ /*-----------------------------------------------------*/ if ((((uint8_t)(GPIO_Mode)) & (uint8_t)0x20) != (uint8_t)0x00) /* Interrupt or Slow slope */ { GPIOx->CR2 |= (uint8_t)GPIO_Pin; } else /* No external interrupt or No slope control */ { GPIOx->CR2 &= (uint8_t)(~(GPIO_Pin)); } } /** * @} */ /** * @} */ /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
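/*
 * Minimal usage sketch (illustration only): configuring a pin as a push-pull
 * output with GPIO_Init() and toggling it. GPIOB, GPIO_PIN_5, the mode value
 * GPIO_MODE_OUT_PP_LOW_FAST and the helper GPIO_WriteReverse() are assumed to
 * be provided by the same stm8s_gpio.h driver; adapt the port, pin and mode
 * to the actual board wiring.
 */
#if 0
static void example_blink_pb5(void)
{
    volatile uint16_t i;

    /* PB5 as push-pull output, initial level low, fast slew rate */
    GPIO_Init(GPIOB, GPIO_PIN_5, GPIO_MODE_OUT_PP_LOW_FAST);

    for (;;)
    {
        GPIO_WriteReverse(GPIOB, GPIO_PIN_5);  /* toggle the output latch */
        for (i = 0; i < 30000; i++);           /* crude software delay */
    }
}
#endif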
763258.c
#include <stdio.h>
#include <stdlib.h>
#include "assembler.h"

int main(int argc, char const *argv[])
{
    if (argc < 3 || argc > 4){
        fprintf(stderr, "ERROR: Wrong call format or number of parameters.\n"
                        "FORMAT: <assembler exec.> in_file out_file [s|l|v]\n"
                        "(s: simple, l: linker output, v: verbose)\n");
        exit(1);
    }

    const char *src_addr = argv[1];
    const char *dest_addr = argv[2];

    /* The optional fourth argument selects the output mode; linker output is
     * the default when it is absent or unrecognized. */
    output_mode om = om_linker;
    if (argc == 4 && argv[3][0] == 'v'){
        om = om_verbose;
    } else if (argc == 4 && argv[3][0] == 's'){
        om = om_simple;
    }

    asmAssemble(src_addr, dest_addr, om);
    return 0;
}
165526.c
/************************************************************************** * * Copyright (C) 2014 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <unistd.h> #include <stdio.h> #include <errno.h> #include "pipe/p_shader_tokens.h" #include "pipe/p_defines.h" #include "pipe/p_state.h" #include "util/u_inlines.h" #include "util/u_memory.h" #include "util/u_dual_blend.h" #include "util/u_thread.h" #include "util/u_format.h" #include "tgsi/tgsi_parse.h" #include "vrend_object.h" #include "vrend_shader.h" #include "vrend_renderer.h" #include "vrend_blitter.h" #include "vrend_debug.h" #include "vrend_winsys.h" #include "vrend_blitter.h" #include "virgl_util.h" #include "virgl_hw.h" #include "virgl_resource.h" #include "virglrenderer.h" #include "virglrenderer_hw.h" #include "virgl_protocol.h" #include "tgsi/tgsi_text.h" #ifdef HAVE_EPOXY_GLX_H #include <epoxy/glx.h> #endif /* * VIRGL_RENDERER_CAPSET_VIRGL has version 0 and 1, but they are both * virgl_caps_v1 and are exactly the same. * * VIRGL_RENDERER_CAPSET_VIRGL2 has version 0, 1, and 2, but they are * all virgl_caps_v2 and are exactly the same. * * Since virgl_caps_v2 is growable and no backward-incompatible change is * expected, we don't bump up these versions anymore. */ #define VREND_CAPSET_VIRGL_MAX_VERSION 1 #define VREND_CAPSET_VIRGL2_MAX_VERSION 2 static const uint32_t fake_occlusion_query_samples_passed_default = 1024; const struct vrend_if_cbs *vrend_clicbs; struct vrend_fence { /* When the sync thread is waiting on the fence and the main thread * destroys the context, ctx is set to NULL. Otherwise, ctx is always * valid. 
*/ struct vrend_context *ctx; uint32_t flags; void *fence_cookie; union { GLsync glsyncobj; #ifdef HAVE_EPOXY_EGL_H EGLSyncKHR eglsyncobj; #endif }; struct list_head fences; }; struct vrend_query { struct list_head waiting_queries; GLuint id; GLuint type; GLuint index; GLuint gltype; struct vrend_context *ctx; struct vrend_resource *res; uint64_t current_total; bool fake_samples_passed; }; struct global_error_state { enum virgl_errors last_error; }; enum features_id { feat_arb_or_gles_ext_texture_buffer, feat_arb_robustness, feat_arb_buffer_storage, feat_arrays_of_arrays, feat_ati_meminfo, feat_atomic_counters, feat_base_instance, feat_barrier, feat_bind_vertex_buffers, feat_bit_encoding, feat_blend_equation_advanced, feat_clear_texture, feat_clip_control, feat_compute_shader, feat_copy_image, feat_conditional_render_inverted, feat_conservative_depth, feat_cube_map_array, feat_cull_distance, feat_debug_cb, feat_depth_clamp, feat_draw_instance, feat_dual_src_blend, feat_egl_image_external, feat_egl_image_storage, feat_enhanced_layouts, feat_fb_no_attach, feat_framebuffer_fetch, feat_framebuffer_fetch_non_coherent, feat_geometry_shader, feat_gl_conditional_render, feat_gl_prim_restart, feat_gles_khr_robustness, feat_gles31_compatibility, feat_gles31_vertex_attrib_binding, feat_gpu_shader5, feat_images, feat_indep_blend, feat_indep_blend_func, feat_indirect_draw, feat_indirect_params, feat_khr_debug, feat_memory_object, feat_memory_object_fd, feat_mesa_invert, feat_ms_scaled_blit, feat_multisample, feat_multi_draw_indirect, feat_nv_conditional_render, feat_nv_prim_restart, feat_nvx_gpu_memory_info, feat_polygon_offset_clamp, feat_occlusion_query, feat_occlusion_query_boolean, feat_qbo, feat_robust_buffer_access, feat_sample_mask, feat_sample_shading, feat_samplers, feat_sampler_border_colors, feat_shader_clock, feat_separate_shader_objects, feat_ssbo, feat_ssbo_barrier, feat_srgb_write_control, feat_stencil_texturing, feat_storage_multisample, feat_tessellation, feat_texture_array, feat_texture_barrier, feat_texture_buffer_range, feat_texture_gather, feat_texture_multisample, feat_texture_query_lod, feat_texture_srgb_decode, feat_texture_storage, feat_texture_view, feat_timer_query, feat_transform_feedback, feat_transform_feedback2, feat_transform_feedback3, feat_transform_feedback_overflow_query, feat_txqs, feat_ubo, feat_viewport_array, feat_implicit_msaa, feat_anisotropic_filter, feat_last, }; #define FEAT_MAX_EXTS 4 #define UNAVAIL INT_MAX #define FEAT(NAME, GLVER, GLESVER, ...) 
\ [feat_ ## NAME ] = {GLVER, GLESVER, { __VA_ARGS__ }, #NAME} static const struct { int gl_ver; int gles_ver; const char *gl_ext[FEAT_MAX_EXTS]; const char *log_name; } feature_list[] = { FEAT(arb_or_gles_ext_texture_buffer, 31, UNAVAIL, "GL_ARB_texture_buffer_object", "GL_EXT_texture_buffer", NULL), FEAT(arb_robustness, UNAVAIL, UNAVAIL, "GL_ARB_robustness" ), FEAT(arb_buffer_storage, 44, UNAVAIL, "GL_ARB_buffer_storage", "GL_EXT_buffer_storage"), FEAT(arrays_of_arrays, 43, 31, "GL_ARB_arrays_of_arrays"), FEAT(ati_meminfo, UNAVAIL, UNAVAIL, "GL_ATI_meminfo" ), FEAT(atomic_counters, 42, 31, "GL_ARB_shader_atomic_counters" ), FEAT(base_instance, 42, UNAVAIL, "GL_ARB_base_instance", "GL_EXT_base_instance" ), FEAT(barrier, 42, 31, "GL_ARB_shader_image_load_store"), FEAT(bind_vertex_buffers, 44, UNAVAIL, NULL), FEAT(bit_encoding, 33, UNAVAIL, "GL_ARB_shader_bit_encoding" ), FEAT(blend_equation_advanced, UNAVAIL, 32, "GL_KHR_blend_equation_advanced" ), FEAT(clear_texture, 44, UNAVAIL, "GL_ARB_clear_texture", "GL_EXT_clear_texture"), FEAT(clip_control, 45, UNAVAIL, "GL_ARB_clip_control", "GL_EXT_clip_control"), FEAT(compute_shader, 43, 31, "GL_ARB_compute_shader" ), FEAT(copy_image, 43, 32, "GL_ARB_copy_image", "GL_EXT_copy_image", "GL_OES_copy_image" ), FEAT(conditional_render_inverted, 45, UNAVAIL, "GL_ARB_conditional_render_inverted" ), FEAT(conservative_depth, 42, UNAVAIL, "GL_ARB_conservative_depth", "GL_EXT_conservative_depth" ), FEAT(cube_map_array, 40, 32, "GL_ARB_texture_cube_map_array", "GL_EXT_texture_cube_map_array", "GL_OES_texture_cube_map_array" ), FEAT(cull_distance, 45, UNAVAIL, "GL_ARB_cull_distance", "GL_EXT_clip_cull_distance" ), FEAT(debug_cb, UNAVAIL, UNAVAIL, NULL), /* special case */ FEAT(draw_instance, 31, 30, "GL_ARB_draw_instanced" ), FEAT(dual_src_blend, 33, UNAVAIL, "GL_ARB_blend_func_extended", "GL_EXT_blend_func_extended" ), FEAT(depth_clamp, 32, UNAVAIL, "GL_ARB_depth_clamp", "GL_EXT_depth_clamp", "GL_NV_depth_clamp"), FEAT(enhanced_layouts, 44, UNAVAIL, "GL_ARB_enhanced_layouts"), FEAT(egl_image_external, UNAVAIL, UNAVAIL, "GL_OES_EGL_image_external"), FEAT(egl_image_storage, UNAVAIL, UNAVAIL, "GL_EXT_EGL_image_storage"), FEAT(fb_no_attach, 43, 31, "GL_ARB_framebuffer_no_attachments" ), FEAT(framebuffer_fetch, UNAVAIL, UNAVAIL, "GL_EXT_shader_framebuffer_fetch" ), FEAT(framebuffer_fetch_non_coherent, UNAVAIL, UNAVAIL, "GL_EXT_shader_framebuffer_fetch_non_coherent" ), FEAT(geometry_shader, 32, 32, "GL_EXT_geometry_shader", "GL_OES_geometry_shader"), FEAT(gl_conditional_render, 30, UNAVAIL, NULL), FEAT(gl_prim_restart, 31, 30, NULL), FEAT(gles_khr_robustness, UNAVAIL, UNAVAIL, "GL_KHR_robustness" ), FEAT(gles31_compatibility, 45, 31, "ARB_ES3_1_compatibility" ), FEAT(gles31_vertex_attrib_binding, 43, 31, "GL_ARB_vertex_attrib_binding" ), FEAT(gpu_shader5, 40, 32, "GL_ARB_gpu_shader5", "GL_EXT_gpu_shader5", "GL_OES_gpu_shader5" ), FEAT(images, 42, 31, "GL_ARB_shader_image_load_store" ), FEAT(indep_blend, 30, 32, "GL_EXT_draw_buffers2", "GL_OES_draw_buffers_indexed" ), FEAT(indep_blend_func, 40, 32, "GL_ARB_draw_buffers_blend", "GL_OES_draw_buffers_indexed"), FEAT(indirect_draw, 40, 31, "GL_ARB_draw_indirect" ), FEAT(indirect_params, 46, UNAVAIL, "GL_ARB_indirect_parameters" ), FEAT(khr_debug, 43, 32, "GL_KHR_debug" ), FEAT(memory_object, UNAVAIL, UNAVAIL, "GL_EXT_memory_object"), FEAT(memory_object_fd, UNAVAIL, UNAVAIL, "GL_EXT_memory_object_fd"), FEAT(mesa_invert, UNAVAIL, UNAVAIL, "GL_MESA_pack_invert" ), FEAT(ms_scaled_blit, UNAVAIL, UNAVAIL, 
"GL_EXT_framebuffer_multisample_blit_scaled" ), FEAT(multisample, 32, 30, "GL_ARB_texture_multisample" ), FEAT(multi_draw_indirect, 43, UNAVAIL, "GL_ARB_multi_draw_indirect", "GL_EXT_multi_draw_indirect" ), FEAT(nv_conditional_render, UNAVAIL, UNAVAIL, "GL_NV_conditional_render" ), FEAT(nv_prim_restart, UNAVAIL, UNAVAIL, "GL_NV_primitive_restart" ), FEAT(nvx_gpu_memory_info, UNAVAIL, UNAVAIL, "GL_NVX_gpu_memory_info" ), FEAT(polygon_offset_clamp, 46, UNAVAIL, "GL_ARB_polygon_offset_clamp", "GL_EXT_polygon_offset_clamp"), FEAT(occlusion_query, 15, UNAVAIL, "GL_ARB_occlusion_query"), FEAT(occlusion_query_boolean, 33, 30, "GL_EXT_occlusion_query_boolean", "GL_ARB_occlusion_query2"), FEAT(qbo, 44, UNAVAIL, "GL_ARB_query_buffer_object" ), FEAT(robust_buffer_access, 43, UNAVAIL, "GL_ARB_robust_buffer_access_behavior", "GL_KHR_robust_buffer_access_behavior" ), FEAT(sample_mask, 32, 31, "GL_ARB_texture_multisample" ), FEAT(sample_shading, 40, 32, "GL_ARB_sample_shading", "GL_OES_sample_shading" ), FEAT(samplers, 33, 30, "GL_ARB_sampler_objects" ), FEAT(sampler_border_colors, 33, 32, "GL_ARB_sampler_objects", "GL_EXT_texture_border_clamp", "GL_OES_texture_border_clamp" ), FEAT(separate_shader_objects, 41, 31, "GL_ARB_seperate_shader_objects"), FEAT(shader_clock, UNAVAIL, UNAVAIL, "GL_ARB_shader_clock" ), FEAT(ssbo, 43, 31, "GL_ARB_shader_storage_buffer_object" ), FEAT(ssbo_barrier, 43, 31, "GL_ARB_shader_storage_buffer_object"), FEAT(srgb_write_control, 30, UNAVAIL, "GL_EXT_sRGB_write_control"), FEAT(stencil_texturing, 43, 31, "GL_ARB_stencil_texturing" ), FEAT(storage_multisample, 43, 31, "GL_ARB_texture_storage_multisample" ), FEAT(tessellation, 40, 32, "GL_ARB_tessellation_shader", "GL_OES_tessellation_shader", "GL_EXT_tessellation_shader" ), FEAT(texture_array, 30, 30, "GL_EXT_texture_array" ), FEAT(texture_barrier, 45, UNAVAIL, "GL_ARB_texture_barrier" ), FEAT(texture_buffer_range, 43, 32, "GL_ARB_texture_buffer_range" ), FEAT(texture_gather, 40, 31, "GL_ARB_texture_gather" ), FEAT(texture_multisample, 32, 31, "GL_ARB_texture_multisample" ), FEAT(texture_query_lod, 40, UNAVAIL, "GL_ARB_texture_query_lod", "GL_EXT_texture_query_lod"), FEAT(texture_srgb_decode, UNAVAIL, UNAVAIL, "GL_EXT_texture_sRGB_decode" ), FEAT(texture_storage, 42, 30, "GL_ARB_texture_storage" ), FEAT(texture_view, 43, UNAVAIL, "GL_ARB_texture_view", "GL_OES_texture_view", "GL_EXT_texture_view" ), FEAT(timer_query, 33, UNAVAIL, "GL_ARB_timer_query", "GL_EXT_disjoint_timer_query"), FEAT(transform_feedback, 30, 30, "GL_EXT_transform_feedback" ), FEAT(transform_feedback2, 40, 30, "GL_ARB_transform_feedback2" ), FEAT(transform_feedback3, 40, UNAVAIL, "GL_ARB_transform_feedback3" ), FEAT(transform_feedback_overflow_query, 46, UNAVAIL, "GL_ARB_transform_feedback_overflow_query" ), FEAT(txqs, 45, UNAVAIL, "GL_ARB_shader_texture_image_samples" ), FEAT(ubo, 31, 30, "GL_ARB_uniform_buffer_object" ), FEAT(viewport_array, 41, UNAVAIL, "GL_ARB_viewport_array", "GL_OES_viewport_array"), FEAT(implicit_msaa, UNAVAIL, UNAVAIL, "GL_EXT_multisampled_render_to_texture"), FEAT(anisotropic_filter, 46, UNAVAIL, "GL_EXT_texture_filter_anisotropic", "GL_ARB_texture_filter_anisotropic"), }; struct global_renderer_state { struct vrend_context *ctx0; struct vrend_context *current_ctx; struct vrend_context *current_hw_ctx; /* fence_mutex should be locked before using the query list * if async fence callback are enabled */ struct list_head waiting_query_list; struct list_head fence_list; struct list_head fence_wait_list; struct vrend_fence 
*fence_waiting; struct vrend_context *current_sync_thread_ctx; int gl_major_ver; int gl_minor_ver; mtx_t fence_mutex; thrd_t sync_thread; virgl_gl_context sync_context; cnd_t fence_cond; float tess_factors[6]; int eventfd; uint32_t max_draw_buffers; uint32_t max_texture_2d_size; uint32_t max_texture_3d_size; uint32_t max_texture_cube_size; /* inferred GL caching type */ uint32_t inferred_gl_caching_type; uint64_t features[feat_last / 64 + 1]; uint32_t finishing : 1; uint32_t use_gles : 1; uint32_t use_core_profile : 1; uint32_t use_external_blob : 1; uint32_t use_integer : 1; /* these appeared broken on at least one driver */ uint32_t use_explicit_locations : 1; /* threaded sync */ uint32_t stop_sync_thread : 1; /* async fence callback */ bool use_async_fence_cb : 1; /* Needed on GLES to inject a TCS */ uint32_t bgra_srgb_emulation_loaded : 1; #ifdef HAVE_EPOXY_EGL_H uint32_t use_egl_fence : 1; #endif }; static struct global_renderer_state vrend_state; static inline bool has_feature(enum features_id feature_id) { int slot = feature_id / 64; uint64_t mask = 1ull << (feature_id & 63); bool retval = vrend_state.features[slot] & mask ? true : false; VREND_DEBUG(dbg_feature_use, NULL, "Try using feature %s:%d\n", feature_list[feature_id].log_name, retval); return retval; } static inline void set_feature(enum features_id feature_id) { int slot = feature_id / 64; uint64_t mask = 1ull << (feature_id & 63); vrend_state.features[slot] |= mask; } static inline void clear_feature(enum features_id feature_id) { int slot = feature_id / 64; uint64_t mask = 1ull << (feature_id & 63); vrend_state.features[slot] &= ~mask; } struct vrend_linked_shader_program { struct list_head head; struct list_head sl[PIPE_SHADER_TYPES]; GLuint id; bool dual_src_linked; struct vrend_shader *ss[PIPE_SHADER_TYPES]; uint64_t vs_fs_key; uint32_t ubo_used_mask[PIPE_SHADER_TYPES]; uint32_t samplers_used_mask[PIPE_SHADER_TYPES]; GLuint *shadow_samp_mask_locs[PIPE_SHADER_TYPES]; GLuint *shadow_samp_add_locs[PIPE_SHADER_TYPES]; GLint const_location[PIPE_SHADER_TYPES]; GLuint *attrib_locs; uint32_t shadow_samp_mask[PIPE_SHADER_TYPES]; GLuint vs_ws_adjust_loc; float viewport_neg_val; GLint fs_stipple_loc; GLint fs_alpha_ref_val_loc; GLint fs_alpha_func_loc; GLint clip_enabled_loc; GLuint clip_locs[8]; uint32_t images_used_mask[PIPE_SHADER_TYPES]; GLint *img_locs[PIPE_SHADER_TYPES]; uint32_t ssbo_used_mask[PIPE_SHADER_TYPES]; int32_t tex_levels_uniform_id[PIPE_SHADER_TYPES]; struct vrend_sub_context *ref_context; uint32_t gles_use_query_texturelevel_mask; }; struct vrend_shader { struct vrend_shader *next_variant; struct vrend_shader_selector *sel; struct vrend_variable_shader_info var_sinfo; struct vrend_strarray glsl_strings; GLuint id; uint32_t uid; bool is_compiled; struct vrend_shader_key key; struct list_head programs; }; struct vrend_shader_selector { struct pipe_reference reference; unsigned num_shaders; unsigned type; struct vrend_shader_info sinfo; struct vrend_shader *current; struct tgsi_token *tokens; uint32_t req_local_mem; char *tmp_buf; uint32_t buf_len; uint32_t buf_offset; }; struct vrend_texture { struct vrend_resource base; struct pipe_sampler_state state; GLint cur_swizzle[4]; GLuint cur_srgb_decode; GLuint cur_base, cur_max; }; struct vrend_surface { struct pipe_reference reference; GLuint id; GLuint res_handle; GLuint format; GLuint val0, val1; GLuint nr_samples; struct vrend_resource *texture; }; struct vrend_sampler_state { struct pipe_sampler_state base; GLuint ids[2]; }; struct vrend_so_target { struct 
pipe_reference reference; GLuint res_handle; unsigned buffer_offset; unsigned buffer_size; struct vrend_resource *buffer; struct vrend_sub_context *sub_ctx; }; struct vrend_sampler_view { struct pipe_reference reference; GLuint id; enum virgl_formats format; GLenum target; GLuint val0, val1; GLint gl_swizzle[4]; GLenum depth_texture_mode; GLuint srgb_decode; GLuint levels; struct vrend_resource *texture; }; struct vrend_image_view { GLuint id; GLenum access; GLenum format; union { struct { unsigned first_layer:16; /**< first layer to use for array textures */ unsigned last_layer:16; /**< last layer to use for array textures */ unsigned level:8; /**< mipmap level to use */ } tex; struct { unsigned offset; /**< offset in bytes */ unsigned size; /**< size of the accessible sub-range in bytes */ } buf; } u; struct vrend_resource *texture; }; struct vrend_ssbo { struct vrend_resource *res; unsigned buffer_size; unsigned buffer_offset; }; struct vrend_abo { struct vrend_resource *res; unsigned buffer_size; unsigned buffer_offset; }; struct vrend_vertex_element { struct pipe_vertex_element base; GLenum type; GLboolean norm; GLuint nr_chan; }; struct vrend_vertex_element_array { unsigned count; struct vrend_vertex_element elements[PIPE_MAX_ATTRIBS]; GLuint id; uint32_t signed_int_bitmask; uint32_t unsigned_int_bitmask; struct vrend_sub_context *owning_sub; }; struct vrend_constants { unsigned int *consts; uint32_t num_consts; uint32_t num_allocated_consts; }; struct vrend_shader_view { int num_views; struct vrend_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS]; uint32_t res_id[PIPE_MAX_SHADER_SAMPLER_VIEWS]; uint32_t old_ids[PIPE_MAX_SHADER_SAMPLER_VIEWS]; }; struct vrend_viewport { GLint cur_x, cur_y; GLsizei width, height; GLclampd near_val, far_val; }; /* create a streamout object to support pause/resume */ struct vrend_streamout_object { GLuint id; uint32_t num_targets; uint32_t handles[16]; struct list_head head; int xfb_state; struct vrend_so_target *so_targets[16]; }; #define XFB_STATE_OFF 0 #define XFB_STATE_STARTED_NEED_BEGIN 1 #define XFB_STATE_STARTED 2 #define XFB_STATE_PAUSED 3 struct vrend_vertex_buffer { struct pipe_vertex_buffer base; uint32_t res_id; }; #define VREND_PROGRAM_NQUEUES (1 << 8) #define VREND_PROGRAM_NQUEUE_MASK (VREND_PROGRAM_NQUEUES - 1) struct vrend_sub_context { struct list_head head; virgl_gl_context gl_context; int sub_ctx_id; GLuint vaoid; uint32_t enabled_attribs_bitmask; /* Using an array of lists only adds VREND_PROGRAM_NQUEUES - 1 list_head * structures to the consumed memory, but looking up the program can * be spead up by the factor VREND_PROGRAM_NQUEUES which makes this * worthwile. 
*/ struct list_head gl_programs[VREND_PROGRAM_NQUEUES]; struct list_head cs_programs; struct util_hash_table *object_hash; struct vrend_vertex_element_array *ve; int num_vbos; int old_num_vbos; /* for cleaning up */ struct vrend_vertex_buffer vbo[PIPE_MAX_ATTRIBS]; struct pipe_index_buffer ib; uint32_t index_buffer_res_id; bool vbo_dirty; bool shader_dirty; bool cs_shader_dirty; bool stencil_state_dirty; bool image_state_dirty; bool blend_state_dirty; uint32_t long_shader_in_progress_handle[PIPE_SHADER_TYPES]; struct vrend_shader_selector *shaders[PIPE_SHADER_TYPES]; struct vrend_linked_shader_program *prog; GLuint prog_ids[PIPE_SHADER_TYPES]; struct vrend_shader_view views[PIPE_SHADER_TYPES]; struct vrend_constants consts[PIPE_SHADER_TYPES]; bool const_dirty[PIPE_SHADER_TYPES]; struct vrend_sampler_state *sampler_state[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS]; struct pipe_constant_buffer cbs[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS]; uint32_t const_bufs_used_mask[PIPE_SHADER_TYPES]; uint32_t const_bufs_dirty[PIPE_SHADER_TYPES]; int num_sampler_states[PIPE_SHADER_TYPES]; uint32_t sampler_views_dirty[PIPE_SHADER_TYPES]; int32_t texture_levels[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS]; int32_t n_samplers[PIPE_SHADER_TYPES]; uint32_t fb_id; int nr_cbufs, old_nr_cbufs; struct vrend_surface *zsurf; struct vrend_surface *surf[PIPE_MAX_COLOR_BUFS]; struct vrend_viewport vps[PIPE_MAX_VIEWPORTS]; /* viewport is negative */ uint32_t scissor_state_dirty; uint32_t viewport_state_dirty; uint32_t viewport_state_initialized; uint32_t fb_height; struct pipe_scissor_state ss[PIPE_MAX_VIEWPORTS]; struct pipe_blend_state blend_state; struct pipe_depth_stencil_alpha_state dsa_state; struct pipe_rasterizer_state rs_state; uint8_t stencil_refs[2]; bool viewport_is_negative; /* this is set if the contents of the FBO look upside down when viewed with 0,0 as the bottom corner */ bool inverted_fbo_content; GLuint blit_fb_ids[2]; struct pipe_depth_stencil_alpha_state *dsa; struct pipe_clip_state ucp_state; bool depth_test_enabled; bool alpha_test_enabled; bool stencil_test_enabled; bool framebuffer_srgb_enabled; GLuint program_id; int last_shader_idx; GLint draw_indirect_buffer; GLint draw_indirect_params_buffer; struct pipe_rasterizer_state hw_rs_state; struct pipe_blend_state hw_blend_state; struct list_head streamout_list; struct vrend_streamout_object *current_so; struct pipe_blend_color blend_color; uint32_t cond_render_q_id; GLenum cond_render_gl_mode; struct vrend_image_view image_views[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_IMAGES]; uint32_t images_used_mask[PIPE_SHADER_TYPES]; struct vrend_ssbo ssbo[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS]; uint32_t ssbo_used_mask[PIPE_SHADER_TYPES]; struct vrend_abo abo[PIPE_MAX_HW_ATOMIC_BUFFERS]; uint32_t abo_used_mask; struct vrend_context_tweaks tweaks; uint8_t swizzle_output_rgb_to_bgr; uint8_t convert_linear_to_srgb_on_write; int fake_occlusion_query_samples_passed_multiplier; int prim_mode; bool drawing; struct vrend_context *parent; }; struct vrend_untyped_resource { struct virgl_resource *resource; struct list_head head; }; struct vrend_context { char debug_name[64]; struct list_head sub_ctxs; struct list_head vrend_resources; struct vrend_sub_context *sub; struct vrend_sub_context *sub0; int ctx_id; /* has this ctx gotten an error? 
*/ bool in_error; bool ctx_switch_pending; bool pstip_inited; GLuint pstipple_tex_id; enum virgl_ctx_errors last_error; /* resource bounds to this context */ struct util_hash_table *res_hash; /* * vrend_context only works with typed virgl_resources. More specifically, * it works with vrend_resources that are inherited from pipe_resources * wrapped in virgl_resources. * * Normally, a vrend_resource is created first by * vrend_renderer_resource_create. It is then wrapped in a virgl_resource * by virgl_resource_create_from_pipe. Depending on whether it is a blob * resource or not, the two functions can be called from different paths. * But we always get both a virgl_resource and a vrend_resource as a * result. * * It is however possible that we encounter untyped virgl_resources that * have no pipe_resources. To work with untyped virgl_resources, we park * them in untyped_resources first when they are attached. We move them * into res_hash only after we get the type information and create the * vrend_resources in vrend_decode_pipe_resource_set_type. */ struct list_head untyped_resources; struct virgl_resource *untyped_resource_cache; struct list_head active_nontimer_query_list; struct vrend_shader_cfg shader_cfg; unsigned debug_flags; vrend_context_fence_retire fence_retire; void *fence_retire_data; }; static struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx, int res_handle); static void vrend_pause_render_condition(struct vrend_context *ctx, bool pause); static void vrend_update_viewport_state(struct vrend_sub_context *sub_ctx); static void vrend_update_scissor_state(struct vrend_sub_context *sub_ctx); static void vrend_destroy_query_object(void *obj_ptr); static void vrend_finish_context_switch(struct vrend_context *ctx); static void vrend_patch_blend_state(struct vrend_sub_context *sub_ctx); static void vrend_update_frontface_state(struct vrend_sub_context *ctx); static int vrender_get_glsl_version(void); static void vrend_destroy_program(struct vrend_linked_shader_program *ent); static void vrend_apply_sampler_state(struct vrend_sub_context *sub_ctx, struct vrend_resource *res, uint32_t shader_type, int id, int sampler_id, struct vrend_sampler_view *tview); static GLenum tgsitargettogltarget(const enum pipe_texture_target target, int nr_samples); void vrend_update_stencil_state(struct vrend_sub_context *sub_ctx); static struct vrend_format_table tex_conv_table[VIRGL_FORMAT_MAX_EXTENDED]; static uint32_t vrend_renderer_get_video_memory(void); static inline bool vrend_format_can_sample(enum virgl_formats format) { if (tex_conv_table[format].bindings & VIRGL_BIND_SAMPLER_VIEW) return true; #ifdef ENABLE_MINIGBM_ALLOCATION uint32_t gbm_format = 0; if (virgl_gbm_convert_format(&format, &gbm_format)) return false; if (!gbm || !gbm->device || !gbm_format) return false; uint32_t gbm_usage = GBM_BO_USE_TEXTURING; return gbm_device_is_format_supported(gbm->device, gbm_format, gbm_usage); #else return false; #endif } static inline bool vrend_format_can_readback(enum virgl_formats format) { return tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_READBACK; } static inline bool vrend_format_can_render(enum virgl_formats format) { return tex_conv_table[format].bindings & VIRGL_BIND_RENDER_TARGET; } static inline bool vrend_format_is_ds(enum virgl_formats format) { return tex_conv_table[format].bindings & VIRGL_BIND_DEPTH_STENCIL; } static inline bool vrend_format_can_scanout(enum virgl_formats format) { #ifdef ENABLE_MINIGBM_ALLOCATION uint32_t gbm_format = 0; if 
(virgl_gbm_convert_format(&format, &gbm_format)) return false; if (!gbm || !gbm->device || !gbm_format) return false; return gbm_device_is_format_supported(gbm->device, gbm_format, GBM_BO_USE_SCANOUT); #else (void)format; return true; #endif } #ifdef ENABLE_MINIGBM_ALLOCATION static inline bool vrend_format_can_texture_view(enum virgl_formats format) { return has_feature(feat_texture_view) && tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_TEXTURE_STORAGE; } #endif struct vrend_context_tweaks *vrend_get_context_tweaks(struct vrend_context *ctx) { return &ctx->sub->tweaks; } bool vrend_format_is_emulated_alpha(enum virgl_formats format) { if (vrend_state.use_gles || !vrend_state.use_core_profile) return false; return (format == VIRGL_FORMAT_A8_UNORM || format == VIRGL_FORMAT_A16_UNORM); } bool vrend_format_is_bgra(enum virgl_formats format) { return (format == VIRGL_FORMAT_B8G8R8X8_UNORM || format == VIRGL_FORMAT_B8G8R8A8_UNORM || format == VIRGL_FORMAT_B8G8R8X8_SRGB || format == VIRGL_FORMAT_B8G8R8A8_SRGB); } static bool vrend_resource_is_emulated_bgra(struct vrend_resource *res) { /* On all hosts, BGR* resources are swizzled on upload and stored with RGB* * internal format. On GLES hosts, we must perform that swizzle ourselves. * However, for externally-stored resources such as EGL images and * GBM-allocated dma-bufs, the pixel data is expected to be stored with BGR* * byte-ordering. Emulation is added during texture sampling, blitting, and * rendering to correct the red/blue color inversion caused by the mismatch * between storage expectation and the RGB* internal format given to the host * GL[ES] API. */ if (vrend_format_is_bgra(res->base.format) && (has_bit(res->storage_bits, VREND_STORAGE_EGL_IMAGE) || res->egl_image || has_bit(res->storage_bits, VREND_STORAGE_GBM_BUFFER) || res->gbm_bo)) return true; return false; } static bool vrend_resource_has_24bpp_internal_format(struct vrend_resource *res) { /* Some shared resources imported to guest mesa as EGL images occupy 24bpp instead of more common 32bpp. 
*/ return (has_bit(res->storage_bits, VREND_STORAGE_EGL_IMAGE) && (res->base.format == VIRGL_FORMAT_B8G8R8X8_UNORM || res->base.format == VIRGL_FORMAT_R8G8B8X8_UNORM)); } static bool vrend_blit_needs_swizzle(enum virgl_formats src, enum virgl_formats dst) { for (int i = 0; i < 4; ++i) { if (tex_conv_table[src].swizzle[i] != tex_conv_table[dst].swizzle[i]) return true; } return false; } static inline const char *pipe_shader_to_prefix(int shader_type) { switch (shader_type) { case PIPE_SHADER_VERTEX: return "vs"; case PIPE_SHADER_FRAGMENT: return "fs"; case PIPE_SHADER_GEOMETRY: return "gs"; case PIPE_SHADER_TESS_CTRL: return "tc"; case PIPE_SHADER_TESS_EVAL: return "te"; case PIPE_SHADER_COMPUTE: return "cs"; default: return NULL; }; } static GLenum translate_blend_func_advanced(enum gl_advanced_blend_mode blend) { switch(blend){ case BLEND_MULTIPLY: return GL_MULTIPLY_KHR; case BLEND_SCREEN: return GL_SCREEN_KHR; case BLEND_OVERLAY: return GL_OVERLAY_KHR; case BLEND_DARKEN: return GL_DARKEN_KHR; case BLEND_LIGHTEN: return GL_LIGHTEN_KHR; case BLEND_COLORDODGE: return GL_COLORDODGE_KHR; case BLEND_COLORBURN: return GL_COLORBURN_KHR; case BLEND_HARDLIGHT: return GL_HARDLIGHT_KHR; case BLEND_SOFTLIGHT: return GL_SOFTLIGHT_KHR; case BLEND_DIFFERENCE: return GL_DIFFERENCE_KHR; case BLEND_EXCLUSION: return GL_EXCLUSION_KHR; case BLEND_HSL_HUE: return GL_HSL_HUE_KHR; case BLEND_HSL_SATURATION: return GL_HSL_SATURATION_KHR; case BLEND_HSL_COLOR: return GL_HSL_COLOR_KHR; case BLEND_HSL_LUMINOSITY: return GL_HSL_LUMINOSITY_KHR; default: assert("invalid blend token()" == NULL); return 0; } } static const char *vrend_ctx_error_strings[] = { [VIRGL_ERROR_CTX_NONE] = "None", [VIRGL_ERROR_CTX_UNKNOWN] = "Unknown", [VIRGL_ERROR_CTX_ILLEGAL_SHADER] = "Illegal shader", [VIRGL_ERROR_CTX_ILLEGAL_HANDLE] = "Illegal handle", [VIRGL_ERROR_CTX_ILLEGAL_RESOURCE] = "Illegal resource", [VIRGL_ERROR_CTX_ILLEGAL_SURFACE] = "Illegal surface", [VIRGL_ERROR_CTX_ILLEGAL_VERTEX_FORMAT] = "Illegal vertex format", [VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER] = "Illegal command buffer", [VIRGL_ERROR_CTX_GLES_HAVE_TES_BUT_MISS_TCS] = "On GLES context and shader program has tesselation evaluation shader but no tesselation control shader", [VIRGL_ERROR_GL_ANY_SAMPLES_PASSED] = "Query for ANY_SAMPLES_PASSED not supported", [VIRGL_ERROR_CTX_ILLEGAL_FORMAT] = "Illegal format ID", [VIRGL_ERROR_CTX_ILLEGAL_SAMPLER_VIEW_TARGET] = "Illegat target for sampler view", [VIRGL_ERROR_CTX_TRANSFER_IOV_BOUNDS] = "IOV data size exceeds resource capacity", [VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND]= "Dual source blend not supported", [VIRGL_ERROR_CTX_UNSUPPORTED_FUNCTION] = "Unsupported host function called", }; void vrend_report_context_error_internal(const char *fname, struct vrend_context *ctx, enum virgl_ctx_errors error, uint32_t value) { ctx->in_error = true; ctx->last_error = error; vrend_printf("%s: context error reported %d \"%s\" %s %d\n", fname, ctx->ctx_id, ctx->debug_name, vrend_ctx_error_strings[error], value); } #define CORE_PROFILE_WARN_NONE 0 #define CORE_PROFILE_WARN_STIPPLE 1 #define CORE_PROFILE_WARN_POLYGON_MODE 2 #define CORE_PROFILE_WARN_TWO_SIDE 3 #define CORE_PROFILE_WARN_CLAMP 4 #define CORE_PROFILE_WARN_SHADE_MODEL 5 static const char *vrend_core_profile_warn_strings[] = { [CORE_PROFILE_WARN_NONE] = "None", [CORE_PROFILE_WARN_STIPPLE] = "Stipple", [CORE_PROFILE_WARN_POLYGON_MODE] = "Polygon Mode", [CORE_PROFILE_WARN_TWO_SIDE] = "Two Side", [CORE_PROFILE_WARN_CLAMP] = "Clamping", [CORE_PROFILE_WARN_SHADE_MODEL] = "Shade Model", 
}; static void __report_core_warn(const char *fname, struct vrend_context *ctx, enum virgl_ctx_errors error) { vrend_printf("%s: core profile violation reported %d \"%s\" %s\n", fname, ctx->ctx_id, ctx->debug_name, vrend_core_profile_warn_strings[error]); } #define report_core_warn(ctx, error) __report_core_warn(__func__, ctx, error) #define GLES_WARN_NONE 0 #define GLES_WARN_STIPPLE 1 #define GLES_WARN_POLYGON_MODE 2 #define GLES_WARN_DEPTH_RANGE 3 #define GLES_WARN_POINT_SIZE 4 #define GLES_WARN_SEAMLESS_CUBE_MAP 5 #define GLES_WARN_LOD_BIAS 6 #define GLES_WARN_TEXTURE_RECT 7 #define GLES_WARN_OFFSET_LINE 8 #define GLES_WARN_OFFSET_POINT 9 //#define GLES_WARN_ free slot 10 #define GLES_WARN_FLATSHADE_FIRST 11 #define GLES_WARN_LINE_SMOOTH 12 #define GLES_WARN_POLY_SMOOTH 13 #define GLES_WARN_DEPTH_CLEAR 14 #define GLES_WARN_LOGIC_OP 15 #define GLES_WARN_TIMESTAMP 16 #define GLES_WARN_IMPLICIT_MSAA_SURFACE 17 ASSERTED static const char *vrend_gles_warn_strings[] = { [GLES_WARN_NONE] = "None", [GLES_WARN_STIPPLE] = "Stipple", [GLES_WARN_POLYGON_MODE] = "Polygon Mode", [GLES_WARN_DEPTH_RANGE] = "Depth Range", [GLES_WARN_POINT_SIZE] = "Point Size", [GLES_WARN_SEAMLESS_CUBE_MAP] = "Seamless Cube Map", [GLES_WARN_LOD_BIAS] = "Lod Bias", [GLES_WARN_TEXTURE_RECT] = "Texture Rect", [GLES_WARN_OFFSET_LINE] = "Offset Line", [GLES_WARN_OFFSET_POINT] = "Offset Point", [GLES_WARN_FLATSHADE_FIRST] = "Flatshade First", [GLES_WARN_LINE_SMOOTH] = "Line Smooth", [GLES_WARN_POLY_SMOOTH] = "Poly Smooth", [GLES_WARN_DEPTH_CLEAR] = "Depth Clear", [GLES_WARN_LOGIC_OP] = "LogicOp", [GLES_WARN_TIMESTAMP] = "GL_TIMESTAMP", [GLES_WARN_IMPLICIT_MSAA_SURFACE] = "Implicit MSAA Surface", }; static void __report_gles_warn(ASSERTED const char *fname, ASSERTED struct vrend_context *ctx, ASSERTED enum virgl_ctx_errors error) { VREND_DEBUG(dbg_gles, ctx, "%s: GLES violation - %s\n", fname, vrend_gles_warn_strings[error]); } #define report_gles_warn(ctx, error) __report_gles_warn(__func__, ctx, error) static void __report_gles_missing_func(ASSERTED const char *fname, ASSERTED struct vrend_context *ctx, ASSERTED const char *missf) { VREND_DEBUG(dbg_gles, ctx, "%s: GLES function %s is missing\n", fname, missf); } #define report_gles_missing_func(ctx, missf) __report_gles_missing_func(__func__, ctx, missf) static void init_features(int gl_ver, int gles_ver) { for (enum features_id id = 0; id < feat_last; id++) { if (gl_ver >= feature_list[id].gl_ver || gles_ver >= feature_list[id].gles_ver) { set_feature(id); VREND_DEBUG(dbg_features, NULL, "Host feature %s provided by %s %3.1f\n", feature_list[id].log_name, (gl_ver > 0 ? "GL" : "GLES"), 0.1f * (gl_ver > 0 ? 
gl_ver : gles_ver)); } else { for (uint32_t i = 0; i < FEAT_MAX_EXTS; i++) { if (!feature_list[id].gl_ext[i]) break; if (epoxy_has_gl_extension(feature_list[id].gl_ext[i])) { set_feature(id); VREND_DEBUG(dbg_features, NULL, "Host feature %s provide by %s\n", feature_list[id].log_name, feature_list[id].gl_ext[i]); break; } } } } } static void vrend_destroy_surface(struct vrend_surface *surf) { if (surf->id != surf->texture->id) glDeleteTextures(1, &surf->id); vrend_resource_reference(&surf->texture, NULL); free(surf); } static inline void vrend_surface_reference(struct vrend_surface **ptr, struct vrend_surface *surf) { struct vrend_surface *old_surf = *ptr; if (pipe_reference(&(*ptr)->reference, &surf->reference)) vrend_destroy_surface(old_surf); *ptr = surf; } static void vrend_destroy_sampler_view(struct vrend_sampler_view *samp) { if (samp->texture->id != samp->id) glDeleteTextures(1, &samp->id); vrend_resource_reference(&samp->texture, NULL); free(samp); } static inline void vrend_sampler_view_reference(struct vrend_sampler_view **ptr, struct vrend_sampler_view *view) { struct vrend_sampler_view *old_view = *ptr; if (pipe_reference(&(*ptr)->reference, &view->reference)) vrend_destroy_sampler_view(old_view); *ptr = view; } static void vrend_destroy_so_target(struct vrend_so_target *target) { vrend_resource_reference(&target->buffer, NULL); free(target); } static inline void vrend_so_target_reference(struct vrend_so_target **ptr, struct vrend_so_target *target) { struct vrend_so_target *old_target = *ptr; if (pipe_reference(&(*ptr)->reference, &target->reference)) vrend_destroy_so_target(old_target); *ptr = target; } static void vrend_shader_dump(struct vrend_shader *shader) { const char *prefix = pipe_shader_to_prefix(shader->sel->type); if (shader->sel->tmp_buf) vrend_printf("%s: %d TGSI:\n%s\n", prefix, shader->id, shader->sel->tmp_buf); vrend_printf("%s: %d GLSL:\n", prefix, shader->id); strarray_dump_with_line_numbers(&shader->glsl_strings); vrend_printf("\n"); } static void vrend_shader_destroy(struct vrend_shader *shader) { struct vrend_linked_shader_program *ent, *tmp; LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &shader->programs, sl[shader->sel->type]) { vrend_destroy_program(ent); } glDeleteShader(shader->id); strarray_free(&shader->glsl_strings, true); free(shader); } static void vrend_destroy_shader_selector(struct vrend_shader_selector *sel) { struct vrend_shader *p = sel->current, *c; unsigned i; while (p) { c = p->next_variant; vrend_shader_destroy(p); p = c; } if (sel->sinfo.so_names) for (i = 0; i < sel->sinfo.so_info.num_outputs; i++) free(sel->sinfo.so_names[i]); free(sel->tmp_buf); free(sel->sinfo.so_names); free(sel->sinfo.sampler_arrays); free(sel->sinfo.image_arrays); free(sel->tokens); free(sel); } static inline int conv_shader_type(int type) { switch (type) { case PIPE_SHADER_VERTEX: return GL_VERTEX_SHADER; case PIPE_SHADER_FRAGMENT: return GL_FRAGMENT_SHADER; case PIPE_SHADER_GEOMETRY: return GL_GEOMETRY_SHADER; case PIPE_SHADER_TESS_CTRL: return GL_TESS_CONTROL_SHADER; case PIPE_SHADER_TESS_EVAL: return GL_TESS_EVALUATION_SHADER; case PIPE_SHADER_COMPUTE: return GL_COMPUTE_SHADER; default: return 0; }; } static bool vrend_compile_shader(struct vrend_sub_context *sub_ctx, struct vrend_shader *shader) { GLint param; const char *shader_parts[SHADER_MAX_STRINGS]; for (int i = 0; i < shader->glsl_strings.num_strings; i++) shader_parts[i] = shader->glsl_strings.strings[i].buf; shader->id = glCreateShader(conv_shader_type(shader->sel->type)); glShaderSource(shader->id, 
shader->glsl_strings.num_strings, shader_parts, NULL); glCompileShader(shader->id); glGetShaderiv(shader->id, GL_COMPILE_STATUS, &param); if (param == GL_FALSE) { char infolog[65536]; int len; glGetShaderInfoLog(shader->id, 65536, &len, infolog); vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0); vrend_printf("shader failed to compile\n%s\n", infolog); vrend_shader_dump(shader); return false; } shader->is_compiled = true; return true; } static inline void vrend_shader_state_reference(struct vrend_shader_selector **ptr, struct vrend_shader_selector *shader) { struct vrend_shader_selector *old_shader = *ptr; if (pipe_reference(&(*ptr)->reference, &shader->reference)) vrend_destroy_shader_selector(old_shader); *ptr = shader; } void vrend_insert_format(struct vrend_format_table *entry, uint32_t bindings, uint32_t flags) { tex_conv_table[entry->format] = *entry; tex_conv_table[entry->format].bindings = bindings; tex_conv_table[entry->format].flags = flags; } void vrend_insert_format_swizzle(int override_format, struct vrend_format_table *entry, uint32_t bindings, uint8_t swizzle[4], uint32_t flags) { int i; tex_conv_table[override_format] = *entry; tex_conv_table[override_format].bindings = bindings; tex_conv_table[override_format].flags = flags | VIRGL_TEXTURE_NEED_SWIZZLE; for (i = 0; i < 4; i++) tex_conv_table[override_format].swizzle[i] = swizzle[i]; } const struct vrend_format_table * vrend_get_format_table_entry(enum virgl_formats format) { return &tex_conv_table[format]; } static bool vrend_is_timer_query(GLenum gltype) { return gltype == GL_TIMESTAMP || gltype == GL_TIME_ELAPSED; } static void vrend_use_program(struct vrend_sub_context *sub_ctx, GLuint program_id) { if (sub_ctx->program_id != program_id) { glUseProgram(program_id); sub_ctx->program_id = program_id; } } static void vrend_init_pstipple_texture(struct vrend_context *ctx) { glGenTextures(1, &ctx->pstipple_tex_id); glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id); glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, 32, 32, 0, GL_RED, GL_UNSIGNED_BYTE, NULL); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); ctx->pstip_inited = true; } static void vrend_depth_test_enable(struct vrend_context *ctx, bool depth_test_enable) { if (ctx->sub->depth_test_enabled != depth_test_enable) { ctx->sub->depth_test_enabled = depth_test_enable; if (depth_test_enable) glEnable(GL_DEPTH_TEST); else glDisable(GL_DEPTH_TEST); } } static void vrend_alpha_test_enable(struct vrend_context *ctx, bool alpha_test_enable) { if (vrend_state.use_core_profile) { /* handled in shaders */ return; } if (ctx->sub->alpha_test_enabled != alpha_test_enable) { ctx->sub->alpha_test_enabled = alpha_test_enable; if (alpha_test_enable) glEnable(GL_ALPHA_TEST); else glDisable(GL_ALPHA_TEST); } } static void vrend_stencil_test_enable(struct vrend_sub_context *sub_ctx, bool stencil_test_enable) { if (sub_ctx->stencil_test_enabled != stencil_test_enable) { sub_ctx->stencil_test_enabled = stencil_test_enable; if (stencil_test_enable) glEnable(GL_STENCIL_TEST); else glDisable(GL_STENCIL_TEST); } } ASSERTED static void dump_stream_out(struct pipe_stream_output_info *so) { unsigned i; if (!so) return; vrend_printf("streamout: %d\n", so->num_outputs); vrend_printf("strides: "); for (i = 0; i < 4; i++) vrend_printf("%d ", so->stride[i]); vrend_printf("\n"); 
vrend_printf("outputs:\n"); for (i = 0; i < so->num_outputs; i++) { vrend_printf("\t%d: reg: %d sc: %d, nc: %d ob: %d do: %d st: %d\n", i, so->output[i].register_index, so->output[i].start_component, so->output[i].num_components, so->output[i].output_buffer, so->output[i].dst_offset, so->output[i].stream); } } static char *get_skip_str(int *skip_val) { char *start_skip = NULL; if (*skip_val < 0) { *skip_val = 0; return NULL; } if (*skip_val == 1) { start_skip = strdup("gl_SkipComponents1"); *skip_val -= 1; } else if (*skip_val == 2) { start_skip = strdup("gl_SkipComponents2"); *skip_val -= 2; } else if (*skip_val == 3) { start_skip = strdup("gl_SkipComponents3"); *skip_val -= 3; } else if (*skip_val >= 4) { start_skip = strdup("gl_SkipComponents4"); *skip_val -= 4; } return start_skip; } static void set_stream_out_varyings(ASSERTED struct vrend_sub_context *sub_ctx, int prog_id, struct vrend_shader_info *sinfo) { struct pipe_stream_output_info *so = &sinfo->so_info; char *varyings[PIPE_MAX_SHADER_OUTPUTS*2]; int j; uint i, n_outputs = 0; int last_buffer = 0; char *start_skip; int buf_offset = 0; int skip; if (!so->num_outputs) return; VREND_DEBUG_EXT(dbg_shader_streamout, sub_ctx->parent, dump_stream_out(so)); for (i = 0; i < so->num_outputs; i++) { if (last_buffer != so->output[i].output_buffer) { skip = so->stride[last_buffer] - buf_offset; while (skip) { start_skip = get_skip_str(&skip); if (start_skip) varyings[n_outputs++] = start_skip; } for (j = last_buffer; j < so->output[i].output_buffer; j++) varyings[n_outputs++] = strdup("gl_NextBuffer"); last_buffer = so->output[i].output_buffer; buf_offset = 0; } skip = so->output[i].dst_offset - buf_offset; while (skip) { start_skip = get_skip_str(&skip); if (start_skip) varyings[n_outputs++] = start_skip; } buf_offset = so->output[i].dst_offset; buf_offset += so->output[i].num_components; if (sinfo->so_names[i]) varyings[n_outputs++] = strdup(sinfo->so_names[i]); } skip = so->stride[last_buffer] - buf_offset; while (skip) { start_skip = get_skip_str(&skip); if (start_skip) varyings[n_outputs++] = start_skip; } glTransformFeedbackVaryings(prog_id, n_outputs, (const GLchar **)varyings, GL_INTERLEAVED_ATTRIBS_EXT); for (i = 0; i < n_outputs; i++) if (varyings[i]) free(varyings[i]); } static int bind_sampler_locs(struct vrend_linked_shader_program *sprog, int shader_type, int next_sampler_id) { const struct vrend_shader_info *sinfo = &sprog->ss[shader_type]->sel->sinfo; if (sinfo->samplers_used_mask) { uint32_t mask = sinfo->samplers_used_mask; sprog->shadow_samp_mask[shader_type] = sinfo->shadow_samp_mask; if (sinfo->shadow_samp_mask) { unsigned nsamp = util_bitcount(sinfo->samplers_used_mask); sprog->shadow_samp_mask_locs[shader_type] = calloc(nsamp, sizeof(uint32_t)); sprog->shadow_samp_add_locs[shader_type] = calloc(nsamp, sizeof(uint32_t)); } else { sprog->shadow_samp_mask_locs[shader_type] = sprog->shadow_samp_add_locs[shader_type] = NULL; } const char *prefix = pipe_shader_to_prefix(shader_type); int sampler_index = 0; while(mask) { uint32_t i = u_bit_scan(&mask); char name[64]; if (sinfo->num_sampler_arrays) { int arr_idx = vrend_shader_lookup_sampler_array(sinfo, i); snprintf(name, 32, "%ssamp%d[%d]", prefix, arr_idx, i - arr_idx); } else snprintf(name, 32, "%ssamp%d", prefix, i); glUniform1i(glGetUniformLocation(sprog->id, name), next_sampler_id++); if (sinfo->shadow_samp_mask & (1 << i)) { snprintf(name, 32, "%sshadmask%d", prefix, i); sprog->shadow_samp_mask_locs[shader_type][sampler_index] = glGetUniformLocation(sprog->id, name); 
snprintf(name, 32, "%sshadadd%d", prefix, i); sprog->shadow_samp_add_locs[shader_type][sampler_index] = glGetUniformLocation(sprog->id, name); } sampler_index++; } } else { sprog->shadow_samp_mask_locs[shader_type] = NULL; sprog->shadow_samp_add_locs[shader_type] = NULL; sprog->shadow_samp_mask[shader_type] = 0; } sprog->samplers_used_mask[shader_type] = sinfo->samplers_used_mask; return next_sampler_id; } static void bind_const_locs(struct vrend_linked_shader_program *sprog, int shader_type) { if (sprog->ss[shader_type]->sel->sinfo.num_consts) { char name[32]; snprintf(name, 32, "%sconst0", pipe_shader_to_prefix(shader_type)); sprog->const_location[shader_type] = glGetUniformLocation(sprog->id, name); } else sprog->const_location[shader_type] = -1; } static int bind_ubo_locs(struct vrend_linked_shader_program *sprog, int shader_type, int next_ubo_id) { if (!has_feature(feat_ubo)) return next_ubo_id; const struct vrend_shader_info *sinfo = &sprog->ss[shader_type]->sel->sinfo; if (sinfo->ubo_used_mask) { const char *prefix = pipe_shader_to_prefix(shader_type); unsigned mask = sinfo->ubo_used_mask; while (mask) { uint32_t ubo_idx = u_bit_scan(&mask); char name[32]; if (sinfo->ubo_indirect) snprintf(name, 32, "%subo[%d]", prefix, ubo_idx - 1); else snprintf(name, 32, "%subo%d", prefix, ubo_idx); GLuint loc = glGetUniformBlockIndex(sprog->id, name); glUniformBlockBinding(sprog->id, loc, next_ubo_id++); } } sprog->ubo_used_mask[shader_type] = sinfo->ubo_used_mask; return next_ubo_id; } static void bind_ssbo_locs(struct vrend_linked_shader_program *sprog, int shader_type) { if (!has_feature(feat_ssbo)) return; sprog->ssbo_used_mask[shader_type] = sprog->ss[shader_type]->sel->sinfo.ssbo_used_mask; } static void bind_image_locs(struct vrend_linked_shader_program *sprog, int shader_type) { int i; char name[32]; const char *prefix = pipe_shader_to_prefix(shader_type); const struct vrend_shader_info *sinfo = &sprog->ss[shader_type]->sel->sinfo; uint32_t mask = sinfo->images_used_mask; if (!mask && !sinfo->num_image_arrays) return; if (!has_feature(feat_images)) return; int nsamp = util_last_bit(mask); if (nsamp) { sprog->img_locs[shader_type] = calloc(nsamp, sizeof(GLint)); if (!sprog->img_locs[shader_type]) return; } else sprog->img_locs[shader_type] = NULL; if (sinfo->num_image_arrays) { for (i = 0; i < sinfo->num_image_arrays; i++) { struct vrend_array *img_array = &sinfo->image_arrays[i]; for (int j = 0; j < img_array->array_size; j++) { snprintf(name, 32, "%simg%d[%d]", prefix, img_array->first, j); sprog->img_locs[shader_type][img_array->first + j] = glGetUniformLocation(sprog->id, name); if (sprog->img_locs[shader_type][img_array->first + j] == -1) vrend_printf( "failed to get uniform loc for image %s\n", name); } } } else if (mask) { for (i = 0; i < nsamp; i++) { if (mask & (1 << i)) { snprintf(name, 32, "%simg%d", prefix, i); sprog->img_locs[shader_type][i] = glGetUniformLocation(sprog->id, name); if (sprog->img_locs[shader_type][i] == -1) vrend_printf( "failed to get uniform loc for image %s\n", name); } else { sprog->img_locs[shader_type][i] = -1; } } } sprog->images_used_mask[shader_type] = mask; } static struct vrend_linked_shader_program *add_cs_shader_program(struct vrend_context *ctx, struct vrend_shader *cs) { struct vrend_linked_shader_program *sprog = CALLOC_STRUCT(vrend_linked_shader_program); GLuint prog_id; GLint lret; prog_id = glCreateProgram(); glAttachShader(prog_id, cs->id); glLinkProgram(prog_id); glGetProgramiv(prog_id, GL_LINK_STATUS, &lret); if (lret == GL_FALSE) { char 
infolog[65536]; int len; glGetProgramInfoLog(prog_id, 65536, &len, infolog); vrend_printf("got error linking\n%s\n", infolog); /* dump shaders */ vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0); vrend_shader_dump(cs); glDeleteProgram(prog_id); free(sprog); return NULL; } sprog->ss[PIPE_SHADER_COMPUTE] = cs; list_add(&sprog->sl[PIPE_SHADER_COMPUTE], &cs->programs); sprog->id = prog_id; list_addtail(&sprog->head, &ctx->sub->cs_programs); vrend_use_program(ctx->sub, prog_id); bind_sampler_locs(sprog, PIPE_SHADER_COMPUTE, 0); bind_ubo_locs(sprog, PIPE_SHADER_COMPUTE, 0); bind_ssbo_locs(sprog, PIPE_SHADER_COMPUTE); bind_const_locs(sprog, PIPE_SHADER_COMPUTE); bind_image_locs(sprog, PIPE_SHADER_COMPUTE); return sprog; } static struct vrend_linked_shader_program *add_shader_program(struct vrend_sub_context *sub_ctx, struct vrend_shader *vs, struct vrend_shader *fs, struct vrend_shader *gs, struct vrend_shader *tcs, struct vrend_shader *tes) { struct vrend_linked_shader_program *sprog = CALLOC_STRUCT(vrend_linked_shader_program); char name[64]; int i; GLuint prog_id; GLint lret; int last_shader; if (!sprog) return NULL; prog_id = glCreateProgram(); glAttachShader(prog_id, vs->id); if (tcs && tcs->id > 0) glAttachShader(prog_id, tcs->id); if (tes && tes->id > 0) glAttachShader(prog_id, tes->id); if (gs) { if (gs->id > 0) glAttachShader(prog_id, gs->id); set_stream_out_varyings(sub_ctx, prog_id, &gs->sel->sinfo); } else if (tes) set_stream_out_varyings(sub_ctx, prog_id, &tes->sel->sinfo); else set_stream_out_varyings(sub_ctx, prog_id, &vs->sel->sinfo); glAttachShader(prog_id, fs->id); if (fs->sel->sinfo.num_outputs > 1) { sprog->dual_src_linked = util_blend_state_is_dual(&sub_ctx->blend_state, 0); if (sprog->dual_src_linked) { if (has_feature(feat_dual_src_blend)) { if (!vrend_state.use_gles) { glBindFragDataLocationIndexed(prog_id, 0, 0, "fsout_c0"); glBindFragDataLocationIndexed(prog_id, 0, 1, "fsout_c1"); } else { glBindFragDataLocationIndexedEXT(prog_id, 0, 0, "fsout_c0"); glBindFragDataLocationIndexedEXT(prog_id, 0, 1, "fsout_c1"); } } else { vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_DUAL_SRC_BLEND, 0); } } else if (has_feature(feat_dual_src_blend)) { for (int i = 0; i < fs->sel->sinfo.num_outputs; ++i) { if (fs->sel->sinfo.fs_output_layout[i] >= 0) { char buf[64]; snprintf(buf, sizeof(buf), "fsout_c%d", fs->sel->sinfo.fs_output_layout[i]); if (!vrend_state.use_gles) glBindFragDataLocationIndexed(prog_id, fs->sel->sinfo.fs_output_layout[i], 0, buf); else glBindFragDataLocationIndexedEXT(prog_id, fs->sel->sinfo.fs_output_layout[i], 0, buf); } } } else { vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_UNSUPPORTED_FUNCTION, 0); } } else sprog->dual_src_linked = false; if (has_feature(feat_gles31_vertex_attrib_binding)) { uint32_t mask = vs->sel->sinfo.attrib_input_mask; while (mask) { i = u_bit_scan(&mask); snprintf(name, 32, "in_%d", i); glBindAttribLocation(prog_id, i, name); } } glLinkProgram(prog_id); glGetProgramiv(prog_id, GL_LINK_STATUS, &lret); if (lret == GL_FALSE) { char infolog[65536]; int len; glGetProgramInfoLog(prog_id, 65536, &len, infolog); vrend_printf("got error linking\n%s\n", infolog); /* dump shaders */ vrend_report_context_error(sub_ctx->parent, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0); vrend_shader_dump(vs); if (tcs) vrend_shader_dump(tcs); if (tes) vrend_shader_dump(tes); if (gs) vrend_shader_dump(gs); vrend_shader_dump(fs); glDeleteProgram(prog_id); free(sprog); return NULL; } sprog->ss[PIPE_SHADER_VERTEX] = vs; 
sprog->ss[PIPE_SHADER_FRAGMENT] = fs; sprog->vs_fs_key = (((uint64_t)fs->id) << 32) | (vs->id & ~VREND_PROGRAM_NQUEUE_MASK) | (sprog->dual_src_linked ? 1 : 0); sprog->ss[PIPE_SHADER_GEOMETRY] = gs; sprog->ss[PIPE_SHADER_TESS_CTRL] = tcs; sprog->ss[PIPE_SHADER_TESS_EVAL] = tes; list_add(&sprog->sl[PIPE_SHADER_VERTEX], &vs->programs); list_add(&sprog->sl[PIPE_SHADER_FRAGMENT], &fs->programs); if (gs) list_add(&sprog->sl[PIPE_SHADER_GEOMETRY], &gs->programs); if (tcs) list_add(&sprog->sl[PIPE_SHADER_TESS_CTRL], &tcs->programs); if (tes) list_add(&sprog->sl[PIPE_SHADER_TESS_EVAL], &tes->programs); last_shader = tes ? PIPE_SHADER_TESS_EVAL : (gs ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT); sprog->id = prog_id; list_addtail(&sprog->head, &sub_ctx->gl_programs[vs->id & VREND_PROGRAM_NQUEUE_MASK]); if (fs->key.pstipple_tex) sprog->fs_stipple_loc = glGetUniformLocation(prog_id, "pstipple_sampler"); else sprog->fs_stipple_loc = -1; if (vrend_state.use_core_profile) { sprog->fs_alpha_ref_val_loc = glGetUniformLocation(prog_id, "alpha_ref_val"); sprog->fs_alpha_func_loc = glGetUniformLocation(prog_id, "alpha_func"); } else { sprog->fs_alpha_ref_val_loc = -1; sprog->fs_alpha_func_loc = -1; } sprog->vs_ws_adjust_loc = glGetUniformLocation(prog_id, "winsys_adjust_y"); vrend_use_program(sub_ctx, prog_id); int next_ubo_id = 0, next_sampler_id = 0; for (int shader_type = PIPE_SHADER_VERTEX; shader_type <= last_shader; shader_type++) { if (!sprog->ss[shader_type]) continue; next_sampler_id = bind_sampler_locs(sprog, shader_type, next_sampler_id); bind_const_locs(sprog, shader_type); next_ubo_id = bind_ubo_locs(sprog, shader_type, next_ubo_id); bind_image_locs(sprog, shader_type); bind_ssbo_locs(sprog, shader_type); } if (!has_feature(feat_gles31_vertex_attrib_binding)) { if (vs->sel->sinfo.num_inputs) { sprog->attrib_locs = calloc(vs->sel->sinfo.num_inputs, sizeof(uint32_t)); if (sprog->attrib_locs) { for (i = 0; i < vs->sel->sinfo.num_inputs; i++) { snprintf(name, 32, "in_%d", i); sprog->attrib_locs[i] = glGetAttribLocation(prog_id, name); } } } else sprog->attrib_locs = NULL; } if (has_feature(feat_cull_distance)) { sprog->clip_enabled_loc = glGetUniformLocation(prog_id, "clip_plane_enabled"); for (i = 0; i < VIRGL_NUM_CLIP_PLANES; i++) { snprintf(name, 32, "clipp[%d]", i); sprog->clip_locs[i] = glGetUniformLocation(prog_id, name); } } return sprog; } static struct vrend_linked_shader_program *lookup_cs_shader_program(struct vrend_context *ctx, GLuint cs_id) { struct vrend_linked_shader_program *ent; LIST_FOR_EACH_ENTRY(ent, &ctx->sub->cs_programs, head) { if (ent->ss[PIPE_SHADER_COMPUTE]->id == cs_id) { list_del(&ent->head); list_add(&ent->head, &ctx->sub->cs_programs); return ent; } } return NULL; } static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_sub_context *sub_ctx, GLuint vs_id, GLuint fs_id, GLuint gs_id, GLuint tcs_id, GLuint tes_id, bool dual_src) { uint64_t vs_fs_key = (((uint64_t)fs_id) << 32) | (vs_id & ~VREND_PROGRAM_NQUEUE_MASK) | (dual_src ? 
1 : 0); struct vrend_linked_shader_program *ent; struct list_head *programs = &sub_ctx->gl_programs[vs_id & VREND_PROGRAM_NQUEUE_MASK]; LIST_FOR_EACH_ENTRY(ent, programs, head) { if (likely(ent->vs_fs_key != vs_fs_key)) continue; if (ent->ss[PIPE_SHADER_GEOMETRY] && ent->ss[PIPE_SHADER_GEOMETRY]->id != gs_id) continue; if (ent->ss[PIPE_SHADER_TESS_CTRL] && ent->ss[PIPE_SHADER_TESS_CTRL]->id != tcs_id) continue; if (ent->ss[PIPE_SHADER_TESS_EVAL] && ent->ss[PIPE_SHADER_TESS_EVAL]->id != tes_id) continue; /* put the entry in front */ if (programs->next != &ent->head) { list_del(&ent->head); list_add(&ent->head, programs); } return ent; } return NULL; } static void vrend_destroy_program(struct vrend_linked_shader_program *ent) { int i; if (ent->ref_context && ent->ref_context->prog == ent) ent->ref_context->prog = NULL; glDeleteProgram(ent->id); list_del(&ent->head); for (i = PIPE_SHADER_VERTEX; i <= PIPE_SHADER_COMPUTE; i++) { if (ent->ss[i]) list_del(&ent->sl[i]); free(ent->shadow_samp_mask_locs[i]); free(ent->shadow_samp_add_locs[i]); free(ent->img_locs[i]); } free(ent->attrib_locs); free(ent); } static void vrend_free_programs(struct vrend_sub_context *sub) { struct vrend_linked_shader_program *ent, *tmp; if (!LIST_IS_EMPTY(&sub->cs_programs)) { LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->cs_programs, head) vrend_destroy_program(ent); } for (unsigned i = 0; i < VREND_PROGRAM_NQUEUES; ++i) { if (!LIST_IS_EMPTY(&sub->gl_programs[i])) { LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->gl_programs[i], head) vrend_destroy_program(ent); } } } static void vrend_destroy_streamout_object(struct vrend_streamout_object *obj) { unsigned i; list_del(&obj->head); for (i = 0; i < obj->num_targets; i++) vrend_so_target_reference(&obj->so_targets[i], NULL); if (has_feature(feat_transform_feedback2)) glDeleteTransformFeedbacks(1, &obj->id); FREE(obj); } void vrend_sync_make_current(virgl_gl_context gl_cxt) { GLsync sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0); vrend_clicbs->make_current(gl_cxt); glWaitSync(sync, 0, GL_TIMEOUT_IGNORED); glDeleteSync(sync); } int vrend_create_surface(struct vrend_context *ctx, uint32_t handle, uint32_t res_handle, uint32_t format, uint32_t val0, uint32_t val1, uint32_t nr_samples) { struct vrend_surface *surf; struct vrend_resource *res; uint32_t ret_handle; if (format >= PIPE_FORMAT_COUNT) { return EINVAL; } res = vrend_renderer_ctx_res_lookup(ctx, res_handle); if (!res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle); return EINVAL; } surf = CALLOC_STRUCT(vrend_surface); if (!surf) return ENOMEM; surf->res_handle = res_handle; surf->format = format; surf->val0 = val0; surf->val1 = val1; surf->id = res->id; surf->nr_samples = nr_samples; if (!has_bit(res->storage_bits, VREND_STORAGE_GL_BUFFER) && has_bit(res->storage_bits, VREND_STORAGE_GL_IMMUTABLE) && has_feature(feat_texture_view)) { /* We don't need texture views for buffer objects. * Otherwise we only need a texture view if the * a) formats differ between the surface and base texture * b) we need to map a sub range > 1 layer to a surface, * GL can make a single layer fine without a view, and it * can map the whole texure fine. In those cases we don't * create a texture view. 
*/ int first_layer = surf->val1 & 0xffff; int last_layer = (surf->val1 >> 16) & 0xffff; if ((first_layer != last_layer && (first_layer != 0 || (last_layer != (int)util_max_layer(&res->base, surf->val0)))) || surf->format != res->base.format) { GLenum target = res->target; GLenum internalformat = tex_conv_table[format].internalformat; if (vrend_resource_has_24bpp_internal_format(res)) internalformat = GL_RGB8; VREND_DEBUG(dbg_tex, ctx, "Create texture view from %s for %s\n", util_format_name(res->base.format), util_format_name(surf->format)); glGenTextures(1, &surf->id); if (vrend_state.use_gles) { if (target == GL_TEXTURE_RECTANGLE_NV || target == GL_TEXTURE_1D) target = GL_TEXTURE_2D; else if (target == GL_TEXTURE_1D_ARRAY) target = GL_TEXTURE_2D_ARRAY; } glTextureView(surf->id, target, res->id, internalformat, 0, res->base.last_level + 1, first_layer, last_layer - first_layer + 1); } } pipe_reference_init(&surf->reference, 1); vrend_resource_reference(&surf->texture, res); ret_handle = vrend_renderer_object_insert(ctx, surf, handle, VIRGL_OBJECT_SURFACE); if (ret_handle == 0) { FREE(surf); return ENOMEM; } return 0; } static void vrend_destroy_surface_object(void *obj_ptr) { struct vrend_surface *surface = obj_ptr; vrend_surface_reference(&surface, NULL); } static void vrend_destroy_sampler_view_object(void *obj_ptr) { struct vrend_sampler_view *samp = obj_ptr; vrend_sampler_view_reference(&samp, NULL); } static void vrend_destroy_so_target_object(void *obj_ptr) { struct vrend_so_target *target = obj_ptr; struct vrend_sub_context *sub_ctx = target->sub_ctx; struct vrend_streamout_object *obj, *tmp; bool found; unsigned i; LIST_FOR_EACH_ENTRY_SAFE(obj, tmp, &sub_ctx->streamout_list, head) { found = false; for (i = 0; i < obj->num_targets; i++) { if (obj->so_targets[i] == target) { found = true; break; } } if (found) { if (obj == sub_ctx->current_so) sub_ctx->current_so = NULL; if (obj->xfb_state == XFB_STATE_PAUSED) { if (has_feature(feat_transform_feedback2)) glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, obj->id); glEndTransformFeedback(); if (sub_ctx->current_so && has_feature(feat_transform_feedback2)) glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, sub_ctx->current_so->id); } vrend_destroy_streamout_object(obj); } } vrend_so_target_reference(&target, NULL); } static void vrend_destroy_vertex_elements_object(void *obj_ptr) { struct vrend_vertex_element_array *v = obj_ptr; if (v == v->owning_sub->ve) v->owning_sub->ve = NULL; if (has_feature(feat_gles31_vertex_attrib_binding)) { glDeleteVertexArrays(1, &v->id); } FREE(v); } static void vrend_destroy_sampler_state_object(void *obj_ptr) { struct vrend_sampler_state *state = obj_ptr; if (has_feature(feat_samplers)) glDeleteSamplers(2, state->ids); FREE(state); } static GLuint convert_wrap(int wrap) { switch(wrap){ case PIPE_TEX_WRAP_REPEAT: return GL_REPEAT; case PIPE_TEX_WRAP_CLAMP: if (vrend_state.use_core_profile == false) return GL_CLAMP; else return GL_CLAMP_TO_EDGE; case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return GL_CLAMP_TO_EDGE; case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return GL_CLAMP_TO_BORDER; case PIPE_TEX_WRAP_MIRROR_REPEAT: return GL_MIRRORED_REPEAT; case PIPE_TEX_WRAP_MIRROR_CLAMP: return GL_MIRROR_CLAMP_EXT; case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return GL_MIRROR_CLAMP_TO_EDGE_EXT; case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return GL_MIRROR_CLAMP_TO_BORDER_EXT; default: assert(0); return -1; } } static inline GLenum convert_mag_filter(unsigned int filter) { if (filter == PIPE_TEX_FILTER_NEAREST) return GL_NEAREST; return GL_LINEAR; } 
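/* The helper below combines the gallium image filter and mip filter into the
 * single GL minification enum, e.g. PIPE_TEX_FILTER_LINEAR with
 * PIPE_TEX_MIPFILTER_NEAREST becomes GL_LINEAR_MIPMAP_NEAREST; MIPFILTER_NONE
 * falls back to the plain magnification-filter mapping. */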
static inline GLenum convert_min_filter(unsigned int filter, unsigned int mip_filter) { if (mip_filter == PIPE_TEX_MIPFILTER_NONE) return convert_mag_filter(filter); else if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) { if (filter == PIPE_TEX_FILTER_NEAREST) return GL_NEAREST_MIPMAP_LINEAR; else return GL_LINEAR_MIPMAP_LINEAR; } else if (mip_filter == PIPE_TEX_MIPFILTER_NEAREST) { if (filter == PIPE_TEX_FILTER_NEAREST) return GL_NEAREST_MIPMAP_NEAREST; else return GL_LINEAR_MIPMAP_NEAREST; } assert(0); return 0; } static void apply_sampler_border_color(GLuint sampler, const GLuint colors[static 4]) { if (has_feature(feat_sampler_border_colors)) { glSamplerParameterIuiv(sampler, GL_TEXTURE_BORDER_COLOR, colors); } else if (colors[0] || colors[1] || colors[2] || colors[3]) { vrend_printf("sampler border color setting requested but not supported\n"); } } int vrend_create_sampler_state(struct vrend_context *ctx, uint32_t handle, struct pipe_sampler_state *templ) { struct vrend_sampler_state *state = CALLOC_STRUCT(vrend_sampler_state); int ret_handle; if (!state) return ENOMEM; state->base = *templ; if (has_feature(feat_samplers)) { glGenSamplers(2, state->ids); for (int i = 0; i < 2; ++i) { glSamplerParameteri(state->ids[i], GL_TEXTURE_WRAP_S, convert_wrap(templ->wrap_s)); glSamplerParameteri(state->ids[i], GL_TEXTURE_WRAP_T, convert_wrap(templ->wrap_t)); glSamplerParameteri(state->ids[i], GL_TEXTURE_WRAP_R, convert_wrap(templ->wrap_r)); glSamplerParameterf(state->ids[i], GL_TEXTURE_MIN_FILTER, convert_min_filter(templ->min_img_filter, templ->min_mip_filter)); glSamplerParameterf(state->ids[i], GL_TEXTURE_MAG_FILTER, convert_mag_filter(templ->mag_img_filter)); glSamplerParameterf(state->ids[i], GL_TEXTURE_MIN_LOD, templ->min_lod); glSamplerParameterf(state->ids[i], GL_TEXTURE_MAX_LOD, templ->max_lod); glSamplerParameteri(state->ids[i], GL_TEXTURE_COMPARE_MODE, templ->compare_mode ? GL_COMPARE_R_TO_TEXTURE : GL_NONE); glSamplerParameteri(state->ids[i], GL_TEXTURE_COMPARE_FUNC, GL_NEVER + templ->compare_func); if (vrend_state.use_gles) { if (templ->lod_bias) report_gles_warn(ctx, GLES_WARN_LOD_BIAS); } else glSamplerParameterf(state->ids[i], GL_TEXTURE_LOD_BIAS, templ->lod_bias); if (vrend_state.use_gles) { if (templ->seamless_cube_map != 0) { report_gles_warn(ctx, GLES_WARN_SEAMLESS_CUBE_MAP); } } else { glSamplerParameteri(state->ids[i], GL_TEXTURE_CUBE_MAP_SEAMLESS, templ->seamless_cube_map); } apply_sampler_border_color(state->ids[i], templ->border_color.ui); glSamplerParameteri(state->ids[i], GL_TEXTURE_SRGB_DECODE_EXT, i == 0 ? 
GL_SKIP_DECODE_EXT : GL_DECODE_EXT); } } ret_handle = vrend_renderer_object_insert(ctx, state, handle, VIRGL_OBJECT_SAMPLER_STATE); if (!ret_handle) { if (has_feature(feat_samplers)) glDeleteSamplers(2, state->ids); FREE(state); return ENOMEM; } return 0; } static inline GLenum to_gl_swizzle(int swizzle) { switch (swizzle) { case PIPE_SWIZZLE_RED: return GL_RED; case PIPE_SWIZZLE_GREEN: return GL_GREEN; case PIPE_SWIZZLE_BLUE: return GL_BLUE; case PIPE_SWIZZLE_ALPHA: return GL_ALPHA; case PIPE_SWIZZLE_ZERO: return GL_ZERO; case PIPE_SWIZZLE_ONE: return GL_ONE; default: assert(0); return 0; } } static inline int to_pipe_swizzle(GLenum swizzle) { switch (swizzle) { case GL_RED: return PIPE_SWIZZLE_RED; case GL_GREEN: return PIPE_SWIZZLE_GREEN; case GL_BLUE: return PIPE_SWIZZLE_BLUE; case GL_ALPHA: return PIPE_SWIZZLE_ALPHA; case GL_ZERO: return PIPE_SWIZZLE_ZERO; case GL_ONE: return PIPE_SWIZZLE_ONE; default: assert(0); return 0; } } int vrend_create_sampler_view(struct vrend_context *ctx, uint32_t handle, uint32_t res_handle, uint32_t format, uint32_t val0, uint32_t val1, uint32_t swizzle_packed) { struct vrend_sampler_view *view; struct vrend_resource *res; int ret_handle; uint8_t swizzle[4]; res = vrend_renderer_ctx_res_lookup(ctx, res_handle); if (!res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle); return EINVAL; } view = CALLOC_STRUCT(vrend_sampler_view); if (!view) return ENOMEM; pipe_reference_init(&view->reference, 1); view->format = format & 0xffffff; if (!view->format || view->format >= VIRGL_FORMAT_MAX) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_FORMAT, view->format); FREE(view); return EINVAL; } uint32_t pipe_target = (format >> 24) & 0xff; if (pipe_target >= PIPE_MAX_TEXTURE_TYPES) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SAMPLER_VIEW_TARGET, view->format); FREE(view); return EINVAL; } view->target = tgsitargettogltarget(pipe_target, res->base.nr_samples); /* Work around TEXTURE_RECTANGLE and TEXTURE_1D missing on GLES */ if (vrend_state.use_gles) { if (view->target == GL_TEXTURE_RECTANGLE_NV || view->target == GL_TEXTURE_1D) view->target = GL_TEXTURE_2D; else if (view->target == GL_TEXTURE_1D_ARRAY) view->target = GL_TEXTURE_2D_ARRAY; } view->val0 = val0; view->val1 = val1; swizzle[0] = swizzle_packed & 0x7; swizzle[1] = (swizzle_packed >> 3) & 0x7; swizzle[2] = (swizzle_packed >> 6) & 0x7; swizzle[3] = (swizzle_packed >> 9) & 0x7; vrend_resource_reference(&view->texture, res); view->id = view->texture->id; if (view->target == PIPE_BUFFER) view->target = view->texture->target; view->srgb_decode = GL_DECODE_EXT; if (view->format != view->texture->base.format) { if (util_format_is_srgb(view->texture->base.format) && !util_format_is_srgb(view->format)) view->srgb_decode = GL_SKIP_DECODE_EXT; } if (!(util_format_has_alpha(view->format) || util_format_is_depth_or_stencil(view->format))) { if (swizzle[0] == PIPE_SWIZZLE_ALPHA) swizzle[0] = PIPE_SWIZZLE_ONE; if (swizzle[1] == PIPE_SWIZZLE_ALPHA) swizzle[1] = PIPE_SWIZZLE_ONE; if (swizzle[2] == PIPE_SWIZZLE_ALPHA) swizzle[2] = PIPE_SWIZZLE_ONE; if (swizzle[3] == PIPE_SWIZZLE_ALPHA) swizzle[3] = PIPE_SWIZZLE_ONE; } if (tex_conv_table[view->format].flags & VIRGL_TEXTURE_NEED_SWIZZLE) { if (swizzle[0] <= PIPE_SWIZZLE_ALPHA) swizzle[0] = tex_conv_table[view->format].swizzle[swizzle[0]]; if (swizzle[1] <= PIPE_SWIZZLE_ALPHA) swizzle[1] = tex_conv_table[view->format].swizzle[swizzle[1]]; if (swizzle[2] <= PIPE_SWIZZLE_ALPHA) swizzle[2] = 
tex_conv_table[view->format].swizzle[swizzle[2]];
      if (swizzle[3] <= PIPE_SWIZZLE_ALPHA)
         swizzle[3] = tex_conv_table[view->format].swizzle[swizzle[3]];
   }

   if (vrend_resource_is_emulated_bgra(view->texture)) {
      uint8_t temp = swizzle[0];
      swizzle[0] = swizzle[2];
      swizzle[2] = temp;
      VREND_DEBUG(dbg_bgra, ctx, "swizzling sampler channels on %s resource: (%d %d %d %d)\n",
                  util_format_name(view->texture->base.format),
                  swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
   }

   for (unsigned i = 0; i < 4; ++i)
      view->gl_swizzle[i] = to_gl_swizzle(swizzle[i]);

   if (!has_bit(view->texture->storage_bits, VREND_STORAGE_GL_BUFFER)) {
      enum virgl_formats format;
      bool needs_view = false;

      /*
       * Need to use a texture view if the gallium
       * view target is different than the underlying
       * texture target.
       */
      if (view->target != view->texture->target)
         needs_view = true;

      /*
       * If the formats are different and this isn't
       * a DS texture a view is required.
       * DS are special as they use different gallium
       * formats for DS views into a combined resource.
       * GL texture views can't be used for this, stencil
       * texturing is used instead. For DS formats
       * always program the underlying DS format as a
       * view could be required for layers.
       */
      format = view->format;
      if (util_format_is_depth_or_stencil(view->texture->base.format))
         format = view->texture->base.format;
      else if (view->format != view->texture->base.format)
         needs_view = true;

      if (needs_view &&
          has_bit(view->texture->storage_bits, VREND_STORAGE_GL_IMMUTABLE) &&
          has_feature(feat_texture_view)) {
         glGenTextures(1, &view->id);
         GLenum internalformat = tex_conv_table[format].internalformat;
         unsigned base_layer = view->val0 & 0xffff;
         unsigned max_layer = (view->val0 >> 16) & 0xffff;
         int base_level = view->val1 & 0xff;
         int max_level = (view->val1 >> 8) & 0xff;
         view->levels = (max_level - base_level) + 1;

         glTextureView(view->id, view->target, view->texture->id, internalformat,
                       base_level, view->levels,
                       base_layer, max_layer - base_layer + 1);

         glBindTexture(view->target, view->id);

         if (util_format_is_depth_or_stencil(view->format)) {
            if (vrend_state.use_core_profile == false) {
               /* setting depth texture mode is deprecated in core profile */
               if (view->depth_texture_mode != GL_RED) {
                  glTexParameteri(view->target, GL_DEPTH_TEXTURE_MODE, GL_RED);
                  view->depth_texture_mode = GL_RED;
               }
            }
            if (has_feature(feat_stencil_texturing)) {
               const struct util_format_description *desc = util_format_description(view->format);
               if (!util_format_has_depth(desc)) {
                  glTexParameteri(view->target, GL_DEPTH_STENCIL_TEXTURE_MODE, GL_STENCIL_INDEX);
               } else {
                  glTexParameteri(view->target, GL_DEPTH_STENCIL_TEXTURE_MODE, GL_DEPTH_COMPONENT);
               }
            }
         }

         glTexParameteri(view->target, GL_TEXTURE_BASE_LEVEL, base_level);
         glTexParameteri(view->target, GL_TEXTURE_MAX_LEVEL, max_level);
         if (vrend_state.use_gles) {
            for (unsigned int i = 0; i < 4; ++i) {
               glTexParameteri(view->target, GL_TEXTURE_SWIZZLE_R + i, view->gl_swizzle[i]);
            }
         } else
            glTexParameteriv(view->target, GL_TEXTURE_SWIZZLE_RGBA, view->gl_swizzle);
         if (util_format_is_srgb(view->format) &&
             has_feature(feat_texture_srgb_decode)) {
            glTexParameteri(view->target, GL_TEXTURE_SRGB_DECODE_EXT,
                            view->srgb_decode);
         }
         glBindTexture(view->target, 0);
      } else if (needs_view && view->val0 < ARRAY_SIZE(res->aux_plane_egl_image) &&
                 res->aux_plane_egl_image[view->val0]) {
         void *image = res->aux_plane_egl_image[view->val0];
         glGenTextures(1, &view->id);
         glBindTexture(view->target, view->id);
         glEGLImageTargetTexture2DOES(view->target, (GLeglImageOES) image);
         glBindTexture(view->target, 0);
      }
   }
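   /* GL-side state (texture view or EGL image binding) is ready; what follows
    * publishes the sampler view in the context's object hash so later bind
    * commands can look it up by handle, freeing it if insertion fails. */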
ret_handle = vrend_renderer_object_insert(ctx, view, handle, VIRGL_OBJECT_SAMPLER_VIEW); if (ret_handle == 0) { FREE(view); return ENOMEM; } return 0; } static void vrend_framebuffer_texture_2d(struct vrend_resource *res, GLenum target, GLenum attachment, GLenum textarget, uint32_t texture, int32_t level, uint32_t samples) { if (samples == 0) { glFramebufferTexture2D(target, attachment, textarget, texture, level); } else if (!has_feature(feat_implicit_msaa)) { /* fallback to non-msaa */ report_gles_warn(vrend_state.current_ctx, GLES_WARN_IMPLICIT_MSAA_SURFACE); glFramebufferTexture2D(target, attachment, textarget, texture, level); } else if (attachment == GL_COLOR_ATTACHMENT0){ glFramebufferTexture2DMultisampleEXT(target, attachment, textarget, texture, level, samples); } else if (attachment == GL_STENCIL_ATTACHMENT || attachment == GL_DEPTH_ATTACHMENT) { GLenum internalformat = attachment == GL_STENCIL_ATTACHMENT ? GL_STENCIL_INDEX8 : GL_DEPTH_COMPONENT16; glGenRenderbuffers(1, &res->rbo_id); glBindRenderbuffer(GL_RENDERBUFFER, res->rbo_id); glRenderbufferStorageMultisampleEXT(GL_RENDERBUFFER, samples, internalformat, res->base.width0, res->base.height0); glFramebufferRenderbuffer(GL_FRAMEBUFFER, attachment, GL_RENDERBUFFER, res->rbo_id); glBindRenderbuffer(GL_RENDERBUFFER, 0); } else { /* unsupported attachment for EXT_multisampled_render_to_texture, fallback to non-msaa */ report_gles_warn(vrend_state.current_ctx, GLES_WARN_IMPLICIT_MSAA_SURFACE); glFramebufferTexture2D(target, attachment, textarget, texture, level); } } static void debug_texture(ASSERTED const char *f, const struct vrend_resource *gt) { ASSERTED const struct pipe_resource *pr = &gt->base; #define PRINT_TARGET(X) case X: vrend_printf( #X); break VREND_DEBUG_EXT(dbg_tex, NULL, vrend_printf("%s: ", f); switch (tgsitargettogltarget(pr->target, pr->nr_samples)) { PRINT_TARGET(GL_TEXTURE_RECTANGLE_NV); PRINT_TARGET(GL_TEXTURE_1D); PRINT_TARGET(GL_TEXTURE_2D); PRINT_TARGET(GL_TEXTURE_3D); PRINT_TARGET(GL_TEXTURE_1D_ARRAY); PRINT_TARGET(GL_TEXTURE_2D_ARRAY); PRINT_TARGET(GL_TEXTURE_2D_MULTISAMPLE); PRINT_TARGET(GL_TEXTURE_CUBE_MAP); PRINT_TARGET(GL_TEXTURE_CUBE_MAP_ARRAY); default: vrend_printf("UNKNOWN"); } vrend_printf(" id:%d pipe_type:%d ms:%d format:%s size: %dx%dx%d mip:%d\n", gt->id, pr->target, pr->nr_samples, util_format_name(pr->format), pr->width0, pr->height0, pr->depth0, pr->last_level); ); #undef PRINT_TARGET } void vrend_fb_bind_texture_id(struct vrend_resource *res, int id, int idx, uint32_t level, uint32_t layer, uint32_t samples) { const struct util_format_description *desc = util_format_description(res->base.format); GLenum attachment = GL_COLOR_ATTACHMENT0 + idx; debug_texture(__func__, res); if (vrend_format_is_ds(res->base.format)) { if (util_format_has_stencil(desc)) { if (util_format_has_depth(desc)) attachment = GL_DEPTH_STENCIL_ATTACHMENT; else attachment = GL_STENCIL_ATTACHMENT; } else attachment = GL_DEPTH_ATTACHMENT; } switch (res->target) { case GL_TEXTURE_1D_ARRAY: case GL_TEXTURE_2D_ARRAY: case GL_TEXTURE_2D_MULTISAMPLE_ARRAY: case GL_TEXTURE_CUBE_MAP_ARRAY: if (layer == 0xffffffff) glFramebufferTexture(GL_FRAMEBUFFER, attachment, id, level); else glFramebufferTextureLayer(GL_FRAMEBUFFER, attachment, id, level, layer); break; case GL_TEXTURE_3D: if (layer == 0xffffffff) glFramebufferTexture(GL_FRAMEBUFFER, attachment, id, level); else if (vrend_state.use_gles) glFramebufferTexture3DOES(GL_FRAMEBUFFER, attachment, res->target, id, level, layer); else glFramebufferTexture3D(GL_FRAMEBUFFER, 
attachment, res->target, id, level, layer); break; case GL_TEXTURE_CUBE_MAP: if (layer == 0xffffffff) glFramebufferTexture(GL_FRAMEBUFFER, attachment, id, level); else vrend_framebuffer_texture_2d(res, GL_FRAMEBUFFER, attachment, GL_TEXTURE_CUBE_MAP_POSITIVE_X + layer, id, level, samples); break; case GL_TEXTURE_1D: glFramebufferTexture1D(GL_FRAMEBUFFER, attachment, res->target, id, level); break; case GL_TEXTURE_2D: default: vrend_framebuffer_texture_2d(res, GL_FRAMEBUFFER, attachment, res->target, id, level, samples); break; } if (attachment == GL_DEPTH_ATTACHMENT) { switch (res->target) { case GL_TEXTURE_1D: glFramebufferTexture1D(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_1D, 0, 0); break; case GL_TEXTURE_2D: default: glFramebufferTexture2D(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0); break; } } } void vrend_fb_bind_texture(struct vrend_resource *res, int idx, uint32_t level, uint32_t layer) { vrend_fb_bind_texture_id(res, res->id, idx, level, layer, 0); } static void vrend_hw_set_zsurf_texture(struct vrend_context *ctx) { struct vrend_surface *surf = ctx->sub->zsurf; if (!surf) { glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0); } else { uint32_t first_layer = surf->val1 & 0xffff; uint32_t last_layer = (surf->val1 >> 16) & 0xffff; if (!surf->texture) return; vrend_fb_bind_texture_id(surf->texture, surf->id, 0, surf->val0, first_layer != last_layer ? 0xffffffff : first_layer, surf->nr_samples); } } static void vrend_hw_set_color_surface(struct vrend_sub_context *sub_ctx, int index) { struct vrend_surface *surf = sub_ctx->surf[index]; if (!surf) { GLenum attachment = GL_COLOR_ATTACHMENT0 + index; glFramebufferTexture2D(GL_FRAMEBUFFER, attachment, GL_TEXTURE_2D, 0, 0); } else { uint32_t first_layer = sub_ctx->surf[index]->val1 & 0xffff; uint32_t last_layer = (sub_ctx->surf[index]->val1 >> 16) & 0xffff; vrend_fb_bind_texture_id(surf->texture, surf->id, index, surf->val0, first_layer != last_layer ? 
0xffffffff : first_layer, surf->nr_samples); } } static void vrend_hw_emit_framebuffer_state(struct vrend_sub_context *sub_ctx) { static const GLenum buffers[8] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1, GL_COLOR_ATTACHMENT2, GL_COLOR_ATTACHMENT3, GL_COLOR_ATTACHMENT4, GL_COLOR_ATTACHMENT5, GL_COLOR_ATTACHMENT6, GL_COLOR_ATTACHMENT7, }; if (sub_ctx->nr_cbufs == 0) { glReadBuffer(GL_NONE); if (has_feature(feat_srgb_write_control)) { glDisable(GL_FRAMEBUFFER_SRGB_EXT); sub_ctx->framebuffer_srgb_enabled = false; } } else if (has_feature(feat_srgb_write_control)) { struct vrend_surface *surf = NULL; bool use_srgb = false; int i; for (i = 0; i < sub_ctx->nr_cbufs; i++) { if (sub_ctx->surf[i]) { surf = sub_ctx->surf[i]; if (util_format_is_srgb(surf->format)) { use_srgb = true; break; } } } if (use_srgb) { glEnable(GL_FRAMEBUFFER_SRGB_EXT); } else { glDisable(GL_FRAMEBUFFER_SRGB_EXT); } sub_ctx->framebuffer_srgb_enabled = use_srgb; } sub_ctx->swizzle_output_rgb_to_bgr = 0; sub_ctx->convert_linear_to_srgb_on_write = 0; for (int i = 0; i < sub_ctx->nr_cbufs; i++) { if (sub_ctx->surf[i]) { struct vrend_surface *surf = sub_ctx->surf[i]; if (vrend_resource_is_emulated_bgra(surf->texture)) { VREND_DEBUG(dbg_bgra, sub_ctx->parent, "swizzling output for 0x%x (surface format is %s; resource format is %s)\n", i, util_format_name(surf->format), util_format_name(surf->texture->base.format)); sub_ctx->swizzle_output_rgb_to_bgr |= 1 << i; } /* [R8G8B8|B8G8R8]X8_UNORM formatted resources imported to mesa as EGL images occupy 24bpp instead of * more common 32bpp (with an ignored alpha channel). GL_RGB8 internal format must be specified when * interacting with these textures in the host driver. Unfortunately, GL_SRGB8 is not guaranteed to * be color-renderable on either GL or GLES, and is typically not supported. Thus, rendering to such * surfaces by using an SRGB texture view will have no colorspace conversion effects. * To work around this, manual colorspace conversion is used instead in the fragment shader and * during glClearColor() setting. 
*/ if (vrend_resource_has_24bpp_internal_format(surf->texture) && util_format_is_srgb(surf->format)) { VREND_DEBUG(dbg_tex, sub_ctx->parent, "manually converting linear->srgb for EGL-backed framebuffer color attachment 0x%x" " (surface format is %s; resource format is %s)\n", i, util_format_name(surf->format), util_format_name(surf->texture->base.format)); sub_ctx->convert_linear_to_srgb_on_write |= 1 << i; } } } glDrawBuffers(sub_ctx->nr_cbufs, buffers); } void vrend_set_framebuffer_state(struct vrend_context *ctx, uint32_t nr_cbufs, uint32_t surf_handle[PIPE_MAX_COLOR_BUFS], uint32_t zsurf_handle) { struct vrend_surface *surf, *zsurf; int i; int old_num; GLenum status; GLint new_height = -1; bool new_ibf = false; struct vrend_sub_context *sub_ctx = ctx->sub; glBindFramebuffer(GL_FRAMEBUFFER, sub_ctx->fb_id); if (zsurf_handle) { zsurf = vrend_object_lookup(sub_ctx->object_hash, zsurf_handle, VIRGL_OBJECT_SURFACE); if (!zsurf) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, zsurf_handle); return; } } else zsurf = NULL; if (sub_ctx->zsurf != zsurf) { vrend_surface_reference(&sub_ctx->zsurf, zsurf); vrend_hw_set_zsurf_texture(ctx); } old_num = sub_ctx->nr_cbufs; sub_ctx->nr_cbufs = nr_cbufs; sub_ctx->old_nr_cbufs = old_num; for (i = 0; i < (int)nr_cbufs; i++) { if (surf_handle[i] != 0) { surf = vrend_object_lookup(sub_ctx->object_hash, surf_handle[i], VIRGL_OBJECT_SURFACE); if (!surf) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, surf_handle[i]); return; } } else surf = NULL; if (sub_ctx->surf[i] != surf) { vrend_surface_reference(&sub_ctx->surf[i], surf); vrend_hw_set_color_surface(sub_ctx, i); } } if (old_num > sub_ctx->nr_cbufs) { for (i = sub_ctx->nr_cbufs; i < old_num; i++) { vrend_surface_reference(&sub_ctx->surf[i], NULL); vrend_hw_set_color_surface(sub_ctx, i); } } /* find a buffer to set fb_height from */ if (sub_ctx->nr_cbufs == 0 && !sub_ctx->zsurf) { new_height = 0; new_ibf = false; } else if (sub_ctx->nr_cbufs == 0) { new_height = u_minify(sub_ctx->zsurf->texture->base.height0, sub_ctx->zsurf->val0); new_ibf = sub_ctx->zsurf->texture->y_0_top ? true : false; } else { surf = NULL; for (i = 0; i < sub_ctx->nr_cbufs; i++) { if (sub_ctx->surf[i]) { surf = sub_ctx->surf[i]; break; } } if (surf == NULL) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, i); return; } new_height = u_minify(surf->texture->base.height0, surf->val0); new_ibf = surf->texture->y_0_top ? 
true : false; } if (new_height != -1) { if (sub_ctx->fb_height != (uint32_t)new_height || sub_ctx->inverted_fbo_content != new_ibf) { sub_ctx->fb_height = new_height; sub_ctx->inverted_fbo_content = new_ibf; sub_ctx->viewport_state_dirty = (1 << 0); } } vrend_hw_emit_framebuffer_state(sub_ctx); if (sub_ctx->nr_cbufs > 0 || sub_ctx->zsurf) { status = glCheckFramebufferStatus(GL_FRAMEBUFFER); if (status != GL_FRAMEBUFFER_COMPLETE) vrend_printf("failed to complete framebuffer 0x%x %s\n", status, ctx->debug_name); } sub_ctx->shader_dirty = true; sub_ctx->blend_state_dirty = true; } void vrend_set_framebuffer_state_no_attach(UNUSED struct vrend_context *ctx, uint32_t width, uint32_t height, uint32_t layers, uint32_t samples) { int gl_ver = vrend_state.gl_major_ver * 10 + vrend_state.gl_minor_ver; if (has_feature(feat_fb_no_attach)) { glFramebufferParameteri(GL_FRAMEBUFFER, GL_FRAMEBUFFER_DEFAULT_WIDTH, width); glFramebufferParameteri(GL_FRAMEBUFFER, GL_FRAMEBUFFER_DEFAULT_HEIGHT, height); if (!(vrend_state.use_gles && gl_ver <= 31)) glFramebufferParameteri(GL_FRAMEBUFFER, GL_FRAMEBUFFER_DEFAULT_LAYERS, layers); glFramebufferParameteri(GL_FRAMEBUFFER, GL_FRAMEBUFFER_DEFAULT_SAMPLES, samples); } } /* * if the viewport Y scale factor is > 0 then we are rendering to * an FBO already so don't need to invert rendering? */ void vrend_set_viewport_states(struct vrend_context *ctx, uint32_t start_slot, uint32_t num_viewports, const struct pipe_viewport_state *state) { /* convert back to glViewport */ GLint x, y; GLsizei width, height; GLclampd near_val, far_val; bool viewport_is_negative = (state[0].scale[1] < 0) ? true : false; uint i, idx; if (num_viewports > PIPE_MAX_VIEWPORTS || start_slot > (PIPE_MAX_VIEWPORTS - num_viewports)) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, num_viewports); return; } for (i = 0; i < num_viewports; i++) { GLfloat abs_s1 = fabsf(state[i].scale[1]); idx = start_slot + i; width = state[i].scale[0] * 2.0f; height = abs_s1 * 2.0f; x = state[i].translate[0] - state[i].scale[0]; y = state[i].translate[1] - state[i].scale[1]; if (!ctx->sub->rs_state.clip_halfz) { near_val = state[i].translate[2] - state[i].scale[2]; far_val = near_val + (state[i].scale[2] * 2.0); } else { near_val = state[i].translate[2]; far_val = state[i].scale[2] + state[i].translate[2]; } if (ctx->sub->vps[idx].cur_x != x || ctx->sub->vps[idx].cur_y != y || ctx->sub->vps[idx].width != width || ctx->sub->vps[idx].height != height || ctx->sub->vps[idx].near_val != near_val || ctx->sub->vps[idx].far_val != far_val || (!(ctx->sub->viewport_state_initialized &= (1 << idx)))) { ctx->sub->vps[idx].cur_x = x; ctx->sub->vps[idx].cur_y = y; ctx->sub->vps[idx].width = width; ctx->sub->vps[idx].height = height; ctx->sub->vps[idx].near_val = near_val; ctx->sub->vps[idx].far_val = far_val; ctx->sub->viewport_state_dirty |= (1 << idx); } if (idx == 0) { if (ctx->sub->viewport_is_negative != viewport_is_negative) ctx->sub->viewport_is_negative = viewport_is_negative; } } } #define UPDATE_INT_SIGN_MASK(fmt, i, signed_mask, unsigned_mask) \ if (vrend_state.use_integer && \ util_format_is_pure_integer(fmt)) { \ if (util_format_is_pure_uint(fmt)) \ unsigned_mask |= (1 << i); \ else \ signed_mask |= (1 << i); \ } int vrend_create_vertex_elements_state(struct vrend_context *ctx, uint32_t handle, unsigned num_elements, const struct pipe_vertex_element *elements) { struct vrend_vertex_element_array *v; const struct util_format_description *desc; GLenum type; uint i; uint32_t ret_handle; if (num_elements > 
PIPE_MAX_ATTRIBS) return EINVAL; v = CALLOC_STRUCT(vrend_vertex_element_array); if (!v) return ENOMEM; v->count = num_elements; for (i = 0; i < num_elements; i++) { memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element)); desc = util_format_description(elements[i].src_format); if (!desc) { FREE(v); return EINVAL; } type = GL_FALSE; switch (desc->channel[0].type) { case UTIL_FORMAT_TYPE_FLOAT: switch (desc->channel[0].size) { case 16: type = GL_HALF_FLOAT; break; case 32: type = GL_FLOAT; break; case 64: type = GL_DOUBLE; break; } break; case UTIL_FORMAT_TYPE_UNSIGNED: switch (desc->channel[0].size) { case 8: type = GL_UNSIGNED_BYTE; break; case 16: type = GL_UNSIGNED_SHORT; break; case 32: type = GL_UNSIGNED_INT; break; } break; case UTIL_FORMAT_TYPE_SIGNED: switch (desc->channel[0].size) { case 8: type = GL_BYTE; break; case 16: type = GL_SHORT; break; case 32: type = GL_INT; break; } break; } if (type == GL_FALSE) { switch (elements[i].src_format) { case PIPE_FORMAT_R10G10B10A2_SSCALED: case PIPE_FORMAT_R10G10B10A2_SNORM: case PIPE_FORMAT_B10G10R10A2_SNORM: type = GL_INT_2_10_10_10_REV; break; case PIPE_FORMAT_R10G10B10A2_USCALED: case PIPE_FORMAT_R10G10B10A2_UNORM: case PIPE_FORMAT_B10G10R10A2_UNORM: type = GL_UNSIGNED_INT_2_10_10_10_REV; break; case PIPE_FORMAT_R11G11B10_FLOAT: type = GL_UNSIGNED_INT_10F_11F_11F_REV; break; default: ; } } if (type == GL_FALSE) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_VERTEX_FORMAT, elements[i].src_format); FREE(v); return EINVAL; } v->elements[i].type = type; if (desc->channel[0].normalized) v->elements[i].norm = GL_TRUE; if (desc->nr_channels == 4 && desc->swizzle[0] == UTIL_FORMAT_SWIZZLE_Z) v->elements[i].nr_chan = GL_BGRA; else if (elements[i].src_format == PIPE_FORMAT_R11G11B10_FLOAT) v->elements[i].nr_chan = 3; else v->elements[i].nr_chan = desc->nr_channels; } if (has_feature(feat_gles31_vertex_attrib_binding)) { glGenVertexArrays(1, &v->id); glBindVertexArray(v->id); for (i = 0; i < num_elements; i++) { struct vrend_vertex_element *ve = &v->elements[i]; if (util_format_is_pure_integer(ve->base.src_format)) { UPDATE_INT_SIGN_MASK(ve->base.src_format, i, v->signed_int_bitmask, v->unsigned_int_bitmask); glVertexAttribIFormat(i, ve->nr_chan, ve->type, ve->base.src_offset); } else glVertexAttribFormat(i, ve->nr_chan, ve->type, ve->norm, ve->base.src_offset); glVertexAttribBinding(i, ve->base.vertex_buffer_index); glVertexBindingDivisor(i, ve->base.instance_divisor); glEnableVertexAttribArray(i); } } ret_handle = vrend_renderer_object_insert(ctx, v, handle, VIRGL_OBJECT_VERTEX_ELEMENTS); if (!ret_handle) { FREE(v); return ENOMEM; } v->owning_sub = ctx->sub; return 0; } void vrend_bind_vertex_elements_state(struct vrend_context *ctx, uint32_t handle) { struct vrend_vertex_element_array *v; if (!handle) { ctx->sub->ve = NULL; return; } v = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_VERTEX_ELEMENTS); if (!v) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle); return; } if (ctx->sub->ve != v) ctx->sub->vbo_dirty = true; ctx->sub->ve = v; } void vrend_set_constants(struct vrend_context *ctx, uint32_t shader, uint32_t num_constant, const float *data) { struct vrend_constants *consts; consts = &ctx->sub->consts[shader]; ctx->sub->const_dirty[shader] = true; /* avoid reallocations by only growing the buffer */ if (consts->num_allocated_consts < num_constant) { free(consts->consts); consts->consts = malloc(num_constant * sizeof(float)); if (!consts->consts) return; 
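      /* Growing the buffer succeeded; the capacity update and copy below make
       * the new constant data visible for the next draw. */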
consts->num_allocated_consts = num_constant; } memcpy(consts->consts, data, num_constant * sizeof(unsigned int)); consts->num_consts = num_constant; } void vrend_set_uniform_buffer(struct vrend_context *ctx, uint32_t shader, uint32_t index, uint32_t offset, uint32_t length, uint32_t res_handle) { struct vrend_resource *res; if (!has_feature(feat_ubo)) return; struct pipe_constant_buffer *cbs = &ctx->sub->cbs[shader][index]; const uint32_t mask = 1u << index; if (res_handle) { res = vrend_renderer_ctx_res_lookup(ctx, res_handle); if (!res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle); return; } cbs->buffer = (struct pipe_resource *)res; cbs->buffer_offset = offset; cbs->buffer_size = length; ctx->sub->const_bufs_used_mask[shader] |= mask; } else { cbs->buffer = NULL; cbs->buffer_offset = 0; cbs->buffer_size = 0; ctx->sub->const_bufs_used_mask[shader] &= ~mask; } ctx->sub->const_bufs_dirty[shader] |= mask; } void vrend_set_index_buffer(struct vrend_context *ctx, uint32_t res_handle, uint32_t index_size, uint32_t offset) { struct vrend_resource *res; ctx->sub->ib.index_size = index_size; ctx->sub->ib.offset = offset; if (res_handle) { if (ctx->sub->index_buffer_res_id != res_handle) { res = vrend_renderer_ctx_res_lookup(ctx, res_handle); if (!res) { vrend_resource_reference((struct vrend_resource **)&ctx->sub->ib.buffer, NULL); ctx->sub->index_buffer_res_id = 0; vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle); return; } vrend_resource_reference((struct vrend_resource **)&ctx->sub->ib.buffer, res); ctx->sub->index_buffer_res_id = res_handle; } } else { vrend_resource_reference((struct vrend_resource **)&ctx->sub->ib.buffer, NULL); ctx->sub->index_buffer_res_id = 0; } } void vrend_set_single_vbo(struct vrend_context *ctx, uint32_t index, uint32_t stride, uint32_t buffer_offset, uint32_t res_handle) { struct vrend_resource *res; struct vrend_vertex_buffer *vbo = &ctx->sub->vbo[index]; if (vbo->base.stride != stride || vbo->base.buffer_offset != buffer_offset || vbo->res_id != res_handle) ctx->sub->vbo_dirty = true; vbo->base.stride = stride; vbo->base.buffer_offset = buffer_offset; if (res_handle == 0) { vrend_resource_reference((struct vrend_resource **)&vbo->base.buffer, NULL); vbo->res_id = 0; } else if (vbo->res_id != res_handle) { res = vrend_renderer_ctx_res_lookup(ctx, res_handle); if (!res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle); vbo->res_id = 0; return; } vrend_resource_reference((struct vrend_resource **)&vbo->base.buffer, res); vbo->res_id = res_handle; } } static void vrend_set_num_vbo_sub(struct vrend_sub_context *sub, int num_vbo) { int old_num = sub->num_vbos; int i; sub->num_vbos = num_vbo; sub->old_num_vbos = old_num; if (old_num != num_vbo) sub->vbo_dirty = true; for (i = num_vbo; i < old_num; i++) { vrend_resource_reference((struct vrend_resource **)&sub->vbo[i].base.buffer, NULL); sub->vbo[i].res_id = 0; } } void vrend_set_num_vbo(struct vrend_context *ctx, int num_vbo) { vrend_set_num_vbo_sub(ctx->sub, num_vbo); } void vrend_set_single_sampler_view(struct vrend_context *ctx, uint32_t shader_type, uint32_t index, uint32_t handle) { struct vrend_sampler_view *view = NULL; struct vrend_texture *tex; if (handle) { view = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SAMPLER_VIEW); if (!view) { ctx->sub->views[shader_type].views[index] = NULL; vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle); return; } if 
(ctx->sub->views[shader_type].views[index] == view) { return; } /* we should have a reference to this texture taken at create time */ tex = (struct vrend_texture *)view->texture; if (!tex) { return; } ctx->sub->sampler_views_dirty[shader_type] |= 1u << index; if (!has_bit(view->texture->storage_bits, VREND_STORAGE_GL_BUFFER)) { if (view->texture->id == view->id) { glBindTexture(view->target, view->id); if (util_format_is_depth_or_stencil(view->format)) { if (vrend_state.use_core_profile == false) { /* setting depth texture mode is deprecated in core profile */ if (view->depth_texture_mode != GL_RED) { glTexParameteri(view->texture->target, GL_DEPTH_TEXTURE_MODE, GL_RED); view->depth_texture_mode = GL_RED; } } if (has_feature(feat_stencil_texturing)) { const struct util_format_description *desc = util_format_description(view->format); if (!util_format_has_depth(desc)) { glTexParameteri(view->texture->target, GL_DEPTH_STENCIL_TEXTURE_MODE, GL_STENCIL_INDEX); } else { glTexParameteri(view->texture->target, GL_DEPTH_STENCIL_TEXTURE_MODE, GL_DEPTH_COMPONENT); } } } GLuint base_level = view->val1 & 0xff; GLuint max_level = (view->val1 >> 8) & 0xff; view->levels = max_level - base_level + 1; if (tex->cur_base != base_level) { glTexParameteri(view->texture->target, GL_TEXTURE_BASE_LEVEL, base_level); tex->cur_base = base_level; } if (tex->cur_max != max_level) { glTexParameteri(view->texture->target, GL_TEXTURE_MAX_LEVEL, max_level); tex->cur_max = max_level; } if (memcmp(tex->cur_swizzle, view->gl_swizzle, 4 * sizeof(GLint))) { if (vrend_state.use_gles) { for (unsigned int i = 0; i < 4; ++i) { if (tex->cur_swizzle[i] != view->gl_swizzle[i]) { glTexParameteri(view->texture->target, GL_TEXTURE_SWIZZLE_R + i, view->gl_swizzle[i]); } } } else glTexParameteriv(view->texture->target, GL_TEXTURE_SWIZZLE_RGBA, view->gl_swizzle); memcpy(tex->cur_swizzle, view->gl_swizzle, 4 * sizeof(GLint)); } if (tex->cur_srgb_decode != view->srgb_decode && util_format_is_srgb(tex->base.base.format)) { if (has_feature(feat_samplers)) ctx->sub->sampler_views_dirty[shader_type] |= (1u << index); else if (has_feature(feat_texture_srgb_decode)) { glTexParameteri(view->texture->target, GL_TEXTURE_SRGB_DECODE_EXT, view->srgb_decode); tex->cur_srgb_decode = view->srgb_decode; } } } } else { GLenum internalformat; if (!view->texture->tbo_tex_id) glGenTextures(1, &view->texture->tbo_tex_id); glBindTexture(GL_TEXTURE_BUFFER, view->texture->tbo_tex_id); internalformat = tex_conv_table[view->format].internalformat; if (has_feature(feat_texture_buffer_range)) { unsigned offset = view->val0; unsigned size = view->val1 - view->val0 + 1; int blsize = util_format_get_blocksize(view->format); offset *= blsize; size *= blsize; glTexBufferRange(GL_TEXTURE_BUFFER, internalformat, view->texture->id, offset, size); } else glTexBuffer(GL_TEXTURE_BUFFER, internalformat, view->texture->id); } } vrend_sampler_view_reference(&ctx->sub->views[shader_type].views[index], view); } void vrend_set_num_sampler_views(struct vrend_context *ctx, uint32_t shader_type, uint32_t start_slot, uint32_t num_sampler_views) { int last_slot = start_slot + num_sampler_views; int i; for (i = last_slot; i < ctx->sub->views[shader_type].num_views; i++) vrend_sampler_view_reference(&ctx->sub->views[shader_type].views[i], NULL); ctx->sub->views[shader_type].num_views = last_slot; } void vrend_set_single_image_view(struct vrend_context *ctx, uint32_t shader_type, uint32_t index, uint32_t format, uint32_t access, uint32_t layer_offset, uint32_t level_size, uint32_t handle) { 
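   /* A non-zero handle binds the resource as shader image 'index' and sets the
    * corresponding bit in images_used_mask; a zero handle clears the binding. */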
struct vrend_image_view *iview = &ctx->sub->image_views[shader_type][index]; struct vrend_resource *res; if (handle) { if (!has_feature(feat_images)) return; res = vrend_renderer_ctx_res_lookup(ctx, handle); if (!res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, handle); return; } iview->texture = res; iview->format = tex_conv_table[format].internalformat; iview->access = access; iview->u.buf.offset = layer_offset; iview->u.buf.size = level_size; ctx->sub->images_used_mask[shader_type] |= (1u << index); } else { iview->texture = NULL; iview->format = 0; ctx->sub->images_used_mask[shader_type] &= ~(1u << index); } } void vrend_set_single_ssbo(struct vrend_context *ctx, uint32_t shader_type, uint32_t index, uint32_t offset, uint32_t length, uint32_t handle) { struct vrend_ssbo *ssbo = &ctx->sub->ssbo[shader_type][index]; struct vrend_resource *res; if (!has_feature(feat_ssbo)) return; if (handle) { res = vrend_renderer_ctx_res_lookup(ctx, handle); if (!res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, handle); return; } ssbo->res = res; ssbo->buffer_offset = offset; ssbo->buffer_size = length; ctx->sub->ssbo_used_mask[shader_type] |= (1u << index); } else { ssbo->res = 0; ssbo->buffer_offset = 0; ssbo->buffer_size = 0; ctx->sub->ssbo_used_mask[shader_type] &= ~(1u << index); } } void vrend_set_single_abo(struct vrend_context *ctx, uint32_t index, uint32_t offset, uint32_t length, uint32_t handle) { struct vrend_abo *abo = &ctx->sub->abo[index]; struct vrend_resource *res; if (!has_feature(feat_atomic_counters)) return; if (handle) { res = vrend_renderer_ctx_res_lookup(ctx, handle); if (!res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, handle); return; } abo->res = res; abo->buffer_offset = offset; abo->buffer_size = length; ctx->sub->abo_used_mask |= (1u << index); } else { abo->res = 0; abo->buffer_offset = 0; abo->buffer_size = 0; ctx->sub->abo_used_mask &= ~(1u << index); } } void vrend_memory_barrier(UNUSED struct vrend_context *ctx, unsigned flags) { GLbitfield gl_barrier = 0; if (!has_feature(feat_barrier)) return; if ((flags & PIPE_BARRIER_ALL) == PIPE_BARRIER_ALL) gl_barrier = GL_ALL_BARRIER_BITS; else { if (flags & PIPE_BARRIER_VERTEX_BUFFER) gl_barrier |= GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT; if (flags & PIPE_BARRIER_INDEX_BUFFER) gl_barrier |= GL_ELEMENT_ARRAY_BARRIER_BIT; if (flags & PIPE_BARRIER_CONSTANT_BUFFER) gl_barrier |= GL_UNIFORM_BARRIER_BIT; if (flags & PIPE_BARRIER_TEXTURE) gl_barrier |= GL_TEXTURE_FETCH_BARRIER_BIT | GL_PIXEL_BUFFER_BARRIER_BIT; if (flags & PIPE_BARRIER_IMAGE) gl_barrier |= GL_SHADER_IMAGE_ACCESS_BARRIER_BIT; if (flags & PIPE_BARRIER_INDIRECT_BUFFER) gl_barrier |= GL_COMMAND_BARRIER_BIT; if (flags & PIPE_BARRIER_MAPPED_BUFFER) gl_barrier |= GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT; if (flags & PIPE_BARRIER_FRAMEBUFFER) gl_barrier |= GL_FRAMEBUFFER_BARRIER_BIT; if (flags & PIPE_BARRIER_STREAMOUT_BUFFER) gl_barrier |= GL_TRANSFORM_FEEDBACK_BARRIER_BIT; if (flags & PIPE_BARRIER_SHADER_BUFFER) { gl_barrier |= GL_ATOMIC_COUNTER_BARRIER_BIT; if (has_feature(feat_ssbo_barrier)) gl_barrier |= GL_SHADER_STORAGE_BARRIER_BIT; } if (has_feature(feat_qbo) && (flags & PIPE_BARRIER_QUERY_BUFFER)) gl_barrier |= GL_QUERY_BUFFER_BARRIER_BIT; } glMemoryBarrier(gl_barrier); } void vrend_texture_barrier(UNUSED struct vrend_context *ctx, unsigned flags) { if (has_feature(feat_texture_barrier) && (flags & PIPE_TEXTURE_BARRIER_SAMPLER)) glTextureBarrier(); if (has_feature(feat_blend_equation_advanced) && (flags & 
PIPE_TEXTURE_BARRIER_FRAMEBUFFER))
      glBlendBarrierKHR();
}

static void vrend_destroy_shader_object(void *obj_ptr)
{
   struct vrend_shader_selector *state = obj_ptr;

   vrend_shader_state_reference(&state, NULL);
}

static inline bool can_emulate_logicop(enum pipe_logicop op)
{
   if (has_feature(feat_framebuffer_fetch_non_coherent) ||
       has_feature(feat_framebuffer_fetch))
      return true;

   /* These ops don't need to read back from the framebuffer */
   switch (op) {
   case PIPE_LOGICOP_CLEAR:
   case PIPE_LOGICOP_COPY:
   case PIPE_LOGICOP_SET:
   case PIPE_LOGICOP_COPY_INVERTED:
      return true;
   default:
      return false;
   }
}

static inline void vrend_sync_shader_io(struct vrend_sub_context *sub_ctx,
                                        struct vrend_shader_selector *sel,
                                        struct vrend_shader_key *key)
{
   unsigned type = sel->type;

   int prev_type = (type != PIPE_SHADER_VERTEX) ? PIPE_SHADER_VERTEX : -1;

   /* Gallium sends and binds the shaders in the reverse order, so if an
    * old shader is still bound we should ignore the "previous" (as in
    * execution order) shader when the key is evaluated, unless the currently
    * bound shader selector actually refers to the current shader. */
   if (sub_ctx->shaders[type] == sel) {
      switch (type) {
      case PIPE_SHADER_GEOMETRY:
         if (key->tcs_present || key->tes_present)
            prev_type = PIPE_SHADER_TESS_EVAL;
         break;
      case PIPE_SHADER_FRAGMENT:
         if (key->gs_present)
            prev_type = PIPE_SHADER_GEOMETRY;
         else if (key->tcs_present || key->tes_present)
            prev_type = PIPE_SHADER_TESS_EVAL;
         break;
      case PIPE_SHADER_TESS_EVAL:
         if (key->tcs_present)
            prev_type = PIPE_SHADER_TESS_CTRL;
         break;
      default:
         break;
      }
   }

   /* prev_type is -1 for vertex shaders, so only index shaders[] when a
    * previous stage actually exists. */
   struct vrend_shader_selector *prev = prev_type != -1 ? sub_ctx->shaders[prev_type] : NULL;
   if (prev) {
      key->input = prev->sinfo.out;
      key->force_invariant_inputs = prev->sinfo.invariant_outputs;
      memcpy(key->prev_stage_generic_and_patch_outputs_layout,
             prev->sinfo.generic_outputs_layout,
             prev->sinfo.out.num_generic_and_patch * sizeof (struct vrend_layout_info));
      key->num_in_clip = sub_ctx->shaders[prev_type]->current->var_sinfo.num_out_clip;
      key->num_in_cull = sub_ctx->shaders[prev_type]->current->var_sinfo.num_out_cull;

      if (vrend_state.use_gles && type == PIPE_SHADER_FRAGMENT)
         key->fs.available_color_in_bits =
               sub_ctx->shaders[prev_type]->current->var_sinfo.legacy_color_bits;
   }

   int next_type = -1;

   if (type == PIPE_SHADER_FRAGMENT) {
      key->fs.invert_origin = !sub_ctx->inverted_fbo_content;
      key->fs.swizzle_output_rgb_to_bgr = sub_ctx->swizzle_output_rgb_to_bgr;
      key->fs.convert_linear_to_srgb_on_write = sub_ctx->convert_linear_to_srgb_on_write;
      if (vrend_state.use_gles && can_emulate_logicop(sub_ctx->blend_state.logicop_func)) {
         key->fs.logicop_enabled = sub_ctx->blend_state.logicop_enable;
         key->fs.logicop_func = sub_ctx->blend_state.logicop_func;
      }
      int fs_prim_mode = sub_ctx->prim_mode; // inherit draw-call's mode

      // Only use coord_replace if frag shader receives GL_POINTS
      switch (prev_type) {
      case PIPE_SHADER_TESS_EVAL:
         if (sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode)
            fs_prim_mode = PIPE_PRIM_POINTS;
         break;
      case PIPE_SHADER_GEOMETRY:
         fs_prim_mode = sub_ctx->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim;
         break;
      }

      key->fs.prim_is_points = (fs_prim_mode == PIPE_PRIM_POINTS);
      key->fs.coord_replace = sub_ctx->rs_state.point_quad_rasterization &&
            key->fs.prim_is_points ?
sub_ctx->rs_state.sprite_coord_enable : 0x0; } else { if (sub_ctx->shaders[PIPE_SHADER_FRAGMENT]) { struct vrend_shader *fs = sub_ctx->shaders[PIPE_SHADER_FRAGMENT]->current; key->fs_info = &fs->var_sinfo.fs_info; next_type = PIPE_SHADER_FRAGMENT; } } switch (type) { case PIPE_SHADER_VERTEX: if (key->tcs_present) next_type = PIPE_SHADER_TESS_CTRL; else if (key->gs_present) next_type = PIPE_SHADER_GEOMETRY; else if (key->tes_present) { if (!vrend_state.use_gles) next_type = PIPE_SHADER_TESS_EVAL; else next_type = PIPE_SHADER_TESS_CTRL; } break; case PIPE_SHADER_TESS_CTRL: next_type = PIPE_SHADER_TESS_EVAL; break; case PIPE_SHADER_TESS_EVAL: if (key->gs_present) next_type = PIPE_SHADER_GEOMETRY; default: break; } if (next_type != -1 && sub_ctx->shaders[next_type]) { key->output = sub_ctx->shaders[next_type]->sinfo.in; /* FS gets the clip/cull info in the key from this shader, so * we can avoid re-translating this shader by not updating the * info in the key */ if (next_type != PIPE_SHADER_FRAGMENT) { key->num_out_clip = sub_ctx->shaders[next_type]->current->var_sinfo.num_in_clip; key->num_out_cull = sub_ctx->shaders[next_type]->current->var_sinfo.num_in_cull; } if (type == PIPE_SHADER_VERTEX && next_type == PIPE_SHADER_FRAGMENT) { if (sub_ctx->shaders[type]) { uint32_t fog_input = sub_ctx->shaders[next_type]->sinfo.fog_input_mask; uint32_t fog_output = sub_ctx->shaders[type]->sinfo.fog_output_mask; //We only want to issue the fixup for inputs not fed by the outputs of the //previous stage key->vs.fog_fixup_mask = (fog_input ^ fog_output) & fog_input; } } } } static inline void vrend_fill_shader_key(struct vrend_sub_context *sub_ctx, struct vrend_shader_selector *sel, struct vrend_shader_key *key) { unsigned type = sel->type; if (vrend_state.use_core_profile) { int i; /* Only use integer info when drawing to avoid stale info. * Since we can get here from link_shaders before actually drawing anything, * we may have no vertex element array */ if (vrend_state.use_integer && sub_ctx->drawing && sub_ctx->ve && type == PIPE_SHADER_VERTEX) { key->vs.attrib_signed_int_bitmask = sub_ctx->ve->signed_int_bitmask; key->vs.attrib_unsigned_int_bitmask = sub_ctx->ve->unsigned_int_bitmask; } if (type == PIPE_SHADER_FRAGMENT) { for (i = 0; i < sub_ctx->nr_cbufs; i++) { if (!sub_ctx->surf[i]) continue; if (vrend_format_is_emulated_alpha(sub_ctx->surf[i]->format)) key->fs.cbufs_are_a8_bitmask |= (1 << i); if (util_format_is_pure_integer(sub_ctx->surf[i]->format)) { UPDATE_INT_SIGN_MASK(sub_ctx->surf[i]->format, i, key->fs.cbufs_signed_int_bitmask, key->fs.cbufs_unsigned_int_bitmask); } /* Currently we only use this information if logicop_enable is set */ if (sub_ctx->blend_state.logicop_enable) { key->fs.surface_component_bits[i] = util_format_get_component_bits(sub_ctx->surf[i]->format, UTIL_FORMAT_COLORSPACE_RGB, 0); } } } key->pstipple_tex = sub_ctx->rs_state.poly_stipple_enable; key->color_two_side = sub_ctx->rs_state.light_twoside; key->flatshade = sub_ctx->rs_state.flatshade ? 
true : false; } key->gs_present = !!sub_ctx->shaders[PIPE_SHADER_GEOMETRY]; key->tcs_present = !!sub_ctx->shaders[PIPE_SHADER_TESS_CTRL]; key->tes_present = !!sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]; if (type != PIPE_SHADER_COMPUTE) vrend_sync_shader_io(sub_ctx, sel, key); if (type == PIPE_SHADER_GEOMETRY) key->gs.emit_clip_distance = sub_ctx->rs_state.clip_plane_enable != 0; for (int i = 0; i < sub_ctx->views[type].num_views; i++) { struct vrend_sampler_view *view = sub_ctx->views[type].views[i]; if (view && view->texture->target == GL_TEXTURE_BUFFER && tex_conv_table[view->format].flags & VIRGL_TEXTURE_NEED_SWIZZLE) { key->sampler_views_lower_swizzle_mask |= 1 << i; key->tex_swizzle[i] = to_pipe_swizzle(view->gl_swizzle[0]) | to_pipe_swizzle(view->gl_swizzle[1]) << 3 | to_pipe_swizzle(view->gl_swizzle[2]) << 6 | to_pipe_swizzle(view->gl_swizzle[3]) << 9; } } } static int vrend_shader_create(struct vrend_context *ctx, struct vrend_shader *shader, struct vrend_shader_key *key) { static uint32_t uid; shader->uid = ++uid; if (shader->sel->tokens) { VREND_DEBUG(dbg_shader_tgsi, ctx, "shader\n%s\n", shader->sel->tmp_buf); bool ret = vrend_convert_shader(ctx, &ctx->shader_cfg, shader->sel->tokens, shader->sel->req_local_mem, key, &shader->sel->sinfo, &shader->var_sinfo, &shader->glsl_strings); if (!ret) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, shader->sel->type); return -1; } } else if (!ctx->shader_cfg.use_gles && shader->sel->type != TGSI_PROCESSOR_TESS_CTRL) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, shader->sel->type); return -1; } shader->key = *key; return 0; } static int vrend_shader_select(struct vrend_sub_context *sub_ctx, struct vrend_shader_selector *sel, bool *dirty) { struct vrend_shader_key key; struct vrend_shader *shader = NULL; int r; memset(&key, 0, sizeof(key)); vrend_fill_shader_key(sub_ctx, sel, &key); if (sel->current && !memcmp(&sel->current->key, &key, sizeof(key))) return 0; if (sel->num_shaders > 1) { struct vrend_shader *p = sel->current; struct vrend_shader *c = p->next_variant; while (c && memcmp(&c->key, &key, sizeof(key)) != 0) { p = c; c = c->next_variant; } if (c) { p->next_variant = c->next_variant; shader = c; } } if (!shader) { shader = CALLOC_STRUCT(vrend_shader); shader->sel = sel; list_inithead(&shader->programs); strarray_alloc(&shader->glsl_strings, SHADER_MAX_STRINGS); r = vrend_shader_create(sub_ctx->parent, shader, &key); if (r) { sel->current = NULL; FREE(shader); return r; } sel->num_shaders++; } if (dirty) *dirty = true; shader->next_variant = sel->current; sel->current = shader; return 0; } static void *vrend_create_shader_state(const struct pipe_stream_output_info *so_info, uint32_t req_local_mem, unsigned pipe_shader_type) { struct vrend_shader_selector *sel = CALLOC_STRUCT(vrend_shader_selector); if (!sel) return NULL; sel->req_local_mem = req_local_mem; sel->type = pipe_shader_type; sel->sinfo.so_info = *so_info; pipe_reference_init(&sel->reference, 1); return sel; } static int vrend_finish_shader(struct vrend_context *ctx, struct vrend_shader_selector *sel, const struct tgsi_token *tokens) { int r; sel->tokens = tgsi_dup_tokens(tokens); r = vrend_shader_select(ctx->sub, sel, NULL); if (r) { return EINVAL; } return 0; } int vrend_create_shader(struct vrend_context *ctx, uint32_t handle, const struct pipe_stream_output_info *so_info, uint32_t req_local_mem, const char *shd_text, uint32_t offlen, uint32_t num_tokens, uint32_t type, uint32_t pkt_length) { struct vrend_shader_selector *sel = NULL; 
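/* Long-shader handling overview (illustrative numbers only, not taken from a
 * real command stream): a shader whose rounded-up dword count exceeds
 * pkt_length arrives split over several packets. The first packet allocates
 * tmp_buf of ((offlen + 3) / 4) * 4 bytes and records the handle in
 * long_shader_in_progress_handle[type]; every continuation packet sets
 * VIRGL_OBJ_SHADER_OFFSET_CONT and its offlen must match the current
 * buf_offset. For example, assuming offlen = 1000 bytes and pkt_length = 128
 * dwords, the first packet copies 512 bytes (buf_offset = 512); a second
 * packet with offlen = 512 | CONT and pkt_length = 122 copies the remaining
 * 488 bytes, buf_offset reaches buf_len, and the accumulated text is handed
 * to tgsi_text_translate(). */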
int ret_handle; bool new_shader = true, long_shader = false; bool finished = false; int ret; if (type > PIPE_SHADER_COMPUTE) return EINVAL; if (type == PIPE_SHADER_GEOMETRY && !has_feature(feat_geometry_shader)) return EINVAL; if ((type == PIPE_SHADER_TESS_CTRL || type == PIPE_SHADER_TESS_EVAL) && !has_feature(feat_tessellation)) return EINVAL; if (type == PIPE_SHADER_COMPUTE && !has_feature(feat_compute_shader)) return EINVAL; if (offlen & VIRGL_OBJ_SHADER_OFFSET_CONT) new_shader = false; else if (((offlen + 3) / 4) > pkt_length) long_shader = true; struct vrend_sub_context *sub_ctx = ctx->sub; /* if we have an in progress one - don't allow a new shader of that type or a different handle. */ if (sub_ctx->long_shader_in_progress_handle[type]) { if (new_shader == true) return EINVAL; if (handle != sub_ctx->long_shader_in_progress_handle[type]) return EINVAL; } if (new_shader) { sel = vrend_create_shader_state(so_info, req_local_mem, type); if (sel == NULL) return ENOMEM; sel->buf_len = ((offlen + 3) / 4) * 4; /* round up buffer size */ sel->tmp_buf = malloc(sel->buf_len); if (!sel->tmp_buf) { ret = ENOMEM; goto error; } memcpy(sel->tmp_buf, shd_text, pkt_length * 4); if (long_shader) { sel->buf_offset = pkt_length * 4; sub_ctx->long_shader_in_progress_handle[type] = handle; } else finished = true; } else { sel = vrend_object_lookup(sub_ctx->object_hash, handle, VIRGL_OBJECT_SHADER); if (!sel) { vrend_printf( "got continuation without original shader %d\n", handle); ret = EINVAL; goto error; } offlen &= ~VIRGL_OBJ_SHADER_OFFSET_CONT; if (offlen != sel->buf_offset) { vrend_printf( "Got mismatched shader continuation %d vs %d\n", offlen, sel->buf_offset); ret = EINVAL; goto error; } /*make sure no overflow */ if (pkt_length * 4 < pkt_length || pkt_length * 4 + sel->buf_offset < pkt_length * 4 || pkt_length * 4 + sel->buf_offset < sel->buf_offset) { ret = EINVAL; goto error; } if ((pkt_length * 4 + sel->buf_offset) > sel->buf_len) { vrend_printf( "Got too large shader continuation %d vs %d\n", pkt_length * 4 + sel->buf_offset, sel->buf_len); ret = EINVAL; goto error; } memcpy(sel->tmp_buf + sel->buf_offset, shd_text, pkt_length * 4); sel->buf_offset += pkt_length * 4; if (sel->buf_offset >= sel->buf_len) { finished = true; shd_text = sel->tmp_buf; } } if (finished) { struct tgsi_token *tokens; /* check for null termination */ uint32_t last_chunk_offset = sel->buf_offset ? 
sel->buf_offset : pkt_length * 4; if (last_chunk_offset < 4 || !memchr(shd_text + last_chunk_offset - 4, '\0', 4)) { ret = EINVAL; goto error; } tokens = calloc(num_tokens + 10, sizeof(struct tgsi_token)); if (!tokens) { ret = ENOMEM; goto error; } if (!tgsi_text_translate((const char *)shd_text, tokens, num_tokens + 10)) { free(tokens); ret = EINVAL; goto error; } if (vrend_finish_shader(ctx, sel, tokens)) { free(tokens); ret = EINVAL; goto error; } else { #ifdef NDEBUG free(sel->tmp_buf); sel->tmp_buf = NULL; #endif } free(tokens); sub_ctx->long_shader_in_progress_handle[type] = 0; } if (new_shader) { ret_handle = vrend_renderer_object_insert(ctx, sel, handle, VIRGL_OBJECT_SHADER); if (ret_handle == 0) { ret = ENOMEM; goto error; } } return 0; error: if (new_shader) vrend_destroy_shader_selector(sel); else vrend_renderer_object_destroy(ctx, handle); return ret; } void vrend_bind_shader(struct vrend_context *ctx, uint32_t handle, uint32_t type) { struct vrend_shader_selector *sel; if (type > PIPE_SHADER_COMPUTE) return; struct vrend_sub_context *sub_ctx = ctx->sub; if (handle == 0) { if (type == PIPE_SHADER_COMPUTE) sub_ctx->cs_shader_dirty = true; else sub_ctx->shader_dirty = true; vrend_shader_state_reference(&sub_ctx->shaders[type], NULL); return; } sel = vrend_object_lookup(sub_ctx->object_hash, handle, VIRGL_OBJECT_SHADER); if (!sel) return; if (sel->type != type) return; if (sub_ctx->shaders[sel->type] != sel) { if (type == PIPE_SHADER_COMPUTE) sub_ctx->cs_shader_dirty = true; else sub_ctx->shader_dirty = true; sub_ctx->prog_ids[sel->type] = 0; } vrend_shader_state_reference(&sub_ctx->shaders[sel->type], sel); } static float vrend_color_convert_linear_to_srgb(float color) { return color <= 0.0031308f ? 12.92f * color : 1.055f * powf(color, (1.f / 2.4f)) - 0.055f; } void vrend_clear(struct vrend_context *ctx, unsigned buffers, const union pipe_color_union *color, double depth, unsigned stencil) { GLbitfield bits = 0; struct vrend_sub_context *sub_ctx = ctx->sub; if (ctx->in_error) return; if (ctx->ctx_switch_pending) vrend_finish_context_switch(ctx); vrend_update_frontface_state(sub_ctx); if (sub_ctx->stencil_state_dirty) vrend_update_stencil_state(sub_ctx); if (sub_ctx->scissor_state_dirty) vrend_update_scissor_state(sub_ctx); if (sub_ctx->viewport_state_dirty) vrend_update_viewport_state(sub_ctx); vrend_use_program(sub_ctx, 0); glDisable(GL_SCISSOR_TEST); float colorf[4]; memcpy(colorf, color->f, sizeof(colorf)); if (sub_ctx->nr_cbufs && sub_ctx->surf[0] && vrend_resource_has_24bpp_internal_format(sub_ctx->surf[0]->texture) && util_format_is_srgb(sub_ctx->surf[0]->format)) { VREND_DEBUG(dbg_tex, ctx, "manually converting glClearColor from linear->srgb colorspace for EGL-backed framebuffer color attachment" " (surface format is %s; resource format is %s)\n", util_format_name(sub_ctx->surf[0]->format), util_format_name(sub_ctx->surf[0]->texture->base.format)); for (int i = 0; i < 3; ++i) // i < 3: don't convert alpha channel colorf[i] = vrend_color_convert_linear_to_srgb(colorf[i]); } if (buffers & PIPE_CLEAR_COLOR) { if (sub_ctx->nr_cbufs && sub_ctx->surf[0] && vrend_format_is_emulated_alpha(sub_ctx->surf[0]->format)) { glClearColor(colorf[3], 0.0, 0.0, 0.0); } else if (sub_ctx->nr_cbufs && sub_ctx->surf[0] && vrend_resource_is_emulated_bgra(sub_ctx->surf[0]->texture)) { VREND_DEBUG(dbg_bgra, ctx, "swizzling glClearColor() since rendering surface is an externally-stored BGR* resource\n"); glClearColor(colorf[2], colorf[1], colorf[0], colorf[3]); } else { glClearColor(colorf[0], 
colorf[1], colorf[2], colorf[3]); } /* This function implements Gallium's full clear callback (st->pipe->clear) on the host. This callback requires no color component be masked. We must unmask all components before calling glClear* and restore the previous colormask afterwards, as Gallium expects. */ if (sub_ctx->hw_blend_state.independent_blend_enable && has_feature(feat_indep_blend)) { int i; for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) glColorMaskIndexedEXT(i, GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); } else glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); } if (buffers & PIPE_CLEAR_DEPTH) { /* gallium clears don't respect depth mask */ glDepthMask(GL_TRUE); if (vrend_state.use_gles) { if (depth < 0.0f || depth > 1.0f) { /* Only warn, it is clamped by the function. */ report_gles_warn(ctx, GLES_WARN_DEPTH_CLEAR); } glClearDepthf(depth); } else { glClearDepth(depth); } } if (buffers & PIPE_CLEAR_STENCIL) { glStencilMask(~0u); glClearStencil(stencil); } if (sub_ctx->hw_rs_state.rasterizer_discard) glDisable(GL_RASTERIZER_DISCARD); if (buffers & PIPE_CLEAR_COLOR) { uint32_t mask = 0; int i; for (i = 0; i < sub_ctx->nr_cbufs; i++) { if (sub_ctx->surf[i]) mask |= (1 << i); } if (mask != (buffers >> 2)) { mask = buffers >> 2; while (mask) { i = u_bit_scan(&mask); if (i < PIPE_MAX_COLOR_BUFS && sub_ctx->surf[i] && util_format_is_pure_uint(sub_ctx->surf[i]->format)) glClearBufferuiv(GL_COLOR, i, (GLuint *)colorf); else if (i < PIPE_MAX_COLOR_BUFS && sub_ctx->surf[i] && util_format_is_pure_sint(sub_ctx->surf[i]->format)) glClearBufferiv(GL_COLOR, i, (GLint *)colorf); else glClearBufferfv(GL_COLOR, i, (GLfloat *)colorf); } } else bits |= GL_COLOR_BUFFER_BIT; } if (buffers & PIPE_CLEAR_DEPTH) bits |= GL_DEPTH_BUFFER_BIT; if (buffers & PIPE_CLEAR_STENCIL) bits |= GL_STENCIL_BUFFER_BIT; if (bits) glClear(bits); /* Is it really necessary to restore the old states? The only reason we * get here is because the guest cleared all those states but gallium * didn't forward them before calling the clear command */ if (sub_ctx->hw_rs_state.rasterizer_discard) glEnable(GL_RASTERIZER_DISCARD); if (buffers & PIPE_CLEAR_DEPTH) { if (!sub_ctx->dsa_state.depth.writemask) glDepthMask(GL_FALSE); } /* Restore previous stencil buffer write masks for both front and back faces */ if (buffers & PIPE_CLEAR_STENCIL) { glStencilMaskSeparate(GL_FRONT, sub_ctx->dsa_state.stencil[0].writemask); glStencilMaskSeparate(GL_BACK, sub_ctx->dsa_state.stencil[1].writemask); } /* Restore previous colormask */ if (buffers & PIPE_CLEAR_COLOR) { if (sub_ctx->hw_blend_state.independent_blend_enable && has_feature(feat_indep_blend)) { int i; for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) { struct pipe_blend_state *blend = &sub_ctx->hw_blend_state; glColorMaskIndexedEXT(i, blend->rt[i].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE, blend->rt[i].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE, blend->rt[i].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE, blend->rt[i].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE); } } else { glColorMask(sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE, sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE, sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE, sub_ctx->hw_blend_state.rt[0].colormask & PIPE_MASK_A ?
GL_TRUE : GL_FALSE); } } if (sub_ctx->hw_rs_state.scissor) glEnable(GL_SCISSOR_TEST); else glDisable(GL_SCISSOR_TEST); } void vrend_clear_texture(struct vrend_context* ctx, uint32_t handle, uint32_t level, const struct pipe_box *box, const void * data) { GLenum format, type; struct vrend_resource *res; if (handle) res = vrend_renderer_ctx_res_lookup(ctx, handle); else { vrend_printf( "cannot find resource for handle %d\n", handle); return; } enum virgl_formats fmt = res->base.format; format = tex_conv_table[fmt].glformat; type = tex_conv_table[fmt].gltype; if (vrend_state.use_gles) { glClearTexSubImageEXT(res->id, level, box->x, box->y, box->z, box->width, box->height, box->depth, format, type, data); } else { glClearTexSubImage(res->id, level, box->x, box->y, box->z, box->width, box->height, box->depth, format, type, data); } } static void vrend_update_scissor_state(struct vrend_sub_context *sub_ctx) { struct pipe_scissor_state *ss; GLint y; GLuint idx; unsigned mask = sub_ctx->scissor_state_dirty; while (mask) { idx = u_bit_scan(&mask); if (idx >= PIPE_MAX_VIEWPORTS) { vrend_report_buffer_error(sub_ctx->parent, 0); break; } ss = &sub_ctx->ss[idx]; y = ss->miny; if (idx > 0 && has_feature(feat_viewport_array)) glScissorIndexed(idx, ss->minx, y, ss->maxx - ss->minx, ss->maxy - ss->miny); else glScissor(ss->minx, y, ss->maxx - ss->minx, ss->maxy - ss->miny); } sub_ctx->scissor_state_dirty = 0; } static void vrend_update_viewport_state(struct vrend_sub_context *sub_ctx) { GLint cy; unsigned mask = sub_ctx->viewport_state_dirty; int idx; while (mask) { idx = u_bit_scan(&mask); if (sub_ctx->viewport_is_negative) cy = sub_ctx->vps[idx].cur_y - sub_ctx->vps[idx].height; else cy = sub_ctx->vps[idx].cur_y; if (idx > 0 && has_feature(feat_viewport_array)) glViewportIndexedf(idx, sub_ctx->vps[idx].cur_x, cy, sub_ctx->vps[idx].width, sub_ctx->vps[idx].height); else glViewport(sub_ctx->vps[idx].cur_x, cy, sub_ctx->vps[idx].width, sub_ctx->vps[idx].height); if (idx && has_feature(feat_viewport_array)) if (vrend_state.use_gles) { glDepthRangeIndexedfOES(idx, sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val); } else glDepthRangeIndexed(idx, sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val); else if (vrend_state.use_gles) glDepthRangefOES(sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val); else glDepthRange(sub_ctx->vps[idx].near_val, sub_ctx->vps[idx].far_val); } sub_ctx->viewport_state_dirty = 0; } static GLenum get_gs_xfb_mode(GLenum mode) { switch (mode) { case GL_POINTS: return GL_POINTS; case GL_LINE_STRIP: return GL_LINES; case GL_TRIANGLE_STRIP: return GL_TRIANGLES; default: vrend_printf( "illegal gs transform feedback mode %d\n", mode); return GL_POINTS; } } static GLenum get_tess_xfb_mode(int mode, bool is_point_mode) { if (is_point_mode) return GL_POINTS; switch (mode) { case GL_QUADS: case GL_TRIANGLES: return GL_TRIANGLES; case GL_LINES: return GL_LINES; default: vrend_printf( "illegal gs transform feedback mode %d\n", mode); return GL_POINTS; } } static GLenum get_xfb_mode(GLenum mode) { switch (mode) { case GL_POINTS: return GL_POINTS; case GL_TRIANGLES: case GL_TRIANGLE_STRIP: case GL_TRIANGLE_FAN: case GL_QUADS: case GL_QUAD_STRIP: case GL_POLYGON: return GL_TRIANGLES; case GL_LINES: case GL_LINE_LOOP: case GL_LINE_STRIP: return GL_LINES; default: vrend_printf( "failed to translate TFB %d\n", mode); return GL_POINTS; } } static void vrend_draw_bind_vertex_legacy(struct vrend_context *ctx, struct vrend_vertex_element_array *va) { uint32_t enable_bitmask; uint32_t 
disable_bitmask; int i; enable_bitmask = 0; disable_bitmask = ~((1ull << va->count) - 1); for (i = 0; i < (int)va->count; i++) { struct vrend_vertex_element *ve = &va->elements[i]; int vbo_index = ve->base.vertex_buffer_index; struct vrend_resource *res; GLint loc; if (i >= ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs) { /* XYZZY: debug this? */ break; } res = (struct vrend_resource *)ctx->sub->vbo[vbo_index].base.buffer; if (!res) { vrend_printf("cannot find vbo buf %d %d %d\n", i, va->count, ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs); continue; } if (vrend_state.use_explicit_locations || has_feature(feat_gles31_vertex_attrib_binding)) { loc = i; } else { if (ctx->sub->prog->attrib_locs) { loc = ctx->sub->prog->attrib_locs[i]; } else loc = -1; if (loc == -1) { vrend_printf("%s: cannot find loc %d %d %d\n", ctx->debug_name, i, va->count, ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs); if (i == 0) { vrend_printf("%s: shader probably didn't compile - skipping rendering\n", ctx->debug_name); return; } continue; } } if (ve->type == GL_FALSE) { vrend_printf("failed to translate vertex type - skipping render\n"); return; } glBindBuffer(GL_ARRAY_BUFFER, res->id); struct vrend_vertex_buffer *vbo = &ctx->sub->vbo[vbo_index]; if (vbo->base.stride == 0) { void *data; /* for 0 stride we are kinda screwed */ data = glMapBufferRange(GL_ARRAY_BUFFER, vbo->base.buffer_offset, ve->nr_chan * sizeof(GLfloat), GL_MAP_READ_BIT); switch (ve->nr_chan) { case 1: glVertexAttrib1fv(loc, data); break; case 2: glVertexAttrib2fv(loc, data); break; case 3: glVertexAttrib3fv(loc, data); break; case 4: default: glVertexAttrib4fv(loc, data); break; } glUnmapBuffer(GL_ARRAY_BUFFER); disable_bitmask |= (1 << loc); } else { enable_bitmask |= (1 << loc); if (util_format_is_pure_integer(ve->base.src_format)) { glVertexAttribIPointer(loc, ve->nr_chan, ve->type, vbo->base.stride, (void *)(unsigned long)(ve->base.src_offset + vbo->base.buffer_offset)); } else { glVertexAttribPointer(loc, ve->nr_chan, ve->type, ve->norm, vbo->base.stride, (void *)(unsigned long)(ve->base.src_offset + vbo->base.buffer_offset)); } glVertexAttribDivisorARB(loc, ve->base.instance_divisor); } } if (ctx->sub->enabled_attribs_bitmask != enable_bitmask) { uint32_t mask = ctx->sub->enabled_attribs_bitmask & disable_bitmask; while (mask) { i = u_bit_scan(&mask); glDisableVertexAttribArray(i); } ctx->sub->enabled_attribs_bitmask &= ~disable_bitmask; mask = ctx->sub->enabled_attribs_bitmask ^ enable_bitmask; while (mask) { i = u_bit_scan(&mask); glEnableVertexAttribArray(i); } ctx->sub->enabled_attribs_bitmask = enable_bitmask; } } static void vrend_draw_bind_vertex_binding(struct vrend_context *ctx, struct vrend_vertex_element_array *va) { int i; glBindVertexArray(va->id); if (ctx->sub->vbo_dirty) { struct vrend_vertex_buffer *vbo = &ctx->sub->vbo[0]; if (has_feature(feat_bind_vertex_buffers)) { GLsizei count = MAX2(ctx->sub->num_vbos, ctx->sub->old_num_vbos); GLuint buffers[PIPE_MAX_ATTRIBS]; GLintptr offsets[PIPE_MAX_ATTRIBS]; GLsizei strides[PIPE_MAX_ATTRIBS]; for (i = 0; i < ctx->sub->num_vbos; i++) { struct vrend_resource *res = (struct vrend_resource *)vbo[i].base.buffer; if (res) { buffers[i] = res->id; offsets[i] = vbo[i].base.buffer_offset; strides[i] = vbo[i].base.stride; } else { buffers[i] = 0; offsets[i] = 0; strides[i] = 0; } } for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++) { buffers[i] = 0; offsets[i] = 0; strides[i] = 0; } glBindVertexBuffers(0, count, buffers, offsets, 
strides); } else { for (i = 0; i < ctx->sub->num_vbos; i++) { struct vrend_resource *res = (struct vrend_resource *)vbo[i].base.buffer; if (res) glBindVertexBuffer(i, res->id, vbo[i].base.buffer_offset, vbo[i].base.stride); else glBindVertexBuffer(i, 0, 0, 0); } for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++) glBindVertexBuffer(i, 0, 0, 0); } ctx->sub->vbo_dirty = false; } } static int vrend_draw_bind_samplers_shader(struct vrend_sub_context *sub_ctx, int shader_type, int next_sampler_id) { int sampler_index = 0; int n_samplers = 0; uint32_t dirty = sub_ctx->sampler_views_dirty[shader_type]; uint32_t mask = sub_ctx->prog->samplers_used_mask[shader_type]; struct vrend_shader_view *sviews = &sub_ctx->views[shader_type]; while (mask) { int i = u_bit_scan(&mask); struct vrend_sampler_view *tview = sviews->views[i]; if ((dirty & (1 << i)) && tview) { if (sub_ctx->prog->shadow_samp_mask[shader_type] & (1 << i)) { glUniform4f(sub_ctx->prog->shadow_samp_mask_locs[shader_type][sampler_index], (tview->gl_swizzle[0] == GL_ZERO || tview->gl_swizzle[0] == GL_ONE) ? 0.0 : 1.0, (tview->gl_swizzle[1] == GL_ZERO || tview->gl_swizzle[1] == GL_ONE) ? 0.0 : 1.0, (tview->gl_swizzle[2] == GL_ZERO || tview->gl_swizzle[2] == GL_ONE) ? 0.0 : 1.0, (tview->gl_swizzle[3] == GL_ZERO || tview->gl_swizzle[3] == GL_ONE) ? 0.0 : 1.0); glUniform4f(sub_ctx->prog->shadow_samp_add_locs[shader_type][sampler_index], tview->gl_swizzle[0] == GL_ONE ? 1.0 : 0.0, tview->gl_swizzle[1] == GL_ONE ? 1.0 : 0.0, tview->gl_swizzle[2] == GL_ONE ? 1.0 : 0.0, tview->gl_swizzle[3] == GL_ONE ? 1.0 : 0.0); } if (tview->texture) { GLuint id = tview->id; struct vrend_resource *texture = tview->texture; GLenum target = tview->target; debug_texture(__func__, tview->texture); if (has_bit(tview->texture->storage_bits, VREND_STORAGE_GL_BUFFER)) { id = texture->tbo_tex_id; target = GL_TEXTURE_BUFFER; } glActiveTexture(GL_TEXTURE0 + next_sampler_id); glBindTexture(target, id); if (vrend_state.use_gles) { const unsigned levels = tview->levels ? 
tview->levels : tview->texture->base.last_level + 1u; sub_ctx->texture_levels[shader_type][n_samplers++] = levels; } if (sub_ctx->views[shader_type].old_ids[i] != id || sub_ctx->sampler_views_dirty[shader_type] & (1 << i)) { vrend_apply_sampler_state(sub_ctx, texture, shader_type, i, next_sampler_id, tview); sviews->old_ids[i] = id; } dirty &= ~(1 << i); } } sampler_index++; next_sampler_id++; } sub_ctx->n_samplers[shader_type] = n_samplers; sub_ctx->sampler_views_dirty[shader_type] = dirty; return next_sampler_id; } static int vrend_draw_bind_ubo_shader(struct vrend_sub_context *sub_ctx, int shader_type, int next_ubo_id) { uint32_t mask, dirty, update; struct pipe_constant_buffer *cb; struct vrend_resource *res; if (!has_feature(feat_ubo)) return next_ubo_id; mask = sub_ctx->prog->ubo_used_mask[shader_type]; dirty = sub_ctx->const_bufs_dirty[shader_type]; update = dirty & sub_ctx->const_bufs_used_mask[shader_type]; if (!update) return next_ubo_id + util_bitcount(mask); while (mask) { /* The const_bufs_used_mask stores the gallium uniform buffer indices */ int i = u_bit_scan(&mask); if (update & (1 << i)) { /* The cbs array is indexed using the gallium uniform buffer index */ cb = &sub_ctx->cbs[shader_type][i]; res = (struct vrend_resource *)cb->buffer; glBindBufferRange(GL_UNIFORM_BUFFER, next_ubo_id, res->id, cb->buffer_offset, cb->buffer_size); dirty &= ~(1 << i); } next_ubo_id++; } sub_ctx->const_bufs_dirty[shader_type] = dirty; return next_ubo_id; } static void vrend_draw_bind_const_shader(struct vrend_sub_context *sub_ctx, int shader_type, bool new_program) { if (sub_ctx->consts[shader_type].consts && sub_ctx->shaders[shader_type] && (sub_ctx->prog->const_location[shader_type] != -1) && (sub_ctx->const_dirty[shader_type] || new_program)) { glUniform4uiv(sub_ctx->prog->const_location[shader_type], sub_ctx->shaders[shader_type]->sinfo.num_consts, sub_ctx->consts[shader_type].consts); sub_ctx->const_dirty[shader_type] = false; } } static void vrend_draw_bind_ssbo_shader(struct vrend_sub_context *sub_ctx, int shader_type) { uint32_t mask; struct vrend_ssbo *ssbo; struct vrend_resource *res; int i; if (!has_feature(feat_ssbo)) return; if (!sub_ctx->prog->ssbo_used_mask[shader_type]) return; if (!sub_ctx->ssbo_used_mask[shader_type]) return; mask = sub_ctx->ssbo_used_mask[shader_type]; while (mask) { i = u_bit_scan(&mask); ssbo = &sub_ctx->ssbo[shader_type][i]; res = (struct vrend_resource *)ssbo->res; glBindBufferRange(GL_SHADER_STORAGE_BUFFER, i, res->id, ssbo->buffer_offset, ssbo->buffer_size); } } static void vrend_draw_bind_abo_shader(struct vrend_sub_context *sub_ctx) { uint32_t mask; struct vrend_abo *abo; struct vrend_resource *res; int i; if (!has_feature(feat_atomic_counters)) return; mask = sub_ctx->abo_used_mask; while (mask) { i = u_bit_scan(&mask); abo = &sub_ctx->abo[i]; res = (struct vrend_resource *)abo->res; glBindBufferRange(GL_ATOMIC_COUNTER_BUFFER, i, res->id, abo->buffer_offset, abo->buffer_size); } } static void vrend_draw_bind_images_shader(struct vrend_sub_context *sub_ctx, int shader_type) { GLenum access; GLboolean layered; struct vrend_image_view *iview; uint32_t mask, tex_id, level, first_layer; if (!sub_ctx->images_used_mask[shader_type]) return; if (!sub_ctx->prog->img_locs[shader_type]) return; if (!has_feature(feat_images)) return; mask = sub_ctx->images_used_mask[shader_type]; while (mask) { unsigned i = u_bit_scan(&mask); if (!(sub_ctx->prog->images_used_mask[shader_type] & (1 << i))) continue; iview = &sub_ctx->image_views[shader_type][i]; tex_id = 
iview->texture->id; if (has_bit(iview->texture->storage_bits, VREND_STORAGE_GL_BUFFER)) { if (!iview->texture->tbo_tex_id) glGenTextures(1, &iview->texture->tbo_tex_id); /* glTexBuffer doesn't accept GL_RGBA8_SNORM, find an appropriate replacement. */ uint32_t format = (iview->format == GL_RGBA8_SNORM) ? GL_RGBA8UI : iview->format; glBindBufferARB(GL_TEXTURE_BUFFER, iview->texture->id); glBindTexture(GL_TEXTURE_BUFFER, iview->texture->tbo_tex_id); if (has_feature(feat_arb_or_gles_ext_texture_buffer)) glTexBuffer(GL_TEXTURE_BUFFER, format, iview->texture->id); tex_id = iview->texture->tbo_tex_id; level = first_layer = 0; layered = GL_TRUE; } else { level = iview->u.tex.level; first_layer = iview->u.tex.first_layer; layered = !((iview->texture->base.array_size > 1 || iview->texture->base.depth0 > 1) && (iview->u.tex.first_layer == iview->u.tex.last_layer)); } if (!vrend_state.use_gles) glUniform1i(sub_ctx->prog->img_locs[shader_type][i], i); switch (iview->access) { case PIPE_IMAGE_ACCESS_READ: access = GL_READ_ONLY; break; case PIPE_IMAGE_ACCESS_WRITE: access = GL_WRITE_ONLY; break; case PIPE_IMAGE_ACCESS_READ_WRITE: access = GL_READ_WRITE; break; default: vrend_printf( "Invalid access specified\n"); return; } glBindImageTexture(i, tex_id, level, layered, first_layer, access, iview->format); } } static void vrend_draw_bind_objects(struct vrend_sub_context *sub_ctx, bool new_program) { int next_ubo_id = 0, next_sampler_id = 0; for (int shader_type = PIPE_SHADER_VERTEX; shader_type <= sub_ctx->last_shader_idx; shader_type++) { next_ubo_id = vrend_draw_bind_ubo_shader(sub_ctx, shader_type, next_ubo_id); vrend_draw_bind_const_shader(sub_ctx, shader_type, new_program); next_sampler_id = vrend_draw_bind_samplers_shader(sub_ctx, shader_type, next_sampler_id); vrend_draw_bind_images_shader(sub_ctx, shader_type); vrend_draw_bind_ssbo_shader(sub_ctx, shader_type); if (vrend_state.use_gles) { if (sub_ctx->prog->tex_levels_uniform_id[shader_type] != -1) { glUniform1iv(sub_ctx->prog->tex_levels_uniform_id[shader_type], sub_ctx->n_samplers[shader_type], sub_ctx->texture_levels[shader_type]); } } } vrend_draw_bind_abo_shader(sub_ctx); if (vrend_state.use_core_profile && sub_ctx->prog->fs_stipple_loc != -1) { glActiveTexture(GL_TEXTURE0 + next_sampler_id); glBindTexture(GL_TEXTURE_2D, sub_ctx->parent->pstipple_tex_id); glUniform1i(sub_ctx->prog->fs_stipple_loc, next_sampler_id); } if (sub_ctx->prog->fs_alpha_ref_val_loc != -1) { assert(sub_ctx->prog->fs_alpha_func_loc != -1); /* If it's an integer format surface, alpha test shouldn't do anything. 
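The test itself is emulated in the generated fragment shader through the uniforms behind fs_alpha_func_loc/fs_alpha_ref_val_loc, and for a pure-integer surface the written alpha is not a normalized value that the float reference could meaningfully be compared against, so the code below falls back to PIPE_FUNC_ALWAYS instead of dsa_state.alpha.func.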
*/ if (sub_ctx->dsa_state.alpha.enabled && sub_ctx->surf[0] && !util_format_is_pure_integer(sub_ctx->surf[0]->format)) glUniform1i(sub_ctx->prog->fs_alpha_func_loc, sub_ctx->dsa_state.alpha.func); else glUniform1i(sub_ctx->prog->fs_alpha_func_loc, PIPE_FUNC_ALWAYS); glUniform1f(sub_ctx->prog->fs_alpha_ref_val_loc, sub_ctx->dsa_state.alpha.ref_value); } } static void vrend_inject_tcs(struct vrend_sub_context *sub_ctx, int vertices_per_patch) { struct pipe_stream_output_info so_info; memset(&so_info, 0, sizeof(so_info)); struct vrend_shader_selector *sel = vrend_create_shader_state(&so_info, false, PIPE_SHADER_TESS_CTRL); struct vrend_shader *shader; shader = CALLOC_STRUCT(vrend_shader); vrend_fill_shader_key(sub_ctx, sel, &shader->key); shader->sel = sel; list_inithead(&shader->programs); strarray_alloc(&shader->glsl_strings, SHADER_MAX_STRINGS); vrend_shader_create_passthrough_tcs(sub_ctx->parent, &sub_ctx->parent->shader_cfg, sub_ctx->shaders[PIPE_SHADER_VERTEX]->tokens, &shader->key, vrend_state.tess_factors, &sel->sinfo, &shader->glsl_strings, vertices_per_patch); // Need to add inject the selected shader to the shader selector and then the code below // can continue sel->tokens = NULL; sel->current = shader; sub_ctx->shaders[PIPE_SHADER_TESS_CTRL] = sel; sub_ctx->shaders[PIPE_SHADER_TESS_CTRL]->num_shaders = 1; vrend_compile_shader(sub_ctx, shader); } static bool vrend_select_program(struct vrend_sub_context *sub_ctx, ubyte vertices_per_patch) { struct vrend_linked_shader_program *prog; bool fs_dirty, vs_dirty, gs_dirty, tcs_dirty, tes_dirty; bool dual_src = util_blend_state_is_dual(&sub_ctx->blend_state, 0); bool new_program = false; struct vrend_shader_selector **shaders = sub_ctx->shaders; sub_ctx->shader_dirty = false; if (!shaders[PIPE_SHADER_VERTEX] || !shaders[PIPE_SHADER_FRAGMENT]) { return false; } // For some GPU, we'd like to use integer variable in generated GLSL if // the input buffers are integer formats. But we actually don't know the // buffer formats when the shader is created, we only know it here. // Set it to true so the underlying code knows to use the buffer formats // now. 
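/* A sketch of what follows, inferred from the code rather than stated
 * anywhere authoritative: variants are selected for VS, TCS, TES, GS, FS and
 * then once more in roughly reverse order, since parts of each stage's key
 * are taken from the currently selected variant of its neighbours (see the
 * current->var_sinfo reads in vrend_sync_shader_io); a single pass could
 * build a key against a neighbour variant that is replaced later in the same
 * sequence. The drawing flag is raised only around the vertex-shader
 * selection because only the VS key consumes the vertex-element integer
 * bitmasks, and vrend_fill_shader_key ignores them unless sub_ctx->drawing
 * is set. */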
sub_ctx->drawing = true; vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_VERTEX], &vs_dirty); sub_ctx->drawing = false; if (shaders[PIPE_SHADER_TESS_CTRL] && shaders[PIPE_SHADER_TESS_CTRL]->tokens) vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_TESS_CTRL], &tcs_dirty); else if (vrend_state.use_gles && shaders[PIPE_SHADER_TESS_EVAL]) { VREND_DEBUG(dbg_shader, sub_ctx->parent, "Need to inject a TCS\n"); vrend_inject_tcs(sub_ctx, vertices_per_patch); vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_VERTEX], &vs_dirty); } if (shaders[PIPE_SHADER_TESS_EVAL]) vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_TESS_EVAL], &tes_dirty); if (shaders[PIPE_SHADER_GEOMETRY]) vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_GEOMETRY], &gs_dirty); vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_FRAGMENT], &fs_dirty); // NOTE: run shader selection again as a workaround to #180 - "duplicated shader compilation" if (shaders[PIPE_SHADER_GEOMETRY]) vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_GEOMETRY], &gs_dirty); if (shaders[PIPE_SHADER_TESS_EVAL]) vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_TESS_EVAL], &tes_dirty); if (shaders[PIPE_SHADER_TESS_CTRL] && shaders[PIPE_SHADER_TESS_CTRL]->tokens) vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_TESS_CTRL], &tcs_dirty); else if (vrend_state.use_gles && shaders[PIPE_SHADER_TESS_EVAL]) { VREND_DEBUG(dbg_shader, sub_ctx->parent, "Need to inject a TCS\n"); vrend_inject_tcs(sub_ctx, vertices_per_patch); } sub_ctx->drawing = true; vrend_shader_select(sub_ctx, shaders[PIPE_SHADER_VERTEX], &vs_dirty); sub_ctx->drawing = false; uint8_t gles_emulate_query_texture_levels_mask = 0; for (uint i = 0; i < PIPE_SHADER_TYPES; i++) { struct vrend_shader_selector *sel = shaders[i]; if (!sel) continue; struct vrend_shader *shader = sel->current; if (shader && !shader->is_compiled) {//shader->sel->type == PIPE_SHADER_FRAGMENT || shader->sel->type == PIPE_SHADER_GEOMETRY) { if (!vrend_compile_shader(sub_ctx, shader)) return false; } if (vrend_state.use_gles && sel->sinfo.gles_use_tex_query_level) gles_emulate_query_texture_levels_mask |= 1 << i; } if (!shaders[PIPE_SHADER_VERTEX]->current || !shaders[PIPE_SHADER_FRAGMENT]->current || (shaders[PIPE_SHADER_GEOMETRY] && !shaders[PIPE_SHADER_GEOMETRY]->current) || (shaders[PIPE_SHADER_TESS_CTRL] && !shaders[PIPE_SHADER_TESS_CTRL]->current) || (shaders[PIPE_SHADER_TESS_EVAL] && !shaders[PIPE_SHADER_TESS_EVAL]->current)) { vrend_printf( "failure to compile shader variants: %s\n", sub_ctx->parent->debug_name); return false; } GLuint vs_id = shaders[PIPE_SHADER_VERTEX]->current->id; GLuint fs_id = shaders[PIPE_SHADER_FRAGMENT]->current->id; GLuint gs_id = shaders[PIPE_SHADER_GEOMETRY] ? shaders[PIPE_SHADER_GEOMETRY]->current->id : 0; GLuint tcs_id = shaders[PIPE_SHADER_TESS_CTRL] ? shaders[PIPE_SHADER_TESS_CTRL]->current->id : 0; GLuint tes_id = shaders[PIPE_SHADER_TESS_EVAL] ? 
shaders[PIPE_SHADER_TESS_EVAL]->current->id : 0; if (shaders[PIPE_SHADER_FRAGMENT]->current->sel->sinfo.num_outputs <= 1) dual_src = false; bool same_prog = sub_ctx->prog && vs_id == sub_ctx->prog_ids[PIPE_SHADER_VERTEX] && fs_id == sub_ctx->prog_ids[PIPE_SHADER_FRAGMENT] && gs_id == sub_ctx->prog_ids[PIPE_SHADER_GEOMETRY] && tcs_id == sub_ctx->prog_ids[PIPE_SHADER_TESS_CTRL] && tes_id == sub_ctx->prog_ids[PIPE_SHADER_TESS_EVAL] && sub_ctx->prog->dual_src_linked == dual_src; if (!same_prog) { prog = lookup_shader_program(sub_ctx, vs_id, fs_id, gs_id, tcs_id, tes_id, dual_src); if (!prog) { prog = add_shader_program(sub_ctx, sub_ctx->shaders[PIPE_SHADER_VERTEX]->current, sub_ctx->shaders[PIPE_SHADER_FRAGMENT]->current, gs_id ? sub_ctx->shaders[PIPE_SHADER_GEOMETRY]->current : NULL, tcs_id ? sub_ctx->shaders[PIPE_SHADER_TESS_CTRL]->current : NULL, tes_id ? sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->current : NULL); if (!prog) return false; prog->gles_use_query_texturelevel_mask = gles_emulate_query_texture_levels_mask; } sub_ctx->last_shader_idx = sub_ctx->shaders[PIPE_SHADER_TESS_EVAL] ? PIPE_SHADER_TESS_EVAL : (sub_ctx->shaders[PIPE_SHADER_GEOMETRY] ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT); } else prog = sub_ctx->prog; if (sub_ctx->prog != prog) { new_program = true; sub_ctx->prog_ids[PIPE_SHADER_VERTEX] = vs_id; sub_ctx->prog_ids[PIPE_SHADER_FRAGMENT] = fs_id; sub_ctx->prog_ids[PIPE_SHADER_GEOMETRY] = gs_id; sub_ctx->prog_ids[PIPE_SHADER_TESS_CTRL] = tcs_id; sub_ctx->prog_ids[PIPE_SHADER_TESS_EVAL] = tes_id; sub_ctx->prog_ids[PIPE_SHADER_COMPUTE] = 0; sub_ctx->prog = prog; /* mark all constbufs and sampler views as dirty */ for (int stage = PIPE_SHADER_VERTEX; stage <= PIPE_SHADER_FRAGMENT; stage++) { sub_ctx->const_bufs_dirty[stage] = ~0; sub_ctx->sampler_views_dirty[stage] = ~0; } prog->ref_context = sub_ctx; } sub_ctx->cs_shader_dirty = true; return new_program; } void vrend_link_program(struct vrend_context *ctx, uint32_t *handles) { /* Pre-compiling compute shaders needs some additional work */ if (handles[PIPE_SHADER_COMPUTE]) return; /* If we can't force linking, exit early */ if (!handles[PIPE_SHADER_VERTEX] || !handles[PIPE_SHADER_FRAGMENT]) return; struct vrend_shader_selector *prev_handles[PIPE_SHADER_TYPES]; memset(prev_handles, 0, sizeof(prev_handles)); uint32_t prev_shader_ids[PIPE_SHADER_TYPES]; memcpy(prev_shader_ids, ctx->sub->prog_ids, PIPE_SHADER_TYPES * sizeof(uint32_t)); struct vrend_linked_shader_program *prev_prog = ctx->sub->prog; for (uint32_t type = 0; type < PIPE_SHADER_TYPES; ++type) { vrend_shader_state_reference(&prev_handles[type], ctx->sub->shaders[type]); vrend_bind_shader(ctx, handles[type], type); } vrend_select_program(ctx->sub, 1); ctx->sub->shader_dirty = true; ctx->sub->cs_shader_dirty = true; /* undo state changes */ for (uint32_t type = 0; type < PIPE_SHADER_TYPES; ++type) { vrend_shader_state_reference(&ctx->sub->shaders[type], prev_handles[type]); vrend_shader_state_reference(&prev_handles[type], NULL); } memcpy(ctx->sub->prog_ids, prev_shader_ids, PIPE_SHADER_TYPES * sizeof(uint32_t)); ctx->sub->prog = prev_prog; } int vrend_draw_vbo(struct vrend_context *ctx, const struct pipe_draw_info *info, uint32_t cso, uint32_t indirect_handle, uint32_t indirect_draw_count_handle) { int i; bool new_program = false; struct vrend_resource *indirect_res = NULL; struct vrend_resource *indirect_params_res = NULL; struct vrend_sub_context *sub_ctx = ctx->sub; if (ctx->in_error) return 0; if (info->instance_count && !has_feature(feat_draw_instance)) return 
EINVAL; if (info->start_instance && !has_feature(feat_base_instance)) return EINVAL; if (info->indirect.draw_count > 1 && !has_feature(feat_multi_draw_indirect)) return EINVAL; if (indirect_handle) { if (!has_feature(feat_indirect_draw)) return EINVAL; indirect_res = vrend_renderer_ctx_res_lookup(ctx, indirect_handle); if (!indirect_res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, indirect_handle); return 0; } } /* this must be zero until we support the feature */ if (indirect_draw_count_handle) { if (!has_feature(feat_indirect_params)) return EINVAL; indirect_params_res = vrend_renderer_ctx_res_lookup(ctx, indirect_draw_count_handle); if (!indirect_params_res){ vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, indirect_draw_count_handle); return 0; } } if (ctx->ctx_switch_pending) vrend_finish_context_switch(ctx); vrend_update_frontface_state(sub_ctx); if (ctx->sub->stencil_state_dirty) vrend_update_stencil_state(sub_ctx); if (ctx->sub->scissor_state_dirty) vrend_update_scissor_state(sub_ctx); if (ctx->sub->viewport_state_dirty) vrend_update_viewport_state(sub_ctx); if (ctx->sub->blend_state_dirty) vrend_patch_blend_state(sub_ctx); // enable primitive-mode-dependent shader variants if (sub_ctx->prim_mode != (int)info->mode) { // Only refresh shader program when switching in/out of GL_POINTS primitive mode if (sub_ctx->prim_mode == PIPE_PRIM_POINTS || (int)info->mode == PIPE_PRIM_POINTS) sub_ctx->shader_dirty = true; sub_ctx->prim_mode = (int)info->mode; } if (!sub_ctx->ve) { vrend_printf("illegal VE setup - skipping renderering\n"); return 0; } if (sub_ctx->shader_dirty || sub_ctx->swizzle_output_rgb_to_bgr || sub_ctx->convert_linear_to_srgb_on_write) new_program = vrend_select_program(sub_ctx, info->vertices_per_patch); if (!sub_ctx->prog) { vrend_printf("dropping rendering due to missing shaders: %s\n", ctx->debug_name); return 0; } vrend_use_program(sub_ctx, sub_ctx->prog->id); if (vrend_state.use_gles) { /* PIPE_SHADER and TGSI_SHADER have different ordering, so use two * different prefix arrays */ for (unsigned i = PIPE_SHADER_VERTEX; i < PIPE_SHADER_COMPUTE; ++i) { if (sub_ctx->prog->gles_use_query_texturelevel_mask & (1 << i)) { char loc_name[32]; snprintf(loc_name, 32, "%s_texlod", pipe_shader_to_prefix(i)); sub_ctx->prog->tex_levels_uniform_id[i] = glGetUniformLocation(sub_ctx->prog->id, loc_name); } else { sub_ctx->prog->tex_levels_uniform_id[i] = -1; } } } vrend_draw_bind_objects(sub_ctx, new_program); float viewport_neg_val = sub_ctx->viewport_is_negative ? 
-1.0 : 1.0; if (sub_ctx->prog->viewport_neg_val != viewport_neg_val) { glUniform1f(sub_ctx->prog->vs_ws_adjust_loc, viewport_neg_val); sub_ctx->prog->viewport_neg_val = viewport_neg_val; } if (has_feature(feat_cull_distance)) { if (sub_ctx->rs_state.clip_plane_enable) { glUniform1i(sub_ctx->prog->clip_enabled_loc, 1); for (i = 0 ; i < 8; i++) { glUniform4fv(sub_ctx->prog->clip_locs[i], 1, (const GLfloat *)&sub_ctx->ucp_state.ucp[i]); } } else { glUniform1i(sub_ctx->prog->clip_enabled_loc, 0); } } if (has_feature(feat_gles31_vertex_attrib_binding)) vrend_draw_bind_vertex_binding(ctx, sub_ctx->ve); else vrend_draw_bind_vertex_legacy(ctx, sub_ctx->ve); if (info->indexed) { struct vrend_resource *res = (struct vrend_resource *)sub_ctx->ib.buffer; if (!res) { vrend_printf( "VBO missing indexed array buffer\n"); return 0; } glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, res->id); } else glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); if (sub_ctx->current_so) { if (sub_ctx->current_so->xfb_state == XFB_STATE_STARTED_NEED_BEGIN) { if (sub_ctx->shaders[PIPE_SHADER_GEOMETRY]) glBeginTransformFeedback(get_gs_xfb_mode(sub_ctx->shaders[PIPE_SHADER_GEOMETRY]->sinfo.gs_out_prim)); else if (sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]) glBeginTransformFeedback(get_tess_xfb_mode(sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_prim, sub_ctx->shaders[PIPE_SHADER_TESS_EVAL]->sinfo.tes_point_mode)); else glBeginTransformFeedback(get_xfb_mode(info->mode)); sub_ctx->current_so->xfb_state = XFB_STATE_STARTED; } else if (sub_ctx->current_so->xfb_state == XFB_STATE_PAUSED) { glResumeTransformFeedback(); sub_ctx->current_so->xfb_state = XFB_STATE_STARTED; } } if (info->primitive_restart) { if (vrend_state.use_gles) { glEnable(GL_PRIMITIVE_RESTART_FIXED_INDEX); } else if (has_feature(feat_nv_prim_restart)) { glEnableClientState(GL_PRIMITIVE_RESTART_NV); glPrimitiveRestartIndexNV(info->restart_index); } else if (has_feature(feat_gl_prim_restart)) { glEnable(GL_PRIMITIVE_RESTART); glPrimitiveRestartIndex(info->restart_index); } } if (has_feature(feat_indirect_draw)) { GLint buf = indirect_res ? indirect_res->id : 0; if (sub_ctx->draw_indirect_buffer != buf) { glBindBuffer(GL_DRAW_INDIRECT_BUFFER, buf); sub_ctx->draw_indirect_buffer = buf; } if (has_feature(feat_indirect_params)) { GLint buf = indirect_params_res ? indirect_params_res->id : 0; if (sub_ctx->draw_indirect_params_buffer != buf) { glBindBuffer(GL_PARAMETER_BUFFER_ARB, buf); sub_ctx->draw_indirect_params_buffer = buf; } } } if (info->vertices_per_patch && has_feature(feat_tessellation)) glPatchParameteri(GL_PATCH_VERTICES, info->vertices_per_patch); /* If the host support blend_equation_advanced but not fbfetch, * the guest driver will not lower the equation to fbfetch so we need to set up the renderer to * accept those blend equations. * When we transmit the blend mode through alpha_src_factor, alpha_dst_factor is always 0. 
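 * A caveat worth noting (a general property of GL_KHR_blend_equation_advanced
 * rather than anything specific to this file): the advanced equations only
 * apply when rendering to a single color attachment, and with the
 * non-coherent form of the extension a barrier is needed between overlapping
 * primitives; the guest requests it as PIPE_TEXTURE_BARRIER_FRAMEBUFFER,
 * which is turned into glBlendBarrierKHR() earlier in this file.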
*/ uint32_t blend_mask_shader = sub_ctx->shaders[PIPE_SHADER_FRAGMENT]->sinfo.fs_blend_equation_advanced; uint32_t blend_mode = sub_ctx->blend_state.rt[0].alpha_src_factor; uint32_t alpha_dst_factor = sub_ctx->blend_state.rt[0].alpha_dst_factor; bool use_advanced_blending = !has_feature(feat_framebuffer_fetch) && has_feature(feat_blend_equation_advanced) && blend_mask_shader != 0 && blend_mode != 0 && alpha_dst_factor == 0; if(use_advanced_blending) { GLenum blend = translate_blend_func_advanced(blend_mode); glBlendEquation(blend); glEnable(GL_BLEND); } /* set the vertex state up now on a delay */ if (!info->indexed) { GLenum mode = info->mode; int count = cso ? cso : info->count; int start = cso ? 0 : info->start; if (indirect_handle) { if (indirect_params_res) glMultiDrawArraysIndirectCountARB(mode, (GLvoid const *)(unsigned long)info->indirect.offset, info->indirect.indirect_draw_count_offset, info->indirect.draw_count, info->indirect.stride); else if (info->indirect.draw_count > 1) glMultiDrawArraysIndirect(mode, (GLvoid const *)(unsigned long)info->indirect.offset, info->indirect.draw_count, info->indirect.stride); else glDrawArraysIndirect(mode, (GLvoid const *)(unsigned long)info->indirect.offset); } else if (info->instance_count > 0) { if (info->start_instance > 0) glDrawArraysInstancedBaseInstance(mode, start, count, info->instance_count, info->start_instance); else glDrawArraysInstancedARB(mode, start, count, info->instance_count); } else glDrawArrays(mode, start, count); } else { GLenum elsz; GLenum mode = info->mode; switch (sub_ctx->ib.index_size) { case 1: elsz = GL_UNSIGNED_BYTE; break; case 2: elsz = GL_UNSIGNED_SHORT; break; case 4: default: elsz = GL_UNSIGNED_INT; break; } if (indirect_handle) { if (indirect_params_res) glMultiDrawElementsIndirectCountARB(mode, elsz, (GLvoid const *)(unsigned long)info->indirect.offset, info->indirect.indirect_draw_count_offset, info->indirect.draw_count, info->indirect.stride); else if (info->indirect.draw_count > 1) glMultiDrawElementsIndirect(mode, elsz, (GLvoid const *)(unsigned long)info->indirect.offset, info->indirect.draw_count, info->indirect.stride); else glDrawElementsIndirect(mode, elsz, (GLvoid const *)(unsigned long)info->indirect.offset); } else if (info->index_bias) { if (info->instance_count > 0) { if (info->start_instance > 0) glDrawElementsInstancedBaseVertexBaseInstance(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->instance_count, info->index_bias, info->start_instance); else glDrawElementsInstancedBaseVertex(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->instance_count, info->index_bias); } else if (info->min_index != 0 || info->max_index != (unsigned)-1) glDrawRangeElementsBaseVertex(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->index_bias); else glDrawElementsBaseVertex(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->index_bias); } else if (info->instance_count > 1) { glDrawElementsInstancedARB(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset, info->instance_count); } else if (info->min_index != 0 || info->max_index != (unsigned)-1) glDrawRangeElements(mode, info->min_index, info->max_index, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset); else glDrawElements(mode, info->count, elsz, (void *)(unsigned long)sub_ctx->ib.offset); } if (info->primitive_restart) { if (vrend_state.use_gles) { glDisable(GL_PRIMITIVE_RESTART_FIXED_INDEX); } else if 
(has_feature(feat_nv_prim_restart)) { glDisableClientState(GL_PRIMITIVE_RESTART_NV); } else if (has_feature(feat_gl_prim_restart)) { glDisable(GL_PRIMITIVE_RESTART); } } if (sub_ctx->current_so && has_feature(feat_transform_feedback2)) { if (sub_ctx->current_so->xfb_state == XFB_STATE_STARTED) { glPauseTransformFeedback(); sub_ctx->current_so->xfb_state = XFB_STATE_PAUSED; } } return 0; } void vrend_launch_grid(struct vrend_context *ctx, UNUSED uint32_t *block, uint32_t *grid, uint32_t indirect_handle, uint32_t indirect_offset) { bool new_program = false; struct vrend_resource *indirect_res = NULL; if (!has_feature(feat_compute_shader)) return; struct vrend_sub_context *sub_ctx = ctx->sub; if (sub_ctx->cs_shader_dirty) { struct vrend_linked_shader_program *prog; bool cs_dirty; sub_ctx->cs_shader_dirty = false; if (!sub_ctx->shaders[PIPE_SHADER_COMPUTE]) { vrend_printf("dropping rendering due to missing shaders: %s\n", ctx->debug_name); return; } vrend_shader_select(sub_ctx, sub_ctx->shaders[PIPE_SHADER_COMPUTE], &cs_dirty); if (!sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current) { vrend_printf( "failure to select compute shader variant: %s\n", ctx->debug_name); return; } if (!sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current->is_compiled) { if(!vrend_compile_shader(sub_ctx, sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current)) { vrend_printf( "failure to compile compute shader variant: %s\n", ctx->debug_name); return; } } if (sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current->id != (GLuint)sub_ctx->prog_ids[PIPE_SHADER_COMPUTE]) { prog = lookup_cs_shader_program(ctx, sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current->id); if (!prog) { prog = add_cs_shader_program(ctx, sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current); if (!prog) return; } } else prog = sub_ctx->prog; if (sub_ctx->prog != prog) { new_program = true; sub_ctx->prog_ids[PIPE_SHADER_VERTEX] = 0; sub_ctx->prog_ids[PIPE_SHADER_COMPUTE] = sub_ctx->shaders[PIPE_SHADER_COMPUTE]->current->id; sub_ctx->prog = prog; prog->ref_context = sub_ctx; } sub_ctx->shader_dirty = true; } if (!sub_ctx->prog) { vrend_printf("%s: Skipping compute shader execution due to missing shaders: %s\n", __func__, ctx->debug_name); return; } vrend_use_program(sub_ctx, sub_ctx->prog->id); vrend_draw_bind_ubo_shader(sub_ctx, PIPE_SHADER_COMPUTE, 0); vrend_draw_bind_const_shader(sub_ctx, PIPE_SHADER_COMPUTE, new_program); vrend_draw_bind_samplers_shader(sub_ctx, PIPE_SHADER_COMPUTE, 0); vrend_draw_bind_images_shader(sub_ctx, PIPE_SHADER_COMPUTE); vrend_draw_bind_ssbo_shader(sub_ctx, PIPE_SHADER_COMPUTE); vrend_draw_bind_abo_shader(sub_ctx); if (indirect_handle) { indirect_res = vrend_renderer_ctx_res_lookup(ctx, indirect_handle); if (!indirect_res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, indirect_handle); return; } } if (indirect_res) glBindBuffer(GL_DISPATCH_INDIRECT_BUFFER, indirect_res->id); else glBindBuffer(GL_DISPATCH_INDIRECT_BUFFER, 0); if (indirect_res) { glDispatchComputeIndirect(indirect_offset); } else { glDispatchCompute(grid[0], grid[1], grid[2]); } } static GLenum translate_blend_func(uint32_t pipe_blend) { switch(pipe_blend){ case PIPE_BLEND_ADD: return GL_FUNC_ADD; case PIPE_BLEND_SUBTRACT: return GL_FUNC_SUBTRACT; case PIPE_BLEND_REVERSE_SUBTRACT: return GL_FUNC_REVERSE_SUBTRACT; case PIPE_BLEND_MIN: return GL_MIN; case PIPE_BLEND_MAX: return GL_MAX; default: assert("invalid blend token()" == NULL); return 0; } } static GLenum translate_blend_factor(uint32_t pipe_factor) { switch (pipe_factor) { case PIPE_BLENDFACTOR_ONE: return 
GL_ONE; case PIPE_BLENDFACTOR_SRC_COLOR: return GL_SRC_COLOR; case PIPE_BLENDFACTOR_SRC_ALPHA: return GL_SRC_ALPHA; case PIPE_BLENDFACTOR_DST_COLOR: return GL_DST_COLOR; case PIPE_BLENDFACTOR_DST_ALPHA: return GL_DST_ALPHA; case PIPE_BLENDFACTOR_CONST_COLOR: return GL_CONSTANT_COLOR; case PIPE_BLENDFACTOR_CONST_ALPHA: return GL_CONSTANT_ALPHA; case PIPE_BLENDFACTOR_SRC1_COLOR: return GL_SRC1_COLOR; case PIPE_BLENDFACTOR_SRC1_ALPHA: return GL_SRC1_ALPHA; case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE: return GL_SRC_ALPHA_SATURATE; case PIPE_BLENDFACTOR_ZERO: return GL_ZERO; case PIPE_BLENDFACTOR_INV_SRC_COLOR: return GL_ONE_MINUS_SRC_COLOR; case PIPE_BLENDFACTOR_INV_SRC_ALPHA: return GL_ONE_MINUS_SRC_ALPHA; case PIPE_BLENDFACTOR_INV_DST_COLOR: return GL_ONE_MINUS_DST_COLOR; case PIPE_BLENDFACTOR_INV_DST_ALPHA: return GL_ONE_MINUS_DST_ALPHA; case PIPE_BLENDFACTOR_INV_CONST_COLOR: return GL_ONE_MINUS_CONSTANT_COLOR; case PIPE_BLENDFACTOR_INV_CONST_ALPHA: return GL_ONE_MINUS_CONSTANT_ALPHA; case PIPE_BLENDFACTOR_INV_SRC1_COLOR: return GL_ONE_MINUS_SRC1_COLOR; case PIPE_BLENDFACTOR_INV_SRC1_ALPHA: return GL_ONE_MINUS_SRC1_ALPHA; default: assert("invalid blend token()" == NULL); return 0; } } static GLenum translate_logicop(GLuint pipe_logicop) { switch (pipe_logicop) { #define CASE(x) case PIPE_LOGICOP_##x: return GL_##x CASE(CLEAR); CASE(NOR); CASE(AND_INVERTED); CASE(COPY_INVERTED); CASE(AND_REVERSE); CASE(INVERT); CASE(XOR); CASE(NAND); CASE(AND); CASE(EQUIV); CASE(NOOP); CASE(OR_INVERTED); CASE(COPY); CASE(OR_REVERSE); CASE(OR); CASE(SET); default: assert("invalid logicop token()" == NULL); return 0; } #undef CASE } static GLenum translate_stencil_op(GLuint op) { switch (op) { #define CASE(x) case PIPE_STENCIL_OP_##x: return GL_##x CASE(KEEP); CASE(ZERO); CASE(REPLACE); CASE(INCR); CASE(DECR); CASE(INCR_WRAP); CASE(DECR_WRAP); CASE(INVERT); default: assert("invalid stencilop token()" == NULL); return 0; } #undef CASE } static inline bool is_dst_blend(int blend_factor) { return (blend_factor == PIPE_BLENDFACTOR_DST_ALPHA || blend_factor == PIPE_BLENDFACTOR_INV_DST_ALPHA); } static inline int conv_a8_blend(int blend_factor) { if (blend_factor == PIPE_BLENDFACTOR_DST_ALPHA) return PIPE_BLENDFACTOR_DST_COLOR; if (blend_factor == PIPE_BLENDFACTOR_INV_DST_ALPHA) return PIPE_BLENDFACTOR_INV_DST_COLOR; return blend_factor; } static inline int conv_dst_blend(int blend_factor) { if (blend_factor == PIPE_BLENDFACTOR_DST_ALPHA) return PIPE_BLENDFACTOR_ONE; if (blend_factor == PIPE_BLENDFACTOR_INV_DST_ALPHA) return PIPE_BLENDFACTOR_ZERO; return blend_factor; } static inline bool is_const_blend(int blend_factor) { return (blend_factor == PIPE_BLENDFACTOR_CONST_COLOR || blend_factor == PIPE_BLENDFACTOR_CONST_ALPHA || blend_factor == PIPE_BLENDFACTOR_INV_CONST_COLOR || blend_factor == PIPE_BLENDFACTOR_INV_CONST_ALPHA); } static void vrend_hw_emit_blend(struct vrend_sub_context *sub_ctx, struct pipe_blend_state *state) { if (state->logicop_enable != sub_ctx->hw_blend_state.logicop_enable) { sub_ctx->hw_blend_state.logicop_enable = state->logicop_enable; if (vrend_state.use_gles) { if (can_emulate_logicop(state->logicop_func)) sub_ctx->shader_dirty = true; else report_gles_warn(sub_ctx->parent, GLES_WARN_LOGIC_OP); } else if (state->logicop_enable) { glEnable(GL_COLOR_LOGIC_OP); glLogicOp(translate_logicop(state->logicop_func)); } else { glDisable(GL_COLOR_LOGIC_OP); } } if (state->independent_blend_enable && has_feature(feat_indep_blend) && has_feature(feat_indep_blend_func)) { /* ARB_draw_buffers_blend is 
required for this */ int i; for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) { if (state->rt[i].blend_enable) { bool dual_src = util_blend_state_is_dual(&sub_ctx->blend_state, i); if (dual_src && !has_feature(feat_dual_src_blend)) { vrend_printf( "dual src blend requested but not supported for rt %d\n", i); continue; } glBlendFuncSeparateiARB(i, translate_blend_factor(state->rt[i].rgb_src_factor), translate_blend_factor(state->rt[i].rgb_dst_factor), translate_blend_factor(state->rt[i].alpha_src_factor), translate_blend_factor(state->rt[i].alpha_dst_factor)); glBlendEquationSeparateiARB(i, translate_blend_func(state->rt[i].rgb_func), translate_blend_func(state->rt[i].alpha_func)); glEnableIndexedEXT(GL_BLEND, i); } else glDisableIndexedEXT(GL_BLEND, i); if (state->rt[i].colormask != sub_ctx->hw_blend_state.rt[i].colormask) { sub_ctx->hw_blend_state.rt[i].colormask = state->rt[i].colormask; glColorMaskIndexedEXT(i, state->rt[i].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE, state->rt[i].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE, state->rt[i].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE, state->rt[i].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE); } } } else { if (state->rt[0].blend_enable) { bool dual_src = util_blend_state_is_dual(&sub_ctx->blend_state, 0); if (dual_src && !has_feature(feat_dual_src_blend)) { vrend_printf( "dual src blend requested but not supported for rt 0\n"); } glBlendFuncSeparate(translate_blend_factor(state->rt[0].rgb_src_factor), translate_blend_factor(state->rt[0].rgb_dst_factor), translate_blend_factor(state->rt[0].alpha_src_factor), translate_blend_factor(state->rt[0].alpha_dst_factor)); glBlendEquationSeparate(translate_blend_func(state->rt[0].rgb_func), translate_blend_func(state->rt[0].alpha_func)); glEnable(GL_BLEND); } else glDisable(GL_BLEND); if (state->rt[0].colormask != sub_ctx->hw_blend_state.rt[0].colormask || (sub_ctx->hw_blend_state.independent_blend_enable && !state->independent_blend_enable)) { int i; for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) sub_ctx->hw_blend_state.rt[i].colormask = state->rt[i].colormask; glColorMask(state->rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE, state->rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE, state->rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE, state->rt[0].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE); } } sub_ctx->hw_blend_state.independent_blend_enable = state->independent_blend_enable; if (has_feature(feat_multisample)) { if (state->alpha_to_coverage) glEnable(GL_SAMPLE_ALPHA_TO_COVERAGE); else glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE); if (!vrend_state.use_gles) { if (state->alpha_to_one) glEnable(GL_SAMPLE_ALPHA_TO_ONE); else glDisable(GL_SAMPLE_ALPHA_TO_ONE); } } if (state->dither) glEnable(GL_DITHER); else glDisable(GL_DITHER); } /* there are a few reasons we might need to patch the blend state. a) patching blend factors for dst with no alpha b) patching colormask/blendcolor/blendfactors for A8/A16 format emulation using GL_R8/GL_R16. */ static void vrend_patch_blend_state(struct vrend_sub_context *sub_ctx) { struct pipe_blend_state new_state = sub_ctx->blend_state; struct pipe_blend_state *state = &sub_ctx->blend_state; bool swizzle_blend_color = false; struct pipe_blend_color blend_color = sub_ctx->blend_color; int i; if (sub_ctx->nr_cbufs == 0) { sub_ctx->blend_state_dirty = false; return; } for (i = 0; i < (state->independent_blend_enable ? 
PIPE_MAX_COLOR_BUFS : 1); i++) { if (i < sub_ctx->nr_cbufs && sub_ctx->surf[i]) { if (vrend_format_is_emulated_alpha(sub_ctx->surf[i]->format)) { if (state->rt[i].blend_enable) { new_state.rt[i].rgb_src_factor = conv_a8_blend(state->rt[i].alpha_src_factor); new_state.rt[i].rgb_dst_factor = conv_a8_blend(state->rt[i].alpha_dst_factor); new_state.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ZERO; new_state.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO; } new_state.rt[i].colormask = 0; if (state->rt[i].colormask & PIPE_MASK_A) new_state.rt[i].colormask |= PIPE_MASK_R; if (is_const_blend(new_state.rt[i].rgb_src_factor) || is_const_blend(new_state.rt[i].rgb_dst_factor)) { swizzle_blend_color = true; } } else if (!util_format_has_alpha(sub_ctx->surf[i]->format)) { if (!(is_dst_blend(state->rt[i].rgb_src_factor) || is_dst_blend(state->rt[i].rgb_dst_factor) || is_dst_blend(state->rt[i].alpha_src_factor) || is_dst_blend(state->rt[i].alpha_dst_factor))) continue; new_state.rt[i].rgb_src_factor = conv_dst_blend(state->rt[i].rgb_src_factor); new_state.rt[i].rgb_dst_factor = conv_dst_blend(state->rt[i].rgb_dst_factor); new_state.rt[i].alpha_src_factor = conv_dst_blend(state->rt[i].alpha_src_factor); new_state.rt[i].alpha_dst_factor = conv_dst_blend(state->rt[i].alpha_dst_factor); } } } vrend_hw_emit_blend(sub_ctx, &new_state); if (swizzle_blend_color) { blend_color.color[0] = blend_color.color[3]; blend_color.color[1] = 0.0f; blend_color.color[2] = 0.0f; blend_color.color[3] = 0.0f; } glBlendColor(blend_color.color[0], blend_color.color[1], blend_color.color[2], blend_color.color[3]); sub_ctx->blend_state_dirty = false; } void vrend_object_bind_blend(struct vrend_context *ctx, uint32_t handle) { struct pipe_blend_state *state; if (handle == 0) { memset(&ctx->sub->blend_state, 0, sizeof(ctx->sub->blend_state)); glDisable(GL_BLEND); return; } state = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_BLEND); if (!state) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle); return; } ctx->sub->shader_dirty = true; ctx->sub->blend_state = *state; ctx->sub->blend_state_dirty = true; } static void vrend_hw_emit_dsa(struct vrend_context *ctx) { struct pipe_depth_stencil_alpha_state *state = &ctx->sub->dsa_state; if (state->depth.enabled) { vrend_depth_test_enable(ctx, true); glDepthFunc(GL_NEVER + state->depth.func); if (state->depth.writemask) glDepthMask(GL_TRUE); else glDepthMask(GL_FALSE); } else vrend_depth_test_enable(ctx, false); if (state->alpha.enabled) { vrend_alpha_test_enable(ctx, true); if (!vrend_state.use_core_profile) glAlphaFunc(GL_NEVER + state->alpha.func, state->alpha.ref_value); } else vrend_alpha_test_enable(ctx, false); } void vrend_object_bind_dsa(struct vrend_context *ctx, uint32_t handle) { struct pipe_depth_stencil_alpha_state *state; if (handle == 0) { memset(&ctx->sub->dsa_state, 0, sizeof(ctx->sub->dsa_state)); ctx->sub->dsa = NULL; ctx->sub->stencil_state_dirty = true; ctx->sub->shader_dirty = true; vrend_hw_emit_dsa(ctx); return; } state = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_DSA); if (!state) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle); return; } if (ctx->sub->dsa != state) { ctx->sub->stencil_state_dirty = true; ctx->sub->shader_dirty = true; } ctx->sub->dsa_state = *state; ctx->sub->dsa = state; vrend_hw_emit_dsa(ctx); } static void vrend_update_frontface_state(struct vrend_sub_context *sub_ctx) { struct pipe_rasterizer_state *state = &sub_ctx->rs_state; int front_ccw = state->front_ccw; 
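/* Winding selection below, spelled out (derived directly from the xor that
 * follows, shown here only as a reading aid):
 *   front_ccw  inverted_fbo_content   glFrontFace
 *       1               1               GL_CCW
 *       1               0               GL_CW
 *       0               1               GL_CW
 *       0               0               GL_CCW
 * i.e. the guest's winding is kept when inverted_fbo_content is set and
 * flipped otherwise, presumably to compensate for y-flipped rendering. */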
front_ccw ^= (sub_ctx->inverted_fbo_content ? 0 : 1); if (front_ccw) glFrontFace(GL_CCW); else glFrontFace(GL_CW); } void vrend_update_stencil_state(struct vrend_sub_context *sub_ctx) { struct pipe_depth_stencil_alpha_state *state = sub_ctx->dsa; int i; if (!state) return; if (!state->stencil[1].enabled) { if (state->stencil[0].enabled) { vrend_stencil_test_enable(sub_ctx, true); glStencilOp(translate_stencil_op(state->stencil[0].fail_op), translate_stencil_op(state->stencil[0].zfail_op), translate_stencil_op(state->stencil[0].zpass_op)); glStencilFunc(GL_NEVER + state->stencil[0].func, sub_ctx->stencil_refs[0], state->stencil[0].valuemask); glStencilMask(state->stencil[0].writemask); } else vrend_stencil_test_enable(sub_ctx, false); } else { vrend_stencil_test_enable(sub_ctx, true); for (i = 0; i < 2; i++) { GLenum face = (i == 1) ? GL_BACK : GL_FRONT; glStencilOpSeparate(face, translate_stencil_op(state->stencil[i].fail_op), translate_stencil_op(state->stencil[i].zfail_op), translate_stencil_op(state->stencil[i].zpass_op)); glStencilFuncSeparate(face, GL_NEVER + state->stencil[i].func, sub_ctx->stencil_refs[i], state->stencil[i].valuemask); glStencilMaskSeparate(face, state->stencil[i].writemask); } } sub_ctx->stencil_state_dirty = false; } static inline GLenum translate_fill(uint32_t mode) { switch (mode) { case PIPE_POLYGON_MODE_POINT: return GL_POINT; case PIPE_POLYGON_MODE_LINE: return GL_LINE; case PIPE_POLYGON_MODE_FILL: return GL_FILL; default: assert(0); return 0; } } static void vrend_hw_emit_rs(struct vrend_context *ctx) { struct pipe_rasterizer_state *state = &ctx->sub->rs_state; int i; if (has_feature(feat_depth_clamp)) { if (state->depth_clip) glDisable(GL_DEPTH_CLAMP); else glEnable(GL_DEPTH_CLAMP); } if (vrend_state.use_gles) { /* the guest sent an invalid glPointSize parameter */ if (!state->point_size_per_vertex && state->point_size != 1.0f && state->point_size != 0.0f) { report_gles_warn(ctx, GLES_WARN_POINT_SIZE); } } else if (state->point_size_per_vertex) { glEnable(GL_PROGRAM_POINT_SIZE); } else { glDisable(GL_PROGRAM_POINT_SIZE); if (state->point_size) { glPointSize(state->point_size); } } /* line_width < 0 is invalid and 0 means the guest forgot to set it, so fall back to 1.0 in either case. */ glLineWidth(state->line_width <= 0 ?
1.0f : state->line_width); if (state->rasterizer_discard != ctx->sub->hw_rs_state.rasterizer_discard) { ctx->sub->hw_rs_state.rasterizer_discard = state->rasterizer_discard; if (state->rasterizer_discard) glEnable(GL_RASTERIZER_DISCARD); else glDisable(GL_RASTERIZER_DISCARD); } if (vrend_state.use_gles == true) { if (translate_fill(state->fill_front) != GL_FILL) { report_gles_warn(ctx, GLES_WARN_POLYGON_MODE); } if (translate_fill(state->fill_back) != GL_FILL) { report_gles_warn(ctx, GLES_WARN_POLYGON_MODE); } } else if (vrend_state.use_core_profile == false) { glPolygonMode(GL_FRONT, translate_fill(state->fill_front)); glPolygonMode(GL_BACK, translate_fill(state->fill_back)); } else if (state->fill_front == state->fill_back) { glPolygonMode(GL_FRONT_AND_BACK, translate_fill(state->fill_front)); } else report_core_warn(ctx, CORE_PROFILE_WARN_POLYGON_MODE); if (state->offset_tri) { glEnable(GL_POLYGON_OFFSET_FILL); } else { glDisable(GL_POLYGON_OFFSET_FILL); } if (vrend_state.use_gles) { if (state->offset_line) { report_gles_warn(ctx, GLES_WARN_OFFSET_LINE); } } else if (state->offset_line) { glEnable(GL_POLYGON_OFFSET_LINE); } else { glDisable(GL_POLYGON_OFFSET_LINE); } if (vrend_state.use_gles) { if (state->offset_point) { report_gles_warn(ctx, GLES_WARN_OFFSET_POINT); } } else if (state->offset_point) { glEnable(GL_POLYGON_OFFSET_POINT); } else { glDisable(GL_POLYGON_OFFSET_POINT); } if (state->flatshade != ctx->sub->hw_rs_state.flatshade) { ctx->sub->hw_rs_state.flatshade = state->flatshade; if (vrend_state.use_core_profile == false) { if (state->flatshade) { glShadeModel(GL_FLAT); } else { glShadeModel(GL_SMOOTH); } } } if (state->clip_halfz != ctx->sub->hw_rs_state.clip_halfz) { if (has_feature(feat_clip_control)) { /* We only need to handle clip_halfz here, the bottom_edge_rule is * already handled via Gallium */ GLenum depthrule = state->clip_halfz ? GL_ZERO_TO_ONE : GL_NEGATIVE_ONE_TO_ONE; glClipControl(GL_LOWER_LEFT, depthrule); ctx->sub->hw_rs_state.clip_halfz = state->clip_halfz; } else { vrend_printf("No clip control supported\n"); } } if (state->flatshade_first != ctx->sub->hw_rs_state.flatshade_first) { ctx->sub->hw_rs_state.flatshade_first = state->flatshade_first; if (vrend_state.use_gles) { if (state->flatshade_first) { report_gles_warn(ctx, GLES_WARN_FLATSHADE_FIRST); } } else if (state->flatshade_first) { glProvokingVertexEXT(GL_FIRST_VERTEX_CONVENTION_EXT); } else { glProvokingVertexEXT(GL_LAST_VERTEX_CONVENTION_EXT); } } if (!vrend_state.use_gles && has_feature(feat_polygon_offset_clamp)) glPolygonOffsetClampEXT(state->offset_scale, state->offset_units, state->offset_clamp); else glPolygonOffset(state->offset_scale, state->offset_units); if (vrend_state.use_core_profile == false) { if (state->poly_stipple_enable) glEnable(GL_POLYGON_STIPPLE); else glDisable(GL_POLYGON_STIPPLE); } else if (state->poly_stipple_enable) { if (!ctx->pstip_inited) vrend_init_pstipple_texture(ctx); } if (state->point_quad_rasterization) { if (vrend_state.use_core_profile == false && vrend_state.use_gles == false) { glEnable(GL_POINT_SPRITE); } if (vrend_state.use_gles == false) { glPointParameteri(GL_POINT_SPRITE_COORD_ORIGIN, state->sprite_coord_mode ? 
GL_UPPER_LEFT : GL_LOWER_LEFT); } } else { if (vrend_state.use_core_profile == false && vrend_state.use_gles == false) { glDisable(GL_POINT_SPRITE); } } if (state->cull_face != PIPE_FACE_NONE) { switch (state->cull_face) { case PIPE_FACE_FRONT: glCullFace(GL_FRONT); break; case PIPE_FACE_BACK: glCullFace(GL_BACK); break; case PIPE_FACE_FRONT_AND_BACK: glCullFace(GL_FRONT_AND_BACK); break; default: vrend_printf( "unhandled cull-face: %x\n", state->cull_face); } glEnable(GL_CULL_FACE); } else glDisable(GL_CULL_FACE); /* two sided lighting handled in shader for core profile */ if (vrend_state.use_core_profile == false) { if (state->light_twoside) glEnable(GL_VERTEX_PROGRAM_TWO_SIDE); else glDisable(GL_VERTEX_PROGRAM_TWO_SIDE); } if (state->clip_plane_enable != ctx->sub->hw_rs_state.clip_plane_enable) { ctx->sub->hw_rs_state.clip_plane_enable = state->clip_plane_enable; for (i = 0; i < 8; i++) { if (state->clip_plane_enable & (1 << i)) glEnable(GL_CLIP_PLANE0 + i); else glDisable(GL_CLIP_PLANE0 + i); } } if (vrend_state.use_core_profile == false) { glLineStipple(state->line_stipple_factor, state->line_stipple_pattern); if (state->line_stipple_enable) glEnable(GL_LINE_STIPPLE); else glDisable(GL_LINE_STIPPLE); } else if (state->line_stipple_enable) { if (vrend_state.use_gles) report_core_warn(ctx, GLES_WARN_STIPPLE); else report_core_warn(ctx, CORE_PROFILE_WARN_STIPPLE); } if (vrend_state.use_gles) { if (state->line_smooth) { report_gles_warn(ctx, GLES_WARN_LINE_SMOOTH); } } else if (state->line_smooth) { glEnable(GL_LINE_SMOOTH); } else { glDisable(GL_LINE_SMOOTH); } if (vrend_state.use_gles) { if (state->poly_smooth) { report_gles_warn(ctx, GLES_WARN_POLY_SMOOTH); } } else if (state->poly_smooth) { glEnable(GL_POLYGON_SMOOTH); } else { glDisable(GL_POLYGON_SMOOTH); } if (vrend_state.use_core_profile == false) { if (state->clamp_vertex_color) glClampColor(GL_CLAMP_VERTEX_COLOR_ARB, GL_TRUE); else glClampColor(GL_CLAMP_VERTEX_COLOR_ARB, GL_FALSE); if (state->clamp_fragment_color) glClampColor(GL_CLAMP_FRAGMENT_COLOR_ARB, GL_TRUE); else glClampColor(GL_CLAMP_FRAGMENT_COLOR_ARB, GL_FALSE); } else { if (state->clamp_vertex_color || state->clamp_fragment_color) report_core_warn(ctx, CORE_PROFILE_WARN_CLAMP); } if (has_feature(feat_multisample)) { if (has_feature(feat_sample_mask)) { if (state->multisample) glEnable(GL_SAMPLE_MASK); else glDisable(GL_SAMPLE_MASK); } /* GLES doesn't have GL_MULTISAMPLE */ if (!vrend_state.use_gles) { if (state->multisample) glEnable(GL_MULTISAMPLE); else glDisable(GL_MULTISAMPLE); } if (has_feature(feat_sample_shading)) { if (state->force_persample_interp) glEnable(GL_SAMPLE_SHADING); else glDisable(GL_SAMPLE_SHADING); } } if (state->scissor) glEnable(GL_SCISSOR_TEST); else glDisable(GL_SCISSOR_TEST); ctx->sub->hw_rs_state.scissor = state->scissor; } void vrend_object_bind_rasterizer(struct vrend_context *ctx, uint32_t handle) { struct pipe_rasterizer_state *state; if (handle == 0) { memset(&ctx->sub->rs_state, 0, sizeof(ctx->sub->rs_state)); return; } state = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_RASTERIZER); if (!state) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle); return; } ctx->sub->rs_state = *state; ctx->sub->shader_dirty = true; vrend_hw_emit_rs(ctx); } void vrend_bind_sampler_states(struct vrend_context *ctx, uint32_t shader_type, uint32_t start_slot, uint32_t num_states, const uint32_t *handles) { uint32_t i; struct vrend_sampler_state *state; if (shader_type >= PIPE_SHADER_TYPES) { 
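/* Descriptive note (added): shader_type comes straight from the guest command
 * stream, so an out-of-range value is rejected here before it can be used to
 * index the per-shader arrays below. */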
vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, shader_type); return; } if (num_states > PIPE_MAX_SAMPLERS || start_slot > (PIPE_MAX_SAMPLERS - num_states)) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, num_states); return; } ctx->sub->num_sampler_states[shader_type] = num_states; for (i = 0; i < num_states; i++) { if (handles[i] == 0) state = NULL; else state = vrend_object_lookup(ctx->sub->object_hash, handles[i], VIRGL_OBJECT_SAMPLER_STATE); if (!state && handles[i]) vrend_printf("Failed to bind sampler state (handle=%d)\n", handles[i]); ctx->sub->sampler_state[shader_type][start_slot + i] = state; ctx->sub->sampler_views_dirty[shader_type] |= (1 << (start_slot + i)); } } static void vrend_apply_sampler_state(struct vrend_sub_context *sub_ctx, struct vrend_resource *res, uint32_t shader_type, int id, int sampler_id, struct vrend_sampler_view *tview) { struct vrend_texture *tex = (struct vrend_texture *)res; struct vrend_sampler_state *vstate = sub_ctx->sampler_state[shader_type][id]; struct pipe_sampler_state *state = &vstate->base; bool set_all = false; GLenum target = tex->base.target; assert(offsetof(struct vrend_sampler_state, base) == 0); if (!state) return; if (res->base.nr_samples > 0) { tex->state = *state; return; } if (has_bit(tex->base.storage_bits, VREND_STORAGE_GL_BUFFER)) { tex->state = *state; return; } /* * If we emulate alpha format with red, we need to tell * the sampler to use the red channel and not the alpha one * by swizzling the GL_TEXTURE_BORDER_COLOR parameter. */ bool is_emulated_alpha = vrend_format_is_emulated_alpha(tview->format); if (has_feature(feat_samplers)) { int sampler = vstate->ids[tview->srgb_decode == GL_SKIP_DECODE_EXT ? 0 : 1]; if (is_emulated_alpha) { union pipe_color_union border_color; border_color = state->border_color; border_color.ui[0] = border_color.ui[3]; border_color.ui[3] = 0; apply_sampler_border_color(sampler, border_color.ui); } glBindSampler(sampler_id, sampler); return; } if (tex->state.max_lod == -1) set_all = true; if (tex->state.wrap_s != state->wrap_s || set_all) glTexParameteri(target, GL_TEXTURE_WRAP_S, convert_wrap(state->wrap_s)); if (tex->state.wrap_t != state->wrap_t || set_all) glTexParameteri(target, GL_TEXTURE_WRAP_T, convert_wrap(state->wrap_t)); if (tex->state.wrap_r != state->wrap_r || set_all) glTexParameteri(target, GL_TEXTURE_WRAP_R, convert_wrap(state->wrap_r)); if (tex->state.min_img_filter != state->min_img_filter || tex->state.min_mip_filter != state->min_mip_filter || set_all) glTexParameterf(target, GL_TEXTURE_MIN_FILTER, convert_min_filter(state->min_img_filter, state->min_mip_filter)); if (tex->state.mag_img_filter != state->mag_img_filter || set_all) glTexParameterf(target, GL_TEXTURE_MAG_FILTER, convert_mag_filter(state->mag_img_filter)); if (res->target != GL_TEXTURE_RECTANGLE) { if (tex->state.min_lod != state->min_lod || set_all) glTexParameterf(target, GL_TEXTURE_MIN_LOD, state->min_lod); if (tex->state.max_lod != state->max_lod || set_all) glTexParameterf(target, GL_TEXTURE_MAX_LOD, state->max_lod); if (tex->state.lod_bias != state->lod_bias || set_all) { if (vrend_state.use_gles) { if (state->lod_bias) report_gles_warn(sub_ctx->parent, GLES_WARN_LOD_BIAS); } else { glTexParameterf(target, GL_TEXTURE_LOD_BIAS, state->lod_bias); } } } if (tex->state.compare_mode != state->compare_mode || set_all) glTexParameteri(target, GL_TEXTURE_COMPARE_MODE, state->compare_mode ? 
GL_COMPARE_R_TO_TEXTURE : GL_NONE); if (tex->state.compare_func != state->compare_func || set_all) glTexParameteri(target, GL_TEXTURE_COMPARE_FUNC, GL_NEVER + state->compare_func); if (has_feature(feat_anisotropic_filter) && (tex->state.max_anisotropy != state->max_anisotropy || set_all)) glTexParameterf(target, GL_TEXTURE_MAX_ANISOTROPY, state->max_anisotropy); /* * Oh this is a fun one. On GLES 2.0 all cubemap MUST NOT be seamless. * But on GLES 3.0 all cubemaps MUST be seamless. Either way there is no * way to toggle between the behaviour when running on GLES. And adding * warnings will spew the logs quite bad. Ignore and hope for the best. */ if (!vrend_state.use_gles) { if (state->seamless_cube_map) { glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS); } else { glDisable(GL_TEXTURE_CUBE_MAP_SEAMLESS); } } if (memcmp(&tex->state.border_color, &state->border_color, 16) || set_all || is_emulated_alpha) { if (is_emulated_alpha) { union pipe_color_union border_color; border_color = state->border_color; border_color.ui[0] = border_color.ui[3]; border_color.ui[3] = 0; glTexParameterIuiv(target, GL_TEXTURE_BORDER_COLOR, border_color.ui); } else { glTexParameterIuiv(target, GL_TEXTURE_BORDER_COLOR, state->border_color.ui); } } tex->state = *state; } static GLenum tgsitargettogltarget(const enum pipe_texture_target target, int nr_samples) { switch(target) { case PIPE_TEXTURE_1D: return GL_TEXTURE_1D; case PIPE_TEXTURE_2D: return (nr_samples > 0) ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D; case PIPE_TEXTURE_3D: return GL_TEXTURE_3D; case PIPE_TEXTURE_RECT: return GL_TEXTURE_RECTANGLE_NV; case PIPE_TEXTURE_CUBE: return GL_TEXTURE_CUBE_MAP; case PIPE_TEXTURE_1D_ARRAY: return GL_TEXTURE_1D_ARRAY; case PIPE_TEXTURE_2D_ARRAY: return (nr_samples > 0) ? GL_TEXTURE_2D_MULTISAMPLE_ARRAY : GL_TEXTURE_2D_ARRAY; case PIPE_TEXTURE_CUBE_ARRAY: return GL_TEXTURE_CUBE_MAP_ARRAY; case PIPE_BUFFER: default: return PIPE_BUFFER; } return PIPE_BUFFER; } static inline void lock_sync(void) { if (vrend_state.sync_thread && vrend_state.use_async_fence_cb) mtx_lock(&vrend_state.fence_mutex); } static inline void unlock_sync(void) { if (vrend_state.sync_thread && vrend_state.use_async_fence_cb) mtx_unlock(&vrend_state.fence_mutex); } static void vrend_free_sync_thread(void) { if (!vrend_state.sync_thread) return; mtx_lock(&vrend_state.fence_mutex); vrend_state.stop_sync_thread = true; cnd_signal(&vrend_state.fence_cond); mtx_unlock(&vrend_state.fence_mutex); thrd_join(vrend_state.sync_thread, NULL); vrend_state.sync_thread = 0; cnd_destroy(&vrend_state.fence_cond); mtx_destroy(&vrend_state.fence_mutex); } static void free_fence_locked(struct vrend_fence *fence) { list_del(&fence->fences); #ifdef HAVE_EPOXY_EGL_H if (vrend_state.use_egl_fence) { virgl_egl_fence_destroy(egl, fence->eglsyncobj); } else #endif { glDeleteSync(fence->glsyncobj); } free(fence); } static void vrend_free_fences(void) { struct vrend_fence *fence, *stor; /* this is called after vrend_free_sync_thread */ assert(!vrend_state.sync_thread); LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) free_fence_locked(fence); LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_wait_list, fences) free_fence_locked(fence); } static void vrend_free_fences_for_context(struct vrend_context *ctx) { struct vrend_fence *fence, *stor; if (vrend_state.sync_thread) { mtx_lock(&vrend_state.fence_mutex); LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) { if (fence->ctx == ctx) free_fence_locked(fence); } LIST_FOR_EACH_ENTRY_SAFE(fence, 
stor, &vrend_state.fence_wait_list, fences) { if (fence->ctx == ctx) free_fence_locked(fence); } if (vrend_state.fence_waiting) { /* mark the fence invalid as the sync thread is still waiting on it */ vrend_state.fence_waiting->ctx = NULL; } mtx_unlock(&vrend_state.fence_mutex); } else { LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) { if (fence->ctx == ctx) free_fence_locked(fence); } } } static bool do_wait(struct vrend_fence *fence, bool can_block) { bool done = false; int timeout = can_block ? 1000000000 : 0; #ifdef HAVE_EPOXY_EGL_H if (vrend_state.use_egl_fence) { do { done = virgl_egl_client_wait_fence(egl, fence->eglsyncobj, timeout); } while (!done && can_block); return done; } #endif do { GLenum glret = glClientWaitSync(fence->glsyncobj, 0, timeout); if (glret == GL_WAIT_FAILED) { vrend_printf( "wait sync failed: illegal fence object %p\n", fence->glsyncobj); } done = glret != GL_TIMEOUT_EXPIRED; } while (!done && can_block); return done; } static void vrend_renderer_check_queries_locked(void); static void wait_sync(struct vrend_fence *fence) { struct vrend_context *ctx = fence->ctx; do_wait(fence, /* can_block */ true); mtx_lock(&vrend_state.fence_mutex); if (vrend_state.use_async_fence_cb) { vrend_renderer_check_queries_locked(); /* to be able to call free_fence_locked without locking */ list_inithead(&fence->fences); } else { list_addtail(&fence->fences, &vrend_state.fence_list); } vrend_state.fence_waiting = NULL; mtx_unlock(&vrend_state.fence_mutex); if (vrend_state.use_async_fence_cb) { ctx->fence_retire(fence->fence_cookie, ctx->fence_retire_data); free_fence_locked(fence); return; } if (write_eventfd(vrend_state.eventfd, 1)) { perror("failed to write to eventfd\n"); } } static int thread_sync(UNUSED void *arg) { virgl_gl_context gl_context = vrend_state.sync_context; struct vrend_fence *fence, *stor; u_thread_setname("vrend-sync"); mtx_lock(&vrend_state.fence_mutex); vrend_clicbs->make_current(gl_context); while (!vrend_state.stop_sync_thread) { if (LIST_IS_EMPTY(&vrend_state.fence_wait_list) && cnd_wait(&vrend_state.fence_cond, &vrend_state.fence_mutex) != 0) { vrend_printf( "error while waiting on condition\n"); break; } LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_wait_list, fences) { if (vrend_state.stop_sync_thread) break; list_del(&fence->fences); vrend_state.fence_waiting = fence; mtx_unlock(&vrend_state.fence_mutex); wait_sync(fence); mtx_lock(&vrend_state.fence_mutex); } } vrend_clicbs->make_current(0); vrend_clicbs->destroy_gl_context(vrend_state.sync_context); mtx_unlock(&vrend_state.fence_mutex); return 0; } static void vrend_renderer_use_threaded_sync(void) { struct virgl_gl_ctx_param ctx_params; ctx_params.shared = true; ctx_params.major_ver = vrend_state.gl_major_ver; ctx_params.minor_ver = vrend_state.gl_minor_ver; vrend_state.stop_sync_thread = false; vrend_state.sync_context = vrend_clicbs->create_gl_context(0, &ctx_params); if (vrend_state.sync_context == NULL) { vrend_printf( "failed to create sync opengl context\n"); return; } if (!vrend_state.use_async_fence_cb) { vrend_state.eventfd = create_eventfd(0); if (vrend_state.eventfd == -1) { vrend_printf( "Failed to create eventfd\n"); vrend_clicbs->destroy_gl_context(vrend_state.sync_context); return; } } cnd_init(&vrend_state.fence_cond); mtx_init(&vrend_state.fence_mutex, mtx_plain); vrend_state.sync_thread = u_thread_create(thread_sync, NULL); if (!vrend_state.sync_thread) { if (vrend_state.eventfd != -1) { close(vrend_state.eventfd); vrend_state.eventfd = -1; } 
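/* Descriptive note (added): thread creation failed, so also tear down the
 * sync GL context and the condition variable / mutex initialized above. */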
vrend_clicbs->destroy_gl_context(vrend_state.sync_context); cnd_destroy(&vrend_state.fence_cond); mtx_destroy(&vrend_state.fence_mutex); } } static void vrend_debug_cb(UNUSED GLenum source, GLenum type, UNUSED GLuint id, UNUSED GLenum severity, UNUSED GLsizei length, UNUSED const GLchar* message, UNUSED const void* userParam) { if (type != GL_DEBUG_TYPE_ERROR) { return; } vrend_printf( "ERROR: %s\n", message); } static void vrend_pipe_resource_unref(struct pipe_resource *pres, UNUSED void *data) { struct vrend_resource *res = (struct vrend_resource *)pres; if (vrend_state.finishing || pipe_reference(&res->base.reference, NULL)) vrend_renderer_resource_destroy(res); } static void vrend_pipe_resource_attach_iov(struct pipe_resource *pres, const struct iovec *iov, int iov_count, UNUSED void *data) { struct vrend_resource *res = (struct vrend_resource *)pres; res->iov = iov; res->num_iovs = iov_count; if (has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY)) { vrend_write_to_iovec(res->iov, res->num_iovs, 0, res->ptr, res->base.width0); } } static void vrend_pipe_resource_detach_iov(struct pipe_resource *pres, UNUSED void *data) { struct vrend_resource *res = (struct vrend_resource *)pres; if (has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY)) { vrend_read_from_iovec(res->iov, res->num_iovs, 0, res->ptr, res->base.width0); } res->iov = NULL; res->num_iovs = 0; } static enum virgl_resource_fd_type vrend_pipe_resource_export_fd(UNUSED struct pipe_resource *pres, UNUSED int *fd, UNUSED void *data) { #ifdef ENABLE_MINIGBM_ALLOCATION struct vrend_resource *res = (struct vrend_resource *)pres; if (res->storage_bits & VREND_STORAGE_GBM_BUFFER) { int ret = virgl_gbm_export_fd(gbm->device, gbm_bo_get_handle(res->gbm_bo).u32, fd); if (!ret) return VIRGL_RESOURCE_FD_DMABUF; } #endif return VIRGL_RESOURCE_FD_INVALID; } static uint64_t vrend_pipe_resource_get_size(struct pipe_resource *pres, UNUSED void *data) { struct vrend_resource *res = (struct vrend_resource *)pres; return res->size; } const struct virgl_resource_pipe_callbacks * vrend_renderer_get_pipe_callbacks(void) { static const struct virgl_resource_pipe_callbacks callbacks = { .unref = vrend_pipe_resource_unref, .attach_iov = vrend_pipe_resource_attach_iov, .detach_iov = vrend_pipe_resource_detach_iov, .export_fd = vrend_pipe_resource_export_fd, .get_size = vrend_pipe_resource_get_size, }; return &callbacks; } static bool use_integer() { if (getenv("VIRGL_USE_INTEGER")) return true; const char * a = (const char *) glGetString(GL_VENDOR); if (!a) return false; if (strcmp(a, "ARM") == 0) return true; return false; } int vrend_renderer_init(const struct vrend_if_cbs *cbs, uint32_t flags) { bool gles; int gl_ver; virgl_gl_context gl_context; struct virgl_gl_ctx_param ctx_params; vrend_clicbs = cbs; /* Give some defaults to be able to run the tests */ vrend_state.max_texture_2d_size = vrend_state.max_texture_3d_size = vrend_state.max_texture_cube_size = 16384; #ifndef NDEBUG vrend_init_debug_flags(); #endif ctx_params.shared = false; for (uint32_t i = 0; i < ARRAY_SIZE(gl_versions); i++) { ctx_params.major_ver = gl_versions[i].major; ctx_params.minor_ver = gl_versions[i].minor; gl_context = vrend_clicbs->create_gl_context(0, &ctx_params); if (gl_context) break; } vrend_clicbs->make_current(gl_context); gl_ver = epoxy_gl_version(); /* enable error output as early as possible */ if (vrend_debug(NULL, dbg_khr) && epoxy_has_gl_extension("GL_KHR_debug")) { glDebugMessageCallback(vrend_debug_cb, NULL); glEnable(GL_DEBUG_OUTPUT); 
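/* Descriptive note (added): vrend_debug_cb only logs GL errors, so
 * synchronous delivery of debug messages is left disabled below. */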
glDisable(GL_DEBUG_OUTPUT_SYNCHRONOUS); set_feature(feat_debug_cb); } /* make sure you have the latest version of libepoxy */ gles = epoxy_is_desktop_gl() == 0; vrend_state.gl_major_ver = gl_ver / 10; vrend_state.gl_minor_ver = gl_ver % 10; if (gles) { vrend_printf( "gl_version %d - es profile enabled\n", gl_ver); vrend_state.use_gles = true; /* for now, makes the rest of the code use the most GLES 3.x like path */ vrend_state.use_core_profile = 1; } else if (gl_ver > 30 && !epoxy_has_gl_extension("GL_ARB_compatibility")) { vrend_printf( "gl_version %d - core profile enabled\n", gl_ver); vrend_state.use_core_profile = 1; } else { vrend_printf( "gl_version %d - compat profile\n", gl_ver); } vrend_state.use_integer = use_integer(); init_features(gles ? 0 : gl_ver, gles ? gl_ver : 0); if (!vrend_winsys_has_gl_colorspace()) clear_feature(feat_srgb_write_control); glGetIntegerv(GL_MAX_DRAW_BUFFERS, (GLint *) &vrend_state.max_draw_buffers); /* Mesa clamps this value to 8 anyway, so just make sure that this side * doesn't exceed the number to be on the safe side when using 8-bit masks * for the color buffers */ if (vrend_state.max_draw_buffers > 8) vrend_state.max_draw_buffers = 8; if (!has_feature(feat_arb_robustness) && !has_feature(feat_gles_khr_robustness)) { vrend_printf("WARNING: running without ARB/KHR robustness in place may crash\n"); } /* callbacks for when we are cleaning up the object table */ vrend_object_set_destroy_callback(VIRGL_OBJECT_QUERY, vrend_destroy_query_object); vrend_object_set_destroy_callback(VIRGL_OBJECT_SURFACE, vrend_destroy_surface_object); vrend_object_set_destroy_callback(VIRGL_OBJECT_SHADER, vrend_destroy_shader_object); vrend_object_set_destroy_callback(VIRGL_OBJECT_SAMPLER_VIEW, vrend_destroy_sampler_view_object); vrend_object_set_destroy_callback(VIRGL_OBJECT_STREAMOUT_TARGET, vrend_destroy_so_target_object); vrend_object_set_destroy_callback(VIRGL_OBJECT_SAMPLER_STATE, vrend_destroy_sampler_state_object); vrend_object_set_destroy_callback(VIRGL_OBJECT_VERTEX_ELEMENTS, vrend_destroy_vertex_elements_object); /* disable for format testing, spews a lot of errors */ if (has_feature(feat_debug_cb)) { glDisable(GL_DEBUG_OUTPUT); } vrend_build_format_list_common(); if (vrend_state.use_gles) { vrend_build_format_list_gles(); } else { vrend_build_format_list_gl(); } vrend_check_texture_storage(tex_conv_table); /* disable for format testing */ if (has_feature(feat_debug_cb)) { glDisable(GL_DEBUG_OUTPUT); } vrend_clicbs->destroy_gl_context(gl_context); list_inithead(&vrend_state.fence_list); list_inithead(&vrend_state.fence_wait_list); list_inithead(&vrend_state.waiting_query_list); /* create 0 context */ vrend_state.ctx0 = vrend_create_context(0, strlen("HOST"), "HOST"); vrend_state.eventfd = -1; if (flags & VREND_USE_THREAD_SYNC) { if (flags & VREND_USE_ASYNC_FENCE_CB) vrend_state.use_async_fence_cb = true; vrend_renderer_use_threaded_sync(); } if (flags & VREND_USE_EXTERNAL_BLOB) vrend_state.use_external_blob = true; #ifdef HAVE_EPOXY_EGL_H if (vrend_state.use_gles) vrend_state.use_egl_fence = virgl_egl_supports_fences(egl); #endif return 0; } void vrend_renderer_fini(void) { vrend_state.finishing = true; if (vrend_state.eventfd != -1) { close(vrend_state.eventfd); vrend_state.eventfd = -1; } vrend_free_fences(); vrend_blitter_fini(); vrend_destroy_context(vrend_state.ctx0); vrend_state.current_ctx = NULL; vrend_state.current_hw_ctx = NULL; vrend_state.finishing = false; } static void vrend_destroy_sub_context(struct vrend_sub_context *sub) { int i, j; struct
vrend_streamout_object *obj, *tmp; vrend_clicbs->make_current(sub->gl_context); if (sub->fb_id) glDeleteFramebuffers(1, &sub->fb_id); if (sub->blit_fb_ids[0]) glDeleteFramebuffers(2, sub->blit_fb_ids); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0); if (!has_feature(feat_gles31_vertex_attrib_binding)) { while (sub->enabled_attribs_bitmask) { i = u_bit_scan(&sub->enabled_attribs_bitmask); glDisableVertexAttribArray(i); } glDeleteVertexArrays(1, &sub->vaoid); } glBindVertexArray(0); if (sub->current_so) glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, 0); LIST_FOR_EACH_ENTRY_SAFE(obj, tmp, &sub->streamout_list, head) { vrend_destroy_streamout_object(obj); } vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_VERTEX], NULL); vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_FRAGMENT], NULL); vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_GEOMETRY], NULL); vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_TESS_CTRL], NULL); vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_TESS_EVAL], NULL); vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_COMPUTE], NULL); if (sub->prog) sub->prog->ref_context = NULL; vrend_free_programs(sub); for (i = 0; i < PIPE_SHADER_TYPES; i++) { free(sub->consts[i].consts); sub->consts[i].consts = NULL; for (j = 0; j < PIPE_MAX_SHADER_SAMPLER_VIEWS; j++) { vrend_sampler_view_reference(&sub->views[i].views[j], NULL); } } if (sub->zsurf) vrend_surface_reference(&sub->zsurf, NULL); for (i = 0; i < sub->nr_cbufs; i++) { if (!sub->surf[i]) continue; vrend_surface_reference(&sub->surf[i], NULL); } vrend_set_num_vbo_sub(sub, 0); vrend_resource_reference((struct vrend_resource **)&sub->ib.buffer, NULL); /* need to lock mutex before destroying queries, we could * be checking these in the sync thread */ lock_sync(); vrend_object_fini_ctx_table(sub->object_hash); unlock_sync(); vrend_clicbs->destroy_gl_context(sub->gl_context); list_del(&sub->head); FREE(sub); } void vrend_destroy_context(struct vrend_context *ctx) { bool switch_0 = (ctx == vrend_state.current_ctx); struct vrend_context *cur = vrend_state.current_ctx; struct vrend_sub_context *sub, *tmp; struct vrend_untyped_resource *untyped_res, *untyped_res_tmp; if (switch_0) { vrend_state.current_ctx = NULL; vrend_state.current_hw_ctx = NULL; } if (vrend_state.use_core_profile) { if (ctx->pstip_inited) glDeleteTextures(1, &ctx->pstipple_tex_id); ctx->pstip_inited = false; } vrend_clicbs->make_current(ctx->sub->gl_context); /* reset references on framebuffers */ vrend_set_framebuffer_state(ctx, 0, NULL, 0); vrend_set_num_sampler_views(ctx, PIPE_SHADER_VERTEX, 0, 0); vrend_set_num_sampler_views(ctx, PIPE_SHADER_FRAGMENT, 0, 0); vrend_set_num_sampler_views(ctx, PIPE_SHADER_GEOMETRY, 0, 0); vrend_set_num_sampler_views(ctx, PIPE_SHADER_TESS_CTRL, 0, 0); vrend_set_num_sampler_views(ctx, PIPE_SHADER_TESS_EVAL, 0, 0); vrend_set_num_sampler_views(ctx, PIPE_SHADER_COMPUTE, 0, 0); vrend_set_streamout_targets(ctx, 0, 0, NULL); vrend_set_index_buffer(ctx, 0, 0, 0); LIST_FOR_EACH_ENTRY_SAFE(sub, tmp, &ctx->sub_ctxs, head) vrend_destroy_sub_context(sub); if(ctx->ctx_id) vrend_renderer_force_ctx_0(); vrend_free_fences_for_context(ctx); LIST_FOR_EACH_ENTRY_SAFE(untyped_res, untyped_res_tmp, &ctx->untyped_resources, head) free(untyped_res); vrend_ctx_resource_fini_table(ctx->res_hash); FREE(ctx); if (!switch_0 && cur) vrend_hw_switch_context(cur, true); } struct vrend_context *vrend_create_context(int id, uint32_t nlen, const char *debug_name) { struct vrend_context *grctx = CALLOC_STRUCT(vrend_context); if (!grctx) 
return NULL; if (nlen && debug_name) { strncpy(grctx->debug_name, debug_name, nlen < sizeof(grctx->debug_name) - 1 ? nlen : sizeof(grctx->debug_name) - 1); grctx->debug_name[sizeof(grctx->debug_name) - 1] = 0; } VREND_DEBUG(dbg_caller, grctx, "create context\n"); grctx->ctx_id = id; list_inithead(&grctx->sub_ctxs); list_inithead(&grctx->vrend_resources); list_inithead(&grctx->active_nontimer_query_list); grctx->res_hash = vrend_ctx_resource_init_table(); list_inithead(&grctx->untyped_resources); grctx->shader_cfg.use_gles = vrend_state.use_gles; grctx->shader_cfg.use_core_profile = vrend_state.use_core_profile; grctx->shader_cfg.use_explicit_locations = vrend_state.use_explicit_locations; grctx->shader_cfg.max_draw_buffers = vrend_state.max_draw_buffers; grctx->shader_cfg.has_arrays_of_arrays = has_feature(feat_arrays_of_arrays); grctx->shader_cfg.has_gpu_shader5 = has_feature(feat_gpu_shader5); grctx->shader_cfg.has_es31_compat = has_feature(feat_gles31_compatibility); grctx->shader_cfg.has_conservative_depth = has_feature(feat_conservative_depth); grctx->shader_cfg.use_integer = vrend_state.use_integer; grctx->shader_cfg.has_dual_src_blend = has_feature(feat_dual_src_blend); grctx->shader_cfg.has_fbfetch_coherent = has_feature(feat_framebuffer_fetch); grctx->shader_cfg.has_cull_distance = has_feature(feat_cull_distance); vrend_renderer_create_sub_ctx(grctx, 0); vrend_renderer_set_sub_ctx(grctx, 0); grctx->shader_cfg.glsl_version = vrender_get_glsl_version(); if (!grctx->ctx_id) grctx->fence_retire = vrend_clicbs->ctx0_fence_retire; return grctx; } static int check_resource_valid(const struct vrend_renderer_resource_create_args *args, char errmsg[256]) { /* limit the target */ if (args->target >= PIPE_MAX_TEXTURE_TYPES) { snprintf(errmsg, 256, "Invalid texture target %d (>= %d)", args->target, PIPE_MAX_TEXTURE_TYPES); return -1; } if (args->format >= VIRGL_FORMAT_MAX) { snprintf(errmsg, 256, "Invalid texture format %d (>=%d)", args->format, VIRGL_FORMAT_MAX); return -1; } bool format_can_texture_storage = has_feature(feat_texture_storage) && (tex_conv_table[args->format].flags & VIRGL_TEXTURE_CAN_TEXTURE_STORAGE); /* only texture 2d and 2d array can have multiple samples */ if (args->nr_samples > 0) { if (!has_feature(feat_texture_multisample)) { snprintf(errmsg, 256, "Multisample textures not supported"); return -1; } if (args->target != PIPE_TEXTURE_2D && args->target != PIPE_TEXTURE_2D_ARRAY) { snprintf(errmsg, 256, "Multisample textures not 2D (target:%d)", args->target); return -1; } /* multisample can't have miplevels */ if (args->last_level > 0) { snprintf(errmsg, 256, "Multisample textures don't support mipmaps"); return -1; } if (!format_can_texture_storage && vrend_state.use_gles) { snprintf(errmsg, 256, "Unsupported multisample texture format %d", args->format); return -1; } } if (args->last_level > 0) { /* buffer and rect textures can't have mipmaps */ if (args->target == PIPE_BUFFER) { snprintf(errmsg, 256, "Buffers don't support mipmaps"); return -1; } if (args->target == PIPE_TEXTURE_RECT) { snprintf(errmsg, 256, "RECT textures don't support mipmaps"); return -1; } if (args->last_level > (floor(log2(MAX2(args->width, args->height))) + 1)) { snprintf(errmsg, 256, "Mipmap levels %d too large for texture size (%d, %d)", args->last_level, args->width, args->height); return -1; } } if (args->flags != 0) { uint32_t supported_mask = VIRGL_RESOURCE_Y_0_TOP | VIRGL_RESOURCE_FLAG_MAP_PERSISTENT | VIRGL_RESOURCE_FLAG_MAP_COHERENT; if (args->flags & ~supported_mask) { snprintf(errmsg, 
256, "Resource flags 0x%x not supported", args->flags); return -1; } } if (args->flags & VIRGL_RESOURCE_Y_0_TOP) { if (args->target != PIPE_TEXTURE_2D && args->target != PIPE_TEXTURE_RECT) { snprintf(errmsg, 256, "VIRGL_RESOURCE_Y_0_TOP only supported for 2D or RECT textures"); return -1; } } /* array size for array textures only */ if (args->target == PIPE_TEXTURE_CUBE) { if (args->array_size != 6) { snprintf(errmsg, 256, "Cube map: unexpected array size %d", args->array_size); return -1; } } else if (args->target == PIPE_TEXTURE_CUBE_ARRAY) { if (!has_feature(feat_cube_map_array)) { snprintf(errmsg, 256, "Cube map arrays not supported"); return -1; } if (args->array_size % 6) { snprintf(errmsg, 256, "Cube map array: unexpected array size %d", args->array_size); return -1; } } else if (args->array_size > 1) { if (args->target != PIPE_TEXTURE_2D_ARRAY && args->target != PIPE_TEXTURE_1D_ARRAY) { snprintf(errmsg, 256, "Texture target %d can't be an array ", args->target); return -1; } if (!has_feature(feat_texture_array)) { snprintf(errmsg, 256, "Texture arrays are not supported"); return -1; } } if (args->target != PIPE_BUFFER && !args->width) { snprintf(errmsg, 256, "Texture width must be >0"); return -1; } if (args->bind == 0 || args->bind == VIRGL_BIND_CUSTOM || args->bind == VIRGL_BIND_STAGING || args->bind == VIRGL_BIND_INDEX_BUFFER || args->bind == VIRGL_BIND_STREAM_OUTPUT || args->bind == VIRGL_BIND_VERTEX_BUFFER || args->bind == VIRGL_BIND_CONSTANT_BUFFER || args->bind == VIRGL_BIND_QUERY_BUFFER || args->bind == VIRGL_BIND_COMMAND_ARGS || args->bind == VIRGL_BIND_SHADER_BUFFER) { if (args->target != PIPE_BUFFER) { snprintf(errmsg, 256, "Buffer bind flags requre the buffer target but this is target %d", args->target); return -1; } if (args->height != 1 || args->depth != 1) { snprintf(errmsg, 256, "Buffer target: Got height=%u, depth=%u, expect (1,1)", args->height, args->depth); return -1; } if (args->bind == VIRGL_BIND_QUERY_BUFFER && !has_feature(feat_qbo)) { snprintf(errmsg, 256, "Query buffers are not supported"); return -1; } if (args->bind == VIRGL_BIND_COMMAND_ARGS && !has_feature(feat_indirect_draw)) { snprintf(errmsg, 256, "Command args buffer requested but indirect draw is not supported"); return -1; } } else { if (!((args->bind & VIRGL_BIND_SAMPLER_VIEW) || (args->bind & VIRGL_BIND_DEPTH_STENCIL) || (args->bind & VIRGL_BIND_RENDER_TARGET) || (args->bind & VIRGL_BIND_CURSOR) || (args->bind & VIRGL_BIND_SHARED) || (args->bind & VIRGL_BIND_LINEAR))) { snprintf(errmsg, 256, "Invalid texture bind flags 0x%x", args->bind); return -1; } #ifdef ENABLE_MINIGBM_ALLOCATION if (!virgl_gbm_gpu_import_required(args->bind)) { return 0; } #endif if (args->target == PIPE_TEXTURE_2D || args->target == PIPE_TEXTURE_RECT || args->target == PIPE_TEXTURE_CUBE || args->target == PIPE_TEXTURE_2D_ARRAY || args->target == PIPE_TEXTURE_CUBE_ARRAY) { if (args->depth != 1) { snprintf(errmsg, 256, "2D texture target with depth=%u != 1", args->depth); return -1; } if (format_can_texture_storage && !args->height) { snprintf(errmsg, 256, "2D Texture storage requires non-zero height"); return -1; } } if (args->target == PIPE_TEXTURE_1D || args->target == PIPE_TEXTURE_1D_ARRAY) { if (args->height != 1 || args->depth != 1) { snprintf(errmsg, 256, "Got height=%u, depth=%u, expect (1,1)", args->height, args->depth); return -1; } if (args->width > vrend_state.max_texture_2d_size) { snprintf(errmsg, 256, "1D Texture width (%u) exceeds supported value (%u)", args->width, vrend_state.max_texture_2d_size); return 
-1; } } if (args->target == PIPE_TEXTURE_2D || args->target == PIPE_TEXTURE_RECT || args->target == PIPE_TEXTURE_2D_ARRAY) { if (args->width > vrend_state.max_texture_2d_size || args->height > vrend_state.max_texture_2d_size) { snprintf(errmsg, 256, "2D Texture size components (%u, %u) exceeds supported value (%u)", args->width, args->height, vrend_state.max_texture_2d_size); return -1; } } if (args->target == PIPE_TEXTURE_3D) { if (format_can_texture_storage && (!args->height || !args->depth)) { snprintf(errmsg, 256, "Texture storage expects non-zero height (%u) and depth (%u)", args->height, args->depth); return -1; } if (args->width > vrend_state.max_texture_3d_size || args->height > vrend_state.max_texture_3d_size || args->depth > vrend_state.max_texture_3d_size) { snprintf(errmsg, 256, "3D Texture sizes (%u, %u, %u) exceeds supported value (%u)", args->width, args->height, args->depth, vrend_state.max_texture_3d_size); return -1; } } if (args->target == PIPE_TEXTURE_2D_ARRAY || args->target == PIPE_TEXTURE_CUBE_ARRAY || args->target == PIPE_TEXTURE_1D_ARRAY) { if (format_can_texture_storage && !args->array_size) { snprintf(errmsg, 256, "Texture arrays require a non-zero arrays size " "when allocated with glTexStorage"); return -1; } } if (args->target == PIPE_TEXTURE_CUBE || args->target == PIPE_TEXTURE_CUBE_ARRAY) { if (args->width != args->height) { snprintf(errmsg, 256, "Cube maps require width (%u) == height (%u)", args->width, args->height); return -1; } if (args->width > vrend_state.max_texture_cube_size) { snprintf(errmsg, 256, "Cube maps size (%u) exceeds supported value (%u)", args->width, vrend_state.max_texture_cube_size); return -1; } } } return 0; } static void vrend_create_buffer(struct vrend_resource *gr, uint32_t width, uint32_t flags) { GLbitfield buffer_storage_flags = 0; if (flags & VIRGL_RESOURCE_FLAG_MAP_PERSISTENT) { buffer_storage_flags |= GL_MAP_PERSISTENT_BIT; /* Gallium's storage_flags_to_buffer_flags seems to drop some information, but we have to * satisfy the following: * * "If flags contains GL_MAP_PERSISTENT_BIT, it must also contain at least one of * GL_MAP_READ_BIT or GL_MAP_WRITE_BIT." */ buffer_storage_flags |= GL_MAP_READ_BIT | GL_MAP_WRITE_BIT; } if (flags & VIRGL_RESOURCE_FLAG_MAP_COHERENT) buffer_storage_flags |= GL_MAP_COHERENT_BIT; gr->storage_bits |= VREND_STORAGE_GL_BUFFER; glGenBuffersARB(1, &gr->id); glBindBufferARB(gr->target, gr->id); if (buffer_storage_flags) { if (has_feature(feat_arb_buffer_storage) && !vrend_state.use_external_blob) { glBufferStorage(gr->target, width, NULL, buffer_storage_flags); gr->map_info = vrend_state.inferred_gl_caching_type; } #ifdef ENABLE_MINIGBM_ALLOCATION else if (has_feature(feat_memory_object_fd) && has_feature(feat_memory_object)) { GLuint memobj = 0; int fd = -1; int ret; /* Could use VK too. 
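 * (Added note: the backing memory could also come from a Vulkan exporter;
 * what the code below actually implements is a GBM allocation exported as an
 * fd and imported with glImportMemoryFdEXT / glBufferStorageMemEXT.)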
*/ struct gbm_bo *bo = gbm_bo_create(gbm->device, width, 1, GBM_FORMAT_R8, GBM_BO_USE_LINEAR); if (!bo) { vrend_printf("Failed to allocate emulated GL buffer backing storage"); return; } ret = virgl_gbm_export_fd(gbm->device, gbm_bo_get_handle(bo).u32, &fd); if (ret || fd < 0) { vrend_printf("Failed to get file descriptor\n"); return; } glCreateMemoryObjectsEXT(1, &memobj); glImportMemoryFdEXT(memobj, width, GL_HANDLE_TYPE_OPAQUE_FD_EXT, fd); glBufferStorageMemEXT(gr->target, width, memobj, 0); gr->gbm_bo = bo; gr->memobj = memobj; gr->storage_bits |= VREND_STORAGE_GBM_BUFFER | VREND_STORAGE_GL_MEMOBJ; if (!strcmp(gbm_device_get_backend_name(gbm->device), "i915")) gr->map_info = VIRGL_RENDERER_MAP_CACHE_CACHED; else gr->map_info = VIRGL_RENDERER_MAP_CACHE_WC; } #endif else { vrend_printf("Missing buffer storage and interop extensions\n"); return; } gr->storage_bits |= VREND_STORAGE_GL_IMMUTABLE; gr->buffer_storage_flags = buffer_storage_flags; gr->size = width; } else glBufferData(gr->target, width, NULL, GL_STREAM_DRAW); glBindBufferARB(gr->target, 0); } static int vrend_resource_alloc_buffer(struct vrend_resource *gr, uint32_t flags) { const uint32_t bind = gr->base.bind; const uint32_t size = gr->base.width0; if (bind == VIRGL_BIND_CUSTOM) { /* use iovec directly when attached */ gr->storage_bits |= VREND_STORAGE_HOST_SYSTEM_MEMORY; gr->ptr = calloc(1, size); if (!gr->ptr) return -ENOMEM; } else if (bind == VIRGL_BIND_STAGING) { /* staging buffers only use guest memory -- nothing to do. */ } else if (bind == VIRGL_BIND_INDEX_BUFFER) { gr->target = GL_ELEMENT_ARRAY_BUFFER_ARB; vrend_create_buffer(gr, size, flags); } else if (bind == VIRGL_BIND_STREAM_OUTPUT) { gr->target = GL_TRANSFORM_FEEDBACK_BUFFER; vrend_create_buffer(gr, size, flags); } else if (bind == VIRGL_BIND_VERTEX_BUFFER) { gr->target = GL_ARRAY_BUFFER_ARB; vrend_create_buffer(gr, size, flags); } else if (bind == VIRGL_BIND_CONSTANT_BUFFER) { gr->target = GL_UNIFORM_BUFFER; vrend_create_buffer(gr, size, flags); } else if (bind == VIRGL_BIND_QUERY_BUFFER) { gr->target = GL_QUERY_BUFFER; vrend_create_buffer(gr, size, flags); } else if (bind == VIRGL_BIND_COMMAND_ARGS) { gr->target = GL_DRAW_INDIRECT_BUFFER; vrend_create_buffer(gr, size, flags); } else if (bind == 0 || bind == VIRGL_BIND_SHADER_BUFFER) { gr->target = GL_ARRAY_BUFFER_ARB; vrend_create_buffer(gr, size, flags); } else if (bind & VIRGL_BIND_SAMPLER_VIEW) { /* * On Desktop we use GL_ARB_texture_buffer_object; on GLES we use * GL_EXT_texture_buffer (it is in the ANDROID extension pack). */ #if GL_TEXTURE_BUFFER != GL_TEXTURE_BUFFER_EXT #error "GL_TEXTURE_BUFFER enums differ, they shouldn't."
#endif /* need to check GL version here */ if (has_feature(feat_arb_or_gles_ext_texture_buffer)) { gr->target = GL_TEXTURE_BUFFER; } else { gr->target = GL_PIXEL_PACK_BUFFER_ARB; } vrend_create_buffer(gr, size, flags); } else { vrend_printf("%s: Illegal buffer binding flags 0x%x\n", __func__, bind); return -EINVAL; } return 0; } static inline void vrend_renderer_resource_copy_args(const struct vrend_renderer_resource_create_args *args, struct vrend_resource *gr) { assert(gr); assert(args); gr->base.bind = args->bind; gr->base.width0 = args->width; gr->base.height0 = args->height; gr->base.depth0 = args->depth; gr->base.format = args->format; gr->base.target = args->target; gr->base.last_level = args->last_level; gr->base.nr_samples = args->nr_samples; gr->base.array_size = args->array_size; } /* * When GBM allocation is enabled, this function creates a GBM buffer and * EGL image given certain flags. */ static void vrend_resource_gbm_init(struct vrend_resource *gr, uint32_t format) { #ifdef ENABLE_MINIGBM_ALLOCATION uint32_t gbm_flags = virgl_gbm_convert_flags(gr->base.bind); uint32_t gbm_format = 0; if (virgl_gbm_convert_format(&format, &gbm_format)) return; if (vrend_winsys_different_gpu()) gbm_flags |= GBM_BO_USE_LINEAR; if (gr->base.depth0 != 1 || gr->base.last_level != 0 || gr->base.nr_samples != 0) return; if (!gbm || !gbm->device || !gbm_format || !gbm_flags) return; if (!virgl_gbm_external_allocation_preferred(gr->base.bind)) return; if (!gbm_device_is_format_supported(gbm->device, gbm_format, gbm_flags)) return; struct gbm_bo *bo = gbm_bo_create(gbm->device, gr->base.width0, gr->base.height0, gbm_format, gbm_flags); if (!bo) return; gr->gbm_bo = bo; gr->storage_bits |= VREND_STORAGE_GBM_BUFFER; /* This is true so far, but maybe gbm_bo_get_caching_type is needed in the future. 
*/ if (!strcmp(gbm_device_get_backend_name(gbm->device), "i915")) gr->map_info = VIRGL_RENDERER_MAP_CACHE_CACHED; else gr->map_info = VIRGL_RENDERER_MAP_CACHE_WC; int num_planes = gbm_bo_get_plane_count(bo); for (int plane = 0; plane < num_planes; plane++) gr->size += gbm_bo_get_plane_size(bo, plane); if (!virgl_gbm_gpu_import_required(gr->base.bind)) return; gr->egl_image = virgl_egl_image_from_gbm_bo(egl, bo); if (!gr->egl_image) { gr->gbm_bo = NULL; gbm_bo_destroy(bo); } gr->storage_bits |= VREND_STORAGE_EGL_IMAGE; #else (void)format; (void)gr; #endif } static int vrend_resource_alloc_texture(struct vrend_resource *gr, enum virgl_formats format, void *image_oes) { uint level; GLenum internalformat, glformat, gltype; struct vrend_texture *gt = (struct vrend_texture *)gr; struct pipe_resource *pr = &gr->base; const bool format_can_texture_storage = has_feature(feat_texture_storage) && (tex_conv_table[format].flags & VIRGL_TEXTURE_CAN_TEXTURE_STORAGE); if (format_can_texture_storage) gr->storage_bits |= VREND_STORAGE_GL_IMMUTABLE; if (!image_oes) { vrend_resource_gbm_init(gr, format); if (gr->gbm_bo && !has_bit(gr->storage_bits, VREND_STORAGE_EGL_IMAGE)) return 0; image_oes = gr->egl_image; } gr->target = tgsitargettogltarget(pr->target, pr->nr_samples); gr->storage_bits |= VREND_STORAGE_GL_TEXTURE; /* ugly workaround for texture rectangle missing on GLES */ if (vrend_state.use_gles && gr->target == GL_TEXTURE_RECTANGLE_NV) { /* for some guests this is the only usage of rect */ if (pr->width0 != 1 || pr->height0 != 1) { report_gles_warn(NULL, GLES_WARN_TEXTURE_RECT); } gr->target = GL_TEXTURE_2D; } /* fallback for 1D textures */ if (vrend_state.use_gles && gr->target == GL_TEXTURE_1D) { gr->target = GL_TEXTURE_2D; } /* fallback for 1D array textures */ if (vrend_state.use_gles && gr->target == GL_TEXTURE_1D_ARRAY) { gr->target = GL_TEXTURE_2D_ARRAY; } glGenTextures(1, &gr->id); glBindTexture(gr->target, gr->id); debug_texture(__func__, gr); if (image_oes) { if (has_bit(gr->storage_bits, VREND_STORAGE_GL_IMMUTABLE) && has_feature(feat_egl_image_storage)) { glEGLImageTargetTexStorageEXT(gr->target, (GLeglImageOES) image_oes, NULL); } else if (has_feature(feat_egl_image_external)) { gr->storage_bits &= ~VREND_STORAGE_GL_IMMUTABLE; glEGLImageTargetTexture2DOES(gr->target, (GLeglImageOES) image_oes); } else { vrend_printf( "missing GL_OES_EGL_image_external extensions\n"); glBindTexture(gr->target, 0); return EINVAL; } gr->storage_bits |= VREND_STORAGE_EGL_IMAGE; } else { internalformat = tex_conv_table[format].internalformat; glformat = tex_conv_table[format].glformat; gltype = tex_conv_table[format].gltype; if (internalformat == 0) { vrend_printf("unknown format is %d\n", pr->format); glBindTexture(gr->target, 0); return EINVAL; } if (pr->nr_samples > 0) { if (format_can_texture_storage) { if (gr->target == GL_TEXTURE_2D_MULTISAMPLE) { glTexStorage2DMultisample(gr->target, pr->nr_samples, internalformat, pr->width0, pr->height0, GL_TRUE); } else { glTexStorage3DMultisample(gr->target, pr->nr_samples, internalformat, pr->width0, pr->height0, pr->array_size, GL_TRUE); } } else { if (gr->target == GL_TEXTURE_2D_MULTISAMPLE) { glTexImage2DMultisample(gr->target, pr->nr_samples, internalformat, pr->width0, pr->height0, GL_TRUE); } else { glTexImage3DMultisample(gr->target, pr->nr_samples, internalformat, pr->width0, pr->height0, pr->array_size, GL_TRUE); } } } else if (gr->target == GL_TEXTURE_CUBE_MAP) { int i; if (format_can_texture_storage) glTexStorage2D(GL_TEXTURE_CUBE_MAP, pr->last_level + 
1, internalformat, pr->width0, pr->height0); else { for (i = 0; i < 6; i++) { GLenum ctarget = GL_TEXTURE_CUBE_MAP_POSITIVE_X + i; for (level = 0; level <= pr->last_level; level++) { unsigned mwidth = u_minify(pr->width0, level); unsigned mheight = u_minify(pr->height0, level); glTexImage2D(ctarget, level, internalformat, mwidth, mheight, 0, glformat, gltype, NULL); } } } } else if (gr->target == GL_TEXTURE_3D || gr->target == GL_TEXTURE_2D_ARRAY || gr->target == GL_TEXTURE_CUBE_MAP_ARRAY) { if (format_can_texture_storage) { unsigned depth_param = (gr->target == GL_TEXTURE_2D_ARRAY || gr->target == GL_TEXTURE_CUBE_MAP_ARRAY) ? pr->array_size : pr->depth0; glTexStorage3D(gr->target, pr->last_level + 1, internalformat, pr->width0, pr->height0, depth_param); } else { for (level = 0; level <= pr->last_level; level++) { unsigned depth_param = (gr->target == GL_TEXTURE_2D_ARRAY || gr->target == GL_TEXTURE_CUBE_MAP_ARRAY) ? pr->array_size : u_minify(pr->depth0, level); unsigned mwidth = u_minify(pr->width0, level); unsigned mheight = u_minify(pr->height0, level); glTexImage3D(gr->target, level, internalformat, mwidth, mheight, depth_param, 0, glformat, gltype, NULL); } } } else if (gr->target == GL_TEXTURE_1D && vrend_state.use_gles) { report_gles_missing_func(NULL, "glTexImage1D"); } else if (gr->target == GL_TEXTURE_1D) { if (format_can_texture_storage) { glTexStorage1D(gr->target, pr->last_level + 1, internalformat, pr->width0); } else { for (level = 0; level <= pr->last_level; level++) { unsigned mwidth = u_minify(pr->width0, level); glTexImage1D(gr->target, level, internalformat, mwidth, 0, glformat, gltype, NULL); } } } else { if (format_can_texture_storage) glTexStorage2D(gr->target, pr->last_level + 1, internalformat, pr->width0, gr->target == GL_TEXTURE_1D_ARRAY ? pr->array_size : pr->height0); else { for (level = 0; level <= pr->last_level; level++) { unsigned mwidth = u_minify(pr->width0, level); unsigned mheight = u_minify(pr->height0, level); glTexImage2D(gr->target, level, internalformat, mwidth, gr->target == GL_TEXTURE_1D_ARRAY ? 
pr->array_size : mheight, 0, glformat, gltype, NULL); } } } } if (!format_can_texture_storage) { glTexParameteri(gr->target, GL_TEXTURE_BASE_LEVEL, 0); glTexParameteri(gr->target, GL_TEXTURE_MAX_LEVEL, pr->last_level); } glBindTexture(gr->target, 0); if (image_oes && gr->gbm_bo) { #ifdef ENABLE_MINIGBM_ALLOCATION if (!has_bit(gr->storage_bits, VREND_STORAGE_GL_BUFFER) && !vrend_format_can_texture_view(gr->base.format)) { for (int i = 0; i < gbm_bo_get_plane_count(gr->gbm_bo); i++) { gr->aux_plane_egl_image[i] = virgl_egl_aux_plane_image_from_gbm_bo(egl, gr->gbm_bo, i); } } #endif } gt->state.max_lod = -1; gt->cur_swizzle[0] = gt->cur_swizzle[1] = gt->cur_swizzle[2] = gt->cur_swizzle[3] = -1; gt->cur_base = -1; gt->cur_max = 10000; return 0; } static struct vrend_resource * vrend_resource_create(const struct vrend_renderer_resource_create_args *args) { struct vrend_resource *gr; int ret; char error_string[256]; ret = check_resource_valid(args, error_string); if (ret) { vrend_printf("%s, Illegal resource parameters, error: %s\n", __func__, error_string); return NULL; } gr = (struct vrend_resource *)CALLOC_STRUCT(vrend_texture); if (!gr) return NULL; vrend_renderer_resource_copy_args(args, gr); gr->storage_bits = VREND_STORAGE_GUEST_MEMORY; if (args->flags & VIRGL_RESOURCE_Y_0_TOP) gr->y_0_top = true; pipe_reference_init(&gr->base.reference, 1); return gr; } struct pipe_resource * vrend_renderer_resource_create(const struct vrend_renderer_resource_create_args *args, void *image_oes) { struct vrend_resource *gr; int ret; gr = vrend_resource_create(args); if (!gr) return NULL; if (args->target == PIPE_BUFFER) { ret = vrend_resource_alloc_buffer(gr, args->flags); } else { const enum virgl_formats format = gr->base.format; ret = vrend_resource_alloc_texture(gr, format, image_oes); } if (ret) { FREE(gr); return NULL; } return &gr->base; } void vrend_renderer_resource_destroy(struct vrend_resource *res) { if (has_bit(res->storage_bits, VREND_STORAGE_GL_TEXTURE)) { glDeleteTextures(1, &res->id); } else if (has_bit(res->storage_bits, VREND_STORAGE_GL_BUFFER)) { glDeleteBuffers(1, &res->id); if (res->tbo_tex_id) glDeleteTextures(1, &res->tbo_tex_id); } else if (has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY)) { free(res->ptr); } if (res->rbo_id) { glDeleteRenderbuffers(1, &res->rbo_id); } if (has_bit(res->storage_bits, VREND_STORAGE_GL_MEMOBJ)) { glDeleteMemoryObjectsEXT(1, &res->memobj); } #if HAVE_EPOXY_EGL_H if (res->egl_image) { virgl_egl_image_destroy(egl, res->egl_image); for (unsigned i = 0; i < ARRAY_SIZE(res->aux_plane_egl_image); i++) { if (res->aux_plane_egl_image[i]) { virgl_egl_image_destroy(egl, res->aux_plane_egl_image[i]); } } } #endif #ifdef ENABLE_MINIGBM_ALLOCATION if (res->gbm_bo) gbm_bo_destroy(res->gbm_bo); #endif free(res); } struct virgl_sub_upload_data { GLenum target; struct pipe_box *box; }; static void iov_buffer_upload(void *cookie, uint32_t doff, void *src, int len) { struct virgl_sub_upload_data *d = cookie; glBufferSubData(d->target, d->box->x + doff, len, src); } static void vrend_scale_depth(void *ptr, int size, float scale_val) { GLuint *ival = ptr; const GLfloat myscale = 1.0f / 0xffffff; int i; for (i = 0; i < size / 4; i++) { GLuint value = ival[i]; GLfloat d = ((float)(value >> 8) * myscale) * scale_val; d = CLAMP(d, 0.0F, 1.0F); ival[i] = (int)(d / myscale) << 8; } } static void read_transfer_data(const struct iovec *iov, unsigned int num_iovs, char *data, enum virgl_formats format, uint64_t offset, uint32_t src_stride, uint32_t src_layer_stride, 
struct pipe_box *box, bool invert) { int blsize = util_format_get_blocksize(format); uint32_t size = vrend_get_iovec_size(iov, num_iovs); uint32_t send_size = util_format_get_nblocks(format, box->width, box->height) * blsize * box->depth; uint32_t bwx = util_format_get_nblocksx(format, box->width) * blsize; int32_t bh = util_format_get_nblocksy(format, box->height); int d, h; if ((send_size == size || bh == 1) && !invert && box->depth == 1) vrend_read_from_iovec(iov, num_iovs, offset, data, send_size); else { if (invert) { for (d = 0; d < box->depth; d++) { uint32_t myoffset = offset + d * src_layer_stride; for (h = bh - 1; h >= 0; h--) { void *ptr = data + (h * bwx) + d * (bh * bwx); vrend_read_from_iovec(iov, num_iovs, myoffset, ptr, bwx); myoffset += src_stride; } } } else { for (d = 0; d < box->depth; d++) { uint32_t myoffset = offset + d * src_layer_stride; for (h = 0; h < bh; h++) { void *ptr = data + (h * bwx) + d * (bh * bwx); vrend_read_from_iovec(iov, num_iovs, myoffset, ptr, bwx); myoffset += src_stride; } } } } } static void write_transfer_data(struct pipe_resource *res, const struct iovec *iov, unsigned num_iovs, char *data, uint32_t dst_stride, struct pipe_box *box, uint32_t level, uint64_t offset, bool invert) { int blsize = util_format_get_blocksize(res->format); uint32_t size = vrend_get_iovec_size(iov, num_iovs); uint32_t send_size = util_format_get_nblocks(res->format, box->width, box->height) * blsize * box->depth; uint32_t bwx = util_format_get_nblocksx(res->format, box->width) * blsize; int32_t bh = util_format_get_nblocksy(res->format, box->height); int d, h; uint32_t stride = dst_stride ? dst_stride : util_format_get_nblocksx(res->format, u_minify(res->width0, level)) * blsize; if ((send_size == size || bh == 1) && !invert && box->depth == 1) { vrend_write_to_iovec(iov, num_iovs, offset, data, send_size); } else if (invert) { for (d = 0; d < box->depth; d++) { uint32_t myoffset = offset + d * stride * u_minify(res->height0, level); for (h = bh - 1; h >= 0; h--) { void *ptr = data + (h * bwx) + d * (bh * bwx); vrend_write_to_iovec(iov, num_iovs, myoffset, ptr, bwx); myoffset += stride; } } } else { for (d = 0; d < box->depth; d++) { uint32_t myoffset = offset + d * stride * u_minify(res->height0, level); for (h = 0; h < bh; h++) { void *ptr = data + (h * bwx) + d * (bh * bwx); vrend_write_to_iovec(iov, num_iovs, myoffset, ptr, bwx); myoffset += stride; } } } } static bool check_transfer_iovec(struct vrend_resource *res, const struct vrend_transfer_info *info) { return (info->iovec && info->iovec_cnt) || res->iov; } static bool check_transfer_bounds(struct vrend_resource *res, const struct vrend_transfer_info *info) { int lwidth, lheight; /* check mipmap level is in bounds */ if (info->level > res->base.last_level) return false; if (info->box->x < 0 || info->box->y < 0) return false; /* these will catch bad y/z/w/d with 1D textures etc */ lwidth = u_minify(res->base.width0, info->level); if (info->box->width > lwidth || info->box->width < 0) return false; if (info->box->x > lwidth) return false; if (info->box->width + info->box->x > lwidth) return false; lheight = u_minify(res->base.height0, info->level); if (info->box->height > lheight || info->box->height < 0) return false; if (info->box->y > lheight) return false; if (info->box->height + info->box->y > lheight) return false; if (res->base.target == PIPE_TEXTURE_3D) { int ldepth = u_minify(res->base.depth0, info->level); if (info->box->depth > ldepth || info->box->depth < 0) return false; if (info->box->z > ldepth) 
return false; if (info->box->z + info->box->depth > ldepth) return false; } else { if (info->box->depth > (int)res->base.array_size) return false; if (info->box->z > (int)res->base.array_size) return false; if (info->box->z + info->box->depth > (int)res->base.array_size) return false; } return true; } /* Calculate the size of the memory needed to hold all the data of a * transfer for particular stride values. */ static uint64_t vrend_transfer_size(struct vrend_resource *vres, const struct vrend_transfer_info *info, uint32_t stride, uint32_t layer_stride) { struct pipe_resource *pres = &vres->base; struct pipe_box *box = info->box; uint64_t size; /* For purposes of size calculation, assume that invalid dimension values * correspond to 1. */ int w = box->width > 0 ? box->width : 1; int h = box->height > 0 ? box->height : 1; int d = box->depth > 0 ? box->depth : 1; int nblocksx = util_format_get_nblocksx(pres->format, w); int nblocksy = util_format_get_nblocksy(pres->format, h); /* Calculate the box size, not including the last layer. The last layer * is the only one which may be incomplete, and is the only layer for * non 3d/2d-array formats. */ size = (d - 1) * layer_stride; /* Calculate the size of the last (or only) layer, not including the last * block row. The last block row is the only one which may be incomplete and * is the only block row for non 2d/1d-array formats. */ size += (nblocksy - 1) * stride; /* Calculate the size of the the last (or only) block row. */ size += nblocksx * util_format_get_blocksize(pres->format); return size; } static bool check_iov_bounds(struct vrend_resource *res, const struct vrend_transfer_info *info, const struct iovec *iov, int num_iovs) { GLuint transfer_size; GLuint iovsize = vrend_get_iovec_size(iov, num_iovs); GLuint valid_stride, valid_layer_stride; /* If the transfer specifies a stride, verify that it's at least as large as * the minimum required for the transfer. If no stride is specified use the * image stride for the specified level. */ if (info->stride) { GLuint min_stride = util_format_get_stride(res->base.format, info->box->width); if (info->stride < min_stride) return false; valid_stride = info->stride; } else { valid_stride = util_format_get_stride(res->base.format, u_minify(res->base.width0, info->level)); } /* If the transfer specifies a layer_stride, verify that it's at least as * large as the minimum required for the transfer. If no layer_stride is * specified use the image layer_stride for the specified level. */ if (info->layer_stride) { GLuint min_layer_stride = util_format_get_2d_size(res->base.format, valid_stride, info->box->height); if (info->layer_stride < min_layer_stride) return false; valid_layer_stride = info->layer_stride; } else { valid_layer_stride = util_format_get_2d_size(res->base.format, valid_stride, u_minify(res->base.height0, info->level)); } /* Calculate the size required for the transferred data, based on the * calculated or provided strides, and ensure that the iov, starting at the * specified offset, is able to hold at least that size. 
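* As a worked example with hypothetical numbers: for a 3-layer transfer of a 64x64-block RGBA8 region with stride = 256 and layer_stride = 16384, vrend_transfer_size() gives (3 - 1) * 16384 + (64 - 1) * 256 + 64 * 4 = 32768 + 16128 + 256 = 49152 bytes, so the iov must hold at least info->offset + 49152 bytes.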
*/ transfer_size = vrend_transfer_size(res, info, valid_stride, valid_layer_stride); if (iovsize < info->offset) return false; if (iovsize < transfer_size) return false; if (iovsize < info->offset + transfer_size) return false; return true; } static void get_current_texture(GLenum target, GLint* tex) { switch (target) { #define GET_TEXTURE(a) \ case GL_TEXTURE_ ## a: \ glGetIntegerv(GL_TEXTURE_BINDING_ ## a, tex); return GET_TEXTURE(1D); GET_TEXTURE(2D); GET_TEXTURE(3D); GET_TEXTURE(1D_ARRAY); GET_TEXTURE(2D_ARRAY); GET_TEXTURE(RECTANGLE); GET_TEXTURE(CUBE_MAP); GET_TEXTURE(CUBE_MAP_ARRAY); GET_TEXTURE(BUFFER); GET_TEXTURE(2D_MULTISAMPLE); GET_TEXTURE(2D_MULTISAMPLE_ARRAY); #undef GET_TEXTURE default: vrend_printf("Unknown texture target %x\n", target); } } static void vrend_swizzle_data_bgra(uint64_t size, void *data) { const size_t bpp = 4; const size_t num_pixels = size / bpp; for (size_t i = 0; i < num_pixels; ++i) { unsigned char *pixel = ((unsigned char*)data) + i * bpp; unsigned char first = *pixel; *pixel = *(pixel + 2); *(pixel + 2) = first; } } static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx, struct vrend_resource *res, const struct iovec *iov, int num_iovs, const struct vrend_transfer_info *info) { void *data; if ((is_only_bit(res->storage_bits, VREND_STORAGE_GUEST_MEMORY) || has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY)) && res->iov) { return vrend_copy_iovec(iov, num_iovs, info->offset, res->iov, res->num_iovs, info->box->x, info->box->width, res->ptr); } if (has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY)) { assert(!res->iov); vrend_read_from_iovec(iov, num_iovs, info->offset, res->ptr + info->box->x, info->box->width); return 0; } if (has_bit(res->storage_bits, VREND_STORAGE_GL_BUFFER)) { GLuint map_flags = GL_MAP_INVALIDATE_RANGE_BIT | GL_MAP_WRITE_BIT; struct virgl_sub_upload_data d; d.box = info->box; d.target = res->target; if (!info->synchronized) map_flags |= GL_MAP_UNSYNCHRONIZED_BIT; glBindBufferARB(res->target, res->id); data = glMapBufferRange(res->target, info->box->x, info->box->width, map_flags); if (data == NULL) { vrend_printf("map failed for element buffer\n"); vrend_read_from_iovec_cb(iov, num_iovs, info->offset, info->box->width, &iov_buffer_upload, &d); } else { vrend_read_from_iovec(iov, num_iovs, info->offset, data, info->box->width); glUnmapBuffer(res->target); } glBindBufferARB(res->target, 0); } else { GLenum glformat; GLenum gltype; int need_temp = 0; int elsize = util_format_get_blocksize(res->base.format); int x = 0, y = 0; bool compressed; bool invert = false; float depth_scale; GLuint send_size = 0; uint32_t stride = info->stride; uint32_t layer_stride = info->layer_stride; if (ctx) vrend_use_program(ctx->sub, 0); else glUseProgram(0); if (!stride) stride = util_format_get_nblocksx(res->base.format, u_minify(res->base.width0, info->level)) * elsize; if (!layer_stride) layer_stride = util_format_get_2d_size(res->base.format, stride, u_minify(res->base.height0, info->level)); compressed = util_format_is_compressed(res->base.format); if (num_iovs > 1 || compressed) { need_temp = true; } if (vrend_state.use_gles && vrend_format_is_bgra(res->base.format) && !vrend_resource_is_emulated_bgra(res)) need_temp = true; if (vrend_state.use_core_profile == true && (res->y_0_top || (res->base.format == VIRGL_FORMAT_Z24X8_UNORM))) { need_temp = true; if (res->y_0_top) invert = true; } send_size = util_format_get_nblocks(res->base.format, info->box->width, info->box->height) * elsize; if (res->target == 
GL_TEXTURE_3D || res->target == GL_TEXTURE_2D_ARRAY || res->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY || res->target == GL_TEXTURE_CUBE_MAP_ARRAY) send_size *= info->box->depth; else if (need_temp && info->box->depth != 1) return EINVAL; if (need_temp) { data = malloc(send_size); if (!data) return ENOMEM; read_transfer_data(iov, num_iovs, data, res->base.format, info->offset, stride, layer_stride, info->box, invert); } else { if (send_size > iov[0].iov_len - info->offset) return EINVAL; data = (char*)iov[0].iov_base + info->offset; } if (!need_temp) { assert(stride); glPixelStorei(GL_UNPACK_ROW_LENGTH, stride / elsize); glPixelStorei(GL_UNPACK_IMAGE_HEIGHT, layer_stride / stride); } else glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); switch (elsize) { case 1: case 3: glPixelStorei(GL_UNPACK_ALIGNMENT, 1); break; case 2: case 6: glPixelStorei(GL_UNPACK_ALIGNMENT, 2); break; case 4: default: glPixelStorei(GL_UNPACK_ALIGNMENT, 4); break; case 8: glPixelStorei(GL_UNPACK_ALIGNMENT, 8); break; } glformat = tex_conv_table[res->base.format].glformat; gltype = tex_conv_table[res->base.format].gltype; if ((!vrend_state.use_core_profile) && (res->y_0_top)) { GLuint buffers; GLuint fb_id; glGenFramebuffers(1, &fb_id); glBindFramebuffer(GL_FRAMEBUFFER, fb_id); vrend_fb_bind_texture(res, 0, info->level, 0); buffers = GL_COLOR_ATTACHMENT0; glDrawBuffers(1, &buffers); glDisable(GL_BLEND); if (ctx) { vrend_depth_test_enable(ctx, false); vrend_alpha_test_enable(ctx, false); vrend_stencil_test_enable(ctx->sub, false); } else { glDisable(GL_DEPTH_TEST); glDisable(GL_ALPHA_TEST); glDisable(GL_STENCIL_TEST); } glPixelZoom(1.0f, res->y_0_top ? -1.0f : 1.0f); glWindowPos2i(info->box->x, res->y_0_top ? (int)res->base.height0 - info->box->y : info->box->y); glDrawPixels(info->box->width, info->box->height, glformat, gltype, data); glDeleteFramebuffers(1, &fb_id); } else { uint32_t comp_size; GLint old_tex = 0; get_current_texture(res->target, &old_tex); glBindTexture(res->target, res->id); if (compressed) { glformat = tex_conv_table[res->base.format].internalformat; comp_size = util_format_get_nblocks(res->base.format, info->box->width, info->box->height) * util_format_get_blocksize(res->base.format); } if (glformat == 0) { glformat = GL_BGRA; gltype = GL_UNSIGNED_BYTE; } x = info->box->x; y = invert ? (int)res->base.height0 - info->box->y - info->box->height : info->box->y; /* GLES doesn't allow format conversions, which we need for BGRA resources with RGBA * internal format. So we fallback to performing a CPU swizzle before uploading. */ if (vrend_state.use_gles && vrend_format_is_bgra(res->base.format) && !vrend_resource_is_emulated_bgra(res)) { VREND_DEBUG(dbg_bgra, ctx, "manually swizzling bgra->rgba on upload since gles+bgra\n"); vrend_swizzle_data_bgra(send_size, data); } /* mipmaps are usually passed in one iov, and we need to keep the offset * into the data in case we want to read back the data of a surface * that can not be rendered. Since we can not assume that the whole texture * is filled, we evaluate the offset for origin (0,0,0). Since it is also * possible that a resource is reused and resized update the offset every time. 
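* As a hypothetical example: with stride = 1024, elsize = 4, level_height = 256 and a box origin of (x, y, z) = (8, 16, 1) uploaded from info->offset = 300000, the stored value is 300000 - ((1 * 256 + 16) * 1024 + 8 * 4) = 300000 - 278560 = 21440, i.e. the iov offset that texel (0, 0, 0) of this level would have.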
*/ if (info->level < VR_MAX_TEXTURE_2D_LEVELS) { int64_t level_height = u_minify(res->base.height0, info->level); res->mipmap_offsets[info->level] = info->offset - ((info->box->z * level_height + y) * stride + x * elsize); } if (res->base.format == VIRGL_FORMAT_Z24X8_UNORM) { /* we get values from the guest as 24-bit scaled integers but we give them to the host GL and it interprets them as 32-bit scaled integers, so we need to scale them here */ depth_scale = 256.0; if (!vrend_state.use_core_profile) glPixelTransferf(GL_DEPTH_SCALE, depth_scale); else vrend_scale_depth(data, send_size, depth_scale); } if (res->target == GL_TEXTURE_CUBE_MAP) { GLenum ctarget = GL_TEXTURE_CUBE_MAP_POSITIVE_X + info->box->z; if (compressed) { glCompressedTexSubImage2D(ctarget, info->level, x, y, info->box->width, info->box->height, glformat, comp_size, data); } else { glTexSubImage2D(ctarget, info->level, x, y, info->box->width, info->box->height, glformat, gltype, data); } } else if (res->target == GL_TEXTURE_3D || res->target == GL_TEXTURE_2D_ARRAY || res->target == GL_TEXTURE_CUBE_MAP_ARRAY) { if (compressed) { glCompressedTexSubImage3D(res->target, info->level, x, y, info->box->z, info->box->width, info->box->height, info->box->depth, glformat, comp_size, data); } else { glTexSubImage3D(res->target, info->level, x, y, info->box->z, info->box->width, info->box->height, info->box->depth, glformat, gltype, data); } } else if (res->target == GL_TEXTURE_1D) { if (vrend_state.use_gles) { /* Covers both compressed and none compressed. */ report_gles_missing_func(ctx, "gl[Compressed]TexSubImage1D"); } else if (compressed) { glCompressedTexSubImage1D(res->target, info->level, info->box->x, info->box->width, glformat, comp_size, data); } else { glTexSubImage1D(res->target, info->level, info->box->x, info->box->width, glformat, gltype, data); } } else { if (compressed) { glCompressedTexSubImage2D(res->target, info->level, x, res->target == GL_TEXTURE_1D_ARRAY ? info->box->z : y, info->box->width, info->box->height, glformat, comp_size, data); } else { glTexSubImage2D(res->target, info->level, x, res->target == GL_TEXTURE_1D_ARRAY ? info->box->z : y, info->box->width, res->target == GL_TEXTURE_1D_ARRAY ? 
info->box->depth : info->box->height, glformat, gltype, data); } } if (res->base.format == VIRGL_FORMAT_Z24X8_UNORM) { if (!vrend_state.use_core_profile) glPixelTransferf(GL_DEPTH_SCALE, 1.0); } glBindTexture(res->target, old_tex); } if (stride && !need_temp) { glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); glPixelStorei(GL_UNPACK_IMAGE_HEIGHT, 0); } glPixelStorei(GL_UNPACK_ALIGNMENT, 4); if (need_temp) free(data); } return 0; } static uint32_t vrend_get_texture_depth(struct vrend_resource *res, uint32_t level) { uint32_t depth = 1; if (res->target == GL_TEXTURE_3D) depth = u_minify(res->base.depth0, level); else if (res->target == GL_TEXTURE_1D_ARRAY || res->target == GL_TEXTURE_2D_ARRAY || res->target == GL_TEXTURE_CUBE_MAP || res->target == GL_TEXTURE_CUBE_MAP_ARRAY) depth = res->base.array_size; return depth; } static int vrend_transfer_send_getteximage(struct vrend_resource *res, const struct iovec *iov, int num_iovs, const struct vrend_transfer_info *info) { GLenum format, type; uint32_t tex_size; char *data; int elsize = util_format_get_blocksize(res->base.format); int compressed = util_format_is_compressed(res->base.format); GLenum target; uint32_t send_offset = 0; format = tex_conv_table[res->base.format].glformat; type = tex_conv_table[res->base.format].gltype; if (compressed) format = tex_conv_table[res->base.format].internalformat; tex_size = util_format_get_nblocks(res->base.format, u_minify(res->base.width0, info->level), u_minify(res->base.height0, info->level)) * util_format_get_blocksize(res->base.format) * vrend_get_texture_depth(res, info->level); if (info->box->z && res->target != GL_TEXTURE_CUBE_MAP) { send_offset = util_format_get_nblocks(res->base.format, u_minify(res->base.width0, info->level), u_minify(res->base.height0, info->level)) * util_format_get_blocksize(res->base.format) * info->box->z; } data = malloc(tex_size); if (!data) return ENOMEM; switch (elsize) { case 1: glPixelStorei(GL_PACK_ALIGNMENT, 1); break; case 2: glPixelStorei(GL_PACK_ALIGNMENT, 2); break; case 4: default: glPixelStorei(GL_PACK_ALIGNMENT, 4); break; case 8: glPixelStorei(GL_PACK_ALIGNMENT, 8); break; } GLint old_tex = 0; get_current_texture(res->target, &old_tex); glBindTexture(res->target, res->id); if (res->target == GL_TEXTURE_CUBE_MAP) { target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + info->box->z; } else target = res->target; if (compressed) { if (has_feature(feat_arb_robustness)) { glGetnCompressedTexImageARB(target, info->level, tex_size, data); } else if (vrend_state.use_gles) { report_gles_missing_func(NULL, "glGetCompressedTexImage"); } else { glGetCompressedTexImage(target, info->level, data); } } else { if (has_feature(feat_arb_robustness)) { glGetnTexImageARB(target, info->level, format, type, tex_size, data); } else if (vrend_state.use_gles) { report_gles_missing_func(NULL, "glGetTexImage"); } else { glGetTexImage(target, info->level, format, type, data); } } glPixelStorei(GL_PACK_ALIGNMENT, 4); write_transfer_data(&res->base, iov, num_iovs, data + send_offset, info->stride, info->box, info->level, info->offset, false); free(data); glBindTexture(res->target, old_tex); return 0; } static void do_readpixels(struct vrend_resource *res, int idx, uint32_t level, uint32_t layer, GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data) { GLuint fb_id; glGenFramebuffers(1, &fb_id); glBindFramebuffer(GL_FRAMEBUFFER, fb_id); vrend_fb_bind_texture(res, idx, level, layer); /* Warn if the driver doesn't agree about the read format and type. 
On desktop GL we can use basically any format and type to glReadPixels, so we picked the format and type that matches the native format. But on GLES we are limited to a very few set, luckily most GLES implementations should return type and format that match the native formats, and can be used for glReadPixels acording to the GLES spec. But we have found that at least Mesa returned the wrong formats, again luckily we are able to change Mesa. But just in case there are more bad drivers out there, or we mess up the format somewhere, we warn here. */ if (vrend_state.use_gles) { GLint imp; if (type != GL_UNSIGNED_BYTE && type != GL_UNSIGNED_INT && type != GL_INT && type != GL_FLOAT) { glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_TYPE, &imp); if (imp != (GLint)type) { vrend_printf( "GL_IMPLEMENTATION_COLOR_READ_TYPE is not expected native type 0x%x != imp 0x%x\n", type, imp); } } if (format != GL_RGBA && format != GL_RGBA_INTEGER) { glGetIntegerv(GL_IMPLEMENTATION_COLOR_READ_FORMAT, &imp); if (imp != (GLint)format) { vrend_printf( "GL_IMPLEMENTATION_COLOR_READ_FORMAT is not expected native format 0x%x != imp 0x%x\n", format, imp); } } } /* read-color clamping is handled in the mesa frontend */ if (!vrend_state.use_gles) { glClampColor(GL_CLAMP_READ_COLOR_ARB, GL_FALSE); } if (has_feature(feat_arb_robustness)) glReadnPixelsARB(x, y, width, height, format, type, bufSize, data); else if (has_feature(feat_gles_khr_robustness)) glReadnPixelsKHR(x, y, width, height, format, type, bufSize, data); else glReadPixels(x, y, width, height, format, type, data); glDeleteFramebuffers(1, &fb_id); } static int vrend_transfer_send_readpixels(struct vrend_context *ctx, struct vrend_resource *res, const struct iovec *iov, int num_iovs, const struct vrend_transfer_info *info) { char *myptr = (char*)iov[0].iov_base + info->offset; int need_temp = 0; char *data; bool actually_invert, separate_invert = false; GLenum format, type; GLint y1; uint32_t send_size = 0; uint32_t h = u_minify(res->base.height0, info->level); int elsize = util_format_get_blocksize(res->base.format); float depth_scale; int row_stride = info->stride / elsize; GLint old_fbo; if (ctx) vrend_use_program(ctx->sub, 0); else glUseProgram(0); enum virgl_formats fmt = res->base.format; format = tex_conv_table[fmt].glformat; type = tex_conv_table[fmt].gltype; /* if we are asked to invert and reading from a front then don't */ actually_invert = res->y_0_top; if (actually_invert && !has_feature(feat_mesa_invert)) separate_invert = true; #if UTIL_ARCH_BIG_ENDIAN glPixelStorei(GL_PACK_SWAP_BYTES, 1); #endif if (num_iovs > 1 || separate_invert) need_temp = 1; if (vrend_state.use_gles && vrend_format_is_bgra(res->base.format) && !vrend_resource_is_emulated_bgra(res)) need_temp = true; if (need_temp) { send_size = util_format_get_nblocks(res->base.format, info->box->width, info->box->height) * info->box->depth * util_format_get_blocksize(res->base.format); data = malloc(send_size); if (!data) { vrend_printf("malloc failed %d\n", send_size); return ENOMEM; } } else { send_size = iov[0].iov_len - info->offset; data = myptr; if (!row_stride) row_stride = util_format_get_nblocksx(res->base.format, u_minify(res->base.width0, info->level)); } glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &old_fbo); if (actually_invert) y1 = h - info->box->y - info->box->height; else y1 = info->box->y; if (has_feature(feat_mesa_invert) && actually_invert) glPixelStorei(GL_PACK_INVERT_MESA, 1); if (!need_temp && row_stride) glPixelStorei(GL_PACK_ROW_LENGTH, row_stride); switch (elsize) { 
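/* The pack alignment is matched to the texel block size so that glReadPixels writes rows without unexpected padding: 1 for 1-byte blocks, 2 for 2-byte blocks, 8 for 8-byte blocks, and the GL default of 4 for everything else. */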
case 1: glPixelStorei(GL_PACK_ALIGNMENT, 1); break; case 2: glPixelStorei(GL_PACK_ALIGNMENT, 2); break; case 4: default: glPixelStorei(GL_PACK_ALIGNMENT, 4); break; case 8: glPixelStorei(GL_PACK_ALIGNMENT, 8); break; } if (res->base.format == VIRGL_FORMAT_Z24X8_UNORM) { /* we get values from the guest as 24-bit scaled integers but we give them to the host GL and it interprets them as 32-bit scaled integers, so we need to scale them here */ depth_scale = 1.0 / 256.0; if (!vrend_state.use_core_profile) { glPixelTransferf(GL_DEPTH_SCALE, depth_scale); } } do_readpixels(res, 0, info->level, info->box->z, info->box->x, y1, info->box->width, info->box->height, format, type, send_size, data); /* on GLES, texture-backed BGR* resources are always stored with RGB* internal format, but * the guest will expect to readback the data in BGRA format. * Since the GLES API doesn't allow format conversions like GL, we CPU-swizzle the data * on upload and need to do the same on readback. * The notable exception is externally-stored (GBM/EGL) BGR* resources, for which BGR* * byte-ordering is used instead to match external access patterns. */ if (vrend_state.use_gles && vrend_format_is_bgra(res->base.format) && !vrend_resource_is_emulated_bgra(res)) { VREND_DEBUG(dbg_bgra, ctx, "manually swizzling rgba->bgra on readback since gles+bgra\n"); vrend_swizzle_data_bgra(send_size, data); } if (res->base.format == VIRGL_FORMAT_Z24X8_UNORM) { if (!vrend_state.use_core_profile) glPixelTransferf(GL_DEPTH_SCALE, 1.0); else vrend_scale_depth(data, send_size, depth_scale); } if (has_feature(feat_mesa_invert) && actually_invert) glPixelStorei(GL_PACK_INVERT_MESA, 0); if (!need_temp && row_stride) glPixelStorei(GL_PACK_ROW_LENGTH, 0); glPixelStorei(GL_PACK_ALIGNMENT, 4); #if UTIL_ARCH_BIG_ENDIAN glPixelStorei(GL_PACK_SWAP_BYTES, 0); #endif if (need_temp) { write_transfer_data(&res->base, iov, num_iovs, data, info->stride, info->box, info->level, info->offset, separate_invert); free(data); } glBindFramebuffer(GL_FRAMEBUFFER, old_fbo); return 0; } static int vrend_transfer_send_readonly(struct vrend_resource *res, const struct iovec *iov, int num_iovs, UNUSED const struct vrend_transfer_info *info) { bool same_iov = true; uint i; if (res->num_iovs == (uint32_t)num_iovs) { for (i = 0; i < res->num_iovs; i++) { if (res->iov[i].iov_len != iov[i].iov_len || res->iov[i].iov_base != iov[i].iov_base) { same_iov = false; } } } else { same_iov = false; } /* * When we detect that we are reading back to the same iovs that are * attached to the resource and we know that the resource can not * be rendered to (as this function is only called then), we do not * need to do anything more. 
*/ if (same_iov) { return 0; } return -1; } static int vrend_renderer_transfer_send_iov(struct vrend_context *ctx, struct vrend_resource *res, const struct iovec *iov, int num_iovs, const struct vrend_transfer_info *info) { if (is_only_bit(res->storage_bits, VREND_STORAGE_GUEST_MEMORY) || (has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY) && res->iov)) { return vrend_copy_iovec(res->iov, res->num_iovs, info->box->x, iov, num_iovs, info->offset, info->box->width, res->ptr); } if (has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY)) { assert(!res->iov); vrend_write_to_iovec(iov, num_iovs, info->offset, res->ptr + info->box->x, info->box->width); return 0; } if (has_bit(res->storage_bits, VREND_STORAGE_GL_BUFFER)) { uint32_t send_size = info->box->width * util_format_get_blocksize(res->base.format); void *data; glBindBufferARB(res->target, res->id); data = glMapBufferRange(res->target, info->box->x, info->box->width, GL_MAP_READ_BIT); if (!data) vrend_printf("unable to open buffer for reading %d\n", res->target); else vrend_write_to_iovec(iov, num_iovs, info->offset, data, send_size); glUnmapBuffer(res->target); glBindBufferARB(res->target, 0); } else { int ret = -1; bool can_readpixels = true; can_readpixels = vrend_format_can_render(res->base.format) || vrend_format_is_ds(res->base.format); if (can_readpixels) ret = vrend_transfer_send_readpixels(ctx, res, iov, num_iovs, info); /* Can hit this on a non-error path as well. */ if (ret) { if (!vrend_state.use_gles) ret = vrend_transfer_send_getteximage(res, iov, num_iovs, info); else ret = vrend_transfer_send_readonly(res, iov, num_iovs, info); } return ret; } return 0; } static int vrend_renderer_transfer_internal(struct vrend_context *ctx, struct vrend_resource *res, const struct vrend_transfer_info *info, int transfer_mode) { const struct iovec *iov; int num_iovs; if (!info->box) return EINVAL; vrend_hw_switch_context(ctx, true); assert(check_transfer_iovec(res, info)); if (info->iovec && info->iovec_cnt) { iov = info->iovec; num_iovs = info->iovec_cnt; } else { iov = res->iov; num_iovs = res->num_iovs; } #ifdef ENABLE_MINIGBM_ALLOCATION if (res->gbm_bo && (transfer_mode == VIRGL_TRANSFER_TO_HOST || !has_bit(res->storage_bits, VREND_STORAGE_EGL_IMAGE))) { assert(!info->synchronized); return virgl_gbm_transfer(res->gbm_bo, transfer_mode, iov, num_iovs, info); } #endif if (!check_transfer_bounds(res, info)) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_TRANSFER_IOV_BOUNDS, res->id); return EINVAL; } if (!check_iov_bounds(res, info, iov, num_iovs)) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_TRANSFER_IOV_BOUNDS, res->id); return EINVAL; } switch (transfer_mode) { case VIRGL_TRANSFER_TO_HOST: return vrend_renderer_transfer_write_iov(ctx, res, iov, num_iovs, info); case VIRGL_TRANSFER_FROM_HOST: return vrend_renderer_transfer_send_iov(ctx, res, iov, num_iovs, info); default: assert(0); } return 0; } int vrend_renderer_transfer_iov(struct vrend_context *ctx, uint32_t dst_handle, const struct vrend_transfer_info *info, int transfer_mode) { struct vrend_resource *res; res = vrend_renderer_ctx_res_lookup(ctx, dst_handle); if (!res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle); return EINVAL; } if (!check_transfer_iovec(res, info)) { if (has_bit(res->storage_bits, VREND_STORAGE_EGL_IMAGE)) return 0; else { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle); return EINVAL; } } return vrend_renderer_transfer_internal(ctx, res, info, transfer_mode); } int 
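/* Transfer entry point that takes a pipe_resource directly and always runs on the internal context 0 (vrend_state.ctx0); it fails with EINVAL up front when neither the transfer info nor the resource carries a backing iovec. */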
vrend_renderer_transfer_pipe(struct pipe_resource *pres, const struct vrend_transfer_info *info, int transfer_mode) { struct vrend_resource *res = (struct vrend_resource *)pres; if (!check_transfer_iovec(res, info)) return EINVAL; return vrend_renderer_transfer_internal(vrend_state.ctx0, res, info, transfer_mode); } int vrend_transfer_inline_write(struct vrend_context *ctx, uint32_t dst_handle, const struct vrend_transfer_info *info) { struct vrend_resource *res; res = vrend_renderer_ctx_res_lookup(ctx, dst_handle); if (!res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle); return EINVAL; } if (!check_transfer_bounds(res, info)) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, dst_handle); return EINVAL; } if (!check_iov_bounds(res, info, info->iovec, info->iovec_cnt)) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, dst_handle); return EINVAL; } #ifdef ENABLE_MINIGBM_ALLOCATION if (res->gbm_bo) { assert(!info->synchronized); return virgl_gbm_transfer(res->gbm_bo, VIRGL_TRANSFER_TO_HOST, info->iovec, info->iovec_cnt, info); } #endif return vrend_renderer_transfer_write_iov(ctx, res, info->iovec, info->iovec_cnt, info); } int vrend_renderer_copy_transfer3d(struct vrend_context *ctx, uint32_t dst_handle, uint32_t src_handle, const struct vrend_transfer_info *info) { struct vrend_resource *src_res, *dst_res; src_res = vrend_renderer_ctx_res_lookup(ctx, src_handle); dst_res = vrend_renderer_ctx_res_lookup(ctx, dst_handle); if (!src_res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, src_handle); return EINVAL; } if (!dst_res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle); return EINVAL; } if (!src_res->iov) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle); return EINVAL; } if (!check_transfer_bounds(dst_res, info)) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, dst_handle); return EINVAL; } if (!check_iov_bounds(dst_res, info, src_res->iov, src_res->num_iovs)) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, dst_handle); return EINVAL; } #ifdef ENABLE_MINIGBM_ALLOCATION if (dst_res->gbm_bo) { bool use_gbm = true; /* The guest uses copy transfers against busy resources to avoid * waiting. The host GL driver is usually smart enough to avoid * blocking by putting the data in a staging buffer and doing a * pipelined copy. But when there is a GBM bo, we can only do that when * VREND_STORAGE_GL_IMMUTABLE is set because it implies that the * internal format is known and is known to be compatible with the * subsequence glTexSubImage2D. Otherwise, we glFinish and use GBM. * Also, EGL images with BGRX format are not compatible with * glTexSubImage2D, since they are stored with only 3bpp, so gbm * transfer is required. 
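* In short: an unsynchronized transfer always takes the GBM path; a synchronized transfer uses the normal glTexSubImage path only when the storage is VREND_STORAGE_GL_IMMUTABLE and the format is not B8G8R8X8, and otherwise does glFinish() followed by the GBM transfer.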
*/ if (info->synchronized) { if (has_bit(dst_res->storage_bits, VREND_STORAGE_GL_IMMUTABLE) && dst_res->base.format != VIRGL_FORMAT_B8G8R8X8_UNORM) use_gbm = false; else glFinish(); } if (use_gbm) { return virgl_gbm_transfer(dst_res->gbm_bo, VIRGL_TRANSFER_TO_HOST, src_res->iov, src_res->num_iovs, info); } } #endif return vrend_renderer_transfer_write_iov(ctx, dst_res, src_res->iov, src_res->num_iovs, info); } int vrend_renderer_copy_transfer3d_from_host(struct vrend_context *ctx, uint32_t dst_handle, uint32_t src_handle, const struct vrend_transfer_info *info) { struct vrend_resource *src_res, *dst_res; src_res = vrend_renderer_ctx_res_lookup(ctx, src_handle); dst_res = vrend_renderer_ctx_res_lookup(ctx, dst_handle); if (!src_res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, src_handle); return EINVAL; } if (!dst_res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle); return EINVAL; } if (!dst_res->iov) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle); return EINVAL; } if (!check_transfer_bounds(src_res, info)) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, dst_handle); return EINVAL; } if (!check_iov_bounds(src_res, info, dst_res->iov, dst_res->num_iovs)) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, dst_handle); return EINVAL; } return vrend_renderer_transfer_send_iov(ctx, src_res, dst_res->iov, dst_res->num_iovs, info); } void vrend_set_stencil_ref(struct vrend_context *ctx, struct pipe_stencil_ref *ref) { if (ctx->sub->stencil_refs[0] != ref->ref_value[0] || ctx->sub->stencil_refs[1] != ref->ref_value[1]) { ctx->sub->stencil_refs[0] = ref->ref_value[0]; ctx->sub->stencil_refs[1] = ref->ref_value[1]; ctx->sub->stencil_state_dirty = true; } } void vrend_set_blend_color(struct vrend_context *ctx, struct pipe_blend_color *color) { ctx->sub->blend_color = *color; glBlendColor(color->color[0], color->color[1], color->color[2], color->color[3]); } void vrend_set_scissor_state(struct vrend_context *ctx, uint32_t start_slot, uint32_t num_scissor, struct pipe_scissor_state *ss) { uint i, idx; if (start_slot > PIPE_MAX_VIEWPORTS || num_scissor > (PIPE_MAX_VIEWPORTS - start_slot)) { vrend_report_buffer_error(ctx, 0); return; } for (i = 0; i < num_scissor; i++) { idx = start_slot + i; ctx->sub->ss[idx] = ss[i]; ctx->sub->scissor_state_dirty |= (1 << idx); } } void vrend_set_polygon_stipple(struct vrend_context *ctx, struct pipe_poly_stipple *ps) { if (vrend_state.use_core_profile) { static const unsigned bit31 = 1u << 31; GLubyte *stip = calloc(1, 1024); int i, j; if (!ctx->pstip_inited) vrend_init_pstipple_texture(ctx); if (!stip) return; for (i = 0; i < 32; i++) { for (j = 0; j < 32; j++) { if (ps->stipple[i] & (bit31 >> j)) stip[i * 32 + j] = 0; else stip[i * 32 + j] = 255; } } glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id); glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 32, 32, GL_RED, GL_UNSIGNED_BYTE, stip); glBindTexture(GL_TEXTURE_2D, 0); free(stip); return; } glPolygonStipple((const GLubyte *)ps->stipple); } void vrend_set_clip_state(struct vrend_context *ctx, struct pipe_clip_state *ucp) { if (vrend_state.use_core_profile) { ctx->sub->ucp_state = *ucp; } else { int i, j; GLdouble val[4]; for (i = 0; i < 8; i++) { for (j = 0; j < 4; j++) val[j] = ucp->ucp[i][j]; glClipPlane(GL_CLIP_PLANE0 + i, val); } } } void vrend_set_sample_mask(UNUSED struct vrend_context *ctx, unsigned sample_mask) { if (has_feature(feat_sample_mask)) glSampleMaski(0, sample_mask); } void 
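/* Convert the guest's minimum sample count into the fractional value expected by glMinSampleShading by dividing by the sample count of the first color buffer, when one is bound. */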
vrend_set_min_samples(struct vrend_context *ctx, unsigned min_samples) { float min_sample_shading = (float)min_samples; if (ctx->sub->nr_cbufs > 0 && ctx->sub->surf[0]) { assert(ctx->sub->surf[0]->texture); min_sample_shading /= MAX2(1, ctx->sub->surf[0]->texture->base.nr_samples); } if (has_feature(feat_sample_shading)) glMinSampleShading(min_sample_shading); } void vrend_set_tess_state(UNUSED struct vrend_context *ctx, const float tess_factors[6]) { if (has_feature(feat_tessellation)) { if (!vrend_state.use_gles) { glPatchParameterfv(GL_PATCH_DEFAULT_OUTER_LEVEL, tess_factors); glPatchParameterfv(GL_PATCH_DEFAULT_INNER_LEVEL, &tess_factors[4]); } else { memcpy(vrend_state.tess_factors, tess_factors, 6 * sizeof (float)); } } } static void vrend_hw_emit_streamout_targets(UNUSED struct vrend_context *ctx, struct vrend_streamout_object *so_obj) { uint i; for (i = 0; i < so_obj->num_targets; i++) { if (!so_obj->so_targets[i]) glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, i, 0); else if (so_obj->so_targets[i]->buffer_offset || so_obj->so_targets[i]->buffer_size < so_obj->so_targets[i]->buffer->base.width0) glBindBufferRange(GL_TRANSFORM_FEEDBACK_BUFFER, i, so_obj->so_targets[i]->buffer->id, so_obj->so_targets[i]->buffer_offset, so_obj->so_targets[i]->buffer_size); else glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, i, so_obj->so_targets[i]->buffer->id); } } void vrend_set_streamout_targets(struct vrend_context *ctx, UNUSED uint32_t append_bitmask, uint32_t num_targets, uint32_t *handles) { struct vrend_so_target *target; uint i; if (!has_feature(feat_transform_feedback)) return; if (num_targets) { bool found = false; struct vrend_streamout_object *obj; LIST_FOR_EACH_ENTRY(obj, &ctx->sub->streamout_list, head) { if (obj->num_targets == num_targets) { if (!memcmp(handles, obj->handles, num_targets * 4)) { found = true; break; } } } if (found) { ctx->sub->current_so = obj; glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, obj->id); return; } obj = CALLOC_STRUCT(vrend_streamout_object); if (has_feature(feat_transform_feedback2)) { glGenTransformFeedbacks(1, &obj->id); glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, obj->id); } obj->num_targets = num_targets; for (i = 0; i < num_targets; i++) { obj->handles[i] = handles[i]; if (handles[i] == 0) continue; target = vrend_object_lookup(ctx->sub->object_hash, handles[i], VIRGL_OBJECT_STREAMOUT_TARGET); if (!target) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handles[i]); free(obj); return; } vrend_so_target_reference(&obj->so_targets[i], target); } vrend_hw_emit_streamout_targets(ctx, obj); list_addtail(&obj->head, &ctx->sub->streamout_list); ctx->sub->current_so = obj; obj->xfb_state = XFB_STATE_STARTED_NEED_BEGIN; } else { if (has_feature(feat_transform_feedback2)) glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, 0); ctx->sub->current_so = NULL; } } static void vrend_resource_buffer_copy(UNUSED struct vrend_context *ctx, struct vrend_resource *src_res, struct vrend_resource *dst_res, uint32_t dstx, uint32_t srcx, uint32_t width) { glBindBuffer(GL_COPY_READ_BUFFER, src_res->id); glBindBuffer(GL_COPY_WRITE_BUFFER, dst_res->id); glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, srcx, dstx, width); glBindBuffer(GL_COPY_READ_BUFFER, 0); glBindBuffer(GL_COPY_WRITE_BUFFER, 0); } static void vrend_resource_copy_fallback(struct vrend_resource *src_res, struct vrend_resource *dst_res, uint32_t dst_level, uint32_t dstx, uint32_t dsty, uint32_t dstz, uint32_t src_level, const struct pipe_box *src_box) { char *tptr; uint32_t 
total_size, src_stride, dst_stride, src_layer_stride; GLenum glformat, gltype; int elsize = util_format_get_blocksize(dst_res->base.format); int compressed = util_format_is_compressed(dst_res->base.format); int cube_slice = 1; uint32_t slice_size, slice_offset; int i; struct pipe_box box; if (src_res->target == GL_TEXTURE_CUBE_MAP) cube_slice = 6; if (src_res->base.format != dst_res->base.format) { vrend_printf( "copy fallback failed due to mismatched formats %d %d\n", src_res->base.format, dst_res->base.format); return; } box = *src_box; box.depth = vrend_get_texture_depth(src_res, src_level); dst_stride = util_format_get_stride(dst_res->base.format, dst_res->base.width0); /* this is ugly need to do a full GetTexImage */ slice_size = util_format_get_nblocks(src_res->base.format, u_minify(src_res->base.width0, src_level), u_minify(src_res->base.height0, src_level)) * util_format_get_blocksize(src_res->base.format); total_size = slice_size * vrend_get_texture_depth(src_res, src_level); tptr = malloc(total_size); if (!tptr) return; glformat = tex_conv_table[src_res->base.format].glformat; gltype = tex_conv_table[src_res->base.format].gltype; if (compressed) glformat = tex_conv_table[src_res->base.format].internalformat; /* If we are on gles we need to rely on the textures backing * iovec to have the data we need, otherwise we can use glGetTexture */ if (vrend_state.use_gles) { uint64_t src_offset = 0; uint64_t dst_offset = 0; if (src_level < VR_MAX_TEXTURE_2D_LEVELS) { src_offset = src_res->mipmap_offsets[src_level]; dst_offset = dst_res->mipmap_offsets[src_level]; } src_stride = util_format_get_nblocksx(src_res->base.format, u_minify(src_res->base.width0, src_level)) * elsize; src_layer_stride = util_format_get_2d_size(src_res->base.format, src_stride, u_minify(src_res->base.height0, src_level)); read_transfer_data(src_res->iov, src_res->num_iovs, tptr, src_res->base.format, src_offset, src_stride, src_layer_stride, &box, false); /* When on GLES sync the iov that backs the dst resource because * we might need it in a chain copy A->B, B->C */ write_transfer_data(&dst_res->base, dst_res->iov, dst_res->num_iovs, tptr, dst_stride, &box, src_level, dst_offset, false); /* we get values from the guest as 24-bit scaled integers but we give them to the host GL and it interprets them as 32-bit scaled integers, so we need to scale them here */ if (dst_res->base.format == VIRGL_FORMAT_Z24X8_UNORM) { float depth_scale = 256.0; vrend_scale_depth(tptr, total_size, depth_scale); } /* if this is a BGR* resource on GLES, the data needs to be manually swizzled to RGB* before * storing in a texture. Iovec data is assumed to have the original byte-order, namely BGR*, * and needs to be reordered when storing in the host's texture memory as RGB*. * On the contrary, externally-stored BGR* resources are assumed to remain in BGR* format at * all times. */ if (vrend_format_is_bgra(dst_res->base.format) && !vrend_resource_is_emulated_bgra(dst_res)) vrend_swizzle_data_bgra(total_size, tptr); } else { uint32_t read_chunk_size; switch (elsize) { case 1: case 3: glPixelStorei(GL_PACK_ALIGNMENT, 1); break; case 2: case 6: glPixelStorei(GL_PACK_ALIGNMENT, 2); break; case 4: default: glPixelStorei(GL_PACK_ALIGNMENT, 4); break; case 8: glPixelStorei(GL_PACK_ALIGNMENT, 8); break; } glBindTexture(src_res->target, src_res->id); slice_offset = 0; read_chunk_size = (src_res->target == GL_TEXTURE_CUBE_MAP) ? slice_size : total_size; for (i = 0; i < cube_slice; i++) { GLenum ctarget = src_res->target == GL_TEXTURE_CUBE_MAP ? 
(GLenum)(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i) : src_res->target; if (compressed) { if (has_feature(feat_arb_robustness)) glGetnCompressedTexImageARB(ctarget, src_level, read_chunk_size, tptr + slice_offset); else glGetCompressedTexImage(ctarget, src_level, tptr + slice_offset); } else { if (has_feature(feat_arb_robustness)) glGetnTexImageARB(ctarget, src_level, glformat, gltype, read_chunk_size, tptr + slice_offset); else glGetTexImage(ctarget, src_level, glformat, gltype, tptr + slice_offset); } slice_offset += slice_size; } } glPixelStorei(GL_PACK_ALIGNMENT, 4); switch (elsize) { case 1: case 3: glPixelStorei(GL_UNPACK_ALIGNMENT, 1); break; case 2: case 6: glPixelStorei(GL_UNPACK_ALIGNMENT, 2); break; case 4: default: glPixelStorei(GL_UNPACK_ALIGNMENT, 4); break; case 8: glPixelStorei(GL_UNPACK_ALIGNMENT, 8); break; } glBindTexture(dst_res->target, dst_res->id); slice_offset = src_box->z * slice_size; cube_slice = (src_res->target == GL_TEXTURE_CUBE_MAP) ? src_box->z + src_box->depth : cube_slice; i = (src_res->target == GL_TEXTURE_CUBE_MAP) ? src_box->z : 0; for (; i < cube_slice; i++) { GLenum ctarget = dst_res->target == GL_TEXTURE_CUBE_MAP ? (GLenum)(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i) : dst_res->target; if (compressed) { if (ctarget == GL_TEXTURE_1D) { glCompressedTexSubImage1D(ctarget, dst_level, dstx, src_box->width, glformat, slice_size, tptr + slice_offset); } else { glCompressedTexSubImage2D(ctarget, dst_level, dstx, dsty, src_box->width, src_box->height, glformat, slice_size, tptr + slice_offset); } } else { if (ctarget == GL_TEXTURE_1D) { glTexSubImage1D(ctarget, dst_level, dstx, src_box->width, glformat, gltype, tptr + slice_offset); } else if (ctarget == GL_TEXTURE_3D || ctarget == GL_TEXTURE_2D_ARRAY || ctarget == GL_TEXTURE_CUBE_MAP_ARRAY) { glTexSubImage3D(ctarget, dst_level, dstx, dsty, dstz, src_box->width, src_box->height, src_box->depth, glformat, gltype, tptr + slice_offset); } else { glTexSubImage2D(ctarget, dst_level, dstx, dsty, src_box->width, src_box->height, glformat, gltype, tptr + slice_offset); } } slice_offset += slice_size; } glPixelStorei(GL_UNPACK_ALIGNMENT, 4); free(tptr); glBindTexture(GL_TEXTURE_2D, 0); } static inline GLenum translate_gles_emulation_texture_target(GLenum target) { switch (target) { case GL_TEXTURE_1D: case GL_TEXTURE_RECTANGLE: return GL_TEXTURE_2D; case GL_TEXTURE_1D_ARRAY: return GL_TEXTURE_2D_ARRAY; default: return target; } } static inline void vrend_copy_sub_image(struct vrend_resource* src_res, struct vrend_resource * dst_res, uint32_t src_level, const struct pipe_box *src_box, uint32_t dst_level, uint32_t dstx, uint32_t dsty, uint32_t dstz) { GLenum src_target = tgsitargettogltarget(src_res->base.target, src_res->base.nr_samples); GLenum dst_target = tgsitargettogltarget(dst_res->base.target, dst_res->base.nr_samples); if (vrend_state.use_gles) { src_target = translate_gles_emulation_texture_target(src_target); dst_target = translate_gles_emulation_texture_target(dst_target); } glCopyImageSubData(src_res->id, src_target, src_level, src_box->x, src_box->y, src_box->z, dst_res->id, dst_target, dst_level, dstx, dsty, dstz, src_box->width, src_box->height,src_box->depth); } void vrend_renderer_resource_copy_region(struct vrend_context *ctx, uint32_t dst_handle, uint32_t dst_level, uint32_t dstx, uint32_t dsty, uint32_t dstz, uint32_t src_handle, uint32_t src_level, const struct pipe_box *src_box) { struct vrend_resource *src_res, *dst_res; GLbitfield glmask = 0; GLint sy1, sy2, dy1, dy2; unsigned int comp_flags; if (ctx->in_error) 
return; src_res = vrend_renderer_ctx_res_lookup(ctx, src_handle); dst_res = vrend_renderer_ctx_res_lookup(ctx, dst_handle); if (!src_res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, src_handle); return; } if (!dst_res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle); return; } VREND_DEBUG(dbg_copy_resource, ctx, "COPY_REGION: From %s ms:%d [%d, %d, %d]+[%d, %d, %d] lvl:%d " "To %s ms:%d [%d, %d, %d]\n", util_format_name(src_res->base.format), src_res->base.nr_samples, src_box->x, src_box->y, src_box->z, src_box->width, src_box->height, src_box->depth, src_level, util_format_name(dst_res->base.format), dst_res->base.nr_samples, dstx, dsty, dstz); if (src_res->base.target == PIPE_BUFFER && dst_res->base.target == PIPE_BUFFER) { /* do a buffer copy */ VREND_DEBUG(dbg_copy_resource, ctx, "COPY_REGION: buffer copy %d+%d\n", src_box->x, src_box->width); vrend_resource_buffer_copy(ctx, src_res, dst_res, dstx, src_box->x, src_box->width); return; } comp_flags = VREND_COPY_COMPAT_FLAG_ALLOW_COMPRESSED; if (src_res->egl_image) comp_flags |= VREND_COPY_COMPAT_FLAG_ONE_IS_EGL_IMAGE; if (dst_res->egl_image) comp_flags ^= VREND_COPY_COMPAT_FLAG_ONE_IS_EGL_IMAGE; if (has_feature(feat_copy_image) && format_is_copy_compatible(src_res->base.format,dst_res->base.format, comp_flags) && src_res->base.nr_samples == dst_res->base.nr_samples) { VREND_DEBUG(dbg_copy_resource, ctx, "COPY_REGION: use glCopyImageSubData\n"); vrend_copy_sub_image(src_res, dst_res, src_level, src_box, dst_level, dstx, dsty, dstz); return; } if (!vrend_format_can_render(src_res->base.format) || !vrend_format_can_render(dst_res->base.format)) { VREND_DEBUG(dbg_copy_resource, ctx, "COPY_REGION: use resource_copy_fallback\n"); vrend_resource_copy_fallback(src_res, dst_res, dst_level, dstx, dsty, dstz, src_level, src_box); return; } glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]); VREND_DEBUG(dbg_copy_resource, ctx, "COPY_REGION: use glBlitFramebuffer\n"); /* clean out fb ids */ glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0); vrend_fb_bind_texture(src_res, 0, src_level, src_box->z); glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0); vrend_fb_bind_texture(dst_res, 0, dst_level, dstz); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]); glBindFramebuffer(GL_READ_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]); glmask = GL_COLOR_BUFFER_BIT; glDisable(GL_SCISSOR_TEST); if (!src_res->y_0_top) { sy1 = src_box->y; sy2 = src_box->y + src_box->height; } else { sy1 = src_res->base.height0 - src_box->y - src_box->height; sy2 = src_res->base.height0 - src_box->y; } if (!dst_res->y_0_top) { dy1 = dsty; dy2 = dsty + src_box->height; } else { dy1 = dst_res->base.height0 - dsty - src_box->height; dy2 = dst_res->base.height0 - dsty; } glBlitFramebuffer(src_box->x, sy1, src_box->x + src_box->width, sy2, dstx, dy1, dstx + src_box->width, dy2, glmask, GL_NEAREST); glBindFramebuffer(GL_READ_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0); glBindFramebuffer(GL_READ_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0); glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->fb_id); if (ctx->sub->rs_state.scissor) glEnable(GL_SCISSOR_TEST); } static GLuint vrend_make_view(struct vrend_resource *res, enum virgl_formats 
format) { GLuint view_id; GLenum tex_ifmt = tex_conv_table[res->base.format].internalformat; GLenum view_ifmt = tex_conv_table[format].internalformat; if (tex_ifmt == view_ifmt) return res->id; /* If the format doesn't support TextureStorage it is not immutable, so no TextureView*/ if (!has_bit(res->storage_bits, VREND_STORAGE_GL_IMMUTABLE)) return res->id; VREND_DEBUG(dbg_blit, NULL, "Create texture view from %s as %s\n", util_format_name(res->base.format), util_format_name(format)); if (vrend_state.use_gles) { assert(res->target != GL_TEXTURE_RECTANGLE_NV); assert(res->target != GL_TEXTURE_1D); assert(res->target != GL_TEXTURE_1D_ARRAY); } glGenTextures(1, &view_id); glTextureView(view_id, res->target, res->id, view_ifmt, 0, res->base.last_level + 1, 0, res->base.array_size); return view_id; } static bool vrend_blit_needs_redblue_swizzle(struct vrend_resource *src_res, struct vrend_resource *dst_res, const struct pipe_blit_info *info) { /* Virgl's BGR* formats always use GL_RGBA8 internal format so texture views have no format * conversion effects. Swizzling during blits is required instead. * Also, GBM/EGL-backed (i.e. external) BGR* resources are always stored with BGR* internal * format, despite Virgl's use of the GL_RGBA8 internal format, so special care must be taken * when determining the swizzling. */ bool needs_redblue_swizzle = false; if (vrend_resource_is_emulated_bgra(src_res) ^ vrend_resource_is_emulated_bgra(dst_res)) needs_redblue_swizzle = !needs_redblue_swizzle; /* Virgl blits support "views" on source/dest resources, allowing another level of format * conversion on top of the host's GL API. These views need to be reconciled manually when * any BGR* resources are involved, since they are internally stored with RGB* byte-ordering, * and externally stored with BGR* byte-ordering. 
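* The checks below therefore toggle the swizzle once per red/blue mismatch: once if exactly one side uses emulated-BGRA storage, and once for each side whose resource format and view format disagree on BGR-ness. An even number of mismatches cancels out; for example, blitting an emulated BGRA resource through a BGRA view into another emulated BGRA resource with a BGRA view needs no swizzle, while the same blit into an RGBA view of that destination toggles once and does.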
*/ if (vrend_format_is_bgra(src_res->base.format) ^ vrend_format_is_bgra(info->src.format)) needs_redblue_swizzle = !needs_redblue_swizzle; if (vrend_format_is_bgra(dst_res->base.format) ^ vrend_format_is_bgra(info->dst.format)) needs_redblue_swizzle = !needs_redblue_swizzle; return needs_redblue_swizzle; } static void vrend_renderer_prepare_blit_extra_info(struct vrend_context *ctx, struct vrend_resource *src_res, struct vrend_resource *dst_res, struct vrend_blit_info *info) { info->can_fbo_blit = true; info->gl_filter = convert_mag_filter(info->b.filter); if (!dst_res->y_0_top) { info->dst_y1 = info->b.dst.box.y + info->b.dst.box.height; info->dst_y2 = info->b.dst.box.y; } else { info->dst_y1 = dst_res->base.height0 - info->b.dst.box.y - info->b.dst.box.height; info->dst_y2 = dst_res->base.height0 - info->b.dst.box.y; } if (!src_res->y_0_top) { info->src_y1 = info->b.src.box.y + info->b.src.box.height; info->src_y2 = info->b.src.box.y; } else { info->src_y1 = src_res->base.height0 - info->b.src.box.y - info->b.src.box.height; info->src_y2 = src_res->base.height0 - info->b.src.box.y; } if (vrend_blit_needs_swizzle(info->b.dst.format, info->b.src.format)) { info->needs_swizzle = true; info->can_fbo_blit = false; } if (info->needs_swizzle && vrend_get_format_table_entry(dst_res->base.format)->flags & VIRGL_TEXTURE_NEED_SWIZZLE) memcpy(info->swizzle, tex_conv_table[dst_res->base.format].swizzle, sizeof(info->swizzle)); if (vrend_blit_needs_redblue_swizzle(src_res, dst_res, &info->b)) { VREND_DEBUG(dbg_blit, ctx, "Applying red/blue swizzle during blit involving an external BGR* resource\n"); uint8_t temp = info->swizzle[0]; info->swizzle[0] = info->swizzle[2]; info->swizzle[2] = temp; info->can_fbo_blit = false; } /* for scaled MS blits we either need extensions or hand roll */ if (info->b.mask & PIPE_MASK_RGBA && src_res->base.nr_samples > 0 && src_res->base.nr_samples != dst_res->base.nr_samples && (info->b.src.box.width != info->b.dst.box.width || info->b.src.box.height != info->b.dst.box.height)) { if (has_feature(feat_ms_scaled_blit)) info->gl_filter = GL_SCALED_RESOLVE_NICEST_EXT; else info->can_fbo_blit = false; } } /* Prepare the extra blit info and return true if a FBO blit can be used. */ static bool vrend_renderer_prepare_blit(struct vrend_context *ctx, struct vrend_resource *src_res, struct vrend_resource *dst_res, const struct vrend_blit_info *info) { if (!info->can_fbo_blit) return false; /* if we can't make FBO's use the fallback path */ if (!vrend_format_can_render(src_res->base.format) && !vrend_format_is_ds(src_res->base.format)) return false; if (!vrend_format_can_render(src_res->base.format) && !vrend_format_is_ds(src_res->base.format)) return false; /* different depth formats */ if (vrend_format_is_ds(src_res->base.format) && vrend_format_is_ds(dst_res->base.format)) { if (src_res->base.format != dst_res->base.format) { if (!(src_res->base.format == PIPE_FORMAT_S8_UINT_Z24_UNORM && (dst_res->base.format == PIPE_FORMAT_Z24X8_UNORM))) { return false; } } } /* glBlitFramebuffer - can support depth stencil with NEAREST which we use for mipmaps */ if ((info->b.mask & (PIPE_MASK_Z | PIPE_MASK_S)) && info->gl_filter == PIPE_TEX_FILTER_LINEAR) return false; /* since upstream mesa change * https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5034 * an imported RGBX texture uses GL_RGB8 as internal format while * in virgl_formats, we use GL_RGBA8 internal format for RGBX texutre. * on GLES host, glBlitFramebuffer doesn't work in such case. 
*/ if (vrend_state.use_gles && info->b.mask & PIPE_MASK_RGBA && src_res->base.format == VIRGL_FORMAT_R8G8B8X8_UNORM && dst_res->base.format == VIRGL_FORMAT_R8G8B8X8_UNORM && has_bit(src_res->storage_bits, VREND_STORAGE_EGL_IMAGE) != has_bit(dst_res->storage_bits, VREND_STORAGE_EGL_IMAGE) && (src_res->base.nr_samples || dst_res->base.nr_samples)) { return false; } /* GLES generally doesn't support blitting to a multi-sample FB, and also not * from a multi-sample FB where the regions are not exatly the same or the * source and target format are different. For * downsampling DS blits to zero samples we solve this by doing two blits */ if (vrend_state.use_gles && ((dst_res->base.nr_samples > 0) || ((info->b.mask & PIPE_MASK_RGBA) && (src_res->base.nr_samples > 0) && (info->b.src.box.x != info->b.dst.box.x || info->b.src.box.width != info->b.dst.box.width || info->dst_y1 != info->src_y1 || info->dst_y2 != info->src_y2 || info->b.src.format != info->b.dst.format)) )) { VREND_DEBUG(dbg_blit, ctx, "Use GL fallback because dst:ms:%d src:ms:%d (%d %d %d %d) -> (%d %d %d %d)\n", dst_res->base.nr_samples, src_res->base.nr_samples, info->b.src.box.x, info->b.src.box.x + info->b.src.box.width, info->src_y1, info->src_y2, info->b.dst.box.x, info->b.dst.box.x + info->b.dst.box.width, info->dst_y1, info->dst_y2); return false; } /* for 3D mipmapped blits - hand roll time */ if (info->b.src.box.depth != info->b.dst.box.depth) return false; return true; } static void vrend_renderer_blit_fbo(struct vrend_context *ctx, struct vrend_resource *src_res, struct vrend_resource *dst_res, const struct vrend_blit_info *info) { GLbitfield glmask = 0; if (info->b.mask & PIPE_MASK_Z) glmask |= GL_DEPTH_BUFFER_BIT; if (info->b.mask & PIPE_MASK_S) glmask |= GL_STENCIL_BUFFER_BIT; if (info->b.mask & PIPE_MASK_RGBA) glmask |= GL_COLOR_BUFFER_BIT; if (info->b.scissor_enable) { glScissor(info->b.scissor.minx, info->b.scissor.miny, info->b.scissor.maxx - info->b.scissor.minx, info->b.scissor.maxy - info->b.scissor.miny); ctx->sub->scissor_state_dirty = (1 << 0); glEnable(GL_SCISSOR_TEST); } else glDisable(GL_SCISSOR_TEST); /* An GLES GL_INVALID_OPERATION is generated if one wants to blit from a * multi-sample fbo to a non multi-sample fbo and the source and destination * rectangles are not defined with the same (X0, Y0) and (X1, Y1) bounds. * * Since stencil data can only be written in a fragment shader when * ARB_shader_stencil_export is available, the workaround using GL as given * above is usually not available. Instead, to work around the blit * limitations on GLES first copy the full frame to a non-multisample * surface and then copy the according area to the final target surface. 
*/ bool make_intermediate_copy = false; GLuint intermediate_fbo = 0; struct vrend_resource *intermediate_copy = 0; if (vrend_state.use_gles && (info->b.mask & PIPE_MASK_ZS) && ((src_res->base.nr_samples > 0) && (src_res->base.nr_samples != dst_res->base.nr_samples)) && ((info->b.src.box.x != info->b.dst.box.x) || (info->src_y1 != info->dst_y1) || (info->b.src.box.width != info->b.dst.box.width) || (info->src_y2 != info->dst_y2))) { make_intermediate_copy = true; /* Create a texture that is the same like the src_res texture, but * without multi-sample */ struct vrend_renderer_resource_create_args args; memset(&args, 0, sizeof(struct vrend_renderer_resource_create_args)); args.width = src_res->base.width0; args.height = src_res->base.height0; args.depth = src_res->base.depth0; args.format = info->b.src.format; args.target = src_res->base.target; args.last_level = src_res->base.last_level; args.array_size = src_res->base.array_size; intermediate_copy = (struct vrend_resource *)CALLOC_STRUCT(vrend_texture); vrend_renderer_resource_copy_args(&args, intermediate_copy); /* this is PIPE_MASK_ZS and bgra fixup is not needed */ ASSERTED int r = vrend_resource_alloc_texture(intermediate_copy, args.format, NULL); assert(!r); glGenFramebuffers(1, &intermediate_fbo); } else { /* If no intermediate copy is needed make the variables point to the * original source to simplify the code below. */ intermediate_fbo = ctx->sub->blit_fb_ids[0]; intermediate_copy = src_res; } glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]); if (info->b.mask & PIPE_MASK_RGBA) glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0); else glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0); glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]); if (info->b.mask & PIPE_MASK_RGBA) glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0); else if (info->b.mask & (PIPE_MASK_Z | PIPE_MASK_S)) glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0); int n_layers = info->b.src.box.depth == info->b.dst.box.depth ? 
info->b.dst.box.depth : 1; for (int i = 0; i < n_layers; i++) { glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]); vrend_fb_bind_texture_id(src_res, info->src_view, 0, info->b.src.level, info->b.src.box.z + i, 0); if (make_intermediate_copy) { int level_width = u_minify(src_res->base.width0, info->b.src.level); int level_height = u_minify(src_res->base.width0, info->b.src.level); glBindFramebuffer(GL_FRAMEBUFFER, intermediate_fbo); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0); vrend_fb_bind_texture(intermediate_copy, 0, info->b.src.level, info->b.src.box.z + i); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, intermediate_fbo); glBindFramebuffer(GL_READ_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]); glBlitFramebuffer(0, 0, level_width, level_height, 0, 0, level_width, level_height, glmask, info->gl_filter); } glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]); vrend_fb_bind_texture_id(dst_res, info->dst_view, 0, info->b.dst.level, info->b.dst.box.z + i, 0); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]); if (has_feature(feat_srgb_write_control)) { if (util_format_is_srgb(info->b.dst.format) || util_format_is_srgb(info->b.src.format)) glEnable(GL_FRAMEBUFFER_SRGB); else glDisable(GL_FRAMEBUFFER_SRGB); } glBindFramebuffer(GL_READ_FRAMEBUFFER, intermediate_fbo); glBlitFramebuffer(info->b.src.box.x, info->src_y1, info->b.src.box.x + info->b.src.box.width, info->src_y2, info->b.dst.box.x, info->dst_y1, info->b.dst.box.x + info->b.dst.box.width, info->dst_y2, glmask, info->gl_filter); } glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0); glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0, 0); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0); glBindFramebuffer(GL_FRAMEBUFFER, ctx->sub->fb_id); if (has_feature(feat_srgb_write_control)) { if (ctx->sub->framebuffer_srgb_enabled) glEnable(GL_FRAMEBUFFER_SRGB); else glDisable(GL_FRAMEBUFFER_SRGB); } if (make_intermediate_copy) { vrend_renderer_resource_destroy(intermediate_copy); glDeleteFramebuffers(1, &intermediate_fbo); } if (ctx->sub->rs_state.scissor) glEnable(GL_SCISSOR_TEST); else glDisable(GL_SCISSOR_TEST); } static void vrend_renderer_blit_int(struct vrend_context *ctx, struct vrend_resource *src_res, struct vrend_resource *dst_res, const struct pipe_blit_info *info) { struct vrend_blit_info blit_info = { .b = *info, .src_view = src_res->id, .dst_view = dst_res->id, .swizzle = {0, 1, 2, 3} }; /* We create the texture views in this function instead of doing it in * vrend_renderer_prepare_blit_extra_info because we also delete them here */ if ((src_res->base.format != info->src.format) && has_feature(feat_texture_view)) blit_info.src_view = vrend_make_view(src_res, info->src.format); if ((dst_res->base.format != info->dst.format) && has_feature(feat_texture_view)) blit_info.dst_view = vrend_make_view(dst_res, info->dst.format); vrend_renderer_prepare_blit_extra_info(ctx, src_res, dst_res, &blit_info); if (vrend_renderer_prepare_blit(ctx, src_res, dst_res, &blit_info)) { VREND_DEBUG(dbg_blit, ctx, "BLIT_INT: use FBO blit\n"); vrend_renderer_blit_fbo(ctx, src_res, dst_res, &blit_info); } else { blit_info.has_srgb_write_control = has_feature(feat_texture_srgb_decode); 
blit_info.has_texture_srgb_decode = has_feature(feat_texture_srgb_decode); VREND_DEBUG(dbg_blit, ctx, "BLIT_INT: use GL fallback\n"); vrend_renderer_blit_gl(ctx, src_res, dst_res, &blit_info); vrend_sync_make_current(ctx->sub->gl_context); } if (blit_info.src_view != src_res->id) glDeleteTextures(1, &blit_info.src_view); if (blit_info.dst_view != dst_res->id) glDeleteTextures(1, &blit_info.dst_view); } void vrend_renderer_blit(struct vrend_context *ctx, uint32_t dst_handle, uint32_t src_handle, const struct pipe_blit_info *info) { unsigned int comp_flags = 0; struct vrend_resource *src_res, *dst_res; src_res = vrend_renderer_ctx_res_lookup(ctx, src_handle); dst_res = vrend_renderer_ctx_res_lookup(ctx, dst_handle); if (!src_res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, src_handle); return; } if (!dst_res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle); return; } if (ctx->in_error) return; if (!info->src.format || info->src.format >= VIRGL_FORMAT_MAX) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_FORMAT, info->src.format); return; } if (!info->dst.format || info->dst.format >= VIRGL_FORMAT_MAX) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_FORMAT, info->dst.format); return; } if (info->render_condition_enable == false) vrend_pause_render_condition(ctx, true); VREND_DEBUG(dbg_blit, ctx, "BLIT: rc:%d scissor:%d filter:%d alpha:%d mask:0x%x\n" " From %s(%s) ms:%d egl:%d gbm:%d [%d, %d, %d]+[%d, %d, %d] lvl:%d\n" " To %s(%s) ms:%d egl:%d gbm:%d [%d, %d, %d]+[%d, %d, %d] lvl:%d\n", info->render_condition_enable, info->scissor_enable, info->filter, info->alpha_blend, info->mask, util_format_name(src_res->base.format), util_format_name(info->src.format), src_res->base.nr_samples, has_bit(src_res->storage_bits, VREND_STORAGE_EGL_IMAGE), has_bit(src_res->storage_bits, VREND_STORAGE_GBM_BUFFER), info->src.box.x, info->src.box.y, info->src.box.z, info->src.box.width, info->src.box.height, info->src.box.depth, info->src.level, util_format_name(dst_res->base.format), util_format_name(info->dst.format), dst_res->base.nr_samples, has_bit(dst_res->storage_bits, VREND_STORAGE_EGL_IMAGE), has_bit(dst_res->storage_bits, VREND_STORAGE_GBM_BUFFER), info->dst.box.x, info->dst.box.y, info->dst.box.z, info->dst.box.width, info->dst.box.height, info->dst.box.depth, info->dst.level); if (src_res->egl_image) comp_flags |= VREND_COPY_COMPAT_FLAG_ONE_IS_EGL_IMAGE; if (dst_res->egl_image) comp_flags ^= VREND_COPY_COMPAT_FLAG_ONE_IS_EGL_IMAGE; /* The Gallium blit function can be called for a general blit that may * scale, convert the data, and apply some render states, or it is called via * glCopyImageSubData. If the src and the dst image are equal, or the two * image formats are the same, then such Gallium calls are redirected * to resource_copy_region; in this case, and if no render states etc. need * to be applied, forward the call to glCopyImageSubData, otherwise do a * normal blit.
*/ if (has_feature(feat_copy_image) && (!info->render_condition_enable || !ctx->sub->cond_render_gl_mode) && format_is_copy_compatible(info->src.format,info->dst.format, comp_flags) && !info->scissor_enable && (info->filter == PIPE_TEX_FILTER_NEAREST) && !info->alpha_blend && (info->mask == PIPE_MASK_RGBA) && src_res->base.nr_samples == dst_res->base.nr_samples && info->src.box.width == info->dst.box.width && info->src.box.height == info->dst.box.height && info->src.box.depth == info->dst.box.depth) { VREND_DEBUG(dbg_blit, ctx, " Use glCopyImageSubData\n"); vrend_copy_sub_image(src_res, dst_res, info->src.level, &info->src.box, info->dst.level, info->dst.box.x, info->dst.box.y, info->dst.box.z); } else { VREND_DEBUG(dbg_blit, ctx, " Use blit_int\n"); vrend_renderer_blit_int(ctx, src_res, dst_res, info); } if (info->render_condition_enable == false) vrend_pause_render_condition(ctx, false); } void vrend_renderer_set_fence_retire(struct vrend_context *ctx, vrend_context_fence_retire retire, void *retire_data) { assert(ctx->ctx_id); ctx->fence_retire = retire; ctx->fence_retire_data = retire_data; } int vrend_renderer_create_fence(struct vrend_context *ctx, uint32_t flags, void *fence_cookie) { struct vrend_fence *fence; if (!ctx) return EINVAL; fence = malloc(sizeof(struct vrend_fence)); if (!fence) return ENOMEM; fence->ctx = ctx; fence->flags = flags; fence->fence_cookie = fence_cookie; #ifdef HAVE_EPOXY_EGL_H if (vrend_state.use_egl_fence) { fence->eglsyncobj = virgl_egl_fence_create(egl); } else #endif { fence->glsyncobj = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0); } glFlush(); if (fence->glsyncobj == NULL) goto fail; if (vrend_state.sync_thread) { mtx_lock(&vrend_state.fence_mutex); list_addtail(&fence->fences, &vrend_state.fence_wait_list); cnd_signal(&vrend_state.fence_cond); mtx_unlock(&vrend_state.fence_mutex); } else list_addtail(&fence->fences, &vrend_state.fence_list); return 0; fail: vrend_printf( "failed to create fence sync object\n"); free(fence); return ENOMEM; } static bool need_fence_retire_signal_locked(struct vrend_fence *fence, const struct list_head *signaled_list) { struct vrend_fence *next; /* last fence */ if (fence->fences.next == signaled_list) return true; /* next fence belongs to a different context */ next = LIST_ENTRY(struct vrend_fence, fence->fences.next, fences); if (next->ctx != fence->ctx) return true; /* not mergeable */ if (!(fence->flags & VIRGL_RENDERER_FENCE_FLAG_MERGEABLE)) return true; return false; } void vrend_renderer_check_fences(void) { struct list_head retired_fences; struct vrend_fence *fence, *stor; /* No need to check the fence list, fences are retired directly in * the polling thread in that case. 
*/ if (vrend_state.use_async_fence_cb) return; list_inithead(&retired_fences); if (vrend_state.sync_thread) { flush_eventfd(vrend_state.eventfd); mtx_lock(&vrend_state.fence_mutex); LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) { /* vrend_free_fences_for_context might have marked the fence invalid * by setting fence->ctx to NULL */ if (!fence->ctx) { free_fence_locked(fence); continue; } if (need_fence_retire_signal_locked(fence, &vrend_state.fence_list)) { list_del(&fence->fences); list_addtail(&fence->fences, &retired_fences); } else { free_fence_locked(fence); } } mtx_unlock(&vrend_state.fence_mutex); } else { vrend_renderer_force_ctx_0(); LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) { if (do_wait(fence, /* can_block */ false)) { list_del(&fence->fences); list_addtail(&fence->fences, &retired_fences); } else { /* don't bother checking any subsequent ones */ break; } } LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &retired_fences, fences) { if (!need_fence_retire_signal_locked(fence, &retired_fences)) free_fence_locked(fence); } } if (LIST_IS_EMPTY(&retired_fences)) return; /* no need to lock when not using a sync thread */ vrend_renderer_check_queries_locked(); LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &retired_fences, fences) { struct vrend_context *ctx = fence->ctx; ctx->fence_retire(fence->fence_cookie, ctx->fence_retire_data); free_fence_locked(fence); } } static bool vrend_get_one_query_result(GLuint query_id, bool use_64, uint64_t *result) { GLuint ready; GLuint passed; GLuint64 pass64; glGetQueryObjectuiv(query_id, GL_QUERY_RESULT_AVAILABLE_ARB, &ready); if (!ready) return false; if (use_64) { glGetQueryObjectui64v(query_id, GL_QUERY_RESULT_ARB, &pass64); *result = pass64; } else { glGetQueryObjectuiv(query_id, GL_QUERY_RESULT_ARB, &passed); *result = passed; } return true; } static inline void vrend_update_oq_samples_multiplier(struct vrend_context *ctx) { if (!vrend_state.current_ctx->sub->fake_occlusion_query_samples_passed_multiplier) { uint32_t multiplier = 0; bool tweaked = vrend_get_tweak_is_active_with_params(vrend_get_context_tweaks(ctx), virgl_tweak_gles_tf3_samples_passes_multiplier, &multiplier); vrend_state.current_ctx->sub->fake_occlusion_query_samples_passed_multiplier = tweaked ? multiplier: fake_occlusion_query_samples_passed_default; } } static bool vrend_check_query_locked(struct vrend_query *query) { struct virgl_host_query_state state; bool ret; state.result_size = vrend_is_timer_query(query->gltype) ? 
8 : 4; ret = vrend_get_one_query_result(query->id, state.result_size == 8, &state.result); if (ret == false) return false; /* We got a boolean, but the client wanted the actual number of samples * blow the number up so that the client doesn't think it was just one pixel * and discards an object that might be bigger */ if (query->fake_samples_passed) { vrend_update_oq_samples_multiplier(vrend_state.current_ctx); state.result *= vrend_state.current_ctx->sub->fake_occlusion_query_samples_passed_multiplier; } state.query_state = VIRGL_QUERY_STATE_DONE; if (query->res->iov) { vrend_write_to_iovec(query->res->iov, query->res->num_iovs, 0, (const void *) &state, sizeof(state)); } else { *((struct virgl_host_query_state *) query->res->ptr) = state; } return true; } static bool vrend_hw_switch_query_context(struct vrend_context *ctx) { if (vrend_state.use_async_fence_cb) { if (!ctx) return false; if (ctx == vrend_state.current_sync_thread_ctx) return true; if (ctx->ctx_id != 0 && ctx->in_error) return false; vrend_clicbs->make_current(ctx->sub->gl_context); vrend_state.current_sync_thread_ctx = ctx; return true; } else { return vrend_hw_switch_context(ctx, true); } } static void vrend_renderer_check_queries_locked(void) { struct vrend_query *query, *stor; LIST_FOR_EACH_ENTRY_SAFE(query, stor, &vrend_state.waiting_query_list, waiting_queries) { if (!vrend_hw_switch_query_context(query->ctx) || vrend_check_query_locked(query)) list_delinit(&query->waiting_queries); } } bool vrend_hw_switch_context(struct vrend_context *ctx, bool now) { if (!ctx) return false; if (ctx == vrend_state.current_ctx && ctx->ctx_switch_pending == false) return true; if (ctx->ctx_id != 0 && ctx->in_error) { return false; } ctx->ctx_switch_pending = true; if (now == true) { vrend_finish_context_switch(ctx); } vrend_state.current_ctx = ctx; return true; } static void vrend_finish_context_switch(struct vrend_context *ctx) { if (ctx->ctx_switch_pending == false) return; ctx->ctx_switch_pending = false; if (vrend_state.current_hw_ctx == ctx) return; vrend_state.current_hw_ctx = ctx; vrend_clicbs->make_current(ctx->sub->gl_context); } void vrend_renderer_object_destroy(struct vrend_context *ctx, uint32_t handle) { vrend_object_remove(ctx->sub->object_hash, handle, 0); } uint32_t vrend_renderer_object_insert(struct vrend_context *ctx, void *data, uint32_t handle, enum virgl_object_type type) { return vrend_object_insert(ctx->sub->object_hash, data, handle, type); } int vrend_create_query(struct vrend_context *ctx, uint32_t handle, uint32_t query_type, uint32_t query_index, uint32_t res_handle, UNUSED uint32_t offset) { struct vrend_query *q; struct vrend_resource *res; uint32_t ret_handle; bool fake_samples_passed = false; res = vrend_renderer_ctx_res_lookup(ctx, res_handle); if (!res || !has_bit(res->storage_bits, VREND_STORAGE_HOST_SYSTEM_MEMORY)) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle); return EINVAL; } /* If we don't have ARB_occlusion_query, at least try to fake GL_SAMPLES_PASSED * by using GL_ANY_SAMPLES_PASSED (i.e. 
EXT_occlusion_query_boolean) */ if (!has_feature(feat_occlusion_query) && query_type == PIPE_QUERY_OCCLUSION_COUNTER) { VREND_DEBUG(dbg_query, ctx, "GL_SAMPLES_PASSED not supported will try GL_ANY_SAMPLES_PASSED\n"); query_type = PIPE_QUERY_OCCLUSION_PREDICATE; fake_samples_passed = true; } if (query_type == PIPE_QUERY_OCCLUSION_PREDICATE && !has_feature(feat_occlusion_query_boolean)) { vrend_report_context_error(ctx, VIRGL_ERROR_GL_ANY_SAMPLES_PASSED, res_handle); return EINVAL; } q = CALLOC_STRUCT(vrend_query); if (!q) return ENOMEM; list_inithead(&q->waiting_queries); q->type = query_type; q->index = query_index; q->ctx = ctx; q->fake_samples_passed = fake_samples_passed; vrend_resource_reference(&q->res, res); switch (q->type) { case PIPE_QUERY_OCCLUSION_COUNTER: q->gltype = GL_SAMPLES_PASSED_ARB; break; case PIPE_QUERY_OCCLUSION_PREDICATE: if (has_feature(feat_occlusion_query_boolean)) { q->gltype = GL_ANY_SAMPLES_PASSED; break; } else return EINVAL; case PIPE_QUERY_TIMESTAMP: if (!has_feature(feat_timer_query)) return EINVAL; q->gltype = GL_TIMESTAMP; break; case PIPE_QUERY_TIME_ELAPSED: if (!has_feature(feat_timer_query)) return EINVAL; q->gltype = GL_TIME_ELAPSED; break; case PIPE_QUERY_PRIMITIVES_GENERATED: q->gltype = GL_PRIMITIVES_GENERATED; break; case PIPE_QUERY_PRIMITIVES_EMITTED: q->gltype = GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN; break; case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: q->gltype = GL_ANY_SAMPLES_PASSED_CONSERVATIVE; break; case PIPE_QUERY_SO_OVERFLOW_PREDICATE: if (!has_feature(feat_transform_feedback_overflow_query)) return EINVAL; q->gltype = GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB; break; case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE: if (!has_feature(feat_transform_feedback_overflow_query)) return EINVAL; q->gltype = GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB; break; default: vrend_printf("unknown query object received %d\n", q->type); break; } glGenQueries(1, &q->id); ret_handle = vrend_renderer_object_insert(ctx, q, handle, VIRGL_OBJECT_QUERY); if (!ret_handle) { FREE(q); return ENOMEM; } return 0; } static void vrend_destroy_query(struct vrend_query *query) { vrend_resource_reference(&query->res, NULL); list_del(&query->waiting_queries); glDeleteQueries(1, &query->id); free(query); } static void vrend_destroy_query_object(void *obj_ptr) { struct vrend_query *query = obj_ptr; vrend_destroy_query(query); } int vrend_begin_query(struct vrend_context *ctx, uint32_t handle) { struct vrend_query *q; q = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_QUERY); if (!q) return EINVAL; if (q->index > 0 && !has_feature(feat_transform_feedback3)) return EINVAL; lock_sync(); list_delinit(&q->waiting_queries); unlock_sync(); if (q->gltype == GL_TIMESTAMP) return 0; if (q->index > 0) glBeginQueryIndexed(q->gltype, q->index, q->id); else glBeginQuery(q->gltype, q->id); return 0; } int vrend_end_query(struct vrend_context *ctx, uint32_t handle) { struct vrend_query *q; q = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_QUERY); if (!q) return EINVAL; if (q->index > 0 && !has_feature(feat_transform_feedback3)) return EINVAL; if (vrend_is_timer_query(q->gltype)) { if (q->gltype == GL_TIMESTAMP && !has_feature(feat_timer_query)) { report_gles_warn(ctx, GLES_WARN_TIMESTAMP); } else if (q->gltype == GL_TIMESTAMP) { glQueryCounter(q->id, q->gltype); } else { /* remove from active query list for this context */ glEndQuery(q->gltype); } return 0; } if (q->index > 0) glEndQueryIndexed(q->gltype, q->index); else glEndQuery(q->gltype); return 0; } void 
vrend_get_query_result(struct vrend_context *ctx, uint32_t handle, UNUSED uint32_t wait) { struct vrend_query *q; bool ret; q = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_QUERY); if (!q) return; lock_sync(); ret = vrend_check_query_locked(q); if (ret) { list_delinit(&q->waiting_queries); } else if (LIST_IS_EMPTY(&q->waiting_queries)) { list_addtail(&q->waiting_queries, &vrend_state.waiting_query_list); } unlock_sync(); } #define COPY_QUERY_RESULT_TO_BUFFER(resid, offset, pvalue, size, multiplier) \ glBindBuffer(GL_QUERY_BUFFER, resid); \ value *= multiplier; \ void* buf = glMapBufferRange(GL_QUERY_BUFFER, offset, size, GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT); \ if (buf) memcpy(buf, &value, size); \ glUnmapBuffer(GL_QUERY_BUFFER); static inline void *buffer_offset(intptr_t i) { return (void *)i; } void vrend_get_query_result_qbo(struct vrend_context *ctx, uint32_t handle, uint32_t qbo_handle, uint32_t wait, uint32_t result_type, uint32_t offset, int32_t index) { struct vrend_query *q; struct vrend_resource *res; if (!has_feature(feat_qbo)) return; q = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_QUERY); if (!q) return; res = vrend_renderer_ctx_res_lookup(ctx, qbo_handle); if (!res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, qbo_handle); return; } VREND_DEBUG(dbg_query, ctx, "Get query result from Query:%d\n", q->id); GLenum qtype; if (index == -1) qtype = GL_QUERY_RESULT_AVAILABLE; else qtype = wait ? GL_QUERY_RESULT : GL_QUERY_RESULT_NO_WAIT; if (!q->fake_samples_passed) { glBindBuffer(GL_QUERY_BUFFER, res->id); switch ((enum pipe_query_value_type)result_type) { case PIPE_QUERY_TYPE_I32: glGetQueryObjectiv(q->id, qtype, buffer_offset(offset)); break; case PIPE_QUERY_TYPE_U32: glGetQueryObjectuiv(q->id, qtype, buffer_offset(offset)); break; case PIPE_QUERY_TYPE_I64: glGetQueryObjecti64v(q->id, qtype, buffer_offset(offset)); break; case PIPE_QUERY_TYPE_U64: glGetQueryObjectui64v(q->id, qtype, buffer_offset(offset)); break; } } else { VREND_DEBUG(dbg_query, ctx, "Was emulating GL_PIXELS_PASSED by GL_ANY_PIXELS_PASSED, artifically upscaling the result\n"); /* The application expects a sample count but we have only a boolean * so we blow the result up by 1/10 of the screen space to make sure the * app doesn't think only one sample passed. 
*/ vrend_update_oq_samples_multiplier(ctx); switch ((enum pipe_query_value_type)result_type) { case PIPE_QUERY_TYPE_I32: { GLint value; glGetQueryObjectiv(q->id, qtype, &value); COPY_QUERY_RESULT_TO_BUFFER(q->id, offset, value, 4, ctx->sub->fake_occlusion_query_samples_passed_multiplier); break; } case PIPE_QUERY_TYPE_U32: { GLuint value; glGetQueryObjectuiv(q->id, qtype, &value); COPY_QUERY_RESULT_TO_BUFFER(q->id, offset, value, 4, ctx->sub->fake_occlusion_query_samples_passed_multiplier); break; } case PIPE_QUERY_TYPE_I64: { GLint64 value; glGetQueryObjecti64v(q->id, qtype, &value); COPY_QUERY_RESULT_TO_BUFFER(q->id, offset, value, 8, ctx->sub->fake_occlusion_query_samples_passed_multiplier); break; } case PIPE_QUERY_TYPE_U64: { GLuint64 value; glGetQueryObjectui64v(q->id, qtype, &value); COPY_QUERY_RESULT_TO_BUFFER(q->id, offset, value, 8, ctx->sub->fake_occlusion_query_samples_passed_multiplier); break; } } } glBindBuffer(GL_QUERY_BUFFER, 0); } static void vrend_pause_render_condition(struct vrend_context *ctx, bool pause) { if (pause) { if (ctx->sub->cond_render_q_id) { if (has_feature(feat_gl_conditional_render)) glEndConditionalRender(); else if (has_feature(feat_nv_conditional_render)) glEndConditionalRenderNV(); } } else { if (ctx->sub->cond_render_q_id) { if (has_feature(feat_gl_conditional_render)) glBeginConditionalRender(ctx->sub->cond_render_q_id, ctx->sub->cond_render_gl_mode); else if (has_feature(feat_nv_conditional_render)) glBeginConditionalRenderNV(ctx->sub->cond_render_q_id, ctx->sub->cond_render_gl_mode); } } } void vrend_render_condition(struct vrend_context *ctx, uint32_t handle, bool condition, uint mode) { struct vrend_query *q; GLenum glmode = 0; if (handle == 0) { if (has_feature(feat_gl_conditional_render)) glEndConditionalRender(); else if (has_feature(feat_nv_conditional_render)) glEndConditionalRenderNV(); ctx->sub->cond_render_q_id = 0; ctx->sub->cond_render_gl_mode = 0; return; } q = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_QUERY); if (!q) return; if (condition && !has_feature(feat_conditional_render_inverted)) return; switch (mode) { case PIPE_RENDER_COND_WAIT: glmode = condition ? GL_QUERY_WAIT_INVERTED : GL_QUERY_WAIT; break; case PIPE_RENDER_COND_NO_WAIT: glmode = condition ? GL_QUERY_NO_WAIT_INVERTED : GL_QUERY_NO_WAIT; break; case PIPE_RENDER_COND_BY_REGION_WAIT: glmode = condition ? GL_QUERY_BY_REGION_WAIT_INVERTED : GL_QUERY_BY_REGION_WAIT; break; case PIPE_RENDER_COND_BY_REGION_NO_WAIT: glmode = condition ? 
GL_QUERY_BY_REGION_NO_WAIT_INVERTED : GL_QUERY_BY_REGION_NO_WAIT; break; default: vrend_printf( "unhandled condition %x\n", mode); } ctx->sub->cond_render_q_id = q->id; ctx->sub->cond_render_gl_mode = glmode; if (has_feature(feat_gl_conditional_render)) glBeginConditionalRender(q->id, glmode); if (has_feature(feat_nv_conditional_render)) glBeginConditionalRenderNV(q->id, glmode); } int vrend_create_so_target(struct vrend_context *ctx, uint32_t handle, uint32_t res_handle, uint32_t buffer_offset, uint32_t buffer_size) { struct vrend_so_target *target; struct vrend_resource *res; int ret_handle; res = vrend_renderer_ctx_res_lookup(ctx, res_handle); if (!res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle); return EINVAL; } target = CALLOC_STRUCT(vrend_so_target); if (!target) return ENOMEM; pipe_reference_init(&target->reference, 1); target->res_handle = res_handle; target->buffer_offset = buffer_offset; target->buffer_size = buffer_size; target->sub_ctx = ctx->sub; vrend_resource_reference(&target->buffer, res); ret_handle = vrend_renderer_object_insert(ctx, target, handle, VIRGL_OBJECT_STREAMOUT_TARGET); if (ret_handle == 0) { FREE(target); return ENOMEM; } return 0; } static int vrender_get_glsl_version(void) { int major_local = 0, minor_local = 0; const GLubyte *version_str; ASSERTED int c; version_str = glGetString(GL_SHADING_LANGUAGE_VERSION); if (vrend_state.use_gles) { char tmp[20]; c = sscanf((const char *)version_str, "%s %s %s %s %i.%i", tmp, tmp, tmp, tmp, &major_local, &minor_local); assert(c == 6); } else { c = sscanf((const char *)version_str, "%i.%i", &major_local, &minor_local); assert(c == 2); } return (major_local * 100) + minor_local; } static void vrend_fill_caps_glsl_version(int gl_ver, int gles_ver, union virgl_caps *caps) { if (gles_ver > 0) { caps->v1.glsl_level = 120; if (gles_ver >= 31) caps->v1.glsl_level = 310; else if (gles_ver >= 30) caps->v1.glsl_level = 130; } if (gl_ver > 0) { caps->v1.glsl_level = 130; if (gl_ver == 31) caps->v1.glsl_level = 140; else if (gl_ver == 32) caps->v1.glsl_level = 150; else if (gl_ver >= 33) caps->v1.glsl_level = 10 * gl_ver; } if (caps->v1.glsl_level < 400) { if (has_feature(feat_tessellation) && has_feature(feat_geometry_shader) && has_feature(feat_gpu_shader5)) { /* This is probably a lie, but Gallium enables * OES_geometry_shader and ARB_gpu_shader5 * based on this value, apart from that it doesn't * seem to be a crucial value */ caps->v1.glsl_level = 400; /* Let's lie a bit more */ if (has_feature(feat_separate_shader_objects)) { caps->v1.glsl_level = 410; /* Compute shaders require GLSL 4.30 unless the shader explicitely * specifies GL_ARB_compute_shader as required. However, on OpenGL ES * they are already supported with version 3.10, so if we already * advertise a feature level of 410, just lie a bit more to make * compute shaders available to GL programs that don't specify the * extension within the shaders. */ if (has_feature(feat_compute_shader)) caps->v1.glsl_level = 430; } } } vrend_printf("GLSL feature level %d\n", caps->v1.glsl_level); } static void set_format_bit(struct virgl_supported_format_mask *mask, enum virgl_formats fmt) { assert(fmt < VIRGL_FORMAT_MAX); unsigned val = (unsigned)fmt; unsigned idx = val / 32; unsigned bit = val % 32; assert(idx < ARRAY_SIZE(mask->bitmask)); mask->bitmask[idx] |= 1u << bit; } /* * Does all of the common caps setting, * if it dedects a early out returns true. 
*/ static void vrend_renderer_fill_caps_v1(int gl_ver, int gles_ver, union virgl_caps *caps) { int i; GLint max; /* * We can't fully support this feature on GLES, * but it is needed for OpenGL 2.1 so lie. */ caps->v1.bset.occlusion_query = 1; /* Set supported prims here as we now know what shaders we support. */ caps->v1.prim_mask = (1 << PIPE_PRIM_POINTS) | (1 << PIPE_PRIM_LINES) | (1 << PIPE_PRIM_LINE_STRIP) | (1 << PIPE_PRIM_LINE_LOOP) | (1 << PIPE_PRIM_TRIANGLES) | (1 << PIPE_PRIM_TRIANGLE_STRIP) | (1 << PIPE_PRIM_TRIANGLE_FAN); if (gl_ver > 0 && !vrend_state.use_core_profile) { caps->v1.bset.poly_stipple = 1; caps->v1.bset.color_clamping = 1; caps->v1.prim_mask |= (1 << PIPE_PRIM_QUADS) | (1 << PIPE_PRIM_QUAD_STRIP) | (1 << PIPE_PRIM_POLYGON); } if (caps->v1.glsl_level >= 150) { caps->v1.prim_mask |= (1 << PIPE_PRIM_LINES_ADJACENCY) | (1 << PIPE_PRIM_LINE_STRIP_ADJACENCY) | (1 << PIPE_PRIM_TRIANGLES_ADJACENCY) | (1 << PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY); } if (caps->v1.glsl_level >= 400 || has_feature(feat_tessellation)) caps->v1.prim_mask |= (1 << PIPE_PRIM_PATCHES); if (epoxy_has_gl_extension("GL_ARB_vertex_type_10f_11f_11f_rev")) set_format_bit(&caps->v1.vertexbuffer, VIRGL_FORMAT_R11G11B10_FLOAT); if (has_feature(feat_nv_conditional_render) || has_feature(feat_gl_conditional_render)) caps->v1.bset.conditional_render = 1; if (has_feature(feat_indep_blend)) caps->v1.bset.indep_blend_enable = 1; if (has_feature(feat_draw_instance)) caps->v1.bset.instanceid = 1; if (has_feature(feat_ubo)) { glGetIntegerv(GL_MAX_VERTEX_UNIFORM_BLOCKS, &max); caps->v1.max_uniform_blocks = max + 1; } if (has_feature(feat_depth_clamp)) caps->v1.bset.depth_clip_disable = 1; if (gl_ver >= 32) { caps->v1.bset.fragment_coord_conventions = 1; caps->v1.bset.seamless_cube_map = 1; } else { if (epoxy_has_gl_extension("GL_ARB_fragment_coord_conventions")) caps->v1.bset.fragment_coord_conventions = 1; if (epoxy_has_gl_extension("GL_ARB_seamless_cube_map") || gles_ver >= 30) caps->v1.bset.seamless_cube_map = 1; } if (epoxy_has_gl_extension("GL_AMD_seamless_cube_map_per_texture")) { caps->v1.bset.seamless_cube_map_per_texture = 1; } if (has_feature(feat_texture_multisample)) caps->v1.bset.texture_multisample = 1; if (has_feature(feat_tessellation)) caps->v1.bset.has_tessellation_shaders = 1; if (has_feature(feat_sample_shading)) caps->v1.bset.has_sample_shading = 1; if (has_feature(feat_indirect_draw)) caps->v1.bset.has_indirect_draw = 1; if (has_feature(feat_indep_blend_func)) caps->v1.bset.indep_blend_func = 1; if (has_feature(feat_cube_map_array)) caps->v1.bset.cube_map_array = 1; if (has_feature(feat_texture_query_lod)) caps->v1.bset.texture_query_lod = 1; if (gl_ver >= 40) { caps->v1.bset.has_fp64 = 1; } else { /* need gpu shader 5 for bitfield insert */ if (epoxy_has_gl_extension("GL_ARB_gpu_shader_fp64") && epoxy_has_gl_extension("GL_ARB_gpu_shader5")) caps->v1.bset.has_fp64 = 1; } if (has_feature(feat_base_instance)) caps->v1.bset.start_instance = 1; if (epoxy_has_gl_extension("GL_ARB_shader_stencil_export")) { caps->v1.bset.shader_stencil_export = 1; } if (has_feature(feat_conditional_render_inverted)) caps->v1.bset.conditional_render_inverted = 1; if (gl_ver >= 45) { caps->v1.bset.has_cull = 1; caps->v1.bset.derivative_control = 1; } else { if (has_feature(feat_cull_distance)) caps->v1.bset.has_cull = 1; if (epoxy_has_gl_extension("GL_ARB_derivative_control")) caps->v1.bset.derivative_control = 1; } if (has_feature(feat_polygon_offset_clamp)) caps->v1.bset.polygon_offset_clamp = 1; if 
(has_feature(feat_transform_feedback_overflow_query)) caps->v1.bset.transform_feedback_overflow_query = 1; if (epoxy_has_gl_extension("GL_EXT_texture_mirror_clamp") || epoxy_has_gl_extension("GL_ARB_texture_mirror_clamp_to_edge") || epoxy_has_gl_extension("GL_EXT_texture_mirror_clamp_to_edge")) { caps->v1.bset.mirror_clamp = true; } if (has_feature(feat_texture_array)) { glGetIntegerv(GL_MAX_ARRAY_TEXTURE_LAYERS, &max); caps->v1.max_texture_array_layers = max; } /* we need tf3 so we can do gallium skip buffers */ if (has_feature(feat_transform_feedback)) { if (has_feature(feat_transform_feedback2)) caps->v1.bset.streamout_pause_resume = 1; if (has_feature(feat_transform_feedback3)) { glGetIntegerv(GL_MAX_TRANSFORM_FEEDBACK_BUFFERS, &max); caps->v1.max_streamout_buffers = max; } else if (gles_ver > 0) { glGetIntegerv(GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS, &max); /* As with the earlier version of transform feedback this min 4. */ if (max >= 4) { caps->v1.max_streamout_buffers = 4; } } else caps->v1.max_streamout_buffers = 4; } if (has_feature(feat_dual_src_blend)) { glGetIntegerv(GL_MAX_DUAL_SOURCE_DRAW_BUFFERS, &max); caps->v1.max_dual_source_render_targets = max; } if (has_feature(feat_arb_or_gles_ext_texture_buffer)) { glGetIntegerv(GL_MAX_TEXTURE_BUFFER_SIZE, &max); caps->v1.max_tbo_size = max; } if (has_feature(feat_texture_gather)) { if (gl_ver > 0) { glGetIntegerv(GL_MAX_PROGRAM_TEXTURE_GATHER_COMPONENTS_ARB, &max); caps->v1.max_texture_gather_components = max; } else { caps->v1.max_texture_gather_components = 4; } } if (has_feature(feat_viewport_array)) { glGetIntegerv(GL_MAX_VIEWPORTS, &max); caps->v1.max_viewports = max; } else { caps->v1.max_viewports = 1; } /* Common limits for all backends. */ caps->v1.max_render_targets = vrend_state.max_draw_buffers; glGetIntegerv(GL_MAX_SAMPLES, &max); caps->v1.max_samples = max; /* All of the formats are common. */ for (i = 0; i < VIRGL_FORMAT_MAX; i++) { enum virgl_formats fmt = (enum virgl_formats)i; if (tex_conv_table[i].internalformat != 0 || fmt == VIRGL_FORMAT_YV12 || fmt == VIRGL_FORMAT_NV12) { if (vrend_format_can_sample(fmt)) { set_format_bit(&caps->v1.sampler, fmt); if (vrend_format_can_render(fmt)) set_format_bit(&caps->v1.render, fmt); } } } /* These are filled in by the init code, so are common. */ if (has_feature(feat_nv_prim_restart) || has_feature(feat_gl_prim_restart)) { caps->v1.bset.primitive_restart = 1; } } static void vrend_renderer_fill_caps_v2(int gl_ver, int gles_ver, union virgl_caps *caps) { GLint max; GLfloat range[2]; uint32_t video_memory; const char *renderer = (const char *)glGetString(GL_RENDERER); /* Count this up when you add a feature flag that is used to set a CAP in * the guest that was set unconditionally before. Then check that flag and * this value to avoid regressions when a guest with a new mesa version is * run on an old virgl host. Use it also to indicate non-cap fixes on the * host that help enable features in the guest. */ caps->v2.host_feature_check_version = 7; /* Forward host GL_RENDERER to the guest. 
*/ strncpy(caps->v2.renderer, renderer, sizeof(caps->v2.renderer) - 1); glGetFloatv(GL_ALIASED_POINT_SIZE_RANGE, range); caps->v2.min_aliased_point_size = range[0]; caps->v2.max_aliased_point_size = range[1]; glGetFloatv(GL_ALIASED_LINE_WIDTH_RANGE, range); caps->v2.min_aliased_line_width = range[0]; caps->v2.max_aliased_line_width = range[1]; if (gl_ver > 0) { glGetFloatv(GL_SMOOTH_POINT_SIZE_RANGE, range); caps->v2.min_smooth_point_size = range[0]; caps->v2.max_smooth_point_size = range[1]; glGetFloatv(GL_SMOOTH_LINE_WIDTH_RANGE, range); caps->v2.min_smooth_line_width = range[0]; caps->v2.max_smooth_line_width = range[1]; } glGetFloatv(GL_MAX_TEXTURE_LOD_BIAS, &caps->v2.max_texture_lod_bias); glGetIntegerv(GL_MAX_VERTEX_ATTRIBS, (GLint*)&caps->v2.max_vertex_attribs); if (gl_ver >= 32 || (vrend_state.use_gles && gl_ver >= 30)) glGetIntegerv(GL_MAX_VERTEX_OUTPUT_COMPONENTS, &max); else max = 64; // minimum required value caps->v2.max_vertex_outputs = max / 4; glGetIntegerv(GL_MIN_PROGRAM_TEXEL_OFFSET, &caps->v2.min_texel_offset); glGetIntegerv(GL_MAX_PROGRAM_TEXEL_OFFSET, &caps->v2.max_texel_offset); glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, (GLint*)&caps->v2.uniform_buffer_offset_alignment); glGetIntegerv(GL_MAX_TEXTURE_SIZE, (GLint*)&caps->v2.max_texture_2d_size); glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE, (GLint*)&caps->v2.max_texture_3d_size); glGetIntegerv(GL_MAX_CUBE_MAP_TEXTURE_SIZE, (GLint*)&caps->v2.max_texture_cube_size); vrend_state.max_texture_2d_size = caps->v2.max_texture_2d_size; vrend_state.max_texture_3d_size = caps->v2.max_texture_3d_size; vrend_state.max_texture_cube_size = caps->v2.max_texture_cube_size; VREND_DEBUG(dbg_features, NULL, "Texture limits: 2D:%u 3D:%u Cube:%u\n", vrend_state.max_texture_2d_size, vrend_state.max_texture_3d_size, vrend_state.max_texture_cube_size); if (has_feature(feat_geometry_shader)) { glGetIntegerv(GL_MAX_GEOMETRY_OUTPUT_VERTICES, (GLint*)&caps->v2.max_geom_output_vertices); glGetIntegerv(GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS, (GLint*)&caps->v2.max_geom_total_output_components); } if (has_feature(feat_tessellation)) { glGetIntegerv(GL_MAX_TESS_PATCH_COMPONENTS, &max); caps->v2.max_shader_patch_varyings = max / 4; } else caps->v2.max_shader_patch_varyings = 0; if (has_feature(feat_texture_gather)) { glGetIntegerv(GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET, &caps->v2.min_texture_gather_offset); glGetIntegerv(GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET, &caps->v2.max_texture_gather_offset); } if (has_feature(feat_texture_buffer_range)) { glGetIntegerv(GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT, (GLint*)&caps->v2.texture_buffer_offset_alignment); } if (has_feature(feat_ssbo)) { glGetIntegerv(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT, (GLint*)&caps->v2.shader_buffer_offset_alignment); glGetIntegerv(GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS, &max); if (max > PIPE_MAX_SHADER_BUFFERS) max = PIPE_MAX_SHADER_BUFFERS; caps->v2.max_shader_buffer_other_stages = max; glGetIntegerv(GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS, &max); if (max > PIPE_MAX_SHADER_BUFFERS) max = PIPE_MAX_SHADER_BUFFERS; caps->v2.max_shader_buffer_frag_compute = max; glGetIntegerv(GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS, (GLint*)&caps->v2.max_combined_shader_buffers); } if (has_feature(feat_images)) { glGetIntegerv(GL_MAX_VERTEX_IMAGE_UNIFORMS, &max); if (max > PIPE_MAX_SHADER_IMAGES) max = PIPE_MAX_SHADER_IMAGES; caps->v2.max_shader_image_other_stages = max; glGetIntegerv(GL_MAX_FRAGMENT_IMAGE_UNIFORMS, &max); if (max > PIPE_MAX_SHADER_IMAGES) max = PIPE_MAX_SHADER_IMAGES; 
caps->v2.max_shader_image_frag_compute = max; if (gl_ver > 0) /* Seems GLES doesn't support multisample images */ glGetIntegerv(GL_MAX_IMAGE_SAMPLES, (GLint*)&caps->v2.max_image_samples); } if (has_feature(feat_storage_multisample)) caps->v1.max_samples = vrend_renderer_query_multisample_caps(caps->v1.max_samples, &caps->v2); caps->v2.capability_bits |= VIRGL_CAP_TGSI_INVARIANT | VIRGL_CAP_SET_MIN_SAMPLES | VIRGL_CAP_TGSI_PRECISE | VIRGL_CAP_APP_TWEAK_SUPPORT; /* If attribute isn't supported, assume 2048 which is the minimum allowed by the specification. */ if (gl_ver >= 44 || gles_ver >= 31) glGetIntegerv(GL_MAX_VERTEX_ATTRIB_STRIDE, (GLint*)&caps->v2.max_vertex_attrib_stride); else caps->v2.max_vertex_attrib_stride = 2048; if (has_feature(feat_compute_shader) && (vrend_state.use_gles || gl_ver >= 33)) { glGetIntegerv(GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS, (GLint*)&caps->v2.max_compute_work_group_invocations); glGetIntegerv(GL_MAX_COMPUTE_SHARED_MEMORY_SIZE, (GLint*)&caps->v2.max_compute_shared_memory_size); glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_COUNT, 0, (GLint*)&caps->v2.max_compute_grid_size[0]); glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_COUNT, 1, (GLint*)&caps->v2.max_compute_grid_size[1]); glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_COUNT, 2, (GLint*)&caps->v2.max_compute_grid_size[2]); glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_SIZE, 0, (GLint*)&caps->v2.max_compute_block_size[0]); glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_SIZE, 1, (GLint*)&caps->v2.max_compute_block_size[1]); glGetIntegeri_v(GL_MAX_COMPUTE_WORK_GROUP_SIZE, 2, (GLint*)&caps->v2.max_compute_block_size[2]); caps->v2.capability_bits |= VIRGL_CAP_COMPUTE_SHADER; } if (has_feature(feat_atomic_counters)) { glGetIntegerv(GL_MAX_VERTEX_ATOMIC_COUNTERS, (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_VERTEX)); glGetIntegerv(GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS, (GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_VERTEX)); glGetIntegerv(GL_MAX_FRAGMENT_ATOMIC_COUNTERS, (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_FRAGMENT)); glGetIntegerv(GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS, (GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_FRAGMENT)); if (has_feature(feat_geometry_shader)) { glGetIntegerv(GL_MAX_GEOMETRY_ATOMIC_COUNTERS, (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_GEOMETRY)); glGetIntegerv(GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS, (GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_GEOMETRY)); } if (has_feature(feat_tessellation)) { glGetIntegerv(GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS, (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_TESS_CTRL)); glGetIntegerv(GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS, (GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_TESS_CTRL)); glGetIntegerv(GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS, (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_TESS_EVAL)); glGetIntegerv(GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS, (GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_TESS_EVAL)); } if (has_feature(feat_compute_shader)) { glGetIntegerv(GL_MAX_COMPUTE_ATOMIC_COUNTERS, (GLint*)(caps->v2.max_atomic_counters + PIPE_SHADER_COMPUTE)); glGetIntegerv(GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS, (GLint*)(caps->v2.max_atomic_counter_buffers + PIPE_SHADER_COMPUTE)); } glGetIntegerv(GL_MAX_COMBINED_ATOMIC_COUNTERS, (GLint*)&caps->v2.max_combined_atomic_counters); glGetIntegerv(GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS, (GLint*)&caps->v2.max_combined_atomic_counter_buffers); } if (has_feature(feat_fb_no_attach)) caps->v2.capability_bits |= VIRGL_CAP_FB_NO_ATTACH; 
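/* The following checks map individual host GL features onto guest-visible capability bits. */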
if (has_feature(feat_texture_view)) caps->v2.capability_bits |= VIRGL_CAP_TEXTURE_VIEW; if (has_feature(feat_txqs)) caps->v2.capability_bits |= VIRGL_CAP_TXQS; if (has_feature(feat_barrier)) caps->v2.capability_bits |= VIRGL_CAP_MEMORY_BARRIER; if (has_feature(feat_copy_image)) caps->v2.capability_bits |= VIRGL_CAP_COPY_IMAGE; if (has_feature(feat_robust_buffer_access)) caps->v2.capability_bits |= VIRGL_CAP_ROBUST_BUFFER_ACCESS; if (has_feature(feat_framebuffer_fetch)) caps->v2.capability_bits |= VIRGL_CAP_TGSI_FBFETCH; if (has_feature(feat_shader_clock)) caps->v2.capability_bits |= VIRGL_CAP_SHADER_CLOCK; if (has_feature(feat_texture_barrier)) caps->v2.capability_bits |= VIRGL_CAP_TEXTURE_BARRIER; /* If we enable input arrays and don't have enhanced layouts then we * can't support components. */ if (has_feature(feat_enhanced_layouts)) caps->v2.capability_bits |= VIRGL_CAP_TGSI_COMPONENTS; if (has_feature(feat_srgb_write_control)) caps->v2.capability_bits |= VIRGL_CAP_SRGB_WRITE_CONTROL; if (has_feature(feat_transform_feedback3)) caps->v2.capability_bits |= VIRGL_CAP_TRANSFORM_FEEDBACK3; /* Enable feature use just now otherwise we just get a lot noise because * of the caps setting */ if (vrend_debug(NULL, dbg_features)) vrend_debug_add_flag(dbg_feature_use); /* always enable, only indicates that the CMD is supported */ caps->v2.capability_bits |= VIRGL_CAP_GUEST_MAY_INIT_LOG; if (has_feature(feat_qbo)) caps->v2.capability_bits |= VIRGL_CAP_QBO; caps->v2.capability_bits |= VIRGL_CAP_TRANSFER; if (vrend_check_framebuffer_mixed_color_attachements()) caps->v2.capability_bits |= VIRGL_CAP_FBO_MIXED_COLOR_FORMATS; /* We want to expose ARB_gpu_shader_fp64 when running on top of ES */ if (vrend_state.use_gles) { caps->v2.capability_bits |= VIRGL_CAP_FAKE_FP64; } if (has_feature(feat_indirect_draw)) caps->v2.capability_bits |= VIRGL_CAP_BIND_COMMAND_ARGS; if (has_feature(feat_multi_draw_indirect)) caps->v2.capability_bits |= VIRGL_CAP_MULTI_DRAW_INDIRECT; if (has_feature(feat_indirect_params)) caps->v2.capability_bits |= VIRGL_CAP_INDIRECT_PARAMS; for (int i = 0; i < VIRGL_FORMAT_MAX; i++) { enum virgl_formats fmt = (enum virgl_formats)i; if (tex_conv_table[i].internalformat != 0) { if (vrend_format_can_readback(fmt)) { VREND_DEBUG(dbg_features, NULL, "Support readback of %s\n", util_format_name(fmt)); set_format_bit(&caps->v2.supported_readback_formats, fmt); } } if (vrend_format_can_scanout(fmt)) set_format_bit(&caps->v2.scanout, fmt); } if (has_feature(feat_clear_texture)) caps->v2.capability_bits |= VIRGL_CAP_CLEAR_TEXTURE; if (has_feature(feat_clip_control)) caps->v2.capability_bits |= VIRGL_CAP_CLIP_HALFZ; if (epoxy_has_gl_extension("GL_KHR_texture_compression_astc_sliced_3d")) caps->v2.capability_bits |= VIRGL_CAP_3D_ASTC; caps->v2.capability_bits |= VIRGL_CAP_INDIRECT_INPUT_ADDR; caps->v2.capability_bits |= VIRGL_CAP_COPY_TRANSFER; if (has_feature(feat_arb_buffer_storage) && !vrend_state.use_external_blob) { const char *vendor = (const char *)glGetString(GL_VENDOR); bool is_mesa = ((strstr(renderer, "Mesa") != NULL) || (strstr(renderer, "DRM") != NULL)); /* * Intel GPUs (aside from Atom, which doesn't expose GL4.5) are cache-coherent. * Mesa AMDGPUs use write-combine mappings for coherent/persistent memory (see * RADEON_FLAG_GTT_WC in si_buffer.c/r600_buffer_common.c). For Nvidia, we can guess and * check. Long term, maybe a GL extension or using VK could replace these heuristics. * * Note Intel VMX ignores the caching type returned from virglrenderer, while AMD SVM and * ARM honor it. 
*/ if (is_mesa) { if (strstr(vendor, "Intel") != NULL) vrend_state.inferred_gl_caching_type = VIRGL_RENDERER_MAP_CACHE_CACHED; else if (strstr(vendor, "AMD") != NULL) vrend_state.inferred_gl_caching_type = VIRGL_RENDERER_MAP_CACHE_WC; } else { /* This is an educated guess since things don't explode with VMX + Nvidia. */ if (strstr(renderer, "Quadro K2200") != NULL) vrend_state.inferred_gl_caching_type = VIRGL_RENDERER_MAP_CACHE_CACHED; } if (vrend_state.inferred_gl_caching_type) caps->v2.capability_bits |= VIRGL_CAP_ARB_BUFFER_STORAGE; } #ifdef ENABLE_MINIGBM_ALLOCATION if (has_feature(feat_memory_object) && has_feature(feat_memory_object_fd)) { if (!strcmp(gbm_device_get_backend_name(gbm->device), "i915") && !vrend_winsys_different_gpu()) caps->v2.capability_bits |= VIRGL_CAP_ARB_BUFFER_STORAGE; } #endif if (has_feature(feat_blend_equation_advanced)) caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_BLEND_EQUATION; #ifdef HAVE_EPOXY_EGL_H if (egl) caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_UNTYPED_RESOURCE; #endif video_memory = vrend_renderer_get_video_memory(); if (video_memory) { caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_VIDEO_MEMORY; caps->v2.max_video_memory = video_memory; } if (has_feature(feat_ati_meminfo) || has_feature(feat_nvx_gpu_memory_info)) { caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_MEMINFO; } if (has_feature(feat_khr_debug)) caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_STRING_MARKER; if (has_feature(feat_implicit_msaa)) caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_IMPLICIT_MSAA; if (vrend_winsys_different_gpu()) caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_DIFFERENT_GPU; // we use capability bits (not a version of protocol), because // we disable this on client side if virglrenderer is used under // vtest. vtest can't support this, because size of resource // is used to create shmem. On drm path, we can use this, because // size of drm resource (bo) is not passed to virglrenderer and // we can pass "1" as size on drm path, but not on vtest. caps->v2.capability_bits_v2 |= VIRGL_CAP_V2_COPY_TRANSFER_BOTH_DIRECTIONS; if (has_feature(feat_anisotropic_filter)) { float max_aniso; glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY, &max_aniso); caps->v2.max_anisotropy = MIN2(max_aniso, 16.0); } glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, &max); caps->v2.max_texture_image_units = max; } void vrend_renderer_fill_caps(uint32_t set, uint32_t version, union virgl_caps *caps) { int gl_ver, gles_ver; GLenum err; bool fill_capset2 = false; if (!caps) return; switch (set) { case VIRGL_RENDERER_CAPSET_VIRGL: if (version > VREND_CAPSET_VIRGL_MAX_VERSION) return; memset(caps, 0, sizeof(struct virgl_caps_v1)); caps->max_version = VREND_CAPSET_VIRGL_MAX_VERSION; break; case VIRGL_RENDERER_CAPSET_VIRGL2: if (version > VREND_CAPSET_VIRGL2_MAX_VERSION) return; memset(caps, 0, sizeof(*caps)); caps->max_version = VREND_CAPSET_VIRGL2_MAX_VERSION; fill_capset2 = true; break; default: return; } /* We don't want to deal with stale error states that the caller might not * have cleaned up propperly, so read the error state until we are okay. 
*/ while ((err = glGetError()) != GL_NO_ERROR) vrend_printf("%s: Entering with stale GL error: %d\n", __func__, err); if (vrend_state.use_gles) { gles_ver = epoxy_gl_version(); gl_ver = 0; } else { gles_ver = 0; gl_ver = epoxy_gl_version(); } vrend_fill_caps_glsl_version(gl_ver, gles_ver, caps); VREND_DEBUG(dbg_features, NULL, "GLSL support level: %d", caps->v1.glsl_level); vrend_renderer_fill_caps_v1(gl_ver, gles_ver, caps); if (!fill_capset2) return; vrend_renderer_fill_caps_v2(gl_ver, gles_ver, caps); } GLint64 vrend_renderer_get_timestamp(void) { GLint64 v; glGetInteger64v(GL_TIMESTAMP, &v); return v; } void *vrend_renderer_get_cursor_contents(struct pipe_resource *pres, uint32_t *width, uint32_t *height) { struct vrend_resource *res = (struct vrend_resource *)pres; GLenum format, type; int blsize; char *data, *data2; int size; uint h; if (res->base.width0 > 128 || res->base.height0 > 128) return NULL; if (res->target != GL_TEXTURE_2D) return NULL; if (!width || !height) return NULL; *width = res->base.width0; *height = res->base.height0; format = tex_conv_table[res->base.format].glformat; type = tex_conv_table[res->base.format].gltype; blsize = util_format_get_blocksize(res->base.format); size = util_format_get_nblocks(res->base.format, res->base.width0, res->base.height0) * blsize; data = malloc(size); data2 = malloc(size); if (!data || !data2) { free(data); free(data2); return NULL; } if (has_feature(feat_arb_robustness)) { glBindTexture(res->target, res->id); glGetnTexImageARB(res->target, 0, format, type, size, data); } else if (vrend_state.use_gles) { do_readpixels(res, 0, 0, 0, 0, 0, *width, *height, format, type, size, data); } else { glBindTexture(res->target, res->id); glGetTexImage(res->target, 0, format, type, data); } for (h = 0; h < res->base.height0; h++) { uint32_t doff = (res->base.height0 - h - 1) * res->base.width0 * blsize; uint32_t soff = h * res->base.width0 * blsize; memcpy(data2 + doff, data + soff, res->base.width0 * blsize); } free(data); glBindTexture(res->target, 0); return data2; } void vrend_renderer_force_ctx_0(void) { vrend_state.current_ctx = NULL; vrend_state.current_hw_ctx = NULL; vrend_hw_switch_context(vrend_state.ctx0, true); } void vrend_renderer_get_rect(struct pipe_resource *pres, const struct iovec *iov, unsigned int num_iovs, uint32_t offset, int x, int y, int width, int height) { struct vrend_resource *res = (struct vrend_resource *)pres; struct vrend_transfer_info transfer_info; struct pipe_box box; int elsize; memset(&transfer_info, 0, sizeof(transfer_info)); elsize = util_format_get_blocksize(res->base.format); box.x = x; box.y = y; box.z = 0; box.width = width; box.height = height; box.depth = 1; transfer_info.box = &box; transfer_info.stride = util_format_get_nblocksx(res->base.format, res->base.width0) * elsize; transfer_info.offset = offset; transfer_info.iovec = iov; transfer_info.iovec_cnt = num_iovs; vrend_renderer_transfer_pipe(pres, &transfer_info, VIRGL_TRANSFER_FROM_HOST); } void vrend_renderer_attach_res_ctx(struct vrend_context *ctx, struct virgl_resource *res) { if (!res->pipe_resource) { /* move the last untyped resource from cache to list */ if (unlikely(ctx->untyped_resource_cache)) { struct virgl_resource *last = ctx->untyped_resource_cache; struct vrend_untyped_resource *wrapper = malloc(sizeof(*wrapper)); if (wrapper) { wrapper->resource = last; list_add(&wrapper->head, &ctx->untyped_resources); } else { vrend_printf("dropping attached resource %d due to OOM\n", last->res_id); } } ctx->untyped_resource_cache = res; /* 
defer to vrend_renderer_pipe_resource_set_type */ return; } vrend_ctx_resource_insert(ctx->res_hash, res->res_id, (struct vrend_resource *)res->pipe_resource); } void vrend_renderer_detach_res_ctx(struct vrend_context *ctx, struct virgl_resource *res) { if (!res->pipe_resource) { if (ctx->untyped_resource_cache == res) { ctx->untyped_resource_cache = NULL; } else { struct vrend_untyped_resource *iter; LIST_FOR_EACH_ENTRY(iter, &ctx->untyped_resources, head) { if (iter->resource == res) { list_del(&iter->head); free(iter); break; } } } return; } vrend_ctx_resource_remove(ctx->res_hash, res->res_id); } static struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx, int res_handle) { return vrend_ctx_resource_lookup(ctx->res_hash, res_handle); } void vrend_context_set_debug_flags(struct vrend_context *ctx, const char *flagstring) { if (vrend_debug_can_override()) { ctx->debug_flags |= vrend_get_debug_flags(flagstring); if (ctx->debug_flags & dbg_features) vrend_debug_add_flag(dbg_feature_use); } } void vrend_renderer_resource_get_info(struct pipe_resource *pres, struct vrend_renderer_resource_info *info) { struct vrend_resource *res = (struct vrend_resource *)pres; int elsize; elsize = util_format_get_blocksize(res->base.format); info->tex_id = res->id; info->width = res->base.width0; info->height = res->base.height0; info->depth = res->base.depth0; info->format = res->base.format; info->flags = res->y_0_top ? VIRGL_RESOURCE_Y_0_TOP : 0; info->stride = util_format_get_nblocksx(res->base.format, u_minify(res->base.width0, 0)) * elsize; } void vrend_renderer_get_cap_set(uint32_t cap_set, uint32_t *max_ver, uint32_t *max_size) { switch (cap_set) { case VIRGL_RENDERER_CAPSET_VIRGL: *max_ver = VREND_CAPSET_VIRGL_MAX_VERSION; *max_size = sizeof(struct virgl_caps_v1); break; case VIRGL_RENDERER_CAPSET_VIRGL2: *max_ver = VREND_CAPSET_VIRGL2_MAX_VERSION; *max_size = sizeof(struct virgl_caps_v2); break; default: *max_ver = 0; *max_size = 0; break; } } void vrend_renderer_create_sub_ctx(struct vrend_context *ctx, int sub_ctx_id) { struct vrend_sub_context *sub; struct virgl_gl_ctx_param ctx_params; GLuint i; LIST_FOR_EACH_ENTRY(sub, &ctx->sub_ctxs, head) { if (sub->sub_ctx_id == sub_ctx_id) { return; } } sub = CALLOC_STRUCT(vrend_sub_context); if (!sub) return; ctx_params.shared = (ctx->ctx_id == 0 && sub_ctx_id == 0) ? 
false : true; ctx_params.major_ver = vrend_state.gl_major_ver; ctx_params.minor_ver = vrend_state.gl_minor_ver; sub->gl_context = vrend_clicbs->create_gl_context(0, &ctx_params); sub->parent = ctx; vrend_clicbs->make_current(sub->gl_context); /* enable if vrend_renderer_init function has done it as well */ if (has_feature(feat_debug_cb)) { glDebugMessageCallback(vrend_debug_cb, NULL); glEnable(GL_DEBUG_OUTPUT); glDisable(GL_DEBUG_OUTPUT_SYNCHRONOUS); } sub->sub_ctx_id = sub_ctx_id; /* initialize the depth far_val to 1 */ for (i = 0; i < PIPE_MAX_VIEWPORTS; i++) { sub->vps[i].far_val = 1.0; } if (!has_feature(feat_gles31_vertex_attrib_binding)) { glGenVertexArrays(1, &sub->vaoid); glBindVertexArray(sub->vaoid); } glGenFramebuffers(1, &sub->fb_id); glBindFramebuffer(GL_FRAMEBUFFER, sub->fb_id); glGenFramebuffers(2, sub->blit_fb_ids); for (int i = 0; i < VREND_PROGRAM_NQUEUES; ++i) list_inithead(&sub->gl_programs[i]); list_inithead(&sub->cs_programs); list_inithead(&sub->streamout_list); sub->object_hash = vrend_object_init_ctx_table(); ctx->sub = sub; list_add(&sub->head, &ctx->sub_ctxs); if (sub_ctx_id == 0) ctx->sub0 = sub; vrend_set_tweak_from_env(&ctx->sub->tweaks); } unsigned vrend_context_has_debug_flag(const struct vrend_context *ctx, enum virgl_debug_flags flag) { return ctx && (ctx->debug_flags & flag); } void vrend_print_context_name(const struct vrend_context *ctx) { if (ctx) vrend_printf("%s: ", ctx->debug_name); else vrend_printf("HOST: "); } void vrend_renderer_destroy_sub_ctx(struct vrend_context *ctx, int sub_ctx_id) { struct vrend_sub_context *sub, *tofree = NULL; /* never destroy sub context id 0 */ if (sub_ctx_id == 0) return; LIST_FOR_EACH_ENTRY(sub, &ctx->sub_ctxs, head) { if (sub->sub_ctx_id == sub_ctx_id) { tofree = sub; } } if (tofree) { if (ctx->sub == tofree) { ctx->sub = ctx->sub0; } vrend_destroy_sub_context(tofree); vrend_clicbs->make_current(ctx->sub->gl_context); } } void vrend_renderer_set_sub_ctx(struct vrend_context *ctx, int sub_ctx_id) { struct vrend_sub_context *sub; /* find the sub ctx */ if (ctx->sub && ctx->sub->sub_ctx_id == sub_ctx_id) return; LIST_FOR_EACH_ENTRY(sub, &ctx->sub_ctxs, head) { if (sub->sub_ctx_id == sub_ctx_id) { ctx->sub = sub; vrend_clicbs->make_current(sub->gl_context); break; } } } void vrend_renderer_prepare_reset(void) { /* make sure user contexts are no longer accessed */ vrend_free_sync_thread(); vrend_hw_switch_context(vrend_state.ctx0, true); } void vrend_renderer_reset(void) { vrend_free_fences(); vrend_blitter_fini(); vrend_destroy_context(vrend_state.ctx0); vrend_state.ctx0 = vrend_create_context(0, strlen("HOST"), "HOST"); /* TODO respawn sync thread */ } int vrend_renderer_get_poll_fd(void) { return vrend_state.eventfd; } int vrend_renderer_export_query(struct pipe_resource *pres, struct virgl_renderer_export_query *export_query) { struct vrend_resource *res = (struct vrend_resource *)pres; #ifdef ENABLE_MINIGBM_ALLOCATION if (res->gbm_bo) return virgl_gbm_export_query(res->gbm_bo, export_query); #else (void)res; #endif /* * Implementations that support eglExportDMABUFImageMESA can also export certain resources. * This is omitted currently since virgl_renderer_get_fd_for_texture supports that use case. 
*/ export_query->out_num_fds = 0; export_query->out_fourcc = 0; export_query->out_modifier = DRM_FORMAT_MOD_INVALID; if (export_query->in_export_fds) return -EINVAL; return 0; } int vrend_renderer_pipe_resource_create(struct vrend_context *ctx, uint32_t blob_id, const struct vrend_renderer_resource_create_args *args) { struct vrend_resource *res; res = (struct vrend_resource *)vrend_renderer_resource_create(args, NULL); if (!res) return EINVAL; res->blob_id = blob_id; list_addtail(&res->head, &ctx->vrend_resources); return 0; } struct pipe_resource *vrend_get_blob_pipe(struct vrend_context *ctx, uint64_t blob_id) { uint32_t id = (uint32_t)blob_id; struct vrend_resource *res, *stor; LIST_FOR_EACH_ENTRY_SAFE(res, stor, &ctx->vrend_resources, head) { if (res->blob_id != id) continue; list_del(&res->head); /* Set the blob id to zero, since it won't be used anymore */ res->blob_id = 0; return &res->base; } return NULL; } int vrend_renderer_pipe_resource_set_type(struct vrend_context *ctx, uint32_t res_id, const struct vrend_renderer_resource_set_type_args *args) { struct virgl_resource *res = NULL; /* look up the untyped resource */ if (ctx->untyped_resource_cache && ctx->untyped_resource_cache->res_id == res_id) { res = ctx->untyped_resource_cache; ctx->untyped_resource_cache = NULL; } else { /* cache miss */ struct vrend_untyped_resource *iter; LIST_FOR_EACH_ENTRY(iter, &ctx->untyped_resources, head) { if (iter->resource->res_id == res_id) { res = iter->resource; list_del(&iter->head); free(iter); break; } } } /* either a bad res_id or the resource is already typed */ if (!res) { if (vrend_renderer_ctx_res_lookup(ctx, res_id)) return 0; vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_id); return EINVAL; } /* resource is still untyped */ if (!res->pipe_resource) { #ifdef HAVE_EPOXY_EGL_H const struct vrend_renderer_resource_create_args create_args = { .target = PIPE_TEXTURE_2D, .format = args->format, .bind = args->bind, .width = args->width, .height = args->height, .depth = 1, .array_size = 1, .last_level = 0, .nr_samples = 0, .flags = 0, }; int plane_fds[VIRGL_GBM_MAX_PLANES]; struct vrend_resource *gr; uint32_t virgl_format; uint32_t drm_format; int ret; if (res->fd_type != VIRGL_RESOURCE_FD_DMABUF) return EINVAL; for (uint32_t i = 0; i < args->plane_count; i++) plane_fds[i] = res->fd; gr = vrend_resource_create(&create_args); if (!gr) return ENOMEM; virgl_format = gr->base.format; drm_format = 0; if (virgl_gbm_convert_format(&virgl_format, &drm_format)) { vrend_printf("%s: unsupported format %d\n", __func__, virgl_format); FREE(gr); return EINVAL; } gr->egl_image = virgl_egl_image_from_dmabuf(egl, args->width, args->height, drm_format, args->modifier, args->plane_count, plane_fds, args->plane_strides, args->plane_offsets); if (!gr->egl_image) { vrend_printf("%s: failed to create egl image\n", __func__); FREE(gr); return EINVAL; } gr->storage_bits |= VREND_STORAGE_EGL_IMAGE; ret = vrend_resource_alloc_texture(gr, virgl_format, gr->egl_image); if (ret) { virgl_egl_image_destroy(egl, gr->egl_image); FREE(gr); return ret; } /* "promote" the fd to pipe_resource */ close(res->fd); res->fd = -1; res->fd_type = VIRGL_RESOURCE_FD_INVALID; res->pipe_resource = &gr->base; #else /* HAVE_EPOXY_EGL_H */ (void)args; vrend_printf("%s: no EGL support \n", __func__); return EINVAL; #endif /* HAVE_EPOXY_EGL_H */ } vrend_ctx_resource_insert(ctx->res_hash, res->res_id, (struct vrend_resource *)res->pipe_resource); return 0; } uint32_t vrend_renderer_resource_get_map_info(struct 
pipe_resource *pres) { struct vrend_resource *res = (struct vrend_resource *)pres; return res->map_info; } int vrend_renderer_resource_map(struct pipe_resource *pres, void **map, uint64_t *out_size) { struct vrend_resource *res = (struct vrend_resource *)pres; if (!has_bits(res->storage_bits, VREND_STORAGE_GL_BUFFER | VREND_STORAGE_GL_IMMUTABLE)) return -EINVAL; glBindBufferARB(res->target, res->id); *map = glMapBufferRange(res->target, 0, res->size, res->buffer_storage_flags); if (!*map) return -EINVAL; glBindBufferARB(res->target, 0); *out_size = res->size; return 0; } int vrend_renderer_resource_unmap(struct pipe_resource *pres) { struct vrend_resource *res = (struct vrend_resource *)pres; if (!has_bits(res->storage_bits, VREND_STORAGE_GL_BUFFER | VREND_STORAGE_GL_IMMUTABLE)) return -EINVAL; glBindBufferARB(res->target, res->id); glUnmapBuffer(res->target); glBindBufferARB(res->target, 0); return 0; } int vrend_renderer_create_ctx0_fence(uint32_t fence_id) { void *fence_cookie = (void *)(uintptr_t)fence_id; return vrend_renderer_create_fence(vrend_state.ctx0, VIRGL_RENDERER_FENCE_FLAG_MERGEABLE, fence_cookie); } #ifdef HAVE_EPOXY_EGL_H static bool find_ctx0_fence_locked(struct list_head *fence_list, void *fence_cookie, bool *seen_first, struct vrend_fence **fence) { struct vrend_fence *iter; LIST_FOR_EACH_ENTRY(iter, fence_list, fences) { /* only consider ctx0 fences */ if (iter->ctx != vrend_state.ctx0) continue; if (iter->fence_cookie == fence_cookie) { *fence = iter; return true; } if (!*seen_first) { if (fence_cookie < iter->fence_cookie) return true; *seen_first = true; } } return false; } #endif int vrend_renderer_export_ctx0_fence(uint32_t fence_id, int* out_fd) { #ifdef HAVE_EPOXY_EGL_H if (!vrend_state.use_egl_fence) { return -EINVAL; } if (vrend_state.sync_thread) mtx_lock(&vrend_state.fence_mutex); void *fence_cookie = (void *)(uintptr_t)fence_id; bool seen_first = false; struct vrend_fence *fence = NULL; bool found = find_ctx0_fence_locked(&vrend_state.fence_list, fence_cookie, &seen_first, &fence); if (!found) { found = find_ctx0_fence_locked(&vrend_state.fence_wait_list, fence_cookie, &seen_first, &fence); /* consider signaled when no active ctx0 fence at all */ if (!found && !seen_first) found = true; } if (vrend_state.sync_thread) mtx_unlock(&vrend_state.fence_mutex); if (found) { if (fence) return virgl_egl_export_fence(egl, fence->eglsyncobj, out_fd) ? 0 : -EINVAL; else return virgl_egl_export_signaled_fence(egl, out_fd) ? 
0 : -EINVAL; } #else (void)fence_id; (void)out_fd; #endif return -EINVAL; } void vrend_renderer_get_meminfo(struct vrend_context *ctx, uint32_t res_handle) { struct vrend_resource *res; struct virgl_memory_info *info; res = vrend_renderer_ctx_res_lookup(ctx, res_handle); if (!res) { vrend_report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle); return; } info = (struct virgl_memory_info *)res->iov->iov_base; if (has_feature(feat_nvx_gpu_memory_info)) { GLint i; glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &i); info->total_device_memory = i; glGetIntegerv(GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX, &i); info->total_staging_memory = i - info->total_device_memory; glGetIntegerv(GL_GPU_MEMORY_INFO_EVICTION_COUNT_NVX, &i); info->nr_device_memory_evictions = i; glGetIntegerv(GL_GPU_MEMORY_INFO_EVICTED_MEMORY_NVX, &i); info->device_memory_evicted = i; } if (has_feature(feat_ati_meminfo)) { GLint i[4]; glGetIntegerv(GL_VBO_FREE_MEMORY_ATI, i); info->avail_device_memory = i[0]; info->avail_staging_memory = i[2]; } } static uint32_t vrend_renderer_get_video_memory(void) { GLint video_memory = vrend_winsys_query_video_memory(); if (!video_memory && has_feature(feat_nvx_gpu_memory_info)) glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &video_memory); return video_memory; } void vrend_context_emit_string_marker(struct vrend_context *ctx, GLsizei length, const char * message) { VREND_DEBUG(dbg_khr, ctx, "MARKER: '%.*s'\n", length, message); #ifdef ENABLE_TRACING char buf[256]; if (length > 6 && !strncmp(message, "BEGIN:", 6)) { snprintf(buf, 256, "%.*s", length - 6, &message[6]); TRACE_SCOPE_BEGIN(buf); } else if (length > 4 && !strncmp(message, "END:", 4)) { snprintf(buf, 256, "%.*s", length - 4, &message[4]); const char *scope = buf; TRACE_SCOPE_END(scope); } #endif if (has_feature(feat_khr_debug)) { if (vrend_state.use_gles) glDebugMessageInsertKHR(GL_DEBUG_SOURCE_APPLICATION_KHR, GL_DEBUG_TYPE_MARKER_KHR, 0, GL_DEBUG_SEVERITY_NOTIFICATION, length, message); else glDebugMessageInsert(GL_DEBUG_SOURCE_APPLICATION, GL_DEBUG_TYPE_MARKER, 0, GL_DEBUG_SEVERITY_NOTIFICATION_KHR, length, message); } }
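/*
 * Usage sketch (example only, not part of the original file): how a caller
 * might map and unmap a GL-buffer-backed blob resource with the functions
 * defined above. The helper name and the ctx/blob_id values are assumptions
 * for illustration; only resources carrying the VREND_STORAGE_GL_BUFFER |
 * VREND_STORAGE_GL_IMMUTABLE storage bits can be mapped by
 * vrend_renderer_resource_map().
 */
static int vrend_example_map_blob(struct vrend_context *ctx, uint64_t blob_id)
{
   struct pipe_resource *pres = vrend_get_blob_pipe(ctx, blob_id);
   void *map = NULL;
   uint64_t size = 0;
   int ret;

   if (!pres)
      return -EINVAL;

   ret = vrend_renderer_resource_map(pres, &map, &size);
   if (ret)
      return ret;

   /* ... read or write up to `size` bytes at `map` here ... */

   return vrend_renderer_resource_unmap(pres);
}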
142260.c
/* * Copyright (c) 2021 Frust GmbH * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _MATH_HEADERS_H #include "math_headers.h" #endif /* _MATH_HEADERS_H */ /****************************************************************************/ #if defined(FLOATING_POINT_SUPPORT) /****************************************************************************/ #if LDBL_MANT_DIG == DBL_MANT_DIG && LDBL_MAX_EXP == DBL_MAX_EXP /****************************************************************************/ long double frexpl(long double x, int *nptr) { return frexp(x, nptr); } /****************************************************************************/ #endif /* LDBL_MANT_DIG == DBL_MANT_DIG && LDBL_MAX_EXP == DBL_MAX_EXP */ /****************************************************************************/ #endif /* FLOATING_POINT_SUPPORT */
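/*
 * Usage sketch (example only, not part of the original file): frexpl()
 * splits x into a normalized fraction m in [0.5, 1.0) and an exponent e such
 * that x == m * 2^e. The prototype comes from the library's math headers
 * included above. For x == 12.0L this yields m == 0.75 and e == 4.
 */
#include <stdio.h>

static void frexpl_example(void)
{
    int e = 0;
    long double m = frexpl(12.0L, &e);

    /* Prints: 12.000000 = 0.750000 * 2^4 */
    printf("12.000000 = %Lf * 2^%d\n", m, e);
}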
302032.c
//------------------------------------------------------------------------------ // GB_AxB__times_div_uint32.c: matrix multiply for a single semiring //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated1/ or Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB_dev.h" #ifndef GBCOMPACT #include "GB.h" #include "GB_control.h" #include "GB_bracket.h" #include "GB_sort.h" #include "GB_atomics.h" #include "GB_AxB_saxpy.h" #if 1 #include "GB_AxB__include2.h" #else #include "GB_AxB__include1.h" #endif #include "GB_unused.h" #include "GB_bitmap_assign_methods.h" #include "GB_ek_slice_search.c" // This C=A*B semiring is defined by the following types and operators: // A'*B (dot2): GB (_Adot2B__times_div_uint32) // A'*B (dot3): GB (_Adot3B__times_div_uint32) // C+=A'*B (dot4): GB (_Adot4B__times_div_uint32) // A*B (saxpy bitmap): GB (_AsaxbitB__times_div_uint32) // A*B (saxpy3): GB (_Asaxpy3B__times_div_uint32) // A*B (saxpy4): GB (_Asaxpy4B__times_div_uint32) // no mask: GB (_Asaxpy3B_noM__times_div_uint32) // mask M: GB (_Asaxpy3B_M__times_div_uint32) // mask !M: GB (_Asaxpy3B_notM__times_div_uint32) // C type: uint32_t // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 0 // Multiply: z = GB_IDIV_UNSIGNED (aik, bkj, 32) // Add: cij *= z // 'any' monoid? 0 // atomic? 1 // OpenMP atomic? 1 // MultAdd: uint32_t x_op_y = GB_IDIV_UNSIGNED (aik, bkj, 32) ; cij *= x_op_y // Identity: 1 // Terminal: if (cij == 0) { break ; } #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t #define GB_ASIZE \ sizeof (uint32_t) #define GB_BSIZE \ sizeof (uint32_t) #define GB_CSIZE \ sizeof (uint32_t) // true for int64, uint64, float, double, float complex, and double complex #define GB_CTYPE_IGNORE_OVERFLOW \ 0 // aik = Ax [pA] #define GB_GETA(aik,Ax,pA,A_iso) \ uint32_t aik = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bkj = Bx [pB] #define GB_GETB(bkj,Bx,pB,B_iso) \ uint32_t bkj = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // Gx [pG] = Ax [pA] #define GB_LOADA(Gx,pG,Ax,pA,A_iso) \ Gx [pG] = GBX (Ax, pA, A_iso) // Gx [pG] = Bx [pB] #define GB_LOADB(Gx,pG,Bx,pB,B_iso) \ Gx [pG] = GBX (Bx, pB, B_iso) #define GB_CX(p) \ Cx [p] // multiply operator #define GB_MULT(z, x, y, i, k, j) \ z = GB_IDIV_UNSIGNED (x, y, 32) // cast from a real scalar (or 2, if C is complex) to the type of C #define GB_CTYPE_CAST(x,y) \ ((uint32_t) x) // cast from a real scalar (or 2, if A is complex) to the type of A #define GB_ATYPE_CAST(x,y) \ ((uint32_t) x) // multiply-add #define GB_MULTADD(z, x, y, i, k, j) \ uint32_t x_op_y = GB_IDIV_UNSIGNED (x, y, 32) ; z *= x_op_y // monoid identity value #define GB_IDENTITY \ 1 // 1 if the identity value can be assigned via memset, with all bytes the same #define GB_HAS_IDENTITY_BYTE \ 0 // identity byte, for memset #define GB_IDENTITY_BYTE \ (none) // break if cij reaches the terminal value (dot product only) #define GB_DOT_TERMINAL(cij) \ if (cij == 0) { break ; } // simd pragma for dot-product loop vectorization #define GB_PRAGMA_SIMD_DOT(cij) \ ; // simd pragma for other loop vectorization #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // 1 for the PLUS_PAIR_(real) 
semirings, not for the complex case #define GB_IS_PLUS_PAIR_REAL_SEMIRING \ 0 // declare the cij scalar (initialize cij to zero for PLUS_PAIR) #define GB_CIJ_DECLARE(cij) \ uint32_t cij // cij = Cx [pC] for dot4 method only #define GB_GET4C(cij,p) \ cij = (C_in_iso) ? cinput : Cx [p] // Cx [pC] = cij #define GB_PUTC(cij,p) \ Cx [p] = cij // Cx [p] = t #define GB_CIJ_WRITE(p,t) \ Cx [p] = t // C(i,j) += t #define GB_CIJ_UPDATE(p,t) \ Cx [p] *= t // x + y #define GB_ADD_FUNCTION(x,y) \ x * y // bit pattern for bool, 8-bit, 16-bit, and 32-bit integers #define GB_CTYPE_BITS \ 0xffffffffL // 1 if monoid update can skipped entirely (the ANY monoid) #define GB_IS_ANY_MONOID \ 0 // 1 if monoid update is EQ #define GB_IS_EQ_MONOID \ 0 // 1 if monoid update can be done atomically, 0 otherwise #define GB_HAS_ATOMIC \ 1 // 1 if monoid update can be done with an OpenMP atomic update, 0 otherwise #if GB_MICROSOFT #define GB_HAS_OMP_ATOMIC \ 1 #else #define GB_HAS_OMP_ATOMIC \ 1 #endif // 1 for the ANY_PAIR_ISO semiring #define GB_IS_ANY_PAIR_SEMIRING \ 0 // 1 if PAIR is the multiply operator #define GB_IS_PAIR_MULTIPLIER \ 0 // 1 if monoid is PLUS_FC32 #define GB_IS_PLUS_FC32_MONOID \ 0 // 1 if monoid is PLUS_FC64 #define GB_IS_PLUS_FC64_MONOID \ 0 // 1 if monoid is ANY_FC32 #define GB_IS_ANY_FC32_MONOID \ 0 // 1 if monoid is ANY_FC64 #define GB_IS_ANY_FC64_MONOID \ 0 // 1 if monoid is MIN for signed or unsigned integers #define GB_IS_IMIN_MONOID \ 0 // 1 if monoid is MAX for signed or unsigned integers #define GB_IS_IMAX_MONOID \ 0 // 1 if monoid is MIN for float or double #define GB_IS_FMIN_MONOID \ 0 // 1 if monoid is MAX for float or double #define GB_IS_FMAX_MONOID \ 0 // 1 for the FIRSTI or FIRSTI1 multiply operator #define GB_IS_FIRSTI_MULTIPLIER \ 0 // 1 for the FIRSTJ or FIRSTJ1 multiply operator #define GB_IS_FIRSTJ_MULTIPLIER \ 0 // 1 for the SECONDJ or SECONDJ1 multiply operator #define GB_IS_SECONDJ_MULTIPLIER \ 0 // atomic compare-exchange #define GB_ATOMIC_COMPARE_EXCHANGE(target, expected, desired) \ GB_ATOMIC_COMPARE_EXCHANGE_32 (target, expected, desired) // Hx [i] = t #define GB_HX_WRITE(i,t) \ Hx [i] = t // Cx [p] = Hx [i] #define GB_CIJ_GATHER(p,i) \ Cx [p] = Hx [i] // Cx [p] += Hx [i] #define GB_CIJ_GATHER_UPDATE(p,i) \ Cx [p] *= Hx [i] // Hx [i] += t #define GB_HX_UPDATE(i,t) \ Hx [i] *= t // memcpy (&(Cx [p]), &(Hx [i]), len) #define GB_CIJ_MEMCPY(p,i,len) \ memcpy (Cx +(p), Hx +(i), (len) * sizeof(uint32_t)); // disable this semiring and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_DIV || GxB_NO_UINT32 || GxB_NO_TIMES_UINT32 || GxB_NO_DIV_UINT32 || GxB_NO_TIMES_DIV_UINT32) //------------------------------------------------------------------------------ // GB_Adot2B: C=A'*B, C<M>=A'*B, or C<!M>=A'*B: dot product method, C is bitmap //------------------------------------------------------------------------------ // if A_not_transposed is true, then C=A*B is computed where A is bitmap or full GrB_Info GB (_Adot2B__times_div_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_comp, const bool Mask_struct, const bool A_not_transposed, const GrB_Matrix A, int64_t *restrict A_slice, const GrB_Matrix B, int64_t *restrict B_slice, int nthreads, int naslice, int nbslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_AxB_dot2_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // GB_Adot3B: C<M>=A'*B: masked dot product, C is sparse or hyper 
//------------------------------------------------------------------------------ GrB_Info GB (_Adot3B__times_div_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const GB_task_struct *restrict TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_AxB_dot3_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // GB_Adot4B: C+=A'*B: dense dot product (not used for ANY_PAIR_ISO) //------------------------------------------------------------------------------ #if 1 GrB_Info GB (_Adot4B__times_div_uint32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict A_slice, int naslice, const GrB_Matrix B, int64_t *restrict B_slice, int nbslice, const int nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_AxB_dot4_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // GB_AsaxbitB: C=A*B, C<M>=A*B, C<!M>=A*B: saxpy method, C is bitmap/full //------------------------------------------------------------------------------ #include "GB_AxB_saxpy3_template.h" GrB_Info GB (_AsaxbitB__times_div_uint32) ( GrB_Matrix C, // bitmap or full const GrB_Matrix M, const bool Mask_comp, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_AxB_saxpy_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // GB_Asaxpy4B: C += A*B when C is full //------------------------------------------------------------------------------ #if 1 GrB_Info GB (_Asaxpy4B__times_div_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int ntasks, const int nthreads, const int nfine_tasks_per_vector, const bool use_coarse_tasks, const bool use_atomics, const int64_t *A_slice, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_AxB_saxpy4_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // GB_Asaxpy3B: C=A*B, C<M>=A*B, C<!M>=A*B: saxpy method (Gustavson + Hash) //------------------------------------------------------------------------------ GrB_Info GB (_Asaxpy3B__times_div_uint32) ( GrB_Matrix C, // C<any M>=A*B, C sparse or hypersparse const GrB_Matrix M, const bool Mask_comp, const bool Mask_struct, const bool M_in_place, const GrB_Matrix A, const GrB_Matrix B, GB_saxpy3task_struct *restrict SaxpyTasks, const int ntasks, const int nfine, const int nthreads, const int do_sort, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else ASSERT (GB_IS_SPARSE (C) || GB_IS_HYPERSPARSE (C)) ; if (M == NULL) { // C = A*B, no mask return (GB (_Asaxpy3B_noM__times_div_uint32) (C, A, B, SaxpyTasks, ntasks, nfine, nthreads, do_sort, Context)) ; } else if (!Mask_comp) { // C<M> = A*B return (GB (_Asaxpy3B_M__times_div_uint32) (C, M, Mask_struct, M_in_place, A, B, SaxpyTasks, ntasks, nfine, nthreads, do_sort, Context)) ; } else { // C<!M> = A*B return (GB (_Asaxpy3B_notM__times_div_uint32) (C, M, Mask_struct, M_in_place, A, B, SaxpyTasks, ntasks, nfine, nthreads, do_sort, Context)) ; } #endif } //------------------------------------------------------------------------------ // GB_Asaxpy3B_M: C<M>=A*Bi: saxpy method (Gustavson + Hash) 
//------------------------------------------------------------------------------ #if ( !GB_DISABLE ) GrB_Info GB (_Asaxpy3B_M__times_div_uint32) ( GrB_Matrix C, // C<M>=A*B, C sparse or hypersparse const GrB_Matrix M, const bool Mask_struct, const bool M_in_place, const GrB_Matrix A, const GrB_Matrix B, GB_saxpy3task_struct *restrict SaxpyTasks, const int ntasks, const int nfine, const int nthreads, const int do_sort, GB_Context Context ) { if (GB_IS_SPARSE (A) && GB_IS_SPARSE (B)) { // both A and B are sparse #define GB_META16 #define GB_NO_MASK 0 #define GB_MASK_COMP 0 #define GB_A_IS_SPARSE 1 #define GB_A_IS_HYPER 0 #define GB_A_IS_BITMAP 0 #define GB_A_IS_FULL 0 #define GB_B_IS_SPARSE 1 #define GB_B_IS_HYPER 0 #define GB_B_IS_BITMAP 0 #define GB_B_IS_FULL 0 #include "GB_meta16_definitions.h" #include "GB_AxB_saxpy3_template.c" } else { // general case #undef GB_META16 #define GB_NO_MASK 0 #define GB_MASK_COMP 0 #include "GB_meta16_definitions.h" #include "GB_AxB_saxpy3_template.c" } return (GrB_SUCCESS) ; } #endif //------------------------------------------------------------------------------ //GB_Asaxpy3B_noM: C=A*B: saxpy method (Gustavson + Hash) //------------------------------------------------------------------------------ #if ( !GB_DISABLE ) GrB_Info GB (_Asaxpy3B_noM__times_div_uint32) ( GrB_Matrix C, // C=A*B, C sparse or hypersparse const GrB_Matrix A, const GrB_Matrix B, GB_saxpy3task_struct *restrict SaxpyTasks, const int ntasks, const int nfine, const int nthreads, const int do_sort, GB_Context Context ) { if (GB_IS_SPARSE (A) && GB_IS_SPARSE (B)) { // both A and B are sparse #define GB_META16 #define GB_NO_MASK 1 #define GB_MASK_COMP 0 #define GB_A_IS_SPARSE 1 #define GB_A_IS_HYPER 0 #define GB_A_IS_BITMAP 0 #define GB_A_IS_FULL 0 #define GB_B_IS_SPARSE 1 #define GB_B_IS_HYPER 0 #define GB_B_IS_BITMAP 0 #define GB_B_IS_FULL 0 #include "GB_meta16_definitions.h" #include "GB_AxB_saxpy3_template.c" } else { // general case #undef GB_META16 #define GB_NO_MASK 1 #define GB_MASK_COMP 0 #include "GB_meta16_definitions.h" #include "GB_AxB_saxpy3_template.c" } return (GrB_SUCCESS) ; } #endif //------------------------------------------------------------------------------ //GB_Asaxpy3B_notM: C<!M>=A*B: saxpy method (Gustavson + Hash) //------------------------------------------------------------------------------ #if ( !GB_DISABLE ) GrB_Info GB (_Asaxpy3B_notM__times_div_uint32) ( GrB_Matrix C, // C<!M>=A*B, C sparse or hypersparse const GrB_Matrix M, const bool Mask_struct, const bool M_in_place, const GrB_Matrix A, const GrB_Matrix B, GB_saxpy3task_struct *restrict SaxpyTasks, const int ntasks, const int nfine, const int nthreads, const int do_sort, GB_Context Context ) { if (GB_IS_SPARSE (A) && GB_IS_SPARSE (B)) { // both A and B are sparse #define GB_META16 #define GB_NO_MASK 0 #define GB_MASK_COMP 1 #define GB_A_IS_SPARSE 1 #define GB_A_IS_HYPER 0 #define GB_A_IS_BITMAP 0 #define GB_A_IS_FULL 0 #define GB_B_IS_SPARSE 1 #define GB_B_IS_HYPER 0 #define GB_B_IS_BITMAP 0 #define GB_B_IS_FULL 0 #include "GB_meta16_definitions.h" #include "GB_AxB_saxpy3_template.c" } else { // general case #undef GB_META16 #define GB_NO_MASK 0 #define GB_MASK_COMP 1 #include "GB_meta16_definitions.h" #include "GB_AxB_saxpy3_template.c" } return (GrB_SUCCESS) ; } #endif #endif
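/*
 * Scalar sketch (example only, not part of the generated file): what a single
 * C(i,j) dot product computes under the TIMES_DIV_UINT32 semiring defined
 * above. The multiply is unsigned integer division (GB_MULT), the monoid is
 * TIMES with identity 1 (GB_IDENTITY), and the terminal value 0 allows an
 * early exit (GB_DOT_TERMINAL). The idiv() helper is an assumption standing
 * in for GB_IDIV_UNSIGNED(x, y, 32), including its guarded divide-by-zero.
 */
#include <stdint.h>

static uint32_t idiv(uint32_t x, uint32_t y)
{
    return (y == 0) ? UINT32_MAX : (x / y);   /* stand-in for GB_IDIV_UNSIGNED */
}

static uint32_t times_div_dot(const uint32_t *a, const uint32_t *b, int n)
{
    uint32_t cij = 1;                         /* GB_IDENTITY */
    for (int k = 0; k < n; k++)
    {
        uint32_t z = idiv(a[k], b[k]);        /* GB_MULT */
        cij *= z;                             /* TIMES monoid update */
        if (cij == 0) break;                  /* GB_DOT_TERMINAL */
    }
    return cij;
}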
998341.c
/* * Copyright (c) 2021 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "softbus_utils.h" #include "securec.h" #include "softbus_adapter_crypto.h" #include "softbus_adapter_mem.h" #include "softbus_adapter_timer.h" #include "softbus_common.h" #include "softbus_def.h" #include "softbus_errcode.h" #include "softbus_log.h" #include "softbus_type_def.h" #define MAC_BIT_ZERO 0 #define MAC_BIT_ONE 1 #define MAC_BIT_TWO 2 #define MAC_BIT_THREE 3 #define MAC_BIT_FOUR 4 #define MAC_BIT_FIVE 5 #define BT_ADDR_LEN 6 static void *g_timerId = NULL; static TimerFunCallback g_timerFunList[SOFTBUS_MAX_TIMER_FUN_NUM] = {0}; SoftBusList *CreateSoftBusList(void) { pthread_mutexattr_t attr; SoftBusList *list = (SoftBusList *)SoftBusMalloc(sizeof(SoftBusList)); if (list == NULL) { SoftBusLog(SOFTBUS_LOG_COMM, SOFTBUS_LOG_ERROR, "malloc failed"); return NULL; } (void)memset_s(list, sizeof(SoftBusList), 0, sizeof(SoftBusList)); pthread_mutexattr_init(&attr); pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); if (pthread_mutex_init(&list->lock, &attr) != 0) { SoftBusLog(SOFTBUS_LOG_COMM, SOFTBUS_LOG_ERROR, "init lock failed"); SoftBusFree(list); return NULL; } ListInit(&list->list); return list; } void DestroySoftBusList(SoftBusList *list) { ListDelInit(&list->list); pthread_mutex_destroy(&list->lock); SoftBusFree(list); return; } int32_t RegisterTimeoutCallback(int32_t timerFunId, TimerFunCallback callback) { if (callback == NULL || timerFunId >= SOFTBUS_MAX_TIMER_FUN_NUM || timerFunId < SOFTBUS_CONN_TIMER_FUN) { return SOFTBUS_ERR; } if (g_timerFunList[timerFunId] != NULL) { return SOFTBUS_OK; } g_timerFunList[timerFunId] = callback; return SOFTBUS_OK; } static void HandleTimeoutFun(void) { int32_t i; for (i = 0; i < SOFTBUS_MAX_TIMER_FUN_NUM; i++) { if (g_timerFunList[i] != NULL) { g_timerFunList[i](); } } } int32_t SoftBusTimerInit(void) { if (g_timerId != NULL) { return SOFTBUS_OK; } g_timerId = SoftBusCreateTimer(&g_timerId, (void *)HandleTimeoutFun, TIMER_TYPE_PERIOD); if (SoftBusStartTimer(g_timerId, TIMER_TIMEOUT) != SOFTBUS_OK) { SoftBusLog(SOFTBUS_LOG_COMM, SOFTBUS_LOG_ERROR, "start timer failed."); (void)SoftBusDeleteTimer(g_timerId); g_timerId = NULL; return SOFTBUS_ERR; } return SOFTBUS_OK; } void SoftBusTimerDeInit(void) { if (g_timerId != NULL) { (void)SoftBusDeleteTimer(g_timerId); g_timerId = NULL; } } int32_t ConvertHexStringToBytes(unsigned char *outBuf, uint32_t outBufLen, const char *inBuf, int32_t inLen) { (void)outBufLen; if ((outBuf == NULL) || (inBuf == NULL) || (inLen % HEXIFY_UNIT_LEN != 0)) { SoftBusLog(SOFTBUS_LOG_COMM, SOFTBUS_LOG_ERROR, "invalid param"); return SOFTBUS_ERR; } uint32_t outLen = UN_HEXIFY_LEN(inLen); uint32_t i = 0; while (i < outLen) { unsigned char c = *inBuf++; if ((c >= '0') && (c <= '9')) { c -= '0'; } else if ((c >= 'a') && (c <= 'f')) { c -= 'a' - DEC_MAX_NUM; } else if ((c >= 'A') && (c <= 'F')) { c -= 'A' - DEC_MAX_NUM; } else { SoftBusLog(SOFTBUS_LOG_COMM, SOFTBUS_LOG_ERROR, "HexToString Error! 
%c", c); return SOFTBUS_ERR; } unsigned char c2 = *inBuf++; if ((c2 >= '0') && (c2 <= '9')) { c2 -= '0'; } else if ((c2 >= 'a') && (c2 <= 'f')) { c2 -= 'a' - DEC_MAX_NUM; } else if ((c2 >= 'A') && (c2 <= 'F')) { c2 -= 'A' - DEC_MAX_NUM; } else { SoftBusLog(SOFTBUS_LOG_COMM, SOFTBUS_LOG_ERROR, "HexToString Error! %c2", c2); return SOFTBUS_ERR; } *outBuf++ = (c << HEX_MAX_BIT_NUM) | c2; i++; } return SOFTBUS_OK; } int32_t ConvertBytesToHexString(char *outBuf, uint32_t outBufLen, const unsigned char *inBuf, int32_t inLen) { if ((outBuf == NULL) || (inBuf == NULL) || (outBufLen < (uint32_t)HEXIFY_LEN(inLen))) { return SOFTBUS_ERR; } while (inLen > 0) { unsigned char h = *inBuf / HEX_MAX_NUM; unsigned char l = *inBuf % HEX_MAX_NUM; if (h < DEC_MAX_NUM) { *outBuf++ = '0' + h; } else { *outBuf++ = 'a' + h - DEC_MAX_NUM; } if (l < DEC_MAX_NUM) { *outBuf++ = '0' + l; } else { *outBuf++ = 'a' + l - DEC_MAX_NUM; } ++inBuf; inLen--; } return SOFTBUS_OK; } int32_t GenerateRandomStr(char *str, uint32_t len) { if ((str == NULL) || (len < HEXIFY_UNIT_LEN)) { return SOFTBUS_INVALID_PARAM; } uint32_t hexLen = len / HEXIFY_UNIT_LEN; unsigned char *hexAuthId = (unsigned char *)SoftBusMalloc(hexLen); if (hexAuthId == NULL) { return SOFTBUS_MEM_ERR; } (void)memset_s(hexAuthId, hexLen, 0, hexLen); if (SoftBusGenerateRandomArray(hexAuthId, hexLen) != SOFTBUS_OK) { SoftBusFree(hexAuthId); return SOFTBUS_ERR; } if (ConvertBytesToHexString(str, len, hexAuthId, hexLen) != SOFTBUS_OK) { SoftBusFree(hexAuthId); return SOFTBUS_ERR; } SoftBusFree(hexAuthId); return SOFTBUS_OK; } bool IsValidString(const char *input, uint32_t maxLen) { if (input == NULL) { return false; } uint32_t len = strlen(input); if ((len == 0) || (len >= maxLen)) { return false; } return true; } int32_t ConvertBtMacToBinary(const char *strMac, int32_t strMacLen, uint8_t *binMac, int32_t binMacLen) { int32_t ret; if (strMac == NULL || strMacLen < BT_MAC_LEN || binMac == NULL || binMacLen < BT_ADDR_LEN) { return SOFTBUS_INVALID_PARAM; } ret = sscanf_s(strMac, "%02x:%02x:%02x:%02x:%02x:%02x", &binMac[MAC_BIT_ZERO], &binMac[MAC_BIT_ONE], &binMac[MAC_BIT_TWO], &binMac[MAC_BIT_THREE], &binMac[MAC_BIT_FOUR], &binMac[MAC_BIT_FIVE]); if (ret < 0) { return SOFTBUS_ERR; } return SOFTBUS_OK; } int32_t ConvertBtMacToStr(char *strMac, int32_t strMacLen, const uint8_t *binMac, int32_t binMacLen) { int32_t ret; if (strMac == NULL || strMacLen < BT_MAC_LEN || binMac == NULL || binMacLen < BT_ADDR_LEN) { return SOFTBUS_INVALID_PARAM; } ret = snprintf_s(strMac, strMacLen, strMacLen - 1, "%02x:%02x:%02x:%02x:%02x:%02x", binMac[MAC_BIT_ZERO], binMac[MAC_BIT_ONE], binMac[MAC_BIT_TWO], binMac[MAC_BIT_THREE], binMac[MAC_BIT_FOUR], binMac[MAC_BIT_FIVE]); if (ret < 0) { return SOFTBUS_ERR; } return SOFTBUS_OK; }
325774.c
/* * SPDX-FileCopyrightText: 2016-2022 Espressif Systems (Shanghai) CO LTD * * SPDX-License-Identifier: Apache-2.0 */ #include <esp_types.h> #include <stdlib.h> #include <ctype.h> #include <string.h> #include "sdkconfig.h" #include "esp_intr_alloc.h" #include "esp_log.h" #include "esp_pm.h" #include "esp_check.h" #include "sys/lock.h" #include "freertos/FreeRTOS.h" #include "freertos/semphr.h" #include "freertos/timers.h" #include "freertos/ringbuf.h" #include "esp_private/periph_ctrl.h" #include "driver/gpio.h" #include "driver/adc.h" #include "hal/adc_types.h" #include "hal/adc_hal.h" #include "hal/dma_types.h" //For calibration #if CONFIG_IDF_TARGET_ESP32S2 #include "esp_efuse_rtc_table.h" #elif SOC_ADC_CALIBRATION_V1_SUPPORTED #include "esp_efuse_rtc_calib.h" #endif //For DMA #if SOC_GDMA_SUPPORTED #include "esp_private/gdma.h" #elif CONFIG_IDF_TARGET_ESP32S2 #include "hal/spi_types.h" #include "driver/spi_common_internal.h" #elif CONFIG_IDF_TARGET_ESP32 #include "driver/i2s.h" #include "hal/i2s_types.h" #include "soc/i2s_periph.h" #include "esp_private/i2s_platform.h" #endif static const char *ADC_TAG = "ADC"; #define ADC_GET_IO_NUM(periph, channel) (adc_channel_io_map[periph][channel]) extern portMUX_TYPE rtc_spinlock; //TODO: Will be placed in the appropriate position after the rtc module is finished. #define ADC_ENTER_CRITICAL() portENTER_CRITICAL(&rtc_spinlock) #define ADC_EXIT_CRITICAL() portEXIT_CRITICAL(&rtc_spinlock) /** * 1. sar_adc1_lock: this mutex lock is to protect the SARADC1 module. * 2. sar_adc2_lock: this mutex lock is to protect the SARADC2 module. * 3. adc_reg_lock: this spin lock is to protect the shared registers used by ADC1 / ADC2 single read mode. */ static _lock_t sar_adc1_lock; #define SAR_ADC1_LOCK_ACQUIRE() _lock_acquire(&sar_adc1_lock) #define SAR_ADC1_LOCK_RELEASE() _lock_release(&sar_adc1_lock) static _lock_t sar_adc2_lock; #define SAR_ADC2_LOCK_ACQUIRE() _lock_acquire(&sar_adc2_lock) #define SAR_ADC2_LOCK_RELEASE() _lock_release(&sar_adc2_lock) portMUX_TYPE adc_reg_lock = portMUX_INITIALIZER_UNLOCKED; #define ADC_REG_LOCK_ENTER() portENTER_CRITICAL(&adc_reg_lock) #define ADC_REG_LOCK_EXIT() portEXIT_CRITICAL(&adc_reg_lock) #define INTERNAL_BUF_NUM 5 /*--------------------------------------------------------------- Digital Controller Context ---------------------------------------------------------------*/ typedef struct adc_digi_context_t { uint8_t *rx_dma_buf; //dma buffer adc_hal_dma_ctx_t hal; //hal context #if SOC_GDMA_SUPPORTED gdma_channel_handle_t rx_dma_channel; //dma rx channel handle #elif CONFIG_IDF_TARGET_ESP32S2 spi_host_device_t spi_host; //ADC uses this SPI DMA intr_handle_t intr_hdl; //Interrupt handler #elif CONFIG_IDF_TARGET_ESP32 i2s_port_t i2s_host; //ADC uses this I2S DMA intr_handle_t intr_hdl; //Interrupt handler #endif RingbufHandle_t ringbuf_hdl; //RX ringbuffer handler intptr_t rx_eof_desc_addr; //eof descriptor address of RX channel bool ringbuf_overflow_flag; //1: ringbuffer overflow bool driver_start_flag; //1: driver is started; 0: driver is stoped bool use_adc1; //1: ADC unit1 will be used; 0: ADC unit1 won't be used. bool use_adc2; //1: ADC unit2 will be used; 0: ADC unit2 won't be used. This determines whether to acquire sar_adc2_mutex lock or not. adc_atten_t adc1_atten; //Attenuation for ADC1. On this chip each ADC can only support one attenuation. adc_atten_t adc2_atten; //Attenuation for ADC2. On this chip each ADC can only support one attenuation. 
adc_hal_digi_ctrlr_cfg_t hal_digi_ctrlr_cfg; //Hal digital controller configuration esp_pm_lock_handle_t pm_lock; //For power management } adc_digi_context_t; static adc_digi_context_t *s_adc_digi_ctx = NULL; #ifdef CONFIG_PM_ENABLE //Only for deprecated API extern esp_pm_lock_handle_t adc_digi_arbiter_lock; #endif //CONFIG_PM_ENABLE #if SOC_ADC_CALIBRATION_V1_SUPPORTED uint32_t adc_get_calibration_offset(adc_unit_t adc_n, adc_channel_t chan, adc_atten_t atten); #endif /*--------------------------------------------------------------- ADC Continuous Read Mode (via DMA) ---------------------------------------------------------------*/ //Function to address transaction static bool s_adc_dma_intr(adc_digi_context_t *adc_digi_ctx); #if SOC_GDMA_SUPPORTED static bool adc_dma_in_suc_eof_callback(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data); #else static void adc_dma_intr_handler(void *arg); #endif static int8_t adc_digi_get_io_num(adc_unit_t adc_unit, uint8_t adc_channel) { assert(adc_unit <= SOC_ADC_PERIPH_NUM); uint8_t adc_n = (adc_unit == ADC_UNIT_1) ? 0 : 1; return adc_channel_io_map[adc_n][adc_channel]; } static esp_err_t adc_digi_gpio_init(adc_unit_t adc_unit, uint16_t channel_mask) { esp_err_t ret = ESP_OK; uint64_t gpio_mask = 0; uint32_t n = 0; int8_t io = 0; while (channel_mask) { if (channel_mask & 0x1) { io = adc_digi_get_io_num(adc_unit, n); if (io < 0) { return ESP_ERR_INVALID_ARG; } gpio_mask |= BIT64(io); } channel_mask = channel_mask >> 1; n++; } gpio_config_t cfg = { .pin_bit_mask = gpio_mask, .mode = GPIO_MODE_DISABLE, }; ret = gpio_config(&cfg); return ret; } esp_err_t adc_digi_initialize(const adc_digi_init_config_t *init_config) { esp_err_t ret = ESP_OK; s_adc_digi_ctx = calloc(1, sizeof(adc_digi_context_t)); if (s_adc_digi_ctx == NULL) { ret = ESP_ERR_NO_MEM; goto cleanup; } //ringbuffer s_adc_digi_ctx->ringbuf_hdl = xRingbufferCreate(init_config->max_store_buf_size, RINGBUF_TYPE_BYTEBUF); if (!s_adc_digi_ctx->ringbuf_hdl) { ret = ESP_ERR_NO_MEM; goto cleanup; } //malloc internal buffer used by DMA s_adc_digi_ctx->rx_dma_buf = heap_caps_calloc(1, init_config->conv_num_each_intr * INTERNAL_BUF_NUM, MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA); if (!s_adc_digi_ctx->rx_dma_buf) { ret = ESP_ERR_NO_MEM; goto cleanup; } //malloc dma descriptor s_adc_digi_ctx->hal.rx_desc = heap_caps_calloc(1, (sizeof(dma_descriptor_t)) * INTERNAL_BUF_NUM, MALLOC_CAP_DMA); if (!s_adc_digi_ctx->hal.rx_desc) { ret = ESP_ERR_NO_MEM; goto cleanup; } //malloc pattern table s_adc_digi_ctx->hal_digi_ctrlr_cfg.adc_pattern = calloc(1, SOC_ADC_PATT_LEN_MAX * sizeof(adc_digi_pattern_config_t)); if (!s_adc_digi_ctx->hal_digi_ctrlr_cfg.adc_pattern) { ret = ESP_ERR_NO_MEM; goto cleanup; } #if CONFIG_PM_ENABLE ret = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "adc_dma", &s_adc_digi_ctx->pm_lock); if (ret != ESP_OK) { goto cleanup; } #endif //CONFIG_PM_ENABLE //init gpio pins if (init_config->adc1_chan_mask) { ret = adc_digi_gpio_init(ADC_UNIT_1, init_config->adc1_chan_mask); if (ret != ESP_OK) { goto cleanup; } } if (init_config->adc2_chan_mask) { ret = adc_digi_gpio_init(ADC_UNIT_2, init_config->adc2_chan_mask); if (ret != ESP_OK) { goto cleanup; } } #if SOC_GDMA_SUPPORTED //alloc rx gdma channel gdma_channel_alloc_config_t rx_alloc_config = { .direction = GDMA_CHANNEL_DIRECTION_RX, }; ret = gdma_new_channel(&rx_alloc_config, &s_adc_digi_ctx->rx_dma_channel); if (ret != ESP_OK) { goto cleanup; } gdma_connect(s_adc_digi_ctx->rx_dma_channel, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_ADC, 0)); 
gdma_strategy_config_t strategy_config = { .auto_update_desc = true, .owner_check = true }; gdma_apply_strategy(s_adc_digi_ctx->rx_dma_channel, &strategy_config); gdma_rx_event_callbacks_t cbs = { .on_recv_eof = adc_dma_in_suc_eof_callback }; gdma_register_rx_event_callbacks(s_adc_digi_ctx->rx_dma_channel, &cbs, s_adc_digi_ctx); int dma_chan; gdma_get_channel_id(s_adc_digi_ctx->rx_dma_channel, &dma_chan); #elif CONFIG_IDF_TARGET_ESP32S2 //ADC utilises SPI3 DMA on ESP32S2 bool spi_success = false; uint32_t dma_chan = 0; spi_success = spicommon_periph_claim(SPI3_HOST, "adc"); ret = spicommon_dma_chan_alloc(SPI3_HOST, SPI_DMA_CH_AUTO, &dma_chan, &dma_chan); if (ret == ESP_OK) { s_adc_digi_ctx->spi_host = SPI3_HOST; } if (!spi_success || (s_adc_digi_ctx->spi_host != SPI3_HOST)) { goto cleanup; } ret = esp_intr_alloc(spicommon_irqdma_source_for_host(s_adc_digi_ctx->spi_host), 0, adc_dma_intr_handler, (void *)s_adc_digi_ctx, &s_adc_digi_ctx->intr_hdl); if (ret != ESP_OK) { goto cleanup; } #elif CONFIG_IDF_TARGET_ESP32 //ADC utilises I2S0 DMA on ESP32 uint32_t dma_chan = 0; ret = i2s_priv_register_object(&s_adc_digi_ctx, I2S_NUM_0); if (ret != ESP_OK) { goto cleanup; } s_adc_digi_ctx->i2s_host = I2S_NUM_0; ret = esp_intr_alloc(i2s_periph_signal[s_adc_digi_ctx->i2s_host].irq, 0, adc_dma_intr_handler, (void *)s_adc_digi_ctx, &s_adc_digi_ctx->intr_hdl); if (ret != ESP_OK) { goto cleanup; } #endif adc_hal_dma_config_t config = { #if SOC_GDMA_SUPPORTED .dev = (void *)GDMA_LL_GET_HW(0), #elif CONFIG_IDF_TARGET_ESP32S2 .dev = (void *)SPI_LL_GET_HW(s_adc_digi_ctx->spi_host), #elif CONFIG_IDF_TARGET_ESP32 .dev = (void *)I2S_LL_GET_HW(s_adc_digi_ctx->i2s_host), #endif .desc_max_num = INTERNAL_BUF_NUM, .dma_chan = dma_chan, .eof_num = init_config->conv_num_each_intr / ADC_HAL_DATA_LEN_PER_CONV }; adc_hal_dma_ctx_config(&s_adc_digi_ctx->hal, &config); //enable ADC digital part periph_module_enable(PERIPH_SARADC_MODULE); //reset ADC digital part periph_module_reset(PERIPH_SARADC_MODULE); #if SOC_ADC_CALIBRATION_V1_SUPPORTED adc_hal_calibration_init(ADC_UNIT_1); adc_hal_calibration_init(ADC_UNIT_2); #endif //#if SOC_ADC_CALIBRATION_V1_SUPPORTED return ret; cleanup: adc_digi_deinitialize(); return ret; } #if SOC_GDMA_SUPPORTED static IRAM_ATTR bool adc_dma_in_suc_eof_callback(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data) { assert(event_data); s_adc_digi_ctx->rx_eof_desc_addr = event_data->rx_eof_desc_addr; return s_adc_dma_intr(user_data); } #else static IRAM_ATTR void adc_dma_intr_handler(void *arg) { adc_digi_context_t *ctx = (adc_digi_context_t *)arg; bool need_yield = false; bool conversion_finish = adc_hal_check_event(&ctx->hal, ADC_HAL_DMA_INTR_MASK); if (conversion_finish) { adc_hal_digi_clr_intr(&s_adc_digi_ctx->hal, ADC_HAL_DMA_INTR_MASK); intptr_t desc_addr = adc_hal_get_desc_addr(&ctx->hal); ctx->rx_eof_desc_addr = desc_addr; need_yield = s_adc_dma_intr(ctx); } if (need_yield) { portYIELD_FROM_ISR(); } } #endif static IRAM_ATTR bool s_adc_dma_intr(adc_digi_context_t *adc_digi_ctx) { portBASE_TYPE taskAwoken = 0; BaseType_t ret; adc_hal_dma_desc_status_t status = false; dma_descriptor_t *current_desc = NULL; while (1) { status = adc_hal_get_reading_result(&adc_digi_ctx->hal, adc_digi_ctx->rx_eof_desc_addr, &current_desc); if (status != ADC_HAL_DMA_DESC_VALID) { break; } ret = xRingbufferSendFromISR(adc_digi_ctx->ringbuf_hdl, current_desc->buffer, current_desc->dw0.length, &taskAwoken); if (ret == pdFALSE) { //ringbuffer overflow adc_digi_ctx->ringbuf_overflow_flag = 
1; } } if (status == ADC_HAL_DMA_DESC_NULL) { //start next turns of dma operation adc_hal_digi_start(&adc_digi_ctx->hal, adc_digi_ctx->rx_dma_buf); } return (taskAwoken == pdTRUE); } esp_err_t adc_digi_start(void) { if (s_adc_digi_ctx) { if (s_adc_digi_ctx->driver_start_flag != 0) { ESP_LOGE(ADC_TAG, "The driver is already started"); return ESP_ERR_INVALID_STATE; } adc_power_acquire(); //reset flags s_adc_digi_ctx->ringbuf_overflow_flag = 0; s_adc_digi_ctx->driver_start_flag = 1; if (s_adc_digi_ctx->use_adc1) { SAR_ADC1_LOCK_ACQUIRE(); } if (s_adc_digi_ctx->use_adc2) { SAR_ADC2_LOCK_ACQUIRE(); } #if CONFIG_PM_ENABLE // Lock APB frequency while ADC driver is in use esp_pm_lock_acquire(s_adc_digi_ctx->pm_lock); #endif #if SOC_ADC_CALIBRATION_V1_SUPPORTED if (s_adc_digi_ctx->use_adc1) { uint32_t cal_val = adc_get_calibration_offset(ADC_UNIT_1, ADC_CHANNEL_MAX, s_adc_digi_ctx->adc1_atten); adc_hal_set_calibration_param(ADC_UNIT_1, cal_val); } if (s_adc_digi_ctx->use_adc2) { uint32_t cal_val = adc_get_calibration_offset(ADC_UNIT_2, ADC_CHANNEL_MAX, s_adc_digi_ctx->adc2_atten); adc_hal_set_calibration_param(ADC_UNIT_2, cal_val); } #endif //#if SOC_ADC_CALIBRATION_V1_SUPPORTED #if SOC_ADC_ARBITER_SUPPORTED adc_arbiter_t config = ADC_ARBITER_CONFIG_DEFAULT(); adc_hal_arbiter_config(&config); #endif //#if SOC_ADC_ARBITER_SUPPORTED adc_hal_set_controller(ADC_UNIT_1, ADC_HAL_CONTINUOUS_READ_MODE); adc_hal_set_controller(ADC_UNIT_2, ADC_HAL_CONTINUOUS_READ_MODE); adc_hal_digi_init(&s_adc_digi_ctx->hal); adc_hal_digi_controller_config(&s_adc_digi_ctx->hal, &s_adc_digi_ctx->hal_digi_ctrlr_cfg); //start conversion adc_hal_digi_start(&s_adc_digi_ctx->hal, s_adc_digi_ctx->rx_dma_buf); } #if CONFIG_IDF_TARGET_ESP32S2 //For being compatible with the deprecated behaviour else { ESP_LOGE(ADC_TAG, "API used without driver initialization before. The following behaviour is deprecated!!"); #ifdef CONFIG_PM_ENABLE ESP_RETURN_ON_FALSE((adc_digi_arbiter_lock), ESP_FAIL, ADC_TAG, "Should start after call `adc_digi_controller_config`"); esp_pm_lock_acquire(adc_digi_arbiter_lock); #endif ADC_ENTER_CRITICAL(); adc_ll_digi_dma_enable(); adc_ll_digi_trigger_enable(); ADC_EXIT_CRITICAL(); } #endif //#if CONFIG_IDF_TARGET_ESP32S2 return ESP_OK; } esp_err_t adc_digi_stop(void) { if (s_adc_digi_ctx) { if (s_adc_digi_ctx->driver_start_flag != 1) { ESP_LOGE(ADC_TAG, "The driver is already stopped"); return ESP_ERR_INVALID_STATE; } s_adc_digi_ctx->driver_start_flag = 0; //disable the in suc eof intrrupt adc_hal_digi_dis_intr(&s_adc_digi_ctx->hal, ADC_HAL_DMA_INTR_MASK); //clear the in suc eof interrupt adc_hal_digi_clr_intr(&s_adc_digi_ctx->hal, ADC_HAL_DMA_INTR_MASK); //stop ADC adc_hal_digi_stop(&s_adc_digi_ctx->hal); adc_hal_digi_deinit(&s_adc_digi_ctx->hal); #if CONFIG_PM_ENABLE if (s_adc_digi_ctx->pm_lock) { esp_pm_lock_release(s_adc_digi_ctx->pm_lock); } #endif //CONFIG_PM_ENABLE if (s_adc_digi_ctx->use_adc1) { SAR_ADC1_LOCK_RELEASE(); } if (s_adc_digi_ctx->use_adc2) { SAR_ADC2_LOCK_RELEASE(); } adc_power_release(); } #if CONFIG_IDF_TARGET_ESP32S2 else { //For being compatible with the deprecated behaviour ESP_LOGE(ADC_TAG, "API used without driver initialization before. 
The following behaviour is deprecated!!"); #ifdef CONFIG_PM_ENABLE if (adc_digi_arbiter_lock) { esp_pm_lock_release(adc_digi_arbiter_lock); } #endif ADC_ENTER_CRITICAL(); adc_ll_digi_trigger_disable(); adc_ll_digi_dma_disable(); ADC_EXIT_CRITICAL(); } #endif //#if CONFIG_IDF_TARGET_ESP32S2 return ESP_OK; } esp_err_t adc_digi_read_bytes(uint8_t *buf, uint32_t length_max, uint32_t *out_length, uint32_t timeout_ms) { TickType_t ticks_to_wait; esp_err_t ret = ESP_OK; uint8_t *data = NULL; size_t size = 0; ticks_to_wait = timeout_ms / portTICK_PERIOD_MS; if (timeout_ms == ADC_MAX_DELAY) { ticks_to_wait = portMAX_DELAY; } data = xRingbufferReceiveUpTo(s_adc_digi_ctx->ringbuf_hdl, &size, ticks_to_wait, length_max); if (!data) { ESP_LOGV(ADC_TAG, "No data, increase timeout or reduce conv_num_each_intr"); ret = ESP_ERR_TIMEOUT; *out_length = 0; return ret; } memcpy(buf, data, size); vRingbufferReturnItem(s_adc_digi_ctx->ringbuf_hdl, data); assert((size % 4) == 0); *out_length = size; if (s_adc_digi_ctx->ringbuf_overflow_flag) { ret = ESP_ERR_INVALID_STATE; } return ret; } esp_err_t adc_digi_deinitialize(void) { if (!s_adc_digi_ctx) { return ESP_ERR_INVALID_STATE; } if (s_adc_digi_ctx->driver_start_flag != 0) { ESP_LOGE(ADC_TAG, "The driver is not stopped"); return ESP_ERR_INVALID_STATE; } if (s_adc_digi_ctx->ringbuf_hdl) { vRingbufferDelete(s_adc_digi_ctx->ringbuf_hdl); s_adc_digi_ctx->ringbuf_hdl = NULL; } #if CONFIG_PM_ENABLE if (s_adc_digi_ctx->pm_lock) { esp_pm_lock_delete(s_adc_digi_ctx->pm_lock); } #endif //CONFIG_PM_ENABLE free(s_adc_digi_ctx->rx_dma_buf); free(s_adc_digi_ctx->hal.rx_desc); free(s_adc_digi_ctx->hal_digi_ctrlr_cfg.adc_pattern); #if SOC_GDMA_SUPPORTED gdma_disconnect(s_adc_digi_ctx->rx_dma_channel); gdma_del_channel(s_adc_digi_ctx->rx_dma_channel); #elif CONFIG_IDF_TARGET_ESP32S2 esp_intr_free(s_adc_digi_ctx->intr_hdl); spicommon_dma_chan_free(s_adc_digi_ctx->spi_host); spicommon_periph_free(s_adc_digi_ctx->spi_host); #elif CONFIG_IDF_TARGET_ESP32 esp_intr_free(s_adc_digi_ctx->intr_hdl); i2s_priv_deregister_object(s_adc_digi_ctx->i2s_host); #endif free(s_adc_digi_ctx); s_adc_digi_ctx = NULL; periph_module_disable(PERIPH_SARADC_MODULE); return ESP_OK; } /*--------------------------------------------------------------- Digital controller setting ---------------------------------------------------------------*/ esp_err_t adc_digi_controller_configure(const adc_digi_configuration_t *config) { if (!s_adc_digi_ctx) { return ESP_ERR_INVALID_STATE; } //Pattern related check ESP_RETURN_ON_FALSE(config->pattern_num <= SOC_ADC_PATT_LEN_MAX, ESP_ERR_INVALID_ARG, ADC_TAG, "Max pattern num is %d", SOC_ADC_PATT_LEN_MAX); #if CONFIG_IDF_TARGET_ESP32 for (int i = 0; i < config->pattern_num; i++) { ESP_RETURN_ON_FALSE((config->adc_pattern[i].bit_width >= SOC_ADC_DIGI_MIN_BITWIDTH && config->adc_pattern->bit_width <= SOC_ADC_DIGI_MAX_BITWIDTH), ESP_ERR_INVALID_ARG, ADC_TAG, "ADC bitwidth not supported"); ESP_RETURN_ON_FALSE(config->adc_pattern[i].unit == 0, ESP_ERR_INVALID_ARG, ADC_TAG, "Only support using ADC1 DMA mode"); } #else for (int i = 0; i < config->pattern_num; i++) { ESP_RETURN_ON_FALSE((config->adc_pattern[i].bit_width == SOC_ADC_DIGI_MAX_BITWIDTH), ESP_ERR_INVALID_ARG, ADC_TAG, "ADC bitwidth not supported"); } #endif ESP_RETURN_ON_FALSE(config->sample_freq_hz <= SOC_ADC_SAMPLE_FREQ_THRES_HIGH && config->sample_freq_hz >= SOC_ADC_SAMPLE_FREQ_THRES_LOW, ESP_ERR_INVALID_ARG, ADC_TAG, "ADC sampling frequency out of range"); #if CONFIG_IDF_TARGET_ESP32 
ESP_RETURN_ON_FALSE(config->conv_limit_en == 1, ESP_ERR_INVALID_ARG, ADC_TAG, "`conv_limit_en` should be set to 1"); #endif #if CONFIG_IDF_TARGET_ESP32 ESP_RETURN_ON_FALSE(config->format == ADC_DIGI_OUTPUT_FORMAT_TYPE1, ESP_ERR_INVALID_ARG, ADC_TAG, "Please use type1"); #elif CONFIG_IDF_TARGET_ESP32S2 if (config->conv_mode == ADC_CONV_BOTH_UNIT || config->conv_mode == ADC_CONV_ALTER_UNIT) { ESP_RETURN_ON_FALSE(config->format == ADC_DIGI_OUTPUT_FORMAT_TYPE2, ESP_ERR_INVALID_ARG, ADC_TAG, "Please use type2"); } else if (config->conv_mode == ADC_CONV_SINGLE_UNIT_1 || config->conv_mode == ADC_CONV_SINGLE_UNIT_2) { ESP_RETURN_ON_FALSE(config->format == ADC_DIGI_OUTPUT_FORMAT_TYPE1, ESP_ERR_INVALID_ARG, ADC_TAG, "Please use type1"); } #else ESP_RETURN_ON_FALSE(config->format == ADC_DIGI_OUTPUT_FORMAT_TYPE2, ESP_ERR_INVALID_ARG, ADC_TAG, "Please use type2"); #endif s_adc_digi_ctx->hal_digi_ctrlr_cfg.conv_limit_en = config->conv_limit_en; s_adc_digi_ctx->hal_digi_ctrlr_cfg.conv_limit_num = config->conv_limit_num; s_adc_digi_ctx->hal_digi_ctrlr_cfg.adc_pattern_len = config->pattern_num; s_adc_digi_ctx->hal_digi_ctrlr_cfg.sample_freq_hz = config->sample_freq_hz; s_adc_digi_ctx->hal_digi_ctrlr_cfg.conv_mode = config->conv_mode; memcpy(s_adc_digi_ctx->hal_digi_ctrlr_cfg.adc_pattern, config->adc_pattern, config->pattern_num * sizeof(adc_digi_pattern_config_t)); const int atten_uninitialized = 999; s_adc_digi_ctx->adc1_atten = atten_uninitialized; s_adc_digi_ctx->adc2_atten = atten_uninitialized; s_adc_digi_ctx->use_adc1 = 0; s_adc_digi_ctx->use_adc2 = 0; for (int i = 0; i < config->pattern_num; i++) { const adc_digi_pattern_config_t *pat = &config->adc_pattern[i]; if (pat->unit == ADC_UNIT_1) { s_adc_digi_ctx->use_adc1 = 1; if (s_adc_digi_ctx->adc1_atten == atten_uninitialized) { s_adc_digi_ctx->adc1_atten = pat->atten; } else if (s_adc_digi_ctx->adc1_atten != pat->atten) { return ESP_ERR_INVALID_ARG; } } else if (pat->unit == ADC_UNIT_2) { //See whether ADC2 will be used or not. If yes, the ``sar_adc2_mutex`` should be acquired in the continuous read driver s_adc_digi_ctx->use_adc2 = 1; if (s_adc_digi_ctx->adc2_atten == atten_uninitialized) { s_adc_digi_ctx->adc2_atten = pat->atten; } else if (s_adc_digi_ctx->adc2_atten != pat->atten) { return ESP_ERR_INVALID_ARG; } } } return ESP_OK; } #if CONFIG_IDF_TARGET_ESP32C3 /*--------------------------------------------------------------- ADC Single Read Mode ---------------------------------------------------------------*/ static adc_atten_t s_atten1_single[ADC1_CHANNEL_MAX]; //Array saving attenuate of each channel of ADC1, used by single read API static adc_atten_t s_atten2_single[ADC2_CHANNEL_MAX]; //Array saving attenuate of each channel of ADC2, used by single read API esp_err_t adc_vref_to_gpio(adc_unit_t adc_unit, gpio_num_t gpio) { esp_err_t ret; uint32_t channel = ADC2_CHANNEL_MAX; if (adc_unit == ADC_UNIT_2) { for (int i = 0; i < ADC2_CHANNEL_MAX; i++) { if (gpio == ADC_GET_IO_NUM(ADC_UNIT_2, i)) { channel = i; break; } } if (channel == ADC2_CHANNEL_MAX) { return ESP_ERR_INVALID_ARG; } } adc_power_acquire(); if (adc_unit == ADC_UNIT_1) { ADC_ENTER_CRITICAL(); adc_hal_vref_output(ADC_UNIT_1, channel, true); ADC_EXIT_CRITICAL(); } else { //ADC_UNIT_2 ADC_ENTER_CRITICAL(); adc_hal_vref_output(ADC_UNIT_2, channel, true); ADC_EXIT_CRITICAL(); } ret = adc_digi_gpio_init(ADC_UNIT_2, BIT(channel)); return ret; } esp_err_t adc1_config_width(adc_bits_width_t width_bit) { //On ESP32C3, the data width is always 12-bits. 
if (width_bit != ADC_WIDTH_BIT_12) { return ESP_ERR_INVALID_ARG; } return ESP_OK; } esp_err_t adc1_config_channel_atten(adc1_channel_t channel, adc_atten_t atten) { ESP_RETURN_ON_FALSE(channel < SOC_ADC_CHANNEL_NUM(ADC_UNIT_1), ESP_ERR_INVALID_ARG, ADC_TAG, "ADC1 channel error"); ESP_RETURN_ON_FALSE((atten < ADC_ATTEN_MAX), ESP_ERR_INVALID_ARG, ADC_TAG, "ADC Atten Err"); esp_err_t ret = ESP_OK; s_atten1_single[channel] = atten; ret = adc_digi_gpio_init(ADC_UNIT_1, BIT(channel)); adc_hal_calibration_init(ADC_UNIT_1); return ret; } int adc1_get_raw(adc1_channel_t channel) { int raw_out = 0; periph_module_enable(PERIPH_SARADC_MODULE); adc_power_acquire(); SAR_ADC1_LOCK_ACQUIRE(); adc_atten_t atten = s_atten1_single[channel]; uint32_t cal_val = adc_get_calibration_offset(ADC_UNIT_1, channel, atten); adc_hal_set_calibration_param(ADC_UNIT_1, cal_val); ADC_REG_LOCK_ENTER(); adc_hal_set_atten(ADC_UNIT_2, channel, atten); adc_hal_convert(ADC_UNIT_1, channel, &raw_out); ADC_REG_LOCK_EXIT(); SAR_ADC1_LOCK_RELEASE(); adc_power_release(); periph_module_disable(PERIPH_SARADC_MODULE); return raw_out; } esp_err_t adc2_config_channel_atten(adc2_channel_t channel, adc_atten_t atten) { ESP_RETURN_ON_FALSE(channel < SOC_ADC_CHANNEL_NUM(ADC_UNIT_2), ESP_ERR_INVALID_ARG, ADC_TAG, "ADC2 channel error"); ESP_RETURN_ON_FALSE((atten <= ADC_ATTEN_11db), ESP_ERR_INVALID_ARG, ADC_TAG, "ADC2 Atten Err"); esp_err_t ret = ESP_OK; s_atten2_single[channel] = atten; ret = adc_digi_gpio_init(ADC_UNIT_2, BIT(channel)); adc_hal_calibration_init(ADC_UNIT_2); return ret; } esp_err_t adc2_get_raw(adc2_channel_t channel, adc_bits_width_t width_bit, int *raw_out) { //On ESP32C3, the data width is always 12-bits. if (width_bit != ADC_WIDTH_BIT_12) { return ESP_ERR_INVALID_ARG; } esp_err_t ret = ESP_OK; periph_module_enable(PERIPH_SARADC_MODULE); adc_power_acquire(); SAR_ADC2_LOCK_ACQUIRE(); adc_arbiter_t config = ADC_ARBITER_CONFIG_DEFAULT(); adc_hal_arbiter_config(&config); adc_atten_t atten = s_atten2_single[channel]; uint32_t cal_val = adc_get_calibration_offset(ADC_UNIT_2, channel, atten); adc_hal_set_calibration_param(ADC_UNIT_2, cal_val); ADC_REG_LOCK_ENTER(); adc_hal_set_atten(ADC_UNIT_2, channel, atten); ret = adc_hal_convert(ADC_UNIT_2, channel, raw_out); ADC_REG_LOCK_EXIT(); SAR_ADC2_LOCK_RELEASE(); adc_power_release(); periph_module_disable(PERIPH_SARADC_MODULE); return ret; } /*************************************/ /* Digital controller filter setting */ /*************************************/ esp_err_t adc_digi_filter_reset(adc_digi_filter_idx_t idx) { ADC_ENTER_CRITICAL(); adc_hal_digi_filter_reset(idx); ADC_EXIT_CRITICAL(); return ESP_OK; } esp_err_t adc_digi_filter_set_config(adc_digi_filter_idx_t idx, adc_digi_filter_t *config) { ADC_ENTER_CRITICAL(); adc_hal_digi_filter_set_factor(idx, config); ADC_EXIT_CRITICAL(); return ESP_OK; } esp_err_t adc_digi_filter_get_config(adc_digi_filter_idx_t idx, adc_digi_filter_t *config) { ADC_ENTER_CRITICAL(); adc_hal_digi_filter_get_factor(idx, config); ADC_EXIT_CRITICAL(); return ESP_OK; } esp_err_t adc_digi_filter_enable(adc_digi_filter_idx_t idx, bool enable) { ADC_ENTER_CRITICAL(); adc_hal_digi_filter_enable(idx, enable); ADC_EXIT_CRITICAL(); return ESP_OK; } /**************************************/ /* Digital controller monitor setting */ /**************************************/ esp_err_t adc_digi_monitor_set_config(adc_digi_monitor_idx_t idx, adc_digi_monitor_t *config) { ADC_ENTER_CRITICAL(); adc_hal_digi_monitor_config(idx, config); ADC_EXIT_CRITICAL(); return ESP_OK; } 
esp_err_t adc_digi_monitor_enable(adc_digi_monitor_idx_t idx, bool enable) { ADC_ENTER_CRITICAL(); adc_hal_digi_monitor_enable(idx, enable); ADC_EXIT_CRITICAL(); return ESP_OK; } #endif //#if CONFIG_IDF_TARGET_ESP32C3 #if SOC_ADC_CALIBRATION_V1_SUPPORTED /*--------------------------------------------------------------- Hardware Calibration Setting ---------------------------------------------------------------*/ #if CONFIG_IDF_TARGET_ESP32S2 #define esp_efuse_rtc_calib_get_ver() esp_efuse_rtc_table_read_calib_version() static inline uint32_t esp_efuse_rtc_calib_get_init_code(int version, uint32_t adc_unit, int atten) { int tag = esp_efuse_rtc_table_get_tag(version, adc_unit + 1, atten, RTCCALIB_V2_PARAM_VINIT); return esp_efuse_rtc_table_get_parsed_efuse_value(tag, false); } #endif static uint16_t s_adc_cali_param[SOC_ADC_PERIPH_NUM][ADC_ATTEN_MAX] = {}; //NOTE: according to calibration version, different types of lock may be taken during the process: // 1. Semaphore when reading efuse // 2. Lock (Spinlock, or Mutex) if we actually do ADC calibration in the future //This function shoudn't be called inside critical section or ISR uint32_t adc_get_calibration_offset(adc_unit_t adc_n, adc_channel_t channel, adc_atten_t atten) { if (s_adc_cali_param[adc_n][atten]) { ESP_LOGV(ADC_TAG, "Use calibrated val ADC%d atten=%d: %04X", adc_n, atten, s_adc_cali_param[adc_n][atten]); return (uint32_t)s_adc_cali_param[adc_n][atten]; } // check if we can fetch the values from eFuse. int version = esp_efuse_rtc_calib_get_ver(); uint32_t init_code = 0; if (version == ESP_EFUSE_ADC_CALIB_VER) { init_code = esp_efuse_rtc_calib_get_init_code(version, adc_n, atten); } else { ESP_LOGD(ADC_TAG, "Calibration eFuse is not configured, use self-calibration for ICode"); adc_power_acquire(); ADC_ENTER_CRITICAL(); const bool internal_gnd = true; init_code = adc_hal_self_calibration(adc_n, channel, atten, internal_gnd); ADC_EXIT_CRITICAL(); adc_power_release(); } s_adc_cali_param[adc_n][atten] = init_code; ESP_LOGV(ADC_TAG, "Calib(V%d) ADC%d atten=%d: %04X", version, adc_n, atten, init_code); return init_code; } // Internal function to calibrate PWDET for WiFi esp_err_t adc_cal_offset(adc_unit_t adc_n, adc_channel_t channel, adc_atten_t atten) { adc_hal_calibration_init(adc_n); uint32_t cal_val = adc_get_calibration_offset(adc_n, channel, atten); ADC_ENTER_CRITICAL(); adc_hal_set_calibration_param(adc_n, cal_val); ADC_EXIT_CRITICAL(); return ESP_OK; } #endif //#if SOC_ADC_CALIBRATION_V1_SUPPORTED /*--------------------------------------------------------------- Deprecated API ---------------------------------------------------------------*/ #if CONFIG_IDF_TARGET_ESP32C3 #pragma GCC diagnostic ignored "-Wdeprecated-declarations" #include "deprecated/driver/adc_deprecated.h" #include "deprecated/driver/adc_types_deprecated.h" esp_err_t adc_digi_controller_config(const adc_digi_config_t *config) { if (!s_adc_digi_ctx) { return ESP_ERR_INVALID_STATE; } ESP_RETURN_ON_FALSE((config->sample_freq_hz <= SOC_ADC_SAMPLE_FREQ_THRES_HIGH && config->sample_freq_hz >= SOC_ADC_SAMPLE_FREQ_THRES_LOW), ESP_ERR_INVALID_ARG, ADC_TAG, "DC sampling frequency out of range"); s_adc_digi_ctx->hal_digi_ctrlr_cfg.conv_limit_en = config->conv_limit_en; s_adc_digi_ctx->hal_digi_ctrlr_cfg.conv_limit_num = config->conv_limit_num; s_adc_digi_ctx->hal_digi_ctrlr_cfg.adc_pattern_len = config->adc_pattern_len; s_adc_digi_ctx->hal_digi_ctrlr_cfg.sample_freq_hz = config->sample_freq_hz; for (int i = 0; i < config->adc_pattern_len; i++) { 
s_adc_digi_ctx->hal_digi_ctrlr_cfg.adc_pattern[i].atten = config->adc_pattern[i].atten; s_adc_digi_ctx->hal_digi_ctrlr_cfg.adc_pattern[i].channel = config->adc_pattern[i].channel; s_adc_digi_ctx->hal_digi_ctrlr_cfg.adc_pattern[i].unit = config->adc_pattern[i].unit; } const int atten_uninitialized = 999; s_adc_digi_ctx->adc1_atten = atten_uninitialized; s_adc_digi_ctx->adc2_atten = atten_uninitialized; s_adc_digi_ctx->use_adc1 = 0; s_adc_digi_ctx->use_adc2 = 0; for (int i = 0; i < config->adc_pattern_len; i++) { const adc_digi_pattern_config_t *pat = &s_adc_digi_ctx->hal_digi_ctrlr_cfg.adc_pattern[i]; if (pat->unit == ADC_UNIT_1) { s_adc_digi_ctx->use_adc1 = 1; if (s_adc_digi_ctx->adc1_atten == atten_uninitialized) { s_adc_digi_ctx->adc1_atten = pat->atten; } else if (s_adc_digi_ctx->adc1_atten != pat->atten) { return ESP_ERR_INVALID_ARG; } } else if (pat->unit == ADC_UNIT_2) { //See whether ADC2 will be used or not. If yes, the ``sar_adc2_mutex`` should be acquired in the continuous read driver s_adc_digi_ctx->use_adc2 = 1; if (s_adc_digi_ctx->adc2_atten == atten_uninitialized) { s_adc_digi_ctx->adc2_atten = pat->atten; } else if (s_adc_digi_ctx->adc2_atten != pat->atten) { return ESP_ERR_INVALID_ARG; } } } return ESP_OK; } #endif //#if CONFIG_IDF_TARGET_ESP32C3
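/*
 * Usage sketch (example only, not part of the original file): the intended
 * call order for the DMA-based continuous read driver implemented above.
 * Struct field names follow how they are consumed in this file; the channel,
 * attenuation, sample rate and buffer sizes are illustrative assumptions.
 */
static void adc_digi_example(void)
{
    adc_digi_init_config_t init_cfg = {
        .max_store_buf_size = 1024,     /* ring buffer size in bytes */
        .conv_num_each_intr = 256,      /* bytes handled per DMA interrupt */
        .adc1_chan_mask = BIT(2),       /* use ADC1 channel 2 */
        .adc2_chan_mask = 0,
    };
    adc_digi_pattern_config_t pattern = {
        .atten = ADC_ATTEN_DB_11,
        .channel = 2,
        .unit = ADC_UNIT_1,
        .bit_width = SOC_ADC_DIGI_MAX_BITWIDTH,
    };
    adc_digi_configuration_t dig_cfg = {
        .conv_limit_en = 0,
        .conv_limit_num = 250,
        .pattern_num = 1,
        .adc_pattern = &pattern,
        .sample_freq_hz = 20 * 1000,
        .conv_mode = ADC_CONV_SINGLE_UNIT_1,
        .format = ADC_DIGI_OUTPUT_FORMAT_TYPE2,
    };
    uint8_t buf[256] = {0};
    uint32_t out_len = 0;

    ESP_ERROR_CHECK(adc_digi_initialize(&init_cfg));
    ESP_ERROR_CHECK(adc_digi_controller_configure(&dig_cfg));
    ESP_ERROR_CHECK(adc_digi_start());

    /* Drain one chunk of conversion results; a real task would loop here. */
    if (adc_digi_read_bytes(buf, sizeof(buf), &out_len, 100 /* ms */) == ESP_OK) {
        /* parse out_len bytes of conversion frames from buf */
    }

    ESP_ERROR_CHECK(adc_digi_stop());
    ESP_ERROR_CHECK(adc_digi_deinitialize());
}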
745696.c
/** @file Implement UnitTestResultReportLib doing plain txt out to console Copyright (c) Microsoft Corporation.<BR> SPDX-License-Identifier: BSD-2-Clause-Patent **/ #include <Uefi.h> #include <Library/UnitTestResultReportLib.h> #include <Library/BaseLib.h> #include <Library/DebugLib.h> VOID ReportPrint ( IN CONST CHAR8 *Format, ... ); VOID ReportOutput ( IN CONST CHAR8 *Output ); struct _UNIT_TEST_STATUS_STRING { UNIT_TEST_STATUS Status; CHAR8 *String; }; struct _UNIT_TEST_FAILURE_TYPE_STRING { FAILURE_TYPE Type; CHAR8 *String; }; struct _UNIT_TEST_STATUS_STRING mStatusStrings[] = { { UNIT_TEST_PASSED, "PASSED"}, { UNIT_TEST_ERROR_PREREQUISITE_NOT_MET, "NOT RUN - PREREQUISITE FAILED"}, { UNIT_TEST_ERROR_TEST_FAILED, "FAILED"}, { UNIT_TEST_RUNNING, "RUNNING"}, { UNIT_TEST_PENDING, "PENDING"}, { 0, "**UNKNOWN**"} }; struct _UNIT_TEST_FAILURE_TYPE_STRING mFailureTypeStrings[] = { { FAILURETYPE_NOFAILURE, "NO FAILURE"}, { FAILURETYPE_OTHER, "OTHER FAILURE"}, { FAILURETYPE_ASSERTTRUE, "ASSERT_TRUE FAILURE"}, { FAILURETYPE_ASSERTFALSE, "ASSERT_FALSE FAILURE"}, { FAILURETYPE_ASSERTEQUAL, "ASSERT_EQUAL FAILURE"}, { FAILURETYPE_ASSERTNOTEQUAL, "ASSERT_NOTEQUAL FAILURE"}, { FAILURETYPE_ASSERTNOTEFIERROR, "ASSERT_NOTEFIERROR FAILURE"}, { FAILURETYPE_ASSERTSTATUSEQUAL, "ASSERT_STATUSEQUAL FAILURE"}, { FAILURETYPE_ASSERTNOTNULL , "ASSERT_NOTNULL FAILURE"}, { 0, "*UNKNOWN* Failure"} }; // // TEST REPORTING FUNCTIONS // STATIC CONST CHAR8* GetStringForUnitTestStatus ( IN UNIT_TEST_STATUS Status ) { UINTN Index; for (Index = 0; Index < ARRAY_SIZE (mStatusStrings); Index++) { if (mStatusStrings[Index].Status == Status) { // // Return string from matching entry // return mStatusStrings[Index].String; } } // // Return last entry if no match found. // return mStatusStrings[Index].String; } STATIC CONST CHAR8* GetStringForFailureType ( IN FAILURE_TYPE Failure ) { UINTN Index; for (Index = 0; Index < ARRAY_SIZE (mFailureTypeStrings); Index++) { if (mFailureTypeStrings[Index].Type == Failure) { // // Return string from matching entry // return mFailureTypeStrings[Index].String; } } // // Return last entry if no match found. 
// DEBUG((DEBUG_INFO, "%a Failure Type does not have string defined 0x%X\n", __FUNCTION__, (UINT32)Failure)); return mFailureTypeStrings[Index].String; } /* Method to print the Unit Test run results @retval Success */ EFI_STATUS EFIAPI OutputUnitTestFrameworkReport ( IN UNIT_TEST_FRAMEWORK_HANDLE FrameworkHandle ) { UNIT_TEST_FRAMEWORK *Framework; INTN Passed; INTN Failed; INTN NotRun; UNIT_TEST_SUITE_LIST_ENTRY *Suite; UNIT_TEST_LIST_ENTRY *Test; INTN SPassed; INTN SFailed; INTN SNotRun; Passed = 0; Failed = 0; NotRun = 0; Suite = NULL; Framework = (UNIT_TEST_FRAMEWORK *)FrameworkHandle; if (Framework == NULL) { return EFI_INVALID_PARAMETER; } ReportPrint ("---------------------------------------------------------\n"); ReportPrint ("------------- UNIT TEST FRAMEWORK RESULTS ---------------\n"); ReportPrint ("---------------------------------------------------------\n"); //print the version and time // // Iterate all suites // for (Suite = (UNIT_TEST_SUITE_LIST_ENTRY*)GetFirstNode(&Framework->TestSuiteList); (LIST_ENTRY*)Suite != &Framework->TestSuiteList; Suite = (UNIT_TEST_SUITE_LIST_ENTRY*)GetNextNode(&Framework->TestSuiteList, (LIST_ENTRY*)Suite)) { Test = NULL; SPassed = 0; SFailed = 0; SNotRun = 0; ReportPrint ("/////////////////////////////////////////////////////////\n"); ReportPrint (" SUITE: %a\n", Suite->UTS.Title); ReportPrint (" PACKAGE: %a\n", Suite->UTS.Name); ReportPrint ("/////////////////////////////////////////////////////////\n"); // // Iterate all tests within the suite // for (Test = (UNIT_TEST_LIST_ENTRY*)GetFirstNode(&(Suite->UTS.TestCaseList)); (LIST_ENTRY*)Test != &(Suite->UTS.TestCaseList); Test = (UNIT_TEST_LIST_ENTRY*)GetNextNode(&(Suite->UTS.TestCaseList), (LIST_ENTRY*)Test)) { ReportPrint ("*********************************************************\n"); ReportPrint (" CLASS NAME: %a\n", Test->UT.Name); ReportPrint (" TEST: %a\n", Test->UT.Description); ReportPrint (" STATUS: %a\n", GetStringForUnitTestStatus (Test->UT.Result)); ReportPrint (" FAILURE: %a\n", GetStringForFailureType (Test->UT.FailureType)); ReportPrint (" FAILURE MESSAGE:\n%a\n", Test->UT.FailureMessage); if (Test->UT.Log != NULL) { ReportPrint (" LOG:\n"); ReportOutput (Test->UT.Log); } switch (Test->UT.Result) { case UNIT_TEST_PASSED: SPassed++; break; case UNIT_TEST_ERROR_TEST_FAILED: SFailed++; break; case UNIT_TEST_PENDING: // Fall through... case UNIT_TEST_RUNNING: // Fall through... 
case UNIT_TEST_ERROR_PREREQUISITE_NOT_MET: SNotRun++; break; default: break; } ReportPrint ("**********************************************************\n"); } //End Test iteration ReportPrint ("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"); ReportPrint ("Suite Stats\n"); ReportPrint (" Passed: %d (%d%%)\n", SPassed, (SPassed * 100)/(SPassed+SFailed+SNotRun)); ReportPrint (" Failed: %d (%d%%)\n", SFailed, (SFailed * 100) / (SPassed + SFailed + SNotRun)); ReportPrint (" Not Run: %d (%d%%)\n", SNotRun, (SNotRun * 100) / (SPassed + SFailed + SNotRun)); ReportPrint ("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n" ); Passed += SPassed; //add to global counters Failed += SFailed; //add to global counters NotRun += SNotRun; //add to global counters }//End Suite iteration ReportPrint ("=========================================================\n"); ReportPrint ("Total Stats\n"); ReportPrint (" Passed: %d (%d%%)\n", Passed, (Passed * 100) / (Passed + Failed + NotRun)); ReportPrint (" Failed: %d (%d%%)\n", Failed, (Failed * 100) / (Passed + Failed + NotRun)); ReportPrint (" Not Run: %d (%d%%)\n", NotRun, (NotRun * 100) / (Passed + Failed + NotRun)); ReportPrint ("=========================================================\n" ); return EFI_SUCCESS; }
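//
// A minimal guarded-percentage sketch (not part of the library above): the
// suite and total statistics compute (Count * 100) / (Passed + Failed + NotRun),
// which divides by zero when a suite contains no test cases.  The helper name
// ReportPercent is hypothetical.
//
STATIC
INTN
ReportPercent (
  IN INTN  Count,
  IN INTN  Total
  )
{
  //
  // Guard the empty-suite case before computing the percentage.
  //
  if (Total == 0) {
    return 0;
  }
  return (Count * 100) / Total;
}
// Hypothetical usage:
//   ReportPrint ("  Passed:  %d (%d%%)\n", SPassed, ReportPercent (SPassed, SPassed + SFailed + SNotRun));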
871672.c
#include <Imath/half.h>

void half_example ()
{
    float f = 3.5f;
    half h = imath_float_to_half (f);   /* float -> half */
    float hh = imath_half_to_float (h); /* half -> float */
}
856134.c
/* This testcase is part of GDB, the GNU debugger. Copyright 2004-2021 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <stdio.h> void pendfunc1 (int x) { int y = x + 4; printf ("in pendfunc1, x is %d\n", x); } void pendfunc (int x) { pendfunc1 (x); }
522382.c
/** * Copyright (c) 2015 - 2017, Nordic Semiconductor ASA * * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form, except as embedded into a Nordic * Semiconductor ASA integrated circuit in a product or a software update for * such product, must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. * * 3. Neither the name of Nordic Semiconductor ASA nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * 4. This software, with or without modification, must only be used with a * Nordic Semiconductor ASA integrated circuit. * * 5. Any software provided in binary form under this license must not be reverse * engineered, decompiled, modified and/or disassembled. * * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ /** @file * * @defgroup ble_sdk_app_lns_main main.c * @{ * @ingroup ble_sdk_app_lns * @brief Location and Navigation Service Sample Application main file. * * This file contains the source code for a sample application using the Location and Navigation service * (and also Battery and Device Information services). This application uses the * @ref srvlib_conn_params module. */ #include <stdint.h> #include <string.h> #include "nordic_common.h" #include "nrf.h" #include "nrf_assert.h" #include "nrf_error.h" #include "ble.h" #include "ble_hci.h" #include "ble_srv_common.h" #include "ble_advdata.h" #include "ble_advertising.h" #include "ble_bas.h" #include "ble_lns.h" #include "ble_dis.h" #include "sensorsim.h" #include "app_timer.h" #include "softdevice_handler.h" #include "ble_conn_params.h" #include "bsp.h" #include "bsp_btn_ble.h" #include "peer_manager.h" #include "fds.h" #include "fstorage.h" #include "ble_conn_state.h" #include "nrf_ble_gatt.h" #define NRF_LOG_MODULE_NAME "APP" #include "nrf_log.h" #include "nrf_log_ctrl.h" #define APP_FEATURE_NOT_SUPPORTED BLE_GATT_STATUS_ATTERR_APP_BEGIN + 2 /**< Reply when unsupported features are requested. */ #define DEVICE_NAME "Nordic_LNS" /**< Name of device. Will be included in the advertising data. */ #define MANUFACTURER_NAME "NordicSemiconductor" /**< Manufacturer. Will be passed to Device Information Service. */ #define APP_ADV_INTERVAL 40 /**< The advertising interval (in units of 0.625 ms; this value corresponds to 25 ms). */ #define APP_ADV_TIMEOUT_IN_SECONDS 180 /**< The advertising time-out in units of seconds. 
*/ #define BATTERY_LEVEL_MEAS_INTERVAL APP_TIMER_TICKS(2400) /**< Battery level measurement interval (ticks). */ #define MIN_BATTERY_LEVEL 81 /**< Minimum simulated battery level. */ #define MAX_BATTERY_LEVEL 100 /**< Maximum simulated battery level. */ #define BATTERY_LEVEL_INCREMENT 1 /**< Increment between each simulated battery level measurement. */ #define LOC_AND_NAV_DATA_INTERVAL APP_TIMER_TICKS(1000) /**< Location and Navigation data interval (ticks). */ #define SECOND_10_MS_UNITS 100 /**< Definition of 1 second, when 1 unit is 10 ms. */ #define MIN_CONN_INTERVAL MSEC_TO_UNITS(100, UNIT_1_25_MS) /**< Minimum connection interval (100 ms). */ #define MAX_CONN_INTERVAL MSEC_TO_UNITS(250, UNIT_1_25_MS) /**< Maximum connection interval (250 ms). */ #define SLAVE_LATENCY 0 /**< Slave latency. */ #define CONN_SUP_TIMEOUT (4 * SECOND_10_MS_UNITS) /**< Connection supervisory time-out (4 seconds). Supervision time-out uses 10 ms units. */ #define FIRST_CONN_PARAMS_UPDATE_DELAY APP_TIMER_TICKS(5000) /**< Time from initiating event (connect or start of notification) to first time sd_ble_gap_conn_param_update is called (5 seconds). */ #define NEXT_CONN_PARAMS_UPDATE_DELAY APP_TIMER_TICKS(5000) /**< Time between each call to sd_ble_gap_conn_param_update after the first (30 seconds). */ #define MAX_CONN_PARAMS_UPDATE_COUNT 3 /**< Number of attempts before giving up the connection parameter negotiation. */ #define SEC_PARAM_BOND 1 /**< Perform bonding. */ #define SEC_PARAM_MITM 0 /**< Man In The Middle protection not required. */ #define SEC_PARAM_LESC 0 /**< LE Secure Connections not enabled. */ #define SEC_PARAM_KEYPRESS 0 /**< Keypress notifications not enabled. */ #define SEC_PARAM_IO_CAPABILITIES BLE_GAP_IO_CAPS_NONE /**< No I/O capabilities. */ #define SEC_PARAM_OOB 0 /**< Out Of Band data not available. */ #define SEC_PARAM_MIN_KEY_SIZE 7 /**< Minimum encryption key size. */ #define SEC_PARAM_MAX_KEY_SIZE 16 /**< Maximum encryption key size. */ #define DEAD_BEEF 0xDEADBEEF /**< Value used as error code on stack dump, can be used to identify stack location on stack unwind. */ static uint16_t m_conn_handle = BLE_CONN_HANDLE_INVALID; /**< Handle of the current connection. */ static ble_bas_t m_bas; /**< Structure used to identify the battery service. */ static ble_lns_t m_lns; /**< Structure used to identify the location and navigation service. */ static nrf_ble_gatt_t m_gatt; /**< GATT module instance. */ static sensorsim_cfg_t m_battery_sim_cfg; /**< Battery Level sensor simulator configuration. */ static sensorsim_state_t m_battery_sim_state; /**< Battery Level sensor simulator state. */ APP_TIMER_DEF(m_battery_timer_id); /**< Battery timer. */ APP_TIMER_DEF(m_loc_and_nav_timer_id); /**< Location and navigation measurement timer. */ static ble_lns_loc_speed_t m_sim_location_speed; /**< Location and speed simulation. */ static ble_lns_pos_quality_t m_sim_position_quality; /**< Position measurement quality simulation. */ static ble_lns_navigation_t m_sim_navigation; /**< Navigation data structure simulation. 
*/ static ble_uuid_t m_adv_uuids[] = { {BLE_UUID_LOCATION_AND_NAVIGATION_SERVICE, BLE_UUID_TYPE_BLE}, {BLE_UUID_BATTERY_SERVICE, BLE_UUID_TYPE_BLE}, {BLE_UUID_DEVICE_INFORMATION_SERVICE, BLE_UUID_TYPE_BLE} }; static const ble_lns_loc_speed_t initial_lns_location_speed = { .instant_speed_present = true, .total_distance_present = true, .location_present = true, .elevation_present = true, .heading_present = true, .rolling_time_present = true, .utc_time_time_present = true, .position_status = BLE_LNS_POSITION_OK, .data_format = BLE_LNS_SPEED_DISTANCE_FORMAT_2D, .elevation_source = BLE_LNS_ELEV_SOURCE_POSITIONING_SYSTEM, .heading_source = BLE_LNS_HEADING_SOURCE_COMPASS, .instant_speed = 12, // = 1.2 meter/second .total_distance = 2356, // = 2356 meters/second .latitude = -103123567, // = -10.3123567 degrees .longitude = 601234567, // = 60.1234567 degrees .elevation = 1350, // = 13.5 meter .heading = 2123, // = 21.23 degrees .rolling_time = 1, // = 1 second .utc_time = { .year = 2015, .month = 7, .day = 8, .hours = 12, .minutes = 43, .seconds = 33 } }; static const ble_lns_pos_quality_t initial_lns_pos_quality = { .number_of_satellites_in_solution_present = true, .number_of_satellites_in_view_present = true, .time_to_first_fix_present = true, .ehpe_present = true, .evpe_present = true, .hdop_present = true, .vdop_present = true, .position_status = BLE_LNS_POSITION_OK, .number_of_satellites_in_solution = 5, .number_of_satellites_in_view = 6, .time_to_first_fix = 63, // = 6.3 seconds .ehpe = 100, // = 1 meter .evpe = 123, // = 1.23 meter .hdop = 123, .vdop = 143 }; static const ble_lns_navigation_t initial_lns_navigation = { .remaining_dist_present = true, .remaining_vert_dist_present = true, .eta_present = true, .position_status = BLE_LNS_POSITION_OK, .heading_source = BLE_LNS_HEADING_SOURCE_COMPASS, .navigation_indicator_type = BLE_LNS_NAV_TO_WAYPOINT, .waypoint_reached = false, .destination_reached = false, .bearing = 1234, // = 12.34 degrees .heading = 2123, // = 21.23 degrees .remaining_distance = 532576, // = 53257.6 meters .remaining_vert_distance = 123, // = 12.3 meters .eta = { .year = 2015, .month = 7, .day = 8, .hours = 16, .minutes = 43, .seconds = 33 } }; static void advertising_start(bool erase_bonds); /**@brief Callback function for asserts in the SoftDevice. * * @details This function will be called in case of an assert in the SoftDevice. * * @warning This handler is an example only and does not fit a final product. You need to analyze * how your product is supposed to react in case of Assert. * @warning On assert from the SoftDevice, the system can only recover on reset. * * @param[in] line_num Line number of the failing ASSERT call. * @param[in] file_name File name of the failing ASSERT call. */ void assert_nrf_callback(uint16_t line_num, const uint8_t * p_file_name) { app_error_handler(DEAD_BEEF, line_num, p_file_name); } /**@brief Function for handling Peer Manager events. * * @param[in] p_evt Peer Manager event. */ static void pm_evt_handler(pm_evt_t const * p_evt) { ret_code_t err_code; switch (p_evt->evt_id) { case PM_EVT_BONDED_PEER_CONNECTED: { NRF_LOG_INFO("Connected to a previously bonded device.\r\n"); } break; case PM_EVT_CONN_SEC_SUCCEEDED: { NRF_LOG_INFO("Link secured. Role: %d. conn_handle: %d, Procedure: %d\r\n", ble_conn_state_role(p_evt->conn_handle), p_evt->conn_handle, p_evt->params.conn_sec_succeeded.procedure); } break; case PM_EVT_CONN_SEC_FAILED: { /* Often, when securing fails, it shouldn't be restarted, for security reasons. 
* Other times, it can be restarted directly. * Sometimes it can be restarted, but only after changing some Security Parameters. * Sometimes, it cannot be restarted until the link is disconnected and reconnected. * Sometimes it is impossible, to secure the link, or the peer device does not support it. * How to handle this error is highly application dependent. */ } break; case PM_EVT_CONN_SEC_CONFIG_REQ: { // Reject pairing request from an already bonded peer. pm_conn_sec_config_t conn_sec_config = {.allow_repairing = false}; pm_conn_sec_config_reply(p_evt->conn_handle, &conn_sec_config); } break; case PM_EVT_STORAGE_FULL: { // Run garbage collection on the flash. err_code = fds_gc(); if (err_code == FDS_ERR_BUSY || err_code == FDS_ERR_NO_SPACE_IN_QUEUES) { // Retry. } else { APP_ERROR_CHECK(err_code); } } break; case PM_EVT_PEERS_DELETE_SUCCEEDED: { // Bonds are deleted. Start advertising. advertising_start(false); } break; case PM_EVT_LOCAL_DB_CACHE_APPLY_FAILED: { // The local database has likely changed, send service changed indications. pm_local_database_has_changed(); } break; case PM_EVT_PEER_DATA_UPDATE_FAILED: { // Assert. APP_ERROR_CHECK(p_evt->params.peer_data_update_failed.error); } break; case PM_EVT_PEER_DELETE_FAILED: { // Assert. APP_ERROR_CHECK(p_evt->params.peer_delete_failed.error); } break; case PM_EVT_PEERS_DELETE_FAILED: { // Assert. APP_ERROR_CHECK(p_evt->params.peers_delete_failed_evt.error); } break; case PM_EVT_ERROR_UNEXPECTED: { // Assert. APP_ERROR_CHECK(p_evt->params.error_unexpected.error); } break; case PM_EVT_CONN_SEC_START: case PM_EVT_PEER_DATA_UPDATE_SUCCEEDED: case PM_EVT_PEER_DELETE_SUCCEEDED: case PM_EVT_LOCAL_DB_CACHE_APPLIED: case PM_EVT_SERVICE_CHANGED_IND_SENT: case PM_EVT_SERVICE_CHANGED_IND_CONFIRMED: default: break; } } /**@brief Callback function for errors in the Location Navigation Service. * * @details This function will be called in case of an error in the Location Navigation Service. * * @warning This handler is an example only and does not fit a final product. You need to analyze * how your product is supposed to react in case of Assert. */ void lns_error_handler(uint32_t err_code) { app_error_handler(DEAD_BEEF, 0, 0); } /**@brief Location Navigation event handler. * * @details This function will be called for all events of the Location Navigation Module that * are passed to the application. * * @param[in] p_evt Event received from the Location Navigation Module. 
*/ static void on_lns_evt(ble_lns_t const * p_lns, ble_lns_evt_t const * p_evt) { switch (p_evt->evt_type) { case BLE_LNS_CTRLPT_EVT_INDICATION_ENABLED: NRF_LOG_INFO("Control Point: Indication enabled\r\n"); break; case BLE_LNS_CTRLPT_EVT_INDICATION_DISABLED: NRF_LOG_INFO("Control Point: Indication disabled\r\n"); break; case BLE_LNS_LOC_SPEED_EVT_NOTIFICATION_ENABLED: NRF_LOG_INFO("Location/Speed: Notification enabled\r\n"); break; case BLE_LNS_LOC_SPEED_EVT_NOTIFICATION_DISABLED: NRF_LOG_INFO("Location/Speed: Notification disabled\r\n"); break; case BLE_LNS_NAVIGATION_EVT_NOTIFICATION_ENABLED: NRF_LOG_INFO("Navigation: Notification enabled\r\n"); break; case BLE_LNS_NAVIGATION_EVT_NOTIFICATION_DISABLED: NRF_LOG_INFO("Navigation: Notification disabled\r\n"); break; default: break; } } ble_lncp_rsp_code_t on_ln_ctrlpt_evt(ble_lncp_t const * p_lncp, ble_lncp_evt_t const * p_evt) { switch (p_evt->evt_type) { case LNCP_EVT_MASK_SET: NRF_LOG_INFO("LOC_SPEED_EVT: Feature mask set\r\n"); break; case LNCP_EVT_TOTAL_DISTANCE_SET: NRF_LOG_INFO("LOC_SPEED_EVT: Set total distance: %d\r\n", p_evt->params.total_distance); break; case LNCP_EVT_ELEVATION_SET: NRF_LOG_INFO("LOC_SPEED_EVT: Set elevation: %d\r\n", p_evt->params.elevation); break; case LNCP_EVT_FIX_RATE_SET: NRF_LOG_INFO("POS_QUAL_EVT: Fix rate set to %d\r\n", p_evt->params.fix_rate); break; case LNCP_EVT_NAV_COMMAND: NRF_LOG_INFO("NAV_EVT: Navigation state changed to %d\r\n", p_evt->params.nav_cmd); break; case LNCP_EVT_ROUTE_SELECTED: NRF_LOG_INFO("NAV_EVT: Route selected %d\r\n", p_evt->params.selected_route); break; default: break; } return (LNCP_RSP_SUCCESS); } /**@brief Function for performing battery measurement and updating the Battery Level characteristic * in Battery Service. */ static void battery_level_update(void) { ret_code_t err_code; uint8_t battery_level; battery_level = (uint8_t)sensorsim_measure(&m_battery_sim_state, &m_battery_sim_cfg); err_code = ble_bas_battery_level_update(&m_bas, battery_level); if ((err_code != NRF_SUCCESS) && (err_code != NRF_ERROR_INVALID_STATE) && (err_code != NRF_ERROR_RESOURCES) && (err_code != BLE_ERROR_GATTS_SYS_ATTR_MISSING) ) { APP_ERROR_HANDLER(err_code); } } /**@brief Function for handling the Battery measurement timer time-out. * * @details This function will be called each time the battery level measurement timer expires. * * @param[in] p_context Pointer used for passing some arbitrary information (context) from the * app_start_timer() call to the time-out handler. 
*/ static void battery_level_meas_timeout_handler(void * p_context) { UNUSED_PARAMETER(p_context); battery_level_update(); } static void increment_time(ble_date_time_t * p_time) { p_time->seconds++; if (p_time->seconds > 59) { p_time->seconds = 0; p_time->minutes++; if (p_time->minutes > 59) { p_time->minutes = 0; p_time->hours++; if (p_time->hours > 24) { p_time->hours = 0; p_time->day++; if (p_time->day > 31) { p_time->day = 0; p_time->month++; if (p_time->month > 12) { p_time->year++; } } } } } } static void navigation_simulation_update(void) { m_sim_navigation.position_status = (ble_lns_pos_status_type_t) ( ( (uint32_t) m_sim_navigation.position_status + 1) % ( (uint32_t) BLE_LNS_LAST_KNOWN_POSITION + 1) ); m_sim_navigation.heading_source = (ble_lns_heading_source_t) ( ( (uint32_t) m_sim_navigation.heading_source + 1) % ( (uint32_t) BLE_LNS_HEADING_SOURCE_COMPASS + 1) ); m_sim_navigation.navigation_indicator_type = (ble_lns_nav_indicator_type_t) ( ( (uint32_t) m_sim_navigation.navigation_indicator_type + 1) % ( (uint32_t) BLE_LNS_NAV_TO_DESTINATION + 1) ); m_sim_navigation.waypoint_reached = !m_sim_navigation.waypoint_reached; m_sim_navigation.destination_reached = !m_sim_navigation.destination_reached; m_sim_navigation.bearing++; m_sim_navigation.heading++; m_sim_navigation.remaining_distance++; m_sim_navigation.remaining_vert_distance++; increment_time(&m_sim_navigation.eta); } static void position_quality_simulation_update(void) { m_sim_position_quality.position_status = (ble_lns_pos_status_type_t) ( ( (uint32_t) m_sim_position_quality.position_status + 1) % ( (uint32_t) BLE_LNS_LAST_KNOWN_POSITION + 1) ); m_sim_position_quality.number_of_satellites_in_solution++; m_sim_position_quality.number_of_satellites_in_view++; m_sim_position_quality.time_to_first_fix++; m_sim_position_quality.ehpe++; m_sim_position_quality.evpe++; m_sim_position_quality.hdop++; m_sim_position_quality.vdop++; } /**@brief Provide simulated location and speed. */ static void loc_speed_simulation_update(void) { m_sim_location_speed.position_status = (ble_lns_pos_status_type_t) ( ( (uint32_t) m_sim_location_speed.position_status + 1) % ( (uint32_t) BLE_LNS_LAST_KNOWN_POSITION + 1) ); m_sim_location_speed.data_format = (ble_lns_speed_distance_format_t) ( ( (uint32_t) m_sim_location_speed.data_format + 1) % ( (uint32_t) BLE_LNS_SPEED_DISTANCE_FORMAT_3D + 1) ); m_sim_location_speed.elevation_source = (ble_lns_elevation_source_t) ( ( (uint32_t) m_sim_location_speed.elevation_source + 1) % ( (uint32_t) BLE_LNS_ELEV_SOURCE_OTHER + 1) ); m_sim_location_speed.heading_source = (ble_lns_heading_source_t) ( ( (uint32_t) m_sim_location_speed.heading_source + 1) % ( (uint32_t) BLE_LNS_HEADING_SOURCE_COMPASS + 1) ); m_sim_location_speed.total_distance++; m_sim_location_speed.latitude++; m_sim_location_speed.longitude++; m_sim_location_speed.elevation++; m_sim_location_speed.heading++; m_sim_location_speed.rolling_time++; increment_time(&m_sim_location_speed.utc_time); } /**@brief Location and navigation time-out handler. * * @details This function will be called each time the location and navigation measurement timer expires. * * @param[in] p_context Pointer used for passing some arbitrary information (context) from the * app_start_timer() call to the time-out handler. 
*/ static void loc_and_nav_timeout_handler(void * p_context) { ret_code_t err_code; UNUSED_PARAMETER(p_context); loc_speed_simulation_update(); position_quality_simulation_update(); navigation_simulation_update(); err_code = ble_lns_loc_speed_send(&m_lns); if (err_code != NRF_ERROR_INVALID_STATE) { APP_ERROR_CHECK(err_code); } err_code = ble_lns_navigation_send(&m_lns); if (err_code != NRF_ERROR_INVALID_STATE) { APP_ERROR_CHECK(err_code); } } /**@brief Timer initialization. * * @details Initializes the timer module. This creates and starts application timers. */ static void timers_init(void) { ret_code_t err_code; // Initialize timer module err_code = app_timer_init(); APP_ERROR_CHECK(err_code); // Create timers err_code = app_timer_create(&m_battery_timer_id, APP_TIMER_MODE_REPEATED, battery_level_meas_timeout_handler); APP_ERROR_CHECK(err_code); err_code = app_timer_create(&m_loc_and_nav_timer_id, APP_TIMER_MODE_REPEATED, loc_and_nav_timeout_handler); APP_ERROR_CHECK(err_code); } /**@brief GAP initialization. * * @details This function shall be used to set up all the necessary GAP (Generic Access Profile) * parameters of the device. It also sets the permissions and appearance. */ static void gap_params_init(void) { ret_code_t err_code; ble_gap_conn_params_t gap_conn_params; ble_gap_conn_sec_mode_t sec_mode; BLE_GAP_CONN_SEC_MODE_SET_OPEN(&sec_mode); err_code = sd_ble_gap_device_name_set(&sec_mode, (const uint8_t *)DEVICE_NAME, strlen(DEVICE_NAME)); APP_ERROR_CHECK(err_code); err_code = sd_ble_gap_appearance_set(BLE_APPEARANCE_OUTDOOR_SPORTS_ACT_LOC_AND_NAV_DISP); APP_ERROR_CHECK(err_code); memset(&gap_conn_params, 0, sizeof(gap_conn_params)); gap_conn_params.min_conn_interval = MIN_CONN_INTERVAL; gap_conn_params.max_conn_interval = MAX_CONN_INTERVAL; gap_conn_params.slave_latency = SLAVE_LATENCY; gap_conn_params.conn_sup_timeout = CONN_SUP_TIMEOUT; err_code = sd_ble_gap_ppcp_set(&gap_conn_params); APP_ERROR_CHECK(err_code); } /**@brief Function for initializing the GATT module. */ static void gatt_init(void) { ret_code_t err_code = nrf_ble_gatt_init(&m_gatt, NULL); APP_ERROR_CHECK(err_code); } /**@brief Initialize services that will be used by the application. * * @details Initialize the Location and Navigation, Battery and Device Information services. 
*/ static void services_init(void) { ret_code_t err_code; ble_lns_init_t lns_init; ble_bas_init_t bas_init; ble_dis_init_t dis_init; memset(&lns_init, 0, sizeof(lns_init)); lns_init.evt_handler = on_lns_evt; lns_init.lncp_evt_handler = on_ln_ctrlpt_evt; lns_init.error_handler = lns_error_handler; lns_init.is_position_quality_present = true; lns_init.is_control_point_present = true; lns_init.is_navigation_present = true; lns_init.available_features = BLE_LNS_FEATURE_INSTANT_SPEED_SUPPORTED | BLE_LNS_FEATURE_TOTAL_DISTANCE_SUPPORTED | BLE_LNS_FEATURE_LOCATION_SUPPORTED | BLE_LNS_FEATURE_ELEVATION_SUPPORTED | BLE_LNS_FEATURE_HEADING_SUPPORTED | BLE_LNS_FEATURE_ROLLING_TIME_SUPPORTED | BLE_LNS_FEATURE_UTC_TIME_SUPPORTED | BLE_LNS_FEATURE_REMAINING_DISTANCE_SUPPORTED | BLE_LNS_FEATURE_REMAINING_VERT_DISTANCE_SUPPORTED | BLE_LNS_FEATURE_EST_TIME_OF_ARRIVAL_SUPPORTED | BLE_LNS_FEATURE_NUM_SATS_IN_SOLUTION_SUPPORTED | BLE_LNS_FEATURE_NUM_SATS_IN_VIEW_SUPPORTED | BLE_LNS_FEATURE_TIME_TO_FIRST_FIX_SUPPORTED | BLE_LNS_FEATURE_EST_HORZ_POS_ERROR_SUPPORTED | BLE_LNS_FEATURE_EST_VERT_POS_ERROR_SUPPORTED | BLE_LNS_FEATURE_HORZ_DILUTION_OF_PRECISION_SUPPORTED | BLE_LNS_FEATURE_VERT_DILUTION_OF_PRECISION_SUPPORTED | BLE_LNS_FEATURE_LOC_AND_SPEED_CONTENT_MASKING_SUPPORTED | BLE_LNS_FEATURE_FIX_RATE_SETTING_SUPPORTED | BLE_LNS_FEATURE_ELEVATION_SETTING_SUPPORTED | BLE_LNS_FEATURE_POSITION_STATUS_SUPPORTED; m_sim_location_speed = initial_lns_location_speed; m_sim_position_quality = initial_lns_pos_quality; m_sim_navigation = initial_lns_navigation; lns_init.p_location_speed = &m_sim_location_speed; lns_init.p_position_quality = &m_sim_position_quality; lns_init.p_navigation = &m_sim_navigation; lns_init.loc_nav_feature_security_req_read_perm = SEC_OPEN; lns_init.loc_speed_security_req_cccd_write_perm = SEC_OPEN; lns_init.position_quality_security_req_read_perm = SEC_OPEN; lns_init.navigation_security_req_cccd_write_perm = SEC_OPEN; lns_init.ctrl_point_security_req_write_perm = SEC_OPEN; lns_init.ctrl_point_security_req_cccd_write_perm = SEC_OPEN; err_code = ble_lns_init(&m_lns, &lns_init); APP_ERROR_CHECK(err_code); ble_lns_route_t route1 = {.route_name = "Route one"}; err_code = ble_lns_add_route(&m_lns, &route1); ble_lns_route_t route2 = {.route_name = "Route two"}; err_code = ble_lns_add_route(&m_lns, &route2); // Initialize Battery Service memset(&bas_init, 0, sizeof(bas_init)); // Here the sec level for the Battery Service can be changed/increased. BLE_GAP_CONN_SEC_MODE_SET_OPEN(&bas_init.battery_level_char_attr_md.cccd_write_perm); BLE_GAP_CONN_SEC_MODE_SET_OPEN(&bas_init.battery_level_char_attr_md.read_perm); BLE_GAP_CONN_SEC_MODE_SET_NO_ACCESS(&bas_init.battery_level_char_attr_md.write_perm); BLE_GAP_CONN_SEC_MODE_SET_OPEN(&bas_init.battery_level_report_read_perm); bas_init.evt_handler = NULL; bas_init.support_notification = true; bas_init.p_report_ref = NULL; bas_init.initial_batt_level = 100; err_code = ble_bas_init(&m_bas, &bas_init); APP_ERROR_CHECK(err_code); // Initialize Device Information Service memset(&dis_init, 0, sizeof(dis_init)); ble_srv_ascii_to_utf8(&dis_init.manufact_name_str, MANUFACTURER_NAME); BLE_GAP_CONN_SEC_MODE_SET_OPEN(&dis_init.dis_attr_md.read_perm); BLE_GAP_CONN_SEC_MODE_SET_NO_ACCESS(&dis_init.dis_attr_md.write_perm); err_code = ble_dis_init(&dis_init); APP_ERROR_CHECK(err_code); } /**@brief Initialize the simulators. 
*/ static void sim_init(void) { // battery simulation m_battery_sim_cfg.min = MIN_BATTERY_LEVEL; m_battery_sim_cfg.max = MAX_BATTERY_LEVEL; m_battery_sim_cfg.incr = BATTERY_LEVEL_INCREMENT; m_battery_sim_cfg.start_at_max = true; sensorsim_init(&m_battery_sim_state, &m_battery_sim_cfg); } /**@brief Start application timers. */ static void application_timers_start(void) { ret_code_t err_code; // Start application timers err_code = app_timer_start(m_battery_timer_id, BATTERY_LEVEL_MEAS_INTERVAL, NULL); APP_ERROR_CHECK(err_code); err_code = app_timer_start(m_loc_and_nav_timer_id, LOC_AND_NAV_DATA_INTERVAL, NULL); APP_ERROR_CHECK(err_code); } /**@brief Connection Parameters Module handler. * * @details This function will be called for all events in the Connection Parameters Module that * are passed to the application. * @note All this function does is to disconnect. This could have been done by simply * setting the disconnect_on_fail config parameter, but instead we use the event * handler mechanism to demonstrate its use. * * @param[in] p_evt Event received from the Connection Parameters Module. */ static void on_conn_params_evt(ble_conn_params_evt_t * p_evt) { ret_code_t err_code; if (p_evt->evt_type == BLE_CONN_PARAMS_EVT_FAILED) { err_code = sd_ble_gap_disconnect(m_conn_handle, BLE_HCI_CONN_INTERVAL_UNACCEPTABLE); APP_ERROR_CHECK(err_code); } } /**@brief Connection Parameters module error handler. * * @param[in] nrf_error Error code containing information about what went wrong. */ static void conn_params_error_handler(uint32_t nrf_error) { APP_ERROR_HANDLER(nrf_error); } /**@brief Initialize the Connection Parameters module. */ static void conn_params_init(void) { ret_code_t err_code; ble_conn_params_init_t cp_init; memset(&cp_init, 0, sizeof(cp_init)); cp_init.p_conn_params = NULL; cp_init.first_conn_params_update_delay = FIRST_CONN_PARAMS_UPDATE_DELAY; cp_init.next_conn_params_update_delay = NEXT_CONN_PARAMS_UPDATE_DELAY; cp_init.max_conn_params_update_count = MAX_CONN_PARAMS_UPDATE_COUNT; cp_init.start_on_notify_cccd_handle = m_lns.loc_speed_handles.cccd_handle; cp_init.disconnect_on_fail = false; cp_init.evt_handler = on_conn_params_evt; cp_init.error_handler = conn_params_error_handler; err_code = ble_conn_params_init(&cp_init); APP_ERROR_CHECK(err_code); } /**@brief Function for putting the chip into sleep mode. * * @note This function will not return. */ static void sleep_mode_enter(void) { ret_code_t err_code; err_code = bsp_indication_set(BSP_INDICATE_IDLE); APP_ERROR_CHECK(err_code); // Prepare wakeup buttons. err_code = bsp_btn_ble_sleep_mode_prepare(); APP_ERROR_CHECK(err_code); // Go to system-off mode (this function will not return; wakeup will cause a reset). err_code = sd_power_system_off(); APP_ERROR_CHECK(err_code); } /**@brief Function for handling advertising events. * * @details This function will be called for advertising events that are passed to the application. * * @param[in] ble_adv_evt Advertising event. */ static void on_adv_evt(ble_adv_evt_t ble_adv_evt) { ret_code_t err_code; switch (ble_adv_evt) { case BLE_ADV_EVT_FAST: NRF_LOG_INFO("Fast advertising.\r\n"); err_code = bsp_indication_set(BSP_INDICATE_ADVERTISING); APP_ERROR_CHECK(err_code); break; case BLE_ADV_EVT_IDLE: sleep_mode_enter(); break; default: break; } } /**@brief Application's BLE Stack event handler. * * @param[in] p_ble_evt Bluetooth stack event. 
*/ static void on_ble_evt(ble_evt_t * p_ble_evt) { ret_code_t err_code = NRF_SUCCESS; switch (p_ble_evt->header.evt_id) { case BLE_GAP_EVT_CONNECTED: NRF_LOG_INFO("Connected.\r\n"); err_code = bsp_indication_set(BSP_INDICATE_CONNECTED); APP_ERROR_CHECK(err_code); m_conn_handle = p_ble_evt->evt.gap_evt.conn_handle; break; // BLE_GAP_EVT_CONNECTED case BLE_GAP_EVT_DISCONNECTED: NRF_LOG_INFO("Disconnected.\r\n"); err_code = bsp_indication_set(BSP_INDICATE_IDLE); APP_ERROR_CHECK(err_code); m_conn_handle = BLE_CONN_HANDLE_INVALID; break; // BLE_GAP_EVT_DISCONNECTED case BLE_GATTC_EVT_TIMEOUT: // Disconnect on GATT Client timeout event. NRF_LOG_DEBUG("GATT Client Timeout.\r\n"); err_code = sd_ble_gap_disconnect(p_ble_evt->evt.gattc_evt.conn_handle, BLE_HCI_REMOTE_USER_TERMINATED_CONNECTION); APP_ERROR_CHECK(err_code); break; // BLE_GATTC_EVT_TIMEOUT case BLE_GATTS_EVT_TIMEOUT: // Disconnect on GATT Server timeout event. NRF_LOG_DEBUG("GATT Server Timeout.\r\n"); err_code = sd_ble_gap_disconnect(p_ble_evt->evt.gatts_evt.conn_handle, BLE_HCI_REMOTE_USER_TERMINATED_CONNECTION); APP_ERROR_CHECK(err_code); break; // BLE_GATTS_EVT_TIMEOUT case BLE_EVT_USER_MEM_REQUEST: err_code = sd_ble_user_mem_reply(m_conn_handle, NULL); APP_ERROR_CHECK(err_code); break; // BLE_EVT_USER_MEM_REQUEST case BLE_GATTS_EVT_RW_AUTHORIZE_REQUEST: { ble_gatts_evt_rw_authorize_request_t req; ble_gatts_rw_authorize_reply_params_t auth_reply; req = p_ble_evt->evt.gatts_evt.params.authorize_request; if (req.type != BLE_GATTS_AUTHORIZE_TYPE_INVALID) { if ((req.request.write.op == BLE_GATTS_OP_PREP_WRITE_REQ) || (req.request.write.op == BLE_GATTS_OP_EXEC_WRITE_REQ_NOW) || (req.request.write.op == BLE_GATTS_OP_EXEC_WRITE_REQ_CANCEL)) { if (req.type == BLE_GATTS_AUTHORIZE_TYPE_WRITE) { auth_reply.type = BLE_GATTS_AUTHORIZE_TYPE_WRITE; } else { auth_reply.type = BLE_GATTS_AUTHORIZE_TYPE_READ; } auth_reply.params.write.gatt_status = APP_FEATURE_NOT_SUPPORTED; err_code = sd_ble_gatts_rw_authorize_reply(p_ble_evt->evt.gatts_evt.conn_handle, &auth_reply); APP_ERROR_CHECK(err_code); } } } break; // BLE_GATTS_EVT_RW_AUTHORIZE_REQUEST default: break; } APP_ERROR_CHECK(err_code); } /**@brief Dispatches a BLE stack event to all modules with a BLE stack event handler. * * @details This function is called from the BLE Stack event interrupt handler after a BLE stack * event has been received. * * @param[in] p_ble_evt Bluetooth stack event. */ static void ble_evt_dispatch(ble_evt_t * p_ble_evt) { /** The Connection state module has to be fed BLE events in order to function correctly * Remember to call ble_conn_state_on_ble_evt before calling any ble_conns_state_* functions. */ ble_conn_state_on_ble_evt(p_ble_evt); pm_on_ble_evt(p_ble_evt); ble_lns_on_ble_evt(&m_lns, p_ble_evt); ble_bas_on_ble_evt(&m_bas, p_ble_evt); ble_conn_params_on_ble_evt(p_ble_evt); bsp_btn_ble_on_ble_evt(p_ble_evt); on_ble_evt(p_ble_evt); ble_advertising_on_ble_evt(p_ble_evt); nrf_ble_gatt_on_ble_evt(&m_gatt, p_ble_evt); } /**@brief Function for dispatching a system event to interested modules. * * @details This function is called from the System event interrupt handler after a system * event has been received. * * @param[in] sys_evt System stack event. */ static void sys_evt_dispatch(uint32_t sys_evt) { // Dispatch the system event to the fstorage module, where it will be // dispatched to the Flash Data Storage (FDS) module. 
fs_sys_event_handler(sys_evt); // Dispatch to the Advertising module last, since it will check if there are any // pending flash operations in fstorage. Let fstorage process system events first, // so that it can report correctly to the Advertising module. ble_advertising_on_sys_evt(sys_evt); } /**@brief Function for initializing the BLE stack. * * @details Initializes the SoftDevice and the BLE event interrupt. */ static void ble_stack_init(void) { ret_code_t err_code; nrf_clock_lf_cfg_t clock_lf_cfg = NRF_CLOCK_LFCLKSRC; // Initialize the SoftDevice handler module. SOFTDEVICE_HANDLER_INIT(&clock_lf_cfg, NULL); // Fetch the start address of the application RAM. uint32_t ram_start = 0; err_code = softdevice_app_ram_start_get(&ram_start); APP_ERROR_CHECK(err_code); // Overwrite some of the default configurations for the BLE stack. ble_cfg_t ble_cfg; // Configure the number of custom UUIDS. memset(&ble_cfg, 0, sizeof(ble_cfg)); ble_cfg.common_cfg.vs_uuid_cfg.vs_uuid_count = 0; err_code = sd_ble_cfg_set(BLE_COMMON_CFG_VS_UUID, &ble_cfg, ram_start); APP_ERROR_CHECK(err_code); // Configure the maximum number of connections. memset(&ble_cfg, 0, sizeof(ble_cfg)); ble_cfg.gap_cfg.role_count_cfg.periph_role_count = BLE_GAP_ROLE_COUNT_PERIPH_DEFAULT; ble_cfg.gap_cfg.role_count_cfg.central_role_count = 0; ble_cfg.gap_cfg.role_count_cfg.central_sec_count = 0; err_code = sd_ble_cfg_set(BLE_GAP_CFG_ROLE_COUNT, &ble_cfg, ram_start); APP_ERROR_CHECK(err_code); // Enable BLE stack. err_code = softdevice_enable(&ram_start); APP_ERROR_CHECK(err_code); // Register with the SoftDevice handler module for BLE events. err_code = softdevice_ble_evt_handler_set(ble_evt_dispatch); APP_ERROR_CHECK(err_code); // Register with the SoftDevice handler module for BLE events. err_code = softdevice_sys_evt_handler_set(sys_evt_dispatch); APP_ERROR_CHECK(err_code); } /**@brief Function for handling events from the BSP module. * * @param[in] event Event generated by button press. */ void bsp_event_handler(bsp_event_t event) { ret_code_t err_code; switch (event) { case BSP_EVENT_SLEEP: sleep_mode_enter(); break; case BSP_EVENT_DISCONNECT: err_code = sd_ble_gap_disconnect(m_conn_handle, BLE_HCI_REMOTE_USER_TERMINATED_CONNECTION); if (err_code != NRF_ERROR_INVALID_STATE) { APP_ERROR_CHECK(err_code); } break; default: break; } } /**@brief Function for the Peer Manager initialization. */ static void peer_manager_init(void) { ble_gap_sec_params_t sec_param; ret_code_t err_code; err_code = pm_init(); APP_ERROR_CHECK(err_code); memset(&sec_param, 0, sizeof(ble_gap_sec_params_t)); // Security parameters to be used for all security procedures. sec_param.bond = SEC_PARAM_BOND; sec_param.mitm = SEC_PARAM_MITM; sec_param.lesc = SEC_PARAM_LESC; sec_param.keypress = SEC_PARAM_KEYPRESS; sec_param.io_caps = SEC_PARAM_IO_CAPABILITIES; sec_param.oob = SEC_PARAM_OOB; sec_param.min_key_size = SEC_PARAM_MIN_KEY_SIZE; sec_param.max_key_size = SEC_PARAM_MAX_KEY_SIZE; sec_param.kdist_own.enc = 1; sec_param.kdist_own.id = 1; sec_param.kdist_peer.enc = 1; sec_param.kdist_peer.id = 1; err_code = pm_sec_params_set(&sec_param); APP_ERROR_CHECK(err_code); err_code = pm_register(pm_evt_handler); APP_ERROR_CHECK(err_code); } /**@brief Clear bond information from persistent storage. */ static void delete_bonds(void) { ret_code_t err_code; NRF_LOG_INFO("Erase bonds!\r\n"); err_code = pm_peers_delete(); APP_ERROR_CHECK(err_code); } /**@brief Advertising functionality initialization. * * @details Encodes the required advertising data and passes it to the stack. 
* Also builds a structure to be passed to the stack when starting advertising. */ static void advertising_init(void) { ret_code_t err_code; ble_advdata_t advdata; // Build advertising data struct to pass into @ref ble_advertising_init. memset(&advdata, 0, sizeof(advdata)); advdata.name_type = BLE_ADVDATA_FULL_NAME; advdata.include_appearance = true; advdata.flags = BLE_GAP_ADV_FLAGS_LE_ONLY_GENERAL_DISC_MODE; advdata.uuids_complete.uuid_cnt = sizeof(m_adv_uuids) / sizeof(m_adv_uuids[0]); advdata.uuids_complete.p_uuids = m_adv_uuids; ble_adv_modes_config_t options = {0}; options.ble_adv_fast_enabled = true; options.ble_adv_fast_interval = APP_ADV_INTERVAL; options.ble_adv_fast_timeout = APP_ADV_TIMEOUT_IN_SECONDS; err_code = ble_advertising_init(&advdata, NULL, &options, on_adv_evt, NULL); APP_ERROR_CHECK(err_code); } /**@brief Function for initializing buttons and LEDs. * * @param[out] p_erase_bonds Will be true if the clear bonding button was pressed to wake the application up. */ static void buttons_leds_init(bool * p_erase_bonds) { ret_code_t err_code; bsp_event_t startup_event; err_code = bsp_init(BSP_INIT_LED | BSP_INIT_BUTTONS, bsp_event_handler); APP_ERROR_CHECK(err_code); err_code = bsp_btn_ble_init(NULL, &startup_event); APP_ERROR_CHECK(err_code); *p_erase_bonds = (startup_event == BSP_EVENT_CLEAR_BONDING_DATA); } /**@brief Function for initializing logging. */ static void log_init(void) { ret_code_t err_code = NRF_LOG_INIT(NULL); APP_ERROR_CHECK(err_code); } /**@brief Power manager. */ static void power_manage(void) { ret_code_t err_code = sd_app_evt_wait(); APP_ERROR_CHECK(err_code); } /**@brief Function for starting advertising. */ static void advertising_start(bool erase_bonds) { if (erase_bonds == true) { delete_bonds(); // Advertising is started by PM_EVT_PEERS_DELETE_SUCCEEDED event. } else { ret_code_t err_code = ble_advertising_start(BLE_ADV_MODE_FAST); APP_ERROR_CHECK(err_code); } } /**@brief Application main function. */ int main(void) { bool erase_bonds; // Initialize log_init(); timers_init(); buttons_leds_init(&erase_bonds); ble_stack_init(); gap_params_init(); gatt_init(); advertising_init(); services_init(); sim_init(); conn_params_init(); peer_manager_init(); // Start execution application_timers_start(); NRF_LOG_INFO("Location and Navigation example started.\r\n"); advertising_start(erase_bonds); // Enter main loop for (;;) { if (NRF_LOG_PROCESS() == false) { power_manage(); } } } /** * @} */
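/* A calendar-style rollover sketch for the simulated timestamps above.  It is
 * an illustrative variant of increment_time(), not SDK code, and keeps the
 * same simplification of treating every month as 31 days with no leap years;
 * hours wrap after 23, and day and month stay 1-based.
 */
static void increment_time_simple(ble_date_time_t * p_time)
{
    if (++p_time->seconds > 59)
    {
        p_time->seconds = 0;
        if (++p_time->minutes > 59)
        {
            p_time->minutes = 0;
            if (++p_time->hours > 23)
            {
                p_time->hours = 0;
                if (++p_time->day > 31)
                {
                    p_time->day = 1;
                    if (++p_time->month > 12)
                    {
                        p_time->month = 1;
                        p_time->year++;
                    }
                }
            }
        }
    }
}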
185060.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_uint64_uint64 // op(A') function: GB_tran__abs_uint64_uint64 // C type: uint64_t // A type: uint64_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_uint64_uint64 ( uint64_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_uint64_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
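/*
 * For reference, one iteration of the Cx = op (cast (Ax)) loop above expands,
 * per the GB_GETA, GB_CASTING, and GB_OP macros defined in this file, to
 * roughly the following.  Since the operator is ABS applied to an unsigned
 * type, both the cast and the op reduce to the identity:
 *
 *      uint64_t aij = Ax [p] ;             // GB_GETA (aij, Ax, p)
 *      uint64_t x = (uint64_t) aij ;       // GB_CASTING (x, aij)
 *      Cx [p] = x ;                        // GB_OP (GB_CX (p), x)
 */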
978607.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE190_Integer_Overflow__char_fscanf_multiply_54b.c Label Definition File: CWE190_Integer_Overflow.label.xml Template File: sources-sinks-54b.tmpl.c */ /* * @description * CWE: 190 Integer Overflow * BadSource: fscanf Read data from the console using fscanf() * GoodSource: Small, non-zero * Sinks: multiply * GoodSink: Ensure there is no overflow before performing the multiplication * BadSink : Multiply data by 2 * Flow Variant: 54 Data flow: data passed as an argument from one function through three others to a fifth; all five functions are in different source files * * */ #include "std_testcase.h" #ifndef OMITBAD /* bad function declaration */ void CWE190_Integer_Overflow__char_fscanf_multiply_54c_bad_sink(char data); void CWE190_Integer_Overflow__char_fscanf_multiply_54b_bad_sink(char data) { CWE190_Integer_Overflow__char_fscanf_multiply_54c_bad_sink(data); } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B uses the GoodSource with the BadSink */ void CWE190_Integer_Overflow__char_fscanf_multiply_54c_goodG2B_sink(char data); void CWE190_Integer_Overflow__char_fscanf_multiply_54b_goodG2B_sink(char data) { CWE190_Integer_Overflow__char_fscanf_multiply_54c_goodG2B_sink(data); } /* goodB2G uses the BadSource with the GoodSink */ void CWE190_Integer_Overflow__char_fscanf_multiply_54c_goodB2G_sink(char data); void CWE190_Integer_Overflow__char_fscanf_multiply_54b_goodB2G_sink(char data) { CWE190_Integer_Overflow__char_fscanf_multiply_54c_goodB2G_sink(data); } #endif /* OMITGOOD */
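/*
 * The bad and good sinks referenced above live in the _54c source file, which
 * is not included here.  The following is only a rough sketch of the pattern
 * this flow variant exercises (function names are illustrative, not the
 * literal Juliet code): the bad sink doubles the value unconditionally, while
 * the good sink range-checks it first so the result still fits in a char.
 */
#include <limits.h>
#include <stdio.h>

static void sketch_bad_sink(char data)
{
    char result = data * 2; /* doubled value may no longer fit in a char */
    printf("%02x\n", (unsigned char)result);
}

static void sketch_good_sink(char data)
{
    /* Only multiply when 2*data stays within [CHAR_MIN, CHAR_MAX]. */
    if (data > (CHAR_MIN / 2) && data < (CHAR_MAX / 2))
    {
        char result = data * 2;
        printf("%02x\n", (unsigned char)result);
    }
    else
    {
        printf("data value is out of range for safe multiplication\n");
    }
}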
935650.c
/* copyright 2013 Sascha Kruse and contributors (see LICENSE for licensing information) */ #define _GNU_SOURCE #include <time.h> #include <glib.h> #include <errno.h> #include <string.h> #include <stdbool.h> #include <unistd.h> #include <sys/wait.h> #include "dbus.h" #include "x.h" #include "notification.h" #include "dunst.h" #include "utils.h" #include "settings.h" #include "rules.h" #include "menu.h" int next_notification_id = 1; /* * print a human readable representation * of the given notification to stdout. */ void notification_print(notification * n) { printf("{\n"); printf("\tappname: '%s'\n", n->appname); printf("\tsummary: '%s'\n", n->summary); printf("\tbody: '%s'\n", n->body); printf("\ticon: '%s'\n", n->icon); printf("\tcategory: %s\n", n->category); printf("\turgency: %d\n", n->urgency); printf("\tformatted: '%s'\n", n->msg); printf("\tfg: %s\n", n->color_strings[ColFG]); printf("\tbg: %s\n", n->color_strings[ColBG]); printf("\tid: %d\n", n->id); if (n->urls) { printf("\turls\n"); printf("\t{\n"); printf("%s\n", n->urls); printf("\t}\n"); } if (n->actions) { printf("\tactions:\n"); printf("\t{\n"); for (int i = 0; i < n->actions->count; i += 2) { printf("\t\t [%s,%s]\n", n->actions->actions[i], n->actions->actions[i + 1]); } printf("actions_dmenu: %s\n", n->actions->dmenu_str); printf("\t]\n"); } printf("\tscript: %s\n", n->script); printf("}\n"); } /* * Run the script associated with the * given notification. */ void notification_run_script(notification * n) { if (!n->script || strlen(n->script) < 1) return; char *appname = n->appname ? n->appname : ""; char *summary = n->summary ? n->summary : ""; char *body = n->body ? n->body : ""; char *icon = n->icon ? n->icon : ""; char *urgency; switch (n->urgency) { case LOW: urgency = "LOW"; break; case NORM: urgency = "NORMAL"; break; case CRIT: urgency = "CRITICAL"; break; default: urgency = "NORMAL"; break; } int pid1 = fork(); if (pid1) { int status; waitpid(pid1, &status, 0); } else { int pid2 = fork(); if (pid2) { exit(0); } else { int ret = execlp(n->script, n->script, appname, summary, body, icon, urgency, (char *)NULL); if (ret != 0) { PERR("Unable to run script", errno); exit(EXIT_FAILURE); } } } } /* * Helper function to compare to given * notifications. */ int notification_cmp(const void *va, const void *vb) { notification *a = (notification *) va; notification *b = (notification *) vb; if (!settings.sort) return 1; if (a->urgency != b->urgency) { return a->urgency - b->urgency; } else { return b->id - a->id; } } /* * Wrapper for notification_cmp to match glib's * compare functions signature. */ int notification_cmp_data(const void *va, const void *vb, void *data) { return notification_cmp(va, vb); } /* * Free the memory used by the given notification. 
*/ void notification_free(notification * n) { if (n == NULL) return; free(n->appname); free(n->summary); free(n->body); free(n->icon); free(n->msg); free(n->dbus_client); free(n); } /* * Strip any markup from text */ char *notification_fix_markup(char *str) { char *replace_buf, *start, *end; if (str == NULL) { return NULL; } str = string_replace_all("&quot;", "\"", str); str = string_replace_all("&apos;", "'", str); str = string_replace_all("&amp;", "&", str); str = string_replace_all("&lt;", "<", str); str = string_replace_all("&gt;", ">", str); /* remove tags */ str = string_replace_all("<b>", "", str); str = string_replace_all("</b>", "", str); str = string_replace_all("<br>", " ", str); str = string_replace_all("<br/>", " ", str); str = string_replace_all("<br />", " ", str); str = string_replace_all("<i>", "", str); str = string_replace_all("</i>", "", str); str = string_replace_all("<u>", "", str); str = string_replace_all("</u>", "", str); str = string_replace_all("</a>", "", str); while ((start = strstr(str, "<a href")) != NULL) { end = strstr(start, ">"); if (end != NULL) { replace_buf = strndup(start, end - start + 1); str = string_replace(replace_buf, "", str); free(replace_buf); } else { break; } } while ((start = strstr(str, "<img src")) != NULL) { end = strstr(start, "/>"); if (end != NULL) { replace_buf = strndup(start, end - start + 2); str = string_replace(replace_buf, "", str); free(replace_buf); } else { break; } } return str; } char *notification_extract_markup_urls(char **str_ptr) { char *start, *end, *replace_buf, *str, *urls = NULL, *url, *index_buf; int linkno = 1; str = *str_ptr; while ((start = strstr(str, "<a href")) != NULL) { end = strstr(start, ">"); if (end != NULL) { replace_buf = strndup(start, end - start + 1); url = extract_urls(replace_buf); if (url != NULL) { str = string_replace(replace_buf, "[", str); index_buf = g_strdup_printf("[#%d]", linkno++); if (urls == NULL) { urls = g_strconcat(index_buf, " ", url, NULL); } else { char *tmp = urls; urls = g_strconcat(tmp, "\n", index_buf, " ", url, NULL); free(tmp); } index_buf[0] = ' '; str = string_replace("</a>", index_buf, str); free(index_buf); free(url); } else { str = string_replace(replace_buf, "", str); str = string_replace("</a>", "", str); } free(replace_buf); } else { break; } } *str_ptr = str; return urls; } /* * Initialize the given notification and add it to * the queue. Replace notification with id if id > 0. 
*/ int notification_init(notification * n, int id) { if (n == NULL) return -1; if (strcmp("DUNST_COMMAND_PAUSE", n->summary) == 0) { pause_display = true; return 0; } if (strcmp("DUNST_COMMAND_RESUME", n->summary) == 0) { pause_display = false; return 0; } n->script = NULL; n->text_to_render = NULL; n->format = settings.format; rule_apply_all(n); n->urls = notification_extract_markup_urls(&(n->body)); n->msg = string_replace("%a", n->appname, g_strdup(n->format)); n->msg = string_replace("%s", n->summary, n->msg); if (n->icon) { n->msg = string_replace("%I", basename(n->icon), n->msg); n->msg = string_replace("%i", n->icon, n->msg); } n->msg = string_replace("%b", n->body, n->msg); if (n->progress) { char pg[10]; sprintf(pg, "[%3d%%]", n->progress - 1); n->msg = string_replace("%p", pg, n->msg); } else { n->msg = string_replace("%p", "", n->msg); } if (!settings.allow_markup) n->msg = notification_fix_markup(n->msg); else if (!settings.ignore_newline) { n->msg = string_replace("<br>", "\n", n->msg); n->msg = string_replace("<br />", "\n", n->msg); } while (strstr(n->msg, "\\n") != NULL) n->msg = string_replace("\\n", "\n", n->msg); if (settings.ignore_newline) while (strstr(n->msg, "\n") != NULL) n->msg = string_replace("\n", " ", n->msg); n->msg = g_strstrip(n->msg); if (id == 0) { n->id = ++next_notification_id; } else { notification_close_by_id(id, -1); n->id = id; } n->dup_count = 0; /* check if n is a duplicate */ if (settings.stack_duplicates) { for (GList * iter = g_queue_peek_head_link(queue); iter; iter = iter->next) { notification *orig = iter->data; if (strcmp(orig->appname, n->appname) == 0 && strcmp(orig->summary, n->summary) == 0 && strcmp(orig->body, n->body) == 0) { /* If the progress differs this was probably intended to replace the notification * but notify-send was used. So don't increment dup_count in this case */ if (orig->progress == n->progress) { orig->dup_count++; } else { orig->progress = n->progress; } /* notifications that differ only in progress hints should be expected equal, * but we want the latest message, with the latest hint value */ free(orig->msg); orig->msg = strdup(n->msg); notification_free(n); wake_up(); return orig->id; } } for (GList * iter = g_queue_peek_head_link(displayed); iter; iter = iter->next) { notification *orig = iter->data; if (strcmp(orig->appname, n->appname) == 0 && strcmp(orig->summary, n->summary) == 0 && strcmp(orig->body, n->body) == 0) { /* notifications that differ only in progress hints should be expected equal, * but we want the latest message, with the latest hint value */ free(orig->msg); orig->msg = strdup(n->msg); /* If the progress differs this was probably intended to replace the notification * but notify-send was used. So don't increment dup_count in this case */ if (orig->progress == n->progress) { orig->dup_count++; } else { orig->progress = n->progress; } orig->start = time(NULL); notification_free(n); wake_up(); return orig->id; } } } /* urgency > CRIT -> array out of range */ n->urgency = n->urgency > CRIT ? CRIT : n->urgency; if (!n->color_strings[ColFG]) { n->color_strings[ColFG] = xctx.color_strings[ColFG][n->urgency]; } if (!n->color_strings[ColBG]) { n->color_strings[ColBG] = xctx.color_strings[ColBG][n->urgency]; } n->timeout = n->timeout == -1 ? 
settings.timeouts[n->urgency] : n->timeout; n->start = 0; if (n->icon == NULL) { n->icon = strdup(settings.icons[n->urgency]); } else if (strlen(n->icon) <= 0) { free(n->icon); n->icon = strdup(settings.icons[n->urgency]); } if (n->category == NULL) { n->category = ""; } n->timestamp = time(NULL); n->redisplayed = false; n->first_render = true; if (strlen(n->msg) == 0) { notification_close(n, 2); printf("skipping notification: %s %s\n", n->body, n->summary); } else { // g_queue_insert_sorted(queue, n, notification_cmp_data, NULL); history_push(n); } char *tmp = g_strconcat(n->summary, " ", n->body, NULL); char *tmp_urls = extract_urls(tmp); if (tmp_urls != NULL) { if (n->urls != NULL) { n->urls = string_append(n->urls, tmp_urls, "\n"); free(tmp_urls); } else { n->urls = tmp_urls; } } if (n->actions) { n->actions->dmenu_str = NULL; for (int i = 0; i < n->actions->count; i += 2) { char *human_readable = n->actions->actions[i + 1]; string_replace_char('[', '(', human_readable); // kill square brackets string_replace_char(']', ')', human_readable); n->actions->dmenu_str = string_append(n->actions->dmenu_str, g_strdup_printf("#%s [%s]", human_readable, n->appname), "\n"); } } free(tmp); if (settings.print_notifications) notification_print(n); return n->id; } /* * Close the notification that has id. * * reasons: * -1 -> notification is a replacement, no NotificationClosed signal emitted * 1 -> the notification expired * 2 -> the notification was dismissed by the user_data * 3 -> The notification was closed by a call to CloseNotification */ int notification_close_by_id(int id, int reason) { int free = 0; notification *target = NULL; for (GList * iter = g_queue_peek_head_link(displayed); iter; iter = iter->next) { notification *n = iter->data; if (n->id == id) { g_queue_remove(displayed, n); if(reason != 4) { history_push(n); } else free =1 ; target = n; break; } } for (GList * iter = g_queue_peek_head_link(queue); iter; iter = iter->next) { notification *n = iter->data; if (n->id == id) { g_queue_remove(queue, n); if(reason != 4) { history_push(n); } else free =1 ; target = n; break; } } if (reason > 0 && reason < 5 && target != NULL) { notificationClosed(target, reason); } if(free) { notification_free(target); } wake_up(); return reason; } /* * Close the given notification. SEE notification_close_by_id. */ int notification_close(notification * n, int reason) { if (n == NULL) return -1; return notification_close_by_id(n->id, reason); } void notification_update_text_to_render(notification *n) { if (n->text_to_render) { free(n->text_to_render); n->text_to_render = NULL; } char *buf = NULL; char *msg = g_strstrip(n->msg); /* print dup_count and msg */ if (n->dup_count > 0 && (n->actions || n->urls) && settings.show_indicators) { buf = g_strdup_printf("(%d%s%s) %s", n->dup_count, n->actions ? "A" : "", n->urls ? "U" : "", msg); } else if ((n->actions || n->urls) && settings.show_indicators) { buf = g_strdup_printf("(%s%s) %s", n->actions ? "A" : "", n->urls ? 
"U" : "", msg); } else if (n->dup_count > 0) { buf = g_strdup_printf("(%d) %s", n->dup_count, msg); } else { buf = g_strdup(msg); } /* print age */ int hours, minutes, seconds; time_t t_delta = time(NULL) - n->timestamp; if (settings.show_age_threshold >= 0 && t_delta >= settings.show_age_threshold) { hours = t_delta / 3600; minutes = t_delta / 60 % 60; seconds = t_delta % 60; char *new_buf; if (hours > 0) { new_buf = g_strdup_printf("%s (%dh %dm %ds old)", buf, hours, minutes, seconds); } else if (minutes > 0) { new_buf = g_strdup_printf("%s (%dm %ds old)", buf, minutes, seconds); } else { new_buf = g_strdup_printf("%s (%ds old)", buf, seconds); } free(buf); buf = new_buf; } n->text_to_render = buf; } int notification_get_ttl(notification *n) { if (n->timeout == 0) { return -1; } else { return n->timeout - (time(NULL) - n->start); } } int notification_get_age(notification *n) { return time(NULL) - n->timestamp; } /* vim: set ts=8 sw=8 tw=0: */
388265.c
//------------------------------------------------------------------------------ // GrB_BinaryOp_new: create a new user-defined binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2018, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // GrB_BinaryOp_new is implemented both as a macro and a function. Both are // user-callable. The macro is used by default since it can capture the name // of the binary function. #include "GB.h" // the macro version of this function must first be #undefined #undef GrB_BinaryOp_new GrB_Info GrB_BinaryOp_new ( GrB_BinaryOp *binaryop, // handle for the new binary operator void *function, // pointer to the binary function const GrB_Type ztype, // type of output z const GrB_Type xtype, // type of input x const GrB_Type ytype // type of input y ) { return (GB_BinaryOp_new (binaryop, function, ztype, xtype, ytype, "f")) ; }
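/*
 * A minimal usage sketch (not part of this source file): the user supplies a
 * C function operating on pointers to the declared z, x, and y types, and
 * GrB_BinaryOp_new wraps it into an operator.  The names my_plus_fp64 and
 * make_my_op are illustrative only.
 */
#include "GraphBLAS.h"

void my_plus_fp64 (double *z, const double *x, const double *y)
{
    (*z) = (*x) + (*y) ;
}

GrB_Info make_my_op (GrB_BinaryOp *op)
{
    // z, x, and y are all declared as GrB_FP64 (double)
    return (GrB_BinaryOp_new (op, my_plus_fp64, GrB_FP64, GrB_FP64, GrB_FP64)) ;
}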
912116.c
/**************************************************************************** * * Copyright 2017 Samsung Electronics All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific * language governing permissions and limitations under the License. * ****************************************************************************/ /**************************************************************************** * arch/arm/src/tiva/tm4c_ethernet.c * * Copyright (C) 2014-2015 Gregory Nutt. All rights reserved. * Author: Gregory Nutt <[email protected]> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name NuttX nor the names of its contributors may be * used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* ****************************************************************************/ /**************************************************************************** * Included Files ****************************************************************************/ #include <tinyara/config.h> #if defined(CONFIG_NET) && defined(CONFIG_TIVA_ETHERNET) #include <stdint.h> #include <stdbool.h> #include <time.h> #include <string.h> #include <debug.h> #include <queue.h> #include <errno.h> #include <arpa/inet.h> #include <tinyara/arch.h> #include <tinyara/irq.h> #include <tinyara/wdog.h> #ifdef CONFIG_NET_NOINTS #include <tinyara/wqueue.h> #endif #include <tinyara/net/mii.h> #include <tinyara/net/netdev.h> #ifdef CONFIG_TIVA_PHY_INTERRUPTS #include <tinyara/net/phy.h> #endif #include "up_internal.h" #include "chip.h" #include "tiva_gpio.h" #include "tiva_syscontrol.h" #include "tiva_enablepwr.h" #include "tiva_enableclks.h" #include "tiva_periphrdy.h" #include "tiva_ethernet.h" #include "chip/tiva_pinmap.h" #include <arch/board/board.h> /* TIVA_NETHCONTROLLERS determines the number of physical interfaces * that will be supported. */ #if TIVA_NETHCONTROLLERS > 0 /**************************************************************************** * Definitions ****************************************************************************/ /* Configuration ************************************************************/ #if TIVA_NETHCONTROLLERS > 1 #error Logic to support multiple Ethernet interfaces is incomplete #endif /* If processing is not done at the interrupt level, then high priority * work queue support is required. */ #if defined(CONFIG_NET_NOINTS) && !defined(CONFIG_SCHED_HPWORK) #error High priority work queue support is required #endif /* Are we using the internal PHY or an external PHY? */ #if defined(CONFIG_TIVA_PHY_INTERNAL) /* Internal PHY */ #if defined(CONFIG_TIVA_PHY_MII) || defined(CONFIG_TIVA_PHY_RMII) #warning CONFIG_TIVA_PHY_MII or CONFIG_TIVA_PHY_RMII defined with internal PHY #endif #undef CONFIG_TIVA_PHY_MII #undef CONFIG_TIVA_PHY_RMII /* Properties of the internal PHY are hard-coded */ #undef CONFIG_TIVA_PHYADDR #undef CONFIG_TIVA_PHYSR_ALTCONFIG #undef CONFIG_TIVA_PHYSR_ALTMODE #undef CONFIG_TIVA_PHYSR_10HD #undef CONFIG_TIVA_PHYSR_100HD #undef CONFIG_TIVA_PHYSR_10FD #undef CONFIG_TIVA_PHYSR_100FD #undef CONFIG_TIVA_PHYSR_SPEED #undef CONFIG_TIVA_PHYSR_100MBPS #undef CONFIG_TIVA_PHYSR_MODE #undef CONFIG_TIVA_PHYSR_FULLDUPLEX #define CONFIG_TIVA_PHYADDR 0 #define CONFIG_TIVA_PHYSR TIVA_EPHY_STS #define CONFIG_TIVA_PHYSR_SPEED EPHY_STS_SPEED #define CONFIG_TIVA_PHYSR_100MBPS 0 #define CONFIG_TIVA_PHYSR_MODE EPHY_STS_DUPLEX #define CONFIG_TIVA_PHYSR_FULLDUPLEX EPHY_STS_DUPLEX #else /* External PHY. 
Properties must be provided in the configuration */ #if !defined(CONFIG_TIVA_PHY_MII) && !defined(CONFIG_TIVA_PHY_RMII) #warning None of CONFIG_TIVA_PHY_INTERNAL, CONFIG_TIVA_PHY_MII, or CONFIG_TIVA_PHY_RMII defined #endif #if defined(CONFIG_TIVA_PHY_MII) && defined(CONFIG_TIVA_PHY_RMII) #error Both CONFIG_TIVA_PHY_MII and CONFIG_TIVA_PHY_RMII defined #endif #endif #ifndef CONFIG_TIVA_PHYADDR #error CONFIG_TIVA_PHYADDR must be defined in the NuttX configuration #endif #ifdef CONFIG_TIVA_AUTONEG #ifndef CONFIG_TIVA_PHYSR #error CONFIG_TIVA_PHYSR must be defined in the NuttX configuration #endif #ifdef CONFIG_TIVA_PHYSR_ALTCONFIG #ifndef CONFIG_TIVA_PHYSR_ALTMODE #error CONFIG_TIVA_PHYSR_ALTMODE must be defined in the NuttX configuration #endif #ifndef CONFIG_TIVA_PHYSR_10HD #error CONFIG_TIVA_PHYSR_10HD must be defined in the NuttX configuration #endif #ifndef CONFIG_TIVA_PHYSR_100HD #error CONFIG_TIVA_PHYSR_100HD must be defined in the NuttX configuration #endif #ifndef CONFIG_TIVA_PHYSR_10FD #error CONFIG_TIVA_PHYSR_10FD must be defined in the NuttX configuration #endif #ifndef CONFIG_TIVA_PHYSR_100FD #error CONFIG_TIVA_PHYSR_100FD must be defined in the NuttX configuration #endif #else #ifndef CONFIG_TIVA_PHYSR_SPEED #error CONFIG_TIVA_PHYSR_SPEED must be defined in the NuttX configuration #endif #ifndef CONFIG_TIVA_PHYSR_100MBPS #error CONFIG_TIVA_PHYSR_100MBPS must be defined in the NuttX configuration #endif #ifndef CONFIG_TIVA_PHYSR_MODE #error CONFIG_TIVA_PHYSR_MODE must be defined in the NuttX configuration #endif #ifndef CONFIG_TIVA_PHYSR_FULLDUPLEX #error CONFIG_TIVA_PHYSR_FULLDUPLEX must be defined in the NuttX configuration #endif #endif #endif #ifdef CONFIG_TIVA_EMAC_PTP #warning CONFIG_TIVA_EMAC_PTP is not yet supported #endif /* This driver does not use enhanced descriptors. Enhanced descriptors must * be used, however, if time stamping or and/or IPv4 checksum offload is * supported. */ #undef CONFIG_TIVA_EMAC_ENHANCEDDESC #undef CONFIG_TIVA_EMAC_HWCHECKSUM /* Ethernet buffer sizes, number of buffers, and number of descriptors */ #ifndef CONFIG_NET_MULTIBUFFER #error CONFIG_NET_MULTIBUFFER is required #endif #ifndef CONFIG_TIVA_EMAC_NRXDESC #define CONFIG_TIVA_EMAC_NRXDESC 8 #endif #ifndef CONFIG_TIVA_EMAC_NTXDESC #define CONFIG_TIVA_EMAC_NTXDESC 4 #endif /* Add 4 to the configured buffer size to account for the 2 byte checksum * memory needed at the end of the maximum size packet. Buffer sizes must * be an even multiple of 4, 8, or 16 bytes (depending on buswidth). We * will use the 16-byte alignment in all cases. */ #define OPTIMAL_EMAC_BUFSIZE ((CONFIG_NET_ETH_MTU + 4 + 15) & ~15) #if OPTIMAL_EMAC_BUFSIZE > EMAC_TDES1_TBS1_MASK #error OPTIMAL_EMAC_BUFSIZE is too large #endif #if (OPTIMAL_EMAC_BUFSIZE & 15) != 0 #error OPTIMAL_EMAC_BUFSIZE must be aligned #endif #if OPTIMAL_EMAC_BUFSIZE != OPTIMAL_EMAC_BUFSIZE #warning You using an incomplete/untested configuration #endif /* We need at least one more free buffer than transmit buffers */ #define TIVA_EMAC_NFREEBUFFERS (CONFIG_TIVA_EMAC_NTXDESC+1) /* Extremely detailed register debug that you would normally never want * enabled. 
*/ #ifndef CONFIG_DEBUG #undef CONFIG_TIVA_ETHERNET_REGDEBUG #endif /* Clocking *****************************************************************/ /* Set MIIADDR CR bits depending on SysClk frequency */ #if SYSCLK_FREQUENCY >= 20000000 && SYSCLK_FREQUENCY < 35000000 #define EMAC_MIIADDR_CR EMAC_MIIADDR_CR_20_35 #elif SYSCLK_FREQUENCY >= 35000000 && SYSCLK_FREQUENCY <= 64000000 #define EMAC_MIIADDR_CR EMAC_MIIADDR_CR_35_60 #elif SYSCLK_FREQUENCY >= 60000000 && SYSCLK_FREQUENCY <= 104000000 #define EMAC_MIIADDR_CR EMAC_MIIADDR_CR_60_100 #elif SYSCLK_FREQUENCY >= 100000000 && SYSCLK_FREQUENCY <= 150000000 #define EMAC_MIIADDR_CR EMAC_MIIADDR_CR_100_150 #elif SYSCLK_FREQUENCY >= 150000000 && SYSCLK_FREQUENCY <= 168000000 #define EMAC_MIIADDR_CR EMAC_MIIADDR_CR_150_168 #else #error SYSCLK_FREQUENCY not supportable #endif /* Timing *******************************************************************/ /* TX poll delay = 1 seconds. CLK_TCK is the number of clock ticks per * second */ #define TIVA_WDDELAY (1*CLK_TCK) #define TIVA_POLLHSEC (1*2) /* TX timeout = 1 minute */ #define TIVA_TXTIMEOUT (60*CLK_TCK) /* PHY reset/configuration delays in milliseconds */ #define PHY_RESET_DELAY (65) #define PHY_CONFIG_DELAY (1000) /* PHY read/write delays in loop counts */ #define PHY_READ_TIMEOUT (0x0004ffff) #define PHY_WRITE_TIMEOUT (0x0004ffff) #define PHY_RETRY_TIMEOUT (0x0004ffff) /* Register values **********************************************************/ /* Clear the MACCR bits that will be setup during MAC initialization (or that * are cleared unconditionally). Per the reference manual, all reserved bits * must be retained at their reset value. * * EMAC_CFG_RE Bit 2: Receiver enable * EMAC_CFG_TE Bit 3: Transmitter enable * EMAC_CFG_DC Bit 4: Deferral check * EMAC_CFG_BL Bits 5-6: Back-off limit * EMAC_CFG_ACS Bit 7: Automatic pad/CRC stripping * EMAC_CFG_DR Bit 9: Retry disable * EMAC_CFG_IPC Bit 10: IPv4 checksum offload * EMAC_CFG_DUPM Bit 11: Duplex mode * EMAC_CFG_LOOPBM Bit 12: Loopback mode * EMAC_CFG_DRO Bit 13: Receive own disable * EMAC_CFG_FES Bit 14: Fast Ethernet speed * EMAC_CFG_PS Bit 15: Port Select * EMAC_CFG_DISCRS Bit 16: Carrier sense disable * EMAC_CFG_IFG Bits 17-19: Interframe gap * EMAC_CFG_JFEN Bit 20: Jumbo Frame Enable * EMAC_CFG_JD Bit 22: Jabber disable * EMAC_CFG_WDDIS Bit 23: Watchdog disable * EMAC_CFG_CST Bit 25: CRC stripping for Type frames * EMAC_CFG_TWOKPEN Bit 27: IEEE 802 * EMAC_CFG_SADDR Bits 28-30: Source Address Insertion or Replacement Control */ #define MACCR_CLEAR_BITS \ (EMAC_CFG_RE | EMAC_CFG_TE | EMAC_CFG_DC | EMAC_CFG_BL_MASK | \ EMAC_CFG_ACS | EMAC_CFG_DR | EMAC_CFG_IPC | EMAC_CFG_DUPM | \ EMAC_CFG_LOOPBM | EMAC_CFG_DRO | EMAC_CFG_FES | EMAC_CFG_DISCRS | \ EMAC_CFG_IFG_MASK | EMAC_CFG_JD | EMAC_CFG_WDDIS | EMAC_CFG_CST) /* The following bits are set or left zero unconditionally in all modes. 
* * EMAC_CFG_RE Receiver enable 0 (disabled) * EMAC_CFG_TE Transmitter enable 0 (disabled) * EMAC_CFG_DC Deferral check 0 (disabled) * EMAC_CFG_BL Back-off limit 0 (10) * EMAC_CFG_ACS Automatic pad/CRC stripping 0 (disabled) * EMAC_CFG_DR Retry disable 1 (disabled) * EMAC_CFG_IPC IPv4 checksum offload Depends on CONFIG_TIVA_EMAC_HWCHECKSUM * EMAC_CFG_LOOPBM Loopback mode 0 (disabled) * EMAC_CFG_DRO Receive own disable 0 (enabled) * EMAC_CFG_PS Port Select (read-only) * EMAC_CFG_DISCRS Carrier sense disable 0 (enabled) * EMAC_CFG_IFG Interframe gap 0 (96 bits) * EMAC_CFG_JFEN Jumbo Frame Enable 0 (jumbo frame creates error) * EMAC_CFG_JD Jabber disable 0 (enabled) * EMAC_CFG_WDDIS Watchdog disable 0 (enabled) * EMAC_CFG_CST CRC stripping for Type frames 0 (disabled, F2/F4 only) * EMAC_CFG_TWOKPEN IEEE 802 0 (>1518 == giant frame) * EMAC_CFG_SADDR Source Address Insertion or * Replacement Control * * The following are set conditionally based on mode and speed. * * EMAC_CFG_DUPM Duplex mode Depends on priv->fduplex * EMAC_CFG_FES Fast Ethernet speed Depends on priv->mbps100 */ #ifdef CONFIG_TIVA_EMAC_HWCHECKSUM #define MACCR_SET_BITS \ (EMAC_CFG_BL_10 | EMAC_CFG_DR | EMAC_CFG_IPC | EMAC_CFG_IFG_96) #else #define MACCR_SET_BITS \ (EMAC_CFG_BL_10 | EMAC_CFG_DR | EMAC_CFG_IFG_96) #endif /* Clear the MACCR bits that will be setup during MAC initialization (or that * are cleared unconditionally). Per the reference manual, all reserved bits * must be retained at their reset value. * * EMAC_FRAMEFLTR_PR Bit 0: Promiscuous mode * EMAC_FRAMEFLTR_HUC Bit 1: Hash unicast * EMAC_FRAMEFLTR_HMC Bit 2: Hash multicast * EMAC_FRAMEFLTR_DAIF Bit 3: Destination address inverse filtering * EMAC_FRAMEFLTR_PM Bit 4: Pass all multicast * EMAC_FRAMEFLTR_DBF Bit 5: Broadcast frames disable * EMAC_FRAMEFLTR_PCF Bits 6-7: Pass control frames * EMAC_FRAMEFLTR_SAIF Bit 8: Source address inverse filtering * EMAC_FRAMEFLTR_SAF Bit 9: Source address filter * EMAC_FRAMEFLTR_HPF Bit 10: Hash or perfect filter * EMAC_FRAMEFLTR_VTFE Bit 16: VLAN Tag Filter Enable * EMAC_FRAMEFLTR_RA Bit 31: Receive all */ #define FRAMEFLTR_CLEAR_BITS \ (EMAC_FRAMEFLTR_PR | EMAC_FRAMEFLTR_HUC | EMAC_FRAMEFLTR_HMC | EMAC_FRAMEFLTR_DAIF | \ EMAC_FRAMEFLTR_PM | EMAC_FRAMEFLTR_DBF | EMAC_FRAMEFLTR_PCF_MASK | EMAC_FRAMEFLTR_SAIF | \ EMAC_FRAMEFLTR_SAF | EMAC_FRAMEFLTR_HPF | EMAC_FRAMEFLTR_RA) /* The following bits are set or left zero unconditionally in all modes. * * EMAC_FRAMEFLTR_PR Promiscuous mode 0 (disabled) * EMAC_FRAMEFLTR_HUC Hash unicast 0 (perfect dest filtering) * EMAC_FRAMEFLTR_HMC Hash multicast 0 (perfect dest filtering) * EMAC_FRAMEFLTR_DAIF Destination address inverse filtering 0 (normal) * EMAC_FRAMEFLTR_PM Pass all multicast 0 (Depends on HM bit) * EMAC_FRAMEFLTR_DBF Broadcast frames disable 0 (enabled) * EMAC_FRAMEFLTR_PCF Pass control frames 1 (block all but PAUSE) * EMAC_FRAMEFLTR_SAIF Source address inverse filtering 0 (not used) * EMAC_FRAMEFLTR_SAF Source address filter 0 (disabled) * EMAC_FRAMEFLTR_HPF Hash or perfect filter 0 (Only matching frames passed) * EMAC_FRAMEFLTR_VTFE VLAN Tag Filter Enable 0 (VLAN tag ignored) * EMAC_FRAMEFLTR_RA Receive all 0 (disabled) */ #define FRAMEFLTR_SET_BITS (EMAC_FRAMEFLTR_PCF_PAUSE) /* Clear the FLOWCTL bits that will be setup during MAC initialization (or that * are cleared unconditionally). Per the reference manual, all reserved bits * must be retained at their reset value. 
* * EMAC_FLOWCTL_FCBBPA Bit 0: Flow control busy/back pressure activate * EMAC_FLOWCTL_TFE Bit 1: Transmit flow control enable * EMAC_FLOWCTL_RFE Bit 2: Receive flow control enable * EMAC_FLOWCTL_UP Bit 3: Unicast pause frame detect * EMAC_FLOWCTL_PLT Bits 4-5: Pause low threshold * EMAC_FLOWCTL_DZQP Bit 7: Zero-quanta pause disable * EMAC_FLOWCTL_PT Bits 16-31: Pause time */ #define FLOWCTL_CLEAR_MASK \ (EMAC_FLOWCTL_FCBBPA | EMAC_FLOWCTL_TFE | EMAC_FLOWCTL_RFE | EMAC_FLOWCTL_UP | \ EMAC_FLOWCTL_PLT_MASK | EMAC_FLOWCTL_DZQP | EMAC_FLOWCTL_PT_MASK) /* The following bits are set or left zero unconditionally in all modes. * * EMAC_FLOWCTL_FCBBPA Flow control busy/back pressure activate 0 (no pause control frame) * EMAC_FLOWCTL_TFE Transmit flow control enable 0 (disabled) * EMAC_FLOWCTL_RFE Receive flow control enable 0 (disabled) * EMAC_FLOWCTL_UP Unicast pause frame detect 0 (disabled) * EMAC_FLOWCTL_PLT Pause low threshold 0 (pause time - 4) * EMAC_FLOWCTL_DZQP Zero-quanta pause disable 1 (disabled) * EMAC_FLOWCTL_PT Pause time 0 */ #define FLOWCTL_SET_MASK (EMAC_FLOWCTL_PLT_M4 | EMAC_FLOWCTL_DZQP) /* Clear the DMAOPMODE bits that will be setup during MAC initialization (or that * are cleared unconditionally). Per the reference manual, all reserved bits * must be retained at their reset value. * * EMAC_DMAOPMODE_SR Bit 1: Start/stop receive * EMAC_DMAOPMODE_OSF Bit 2: Operate on second frame * EMAC_DMAOPMODE_RTC Bits 3-4: Receive threshold control * EMAC_DMAOPMODE_DGF Bit 5: Drop giant frames enable * EMAC_DMAOPMODE_FUF Bit 6: Forward undersized good frames * EMAC_DMAOPMODE_FEF Bit 7: Forward error frames * EMAC_DMAOPMODE_ST Bit 13: Start/stop transmission * EMAC_DMAOPMODE_TTC Bits 14-16: Transmit threshold control * EMAC_DMAOPMODE_FTF Bit 20: Flush transmit FIFO * EMAC_DMAOPMODE_TSF Bit 21: Transmit store and forward * EMAC_DMAOPMODE_DFF Bit 24: Disable flushing of received frames * EMAC_DMAOPMODE_RSF Bit 25: Receive store and forward * EMAC_DMAOPMODE_DT Bit 26: Dropping of TCP/IP checksum error frames disable */ #define DMAOPMODE_CLEAR_MASK \ (EMAC_DMAOPMODE_SR | EMAC_DMAOPMODE_OSF | EMAC_DMAOPMODE_RTC_MASK | EMAC_DMAOPMODE_DGF | \ EMAC_DMAOPMODE_FUF | EMAC_DMAOPMODE_FEF | EMAC_DMAOPMODE_ST | EMAC_DMAOPMODE_TTC_MASK | \ EMAC_DMAOPMODE_FTF | EMAC_DMAOPMODE_TSF | EMAC_DMAOPMODE_DFF | EMAC_DMAOPMODE_RSF | \ EMAC_DMAOPMODE_DT) /* The following bits are set or left zero unconditionally in all modes. 
* * EMAC_DMAOPMODE_SR Start/stop receive 0 (not running) * EMAC_DMAOPMODE_OSF Operate on second frame 1 (enabled) * EMAC_DMAOPMODE_RTC Receive threshold control 0 (64 bytes) * EMAC_DMAOPMODE_FUF Forward undersized good frames 0 (disabled) * EMAC_DMAOPMODE_FEF Forward error frames 0 (disabled) * EMAC_DMAOPMODE_ST Start/stop transmission 0 (not running) * EMAC_DMAOPMODE_TTC Transmit threshold control 0 (64 bytes) * EMAC_DMAOPMODE_FTF Flush transmit FIFO 0 (no flush) * EMAC_DMAOPMODE_TSF Transmit store and forward Depends on CONFIG_TIVA_EMAC_HWCHECKSUM * EMAC_DMAOPMODE_DFF Disable flushing of received frames 0 (enabled) * EMAC_DMAOPMODE_RSF Receive store and forward Depends on CONFIG_TIVA_EMAC_HWCHECKSUM * EMAC_DMAOPMODE_DT Dropping of TCP/IP checksum error Depends on CONFIG_TIVA_EMAC_HWCHECKSUM * frames disable * * When the checksum offload feature is enabled, we need to enable the Store * and Forward mode: the store and forward guarantee that a whole frame is * stored in the FIFO, so the MAC can insert/verify the checksum, if the * checksum is OK the DMA can handle the frame otherwise the frame is dropped */ #ifdef CONFIG_TIVA_EMAC_HWCHECKSUM #define DMAOPMODE_SET_MASK \ (EMAC_DMAOPMODE_OSF | EMAC_DMAOPMODE_RTC_64 | EMAC_DMAOPMODE_TTC_64 | \ EMAC_DMAOPMODE_TSF | EMAC_DMAOPMODE_RSF) #else #define DMAOPMODE_SET_MASK \ (EMAC_DMAOPMODE_OSF | EMAC_DMAOPMODE_RTC_64 | EMAC_DMAOPMODE_TTC_64 | \ EMAC_DMAOPMODE_DT) #endif /* Clear the DMABUSMOD bits that will be setup during MAC initialization (or that * are cleared unconditionally). Per the reference manual, all reserved bits * must be retained at their reset value. * * EMAC_DMABUSMOD_SWR Bit 0: Software reset * EMAC_DMABUSMOD_DA Bit 1: DMA Arbitration * EMAC_DMABUSMOD_DSL Bits 2-6: Descriptor skip length * EMAC_DMABUSMOD_ATDS Bit 7: Enhanced descriptor format enable * EMAC_DMABUSMOD_PBL Bits 8-13: Programmable burst length * EMAC_DMABUSMOD_PR Bits 14-15: RX TX priority ratio * EMAC_DMABUSMOD_FB Bit 16: Fixed burst * EMAC_DMABUSMOD_RPBL Bits 17-22: RX DMA programmable bust length * EMAC_DMABUSMOD_USP Bit 23: Use separate PBL * EMAC_DMABUSMOD_8XPBL Bit 24: 8x programmable burst length mode * EMAC_DMABUSMOD_AAL Bit 25: Address-aligned beats * EMAC_DMABUSMOD_MB Bit 26: Mixed burst (F2/F4 only) * EMAC_DMABUSMOD_TXPR Bit 27: Transmit Priority * EMAC_DMABUSMOD_RIB Bit 31: Rebuild Burst */ #define DMABUSMOD_CLEAR_MASK \ (EMAC_DMABUSMOD_SWR | EMAC_DMABUSMOD_DA | EMAC_DMABUSMOD_DSL_MASK | \ EMAC_DMABUSMOD_ATDS | EMAC_DMABUSMOD_PBL_MASK | EMAC_DMABUSMOD_PR_MASK | \ EMAC_DMABUSMOD_FB | EMAC_DMABUSMOD_RPBL_MASK | EMAC_DMABUSMOD_USP | \ EMAC_DMABUSMOD_8XPBL | EMAC_DMABUSMOD_AAL | EMAC_DMABUSMOD_MB |\ EMAC_DMABUSMOD_TXPR | EMAC_DMABUSMOD_RIB) /* The following bits are set or left zero unconditionally in all modes. 
* * EMAC_DMABUSMOD_SWR Software reset 0 (no reset) * EMAC_DMABUSMOD_DA DMA Arbitration 1 (fixed priority) * EMAC_DMABUSMOD_DSL Descriptor skip length 0 * EMAC_DMABUSMOD_ATDS Enhanced descriptor format enable Depends on CONFIG_TIVA_EMAC_ENHANCEDDESC * EMAC_DMABUSMOD_PBL Programmable burst length Depends on EMAC_DMA_RXBURST * EMAC_DMABUSMOD_PR RX TX priority ratio 0 1:1 * EMAC_DMABUSMOD_FB Fixed burst 0 (disabled) * EMAC_DMABUSMOD_RPBL RX DMA programmable burst length Depends on EMAC_DMA_TXBURST * EMAC_DMABUSMOD_USP Use separate PBL Depends on EMAC_DMA_RX/TXBURST * EMAC_DMABUSMOD_8XPBL 8x programmable burst length mode Depends on EMAC_DMA_RX/TXBURST * EMAC_DMABUSMOD_AAL Address-aligned beats 0 (disabled) * EMAC_DMABUSMOD_MB Mixed burst 1 (enabled) * EMAC_DMABUSMOD_TXPR Transmit Priority 0 (RX DMA has priority over TX) * EMAC_DMABUSMOD_RIB Rebuild Burst 0 */ #define EMAC_DMA_RXBURST 4 #define EMAC_DMA_TXBURST 4 #if EMAC_DMA_RXBURST > 32 || EMAC_DMA_TXBURST > 32 #define __EMAC_DMABUSMOD_8XPBL 0 #define __EMAC_DMA_RXBURST EMAC_DMA_RXBURST #define __EMAC_DMA_TXBURST EMAC_DMA_TXBURST #else /* Divide both burst lengths by 8 and set the 8X burst length multiplier */ #define __EMAC_DMABUSMOD_8XPBL EMAC_DMABUSMOD_8XPBL #define __EMAC_DMA_RXBURST (EMAC_DMA_RXBURST >> 3) #define __EMAC_DMA_TXBURST (EMAC_DMA_TXBURST >> 3) #endif #define __EMAC_DMABUSMOD_PBL EMAC_DMABUSMOD_PBL(__EMAC_DMA_RXBURST) /* Are the receive and transmit burst lengths the same? */ #if __EMAC_DMA_RXBURST == __EMAC_DMA_TXBURST /* Yes.. Set up to use a single burst length */ #define __EMAC_DMABUSMOD_USP 0 #define __EMAC_DMABUSMOD_RPBL 0 #else /* No.. Use separate burst lengths for each */ #define __EMAC_DMABUSMOD_USP EMAC_DMABUSMOD_USP #define __EMAC_DMABUSMOD_RPBL EMAC_DMABUSMOD_RPBL(__EMAC_DMA_TXBURST) #endif #ifdef CONFIG_TIVA_EMAC_ENHANCEDDESC #define __EMAC_DMABUSMOD_ATDS EMAC_DMABUSMOD_ATDS #else #define __EMAC_DMABUSMOD_ATDS 0 #endif #define DMABUSMOD_SET_MASK \ (EMAC_DMABUSMOD_DA | EMAC_DMABUSMOD_DSL(0) | __EMAC_DMABUSMOD_ATDS | \ __EMAC_DMABUSMOD_PBL | __EMAC_DMABUSMOD_RPBL | __EMAC_DMABUSMOD_USP | \ __EMAC_DMABUSMOD_8XPBL | EMAC_DMABUSMOD_MB) /* Interrupt bit sets *******************************************************/ /* All interrupts in the normal and abnormal interrupt summary. Early transmit * interrupt (ETI) is excluded from the abnormal set because it causes too * many interrupts and is not interesting. 
*/ #define EMAC_DMAINT_NORMAL \ (EMAC_DMAINT_TI | EMAC_DMAINT_TBUI |EMAC_DMAINT_RI | EMAC_DMAINT_ERI) #define EMAC_DMAINT_ABNORMAL \ (EMAC_DMAINT_TPSI | EMAC_DMAINT_TJTI | EMAC_DMAINT_OVFI | EMAC_EMAINT_UNFI | \ EMAC_DMAINT_RBUI | EMAC_DMAINT_RPSI | EMAC_DMAINT_RWTI | /* EMAC_DMAINT_ETI | */ \ EMAC_DMAINT_FBEI) /* Normal receive, transmit, error interrupt enable bit sets */ #define EMAC_DMAINT_RECV_ENABLE (EMAC_DMAINT_NIS | EMAC_DMAINT_RI) #define EMAC_DMAINT_XMIT_ENABLE (EMAC_DMAINT_NIS | EMAC_DMAINT_TI) #define EMAC_DMAINT_XMIT_DISABLE (EMAC_DMAINT_TI) #ifdef CONFIG_DEBUG_NET #define EMAC_DMAINT_ERROR_ENABLE (EMAC_DMAINT_AIS | EMAC_DMAINT_ABNORMAL) #else #define EMAC_DMAINT_ERROR_ENABLE (0) #endif /* Helpers ******************************************************************/ /* This is a helper pointer for accessing the contents of the Ethernet * header */ #define BUF ((struct eth_hdr_s *)priv->dev.d_buf) /**************************************************************************** * Private Types ****************************************************************************/ /* The tiva_ethmac_s encapsulates all state information for a single hardware * interface */ struct tiva_ethmac_s { uint8_t ifup:1; /* true:ifup false:ifdown */ uint8_t mbps100:1; /* 100MBps operation (vs 10 MBps) */ uint8_t fduplex:1; /* Full (vs. half) duplex */ WDOG_ID txpoll; /* TX poll timer */ WDOG_ID txtimeout; /* TX timeout timer */ #ifdef CONFIG_NET_NOINTS struct work_s work; /* For deferring work to the work queue */ #endif #ifdef CONFIG_TIVA_PHY_INTERRUPTS xcpt_t handler; /* Attached PHY interrupt handler */ #endif /* This holds the information visible to uIP/NuttX */ struct net_driver_s dev; /* Interface understood by network subsystem */ /* Used to track transmit and receive descriptors */ struct emac_txdesc_s *txhead; /* Next available TX descriptor */ struct emac_rxdesc_s *rxhead; /* Next available RX descriptor */ struct emac_txdesc_s *txtail; /* First "in_flight" TX descriptor */ struct emac_rxdesc_s *rxcurr; /* First RX descriptor of the segment */ uint16_t segments; /* RX segment count */ uint16_t inflight; /* Number of TX transfers "in_flight" */ sq_queue_t freeb; /* The free buffer list */ /* Descriptor allocations */ struct emac_rxdesc_s rxtable[CONFIG_TIVA_EMAC_NRXDESC]; struct emac_txdesc_s txtable[CONFIG_TIVA_EMAC_NTXDESC]; /* Buffer allocations */ uint8_t rxbuffer[CONFIG_TIVA_EMAC_NRXDESC *OPTIMAL_EMAC_BUFSIZE]; uint8_t alloc[TIVA_EMAC_NFREEBUFFERS *OPTIMAL_EMAC_BUFSIZE]; }; /**************************************************************************** * Private Data ****************************************************************************/ static struct tiva_ethmac_s g_tiva_ethmac[TIVA_NETHCONTROLLERS]; /**************************************************************************** * Private Function Prototypes ****************************************************************************/ /* Register operations ******************************************************/ #if defined(CONFIG_TIVA_ETHERNET_REGDEBUG) && defined(CONFIG_DEBUG) static uint32_t tiva_getreg(uint32_t addr); static void tiva_putreg(uint32_t val, uint32_t addr); static void tiva_checksetup(void); #else #define tiva_getreg(addr) getreg32(addr) #define tiva_putreg(val, addr) putreg32(val, addr) #define tiva_checksetup() #endif /* Free buffer management */ static void tiva_initbuffer(FAR struct tiva_ethmac_s *priv); static inline uint8_t *tiva_allocbuffer(FAR struct tiva_ethmac_s *priv); static inline void tiva_freebuffer(FAR 
struct tiva_ethmac_s *priv, uint8_t *buffer); static inline bool tiva_isfreebuffer(FAR struct tiva_ethmac_s *priv); /* Common TX logic */ static int tiva_transmit(FAR struct tiva_ethmac_s *priv); static int tiva_txpoll(struct net_driver_s *dev); static void tiva_dopoll(FAR struct tiva_ethmac_s *priv); /* Interrupt handling */ static void tiva_enableint(FAR struct tiva_ethmac_s *priv, uint32_t ierbit); static void tiva_disableint(FAR struct tiva_ethmac_s *priv, uint32_t ierbit); static void tiva_freesegment(FAR struct tiva_ethmac_s *priv, FAR struct emac_rxdesc_s *rxfirst, int segments); static int tiva_recvframe(FAR struct tiva_ethmac_s *priv); static void tiva_receive(FAR struct tiva_ethmac_s *priv); static void tiva_freeframe(FAR struct tiva_ethmac_s *priv); static void tiva_txdone(FAR struct tiva_ethmac_s *priv); static inline void tiva_interrupt_process(FAR struct tiva_ethmac_s *priv); #ifdef CONFIG_NET_NOINTS static void tiva_interrupt_work(FAR void *arg); #endif static int tiva_interrupt(int irq, FAR void *context, FAR void *arg); /* Watchdog timer expirations */ static inline void tiva_txtimeout_process(FAR struct tiva_ethmac_s *priv); #ifdef CONFIG_NET_NOINTS static void tiva_txtimeout_work(FAR void *arg); #endif static void tiva_txtimeout_expiry(int argc, uint32_t arg, ...); static inline void tiva_poll_process(FAR struct tiva_ethmac_s *priv); #ifdef CONFIG_NET_NOINTS static void tiva_poll_work(FAR void *arg); #endif static void tiva_poll_expiry(int argc, uint32_t arg, ...); /* NuttX callback functions */ static int tiva_ifup(struct net_driver_s *dev); static int tiva_ifdown(struct net_driver_s *dev); static inline void tiva_txavail_process(FAR struct tiva_ethmac_s *priv); #ifdef CONFIG_NET_NOINTS static void tiva_txavail_work(FAR void *arg); #endif static int tiva_txavail(struct net_driver_s *dev); #ifdef CONFIG_NETDEV_PHY_IOCTL static int tiva_ioctl(struct net_driver_s *dev, int cmd, long arg); #endif /* Descriptor Initialization */ static void tiva_txdescinit(FAR struct tiva_ethmac_s *priv); static void tiva_rxdescinit(FAR struct tiva_ethmac_s *priv); /* PHY Initialization */ #ifdef CONFIG_TIVA_PHY_INTERRUPTS static void tiva_phy_intenable(bool enable); #endif static int tiva_phyread(uint16_t phydevaddr, uint16_t phyregaddr, uint16_t *value); static int tiva_phywrite(uint16_t phydevaddr, uint16_t phyregaddr, uint16_t value); static int tiva_phyinit(FAR struct tiva_ethmac_s *priv); /* MAC/DMA Initialization */ static void tiva_phy_configure(FAR struct tiva_ethmac_s *priv); static inline void tiva_phy_initialize(FAR struct tiva_ethmac_s *priv); static void tiva_ethreset(FAR struct tiva_ethmac_s *priv); static int tiva_macconfig(FAR struct tiva_ethmac_s *priv); static void tiva_macaddress(FAR struct tiva_ethmac_s *priv); #ifdef CONFIG_NET_ICMPv6 static void tiva_ipv6multicast(FAR struct tiva_ethmac_s *priv); #endif static int tiva_macenable(FAR struct tiva_ethmac_s *priv); static int tive_emac_configure(FAR struct tiva_ethmac_s *priv); /**************************************************************************** * Private Functions ****************************************************************************/ /**************************************************************************** * Name: tiva_getreg * * Description: * This function may to used to intercept an monitor all register accesses. * Clearly this is nothing you would want to do unless you are debugging * this driver. 
* * Input Parameters: * addr - The register address to read * * Returned Value: * The value read from the register * ****************************************************************************/ #if defined(CONFIG_TIVA_ETHERNET_REGDEBUG) && defined(CONFIG_DEBUG) static uint32_t tiva_getreg(uint32_t addr) { static uint32_t prevaddr = 0; static uint32_t preval = 0; static uint32_t count = 0; /* Read the value from the register */ uint32_t val = getreg32(addr); /* Is this the same value that we read from the same register last time? * Are we polling the register? If so, suppress some of the output. */ if (addr == prevaddr && val == preval) { if (count == 0xffffffff || ++count > 3) { if (count == 4) { lldbg("...\n"); } return val; } } /* No this is a new address or value */ else { /* Did we print "..." for the previous value? */ if (count > 3) { /* Yes.. then show how many times the value repeated */ lldbg("[repeats %d more times]\n", count - 3); } /* Save the new address, value, and count */ prevaddr = addr; preval = val; count = 1; } /* Show the register value read */ lldbg("%08x->%08x\n", addr, val); return val; } #endif /**************************************************************************** * Name: tiva_putreg * * Description: * This function may to used to intercept an monitor all register accesses. * Clearly this is nothing you would want to do unless you are debugging * this driver. * * Input Parameters: * val - The value to write to the register * addr - The register address to read * * Returned Value: * None * ****************************************************************************/ #if defined(CONFIG_TIVA_ETHERNET_REGDEBUG) && defined(CONFIG_DEBUG) static void tiva_putreg(uint32_t val, uint32_t addr) { /* Show the register value being written */ lldbg("%08x<-%08x\n", addr, val); /* Write the value */ putreg32(val, addr); } #endif /**************************************************************************** * Name: tiva_checksetup * * Description: * Show the state of critical configuration registers. * * Input Parameters: * None * * Returned Value: * None * ****************************************************************************/ #if defined(CONFIG_TIVA_ETHERNET_REGDEBUG) && defined(CONFIG_DEBUG) static void tiva_checksetup(void) { } #endif /**************************************************************************** * Function: tiva_initbuffer * * Description: * Initialize the free buffer list. * * Parameters: * priv - Reference to the driver state structure * * Returned Value: * None * * Assumptions: * Called during early driver initialization before Ethernet interrupts * are enabled. * ****************************************************************************/ static void tiva_initbuffer(FAR struct tiva_ethmac_s *priv) { uint8_t *buffer; int i; /* Initialize the head of the free buffer list */ sq_init(&priv->freeb); /* Add all of the pre-allocated buffers to the free buffer list */ for (i = 0, buffer = priv->alloc; i < TIVA_EMAC_NFREEBUFFERS; i++, buffer += OPTIMAL_EMAC_BUFSIZE) { sq_addlast((FAR sq_entry_t *) buffer, &priv->freeb); } } /**************************************************************************** * Function: tiva_allocbuffer * * Description: * Allocate one buffer from the free buffer list. * * Parameters: * priv - Reference to the driver state structure * * Returned Value: * Pointer to the allocated buffer on success; NULL on failure * * Assumptions: * May or may not be called from an interrupt handler. 
In either case, * global interrupts are disabled, either explicitly or indirectly through * interrupt handling logic. * ****************************************************************************/ static inline uint8_t *tiva_allocbuffer(FAR struct tiva_ethmac_s *priv) { /* Allocate a buffer by returning the head of the free buffer list */ return (uint8_t *) sq_remfirst(&priv->freeb); } /**************************************************************************** * Function: tiva_freebuffer * * Description: * Return a buffer to the free buffer list. * * Parameters: * priv - Reference to the driver state structure * buffer - A pointer to the buffer to be freed * * Returned Value: * None * * Assumptions: * May or may not be called from an interrupt handler. In either case, * global interrupts are disabled, either explicitly or indirectly through * interrupt handling logic. * ****************************************************************************/ static inline void tiva_freebuffer(FAR struct tiva_ethmac_s *priv, uint8_t *buffer) { /* Free the buffer by adding it to to the end of the free buffer list */ sq_addlast((FAR sq_entry_t *) buffer, &priv->freeb); } /**************************************************************************** * Function: tiva_isfreebuffer * * Description: * Return TRUE if the free buffer list is not empty. * * Parameters: * priv - Reference to the driver state structure * * Returned Value: * True if there are one or more buffers in the free buffer list; * false if the free buffer list is empty * * Assumptions: * None. * ****************************************************************************/ static inline bool tiva_isfreebuffer(FAR struct tiva_ethmac_s *priv) { /* Return TRUE if the free buffer list is not empty */ return !sq_empty(&priv->freeb); } /**************************************************************************** * Function: tiva_transmit * * Description: * Start hardware transmission. Called either from the txdone interrupt * handling or from watchdog based polling. * * Parameters: * priv - Reference to the driver state structure * * Returned Value: * OK on success; a negated errno on failure * * Assumptions: * May or may not be called from an interrupt handler. In either case, * global interrupts are disabled, either explicitly or indirectly through * interrupt handling logic. * ****************************************************************************/ static int tiva_transmit(FAR struct tiva_ethmac_s *priv) { struct emac_txdesc_s *txdesc; struct emac_txdesc_s *txfirst; /* The internal (optimal) uIP buffer size may be configured to be larger * than the Ethernet buffer size. */ #if OPTIMAL_EMAC_BUFSIZE > OPTIMAL_EMAC_BUFSIZE uint8_t *buffer; int bufcount; int lastsize; int i; #endif /* Verify that the hardware is ready to send another packet. If we get * here, then we are committed to sending a packet; Higher level logic * must have assured that there is no transmission in progress. */ txdesc = priv->txhead; txfirst = txdesc; nvdbg("d_len: %d d_buf: %p txhead: %p tdes0: %08x\n", priv->dev.d_len, priv->dev.d_buf, txdesc, txdesc->tdes0); DEBUGASSERT(txdesc && (txdesc->tdes0 & EMAC_TDES0_OWN) == 0); /* Is the size to be sent greater than the size of the Ethernet buffer? */ DEBUGASSERT(priv->dev.d_len > 0 && priv->dev.d_buf != NULL); #if OPTIMAL_EMAC_BUFSIZE > OPTIMAL_EMAC_BUFSIZE if (priv->dev.d_len > OPTIMAL_EMAC_BUFSIZE) { /* Yes... how many buffers will be need to send the packet? 
*/ bufcount = (priv->dev.d_len + (OPTIMAL_EMAC_BUFSIZE - 1)) / OPTIMAL_EMAC_BUFSIZE; lastsize = priv->dev.d_len - (bufcount - 1) * OPTIMAL_EMAC_BUFSIZE; nvdbg("bufcount: %d lastsize: %d\n", bufcount, lastsize); /* Set the first segment bit in the first TX descriptor */ txdesc->tdes0 |= EMAC_TDES0_FS; /* Set up all but the last TX descriptor */ buffer = priv->dev.d_buf; for (i = 0; i < bufcount; i++) { /* This could be a normal event but the design does not handle it */ DEBUGASSERT((txdesc->tdes0 & EMAC_TDES0_OWN) == 0); /* Set the Buffer1 address pointer */ txdesc->tdes2 = (uint32_t) buffer; /* Set the buffer size in all TX descriptors */ if (i == (bufcount - 1)) { /* This is the last segment. Set the last segment bit in the * last TX descriptor and ask for an interrupt when this * segment transfer completes. */ txdesc->tdes0 |= (EMAC_TDES0_LS | EMAC_TDES0_IC); /* This segement is, most likely, of fractional buffersize */ txdesc->tdes1 = lastsize; buffer += lastsize; } else { /* This is not the last segment. We don't want an interrupt * when this segment transfer completes. */ txdesc->tdes0 &= ~EMAC_TDES0_IC; /* The size of the transfer is the whole buffer */ txdesc->tdes1 = OPTIMAL_EMAC_BUFSIZE; buffer += OPTIMAL_EMAC_BUFSIZE; } /* Give the descriptor to DMA */ txdesc->tdes0 |= EMAC_TDES0_OWN; txdesc = (struct emac_txdesc_s *)txdesc->tdes3; } } else #endif { /* The single descriptor is both the first and last segment. And we do * want an interrupt when the transfer completes. */ txdesc->tdes0 |= (EMAC_TDES0_FS | EMAC_TDES0_LS | EMAC_TDES0_IC); /* Set frame size */ DEBUGASSERT(priv->dev.d_len <= CONFIG_NET_ETH_MTU); txdesc->tdes1 = priv->dev.d_len; /* Set the Buffer1 address pointer */ txdesc->tdes2 = (uint32_t) priv->dev.d_buf; /* Set OWN bit of the TX descriptor tdes0. This gives the buffer to * Ethernet DMA */ txdesc->tdes0 |= EMAC_TDES0_OWN; /* Point to the next available TX descriptor */ txdesc = (struct emac_txdesc_s *)txdesc->tdes3; } /* Remember where we left off in the TX descriptor chain */ priv->txhead = txdesc; /* Detach the buffer from priv->dev structure. That buffer is now * "in-flight". */ priv->dev.d_buf = NULL; priv->dev.d_len = 0; /* If there is no other TX buffer, in flight, then remember the location * of the TX descriptor. This is the location to check for TX done events. */ if (!priv->txtail) { DEBUGASSERT(priv->inflight == 0); priv->txtail = txfirst; } /* Increment the number of TX transfer in-flight */ priv->inflight++; nvdbg("txhead: %p txtail: %p inflight: %d\n", priv->txhead, priv->txtail, priv->inflight); /* If all TX descriptors are in-flight, then we have to disable receive interrupts * too. This is because receive events can trigger more un-stoppable transmit * events. 
*/ if (priv->inflight >= CONFIG_TIVA_EMAC_NTXDESC) { tiva_disableint(priv, EMAC_DMAINT_RI); } /* Check if the TX Buffer unavailable flag is set */ if ((tiva_getreg(TIVA_EMAC_DMARIS) & EMAC_DMAINT_TBUI) != 0) { /* Clear TX Buffer unavailable flag */ tiva_putreg(EMAC_DMAINT_TBUI, TIVA_EMAC_DMARIS); /* Resume DMA transmission */ tiva_putreg(0, TIVA_EMAC_TXPOLLD); } /* Enable TX interrupts */ tiva_enableint(priv, EMAC_DMAINT_TI); /* Setup the TX timeout watchdog (perhaps restarting the timer) */ (void)wd_start(priv->txtimeout, TIVA_TXTIMEOUT, tiva_txtimeout_expiry, 1, (uint32_t) priv); return OK; } /**************************************************************************** * Function: tiva_txpoll * * Description: * The transmitter is available, check if uIP has any outgoing packets ready * to send. This is a callback from devif_poll(). devif_poll() may be called: * * 1. When the preceding TX packet send is complete, * 2. When the preceding TX packet send timesout and the interface is reset * 3. During normal TX polling * * Parameters: * dev - Reference to the NuttX driver state structure * * Returned Value: * OK on success; a negated errno on failure * * Assumptions: * May or may not be called from an interrupt handler. In either case, * global interrupts are disabled, either explicitly or indirectly through * interrupt handling logic. * ****************************************************************************/ static int tiva_txpoll(struct net_driver_s *dev) { FAR struct tiva_ethmac_s *priv = (FAR struct tiva_ethmac_s *)dev->d_private; DEBUGASSERT(priv->dev.d_buf != NULL); /* If the polling resulted in data that should be sent out on the network, * the field d_len is set to a value > 0. */ if (priv->dev.d_len > 0) { /* Look up the destination MAC address and add it to the Ethernet * header. */ #ifdef CONFIG_NET_IPv4 #ifdef CONFIG_NET_IPv6 if (IFF_IS_IPv4(priv->dev.d_flags)) #endif { arp_out(&priv->dev); } #endif /* CONFIG_NET_IPv4 */ #ifdef CONFIG_NET_IPv6 #ifdef CONFIG_NET_IPv4 else #endif { neighbor_out(&priv->dev); } #endif /* CONFIG_NET_IPv6 */ /* Send the packet */ tiva_transmit(priv); DEBUGASSERT(dev->d_len == 0 && dev->d_buf == NULL); /* Check if the next TX descriptor is owned by the Ethernet DMA or CPU. We * cannot perform the TX poll if we are unable to accept another packet for * transmission. * * In a race condition, EMAC_TDES0_OWN may be cleared BUT still not available * because tiva_freeframe() has not yet run. If tiva_freeframe() has run, * the buffer1 pointer (tdes2) will be nullified (and inflight should be < * CONFIG_TIVA_EMAC_NTXDESC). */ if ((priv->txhead->tdes0 & EMAC_TDES0_OWN) != 0 || priv->txhead->tdes2 != 0) { /* We have to terminate the poll if we have no more descriptors * available for another transfer. */ return -EBUSY; } /* We have the descriptor, we can continue the poll. Allocate a new * buffer for the poll. */ dev->d_buf = tiva_allocbuffer(priv); /* We can't continue the poll if we have no buffers */ if (dev->d_buf == NULL) { /* Terminate the poll. */ return -ENOMEM; } } /* If zero is returned, the polling will continue until all connections have * been examined. */ return 0; } /**************************************************************************** * Function: tiva_dopoll * * Description: * The function is called when a frame is received using the DMA receive * interrupt. It scans the RX descriptors to the received frame. 
* * Parameters: * priv - Reference to the driver state structure * * Returned Value: * None * * Assumptions: * Global interrupts are disabled by interrupt handling logic. * ****************************************************************************/ static void tiva_dopoll(FAR struct tiva_ethmac_s *priv) { FAR struct net_driver_s *dev = &priv->dev; /* Check if the next TX descriptor is owned by the Ethernet DMA or * CPU. We cannot perform the TX poll if we are unable to accept * another packet for transmission. * * In a race condition, EMAC_TDES0_OWN may be cleared BUT still not available * because tiva_freeframe() has not yet run. If tiva_freeframe() has run, * the buffer1 pointer (tdes2) will be nullified (and inflight should be < * CONFIG_TIVA_EMAC_NTXDESC). */ if ((priv->txhead->tdes0 & EMAC_TDES0_OWN) == 0 && priv->txhead->tdes2 == 0) { /* If we have the descriptor, then poll uIP for new XMIT data. * Allocate a buffer for the poll. */ DEBUGASSERT(dev->d_len == 0 && dev->d_buf == NULL); dev->d_buf = tiva_allocbuffer(priv); /* We can't poll if we have no buffers */ if (dev->d_buf) { (void)devif_poll(dev, tiva_txpoll); /* We will, most likely end up with a buffer to be freed. But it * might not be the same one that we allocated above. */ if (dev->d_buf) { DEBUGASSERT(dev->d_len == 0); tiva_freebuffer(priv, dev->d_buf); dev->d_buf = NULL; } } } } /**************************************************************************** * Function: tiva_enableint * * Description: * Enable a "normal" interrupt * * Parameters: * priv - Reference to the driver state structure * * Returned Value: * None * * Assumptions: * Global interrupts are disabled by interrupt handling logic. * ****************************************************************************/ static void tiva_enableint(FAR struct tiva_ethmac_s *priv, uint32_t ierbit) { uint32_t regval; /* Enable the specified "normal" interrupt */ regval = tiva_getreg(TIVA_EMAC_DMAIM); regval |= (EMAC_DMAINT_NIS | ierbit); tiva_putreg(regval, TIVA_EMAC_DMAIM); } /**************************************************************************** * Function: tiva_disableint * * Description: * Disable a normal interrupt. * * Parameters: * priv - Reference to the driver state structure * * Returned Value: * None * * Assumptions: * Global interrupts are disabled by interrupt handling logic. * ****************************************************************************/ static void tiva_disableint(FAR struct tiva_ethmac_s *priv, uint32_t ierbit) { uint32_t regval; /* Disable the "normal" interrupt */ regval = tiva_getreg(TIVA_EMAC_DMAIM); regval &= ~ierbit; /* Are all "normal" interrupts now disabled? */ if ((regval & EMAC_DMAINT_NORMAL) == 0) { /* Yes.. disable normal interrupts */ regval &= ~EMAC_DMAINT_NIS; } tiva_putreg(regval, TIVA_EMAC_DMAIM); } /**************************************************************************** * Function: tiva_freesegment * * Description: * The function is called when a frame is received using the DMA receive * interrupt. It scans the RX descriptors to the received frame. * * Parameters: * priv - Reference to the driver state structure * * Returned Value: * None * * Assumptions: * Global interrupts are disabled by interrupt handling logic. 
* ****************************************************************************/ static void tiva_freesegment(FAR struct tiva_ethmac_s *priv, FAR struct emac_rxdesc_s *rxfirst, int segments) { struct emac_rxdesc_s *rxdesc; int i; nvdbg("rxfirst: %p segments: %d\n", rxfirst, segments); /* Set OWN bit in RX descriptors. This gives the buffers back to DMA */ rxdesc = rxfirst; for (i = 0; i < segments; i++) { rxdesc->rdes0 = EMAC_RDES0_OWN; rxdesc = (struct emac_rxdesc_s *)rxdesc->rdes3; } /* Reset the segment managment logic */ priv->rxcurr = NULL; priv->segments = 0; /* Check if the RX Buffer unavailable flag is set */ if ((tiva_getreg(TIVA_EMAC_DMARIS) & EMAC_DMAINT_RBUI) != 0) { /* Clear RBUS Ethernet DMA flag */ tiva_putreg(EMAC_DMAINT_RBUI, TIVA_EMAC_DMARIS); /* Resume DMA reception */ tiva_putreg(0, TIVA_EMAC_RXPOLLD); } } /**************************************************************************** * Function: tiva_recvframe * * Description: * The function is called when a frame is received using the DMA receive * interrupt. It scans the RX descriptors of the received frame. * * NOTE: This function will silently discard any packets containing errors. * * Parameters: * priv - Reference to the driver state structure * * Returned Value: * OK if a packet was successfully returned; -EAGAIN if there are no * further packets available * * Assumptions: * Global interrupts are disabled by interrupt handling logic. * ****************************************************************************/ static int tiva_recvframe(FAR struct tiva_ethmac_s *priv) { struct emac_rxdesc_s *rxdesc; struct emac_rxdesc_s *rxcurr; uint8_t *buffer; int i; nvdbg("rxhead: %p rxcurr: %p segments: %d\n", priv->rxhead, priv->rxcurr, priv->segments); /* Check if there are free buffers. We cannot receive new frames in this * design unless there is at least one free buffer. */ if (!tiva_isfreebuffer(priv)) { nlldbg("No free buffers\n"); return -ENOMEM; } /* Scan descriptors owned by the CPU. Scan until: * * 1) We find a descriptor still owned by the DMA, * 2) We have examined all of the RX descriptors, or * 3) All of the TX descriptors are in flight. * * This last case is obscure. It is due to that fact that each packet * that we receive can generate an unstoppable transmisson. So we have * to stop receiving when we can not longer transmit. In this case, the * transmit logic should also have disabled further RX interrupts. 
*/ rxdesc = priv->rxhead; for (i = 0; (rxdesc->rdes0 & EMAC_RDES0_OWN) == 0 && i < CONFIG_TIVA_EMAC_NRXDESC && priv->inflight < CONFIG_TIVA_EMAC_NTXDESC; i++) { /* Check if this is the first segment in the frame */ if ((rxdesc->rdes0 & EMAC_RDES0_FS) != 0 && (rxdesc->rdes0 & EMAC_RDES0_LS) == 0) { priv->rxcurr = rxdesc; priv->segments = 1; } /* Check if this is an intermediate segment in the frame */ else if (((rxdesc->rdes0 & EMAC_RDES0_LS) == 0) && ((rxdesc->rdes0 & EMAC_RDES0_FS) == 0)) { priv->segments++; } /* Otherwise, it is the last segment in the frame */ else { priv->segments++; /* Check if there is only one segment in the frame */ if (priv->segments == 1) { rxcurr = rxdesc; } else { rxcurr = priv->rxcurr; } nvdbg("rxhead: %p rxcurr: %p segments: %d\n", priv->rxhead, priv->rxcurr, priv->segments); /* Check if any errors are reported in the frame */ if ((rxdesc->rdes0 & EMAC_RDES0_ES) == 0) { struct net_driver_s *dev = &priv->dev; /* Get the Frame Length of the received packet: substruct 4 * bytes of the CRC */ dev->d_len = ((rxdesc->rdes0 & EMAC_RDES0_FL_MASK) >> EMAC_RDES0_FL_SHIFT) - 4; /* Get a buffer from the free list. We don't even check if * this is successful because we already assure the free * list is not empty above. */ buffer = tiva_allocbuffer(priv); /* Take the buffer from the RX descriptor of the first free * segment, put it into the uIP device structure, then replace * the buffer in the RX descriptor with the newly allocated * buffer. */ DEBUGASSERT(dev->d_buf == NULL); dev->d_buf = (uint8_t *) rxcurr->rdes2; rxcurr->rdes2 = (uint32_t) buffer; /* Return success, remebering where we should re-start scanning * and resetting the segment scanning logic */ priv->rxhead = (struct emac_rxdesc_s *)rxdesc->rdes3; tiva_freesegment(priv, rxcurr, priv->segments); nvdbg("rxhead: %p d_buf: %p d_len: %d\n", priv->rxhead, dev->d_buf, dev->d_len); return OK; } else { /* Drop the frame that contains the errors, reset the segment * scanning logic, and continue scanning with the next frame. */ nlldbg("DROPPED: RX descriptor errors: %08x\n", rxdesc->rdes0); tiva_freesegment(priv, rxcurr, priv->segments); } } /* Try the next descriptor */ rxdesc = (struct emac_rxdesc_s *)rxdesc->rdes3; } /* We get here after all of the descriptors have been scanned or when rxdesc points * to the first descriptor owned by the DMA. Remember where we left off. */ priv->rxhead = rxdesc; nvdbg("rxhead: %p rxcurr: %p segments: %d\n", priv->rxhead, priv->rxcurr, priv->segments); return -EAGAIN; } /**************************************************************************** * Function: tiva_receive * * Description: * An interrupt was received indicating the availability of a new RX packet * * Parameters: * priv - Reference to the driver state structure * * Returned Value: * None * * Assumptions: * Global interrupts are disabled by interrupt handling logic. * ****************************************************************************/ static void tiva_receive(FAR struct tiva_ethmac_s *priv) { struct net_driver_s *dev = &priv->dev; /* Loop while while tiva_recvframe() successfully retrieves valid * Ethernet frames. 
*/ while (tiva_recvframe(priv) == OK) { /* Check if the packet is a valid size for the uIP buffer configuration * (this should not happen) */ if (dev->d_len > CONFIG_NET_ETH_MTU) { nlldbg("DROPPED: Too big: %d\n", dev->d_len); } else /* We only accept IP packets of the configured type and ARP packets */ #ifdef CONFIG_NET_IPv4 if (BUF->type == HTONS(ETHTYPE_IP)) { nllvdbg("IPv4 frame\n"); /* Handle ARP on input then give the IPv4 packet to the network * layer */ arp_ipin(&priv->dev); ipv4_input(&priv->dev); /* If the above function invocation resulted in data that should be * sent out on the network, the field d_len will set to a value > 0. */ if (priv->dev.d_len > 0) { /* Update the Ethernet header with the correct MAC address */ #ifdef CONFIG_NET_IPv6 if (IFF_IS_IPv4(priv->dev.d_flags)) #endif { arp_out(&priv->dev); } #ifdef CONFIG_NET_IPv6 else { neighbor_out(&priv->dev); } #endif /* And send the packet */ tiva_transmit(priv); } } else #endif #ifdef CONFIG_NET_IPv6 if (BUF->type == HTONS(ETHTYPE_IP6)) { nllvdbg("IPv6 frame\n"); /* Give the IPv6 packet to the network layer */ ipv6_input(&priv->dev); /* If the above function invocation resulted in data that should be * sent out on the network, the field d_len will set to a value > 0. */ if (priv->dev.d_len > 0) { /* Update the Ethernet header with the correct MAC address */ #ifdef CONFIG_NET_IPv4 if (IFF_IS_IPv4(priv->dev.d_flags)) { arp_out(&priv->dev); } else #endif #ifdef CONFIG_NET_IPv6 { neighbor_out(&priv->dev); } #endif /* And send the packet */ tiva_transmit(priv); } } else #endif #ifdef CONFIG_NET_ARP if (BUF->type == htons(ETHTYPE_ARP)) { nvdbg("ARP frame\n"); /* Handle ARP packet */ arp_arpin(&priv->dev); /* If the above function invocation resulted in data that should be * sent out on the network, the field d_len will set to a value > 0. */ if (priv->dev.d_len > 0) { tiva_transmit(priv); } } else #endif { nlldbg("DROPPED: Unknown type: %04x\n", BUF->type); } /* We are finished with the RX buffer. NOTE: If the buffer is * re-used for transmission, the dev->d_buf field will have been * nullified. */ if (dev->d_buf) { /* Free the receive packet buffer */ tiva_freebuffer(priv, dev->d_buf); dev->d_buf = NULL; dev->d_len = 0; } } } /**************************************************************************** * Function: tiva_freeframe * * Description: * Scans the TX descriptors and frees the buffers of completed TX transfers. * * Parameters: * priv - Reference to the driver state structure * * Returned Value: * None. * * Assumptions: * Global interrupts are disabled by interrupt handling logic. * ****************************************************************************/ static void tiva_freeframe(FAR struct tiva_ethmac_s *priv) { FAR struct emac_txdesc_s *txdesc; int i; nvdbg("txhead: %p txtail: %p inflight: %d\n", priv->txhead, priv->txtail, priv->inflight); /* Scan for "in-flight" descriptors owned by the CPU */ txdesc = priv->txtail; if (txdesc) { DEBUGASSERT(priv->inflight > 0); for (i = 0; (txdesc->tdes0 & EMAC_TDES0_OWN) == 0; i++) { /* There should be a buffer assigned to all in-flight * TX descriptors. */ nvdbg("txtail: %p tdes0: %08x tdes2: %08x tdes3: %08x\n", txdesc, txdesc->tdes0, txdesc->tdes2, txdesc->tdes3); DEBUGASSERT(txdesc->tdes2 != 0); /* Check if this is the first segment of a TX frame. */ if ((txdesc->tdes0 & EMAC_TDES0_FS) != 0) { /* Yes.. Free the buffer */ tiva_freebuffer(priv, (uint8_t *) txdesc->tdes2); } /* In any event, make sure that TDES2 is nullified. 
*/ txdesc->tdes2 = 0; /* Check if this is the last segment of a TX frame */ if ((txdesc->tdes0 & EMAC_TDES0_LS) != 0) { /* Yes.. Decrement the number of frames "in-flight". */ priv->inflight--; /* If all of the TX descriptors were in-flight, then RX interrupts * may have been disabled... we can re-enable them now. */ tiva_enableint(priv, EMAC_DMAINT_RI); /* If there are no more frames in-flight, then bail. */ if (priv->inflight <= 0) { priv->txtail = NULL; priv->inflight = 0; return; } } /* Try the next descriptor in the TX chain */ txdesc = (struct emac_txdesc_s *)txdesc->tdes3; } /* We get here if (1) there are still frames "in-flight". Remember * where we left off. */ priv->txtail = txdesc; nvdbg("txhead: %p txtail: %p inflight: %d\n", priv->txhead, priv->txtail, priv->inflight); } } /**************************************************************************** * Function: tiva_txdone * * Description: * An interrupt was received indicating that the last TX packet(s) is done * * Parameters: * priv - Reference to the driver state structure * * Returned Value: * None * * Assumptions: * Global interrupts are disabled by the watchdog logic. * ****************************************************************************/ static void tiva_txdone(FAR struct tiva_ethmac_s *priv) { FAR struct net_driver_s *dev = &priv->dev; DEBUGASSERT(priv->txtail != NULL); /* Scan the TX descriptor change, returning buffers to free list */ tiva_freeframe(priv); dev->d_buf = NULL; dev->d_len = 0; /* If no further xmits are pending, then cancel the TX timeout */ if (priv->inflight <= 0) { wd_cancel(priv->txtimeout); /* And disable further TX interrupts. */ tiva_disableint(priv, EMAC_DMAINT_TI); } /* Then poll uIP for new XMIT data */ tiva_dopoll(priv); } /**************************************************************************** * Function: tiva_interrupt_process * * Description: * Interrupt processing. This may be performed either within the interrupt * handler or on the worker thread, depending upon the configuration * * Parameters: * priv - Reference to the driver state structure * * Returned Value: * None * * Assumptions: * Ethernet interrupts are disabled * ****************************************************************************/ static inline void tiva_interrupt_process(FAR struct tiva_ethmac_s *priv) { uint32_t dmaris; /* Get the DMA interrupt status bits (no MAC interrupts are expected) */ dmaris = tiva_getreg(TIVA_EMAC_DMARIS); /* Mask only enabled interrupts. This depends on the fact that the interrupt * related bits (0-16) correspond in these two registers. */ dmaris &= tiva_getreg(TIVA_EMAC_DMAIM); /* Check if there are pending "normal" interrupts */ if ((dmaris & EMAC_DMAINT_NIS) != 0) { /* Yes.. Check if we received an incoming packet, if so, call * tiva_receive() */ if ((dmaris & EMAC_DMAINT_RI) != 0) { /* Clear the pending receive interrupt */ tiva_putreg(EMAC_DMAINT_RI, TIVA_EMAC_DMARIS); /* Handle the received package */ tiva_receive(priv); } /* Check if a packet transmission just completed. If so, call * tiva_txdone(). This may disable further TX interrupts if there * are no pending transmissions. 
*/ if ((dmaris & EMAC_DMAINT_TI) != 0) { /* Clear the pending receive interrupt */ tiva_putreg(EMAC_DMAINT_TI, TIVA_EMAC_DMARIS); /* Check if there are pending transmissions */ tiva_txdone(priv); } /* Clear the pending normal summary interrupt */ tiva_putreg(EMAC_DMAINT_NIS, TIVA_EMAC_DMARIS); } /* Handle error interrupt only if CONFIG_DEBUG_NET is eanbled */ #ifdef CONFIG_DEBUG_NET /* Check if there are pending "abnormal" interrupts */ if ((dmaris & EMAC_DMAINT_AIS) != 0) { /* Just let the user know what happened */ nlldbg("Abnormal event(s): %08x\n", dmaris); /* Clear all pending abnormal events */ tiva_putreg(EMAC_DMAINT_ABNORMAL, TIVA_EMAC_DMARIS); /* Clear the pending abnormal summary interrupt */ tiva_putreg(EMAC_DMAINT_AIS, TIVA_EMAC_DMARIS); } #endif } /**************************************************************************** * Function: tiva_interrupt_work * * Description: * Perform interrupt related work from the worker thread * * Parameters: * arg - The argument passed when work_queue() was called. * * Returned Value: * OK on success * * Assumptions: * Ethernet interrupts are disabled * ****************************************************************************/ #ifdef CONFIG_NET_NOINTS static void tiva_interrupt_work(FAR void *arg) { FAR struct tiva_ethmac_s *priv = (FAR struct tiva_ethmac_s *)arg; DEBUGASSERT(priv); /* Process pending Ethernet interrupts */ net_lock(); tiva_interrupt_process(priv); net_unlock(); /* Re-enable Ethernet interrupts at the NVIC */ up_enable_irq(TIVA_IRQ_ETHCON); } #endif /**************************************************************************** * Function: tiva_interrupt * * Description: * Hardware interrupt handler * * Parameters: * irq - Number of the IRQ that generated the interrupt * context - Interrupt register state save info (architecture-specific) * * Returned Value: * OK on success * * Assumptions: * ****************************************************************************/ static int tiva_interrupt(int irq, FAR void *context, FAR void *arg) { FAR struct tiva_ethmac_s *priv = &g_tiva_ethmac[0]; #ifdef CONFIG_NET_NOINTS uint32_t dmaris; /* Get the raw interrupt status. */ dmaris = tiva_getreg(TIVA_EMAC_DMARIS); if (dmaris != 0) { /* Disable further Ethernet interrupts. Because Ethernet interrupts * are also disabled if the TX timeout event occurs, there can be no * race condition here. */ up_disable_irq(TIVA_IRQ_ETHCON); /* Check if a packet transmission just completed. */ if ((dmaris & EMAC_DMAINT_TI) != 0) { /* If a TX transfer just completed, then cancel the TX timeout so * there will be no race condition between any subsequent timeout * expiration and the deferred interrupt processing. */ wd_cancel(priv->txtimeout); } /* Cancel any pending poll work */ work_cancel(HPWORK, &priv->work); /* Schedule to perform the interrupt processing on the worker thread. */ work_queue(HPWORK, &priv->work, tiva_interrupt_work, priv, 0); } #else /* Process the interrupt now */ tiva_interrupt_process(priv); #endif #ifdef CONFIG_TIVA_PHY_INTERRUPTS /* Check for pending PHY interrupts */ if ((tiva_getreg(TIVA_EPHY_MISC) & EMAC_PHYMISC_INT) != 0) { /* Clear the pending PHY interrupt */ tiva_putreg(EMAC_PHYMISC_INT, TIVA_EPHY_MISC); /* Dispatch to the registered handler */ if (priv->handler) { (void)priv->handler(irq, context); } } #endif return OK; } /**************************************************************************** * Function: tiva_txtimeout_process * * Description: * Process a TX timeout. 
Called from the either the watchdog timer * expiration logic or from the worker thread, depending upon the * configuration. The timeout means that the last TX never completed. * Reset the hardware and start again. * * Parameters: * priv - Reference to the driver state structure * * Returned Value: * None * * Assumptions: * Global interrupts are disabled by the watchdog logic. * ****************************************************************************/ static inline void tiva_txtimeout_process(FAR struct tiva_ethmac_s *priv) { /* Reset the hardware. Just take the interface down, then back up again. */ tiva_ifdown(&priv->dev); tiva_ifup(&priv->dev); /* Then poll uIP for new XMIT data */ tiva_dopoll(priv); } /**************************************************************************** * Function: tiva_txtimeout_work * * Description: * Perform TX timeout related work from the worker thread * * Parameters: * arg - The argument passed when work_queue() as called. * * Returned Value: * OK on success * * Assumptions: * Ethernet interrupts are disabled * ****************************************************************************/ #ifdef CONFIG_NET_NOINTS static void tiva_txtimeout_work(FAR void *arg) { FAR struct tiva_ethmac_s *priv = (FAR struct tiva_ethmac_s *)arg; /* Process pending Ethernet interrupts */ net_lock(); tiva_txtimeout_process(priv); net_unlock(); } #endif /**************************************************************************** * Function: tiva_txtimeout_expiry * * Description: * Our TX watchdog timed out. Called from the timer interrupt handler. * The last TX never completed. Reset the hardware and start again. * * Parameters: * argc - The number of available arguments * arg - The first argument * * Returned Value: * None * * Assumptions: * Global interrupts are disabled by the watchdog logic. * ****************************************************************************/ static void tiva_txtimeout_expiry(int argc, uint32_t arg, ...) { FAR struct tiva_ethmac_s *priv = (FAR struct tiva_ethmac_s *)arg; nlldbg("Timeout!\n"); #ifdef CONFIG_NET_NOINTS /* Disable further Ethernet interrupts. This will prevent some race * conditions with interrupt work. There is still a potential race * condition with interrupt work that is already queued and in progress. * * Interrupts will be re-enabled when tiva_ifup() is called. */ up_disable_irq(TIVA_IRQ_ETHCON); /* Cancel any pending poll or interrupt work. This will have no effect * on work that has already been started. */ work_cancel(HPWORK, &priv->work); /* Schedule to perform the TX timeout processing on the worker thread. */ work_queue(HPWORK, &priv->work, tiva_txtimeout_work, priv, 0); #else /* Process the timeout now */ tiva_txtimeout_process(priv); #endif } /**************************************************************************** * Function: tiva_poll_process * * Description: * Perform the periodic poll. This may be called either from watchdog * timer logic or from the worker thread, depending upon the configuration. * * Parameters: * priv - Reference to the driver state structure * * Returned Value: * None * * Assumptions: * ****************************************************************************/ static inline void tiva_poll_process(FAR struct tiva_ethmac_s *priv) { FAR struct net_driver_s *dev = &priv->dev; /* Check if the next TX descriptor is owned by the Ethernet DMA or CPU. We * cannot perform the timer poll if we are unable to accept another packet * for transmission. Hmmm.. might be bug here. 
Does this mean if there is * a transmit in progress, we will miss TCP time state updates? * * In a race condition, EMAC_TDES0_OWN may be cleared BUT still not available * because tiva_freeframe() has not yet run. If tiva_freeframe() has run, * the buffer1 pointer (tdes2) will be nullified (and inflight should be < * CONFIG_TIVA_EMAC_NTXDESC). */ if ((priv->txhead->tdes0 & EMAC_TDES0_OWN) == 0 && priv->txhead->tdes2 == 0) { /* If we have the descriptor, then perform the timer poll. Allocate a * buffer for the poll. */ DEBUGASSERT(dev->d_len == 0 && dev->d_buf == NULL); dev->d_buf = tiva_allocbuffer(priv); /* We can't poll if we have no buffers */ if (dev->d_buf) { /* Update TCP timing states and poll uIP for new XMIT data. */ (void)devif_timer(dev, tiva_txpoll, TIVA_POLLHSEC); /* We will, most likely end up with a buffer to be freed. But it * might not be the same one that we allocated above. */ if (dev->d_buf) { DEBUGASSERT(dev->d_len == 0); tiva_freebuffer(priv, dev->d_buf); dev->d_buf = NULL; } } } /* Setup the watchdog poll timer again */ (void)wd_start(priv->txpoll, TIVA_WDDELAY, tiva_poll_expiry, 1, (uint32_t) priv); } /**************************************************************************** * Function: tiva_poll_work * * Description: * Perform periodic polling from the worker thread * * Parameters: * arg - The argument passed when work_queue() as called. * * Returned Value: * OK on success * * Assumptions: * Ethernet interrupts are disabled * ****************************************************************************/ #ifdef CONFIG_NET_NOINTS static void tiva_poll_work(FAR void *arg) { FAR struct tiva_ethmac_s *priv = (FAR struct tiva_ethmac_s *)arg; /* Perform the poll */ net_lock(); tiva_poll_process(priv); net_unlock(); } #endif /**************************************************************************** * Function: tiva_poll_expiry * * Description: * Periodic timer handler. Called from the timer interrupt handler. * * Parameters: * argc - The number of available arguments * arg - The first argument * * Returned Value: * None * * Assumptions: * Global interrupts are disabled by the watchdog logic. * ****************************************************************************/ static void tiva_poll_expiry(int argc, uint32_t arg, ...) { FAR struct tiva_ethmac_s *priv = (FAR struct tiva_ethmac_s *)arg; #ifdef CONFIG_NET_NOINTS /* Is our single work structure available? It may not be if there are * pending interrupt actions. */ if (work_available(&priv->work)) { /* Schedule to perform the interrupt processing on the worker thread. */ work_queue(HPWORK, &priv->work, tiva_poll_work, priv, 0); } else { /* No.. Just re-start the watchdog poll timer, missing one polling * cycle. 
*/ (void)wd_start(priv->txpoll, TIVA_WDDELAY, tiva_poll_expiry, 1, (uint32_t) priv); } #else /* Process the interrupt now */ tiva_poll_process(priv); #endif } /**************************************************************************** * Function: tiva_ifup * * Description: * NuttX Callback: Bring up the Ethernet interface when an IP address is * provided * * Parameters: * dev - Reference to the NuttX driver state structure * * Returned Value: * None * * Assumptions: * ****************************************************************************/ static int tiva_ifup(struct net_driver_s *dev) { FAR struct tiva_ethmac_s *priv = (FAR struct tiva_ethmac_s *)dev->d_private; int ret; #ifdef CONFIG_NET_IPv4 ndbg("Bringing up: %d.%d.%d.%d\n", dev->d_ipaddr & 0xff, (dev->d_ipaddr >> 8) & 0xff, (dev->d_ipaddr >> 16) & 0xff, dev->d_ipaddr >> 24); #endif #ifdef CONFIG_NET_IPv6 ndbg("Bringing up: %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", dev->d_ipv6addr[0], dev->d_ipv6addr[1], dev->d_ipv6addr[2], dev->d_ipv6addr[3], dev->d_ipv6addr[4], dev->d_ipv6addr[5], dev->d_ipv6addr[6], dev->d_ipv6addr[7]); #endif /* Configure the Ethernet interface for DMA operation. */ ret = tive_emac_configure(priv); if (ret < 0) { return ret; } /* Set and activate a timer process */ (void)wd_start(priv->txpoll, TIVA_WDDELAY, tiva_poll_expiry, 1, (uint32_t) priv); /* Enable the Ethernet interrupt */ priv->ifup = true; up_enable_irq(TIVA_IRQ_ETHCON); tiva_checksetup(); return OK; } /**************************************************************************** * Function: tiva_ifdown * * Description: * NuttX Callback: Stop the interface. * * Parameters: * dev - Reference to the NuttX driver state structure * * Returned Value: * None * * Assumptions: * ****************************************************************************/ static int tiva_ifdown(struct net_driver_s *dev) { FAR struct tiva_ethmac_s *priv = (FAR struct tiva_ethmac_s *)dev->d_private; irqstate_t flags; nvdbg("Taking the network down\n"); /* Disable the Ethernet interrupt */ flags = irqsave(); up_disable_irq(TIVA_IRQ_ETHCON); /* Cancel the TX poll timer and TX timeout timers */ wd_cancel(priv->txpoll); wd_cancel(priv->txtimeout); /* Put the EMAC in its reset, non-operational state. This should be * a known configuration that will guarantee the tiva_ifup() always * successfully brings the interface back up. */ tiva_ethreset(priv); /* Mark the device "down" */ priv->ifup = false; irqrestore(flags); return OK; } /**************************************************************************** * Function: tiva_txavail_process * * Description: * Perform an out-of-cycle poll. * * Parameters: * priv - Reference to the NuttX driver state structure * * Returned Value: * None * * Assumptions: * Called in normal user mode * ****************************************************************************/ static inline void tiva_txavail_process(FAR struct tiva_ethmac_s *priv) { nvdbg("ifup: %d\n", priv->ifup); /* Ignore the notification if the interface is not yet up */ if (priv->ifup) { /* Poll uIP for new XMIT data */ tiva_dopoll(priv); } } /**************************************************************************** * Function: tiva_txavail_work * * Description: * Perform an out-of-cycle poll on the worker thread. * * Parameters: * arg - Reference to the NuttX driver state structure (cast to void*) * * Returned Value: * None * * Assumptions: * Called on the higher priority worker thread. 
* ****************************************************************************/ #ifdef CONFIG_NET_NOINTS static void tiva_txavail_work(FAR void *arg) { FAR struct tiva_ethmac_s *priv = (FAR struct tiva_ethmac_s *)arg; /* Perform the poll */ net_lock(); tiva_txavail_process(priv); net_unlock(); } #endif /**************************************************************************** * Function: tiva_txavail * * Description: * Driver callback invoked when new TX data is available. This is a * stimulus perform an out-of-cycle poll and, thereby, reduce the TX * latency. * * Parameters: * dev - Reference to the NuttX driver state structure * * Returned Value: * None * * Assumptions: * Called in normal user mode * ****************************************************************************/ static int tiva_txavail(struct net_driver_s *dev) { FAR struct tiva_ethmac_s *priv = (FAR struct tiva_ethmac_s *)dev->d_private; #ifdef CONFIG_NET_NOINTS /* Is our single work structure available? It may not be if there are * pending interrupt actions and we will have to ignore the Tx * availability action. */ if (work_available(&priv->work)) { /* Schedule to serialize the poll on the worker thread. */ work_queue(HPWORK, &priv->work, tiva_txavail_work, priv, 0); } #else irqstate_t flags; /* Disable interrupts because this function may be called from interrupt * level processing. */ flags = irqsave(); /* Perform the out-of-cycle poll now */ tiva_txavail_process(priv); irqrestore(flags); #endif return OK; } /**************************************************************************** * Function: tiva_calcethcrc * * Description: * Function to calculate the CRC used to check an ethernet frame * * Parameters: * data - the data to be checked * length - length of the data * * Returned Value: * None * * Assumptions: * ****************************************************************************/ /**************************************************************************** * Function: tiva_addmac * * Description: * NuttX Callback: Add the specified MAC address to the hardware multicast * address filtering * * Parameters: * dev - Reference to the NuttX driver state structure * mac - The MAC address to be added * * Returned Value: * None * * Assumptions: * ****************************************************************************/ #if defined(CONFIG_NET_ICMPv6) static int tiva_addmac(struct net_driver_s *dev, FAR const uint8_t *mac) { uint32_t crc; uint32_t hashindex; uint32_t temp; uint32_t registeraddress; nvdbg("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); /* Add the MAC address to the hardware multicast hash table */ crc = tiva_calcethcrc(mac, 6); hashindex = (crc >> 26) & 0x3F; if (hashindex > 31) { registeraddress = TIVA_EMAC_HASHTBLH; hashindex -= 32; } else { registeraddress = TIVA_EMAC_HASHTBLL; } temp = tiva_getreg(registeraddress); temp |= 1 << hashindex; tiva_putreg(temp, registeraddress); temp = tiva_getreg(TIVA_EMAC_FRAMEFLTR); temp |= (EMAC_FRAMEFLTR_HMC | EMAC_FRAMEFLTR_HPF); tiva_putreg(temp, TIVA_EMAC_FRAMEFLTR); return OK; } #endif /**************************************************************************** * Function: tiva_txdescinit * * Description: * Initializes the DMA TX descriptors in chain mode. 
* * Parameters: * priv - Reference to the driver state structure * * Returned Value: * None * * Assumptions: * ****************************************************************************/ static void tiva_txdescinit(FAR struct tiva_ethmac_s *priv) { struct emac_txdesc_s *txdesc; int i; /* priv->txhead will point to the first, available TX descriptor in the chain. * Set the priv->txhead pointer to the first descriptor in the table. */ priv->txhead = priv->txtable; /* priv->txtail will point to the first segment of the oldest pending * "in-flight" TX transfer. NULL means that there are no active TX * transfers. */ priv->txtail = NULL; priv->inflight = 0; /* Initialize each TX descriptor */ for (i = 0; i < CONFIG_TIVA_EMAC_NTXDESC; i++) { txdesc = &priv->txtable[i]; /* Set Second Address Chained bit */ txdesc->tdes0 = EMAC_TDES0_TCH; #ifdef CHECKSUM_BY_HARDWARE /* Enable the checksum insertion for the TX frames */ txdesc->tdes0 |= EMAC_TDES0_CIC_ALL; #endif /* Clear Buffer1 address pointer (buffers will be assigned as they * are used) */ txdesc->tdes2 = 0; /* Initialize the next descriptor with the Next Descriptor Polling Enable */ if (i < (CONFIG_TIVA_EMAC_NTXDESC - 1)) { /* Set next descriptor address register with next descriptor base * address */ txdesc->tdes3 = (uint32_t) & priv->txtable[i + 1]; } else { /* For last descriptor, set next descriptor address register equal * to the first descriptor base address */ txdesc->tdes3 = (uint32_t) priv->txtable; } } /* Set Transmit Descriptor List Address Register */ tiva_putreg((uint32_t) priv->txtable, TIVA_EMAC_TXDLADDR); } /**************************************************************************** * Function: tiva_rxdescinit * * Description: * Initializes the DMA RX descriptors in chain mode. * * Parameters: * priv - Reference to the driver state structure * * Returned Value: * None * * Assumptions: * ****************************************************************************/ static void tiva_rxdescinit(FAR struct tiva_ethmac_s *priv) { struct emac_rxdesc_s *rxdesc; int i; /* priv->rxhead will point to the first, RX descriptor in the chain. * This will be where we receive the first incomplete frame. */ priv->rxhead = priv->rxtable; /* If we accumulate the frame in segments, priv->rxcurr points to the * RX descriptor of the first segment in the current TX frame. 
*/ priv->rxcurr = NULL; priv->segments = 0; /* Initialize each TX descriptor */ for (i = 0; i < CONFIG_TIVA_EMAC_NRXDESC; i++) { rxdesc = &priv->rxtable[i]; /* Set Own bit of the RX descriptor rdes0 */ rxdesc->rdes0 = EMAC_RDES0_OWN; /* Set Buffer1 size and Second Address Chained bit and enabled DMA * RX desc receive interrupt */ rxdesc->rdes1 = EMAC_RDES1_RCH | (uint32_t) OPTIMAL_EMAC_BUFSIZE; /* Set Buffer1 address pointer */ rxdesc->rdes2 = (uint32_t) & priv->rxbuffer[i * OPTIMAL_EMAC_BUFSIZE]; /* Initialize the next descriptor with the Next Descriptor Polling Enable */ if (i < (CONFIG_TIVA_EMAC_NRXDESC - 1)) { /* Set next descriptor address register with next descriptor base * address */ rxdesc->rdes3 = (uint32_t) & priv->rxtable[i + 1]; } else { /* For last descriptor, set next descriptor address register equal * to the first descriptor base address */ rxdesc->rdes3 = (uint32_t) priv->rxtable; } } /* Set Receive Descriptor List Address Register */ tiva_putreg((uint32_t) priv->rxtable, TIVA_EMAC_RXDLADDR); } /**************************************************************************** * Function: tiva_ioctl * * Description: * Executes the SIOCxMIIxxx command and responds using the request struct * that must be provided as its 2nd parameter. * * When called with SIOCGMIIPHY it will get the PHY address for the device * and write it to the req->phy_id field of the request struct. * * When called with SIOCGMIIREG it will read a register of the PHY that is * specified using the req->reg_no struct field and then write its output * to the req->val_out field. * * When called with SIOCSMIIREG it will write to a register of the PHY that * is specified using the req->reg_no struct field and use req->val_in as * its input. * * Parameters: * dev - Ethernet device structure * cmd - SIOCxMIIxxx command code * arg - Request structure also used to return values * * Returned Value: Negated errno on failure. * * Assumptions: * ****************************************************************************/ #ifdef CONFIG_NETDEV_PHY_IOCTL static int tiva_ioctl(struct net_driver_s *dev, int cmd, long arg) { int ret; switch (cmd) { #ifdef CONFIG_TIVA_PHY_INTERRUPTS case SIOCMIINOTIFY: { /* Set up for PHY event notifications */ struct mii_iotcl_notify_s *req = (struct mii_iotcl_notify_s *)((uintptr_t) arg); ret = phy_notify_subscribe(dev->d_ifname, req->pid, req->signo, req->arg); if (ret == OK) { /* Enable PHY link up/down interrupts */ tiva_phy_intenable(true); } } break; #endif case SIOCGMIIPHY: { /* Get MII PHY address */ struct mii_ioctl_data_s *req = (struct mii_ioctl_data_s *)((uintptr_t) arg); req->phy_id = CONFIG_TIVA_PHYADDR; ret = OK; } break; case SIOCGMIIREG: { /* Get register from MII PHY */ struct mii_ioctl_data_s *req = (struct mii_ioctl_data_s *)((uintptr_t) arg); ret = tiva_phyread(req->phy_id, req->reg_num, &req->val_out); } break; case SIOCSMIIREG: { /* Set register in MII PHY */ struct mii_ioctl_data_s *req = (struct mii_ioctl_data_s *)((uintptr_t) arg); ret = tiva_phywrite(req->phy_id, req->reg_num, req->val_in); } break; default: ret = -ENOTTY; break; } return ret; } #endif /* CONFIG_NETDEV_PHY_IOCTL */ /**************************************************************************** * Function: tiva_phy_intenable * * Description: * Enable link up/down PHY interrupts. The interrupt protocol is like this: * * - Interrupt status is cleared when the interrupt is enabled. * - Interrupt occurs. Interrupt is disabled (at the processor level) when * is received. 
* - Interrupt status is cleared when the interrupt is re-enabled. * * Parameters: * enable - true: enable PHY interrupts; false: disable PHY interrupts * * Returned Value: * None. * ****************************************************************************/ #ifdef CONFIG_TIVA_PHY_INTERRUPTS static void tiva_phy_intenable(bool enable) { #ifdef CONFIG_TIVA_PHY_INTERNAL uint16_t phyval; int ret; /* Disable further PHY interrupts until we complete this setup */ tiva_putreg(0, TIVA_EPHY_IM); /* Enable/disable event based PHY interrupts */ /* REVISIT: There is an issue here: The PHY interrupt handler is called * from the interrupt level and it, in turn, will call this function to * disable further interrupts. Subsequent link status processing will * also call tiva_phyread() to access PHY registers and will, eventually, * call this function again to re-enable the PHY interrupt. The control * between interrupt level access to the PHY and non-interrupt level * access to the PHY is not well enforced but is probably okay just due * to the sequencing of things. */ if (enable) { /* Configure interrupts on link status change events */ ret = tiva_phywrite(CONFIG_TIVA_PHYADDR, TIVA_EPHY_MISR1, EPHY_MISR1_LINKSTATEN); if (ret == OK) { /* Enable PHY event based interrupts */ ret = tiva_phyread(CONFIG_TIVA_PHYADDR, TIVA_EPHY_SCR, &phyval); if (ret == OK) { phyval |= EPHY_SCR_INTEN; ret = tiva_phywrite(CONFIG_TIVA_PHYADDR, TIVA_EPHY_SCR, phyval); if (ret == OK) { /* Enable PHY interrupts */ tiva_putreg(EMAC_PHYIM_INT, TIVA_EPHY_IM); } } } } else { /* Read the MISR1 register in order to clear any pending link status * interrupts. */ ret = tiva_phyread(CONFIG_TIVA_PHYADDR, TIVA_EPHY_MISR1, &phyval); if (ret == OK) { /* Disable PHY event based interrupts */ ret = tiva_phyread(CONFIG_TIVA_PHYADDR, TIVA_EPHY_SCR, &phyval); if (ret == OK) { phyval &= ~EPHY_SCR_INTEN; (void)tiva_phywrite(CONFIG_TIVA_PHYADDR, TIVA_EPHY_SCR, phyval); } } } #else /* Interrupt configuration logic for external PHYs depends on the * particular PHY part connected. */ #warning Missing logic #endif } #endif /**************************************************************************** * Function: tiva_phyread * * Description: * Read a PHY register. * * Parameters: * phydevaddr - The PHY device address * phyregaddr - The PHY register address * value - The location to return the 16-bit PHY register value. * * Returned Value: * OK on success; Negated errno on failure. * * Assumptions: * ****************************************************************************/ static int tiva_phyread(uint16_t phydevaddr, uint16_t phyregaddr, uint16_t *value) { volatile uint32_t timeout; uint32_t regval; /* Configure the MIIADDR register, preserving CSR Clock Range CR[2:0] bits */ regval = tiva_getreg(TIVA_EMAC_MIIADDR); regval &= EMAC_MIIADDR_CR_MASK; /* Set the PHY device address, PHY register address, and set the busy bit. * The EMAC_MIIADDR_MIIW bit is clear, indicating a read operation.
*/ regval |= (((uint32_t) phydevaddr << EMAC_MIIADDR_PLA_SHIFT) & EMAC_MIIADDR_PLA_MASK); regval |= (((uint32_t) phyregaddr << EMAC_MIIADDR_MII_SHIFT) & EMAC_MIIADDR_MII_MASK); regval |= EMAC_MIIADDR_MIIB; tiva_putreg(regval, TIVA_EMAC_MIIADDR); /* Wait for the transfer to complete */ for (timeout = 0; timeout < PHY_READ_TIMEOUT; timeout++) { if ((tiva_getreg(TIVA_EMAC_MIIADDR) & EMAC_MIIADDR_MIIB) == 0) { *value = (uint16_t) tiva_getreg(TIVA_EMAC_MIIDATA); return OK; } } ndbg("MII transfer timed out: phydevaddr: %04x phyregaddr: %04x\n", phydevaddr, phyregaddr); return -ETIMEDOUT; } /**************************************************************************** * Function: tiva_phywrite * * Description: * Write to a PHY register. * * Parameters: * phydevaddr - The PHY device address * phyregaddr - The PHY register address * value - The 16-bit value to write to the PHY register value. * * Returned Value: * OK on success; Negated errno on failure. * * Assumptions: * ****************************************************************************/ static int tiva_phywrite(uint16_t phydevaddr, uint16_t phyregaddr, uint16_t value) { volatile uint32_t timeout; uint32_t regval; /* Configure the MIIADDR register, preserving CSR Clock Range CR[2:0] bits */ regval = tiva_getreg(TIVA_EMAC_MIIADDR); regval &= EMAC_MIIADDR_CR_MASK; /* Set the PHY device address, PHY register address, and set the busy bit. * the EMAC_MIIADDR_MIIW is set, indicating a write operation. */ regval |= (((uint32_t) phydevaddr << EMAC_MIIADDR_PLA_SHIFT) & EMAC_MIIADDR_PLA_MASK); regval |= (((uint32_t) phyregaddr << EMAC_MIIADDR_MII_SHIFT) & EMAC_MIIADDR_MII_MASK); regval |= (EMAC_MIIADDR_MIIB | EMAC_MIIADDR_MIIW); /* Write the value into the MACIIDR register before setting the new MIIADDR * register value. */ tiva_putreg(value, TIVA_EMAC_MIIDATA); tiva_putreg(regval, TIVA_EMAC_MIIADDR); /* Wait for the transfer to complete */ for (timeout = 0; timeout < PHY_WRITE_TIMEOUT; timeout++) { if ((tiva_getreg(TIVA_EMAC_MIIADDR) & EMAC_MIIADDR_MIIB) == 0) { return OK; } } ndbg("MII transfer timed out: phydevaddr: %04x phyregaddr: %04x value: %04x\n", phydevaddr, phyregaddr, value); return -ETIMEDOUT; } /**************************************************************************** * Function: tiva_phyinit * * Description: * Configure the PHY and determine the link speed/duplex. * * Parameters: * priv - A reference to the private driver state structure * * Returned Value: * OK on success; Negated errno on failure. 
* * Assumptions: * ****************************************************************************/ static int tiva_phyinit(FAR struct tiva_ethmac_s *priv) { #ifdef CONFIG_TIVA_AUTONEG volatile uint32_t timeout; #endif uint32_t regval; uint16_t phyval; int ret; /* Assume 10MBps and half duplex */ priv->mbps100 = 0; priv->fduplex = 0; /* Setup up PHY clocking by setting the CR field in the MIIADDR register */ regval = tiva_getreg(TIVA_EMAC_MIIADDR); regval &= ~EMAC_MIIADDR_CR_MASK; regval |= EMAC_MIIADDR_CR; tiva_putreg(regval, TIVA_EMAC_MIIADDR); /* Put the PHY in reset mode */ ret = tiva_phywrite(CONFIG_TIVA_PHYADDR, MII_MCR, MII_MCR_RESET); if (ret < 0) { ndbg("Failed to reset the PHY: %d\n", ret); return ret; } up_mdelay(PHY_RESET_DELAY); /* Perform auto-negotiation if so configured */ #ifdef CONFIG_TIVA_AUTONEG /* Wait for link status */ for (timeout = 0; timeout < PHY_RETRY_TIMEOUT; timeout++) { ret = tiva_phyread(CONFIG_TIVA_PHYADDR, MII_MSR, &phyval); if (ret < 0) { ndbg("Failed to read the PHY MSR: %d\n", ret); return ret; } else if ((phyval & MII_MSR_LINKSTATUS) != 0) { break; } } if (timeout >= PHY_RETRY_TIMEOUT) { ndbg("Timed out waiting for link status: %04x\n", phyval); return -ETIMEDOUT; } /* Enable auto-negotiation */ ret = tiva_phywrite(CONFIG_TIVA_PHYADDR, MII_MCR, MII_MCR_ANENABLE); if (ret < 0) { ndbg("Failed to enable auto-negotiation: %d\n", ret); return ret; } /* Wait until auto-negotiation completes */ for (timeout = 0; timeout < PHY_RETRY_TIMEOUT; timeout++) { ret = tiva_phyread(CONFIG_TIVA_PHYADDR, MII_MSR, &phyval); if (ret < 0) { ndbg("Failed to read the PHY MSR: %d\n", ret); return ret; } else if ((phyval & MII_MSR_ANEGCOMPLETE) != 0) { break; } } if (timeout >= PHY_RETRY_TIMEOUT) { ndbg("Timed out waiting for auto-negotiation\n"); return -ETIMEDOUT; } /* Read the result of the auto-negotiation from the PHY-specific register */ ret = tiva_phyread(CONFIG_TIVA_PHYADDR, CONFIG_TIVA_PHYSR, &phyval); if (ret < 0) { ndbg("Failed to read PHY status register\n"); return ret; } /* Remember the selected speed and duplex modes */ nvdbg("PHYSR[%d]: %04x\n", CONFIG_TIVA_PHYSR, phyval); /* Different PHYs present speed and mode information in different ways. IF * This CONFIG_TIVA_PHYSR_ALTCONFIG is selected, this indicates that the PHY * represents speed and mode information are combined, for example, with * separate bits for 10HD, 100HD, 10FD and 100FD. */ #ifdef CONFIG_TIVA_PHYSR_ALTCONFIG switch (phyval & CONFIG_TIVA_PHYSR_ALTMODE) { default: case CONFIG_TIVA_PHYSR_10HD: priv->fduplex = 0; priv->mbps100 = 0; break; case CONFIG_TIVA_PHYSR_100HD: priv->fduplex = 0; priv->mbps100 = 1; break; case CONFIG_TIVA_PHYSR_10FD: priv->fduplex = 1; priv->mbps100 = 0; break; case CONFIG_TIVA_PHYSR_100FD: priv->fduplex = 1; priv->mbps100 = 1; break; } /* Different PHYs present speed and mode information in different ways. Some * will present separate information for speed and mode (this is the default). * Those PHYs, for example, may provide a 10/100 Mbps indication and a separate * full/half duplex indication. 
*/ #else if ((phyval & CONFIG_TIVA_PHYSR_MODE) == CONFIG_TIVA_PHYSR_FULLDUPLEX) { priv->fduplex = 1; } if ((phyval & CONFIG_TIVA_PHYSR_SPEED) == CONFIG_TIVA_PHYSR_100MBPS) { priv->mbps100 = 1; } #endif #else /* Auto-negotiation not selected */ phyval = 0; #ifdef CONFIG_TIVA_ETHFD phyval |= MII_MCR_FULLDPLX; #endif #ifdef CONFIG_TIVA_ETH100MBPS phyval |= MII_MCR_SPEED100; #endif ret = tiva_phywrite(CONFIG_TIVA_PHYADDR, MII_MCR, phyval); if (ret < 0) { ndbg("Failed to write the PHY MCR: %d\n", ret); return ret; } up_mdelay(PHY_CONFIG_DELAY); /* Remember the selected speed and duplex modes */ #ifdef CONFIG_TIVA_ETHFD priv->fduplex = 1; #endif #ifdef CONFIG_TIVA_ETH100MBPS priv->mbps100 = 1; #endif #endif ndbg("Duplex: %s Speed: %d Mbps\n", priv->fduplex ? "FULL" : "HALF", priv->mbps100 ? 100 : 10); return OK; } /**************************************************************************** * Function: tiva_phy_configure * * Description: * Configure to support the selected PHY. Called after each reset since * many properties of the PHY configuration are lost at each reset. * * Parameters: * priv - A reference to the private driver state structure * * Returned Value: * None. * * Assumptions: * ****************************************************************************/ static void tiva_phy_configure(FAR struct tiva_ethmac_s *priv) { uint32_t regval; /* Set up the PHY configuration */ #if defined(CONFIG_TIVA_PHY_RMII) regval = EMAC_PC_PINTFS_RMII | EMAC_PC_PHYEXT; #elif defined(CONFIG_TIVA_PHY_MII) regval = EMAC_PC_PINTFS_MII | EMAC_PC_PHYEXT; #else /* defined(CONFIG_TIVA_PHY_INTERNAL) */ regval = EMAC_PC_MDIXEN | EMAC_PC_ANMODE_100FD | EMAC_PC_ANEN | EMAC_PC_PINTFS_MII; #endif tiva_putreg(regval, TIVA_EMAC_PC); #ifdef CONFIG_TIVA_PHY_INTERNAL /* If we are using the internal PHY, reset it to ensure that new * configuration is latched. */ regval = tiva_getreg(TIVA_SYSCON_SREPHY); regval |= SYSCON_SREPHY_R0; tiva_putreg(regval, TIVA_SYSCON_SREPHY); regval &= ~SYSCON_SREPHY_R0; tiva_putreg(regval, TIVA_SYSCON_SREPHY); /* Wait for the reset to complete */ while (!tiva_ephy_periphrdy()) ; up_udelay(250); #endif /* Disable all MMC interrupts as these are enabled by default at reset */ tiva_putreg(0xffffffff, TIVA_EMAC_MMCRXIM); tiva_putreg(0xffffffff, TIVA_EMAC_MMCTXIM); /* If using an external RMII PHY, we must enable the external clock */ regval = tiva_getreg(TIVA_EMAC_CC); #if defined(CONFIG_TIVA_PHY_RMII) /* Enable the external clock source input to the RMII interface signal * EN0RREF_CLK by setting the CLKEN bit in the Ethernet Clock * Configuration (EMACCC) register. The external clock source must be * 50 MHz with a frequency tolerance of 50 PPM. */ regval |= EMAC_CC_CLKEN; #else /* Disable the external clock */ regval &= ~EMAC_CC_CLKEN; #endif tiva_putreg(regval, TIVA_EMAC_CC); } /**************************************************************************** * Function: tiva_phy_initialize * * Description: * Perform one-time PHY initialization * * Parameters: * priv - A reference to the private driver state structure * * Returned Value: * None. * * Assumptions: * ****************************************************************************/ static inline void tiva_phy_initialize(FAR struct tiva_ethmac_s *priv) { /* Enable the clock to the PHY module */ nllvdbg("Enable EPHY clocking\n"); tiva_ephy_enableclk(); /* Wait until the PREPHY register indicates that the PHY is ready before * continuing.
*/ while (!tiva_ephy_periphrdy()) ; up_udelay(250); /* Enable power to the Ethernet PHY */ nllvdbg("Enable EPHY power\n"); tiva_ephy_enablepwr(); /* What until the PREPHY register indicates that the PHY registers are ready * to be accessed. */ while (!tiva_ephy_periphrdy()) ; up_udelay(250); nllvdbg("RCGCEPHY: %08x PCEPHY: %08x PREPHY: %08x\n", getreg32(TIVA_SYSCON_RCGCEPHY), getreg32(TIVA_SYSCON_PCEPHY), getreg32(TIVA_SYSCON_PREPHY)); nllvdbg("Configure PHY GPIOs\n"); #ifdef CONFIG_TIVA_PHY_INTERNAL /* Integrated PHY: * * "The Ethernet Controller Module and Integrated PHY receive two clock inputs: * - A gated system clock acts as the clock source to the Control and Status * registers (CSR) of the Ethernet MAC. The SYSCLK frequency for Run, Sleep * and Deep Sleep mode is programmed in the System Control module. ... * - The PHY receives the main oscillator (MOSC) which must be 25 MHz ± 50 ppm * for proper operation. The MOSC source can be a single-ended source or a * crystal." * * These are currently set up in tiva_clockconfig() before this function runs. * * MII/RMII Clocking: * * External PHY support is not yet implemented. */ /* PHY interface pins: * * EN0TXOP - Fixed pin assignment * EN0TXON - Fixed pin assignment * EN0RXIP - Fixed pin assignment * EN0RXIN - Fixed pin assignment * RBIAS - Fixed pin assignment * EN0LED0 - Configured GPIO output * EN0LED1 - Configured GPIO output * EN0LED2 - Configured GPIO output */ tiva_configgpio(GPIO_EN0_LED0); tiva_configgpio(GPIO_EN0_LED1); tiva_configgpio(GPIO_EN0_LED2); #else /* if defined(CONFIG_TIVA_PHY_MII) || defined(CONFIG_TIVA_PHY_RMII) */ /* External PHY interrupt pin */ tiva_configgpio(GPIO_EN0_INTRN); /* Configure GPIO pins to support MII or RMII */ /* MDC and MDIO are common to both modes */ tiva_configgpio(GPIO_EN0_MDC); tiva_configgpio(GPIO_EN0_MDIO); #if defined(CONFIG_TIVA_PHY_MII) /* Set up the MII interface */ /* "Four clock inputs are driven into the Ethernet MAC when the MII * configuration is enabled. The clocks are described as follows: * * - Gated system clock (SYSCLK): The SYSCLK signal acts as the clock * source to the Control and Status registers (CSR) of the Ethernet * MAC. The SYSCLK frequency for Run, Sleep and Deep Sleep mode is * programmed in the System Control module. ... * - MOSC: A gated version of the MOSC clock is provided as the Precision * Time Protocol (PTP) reference clock (PTPREF_CLK). The MOSC clock * source can be a single-ended source on the OSC0 pin or a crystal * on the OSC0 and OSC1 pins. When advanced timestamping is used and * the Precision Timer Protocol (PTP) module has been enabled by setting * the PTPCEN bit in the EMACCC register, the MOSC drives PTPREF_CLK. * PTPREF_CLK has a minimum frequency requirement of 5 MHz and a * maximum frequency of 25 MHz. ... * - EN0RXCK: This clock signal is driven by the external PHY oscillator * and is either 2.5 or 25 MHz depending on whether the device is * operating at 10 Mbps or 100 Mbps. * - EN0TXCK This clock signal is driven by the external PHY oscillator * and is either 2.5 or 25 MHz depending on whether the device is * operating at 10 Mbps or 100 Mbps." 
*/ /* MII interface pins (17): * * MII_TX_CLK, MII_TXD[3:0], MII_TX_EN, MII_RX_CLK, MII_RXD[3:0], MII_RX_ER, * MII_RX_DV, MII_CRS, MII_COL, MDC, MDIO */ tiva_configgpio(GPIO_EN0_MII_COL); tiva_configgpio(GPIO_EN0_MII_CRS); tiva_configgpio(GPIO_EN0_MII_RXD0); tiva_configgpio(GPIO_EN0_MII_RXD1); tiva_configgpio(GPIO_EN0_MII_RXD2); tiva_configgpio(GPIO_EN0_MII_RXD3); tiva_configgpio(GPIO_EN0_MII_RX_CLK); tiva_configgpio(GPIO_EN0_MII_RX_DV); tiva_configgpio(GPIO_EN0_MII_RX_ER); tiva_configgpio(GPIO_EN0_MII_TXD0); tiva_configgpio(GPIO_EN0_MII_TXD1); tiva_configgpio(GPIO_EN0_MII_TXD2); tiva_configgpio(GPIO_EN0_MII_TXD3); tiva_configgpio(GPIO_EN0_MII_TX_CLK); tiva_configgpio(GPIO_EN0_MII_TX_EN); #elif defined(CONFIG_TIVA_PHY_RMII) /* Set up the RMII interface. */ /* "There are three clock sources that interface to the Ethernet MAC in * an RMII configuration: * * - Gated system clock (SYSCLK): The SYSCLK signal acts as the clock * source to the Control and Status registers (CSR) of the Ethernet MAC. * The SYSCLK frequency for Run, Sleep and Deep Sleep mode is programmed * in the System Control module. ... * - MOSC: A gated version of the MOSC clock is provided as the Precision * Time Protocol (PTP) reference clock (PTPREF_CLK). The MOSC clock * source can be a single-ended source on the OSC0 pin or a crystal on * the OSC0 and OSC1 pins. When advanced timestamping is used and * the PTP module has been enabled by setting the PTPCEN bit in the * EMACCC register, the MOSC drives PTPREF_CLK. PTPREF_CLK has a minimum * frequency requirement of 5 MHz and a maximum frequency of 25 MHz. ... * - EN0REF_CLK: When using RMII, a 50 MHz external reference clock must * drive the EN0REF_CLK input signal and the external PHY. Depending on * the configuration of the FES bit in the Ethernet MAC Configuration * (EMACCFG) register, the reference clock input (EN0REF_CLK) is divided * by 20 for 10 Mbps or 2 for 100 Mbps operation and used as the clock * for receive and transmit data." */ /* RMII interface pins (7): * * RMII_TXD[1:0], RMII_TX_EN, RMII_RXD[1:0], RMII_CRS_DV, MDC, MDIO, * RMII_REF_CLK */ tiva_configgpio(GPIO_EN0_RMII_CRS_DV); tiva_configgpio(GPIO_EN0_RMII_REF_CLK); tiva_configgpio(GPIO_EN0_RMII_RXD0); tiva_configgpio(GPIO_EN0_RMII_RXD1); tiva_configgpio(GPIO_EN0_RMII_TXD0); tiva_configgpio(GPIO_EN0_RMII_TXD1); /* tiva_configgpio(GPIO_EN0_RMII_TX_CLK); not needed? */ tiva_configgpio(GPIO_EN0_RMII_TX_EN); #endif /* Enable pulse-per-second (PPS) output signal */ tiva_configgpio(GPIO_EN0_PPS); #endif } /**************************************************************************** * Function: tiva_ethreset * * Description: * Reset the Ethernet block. * * Parameters: * priv - A reference to the private driver state structure * * Returned Value: * None. * * Assumptions: * ****************************************************************************/ static void tiva_ethreset(FAR struct tiva_ethmac_s *priv) { uint32_t regval; #if 0 /* REVISIT: This causes the DMABUSMOD reset to hang. */ /* Reset the Ethernet MAC */ regval = tiva_getreg(TIVA_SYSCON_SREMAC); regval |= SYSCON_SREMAC_R0; tiva_putreg(regval, TIVA_SYSCON_SREMAC); regval &= ~SYSCON_SREMAC_R0; tiva_putreg(regval, TIVA_SYSCON_SREMAC); /* Wait for the reset to complete */ while (!tiva_emac_periphrdy()) ; up_udelay(250); #endif /* Perform a software reset by setting the SWR bit in the DMABUSMOD register. * This Resets all MAC subsystem internal registers and logic. After this * reset all the registers holds their reset values. 
*/ regval = tiva_getreg(TIVA_EMAC_DMABUSMOD); regval |= EMAC_DMABUSMOD_SWR; tiva_putreg(regval, TIVA_EMAC_DMABUSMOD); /* Wait for software reset to complete. The SWR bit is cleared automatically * after the reset operation has completed in all of the core clock domains. */ while ((tiva_getreg(TIVA_EMAC_DMABUSMOD) & EMAC_DMABUSMOD_SWR) != 0) ; up_udelay(250); /* Reconfigure the PHY. Some PHY configurations will be lost as a * consequence of the EMAC reset */ tiva_phy_configure(priv); } /**************************************************************************** * Function: tiva_macconfig * * Description: * Configure the Ethernet MAC for DMA operation. * * Parameters: * priv - A reference to the private driver state structure * * Returned Value: * OK on success; Negated errno on failure. * * Assumptions: * ****************************************************************************/ static int tiva_macconfig(FAR struct tiva_ethmac_s *priv) { uint32_t regval; /* Set up the MACCR register */ regval = tiva_getreg(TIVA_EMAC_CFG); regval &= ~MACCR_CLEAR_BITS; regval |= MACCR_SET_BITS; if (priv->fduplex) { /* Set the DM bit for full duplex support */ regval |= EMAC_CFG_DUPM; } if (priv->mbps100) { /* Set the FES bit for 100Mbps fast Ethernet support */ regval |= EMAC_CFG_FES; } tiva_putreg(regval, TIVA_EMAC_CFG); /* Set up the FRAMEFLTR register */ regval = tiva_getreg(TIVA_EMAC_FRAMEFLTR); regval &= ~FRAMEFLTR_CLEAR_BITS; regval |= FRAMEFLTR_SET_BITS; tiva_putreg(regval, TIVA_EMAC_FRAMEFLTR); /* Set up the HASHTBLH and HASHTBLL registers */ tiva_putreg(0, TIVA_EMAC_HASHTBLH); tiva_putreg(0, TIVA_EMAC_HASHTBLL); /* Setup up the FLOWCTL register */ regval = tiva_getreg(TIVA_EMAC_FLOWCTL); regval &= ~FLOWCTL_CLEAR_MASK; regval |= FLOWCTL_SET_MASK; tiva_putreg(regval, TIVA_EMAC_FLOWCTL); /* Setup up the VLANTG register */ tiva_putreg(0, TIVA_EMAC_VLANTG); /* DMA Configuration */ /* Set up the DMAOPMODE register */ regval = tiva_getreg(TIVA_EMAC_DMAOPMODE); regval &= ~DMAOPMODE_CLEAR_MASK; regval |= DMAOPMODE_SET_MASK; tiva_putreg(regval, TIVA_EMAC_DMAOPMODE); /* Set up the DMABUSMOD register */ regval = tiva_getreg(TIVA_EMAC_DMABUSMOD); regval &= ~DMABUSMOD_CLEAR_MASK; regval |= DMABUSMOD_SET_MASK; tiva_putreg(regval, TIVA_EMAC_DMABUSMOD); return OK; } /**************************************************************************** * Function: tiva_macaddress * * Description: * Configure the selected MAC address. * * Parameters: * priv - A reference to the private driver state structure * * Returned Value: * OK on success; Negated errno on failure. 
* * Assumptions: * ****************************************************************************/ static void tiva_macaddress(FAR struct tiva_ethmac_s *priv) { FAR struct net_driver_s *dev = &priv->dev; uint32_t regval; nvdbg("%s MAC: %02x:%02x:%02x:%02x:%02x:%02x\n", dev->d_ifname, dev->d_mac.ether_addr_octet[0], dev->d_mac.ether_addr_octet[1], dev->d_mac.ether_addr_octet[2], dev->d_mac.ether_addr_octet[3], dev->d_mac.ether_addr_octet[4], dev->d_mac.ether_addr_octet[5]); /* Set the MAC address high register */ regval = ((uint32_t) dev->d_mac.ether_addr_octet[5] << 8) | (uint32_t) dev->d_mac.ether_addr_octet[4]; tiva_putreg(regval, TIVA_EMAC_ADDR0H); /* Set the MAC address low register */ regval = ((uint32_t) dev->d_mac.ether_addr_octet[3] << 24) | ((uint32_t) dev->d_mac.ether_addr_octet[2] << 16) | ((uint32_t) dev->d_mac.ether_addr_octet[1] << 8) | (uint32_t) dev->d_mac.ether_addr_octet[0]; tiva_putreg(regval, TIVA_EMAC_ADDR0L); } /**************************************************************************** * Function: tiva_ipv6multicast * * Description: * Configure the IPv6 multicast MAC address. * * Parameters: * priv - A reference to the private driver state structure * * Returned Value: * OK on success; Negated errno on failure. * * Assumptions: * ****************************************************************************/ #ifdef CONFIG_NET_ICMPv6 static void tiva_ipv6multicast(FAR struct tiva_ethmac_s *priv) { struct net_driver_s *dev; uint16_t tmp16; uint8_t mac[6]; /* For ICMPv6, we need to add the IPv6 multicast address * * For IPv6 multicast addresses, the Ethernet MAC is derived by * the four low-order octets OR'ed with the MAC 33:33:00:00:00:00, * so for example the IPv6 address FF02:DEAD:BEEF::1:3 would map * to the Ethernet MAC address 33:33:00:01:00:03. * * NOTES: This appears correct for the ICMPv6 Router Solicitation * Message, but the ICMPv6 Neighbor Solicitation message seems to * use 33:33:ff:01:00:03. */ mac[0] = 0x33; mac[1] = 0x33; dev = &priv->dev; tmp16 = dev->d_ipv6addr[6]; mac[2] = 0xff; mac[3] = tmp16 >> 8; tmp16 = dev->d_ipv6addr[7]; mac[4] = tmp16 & 0xff; mac[5] = tmp16 >> 8; nvdbg("IPv6 Multicast: %02x:%02x:%02x:%02x:%02x:%02x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); (void)tiva_addmac(dev, mac); #ifdef CONFIG_NET_ICMPv6_AUTOCONF /* Add the IPv6 all link-local nodes Ethernet address. This is the * address that we expect to receive ICMPv6 Router Advertisement * packets. */ (void)tiva_addmac(dev, g_ipv6_ethallnodes.ether_addr_octet); #endif /* CONFIG_NET_ICMPv6_AUTOCONF */ #ifdef CONFIG_NET_ICMPv6_ROUTER /* Add the IPv6 all link-local routers Ethernet address. This is the * address that we expect to receive ICMPv6 Router Solicitation * packets. */ (void)tiva_addmac(dev, g_ipv6_ethallrouters.ether_addr_octet); #endif /* CONFIG_NET_ICMPv6_ROUTER */ } #endif /* CONFIG_NET_ICMPv6 */ /**************************************************************************** * Function: tiva_macenable * * Description: * Enable normal MAC operation. * * Parameters: * priv - A reference to the private driver state structure * * Returned Value: * OK on success; Negated errno on failure. 
* * Assumptions: * ****************************************************************************/ static int tiva_macenable(FAR struct tiva_ethmac_s *priv) { uint32_t regval; /* Set the MAC address */ tiva_macaddress(priv); #ifdef CONFIG_NET_ICMPv6 /* Set up the IPv6 multicast address */ tiva_ipv6multicast(priv); #endif /* Enable transmit state machine of the MAC for transmission on the MII */ regval = tiva_getreg(TIVA_EMAC_CFG); regval |= EMAC_CFG_TE; tiva_putreg(regval, TIVA_EMAC_CFG); /* Flush Transmit FIFO */ regval = tiva_getreg(TIVA_EMAC_DMAOPMODE); regval |= EMAC_DMAOPMODE_FTF; tiva_putreg(regval, TIVA_EMAC_DMAOPMODE); /* Enable receive state machine of the MAC for reception from the MII */ /* Enables or disables the MAC reception. */ regval = tiva_getreg(TIVA_EMAC_CFG); regval |= EMAC_CFG_RE; tiva_putreg(regval, TIVA_EMAC_CFG); /* Start DMA transmission */ regval = tiva_getreg(TIVA_EMAC_DMAOPMODE); regval |= EMAC_DMAOPMODE_ST; tiva_putreg(regval, TIVA_EMAC_DMAOPMODE); /* Start DMA reception */ regval = tiva_getreg(TIVA_EMAC_DMAOPMODE); regval |= EMAC_DMAOPMODE_SR; tiva_putreg(regval, TIVA_EMAC_DMAOPMODE); /* Enable Ethernet DMA interrupts. */ tiva_putreg(EMAC_IM_ALLINTS, TIVA_EMAC_IM); /* Ethernet DMA supports two classes of interrupts: Normal interrupt * summary (NIS) and Abnormal interrupt summary (AIS) with a variety * individual normal and abnormal interrupting events. Here only * the normal receive event is enabled (unless DEBUG is enabled). Transmit * events will only be enabled when a transmit interrupt is expected. */ tiva_putreg((EMAC_DMAINT_RECV_ENABLE | EMAC_DMAINT_ERROR_ENABLE), TIVA_EMAC_DMAIM); return OK; } /**************************************************************************** * Function: tive_emac_configure * * Description: * Configure the Ethernet interface for DMA operation. * * Parameters: * priv - A reference to the private driver state structure * * Returned Value: * OK on success; Negated errno on failure. * * Assumptions: * ****************************************************************************/ static int tive_emac_configure(FAR struct tiva_ethmac_s *priv) { int ret; /* NOTE: The Ethernet clocks were initialized earlier in the start-up * sequence. */ /* Reset the Ethernet block */ nvdbg("Reset the Ethernet block\n"); tiva_ethreset(priv); /* Initialize the PHY */ nvdbg("Initialize the PHY\n"); ret = tiva_phyinit(priv); if (ret < 0) { return ret; } /* Initialize the MAC and DMA */ nvdbg("Initialize the MAC and DMA\n"); ret = tiva_macconfig(priv); if (ret < 0) { return ret; } /* Initialize the free buffer list */ tiva_initbuffer(priv); /* Initialize TX Descriptors list: Chain Mode */ tiva_txdescinit(priv); /* Initialize RX Descriptors list: Chain Mode */ tiva_rxdescinit(priv); /* Enable normal MAC operation */ nvdbg("Enable normal operation\n"); return tiva_macenable(priv); } /**************************************************************************** * Public Functions ****************************************************************************/ /**************************************************************************** * Function: tiva_ethinitialize * * Description: * Initialize the Ethernet driver for one interface. If the Tiva chip * supports multiple Ethernet controllers, then board specific logic * must implement up_netinitialize() and call this function to initialize * the desired interfaces. * * Parameters: * intf - In the case where there are multiple EMACs, this value * identifies which EMAC is to be initialized. 
* * Returned Value: * OK on success; Negated errno on failure. * * Assumptions: * ****************************************************************************/ #if TIVA_NETHCONTROLLERS == 1 static inline #endif int tiva_ethinitialize(int intf) { struct tiva_ethmac_s *priv; uint32_t regval; nllvdbg("intf: %d\n", intf); /* Get the interface structure associated with this interface number. */ DEBUGASSERT(intf < TIVA_NETHCONTROLLERS); priv = &g_tiva_ethmac[intf]; /* Initialize the driver structure */ memset(priv, 0, sizeof(struct tiva_ethmac_s)); priv->dev.d_ifup = tiva_ifup; /* I/F up (new IP address) callback */ priv->dev.d_ifdown = tiva_ifdown; /* I/F down callback */ priv->dev.d_txavail = tiva_txavail; /* New TX data callback */ #ifdef CONFIG_NETDEV_PHY_IOCTL priv->dev.d_ioctl = tiva_ioctl; /* Support PHY ioctl() calls */ #endif priv->dev.d_private = (void *)g_tiva_ethmac; /* Used to recover private state from dev */ /* Create watchdogs for timing the periodic poll and TX timeouts */ priv->txpoll = wd_create(); /* Create periodic poll timer */ priv->txtimeout = wd_create(); /* Create TX timeout timer */ #ifdef CONFIG_TIVA_PHY_INTERRUPTS /* Initialize a semaphore for phy notification */ phy_notify_initialize(); #endif #ifdef CONFIG_TIVA_BOARDMAC /* If the board can provide us with a MAC address, get the address * from the board now. The MAC will not be applied until tiva_ifup() * is called (and the MAC can be overwritten with a netdev ioctl call). */ tiva_ethernetmac(&priv->dev.d_mac); #endif /* Enable power and clocking to the Ethernet MAC * * - Enable Power: Applies power (only) to the EMAC peripheral. This is not * an essential step since enabling clocking will also apply power. The * only significance is that the EMAC state will be retained if the EMAC * clocking is subsequently disabled. * - Enable Clocking: Applies both power and clocking to the EMAC peripheral, * bringing it to a fully functional state. */ nllvdbg("Enable EMAC clocking\n"); tiva_emac_enablepwr(); /* Ethernet MAC Power Control */ tiva_emac_enableclk(); /* Ethernet MAC Run Mode Clock Gating Control */ /* Wait until the PREMAC register indicates that the EMAC registers are ready * to be accessed. */ while (!tiva_emac_periphrdy()) ; up_udelay(250); /* Show all EMAC clocks */ nllvdbg("RCGCEMAC: %08x PCEMAC: %08x PREMAC: %08x MOSCCTL: %08x\n", getreg32(TIVA_SYSCON_RCGCEMAC), getreg32(TIVA_SYSCON_PCEMAC), getreg32(TIVA_SYSCON_PREMAC), getreg32(TIVA_SYSCON_MOSCCTL)); /* Configure clocking and GPIOs to support the internal/external PHY */ tiva_phy_initialize(priv); /* Attach the IRQ to the driver */ if (irq_attach(TIVA_IRQ_ETHCON, tiva_interrupt)) { /* We could not attach the ISR to the interrupt */ return -EAGAIN; } /* Wait for EMAC to come out of reset. The SWR bit is cleared automatically * after the reset operation has completed in all of the core clock domains. */ while ((tiva_getreg(TIVA_EMAC_DMABUSMOD) & EMAC_DMABUSMOD_SWR) != 0) ; up_udelay(250); #if 0 /* REVISIT: Part of work around to avoid DMABUSMOD SWR hangs */ /* Put the interface in the down state.
*/ tiva_ifdown(&priv->dev); #else /* Reset the Ethernet MAC */ regval = tiva_getreg(TIVA_SYSCON_SREMAC); regval |= SYSCON_SREMAC_R0; tiva_putreg(regval, TIVA_SYSCON_SREMAC); regval &= ~SYSCON_SREMAC_R0; tiva_putreg(regval, TIVA_SYSCON_SREMAC); /* Wait for the reset to complete */ while (!tiva_emac_periphrdy()) ; up_udelay(250); #endif /* Register the device with the OS so that socket IOCTLs can be performed */ nllvdbg("Registering Ethernet device\n"); return netdev_register(&priv->dev, NET_LL_ETHERNET); } /**************************************************************************** * Function: up_netinitialize * * Description: * This is the "standard" network initialization logic called from the * low-level initialization logic in up_initialize.c. If TIVA_NETHCONTROLLERS * greater than one, then board specific logic will have to supply a * version of up_netinitialize() that calls tiva_ethinitialize() with * the appropriate interface number. * * Parameters: * None. * * Returned Value: * None. * * Assumptions: * ****************************************************************************/ #if TIVA_NETHCONTROLLERS == 1 void up_netinitialize(void) { (void)tiva_ethinitialize(0); } #endif /**************************************************************************** * Name: arch_phy_irq * * Description: * This function may be called to register an interrupt handler that will * be called when a PHY interrupt occurs. This function both attaches * the interrupt handler and enables the interrupt if 'handler' is non- * NULL. If handler is NULL, then the interrupt is detached and disabled * instead. * * The PHY interrupt is always disabled upon return. The caller must * call back through the enable function point to control the state of * the interrupt. * * This interrupt may or may not be available on a given platform depending * on how the network hardware architecture is implemented. In a typical * case, the PHY interrupt is provided to board-level logic as a GPIO * interrupt (in which case this is a board-specific interface and really * should be called board_phy_irq()); In other cases, the PHY interrupt * may be cause by the chip's MAC logic (in which case arch_phy_irq()) is * an appropriate name. Other other boards, there may be no PHY interrupts * available at all. If client attachable PHY interrupts are available * from the board or from the chip, then CONFIG_ARCH_PHY_INTERRUPT should * be defined to indicate that fact. * * Typical usage: * a. OS service logic (not application logic*) attaches to the PHY * PHY interrupt and enables the PHY interrupt. * b. When the PHY interrupt occurs: (1) the interrupt should be * disabled and () work should be scheduled on the worker thread (or * perhaps a dedicated application thread). * c. That worker thread should use the SIOCGMIIPHY, SIOCGMIIREG, * and SIOCSMIIREG ioctl calls** to communicate with the PHY, * determine what network event took place (Link Up/Down?), and * take the appropriate actions. * d. It should then interact the PHY to clear any pending * interrupts, then re-enable the PHY interrupt. * * * This is an OS internal interface and should not be used from * application space. Rather applications should use the SIOCMIISIG * ioctl to receive a signal when a PHY event occurs. * ** This interrupt is really of no use if the Ethernet MAC driver * does not support these ioctl calls. * * Input Parameters: * intf - Identifies the network interface. For example "eth0". 
Only * useful on platforms that support multiple Ethernet interfaces * and, hence, multiple PHYs and PHY interrupts. * handler - The client interrupt handler to be invoked when the PHY * asserts an interrupt. Must reside in OS space, but can * signal tasks in user space. A value of NULL can be passed * in order to detach and disable the PHY interrupt. * enable - A function pointer that may be used to enable or disable the * PHY interrupt. * * Returned Value: * The previous PHY interrupt handler address is returned. This allows you * to temporarily replace an interrupt handler, then restore the original * interrupt handler. NULL is returned if there was no handler in * place when the call was made. * ****************************************************************************/ #ifdef CONFIG_TIVA_PHY_INTERRUPTS xcpt_t arch_phy_irq(FAR const char *intf, xcpt_t handler, phy_enable_t *enable) { struct tiva_ethmac_s *priv; irqstate_t flags; xcpt_t oldhandler; DEBUGASSERT(intf); nvdbg("%s: handler=%p\n", intf, handler); /* Get the interface structure associated with this interface. */ #if TIVA_NETHCONTROLLERS > 1 /* REVISIT: Additional logic needed if there are multiple EMACs */ #  warning Missing logic #endif priv = g_tiva_ethmac; /* Disable interrupts until we are done. This guarantees that the * following operations are atomic. */ flags = irqsave(); /* Get the old interrupt handler and save the new one */ oldhandler = priv->handler; priv->handler = handler; /* Return with the interrupt disabled in any case */ tiva_phy_intenable(false); /* Return the enabling function pointer */ if (enable) { *enable = handler ? tiva_phy_intenable : NULL; } /* Return the old handler (so that it can be restored) */ irqrestore(flags); return oldhandler; } #endif /* CONFIG_TIVA_PHY_INTERRUPTS */ #endif /* TIVA_NETHCONTROLLERS > 0 */ #endif /* CONFIG_NET && CONFIG_TIVA_ETHERNET */
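The arch_phy_irq() comment above describes an attach/enable/deferred-work flow but the file contains no caller. The following board-level sketch is an assumption-laden illustration, not part of the driver: board_phy_setup(), board_phy_interrupt(), the "eth0" interface name, and the handler signature (matching this NuttX generation's xcpt_t) are assumed, and the worker thread that performs the SIOCGMIIPHY/SIOCGMIIREG handling is omitted.

/* Hedged usage sketch only; names and details are assumptions. */

static phy_enable_t g_phy_enable;   /* Enable/disable callback returned by arch_phy_irq() */

static int board_phy_interrupt(int irq, FAR void *context)
{
  /* Step (b) of the typical usage: mask further PHY interrupts, then defer
   * the ioctl-based link handling to a worker thread (not shown here).
   */

  if (g_phy_enable)
    {
      g_phy_enable(false);
    }

  return OK;
}

static void board_phy_setup(void)
{
  /* Step (a): attach the handler, then turn the PHY interrupt on through the
   * enable callback that arch_phy_irq() hands back via its third argument.
   */

  (void)arch_phy_irq("eth0", board_phy_interrupt, &g_phy_enable);

  if (g_phy_enable)
    {
      g_phy_enable(true);
    }
}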
167875.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014 Red Hat * Author: Rob Clark <[email protected]> */ #include <drm/drm_atomic_uapi.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_vblank.h> #include "msm_atomic_trace.h" #include "msm_drv.h" #include "msm_gem.h" #include "msm_kms.h" int msm_atomic_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { struct msm_drm_private *priv = plane->dev->dev_private; struct msm_kms *kms = priv->kms; if (!new_state->fb) return 0; drm_gem_plane_helper_prepare_fb(plane, new_state); return msm_framebuffer_prepare(new_state->fb, kms->aspace); } /* * Helpers to control vblanks while we flush.. basically just to ensure * that vblank accounting is switched on, so we get valid seqn/timestamp * on pageflip events (if requested) */ static void vblank_get(struct msm_kms *kms, unsigned crtc_mask) { struct drm_crtc *crtc; for_each_crtc_mask(kms->dev, crtc, crtc_mask) { if (!crtc->state->active) continue; drm_crtc_vblank_get(crtc); } } static void vblank_put(struct msm_kms *kms, unsigned crtc_mask) { struct drm_crtc *crtc; for_each_crtc_mask(kms->dev, crtc, crtc_mask) { if (!crtc->state->active) continue; drm_crtc_vblank_put(crtc); } } static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask) { int crtc_index; struct drm_crtc *crtc; for_each_crtc_mask(kms->dev, crtc, crtc_mask) { crtc_index = drm_crtc_index(crtc); mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index); } } static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask) { struct drm_crtc *crtc; for_each_crtc_mask_reverse(kms->dev, crtc, crtc_mask) mutex_unlock(&kms->commit_lock[drm_crtc_index(crtc)]); } static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) { unsigned crtc_mask = BIT(crtc_idx); trace_msm_atomic_async_commit_start(crtc_mask); lock_crtcs(kms, crtc_mask); if (!(kms->pending_crtc_mask & crtc_mask)) { unlock_crtcs(kms, crtc_mask); goto out; } kms->pending_crtc_mask &= ~crtc_mask; kms->funcs->enable_commit(kms); vblank_get(kms, crtc_mask); /* * Flush hardware updates: */ trace_msm_atomic_flush_commit(crtc_mask); kms->funcs->flush_commit(kms, crtc_mask); /* * Wait for flush to complete: */ trace_msm_atomic_wait_flush_start(crtc_mask); kms->funcs->wait_flush(kms, crtc_mask); trace_msm_atomic_wait_flush_finish(crtc_mask); vblank_put(kms, crtc_mask); kms->funcs->complete_commit(kms, crtc_mask); unlock_crtcs(kms, crtc_mask); kms->funcs->disable_commit(kms); out: trace_msm_atomic_async_commit_finish(crtc_mask); } static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t) { struct msm_pending_timer *timer = container_of(t, struct msm_pending_timer, timer); kthread_queue_work(timer->worker, &timer->work); return HRTIMER_NORESTART; } static void msm_atomic_pending_work(struct kthread_work *work) { struct msm_pending_timer *timer = container_of(work, struct msm_pending_timer, work); msm_atomic_async_commit(timer->kms, timer->crtc_idx); } int msm_atomic_init_pending_timer(struct msm_pending_timer *timer, struct msm_kms *kms, int crtc_idx) { timer->kms = kms; timer->crtc_idx = crtc_idx; hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); timer->timer.function = msm_atomic_pending_timer; timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx); if (IS_ERR(timer->worker)) { int ret = PTR_ERR(timer->worker); timer->worker = NULL; return ret; } sched_set_fifo(timer->worker->task); kthread_init_work(&timer->work, msm_atomic_pending_work); return 0; } void 
msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer) { if (timer->worker) kthread_destroy_worker(timer->worker); } static bool can_do_async(struct drm_atomic_state *state, struct drm_crtc **async_crtc) { struct drm_connector_state *connector_state; struct drm_connector *connector; struct drm_crtc_state *crtc_state; struct drm_crtc *crtc; int i, num_crtcs = 0; if (!(state->legacy_cursor_update || state->async_update)) return false; /* any connector change, means slow path: */ for_each_new_connector_in_state(state, connector, connector_state, i) return false; for_each_new_crtc_in_state(state, crtc, crtc_state, i) { if (drm_atomic_crtc_needs_modeset(crtc_state)) return false; if (++num_crtcs > 1) return false; *async_crtc = crtc; } return true; } /* Get bitmask of crtcs that will need to be flushed. The bitmask * can be used with for_each_crtc_mask() iterator, to iterate * effected crtcs without needing to preserve the atomic state. */ static unsigned get_crtc_mask(struct drm_atomic_state *state) { struct drm_crtc_state *crtc_state; struct drm_crtc *crtc; unsigned i, mask = 0; for_each_new_crtc_in_state(state, crtc, crtc_state, i) mask |= drm_crtc_mask(crtc); return mask; } void msm_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct msm_drm_private *priv = dev->dev_private; struct msm_kms *kms = priv->kms; struct drm_crtc *async_crtc = NULL; unsigned crtc_mask = get_crtc_mask(state); bool async = kms->funcs->vsync_time && can_do_async(state, &async_crtc); trace_msm_atomic_commit_tail_start(async, crtc_mask); kms->funcs->enable_commit(kms); /* * Ensure any previous (potentially async) commit has * completed: */ lock_crtcs(kms, crtc_mask); trace_msm_atomic_wait_flush_start(crtc_mask); kms->funcs->wait_flush(kms, crtc_mask); trace_msm_atomic_wait_flush_finish(crtc_mask); /* * Now that there is no in-progress flush, prepare the * current update: */ kms->funcs->prepare_commit(kms, state); /* * Push atomic updates down to hardware: */ drm_atomic_helper_commit_modeset_disables(dev, state); drm_atomic_helper_commit_planes(dev, state, 0); drm_atomic_helper_commit_modeset_enables(dev, state); if (async) { struct msm_pending_timer *timer = &kms->pending_timers[drm_crtc_index(async_crtc)]; /* async updates are limited to single-crtc updates: */ WARN_ON(crtc_mask != drm_crtc_mask(async_crtc)); /* * Start timer if we don't already have an update pending * on this crtc: */ if (!(kms->pending_crtc_mask & crtc_mask)) { ktime_t vsync_time, wakeup_time; kms->pending_crtc_mask |= crtc_mask; vsync_time = kms->funcs->vsync_time(kms, async_crtc); wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1)); hrtimer_start(&timer->timer, wakeup_time, HRTIMER_MODE_ABS); } kms->funcs->disable_commit(kms); unlock_crtcs(kms, crtc_mask); /* * At this point, from drm core's perspective, we * are done with the atomic update, so we can just * go ahead and signal that it is done: */ drm_atomic_helper_commit_hw_done(state); drm_atomic_helper_cleanup_planes(dev, state); trace_msm_atomic_commit_tail_finish(async, crtc_mask); return; } /* * If there is any async flush pending on updated crtcs, fold * them into the current flush. 
*/ kms->pending_crtc_mask &= ~crtc_mask; vblank_get(kms, crtc_mask); /* * Flush hardware updates: */ trace_msm_atomic_flush_commit(crtc_mask); kms->funcs->flush_commit(kms, crtc_mask); unlock_crtcs(kms, crtc_mask); /* * Wait for flush to complete: */ trace_msm_atomic_wait_flush_start(crtc_mask); kms->funcs->wait_flush(kms, crtc_mask); trace_msm_atomic_wait_flush_finish(crtc_mask); vblank_put(kms, crtc_mask); lock_crtcs(kms, crtc_mask); kms->funcs->complete_commit(kms, crtc_mask); unlock_crtcs(kms, crtc_mask); kms->funcs->disable_commit(kms); drm_atomic_helper_commit_hw_done(state); drm_atomic_helper_cleanup_planes(dev, state); trace_msm_atomic_commit_tail_finish(async, crtc_mask); }
247398.c
#include "cambadge.h" #include "globals.h" char* settings(unsigned int action) { unsigned int i, j, t; static unsigned char tport, tbyte; static unsigned int u1rxcount; static unsigned char state = 0; #define s_sstart 0 #define s_idle 1 #define s_formwait 2 #define s_formwait2 3 #define s_twidstart 4 #define s_twiddle 5 #define s_speedtest 6 #define s_stwait 7 if (action == act_name) return ("UTILITIES"); else if (action == act_help) return ("Various oddments"); if (action == act_start) state = s_sstart; if (action != act_poll) return (0); if (butpress & powerbut) return (""); switch (state) { case s_sstart: printf(cls butcol "EXIT " whi inv "UTILITIES" inv butcol " Boot" bot "Utils twiddler" tabx0 taby11 "Card Hardware"); state = s_idle; break; case s_idle: if (!tick) break; printf(tabx0 whi taby2 "X: %6d\nY: %6d\nZ:%6d\n\n", accx, accy, accz); if (butpress & but1) { printf(cls top "EXIT" bot "Speedtest Format"); state = s_formwait; break; } if (butpress & but2) state = s_twidstart; if (butpress & but4) { do { kickwatchdog; delayus(10000); readbuttons(); } while (butstate); reboot(); break; } break; case s_formwait: if (!butpress) break; printf(cls top "EXIT" tabx14 "Really" taby1 tabx13"Confirm"); if (butpress & but1) { state = s_speedtest; break; } if (butpress & but3) state = s_formwait2; else state = s_sstart; break; case s_speedtest: if (cardmounted == 0) { state = s_sstart; break; } state = s_stwait; printf(cls whi "Opening" tabx0); T5CON = 0b1000000001110000; // timer to measure grab+save time for playback framerate PR5 = 0xffff; TMR5 = 0; // timer on, /256 prescale fptr = FSfopen("Speedtst.dat", FS_WRITE); if(fptr==NULL) {printf("CARD ERROR");break;} u1rxcount = 0; #define passes 7 #define wsize (128*96*2+8) for (i = 0; i != passes; i++) { TMR5 = 0; IFS0bits.T5IF = 0; j = FSfwrite(&cambuffer[0], wsize, 1, fptr); if(j==0) printf("ERR"); t = TMR5; if (IFS0bits.T5IF) t += 0x10000; // rolled - assume only once t = (t * 256 / (clockfreq / 1000)); // mS u1rxcount += t; printf("Blk %1d T=%5dmS", i, t); if (IFS0bits.T5IF) printf("?"); //possible overflow printf("\n"); }//for FSfclose(fptr); printf("\n%4d Kbytes/Sec\n~%2d FPS (RGB)", wsize * passes * 1024 / 1000 / u1rxcount, 1000 * passes / u1rxcount); FSremove("Speedtst.dat"); break; case s_stwait: if (!tick) break; printf(tabx0 taby11 whi "Card status: "); if (cardmounted) printf("OK " bot butcol "Repeat Exit"); else printf("None " bot butcol " Exit"); if (butpress & but3) state = s_sstart; if (butpress & but1) if (cardmounted)state = s_speedtest; break; case s_formwait2: if (!(butpress & but4)) break; sd_cs_in; delayus(1000); i = sd_carddet; sd_cs_out; state = s_sstart; if (!i) { printf(cls "No card" del del); break; } if (butpress & but4) { printf(cls top "\n\nFormatting "); if (FSformat(0, 1234, "HADBADGE")) printf("\nFailed" del del); else printf("\nFormatted OK" del del); } break; case s_twidstart: printf(cls butcol "EXIT Tx1" bot "OutPort Flip I2C"); state = s_twiddle; tport = 0; tbyte = 0x55; break; case s_twiddle: if (U1MODEbits.ON) if (U1STAbits.URXDA) printf(tabx8 taby6 "U1RX %02X #%04d", U1RXREG, ++u1rxcount); if (!tick) return (0); printf(tabx0 taby1 whi " A2 B0 B1 B4 C3 C5" tabx0 taby2 "In" tabx0 taby3 "Out"); printf(tabx4 taby2 "%1d %1d %1d %1d %1d %1d", PORTAbits.RA2 ? 1 : 0, PORTBbits.RB0 ? 1 : 0, PORTBbits.RB1 ? 1 : 0, PORTBbits.RB4 ? 1 : 0, PORTCbits.RC3 ? 1 : 0, PORTCbits.RC5 ? 1 : 0); printf(tabx4 taby3 "%1d %1d %1d %1d %1d %1d", LATAbits.LATA2 ? 1 : 0, LATBbits.LATB0 ? 1 : 0, LATBbits.LATB1 ? 1 : 0, LATBbits.LATB4 ? 
1 : 0, LATCbits.LATC3 ? 1 : 0, PMADDR & 8 ? 1 : 0); for (i = 0; i != 6; i++) printf(taby4"%c%c", 0x84 + i * 3, (i == tport) ? 0x18 : ' '); if (butpress & but1) if (++tport == 6) tport = 0; if (butpress & but4) { } if (butpress & but2) switch (tport) { case 0: LATAINV = 1 << 2; break; case 1: LATBINV = 1 << 0; break; case 2: LATBINV = 1 << 1; break; case 3: LATBINV = 1 << 4; break; case 4: LATCINV = 1 << 3; break; case 5: //LATCINV = 1 << 5; PMADDR = (PMADDR & 8) ? PMADDR&~8 : PMADDR | 8; break; } if (butpress & but3) { printf(top taby9 whi "I2C: " taby9 tabx4); I2C2BRG = clockfreq / 2 / i2cspeed_cam; for (i = 0x10; i != 0xfe; i += 2) { j = iistart(i); iistop(); if (j) printf("%02X ", i); } } if (butpress & but4) { printf(tabx0 taby6 "U1TX %02X", tbyte); u1txbyte(tbyte++); } if (U1MODEbits.ON) printf(tabx0 taby7 "U1 Baud=%d", u1baud); }//switch return (0); }
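The card speed test above converts raw Timer5 counts (taken with a /256 prescaler) to milliseconds inline. The helper below is not part of the badge firmware; it just spells out that arithmetic, and the 48 MHz figure in the comment is only an assumed example, the firmware uses its runtime clockfreq value.

/* Hedged helper (illustration only): convert a /256-prescaled Timer5 count
 * into milliseconds, mirroring t * 256 / (clockfreq / 1000) in the speed
 * test above. With an assumed clockfreq of 48 MHz, one prescaled tick is
 * 256 / 48e6 s (about 5.3 us), so 1875 ticks comes out as roughly 10 ms. */
static unsigned int t5_ticks_to_ms(unsigned int ticks, unsigned int clockfreq)
{
    return ticks * 256u / (clockfreq / 1000u);
}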
548493.c
/* * Copyright (c) 2013-2019 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the License); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an AS IS BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * ----------------------------------------------------------------------------- * * Project: CMSIS-RTOS RTX * Title: RTX Library Configuration * * ----------------------------------------------------------------------------- */ #include "cmsis_compiler.h" #include "RTX_Config.h" #include "rtx_os.h" #ifdef RTE_Compiler_EventRecorder #include "EventRecorder.h" #include "EventRecorderConf.h" #endif #include "rtx_evr.h" // System Configuration // ==================== // Dynamic Memory #if (OS_DYNAMIC_MEM_SIZE != 0) #if ((OS_DYNAMIC_MEM_SIZE % 8) != 0) #error "Invalid Dynamic Memory size!" #endif static uint64_t os_mem[OS_DYNAMIC_MEM_SIZE/8] \ __attribute__((section(".bss.os"))); #endif // Kernel Tick Frequency #if (OS_TICK_FREQ < 1) #error "Invalid Kernel Tick Frequency!" #endif // ISR FIFO Queue #if (OS_ISR_FIFO_QUEUE < 4) #error "Invalid ISR FIFO Queue size!" #endif static void *os_isr_queue[OS_ISR_FIFO_QUEUE] \ __attribute__((section(".bss.os"))); // Thread Configuration // ==================== #if (((OS_STACK_SIZE % 8) != 0) || (OS_STACK_SIZE < 72)) #error "Invalid default Thread Stack size!" #endif #if (((OS_IDLE_THREAD_STACK_SIZE % 8) != 0) || (OS_IDLE_THREAD_STACK_SIZE < 72)) #error "Invalid Idle Thread Stack size!" #endif #if (OS_THREAD_OBJ_MEM != 0) #if (OS_THREAD_NUM == 0) #error "Invalid number of user Threads!" #endif #if ((OS_THREAD_USER_STACK_SIZE != 0) && ((OS_THREAD_USER_STACK_SIZE % 8) != 0)) #error "Invalid total Stack size!" 
#endif // Thread Control Blocks static osRtxThread_t os_thread_cb[OS_THREAD_NUM] \ __attribute__((section(".bss.os.thread.cb"))); // Thread Default Stack #if (OS_THREAD_DEF_STACK_NUM != 0) static uint64_t os_thread_def_stack[OS_THREAD_DEF_STACK_NUM*(OS_STACK_SIZE/8)] \ __attribute__((section(".bss.os.thread.stack"))); #endif // Memory Pool for Thread Control Blocks static osRtxMpInfo_t os_mpi_thread \ __attribute__((section(".data.os.thread.mpi"))) = { (uint32_t)OS_THREAD_NUM, 0U, (uint32_t)osRtxThreadCbSize, &os_thread_cb[0], NULL, NULL }; // Memory Pool for Thread Default Stack #if (OS_THREAD_DEF_STACK_NUM != 0) static osRtxMpInfo_t os_mpi_def_stack \ __attribute__((section(".data.os.thread.mpi"))) = { (uint32_t)OS_THREAD_DEF_STACK_NUM, 0U, (uint32_t)OS_STACK_SIZE, &os_thread_def_stack[0], NULL, NULL }; #endif // Memory Pool for Thread Stack #if (OS_THREAD_USER_STACK_SIZE != 0) static uint64_t os_thread_stack[2 + OS_THREAD_NUM + (OS_THREAD_USER_STACK_SIZE/8)] \ __attribute__((section(".bss.os.thread.stack"))); #endif #endif // (OS_THREAD_OBJ_MEM != 0) // Stack overrun checking #if (OS_STACK_CHECK == 0) // Override library function extern void osRtxThreadStackCheck (void); void osRtxThreadStackCheck (void) {} #endif // Idle Thread Control Block static osRtxThread_t os_idle_thread_cb \ __attribute__((section(".bss.os.thread.cb"))); // Idle Thread Stack #if defined (__CC_ARM) static uint64_t os_idle_thread_stack[OS_IDLE_THREAD_STACK_SIZE/8]; #else static uint64_t os_idle_thread_stack[OS_IDLE_THREAD_STACK_SIZE/8] \ __attribute__((section(".bss.os.thread.stack"))); #endif // Idle Thread Attributes static const osThreadAttr_t os_idle_thread_attr = { #if defined(OS_IDLE_THREAD_NAME) OS_IDLE_THREAD_NAME, #else NULL, #endif osThreadDetached, &os_idle_thread_cb, (uint32_t)sizeof(os_idle_thread_cb), &os_idle_thread_stack[0], (uint32_t)sizeof(os_idle_thread_stack), osPriorityIdle, #if defined(OS_IDLE_THREAD_TZ_MOD_ID) (uint32_t)OS_IDLE_THREAD_TZ_MOD_ID, #else 0U, #endif 0U }; // Timer Configuration // =================== #if (OS_TIMER_OBJ_MEM != 0) #if (OS_TIMER_NUM == 0) #error "Invalid number of Timer objects!" #endif // Timer Control Blocks static osRtxTimer_t os_timer_cb[OS_TIMER_NUM] \ __attribute__((section(".bss.os.timer.cb"))); // Memory Pool for Timer Control Blocks static osRtxMpInfo_t os_mpi_timer \ __attribute__((section(".data.os.timer.mpi"))) = { (uint32_t)OS_TIMER_NUM, 0U, (uint32_t)osRtxTimerCbSize, &os_timer_cb[0], NULL, NULL }; #endif // (OS_TIMER_OBJ_MEM != 0) #if ((OS_TIMER_THREAD_STACK_SIZE != 0) && (OS_TIMER_CB_QUEUE != 0)) #if (((OS_TIMER_THREAD_STACK_SIZE % 8) != 0) || (OS_TIMER_THREAD_STACK_SIZE < 96)) #error "Invalid Timer Thread Stack size!" 
#endif // Timer Thread Control Block static osRtxThread_t os_timer_thread_cb \ __attribute__((section(".bss.os.thread.cb"))); #if defined (__CC_ARM) static uint64_t os_timer_thread_stack[OS_TIMER_THREAD_STACK_SIZE/8]; #else // Timer Thread Stack static uint64_t os_timer_thread_stack[OS_TIMER_THREAD_STACK_SIZE/8] \ __attribute__((section(".bss.os.thread.stack"))); #endif // Timer Thread Attributes static const osThreadAttr_t os_timer_thread_attr = { #if defined(OS_TIMER_THREAD_NAME) OS_TIMER_THREAD_NAME, #else NULL, #endif osThreadDetached, &os_timer_thread_cb, (uint32_t)sizeof(os_timer_thread_cb), &os_timer_thread_stack[0], (uint32_t)sizeof(os_timer_thread_stack), //lint -e{9030} -e{9034} "cast from signed to enum" (osPriority_t)OS_TIMER_THREAD_PRIO, #if defined(OS_TIMER_THREAD_TZ_MOD_ID) (uint32_t)OS_TIMER_THREAD_TZ_MOD_ID, #else 0U, #endif 0U }; // Timer Message Queue Control Block static osRtxMessageQueue_t os_timer_mq_cb \ __attribute__((section(".bss.os.msgqueue.cb"))); // Timer Message Queue Data static uint32_t os_timer_mq_data[osRtxMessageQueueMemSize(OS_TIMER_CB_QUEUE,8)/4] \ __attribute__((section(".bss.os.msgqueue.mem"))); // Timer Message Queue Attributes static const osMessageQueueAttr_t os_timer_mq_attr = { NULL, 0U, &os_timer_mq_cb, (uint32_t)sizeof(os_timer_mq_cb), &os_timer_mq_data[0], (uint32_t)sizeof(os_timer_mq_data) }; #else extern void osRtxTimerThread (void *argument); void osRtxTimerThread (void *argument) { (void)argument; } #endif // ((OS_TIMER_THREAD_STACK_SIZE != 0) && (OS_TIMER_CB_QUEUE != 0)) // Event Flags Configuration // ========================= #if (OS_EVFLAGS_OBJ_MEM != 0) #if (OS_EVFLAGS_NUM == 0) #error "Invalid number of Event Flags objects!" #endif // Event Flags Control Blocks static osRtxEventFlags_t os_ef_cb[OS_EVFLAGS_NUM] \ __attribute__((section(".bss.os.evflags.cb"))); // Memory Pool for Event Flags Control Blocks static osRtxMpInfo_t os_mpi_ef \ __attribute__((section(".data.os.evflags.mpi"))) = { (uint32_t)OS_EVFLAGS_NUM, 0U, (uint32_t)osRtxEventFlagsCbSize, &os_ef_cb[0], NULL, NULL }; #endif // (OS_EVFLAGS_OBJ_MEM != 0) // Mutex Configuration // =================== #if (OS_MUTEX_OBJ_MEM != 0) #if (OS_MUTEX_NUM == 0) #error "Invalid number of Mutex objects!" #endif // Mutex Control Blocks static osRtxMutex_t os_mutex_cb[OS_MUTEX_NUM] \ __attribute__((section(".bss.os.mutex.cb"))); // Memory Pool for Mutex Control Blocks static osRtxMpInfo_t os_mpi_mutex \ __attribute__((section(".data.os.mutex.mpi"))) = { (uint32_t)OS_MUTEX_NUM, 0U, (uint32_t)osRtxMutexCbSize, &os_mutex_cb[0], NULL, NULL }; #endif // (OS_MUTEX_OBJ_MEM != 0) // Semaphore Configuration // ======================= #if (OS_SEMAPHORE_OBJ_MEM != 0) #if (OS_SEMAPHORE_NUM == 0) #error "Invalid number of Semaphore objects!" #endif // Semaphore Control Blocks static osRtxSemaphore_t os_semaphore_cb[OS_SEMAPHORE_NUM] \ __attribute__((section(".bss.os.semaphore.cb"))); // Memory Pool for Semaphore Control Blocks static osRtxMpInfo_t os_mpi_semaphore \ __attribute__((section(".data.os.semaphore.mpi"))) = { (uint32_t)OS_SEMAPHORE_NUM, 0U, (uint32_t)osRtxSemaphoreCbSize, &os_semaphore_cb[0], NULL, NULL }; #endif // (OS_SEMAPHORE_OBJ_MEM != 0) // Memory Pool Configuration // ========================= #if (OS_MEMPOOL_OBJ_MEM != 0) #if (OS_MEMPOOL_NUM == 0) #error "Invalid number of Memory Pool objects!" 
#endif // Memory Pool Control Blocks static osRtxMemoryPool_t os_mp_cb[OS_MEMPOOL_NUM] \ __attribute__((section(".bss.os.mempool.cb"))); // Memory Pool for Memory Pool Control Blocks static osRtxMpInfo_t os_mpi_mp \ __attribute__((section(".data.os.mempool.mpi"))) = { (uint32_t)OS_MEMPOOL_NUM, 0U, (uint32_t)osRtxMemoryPoolCbSize, &os_mp_cb[0], NULL, NULL }; // Memory Pool for Memory Pool Data Storage #if (OS_MEMPOOL_DATA_SIZE != 0) #if ((OS_MEMPOOL_DATA_SIZE % 8) != 0) #error "Invalid Data Memory size for Memory Pools!" #endif static uint64_t os_mp_data[2 + OS_MEMPOOL_NUM + (OS_MEMPOOL_DATA_SIZE/8)] \ __attribute__((section(".bss.os.mempool.mem"))); #endif #endif // (OS_MEMPOOL_OBJ_MEM != 0) // Message Queue Configuration // =========================== #if (OS_MSGQUEUE_OBJ_MEM != 0) #if (OS_MSGQUEUE_NUM == 0) #error "Invalid number of Message Queue objects!" #endif // Message Queue Control Blocks static osRtxMessageQueue_t os_mq_cb[OS_MSGQUEUE_NUM] \ __attribute__((section(".bss.os.msgqueue.cb"))); // Memory Pool for Message Queue Control Blocks static osRtxMpInfo_t os_mpi_mq \ __attribute__((section(".data.os.msgqueue.mpi"))) = { (uint32_t)OS_MSGQUEUE_NUM, 0U, (uint32_t)osRtxMessageQueueCbSize, &os_mq_cb[0], NULL, NULL }; // Memory Pool for Message Queue Data Storage #if (OS_MSGQUEUE_DATA_SIZE != 0) #if ((OS_MSGQUEUE_DATA_SIZE % 8) != 0) #error "Invalid Data Memory size for Message Queues!" #endif static uint64_t os_mq_data[2 + OS_MSGQUEUE_NUM + (OS_MSGQUEUE_DATA_SIZE/8)] \ __attribute__((section(".bss.os.msgqueue.mem"))); #endif #endif // (OS_MSGQUEUE_OBJ_MEM != 0) // Event Recorder Configuration // ============================ #if (defined(OS_EVR_INIT) && (OS_EVR_INIT != 0)) // Initial Thread configuration covered also Thread Flags and Generic Wait #if defined(OS_EVR_THREAD_FILTER) #if !defined(OS_EVR_THFLAGS_FILTER) #define OS_EVR_THFLAGS_FILTER OS_EVR_THREAD_FILTER #endif #if !defined(OS_EVR_WAIT_FILTER) #define OS_EVR_WAIT_FILTER OS_EVR_THREAD_FILTER #endif #endif // Migrate initial filter configuration #if defined(OS_EVR_MEMORY_FILTER) #define OS_EVR_MEMORY_LEVEL (((OS_EVR_MEMORY_FILTER & 0x80U) != 0U) ? (OS_EVR_MEMORY_FILTER & 0x0FU) : 0U) #endif #if defined(OS_EVR_KERNEL_FILTER) #define OS_EVR_KERNEL_LEVEL (((OS_EVR_KERNEL_FILTER & 0x80U) != 0U) ? (OS_EVR_KERNEL_FILTER & 0x0FU) : 0U) #endif #if defined(OS_EVR_THREAD_FILTER) #define OS_EVR_THREAD_LEVEL (((OS_EVR_THREAD_FILTER & 0x80U) != 0U) ? (OS_EVR_THREAD_FILTER & 0x0FU) : 0U) #endif #if defined(OS_EVR_WAIT_FILTER) #define OS_EVR_WAIT_LEVEL (((OS_EVR_WAIT_FILTER & 0x80U) != 0U) ? (OS_EVR_WAIT_FILTER & 0x0FU) : 0U) #endif #if defined(OS_EVR_THFLAGS_FILTER) #define OS_EVR_THFLAGS_LEVEL (((OS_EVR_THFLAGS_FILTER & 0x80U) != 0U) ? (OS_EVR_THFLAGS_FILTER & 0x0FU) : 0U) #endif #if defined(OS_EVR_EVFLAGS_FILTER) #define OS_EVR_EVFLAGS_LEVEL (((OS_EVR_EVFLAGS_FILTER & 0x80U) != 0U) ? (OS_EVR_EVFLAGS_FILTER & 0x0FU) : 0U) #endif #if defined(OS_EVR_TIMER_FILTER) #define OS_EVR_TIMER_LEVEL (((OS_EVR_TIMER_FILTER & 0x80U) != 0U) ? (OS_EVR_TIMER_FILTER & 0x0FU) : 0U) #endif #if defined(OS_EVR_MUTEX_FILTER) #define OS_EVR_MUTEX_LEVEL (((OS_EVR_MUTEX_FILTER & 0x80U) != 0U) ? (OS_EVR_MUTEX_FILTER & 0x0FU) : 0U) #endif #if defined(OS_EVR_SEMAPHORE_FILTER) #define OS_EVR_SEMAPHORE_LEVEL (((OS_EVR_SEMAPHORE_FILTER & 0x80U) != 0U) ? (OS_EVR_SEMAPHORE_FILTER & 0x0FU) : 0U) #endif #if defined(OS_EVR_MEMPOOL_FILTER) #define OS_EVR_MEMPOOL_LEVEL (((OS_EVR_MEMPOOL_FILTER & 0x80U) != 0U) ? 
(OS_EVR_MEMPOOL_FILTER & 0x0FU) : 0U) #endif #if defined(OS_EVR_MSGQUEUE_FILTER) #define OS_EVR_MSGQUEUE_LEVEL (((OS_EVR_MSGQUEUE_FILTER & 0x80U) != 0U) ? (OS_EVR_MSGQUEUE_FILTER & 0x0FU) : 0U) #endif #if defined(RTE_Compiler_EventRecorder) // Event Recorder Initialize __STATIC_INLINE void evr_initialize (void) { (void)EventRecorderInitialize(OS_EVR_LEVEL, (uint32_t)OS_EVR_START); (void)EventRecorderEnable(OS_EVR_MEMORY_LEVEL, EvtRtxMemoryNo, EvtRtxMemoryNo); (void)EventRecorderEnable(OS_EVR_KERNEL_LEVEL, EvtRtxKernelNo, EvtRtxKernelNo); (void)EventRecorderEnable(OS_EVR_THREAD_LEVEL, EvtRtxThreadNo, EvtRtxThreadNo); (void)EventRecorderEnable(OS_EVR_WAIT_LEVEL, EvtRtxWaitNo, EvtRtxWaitNo); (void)EventRecorderEnable(OS_EVR_THFLAGS_LEVEL, EvtRtxThreadFlagsNo, EvtRtxThreadFlagsNo); (void)EventRecorderEnable(OS_EVR_EVFLAGS_LEVEL, EvtRtxEventFlagsNo, EvtRtxEventFlagsNo); (void)EventRecorderEnable(OS_EVR_TIMER_LEVEL, EvtRtxTimerNo, EvtRtxTimerNo); (void)EventRecorderEnable(OS_EVR_MUTEX_LEVEL, EvtRtxMutexNo, EvtRtxMutexNo); (void)EventRecorderEnable(OS_EVR_SEMAPHORE_LEVEL, EvtRtxSemaphoreNo, EvtRtxSemaphoreNo); (void)EventRecorderEnable(OS_EVR_MEMPOOL_LEVEL, EvtRtxMemoryPoolNo, EvtRtxMemoryPoolNo); (void)EventRecorderEnable(OS_EVR_MSGQUEUE_LEVEL, EvtRtxMessageQueueNo, EvtRtxMessageQueueNo); } #else #warning "Event Recorder cannot be initialized (Event Recorder component is not selected)!" #define evr_initialize() #endif #endif // (OS_EVR_INIT != 0) // OS Configuration // ================ const osRtxConfig_t osRtxConfig \ __USED \ __attribute__((section(".rodata"))) = { //lint -e{835} "Zero argument to operator" 0U // Flags #if (OS_PRIVILEGE_MODE != 0) | osRtxConfigPrivilegedMode #endif #if (OS_STACK_CHECK != 0) | osRtxConfigStackCheck #endif #if (OS_STACK_WATERMARK != 0) | osRtxConfigStackWatermark #endif , (uint32_t)OS_TICK_FREQ, #if (OS_ROBIN_ENABLE != 0) (uint32_t)OS_ROBIN_TIMEOUT, #else 0U, #endif { &os_isr_queue[0], (uint16_t)(sizeof(os_isr_queue)/sizeof(void *)), 0U }, { // Memory Pools (Variable Block Size) #if ((OS_THREAD_OBJ_MEM != 0) && (OS_THREAD_USER_STACK_SIZE != 0)) &os_thread_stack[0], sizeof(os_thread_stack), #else NULL, 0U, #endif #if ((OS_MEMPOOL_OBJ_MEM != 0) && (OS_MEMPOOL_DATA_SIZE != 0)) &os_mp_data[0], sizeof(os_mp_data), #else NULL, 0U, #endif #if ((OS_MSGQUEUE_OBJ_MEM != 0) && (OS_MSGQUEUE_DATA_SIZE != 0)) &os_mq_data[0], sizeof(os_mq_data), #else NULL, 0U, #endif #if (OS_DYNAMIC_MEM_SIZE != 0) &os_mem[0], (uint32_t)OS_DYNAMIC_MEM_SIZE, #else NULL, 0U #endif }, { // Memory Pools (Fixed Block Size) #if (OS_THREAD_OBJ_MEM != 0) #if (OS_THREAD_DEF_STACK_NUM != 0) &os_mpi_def_stack, #else NULL, #endif &os_mpi_thread, #else NULL, NULL, #endif #if (OS_TIMER_OBJ_MEM != 0) &os_mpi_timer, #else NULL, #endif #if (OS_EVFLAGS_OBJ_MEM != 0) &os_mpi_ef, #else NULL, #endif #if (OS_MUTEX_OBJ_MEM != 0) &os_mpi_mutex, #else NULL, #endif #if (OS_SEMAPHORE_OBJ_MEM != 0) &os_mpi_semaphore, #else NULL, #endif #if (OS_MEMPOOL_OBJ_MEM != 0) &os_mpi_mp, #else NULL, #endif #if (OS_MSGQUEUE_OBJ_MEM != 0) &os_mpi_mq, #else NULL, #endif }, (uint32_t)OS_STACK_SIZE, &os_idle_thread_attr, #if ((OS_TIMER_THREAD_STACK_SIZE != 0) && (OS_TIMER_CB_QUEUE != 0)) &os_timer_thread_attr, &os_timer_mq_attr, (uint32_t)OS_TIMER_CB_QUEUE #else NULL, NULL, 0U #endif }; // Non weak reference to library irq module //lint -esym(526,irqRtxLib) "Defined by Exception handlers" //lint -esym(714,irqRtxLibRef) "Non weak reference" //lint -esym(765,irqRtxLibRef) "Global scope" extern uint8_t irqRtxLib; extern const uint8_t 
*irqRtxLibRef; const uint8_t *irqRtxLibRef = &irqRtxLib; // Default User SVC Table //lint -esym(714,osRtxUserSVC) "Referenced by Exception handlers" //lint -esym(765,osRtxUserSVC) "Global scope" //lint -e{9067} "extern array declared without size" extern void * const osRtxUserSVC[]; __WEAK void * const osRtxUserSVC[1] = { (void *)0 }; // OS Sections // =========== #if defined(__CC_ARM) || \ (defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)) static uint32_t __os_thread_cb_start__ __attribute__((weakref(".bss.os.thread.cb$$Base"))); //lint -esym(728,__os_thread_cb_start__) static uint32_t __os_thread_cb_end__ __attribute__((weakref(".bss.os.thread.cb$$Limit"))); //lint -esym(728,__os_thread_cb_end__) static uint32_t __os_timer_cb_start__ __attribute__((weakref(".bss.os.timer.cb$$Base"))); //lint -esym(728,__os_timer_cb_start__) static uint32_t __os_timer_cb_end__ __attribute__((weakref(".bss.os.timer.cb$$Limit"))); //lint -esym(728,__os_timer_cb_end__) static uint32_t __os_evflags_cb_start__ __attribute__((weakref(".bss.os.evflags.cb$$Base"))); //lint -esym(728,__os_evflags_cb_start__) static uint32_t __os_evflags_cb_end__ __attribute__((weakref(".bss.os.evflags.cb$$Limit"))); //lint -esym(728,__os_evflags_cb_end__) static uint32_t __os_mutex_cb_start__ __attribute__((weakref(".bss.os.mutex.cb$$Base"))); //lint -esym(728,__os_mutex_cb_start__) static uint32_t __os_mutex_cb_end__ __attribute__((weakref(".bss.os.mutex.cb$$Limit"))); //lint -esym(728,__os_mutex_cb_end__) static uint32_t __os_semaphore_cb_start__ __attribute__((weakref(".bss.os.semaphore.cb$$Base"))); //lint -esym(728,__os_semaphore_cb_start__) static uint32_t __os_semaphore_cb_end__ __attribute__((weakref(".bss.os.semaphore.cb$$Limit"))); //lint -esym(728,__os_semaphore_cb_end__) static uint32_t __os_mempool_cb_start__ __attribute__((weakref(".bss.os.mempool.cb$$Base"))); //lint -esym(728,__os_mempool_cb_start__) static uint32_t __os_mempool_cb_end__ __attribute__((weakref(".bss.os.mempool.cb$$Limit"))); //lint -esym(728,__os_mempool_cb_end__) static uint32_t __os_msgqueue_cb_start__ __attribute__((weakref(".bss.os.msgqueue.cb$$Base"))); //lint -esym(728,__os_msgqueue_cb_start__) static uint32_t __os_msgqueue_cb_end__ __attribute__((weakref(".bss.os.msgqueue.cb$$Limit"))); //lint -esym(728,__os_msgqueue_cb_end__) #else extern uint32_t __os_thread_cb_start__ __attribute__((weak)); extern uint32_t __os_thread_cb_end__ __attribute__((weak)); extern uint32_t __os_timer_cb_start__ __attribute__((weak)); extern uint32_t __os_timer_cb_end__ __attribute__((weak)); extern uint32_t __os_evflags_cb_start__ __attribute__((weak)); extern uint32_t __os_evflags_cb_end__ __attribute__((weak)); extern uint32_t __os_mutex_cb_start__ __attribute__((weak)); extern uint32_t __os_mutex_cb_end__ __attribute__((weak)); extern uint32_t __os_semaphore_cb_start__ __attribute__((weak)); extern uint32_t __os_semaphore_cb_end__ __attribute__((weak)); extern uint32_t __os_mempool_cb_start__ __attribute__((weak)); extern uint32_t __os_mempool_cb_end__ __attribute__((weak)); extern uint32_t __os_msgqueue_cb_start__ __attribute__((weak)); extern uint32_t __os_msgqueue_cb_end__ __attribute__((weak)); #endif //lint -e{9067} "extern array declared without size" extern const uint32_t * const os_cb_sections[]; //lint -esym(714,os_cb_sections) "Referenced by debugger" //lint -esym(765,os_cb_sections) "Global scope" const uint32_t * const os_cb_sections[] \ __USED \ __attribute__((section(".rodata"))) = { &__os_thread_cb_start__, &__os_thread_cb_end__, 
&__os_timer_cb_start__, &__os_timer_cb_end__, &__os_evflags_cb_start__, &__os_evflags_cb_end__, &__os_mutex_cb_start__, &__os_mutex_cb_end__, &__os_semaphore_cb_start__, &__os_semaphore_cb_end__, &__os_mempool_cb_start__, &__os_mempool_cb_end__, &__os_msgqueue_cb_start__, &__os_msgqueue_cb_end__ }; // OS Initialization // ================= #if defined(__CC_ARM) || \ (defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)) #ifndef __MICROLIB //lint -esym(714,_platform_post_stackheap_init) "Referenced by C library" //lint -esym(765,_platform_post_stackheap_init) "Global scope" extern void _platform_post_stackheap_init (void); __WEAK void _platform_post_stackheap_init (void) { (void)osKernelInitialize(); } #endif #elif defined(__GNUC__) extern void software_init_hook (void); __WEAK void software_init_hook (void) { (void)osKernelInitialize(); } #endif // OS Hooks // ======== // RTOS Kernel Pre-Initialization Hook #if (defined(OS_EVR_INIT) && (OS_EVR_INIT != 0)) void osRtxKernelPreInit (void); void osRtxKernelPreInit (void) { if (osKernelGetState() == osKernelInactive) { evr_initialize(); } } #endif // C/C++ Standard Library Multithreading Interface // =============================================== #if ( !defined(RTX_NO_MULTITHREAD_CLIB) && \ ( defined(__CC_ARM) || \ (defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050))) && \ !defined(__MICROLIB)) #define LIBSPACE_SIZE 96 //lint -esym(714,__user_perthread_libspace,_mutex_*) "Referenced by C library" //lint -esym(765,__user_perthread_libspace,_mutex_*) "Global scope" //lint -esym(9003, os_libspace*) "variables 'os_libspace*' defined at module scope" // Memory for libspace static uint32_t os_libspace[OS_THREAD_LIBSPACE_NUM+1][LIBSPACE_SIZE/4] \ __attribute__((section(".bss.os.libspace"))); // Thread IDs for libspace static osThreadId_t os_libspace_id[OS_THREAD_LIBSPACE_NUM] \ __attribute__((section(".bss.os.libspace"))); // Check if Kernel has been started static uint32_t os_kernel_is_active (void) { static uint8_t os_kernel_active = 0U; if (os_kernel_active == 0U) { if (osKernelGetState() > osKernelReady) { os_kernel_active = 1U; } } return (uint32_t)os_kernel_active; } // Provide libspace for current thread void *__user_perthread_libspace (void); void *__user_perthread_libspace (void) { osThreadId_t id; uint32_t n; if (os_kernel_is_active() != 0U) { id = osThreadGetId(); for (n = 0U; n < (uint32_t)OS_THREAD_LIBSPACE_NUM; n++) { if (os_libspace_id[n] == NULL) { os_libspace_id[n] = id; } if (os_libspace_id[n] == id) { break; } } if (n == (uint32_t)OS_THREAD_LIBSPACE_NUM) { (void)osRtxErrorNotify(osRtxErrorClibSpace, id); } } else { n = OS_THREAD_LIBSPACE_NUM; } //lint -e{9087} "cast between pointers to different object types" return (void *)&os_libspace[n][0]; } // Mutex identifier typedef void *mutex; //lint -save "Function prototypes defined in C library" //lint -e970 "Use of 'int' outside of a typedef" //lint -e818 "Pointer 'm' could be declared as pointing to const" // Initialize mutex #if !defined(__ARMCC_VERSION) || __ARMCC_VERSION < 6010050 __USED #endif int _mutex_initialize(mutex *m); __WEAK int _mutex_initialize(mutex *m) { int result; *m = osMutexNew(NULL); if (*m != NULL) { result = 1; } else { result = 0; (void)osRtxErrorNotify(osRtxErrorClibMutex, m); } return result; } // Acquire mutex #if !defined(__ARMCC_VERSION) || __ARMCC_VERSION < 6010050 __USED #endif __WEAK void _mutex_acquire(mutex *m); void _mutex_acquire(mutex *m) { if (os_kernel_is_active() != 0U) { (void)osMutexAcquire(*m, osWaitForever); } } // Release mutex 
#if !defined(__ARMCC_VERSION) || __ARMCC_VERSION < 6010050 __USED #endif __WEAK void _mutex_release(mutex *m); void _mutex_release(mutex *m) { if (os_kernel_is_active() != 0U) { (void)osMutexRelease(*m); } } // Free mutex #if !defined(__ARMCC_VERSION) || __ARMCC_VERSION < 6010050 __USED #endif __WEAK void _mutex_free(mutex *m); void _mutex_free(mutex *m) { (void)osMutexDelete(*m); } //lint -restore #endif
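RTX_Lib.c above wires statically allocated control blocks and stacks into osThreadAttr_t structures for the idle and timer threads. The sketch below shows the same pattern applied to an ordinary application thread; it is an illustration, not part of the library configuration, and the thread name "app", the 512-byte stack, the 100-tick delay, and osPriorityNormal are assumptions.

/* Hedged application-side example of the static osThreadAttr_t pattern used above. */
#include "cmsis_os2.h"
#include "rtx_os.h"

static osRtxThread_t app_thread_cb;             /* control block from rtx_os.h */
static uint64_t      app_thread_stack[512/8];   /* 8-byte aligned stack */

static const osThreadAttr_t app_thread_attr = {
  .name       = "app",
  .attr_bits  = osThreadDetached,
  .cb_mem     = &app_thread_cb,
  .cb_size    = sizeof(app_thread_cb),
  .stack_mem  = &app_thread_stack[0],
  .stack_size = sizeof(app_thread_stack),
  .priority   = osPriorityNormal
};

static void app_thread (void *argument) {
  (void)argument;
  for (;;) {
    osDelay(100U);
  }
}

/* After osKernelInitialize(): osThreadNew(app_thread, NULL, &app_thread_attr); */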
281210.c
/* This file is part of GOTCHA. For copyright information see the COPYRIGHT file in the top level directory, or at https://github.com/LLNL/gotcha/blob/master/COPYRIGHT This program is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License (as published by the Free Software Foundation) version 2.1 dated February 1999. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "tool.h" #include "libc_wrappers.h" #include "gotcha_utils.h" static tool_t *tools = NULL; static binding_t *all_bindings = NULL; tool_t* get_tool_list(){ return tools; } /* Returns the gotcha_strcmp() result, so 0 (logical false) means the tool names match */ int tool_equal(tool_t* t1, tool_t* t2){ return gotcha_strcmp(t1->tool_name,t2->tool_name); } void remove_tool_from_list(struct tool_t* target){ if(!tools){ return; } if(!tool_equal(tools,target)){ /* target is the list head */ tools = tools->next_tool; return; } struct tool_t *cur = tools; while( (cur!=NULL) && (cur->next_tool != NULL) && (tool_equal(cur->next_tool,target))){ cur = cur->next_tool; } /* Only unlink when the target was actually found; cur->next_tool may be NULL here */ if((cur->next_tool != NULL) && !tool_equal(cur->next_tool,target)){ cur->next_tool = target->next_tool; } } void reorder_tool(tool_t* new_tool) { int new_priority = new_tool->config.priority; if(tools==NULL || tools->config.priority >= new_priority ){ new_tool->next_tool = tools; tools = new_tool; } else{ struct tool_t *cur = tools; while((cur->next_tool != NULL) && cur->next_tool->config.priority < new_priority){ cur = cur->next_tool; } new_tool->next_tool = cur->next_tool; cur->next_tool = new_tool; } } tool_t *create_tool(const char *tool_name) { debug_printf(1, "Found no existing tool with name %s\n",tool_name); // TODO: ensure free tool_t *newtool = (tool_t *) gotcha_malloc(sizeof(tool_t)); if (!newtool) { error_printf("Failed to malloc tool %s\n", tool_name); return NULL; } newtool->tool_name = tool_name; newtool->binding = NULL; //newtool->next_tool = tools; newtool->config = get_default_configuration(); reorder_tool(newtool); newtool->parent_tool = NULL; create_hashtable(&newtool->child_tools, 24, (hash_func_t) strhash, (hash_cmp_t) gotcha_strcmp); //tools = newtool; debug_printf(1, "Created new tool %s\n", tool_name); return newtool; } tool_t *get_tool(const char *tool_name) { tool_t *t; for (t = tools; t; t = t->next_tool) { if (gotcha_strcmp(tool_name, t->tool_name) == 0) { return t; } } return NULL; } binding_t *add_binding_to_tool(tool_t *tool, struct gotcha_binding_t *user_binding, int user_binding_size) { binding_t *newbinding; int result, i; newbinding = (binding_t *) gotcha_malloc(sizeof(binding_t)); newbinding->tool = tool; struct internal_binding_t* internal_bindings = (struct internal_binding_t*)gotcha_malloc(sizeof(struct internal_binding_t)*user_binding_size); for(i=0;i<user_binding_size;i++){ internal_bindings[i].user_binding = &user_binding[i]; *(user_binding[i].function_handle) = &internal_bindings[i]; internal_bindings[i].associated_binding_table = newbinding; } newbinding->internal_bindings = internal_bindings; newbinding->internal_bindings_size = user_binding_size; result = create_hashtable(&newbinding->binding_hash, user_binding_size * 2, (hash_func_t) strhash, (hash_cmp_t) gotcha_strcmp); if (result != 0) { 
error_printf("Could not create hash table for %s\n", tool->tool_name); goto error; // error is a label which frees allocated resources and returns NULL } for (i = 0; i < user_binding_size; i++) { result = addto_hashtable(&newbinding->binding_hash, (void *) user_binding[i].name, (void *) (internal_bindings + i)); if (result != 0) { error_printf("Could not add hash entry for %s to table for tool %s\n", user_binding[i].name, tool->tool_name); goto error; // error is a label which frees allocated resources and returns NULL } } newbinding->next_tool_binding = tool->binding; tool->binding = newbinding; newbinding->next_binding = all_bindings; all_bindings = newbinding; debug_printf(2, "Created new binding table of size %d for tool %s\n", user_binding_size, tool->tool_name); return newbinding; error: if (newbinding) gotcha_free(newbinding); return NULL; } binding_t *get_bindings() { return all_bindings; } binding_t *get_tool_bindings(tool_t *tool) { return tool->binding; } struct gotcha_configuration_t get_default_configuration(){ struct gotcha_configuration_t result; result.priority = UNSET_PRIORITY; return result; } enum gotcha_error_t get_default_configuration_value(enum gotcha_config_key_t key, void* data){ struct gotcha_configuration_t config = get_default_configuration(); if(key==GOTCHA_PRIORITY){ *((int*)(data)) = config.priority; } return GOTCHA_SUCCESS; } enum gotcha_error_t get_configuration_value(const char* tool_name, enum gotcha_config_key_t key, void* location_to_store_result){ struct tool_t* tool = get_tool(tool_name); if(tool==NULL){ error_printf("Property being examined for nonexistent tool %s\n", tool_name); return GOTCHA_INVALID_TOOL; } get_default_configuration_value(key, location_to_store_result); int found_valid_value = 0; while( (tool!=NULL) && !(found_valid_value) ){ struct gotcha_configuration_t config = tool->config; if(key==GOTCHA_PRIORITY){ int current_priority = config.priority; if(current_priority!=UNSET_PRIORITY){ *((int*)(location_to_store_result)) = config.priority; found_valid_value = 1; return GOTCHA_SUCCESS; } } else{ error_printf("Invalid property being configured on tool %s\n", tool_name); return GOTCHA_INTERNAL; } tool = tool->parent_tool; } return GOTCHA_SUCCESS; } int get_priority(tool_t *tool) { return tool->config.priority; }
467028.c
/* * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana * University Research and Technology * Corporation. All rights reserved. * Copyright (c) 2004-2005 The University of Tennessee and The University * of Tennessee Research Foundation. All rights * reserved. * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, * University of Stuttgart. All rights reserved. * Copyright (c) 2004-2005 The Regents of the University of California. * All rights reserved. * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2015 Research Organization for Information Science * and Technology (RIST). All rights reserved. * $COPYRIGHT$ * * Additional copyrights may follow * * $HEADER$ */ #include "ompi_config.h" #include "ompi/mpi/c/bindings.h" #include "ompi/runtime/params.h" #include "ompi/errhandler/errhandler.h" #include "ompi/file/file.h" #if OMPI_BUILD_MPI_PROFILING #if OPAL_HAVE_WEAK_SYMBOLS #pragma weak MPI_File_get_atomicity = PMPI_File_get_atomicity #endif #define MPI_File_get_atomicity PMPI_File_get_atomicity #endif static const char FUNC_NAME[] = "MPI_File_get_atomicity"; int MPI_File_get_atomicity(MPI_File fh, int *flag) { int rc; if (MPI_PARAM_CHECK) { rc = MPI_SUCCESS; OMPI_ERR_INIT_FINALIZE(FUNC_NAME); if (ompi_file_invalid(fh)) { rc = MPI_ERR_FILE; fh = MPI_FILE_NULL; } else if (NULL == flag) { rc = MPI_ERR_ARG; } OMPI_ERRHANDLER_CHECK(rc, fh, rc, FUNC_NAME); } /* Call the back-end io component function */ switch (fh->f_io_version) { case MCA_IO_BASE_V_2_0_0: rc = fh->f_io_selected_module.v2_0_0. io_module_file_get_atomicity(fh, flag); break; default: rc = MPI_ERR_INTERN; break; } /* All done */ OMPI_ERRHANDLER_RETURN(rc, fh, rc, FUNC_NAME); }
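For context, a minimal caller of the routine above, pairing MPI_File_get_atomicity with its setter. This is an illustrative sketch, not part of the Open MPI source; the file name "datafile.bin" and the communicator choice are arbitrary and error checking is omitted.

/* Illustrative standalone MPI program (assumed file name, no error handling). */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_File fh;
    int flag = -1;

    MPI_Init(&argc, &argv);
    MPI_File_open(MPI_COMM_WORLD, "datafile.bin",
                  MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh);

    /* Query the current atomicity mode (0 = nonatomic, the default). */
    MPI_File_get_atomicity(fh, &flag);
    printf("atomicity before: %d\n", flag);

    /* Enable atomic mode, then read it back. */
    MPI_File_set_atomicity(fh, 1);
    MPI_File_get_atomicity(fh, &flag);
    printf("atomicity after:  %d\n", flag);

    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}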
368723.c
/* * fs/f2fs/segment.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/fs.h> #include <linux/f2fs_fs.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/prefetch.h> #include <linux/kthread.h> #include <linux/vmalloc.h> #include <linux/swap.h> #include "f2fs.h" #include "segment.h" #include "node.h" #include "trace.h" #include <trace/events/f2fs.h> #define __reverse_ffz(x) __reverse_ffs(~(x)) static struct kmem_cache *discard_entry_slab; static struct kmem_cache *sit_entry_set_slab; static struct kmem_cache *inmem_entry_slab; /* * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since * MSB and LSB are reversed in a byte by f2fs_set_bit. */ static inline unsigned long __reverse_ffs(unsigned long word) { int num = 0; #if BITS_PER_LONG == 64 if ((word & 0xffffffff) == 0) { num += 32; word >>= 32; } #endif if ((word & 0xffff) == 0) { num += 16; word >>= 16; } if ((word & 0xff) == 0) { num += 8; word >>= 8; } if ((word & 0xf0) == 0) num += 4; else word >>= 4; if ((word & 0xc) == 0) num += 2; else word >>= 2; if ((word & 0x2) == 0) num += 1; return num; } /* * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because * f2fs_set_bit makes MSB and LSB reversed in a byte. * Example: * LSB <--> MSB * f2fs_set_bit(0, bitmap) => 0000 0001 * f2fs_set_bit(7, bitmap) => 1000 0000 */ static unsigned long __find_rev_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset) { const unsigned long *p = addr + BIT_WORD(offset); unsigned long result = offset & ~(BITS_PER_LONG - 1); unsigned long tmp; unsigned long mask, submask; unsigned long quot, rest; if (offset >= size) return size; size -= result; offset %= BITS_PER_LONG; if (!offset) goto aligned; tmp = *(p++); quot = (offset >> 3) << 3; rest = offset & 0x7; mask = ~0UL << quot; submask = (unsigned char)(0xff << rest) >> rest; submask <<= quot; mask &= submask; tmp &= mask; if (size < BITS_PER_LONG) goto found_first; if (tmp) goto found_middle; size -= BITS_PER_LONG; result += BITS_PER_LONG; aligned: while (size & ~(BITS_PER_LONG-1)) { tmp = *(p++); if (tmp) goto found_middle; result += BITS_PER_LONG; size -= BITS_PER_LONG; } if (!size) return result; tmp = *p; found_first: tmp &= (~0UL >> (BITS_PER_LONG - size)); if (tmp == 0UL) /* Are any bits set? */ return result + size; /* Nope. 
*/ found_middle: return result + __reverse_ffs(tmp); } static unsigned long __find_rev_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset) { const unsigned long *p = addr + BIT_WORD(offset); unsigned long result = offset & ~(BITS_PER_LONG - 1); unsigned long tmp; unsigned long mask, submask; unsigned long quot, rest; if (offset >= size) return size; size -= result; offset %= BITS_PER_LONG; if (!offset) goto aligned; tmp = *(p++); quot = (offset >> 3) << 3; rest = offset & 0x7; mask = ~(~0UL << quot); submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest); submask <<= quot; mask += submask; tmp |= mask; if (size < BITS_PER_LONG) goto found_first; if (~tmp) goto found_middle; size -= BITS_PER_LONG; result += BITS_PER_LONG; aligned: while (size & ~(BITS_PER_LONG - 1)) { tmp = *(p++); if (~tmp) goto found_middle; result += BITS_PER_LONG; size -= BITS_PER_LONG; } if (!size) return result; tmp = *p; found_first: tmp |= ~0UL << size; if (tmp == ~0UL) /* Are any bits zero? */ return result + size; /* Nope. */ found_middle: return result + __reverse_ffz(tmp); } void register_inmem_page(struct inode *inode, struct page *page) { struct f2fs_inode_info *fi = F2FS_I(inode); struct inmem_pages *new; int err; SetPagePrivate(page); f2fs_trace_pid(page); new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS); /* add atomic page indices to the list */ new->page = page; INIT_LIST_HEAD(&new->list); retry: /* increase reference count with clean state */ mutex_lock(&fi->inmem_lock); err = radix_tree_insert(&fi->inmem_root, page->index, new); if (err == -EEXIST) { mutex_unlock(&fi->inmem_lock); kmem_cache_free(inmem_entry_slab, new); return; } else if (err) { mutex_unlock(&fi->inmem_lock); goto retry; } get_page(page); list_add_tail(&new->list, &fi->inmem_pages); inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES); mutex_unlock(&fi->inmem_lock); } void commit_inmem_pages(struct inode *inode, bool abort) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); struct inmem_pages *cur, *tmp; bool submit_bio = false; struct f2fs_io_info fio = { .type = DATA, .rw = WRITE_SYNC | REQ_PRIO, }; /* * The abort is true only when f2fs_evict_inode is called. * Basically, the f2fs_evict_inode doesn't produce any data writes, so * that we don't need to call f2fs_balance_fs. * Otherwise, f2fs_gc in f2fs_balance_fs can wait forever until this * inode becomes free by iget_locked in f2fs_iget. */ if (!abort) { f2fs_balance_fs(sbi); f2fs_lock_op(sbi); } mutex_lock(&fi->inmem_lock); list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) { if (!abort) { lock_page(cur->page); if (cur->page->mapping == inode->i_mapping) { f2fs_wait_on_page_writeback(cur->page, DATA); if (clear_page_dirty_for_io(cur->page)) inode_dec_dirty_pages(inode); do_write_data_page(cur->page, &fio); submit_bio = true; } f2fs_put_page(cur->page, 1); } else { put_page(cur->page); } radix_tree_delete(&fi->inmem_root, cur->page->index); list_del(&cur->list); kmem_cache_free(inmem_entry_slab, cur); dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES); } mutex_unlock(&fi->inmem_lock); if (!abort) { f2fs_unlock_op(sbi); if (submit_bio) f2fs_submit_merged_bio(sbi, DATA, WRITE); } } /* * This function balances dirty node and dentry pages. * In addition, it controls garbage collection. */ void f2fs_balance_fs(struct f2fs_sb_info *sbi) { /* * We should do GC or end up with checkpoint, if there are so many dirty * dir/node pages without enough free segments. 
*/ if (has_not_enough_free_secs(sbi, 0)) { mutex_lock(&sbi->gc_mutex); f2fs_gc(sbi); } } void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) { /* check the # of cached NAT entries and prefree segments */ if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) || excess_prefree_segs(sbi) || !available_free_memory(sbi, INO_ENTRIES)) f2fs_sync_fs(sbi->sb, true); } static int issue_flush_thread(void *data) { struct f2fs_sb_info *sbi = data; struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info; wait_queue_head_t *q = &fcc->flush_wait_queue; repeat: if (kthread_should_stop()) return 0; if (!llist_empty(&fcc->issue_list)) { struct bio *bio = bio_alloc(GFP_NOIO, 0); struct flush_cmd *cmd, *next; int ret; fcc->dispatch_list = llist_del_all(&fcc->issue_list); fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list); bio->bi_bdev = sbi->sb->s_bdev; ret = submit_bio_wait(WRITE_FLUSH, bio); llist_for_each_entry_safe(cmd, next, fcc->dispatch_list, llnode) { cmd->ret = ret; complete(&cmd->wait); } bio_put(bio); fcc->dispatch_list = NULL; } wait_event_interruptible(*q, kthread_should_stop() || !llist_empty(&fcc->issue_list)); goto repeat; } int f2fs_issue_flush(struct f2fs_sb_info *sbi) { struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info; struct flush_cmd cmd; trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER), test_opt(sbi, FLUSH_MERGE)); if (test_opt(sbi, NOBARRIER)) return 0; if (!test_opt(sbi, FLUSH_MERGE)) return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL); init_completion(&cmd.wait); llist_add(&cmd.llnode, &fcc->issue_list); if (!fcc->dispatch_list) wake_up(&fcc->flush_wait_queue); wait_for_completion(&cmd.wait); return cmd.ret; } int create_flush_cmd_control(struct f2fs_sb_info *sbi) { dev_t dev = sbi->sb->s_bdev->bd_dev; struct flush_cmd_control *fcc; int err = 0; fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL); if (!fcc) return -ENOMEM; init_waitqueue_head(&fcc->flush_wait_queue); init_llist_head(&fcc->issue_list); SM_I(sbi)->cmd_control_info = fcc; fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi, "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev)); if (IS_ERR(fcc->f2fs_issue_flush)) { err = PTR_ERR(fcc->f2fs_issue_flush); kfree(fcc); SM_I(sbi)->cmd_control_info = NULL; return err; } return err; } void destroy_flush_cmd_control(struct f2fs_sb_info *sbi) { struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info; if (fcc && fcc->f2fs_issue_flush) kthread_stop(fcc->f2fs_issue_flush); kfree(fcc); SM_I(sbi)->cmd_control_info = NULL; } static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, enum dirty_type dirty_type) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); /* need not be added */ if (IS_CURSEG(sbi, segno)) return; if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type])) dirty_i->nr_dirty[dirty_type]++; if (dirty_type == DIRTY) { struct seg_entry *sentry = get_seg_entry(sbi, segno); enum dirty_type t = sentry->type; if (unlikely(t >= DIRTY)) { f2fs_bug_on(sbi, 1); return; } if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t])) dirty_i->nr_dirty[t]++; } } static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, enum dirty_type dirty_type) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type])) dirty_i->nr_dirty[dirty_type]--; if (dirty_type == DIRTY) { struct seg_entry *sentry = get_seg_entry(sbi, segno); enum dirty_type t = sentry->type; if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t])) dirty_i->nr_dirty[t]--; if 
(get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0) clear_bit(GET_SECNO(sbi, segno), dirty_i->victim_secmap); } } /* * Should not occur error such as -ENOMEM. * Adding dirty entry into seglist is not critical operation. * If a given segment is one of current working segments, it won't be added. */ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); unsigned short valid_blocks; if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno)) return; mutex_lock(&dirty_i->seglist_lock); valid_blocks = get_valid_blocks(sbi, segno, 0); if (valid_blocks == 0) { __locate_dirty_segment(sbi, segno, PRE); __remove_dirty_segment(sbi, segno, DIRTY); } else if (valid_blocks < sbi->blocks_per_seg) { __locate_dirty_segment(sbi, segno, DIRTY); } else { /* Recovery routine with SSR needs this */ __remove_dirty_segment(sbi, segno, DIRTY); } mutex_unlock(&dirty_i->seglist_lock); } static int f2fs_issue_discard(struct f2fs_sb_info *sbi, block_t blkstart, block_t blklen) { sector_t start = SECTOR_FROM_BLOCK(blkstart); sector_t len = SECTOR_FROM_BLOCK(blklen); trace_f2fs_issue_discard(sbi->sb, blkstart, blklen); return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0); } void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr) { if (f2fs_issue_discard(sbi, blkaddr, 1)) { struct page *page = grab_meta_page(sbi, blkaddr); /* zero-filled page */ set_page_dirty(page); f2fs_put_page(page, 1); } } static void __add_discard_entry(struct f2fs_sb_info *sbi, struct cp_control *cpc, unsigned int start, unsigned int end) { struct list_head *head = &SM_I(sbi)->discard_list; struct discard_entry *new, *last; if (!list_empty(head)) { last = list_last_entry(head, struct discard_entry, list); if (START_BLOCK(sbi, cpc->trim_start) + start == last->blkaddr + last->len) { last->len += end - start; goto done; } } new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS); INIT_LIST_HEAD(&new->list); new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start; new->len = end - start; list_add_tail(&new->list, head); done: SM_I(sbi)->nr_discards += end - start; cpc->trimmed += end - start; } static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc) { int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); int max_blocks = sbi->blocks_per_seg; struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start); unsigned long *cur_map = (unsigned long *)se->cur_valid_map; unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; unsigned long *dmap = SIT_I(sbi)->tmp_map; unsigned int start = 0, end = -1; bool force = (cpc->reason == CP_DISCARD); int i; if (!force && (!test_opt(sbi, DISCARD) || SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards)) return; if (force && !se->valid_blocks) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); /* * if this segment is registered in the prefree list, then * we should skip adding a discard candidate, and let the * checkpoint do that later. */ mutex_lock(&dirty_i->seglist_lock); if (test_bit(cpc->trim_start, dirty_i->dirty_segmap[PRE])) { mutex_unlock(&dirty_i->seglist_lock); cpc->trimmed += sbi->blocks_per_seg; return; } mutex_unlock(&dirty_i->seglist_lock); __add_discard_entry(sbi, cpc, 0, sbi->blocks_per_seg); return; } /* zero block will be discarded through the prefree list */ if (!se->valid_blocks || se->valid_blocks == max_blocks) return; /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */ for (i = 0; i < entries; i++) dmap[i] = force ? 
~ckpt_map[i] : (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i]; while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) { start = __find_rev_next_bit(dmap, max_blocks, end + 1); if (start >= max_blocks) break; end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1); if (end - start < cpc->trim_minlen) continue; __add_discard_entry(sbi, cpc, start, end); } } void release_discard_addrs(struct f2fs_sb_info *sbi) { struct list_head *head = &(SM_I(sbi)->discard_list); struct discard_entry *entry, *this; /* drop caches */ list_for_each_entry_safe(entry, this, head, list) { list_del(&entry->list); kmem_cache_free(discard_entry_slab, entry); } } /* * Should call clear_prefree_segments after checkpoint is done. */ static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); unsigned int segno; mutex_lock(&dirty_i->seglist_lock); for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi)) __set_test_and_free(sbi, segno); mutex_unlock(&dirty_i->seglist_lock); } void clear_prefree_segments(struct f2fs_sb_info *sbi) { struct list_head *head = &(SM_I(sbi)->discard_list); struct discard_entry *entry, *this; struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); unsigned long *prefree_map = dirty_i->dirty_segmap[PRE]; unsigned int start = 0, end = -1; mutex_lock(&dirty_i->seglist_lock); while (1) { int i; start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1); if (start >= MAIN_SEGS(sbi)) break; end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi), start + 1); for (i = start; i < end; i++) clear_bit(i, prefree_map); dirty_i->nr_dirty[PRE] -= end - start; if (!test_opt(sbi, DISCARD)) continue; f2fs_issue_discard(sbi, START_BLOCK(sbi, start), (end - start) << sbi->log_blocks_per_seg); } mutex_unlock(&dirty_i->seglist_lock); /* send small discards */ list_for_each_entry_safe(entry, this, head, list) { f2fs_issue_discard(sbi, entry->blkaddr, entry->len); list_del(&entry->list); SM_I(sbi)->nr_discards -= entry->len; kmem_cache_free(discard_entry_slab, entry); } } static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) { struct sit_info *sit_i = SIT_I(sbi); if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) { sit_i->dirty_sentries++; return false; } return true; } static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type, unsigned int segno, int modified) { struct seg_entry *se = get_seg_entry(sbi, segno); se->type = type; if (modified) __mark_sit_entry_dirty(sbi, segno); } static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) { struct seg_entry *se; unsigned int segno, offset; long int new_vblocks; segno = GET_SEGNO(sbi, blkaddr); se = get_seg_entry(sbi, segno); new_vblocks = se->valid_blocks + del; offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) || (new_vblocks > sbi->blocks_per_seg))); se->valid_blocks = new_vblocks; se->mtime = get_mtime(sbi); SIT_I(sbi)->max_mtime = se->mtime; /* Update valid block bitmap */ if (del > 0) { if (f2fs_test_and_set_bit(offset, se->cur_valid_map)) f2fs_bug_on(sbi, 1); } else { if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) f2fs_bug_on(sbi, 1); } if (!f2fs_test_bit(offset, se->ckpt_valid_map)) se->ckpt_valid_blocks += del; __mark_sit_entry_dirty(sbi, segno); /* update total number of valid blocks to be written in ckpt area */ SIT_I(sbi)->written_valid_blocks += del; if (sbi->segs_per_sec > 1) get_sec_entry(sbi, segno)->valid_blocks += del; } void 
refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new) { update_sit_entry(sbi, new, 1); if (GET_SEGNO(sbi, old) != NULL_SEGNO) update_sit_entry(sbi, old, -1); locate_dirty_segment(sbi, GET_SEGNO(sbi, old)); locate_dirty_segment(sbi, GET_SEGNO(sbi, new)); } void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) { unsigned int segno = GET_SEGNO(sbi, addr); struct sit_info *sit_i = SIT_I(sbi); f2fs_bug_on(sbi, addr == NULL_ADDR); if (addr == NEW_ADDR) return; /* add it into sit main buffer */ mutex_lock(&sit_i->sentry_lock); update_sit_entry(sbi, addr, -1); /* add it into dirty seglist */ locate_dirty_segment(sbi, segno); mutex_unlock(&sit_i->sentry_lock); } /* * This function should be resided under the curseg_mutex lock */ static void __add_sum_entry(struct f2fs_sb_info *sbi, int type, struct f2fs_summary *sum) { struct curseg_info *curseg = CURSEG_I(sbi, type); void *addr = curseg->sum_blk; addr += curseg->next_blkoff * sizeof(struct f2fs_summary); memcpy(addr, sum, sizeof(struct f2fs_summary)); } /* * Calculate the number of current summary pages for writing */ int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) { int valid_sum_count = 0; int i, sum_in_page; for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { if (sbi->ckpt->alloc_type[i] == SSR) valid_sum_count += sbi->blocks_per_seg; else { if (for_ra) valid_sum_count += le16_to_cpu( F2FS_CKPT(sbi)->cur_data_blkoff[i]); else valid_sum_count += curseg_blkoff(sbi, i); } } sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE; if (valid_sum_count <= sum_in_page) return 1; else if ((valid_sum_count - sum_in_page) <= (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE) return 2; return 3; } /* * Caller should put this summary page */ struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno) { return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno)); } static void write_sum_page(struct f2fs_sb_info *sbi, struct f2fs_summary_block *sum_blk, block_t blk_addr) { struct page *page = grab_meta_page(sbi, blk_addr); void *kaddr = page_address(page); memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE); set_page_dirty(page); f2fs_put_page(page, 1); } static int is_next_segment_free(struct f2fs_sb_info *sbi, int type) { struct curseg_info *curseg = CURSEG_I(sbi, type); unsigned int segno = curseg->segno + 1; struct free_segmap_info *free_i = FREE_I(sbi); if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec) return !test_bit(segno, free_i->free_segmap); return 0; } /* * Find a new segment from the free segments bitmap to right order * This function should be returned with success, otherwise BUG */ static void get_new_segment(struct f2fs_sb_info *sbi, unsigned int *newseg, bool new_sec, int dir) { struct free_segmap_info *free_i = FREE_I(sbi); unsigned int segno, secno, zoneno; unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone; unsigned int hint = *newseg / sbi->segs_per_sec; unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg); unsigned int left_start = hint; bool init = true; int go_left = 0; int i; spin_lock(&free_i->segmap_lock); if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) { segno = find_next_zero_bit(free_i->free_segmap, MAIN_SEGS(sbi), *newseg + 1); if (segno - *newseg < sbi->segs_per_sec - (*newseg % sbi->segs_per_sec)) goto got_it; } find_other_zone: secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint); if (secno >= MAIN_SECS(sbi)) { if (dir == ALLOC_RIGHT) { secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), 
0); f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi)); } else { go_left = 1; left_start = hint - 1; } } if (go_left == 0) goto skip_left; while (test_bit(left_start, free_i->free_secmap)) { if (left_start > 0) { left_start--; continue; } left_start = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), 0); f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi)); break; } secno = left_start; skip_left: hint = secno; segno = secno * sbi->segs_per_sec; zoneno = secno / sbi->secs_per_zone; /* give up on finding another zone */ if (!init) goto got_it; if (sbi->secs_per_zone == 1) goto got_it; if (zoneno == old_zoneno) goto got_it; if (dir == ALLOC_LEFT) { if (!go_left && zoneno + 1 >= total_zones) goto got_it; if (go_left && zoneno == 0) goto got_it; } for (i = 0; i < NR_CURSEG_TYPE; i++) if (CURSEG_I(sbi, i)->zone == zoneno) break; if (i < NR_CURSEG_TYPE) { /* zone is in user, try another */ if (go_left) hint = zoneno * sbi->secs_per_zone - 1; else if (zoneno + 1 >= total_zones) hint = 0; else hint = (zoneno + 1) * sbi->secs_per_zone; init = false; goto find_other_zone; } got_it: /* set it as dirty segment in free segmap */ f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap)); __set_inuse(sbi, segno); *newseg = segno; spin_unlock(&free_i->segmap_lock); } static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) { struct curseg_info *curseg = CURSEG_I(sbi, type); struct summary_footer *sum_footer; curseg->segno = curseg->next_segno; curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno); curseg->next_blkoff = 0; curseg->next_segno = NULL_SEGNO; sum_footer = &(curseg->sum_blk->footer); memset(sum_footer, 0, sizeof(struct summary_footer)); if (IS_DATASEG(type)) SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA); if (IS_NODESEG(type)) SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE); __set_sit_entry_type(sbi, type, curseg->segno, modified); } /* * Allocate a current working segment. * This function always allocates a free segment in LFS manner. */ static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec) { struct curseg_info *curseg = CURSEG_I(sbi, type); unsigned int segno = curseg->segno; int dir = ALLOC_LEFT; write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, segno)); if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA) dir = ALLOC_RIGHT; if (test_opt(sbi, NOHEAP)) dir = ALLOC_RIGHT; get_new_segment(sbi, &segno, new_sec, dir); curseg->next_segno = segno; reset_curseg(sbi, type, 1); curseg->alloc_type = LFS; } static void __next_free_blkoff(struct f2fs_sb_info *sbi, struct curseg_info *seg, block_t start) { struct seg_entry *se = get_seg_entry(sbi, seg->segno); int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); unsigned long *target_map = SIT_I(sbi)->tmp_map; unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; unsigned long *cur_map = (unsigned long *)se->cur_valid_map; int i, pos; for (i = 0; i < entries; i++) target_map[i] = ckpt_map[i] | cur_map[i]; pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start); seg->next_blkoff = pos; } /* * If a segment is written by LFS manner, next block offset is just obtained * by increasing the current block offset. 
However, if a segment is written by * SSR manner, next block offset obtained by calling __next_free_blkoff */ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi, struct curseg_info *seg) { if (seg->alloc_type == SSR) __next_free_blkoff(sbi, seg, seg->next_blkoff + 1); else seg->next_blkoff++; } /* * This function always allocates a used segment(from dirty seglist) by SSR * manner, so it should recover the existing segment information of valid blocks */ static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, type); unsigned int new_segno = curseg->next_segno; struct f2fs_summary_block *sum_node; struct page *sum_page; write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno)); __set_test_and_inuse(sbi, new_segno); mutex_lock(&dirty_i->seglist_lock); __remove_dirty_segment(sbi, new_segno, PRE); __remove_dirty_segment(sbi, new_segno, DIRTY); mutex_unlock(&dirty_i->seglist_lock); reset_curseg(sbi, type, 1); curseg->alloc_type = SSR; __next_free_blkoff(sbi, curseg, 0); if (reuse) { sum_page = get_sum_page(sbi, new_segno); sum_node = (struct f2fs_summary_block *)page_address(sum_page); memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE); f2fs_put_page(sum_page, 1); } } static int get_ssr_segment(struct f2fs_sb_info *sbi, int type) { struct curseg_info *curseg = CURSEG_I(sbi, type); const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops; if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0)) return v_ops->get_victim(sbi, &(curseg)->next_segno, BG_GC, type, SSR); /* For data segments, let's do SSR more intensively */ for (; type >= CURSEG_HOT_DATA; type--) if (v_ops->get_victim(sbi, &(curseg)->next_segno, BG_GC, type, SSR)) return 1; return 0; } /* * flush out current segment and replace it with new segment * This function should be returned with success, otherwise BUG */ static void allocate_segment_by_default(struct f2fs_sb_info *sbi, int type, bool force) { struct curseg_info *curseg = CURSEG_I(sbi, type); if (force) new_curseg(sbi, type, true); else if (type == CURSEG_WARM_NODE) new_curseg(sbi, type, false); else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type)) new_curseg(sbi, type, false); else if (need_SSR(sbi) && get_ssr_segment(sbi, type)) change_curseg(sbi, type, true); else new_curseg(sbi, type, false); stat_inc_seg_type(sbi, curseg); } static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type) { struct curseg_info *curseg = CURSEG_I(sbi, type); unsigned int old_segno; old_segno = curseg->segno; SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true); locate_dirty_segment(sbi, old_segno); } void allocate_new_segments(struct f2fs_sb_info *sbi) { int i; for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) __allocate_new_segments(sbi, i); } static const struct segment_allocation default_salloc_ops = { .allocate_segment = allocate_segment_by_default, }; int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) { __u64 start = F2FS_BYTES_TO_BLK(range->start); __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1; unsigned int start_segno, end_segno; struct cp_control cpc; if (range->minlen > SEGMENT_SIZE(sbi) || start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize) return -EINVAL; cpc.trimmed = 0; if (end <= MAIN_BLKADDR(sbi)) goto out; /* start/end segment number in main_area */ start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start); end_segno = (end >= MAX_BLKADDR(sbi)) ? 
MAIN_SEGS(sbi) - 1 : GET_SEGNO(sbi, end); cpc.reason = CP_DISCARD; cpc.trim_minlen = F2FS_BYTES_TO_BLK(range->minlen); /* do checkpoint to issue discard commands safely */ for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) { cpc.trim_start = start_segno; cpc.trim_end = min_t(unsigned int, rounddown(start_segno + BATCHED_TRIM_SEGMENTS(sbi), sbi->segs_per_sec) - 1, end_segno); mutex_lock(&sbi->gc_mutex); write_checkpoint(sbi, &cpc); mutex_unlock(&sbi->gc_mutex); } out: range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); return 0; } static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type) { struct curseg_info *curseg = CURSEG_I(sbi, type); if (curseg->next_blkoff < sbi->blocks_per_seg) return true; return false; } static int __get_segment_type_2(struct page *page, enum page_type p_type) { if (p_type == DATA) return CURSEG_HOT_DATA; else return CURSEG_HOT_NODE; } static int __get_segment_type_4(struct page *page, enum page_type p_type) { if (p_type == DATA) { struct inode *inode = page->mapping->host; if (S_ISDIR(inode->i_mode)) return CURSEG_HOT_DATA; else return CURSEG_COLD_DATA; } else { if (IS_DNODE(page) && is_cold_node(page)) return CURSEG_WARM_NODE; else return CURSEG_COLD_NODE; } } static int __get_segment_type_6(struct page *page, enum page_type p_type) { if (p_type == DATA) { struct inode *inode = page->mapping->host; if (S_ISDIR(inode->i_mode)) return CURSEG_HOT_DATA; else if (is_cold_data(page) || file_is_cold(inode)) return CURSEG_COLD_DATA; else return CURSEG_WARM_DATA; } else { if (IS_DNODE(page)) return is_cold_node(page) ? CURSEG_WARM_NODE : CURSEG_HOT_NODE; else return CURSEG_COLD_NODE; } } static int __get_segment_type(struct page *page, enum page_type p_type) { switch (F2FS_P_SB(page)->active_logs) { case 2: return __get_segment_type_2(page, p_type); case 4: return __get_segment_type_4(page, p_type); } /* NR_CURSEG_TYPE(6) logs by default */ f2fs_bug_on(F2FS_P_SB(page), F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE); return __get_segment_type_6(page, p_type); } void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, block_t old_blkaddr, block_t *new_blkaddr, struct f2fs_summary *sum, int type) { struct sit_info *sit_i = SIT_I(sbi); struct curseg_info *curseg; bool direct_io = (type == CURSEG_DIRECT_IO); type = direct_io ? CURSEG_WARM_DATA : type; curseg = CURSEG_I(sbi, type); mutex_lock(&curseg->curseg_mutex); /* direct_io'ed data is aligned to the segment for better performance */ if (direct_io && curseg->next_blkoff) __allocate_new_segments(sbi, type); *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); /* * __add_sum_entry should be resided under the curseg_mutex * because, this function updates a summary entry in the * current summary block. */ __add_sum_entry(sbi, type, sum); mutex_lock(&sit_i->sentry_lock); __refresh_next_blkoff(sbi, curseg); stat_inc_block_count(sbi, curseg); if (!__has_curseg_space(sbi, type)) sit_i->s_ops->allocate_segment(sbi, type, false); /* * SIT information should be updated before segment allocation, * since SSR needs latest valid block information. 
*/ refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr); mutex_unlock(&sit_i->sentry_lock); if (page && IS_NODESEG(type)) fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); mutex_unlock(&curseg->curseg_mutex); } static void do_write_page(struct f2fs_sb_info *sbi, struct page *page, struct f2fs_summary *sum, struct f2fs_io_info *fio) { int type = __get_segment_type(page, fio->type); allocate_data_block(sbi, page, fio->blk_addr, &fio->blk_addr, sum, type); /* writeout dirty page into bdev */ f2fs_submit_page_mbio(sbi, page, fio); } void write_meta_page(struct f2fs_sb_info *sbi, struct page *page) { struct f2fs_io_info fio = { .type = META, .rw = WRITE_SYNC | REQ_META | REQ_PRIO, .blk_addr = page->index, }; set_page_writeback(page); f2fs_submit_page_mbio(sbi, page, &fio); } void write_node_page(struct f2fs_sb_info *sbi, struct page *page, unsigned int nid, struct f2fs_io_info *fio) { struct f2fs_summary sum; set_summary(&sum, nid, 0, 0); do_write_page(sbi, page, &sum, fio); } void write_data_page(struct page *page, struct dnode_of_data *dn, struct f2fs_io_info *fio) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); struct f2fs_summary sum; struct node_info ni; f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR); get_node_info(sbi, dn->nid, &ni); set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); do_write_page(sbi, page, &sum, fio); dn->data_blkaddr = fio->blk_addr; } void rewrite_data_page(struct page *page, struct f2fs_io_info *fio) { stat_inc_inplace_blocks(F2FS_P_SB(page)); f2fs_submit_page_mbio(F2FS_P_SB(page), page, fio); } void recover_data_page(struct f2fs_sb_info *sbi, struct page *page, struct f2fs_summary *sum, block_t old_blkaddr, block_t new_blkaddr) { struct sit_info *sit_i = SIT_I(sbi); struct curseg_info *curseg; unsigned int segno, old_cursegno; struct seg_entry *se; int type; segno = GET_SEGNO(sbi, new_blkaddr); se = get_seg_entry(sbi, segno); type = se->type; if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { if (old_blkaddr == NULL_ADDR) type = CURSEG_COLD_DATA; else type = CURSEG_WARM_DATA; } curseg = CURSEG_I(sbi, type); mutex_lock(&curseg->curseg_mutex); mutex_lock(&sit_i->sentry_lock); old_cursegno = curseg->segno; /* change the current segment */ if (segno != curseg->segno) { curseg->next_segno = segno; change_curseg(sbi, type, true); } curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr); __add_sum_entry(sbi, type, sum); refresh_sit_entry(sbi, old_blkaddr, new_blkaddr); locate_dirty_segment(sbi, old_cursegno); mutex_unlock(&sit_i->sentry_lock); mutex_unlock(&curseg->curseg_mutex); } static inline bool is_merged_page(struct f2fs_sb_info *sbi, struct page *page, enum page_type type) { enum page_type btype = PAGE_TYPE_OF_BIO(type); struct f2fs_bio_info *io = &sbi->write_io[btype]; struct bio_vec *bvec; int i; down_read(&io->io_rwsem); if (!io->bio) goto out; bio_for_each_segment_all(bvec, io->bio, i) { if (page == bvec->bv_page) { up_read(&io->io_rwsem); return true; } } out: up_read(&io->io_rwsem); return false; } void f2fs_wait_on_page_writeback(struct page *page, enum page_type type) { if (PageWriteback(page)) { struct f2fs_sb_info *sbi = F2FS_P_SB(page); if (is_merged_page(sbi, page, type)) f2fs_submit_merged_bio(sbi, type, WRITE); wait_on_page_writeback(page); } } static int read_compacted_summaries(struct f2fs_sb_info *sbi) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct curseg_info *seg_i; unsigned char *kaddr; struct page *page; block_t start; int i, j, offset; start = start_sum_block(sbi); page = get_meta_page(sbi, start++); 
kaddr = (unsigned char *)page_address(page); /* Step 1: restore nat cache */ seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE); /* Step 2: restore sit cache */ seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE); offset = 2 * SUM_JOURNAL_SIZE; /* Step 3: restore summary entries */ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { unsigned short blk_off; unsigned int segno; seg_i = CURSEG_I(sbi, i); segno = le32_to_cpu(ckpt->cur_data_segno[i]); blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); seg_i->next_segno = segno; reset_curseg(sbi, i, 0); seg_i->alloc_type = ckpt->alloc_type[i]; seg_i->next_blkoff = blk_off; if (seg_i->alloc_type == SSR) blk_off = sbi->blocks_per_seg; for (j = 0; j < blk_off; j++) { struct f2fs_summary *s; s = (struct f2fs_summary *)(kaddr + offset); seg_i->sum_blk->entries[j] = *s; offset += SUMMARY_SIZE; if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) continue; f2fs_put_page(page, 1); page = NULL; page = get_meta_page(sbi, start++); kaddr = (unsigned char *)page_address(page); offset = 0; } } f2fs_put_page(page, 1); return 0; } static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_summary_block *sum; struct curseg_info *curseg; struct page *new; unsigned short blk_off; unsigned int segno = 0; block_t blk_addr = 0; /* get segment number and block addr */ if (IS_DATASEG(type)) { segno = le32_to_cpu(ckpt->cur_data_segno[type]); blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type - CURSEG_HOT_DATA]); if (__exist_node_summaries(sbi)) blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type); else blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); } else { segno = le32_to_cpu(ckpt->cur_node_segno[type - CURSEG_HOT_NODE]); blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - CURSEG_HOT_NODE]); if (__exist_node_summaries(sbi)) blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, type - CURSEG_HOT_NODE); else blk_addr = GET_SUM_BLOCK(sbi, segno); } new = get_meta_page(sbi, blk_addr); sum = (struct f2fs_summary_block *)page_address(new); if (IS_NODESEG(type)) { if (__exist_node_summaries(sbi)) { struct f2fs_summary *ns = &sum->entries[0]; int i; for (i = 0; i < sbi->blocks_per_seg; i++, ns++) { ns->version = 0; ns->ofs_in_node = 0; } } else { int err; err = restore_node_summary(sbi, segno, sum); if (err) { f2fs_put_page(new, 1); return err; } } } /* set uncompleted segment to curseg */ curseg = CURSEG_I(sbi, type); mutex_lock(&curseg->curseg_mutex); memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE); curseg->next_segno = segno; reset_curseg(sbi, type, 0); curseg->alloc_type = ckpt->alloc_type[type]; curseg->next_blkoff = blk_off; mutex_unlock(&curseg->curseg_mutex); f2fs_put_page(new, 1); return 0; } static int restore_curseg_summaries(struct f2fs_sb_info *sbi) { int type = CURSEG_HOT_DATA; int err; if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) { int npages = npages_for_summary_flush(sbi, true); if (npages >= 2) ra_meta_pages(sbi, start_sum_block(sbi), npages, META_CP); /* restore for compacted data summary */ if (read_compacted_summaries(sbi)) return -EINVAL; type = CURSEG_HOT_NODE; } if (__exist_node_summaries(sbi)) ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type), NR_CURSEG_TYPE - type, META_CP); for (; type <= CURSEG_COLD_NODE; type++) { err = read_normal_summaries(sbi, type); if (err) return err; } return 0; } static void write_compacted_summaries(struct 
f2fs_sb_info *sbi, block_t blkaddr) { struct page *page; unsigned char *kaddr; struct f2fs_summary *summary; struct curseg_info *seg_i; int written_size = 0; int i, j; page = grab_meta_page(sbi, blkaddr++); kaddr = (unsigned char *)page_address(page); /* Step 1: write nat cache */ seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE); written_size += SUM_JOURNAL_SIZE; /* Step 2: write sit cache */ seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits, SUM_JOURNAL_SIZE); written_size += SUM_JOURNAL_SIZE; /* Step 3: write summary entries */ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { unsigned short blkoff; seg_i = CURSEG_I(sbi, i); if (sbi->ckpt->alloc_type[i] == SSR) blkoff = sbi->blocks_per_seg; else blkoff = curseg_blkoff(sbi, i); for (j = 0; j < blkoff; j++) { if (!page) { page = grab_meta_page(sbi, blkaddr++); kaddr = (unsigned char *)page_address(page); written_size = 0; } summary = (struct f2fs_summary *)(kaddr + written_size); *summary = seg_i->sum_blk->entries[j]; written_size += SUMMARY_SIZE; if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) continue; set_page_dirty(page); f2fs_put_page(page, 1); page = NULL; } } if (page) { set_page_dirty(page); f2fs_put_page(page, 1); } } static void write_normal_summaries(struct f2fs_sb_info *sbi, block_t blkaddr, int type) { int i, end; if (IS_DATASEG(type)) end = type + NR_CURSEG_DATA_TYPE; else end = type + NR_CURSEG_NODE_TYPE; for (i = type; i < end; i++) { struct curseg_info *sum = CURSEG_I(sbi, i); mutex_lock(&sum->curseg_mutex); write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type)); mutex_unlock(&sum->curseg_mutex); } } void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) { if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) write_compacted_summaries(sbi, start_blk); else write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA); } void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) { write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); } int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type, unsigned int val, int alloc) { int i; if (type == NAT_JOURNAL) { for (i = 0; i < nats_in_cursum(sum); i++) { if (le32_to_cpu(nid_in_journal(sum, i)) == val) return i; } if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) return update_nats_in_cursum(sum, 1); } else if (type == SIT_JOURNAL) { for (i = 0; i < sits_in_cursum(sum); i++) if (le32_to_cpu(segno_in_journal(sum, i)) == val) return i; if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES) return update_sits_in_cursum(sum, 1); } return -1; } static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, unsigned int segno) { return get_meta_page(sbi, current_sit_addr(sbi, segno)); } static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, unsigned int start) { struct sit_info *sit_i = SIT_I(sbi); struct page *src_page, *dst_page; pgoff_t src_off, dst_off; void *src_addr, *dst_addr; src_off = current_sit_addr(sbi, start); dst_off = next_sit_addr(sbi, src_off); /* get current sit block page without lock */ src_page = get_meta_page(sbi, src_off); dst_page = grab_meta_page(sbi, dst_off); f2fs_bug_on(sbi, PageDirty(src_page)); src_addr = page_address(src_page); dst_addr = page_address(dst_page); memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); set_page_dirty(dst_page); f2fs_put_page(src_page, 1); set_to_next_sit(sit_i, start); return dst_page; } static struct sit_entry_set *grab_sit_entry_set(void) { struct 
sit_entry_set *ses = f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_ATOMIC); ses->entry_cnt = 0; INIT_LIST_HEAD(&ses->set_list); return ses; } static void release_sit_entry_set(struct sit_entry_set *ses) { list_del(&ses->set_list); kmem_cache_free(sit_entry_set_slab, ses); } static void adjust_sit_entry_set(struct sit_entry_set *ses, struct list_head *head) { struct sit_entry_set *next = ses; if (list_is_last(&ses->set_list, head)) return; list_for_each_entry_continue(next, head, set_list) if (ses->entry_cnt <= next->entry_cnt) break; list_move_tail(&ses->set_list, &next->set_list); } static void add_sit_entry(unsigned int segno, struct list_head *head) { struct sit_entry_set *ses; unsigned int start_segno = START_SEGNO(segno); list_for_each_entry(ses, head, set_list) { if (ses->start_segno == start_segno) { ses->entry_cnt++; adjust_sit_entry_set(ses, head); return; } } ses = grab_sit_entry_set(); ses->start_segno = start_segno; ses->entry_cnt++; list_add(&ses->set_list, head); } static void add_sits_in_set(struct f2fs_sb_info *sbi) { struct f2fs_sm_info *sm_info = SM_I(sbi); struct list_head *set_list = &sm_info->sit_entry_set; unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap; unsigned int segno; for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi)) add_sit_entry(segno, set_list); } static void remove_sits_in_journal(struct f2fs_sb_info *sbi) { struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); struct f2fs_summary_block *sum = curseg->sum_blk; int i; for (i = sits_in_cursum(sum) - 1; i >= 0; i--) { unsigned int segno; bool dirtied; segno = le32_to_cpu(segno_in_journal(sum, i)); dirtied = __mark_sit_entry_dirty(sbi, segno); if (!dirtied) add_sit_entry(segno, &SM_I(sbi)->sit_entry_set); } update_sits_in_cursum(sum, -sits_in_cursum(sum)); } /* * CP calls this function, which flushes SIT entries including sit_journal, * and moves prefree segs to free segs. */ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) { struct sit_info *sit_i = SIT_I(sbi); unsigned long *bitmap = sit_i->dirty_sentries_bitmap; struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); struct f2fs_summary_block *sum = curseg->sum_blk; struct sit_entry_set *ses, *tmp; struct list_head *head = &SM_I(sbi)->sit_entry_set; bool to_journal = true; struct seg_entry *se; mutex_lock(&curseg->curseg_mutex); mutex_lock(&sit_i->sentry_lock); /* * add and account sit entries of dirty bitmap in sit entry * set temporarily */ add_sits_in_set(sbi); /* * if there are no enough space in journal to store dirty sit * entries, remove all entries from journal and add and account * them in sit entry set. */ if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL)) remove_sits_in_journal(sbi); if (!sit_i->dirty_sentries) goto out; /* * there are two steps to flush sit entries: * #1, flush sit entries to journal in current cold data summary block. * #2, flush sit entries to sit page. 
*/ list_for_each_entry_safe(ses, tmp, head, set_list) { struct page *page = NULL; struct f2fs_sit_block *raw_sit = NULL; unsigned int start_segno = ses->start_segno; unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK, (unsigned long)MAIN_SEGS(sbi)); unsigned int segno = start_segno; if (to_journal && !__has_cursum_space(sum, ses->entry_cnt, SIT_JOURNAL)) to_journal = false; if (!to_journal) { page = get_next_sit_page(sbi, start_segno); raw_sit = page_address(page); } /* flush dirty sit entries in region of current sit set */ for_each_set_bit_from(segno, bitmap, end) { int offset, sit_offset; se = get_seg_entry(sbi, segno); /* add discard candidates */ if (cpc->reason != CP_DISCARD) { cpc->trim_start = segno; add_discard_addrs(sbi, cpc); } if (to_journal) { offset = lookup_journal_in_cursum(sum, SIT_JOURNAL, segno, 1); f2fs_bug_on(sbi, offset < 0); segno_in_journal(sum, offset) = cpu_to_le32(segno); seg_info_to_raw_sit(se, &sit_in_journal(sum, offset)); } else { sit_offset = SIT_ENTRY_OFFSET(sit_i, segno); seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]); } __clear_bit(segno, bitmap); sit_i->dirty_sentries--; ses->entry_cnt--; } if (!to_journal) f2fs_put_page(page, 1); f2fs_bug_on(sbi, ses->entry_cnt); release_sit_entry_set(ses); } f2fs_bug_on(sbi, !list_empty(head)); f2fs_bug_on(sbi, sit_i->dirty_sentries); out: if (cpc->reason == CP_DISCARD) { for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) add_discard_addrs(sbi, cpc); } mutex_unlock(&sit_i->sentry_lock); mutex_unlock(&curseg->curseg_mutex); set_prefree_as_free_segments(sbi); } static int build_sit_info(struct f2fs_sb_info *sbi) { struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct sit_info *sit_i; unsigned int sit_segs, start; char *src_bitmap, *dst_bitmap; unsigned int bitmap_size; /* allocate memory for SIT information */ sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL); if (!sit_i) return -ENOMEM; SM_I(sbi)->sit_info = sit_i; sit_i->sentries = vzalloc(MAIN_SEGS(sbi) * sizeof(struct seg_entry)); if (!sit_i->sentries) return -ENOMEM; bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL); if (!sit_i->dirty_sentries_bitmap) return -ENOMEM; for (start = 0; start < MAIN_SEGS(sbi); start++) { sit_i->sentries[start].cur_valid_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); sit_i->sentries[start].ckpt_valid_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); if (!sit_i->sentries[start].cur_valid_map || !sit_i->sentries[start].ckpt_valid_map) return -ENOMEM; } sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); if (!sit_i->tmp_map) return -ENOMEM; if (sbi->segs_per_sec > 1) { sit_i->sec_entries = vzalloc(MAIN_SECS(sbi) * sizeof(struct sec_entry)); if (!sit_i->sec_entries) return -ENOMEM; } /* get information related with SIT */ sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1; /* setup SIT bitmap from ckeckpoint pack */ bitmap_size = __bitmap_size(sbi, SIT_BITMAP); src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP); dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL); if (!dst_bitmap) return -ENOMEM; /* init SIT information */ sit_i->s_ops = &default_salloc_ops; sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr); sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg; sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count); sit_i->sit_bitmap = dst_bitmap; sit_i->bitmap_size = bitmap_size; sit_i->dirty_sentries = 0; sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK; 
sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec; mutex_init(&sit_i->sentry_lock); return 0; } static int build_free_segmap(struct f2fs_sb_info *sbi) { struct free_segmap_info *free_i; unsigned int bitmap_size, sec_bitmap_size; /* allocate memory for free segmap information */ free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL); if (!free_i) return -ENOMEM; SM_I(sbi)->free_info = free_i; bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL); if (!free_i->free_segmap) return -ENOMEM; sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL); if (!free_i->free_secmap) return -ENOMEM; /* set all segments as dirty temporarily */ memset(free_i->free_segmap, 0xff, bitmap_size); memset(free_i->free_secmap, 0xff, sec_bitmap_size); /* init free segmap information */ free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi)); free_i->free_segments = 0; free_i->free_sections = 0; spin_lock_init(&free_i->segmap_lock); return 0; } static int build_curseg(struct f2fs_sb_info *sbi) { struct curseg_info *array; int i; array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL); if (!array) return -ENOMEM; SM_I(sbi)->curseg_array = array; for (i = 0; i < NR_CURSEG_TYPE; i++) { mutex_init(&array[i].curseg_mutex); array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); if (!array[i].sum_blk) return -ENOMEM; array[i].segno = NULL_SEGNO; array[i].next_blkoff = 0; } return restore_curseg_summaries(sbi); } static void build_sit_entries(struct f2fs_sb_info *sbi) { struct sit_info *sit_i = SIT_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); struct f2fs_summary_block *sum = curseg->sum_blk; int sit_blk_cnt = SIT_BLK_CNT(sbi); unsigned int i, start, end; unsigned int readed, start_blk = 0; int nrpages = MAX_BIO_BLOCKS(sbi); do { readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT); start = start_blk * sit_i->sents_per_block; end = (start_blk + readed) * sit_i->sents_per_block; for (; start < end && start < MAIN_SEGS(sbi); start++) { struct seg_entry *se = &sit_i->sentries[start]; struct f2fs_sit_block *sit_blk; struct f2fs_sit_entry sit; struct page *page; mutex_lock(&curseg->curseg_mutex); for (i = 0; i < sits_in_cursum(sum); i++) { if (le32_to_cpu(segno_in_journal(sum, i)) == start) { sit = sit_in_journal(sum, i); mutex_unlock(&curseg->curseg_mutex); goto got_it; } } mutex_unlock(&curseg->curseg_mutex); page = get_current_sit_page(sbi, start); sit_blk = (struct f2fs_sit_block *)page_address(page); sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)]; f2fs_put_page(page, 1); got_it: check_block_count(sbi, start, &sit); seg_info_from_raw_sit(se, &sit); if (sbi->segs_per_sec > 1) { struct sec_entry *e = get_sec_entry(sbi, start); e->valid_blocks += se->valid_blocks; } } start_blk += readed; } while (start_blk < sit_blk_cnt); } static void init_free_segmap(struct f2fs_sb_info *sbi) { unsigned int start; int type; for (start = 0; start < MAIN_SEGS(sbi); start++) { struct seg_entry *sentry = get_seg_entry(sbi, start); if (!sentry->valid_blocks) __set_free(sbi, start); } /* set use the current segments */ for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) { struct curseg_info *curseg_t = CURSEG_I(sbi, type); __set_test_and_inuse(sbi, curseg_t->segno); } } static void init_dirty_segmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct free_segmap_info *free_i = 
FREE_I(sbi); unsigned int segno = 0, offset = 0; unsigned short valid_blocks; while (1) { /* find dirty segment based on free segmap */ segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset); if (segno >= MAIN_SEGS(sbi)) break; offset = segno + 1; valid_blocks = get_valid_blocks(sbi, segno, 0); if (valid_blocks == sbi->blocks_per_seg || !valid_blocks) continue; if (valid_blocks > sbi->blocks_per_seg) { f2fs_bug_on(sbi, 1); continue; } mutex_lock(&dirty_i->seglist_lock); __locate_dirty_segment(sbi, segno, DIRTY); mutex_unlock(&dirty_i->seglist_lock); } } static int init_victim_secmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL); if (!dirty_i->victim_secmap) return -ENOMEM; return 0; } static int build_dirty_segmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i; unsigned int bitmap_size, i; /* allocate memory for dirty segments list information */ dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL); if (!dirty_i) return -ENOMEM; SM_I(sbi)->dirty_info = dirty_i; mutex_init(&dirty_i->seglist_lock); bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); for (i = 0; i < NR_DIRTY_TYPE; i++) { dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL); if (!dirty_i->dirty_segmap[i]) return -ENOMEM; } init_dirty_segmap(sbi); return init_victim_secmap(sbi); } /* * Update min, max modified time for cost-benefit GC algorithm */ static void init_min_max_mtime(struct f2fs_sb_info *sbi) { struct sit_info *sit_i = SIT_I(sbi); unsigned int segno; mutex_lock(&sit_i->sentry_lock); sit_i->min_mtime = LLONG_MAX; for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) { unsigned int i; unsigned long long mtime = 0; for (i = 0; i < sbi->segs_per_sec; i++) mtime += get_seg_entry(sbi, segno + i)->mtime; mtime = div_u64(mtime, sbi->segs_per_sec); if (sit_i->min_mtime > mtime) sit_i->min_mtime = mtime; } sit_i->max_mtime = get_mtime(sbi); mutex_unlock(&sit_i->sentry_lock); } int build_segment_manager(struct f2fs_sb_info *sbi) { struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_sm_info *sm_info; int err; sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL); if (!sm_info) return -ENOMEM; /* init sm info */ sbi->sm_info = sm_info; sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); sm_info->segment_count = le32_to_cpu(raw_super->segment_count); sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); sm_info->rec_prefree_segments = sm_info->main_segments * DEF_RECLAIM_PREFREE_SEGMENTS / 100; sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC; sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; INIT_LIST_HEAD(&sm_info->discard_list); sm_info->nr_discards = 0; sm_info->max_discards = 0; sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS; INIT_LIST_HEAD(&sm_info->sit_entry_set); if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) { err = create_flush_cmd_control(sbi); if (err) return err; } err = build_sit_info(sbi); if (err) return err; err = build_free_segmap(sbi); if (err) return err; err = build_curseg(sbi); if (err) return err; /* reinit free 
segmap based on SIT */ build_sit_entries(sbi); init_free_segmap(sbi); err = build_dirty_segmap(sbi); if (err) return err; init_min_max_mtime(sbi); return 0; } static void discard_dirty_segmap(struct f2fs_sb_info *sbi, enum dirty_type dirty_type) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); mutex_lock(&dirty_i->seglist_lock); kfree(dirty_i->dirty_segmap[dirty_type]); dirty_i->nr_dirty[dirty_type] = 0; mutex_unlock(&dirty_i->seglist_lock); } static void destroy_victim_secmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); kfree(dirty_i->victim_secmap); } static void destroy_dirty_segmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); int i; if (!dirty_i) return; /* discard pre-free/dirty segments list */ for (i = 0; i < NR_DIRTY_TYPE; i++) discard_dirty_segmap(sbi, i); destroy_victim_secmap(sbi); SM_I(sbi)->dirty_info = NULL; kfree(dirty_i); } static void destroy_curseg(struct f2fs_sb_info *sbi) { struct curseg_info *array = SM_I(sbi)->curseg_array; int i; if (!array) return; SM_I(sbi)->curseg_array = NULL; for (i = 0; i < NR_CURSEG_TYPE; i++) kfree(array[i].sum_blk); kfree(array); } static void destroy_free_segmap(struct f2fs_sb_info *sbi) { struct free_segmap_info *free_i = SM_I(sbi)->free_info; if (!free_i) return; SM_I(sbi)->free_info = NULL; kfree(free_i->free_segmap); kfree(free_i->free_secmap); kfree(free_i); } static void destroy_sit_info(struct f2fs_sb_info *sbi) { struct sit_info *sit_i = SIT_I(sbi); unsigned int start; if (!sit_i) return; if (sit_i->sentries) { for (start = 0; start < MAIN_SEGS(sbi); start++) { kfree(sit_i->sentries[start].cur_valid_map); kfree(sit_i->sentries[start].ckpt_valid_map); } } kfree(sit_i->tmp_map); vfree(sit_i->sentries); vfree(sit_i->sec_entries); kfree(sit_i->dirty_sentries_bitmap); SM_I(sbi)->sit_info = NULL; kfree(sit_i->sit_bitmap); kfree(sit_i); } void destroy_segment_manager(struct f2fs_sb_info *sbi) { struct f2fs_sm_info *sm_info = SM_I(sbi); if (!sm_info) return; destroy_flush_cmd_control(sbi); destroy_dirty_segmap(sbi); destroy_curseg(sbi); destroy_free_segmap(sbi); destroy_sit_info(sbi); sbi->sm_info = NULL; kfree(sm_info); } int __init create_segment_manager_caches(void) { discard_entry_slab = f2fs_kmem_cache_create("discard_entry", sizeof(struct discard_entry)); if (!discard_entry_slab) goto fail; sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set", sizeof(struct sit_entry_set)); if (!sit_entry_set_slab) goto destory_discard_entry; inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry", sizeof(struct inmem_pages)); if (!inmem_entry_slab) goto destroy_sit_entry_set; return 0; destroy_sit_entry_set: kmem_cache_destroy(sit_entry_set_slab); destory_discard_entry: kmem_cache_destroy(discard_entry_slab); fail: return -ENOMEM; } void destroy_segment_manager_caches(void) { kmem_cache_destroy(sit_entry_set_slab); kmem_cache_destroy(discard_entry_slab); kmem_cache_destroy(inmem_entry_slab); }
128571.c
/*
 / _____)             _              | |
( (____  _____ ____ _| |_ _____  ____| |__
 \____ \| ___ |    (_   _) ___ |/ ___)  _ \
 _____) ) ____| | | || |_| ____( (___| | | |
(______/|_____)_|_|_| \__)_____)\____)_| |_|
    (C)2013 Semtech

Description: Bleeper board SPI driver implementation

License: Revised BSD License, see LICENSE.TXT file include in the project

Maintainer: Miguel Luis and Gregory Cristian
*/
#include "board.h"
#include "spi-board.h"
#include "stm32l1xx_spi.h"
#include "stm32l1xx_gpio.h"

/*!
 * MCU SPI peripherals enumeration
 */
typedef enum
{
    SPI_1 = ( uint32_t )SPI1_BASE,
    SPI_2 = ( uint32_t )SPI2_BASE,
    SPI_3 = ( uint32_t )SPI3_BASE,
} SPIName;

SPI_InitTypeDef SPI_InitStructure;

void SpiInit( Spi_t *obj, PinNames mosi, PinNames miso, PinNames sclk, PinNames nss )
{
    GpioInit( &obj->Mosi, mosi, PIN_ALTERNATE_FCT, PIN_PUSH_PULL, PIN_PULL_DOWN, 0 );
    GpioInit( &obj->Miso, miso, PIN_ALTERNATE_FCT, PIN_PUSH_PULL, PIN_PULL_DOWN, 0 );
    GpioInit( &obj->Sclk, sclk, PIN_ALTERNATE_FCT, PIN_PUSH_PULL, PIN_PULL_DOWN, 0 );

    // TODO: Make independent of stm32l1xx_gpio.h
    GPIO_PinAFConfig( obj->Mosi.port, ( obj->Mosi.pin & 0x0F ), GPIO_AF_SPI1 );
    GPIO_PinAFConfig( obj->Miso.port, ( obj->Miso.pin & 0x0F ), GPIO_AF_SPI1 );
    GPIO_PinAFConfig( obj->Sclk.port, ( obj->Sclk.pin & 0x0F ), GPIO_AF_SPI1 );

    if( nss != NC )
    {
        GpioInit( &obj->Nss, nss, PIN_ALTERNATE_FCT, PIN_PUSH_PULL, PIN_PULL_UP, 1 );
        // TODO: Make independent of stm32l1xx_gpio.h
        GPIO_PinAFConfig( obj->Nss.port, ( obj->Nss.pin & 0x0F ), GPIO_AF_SPI1 );
    }
    else
    {
        SPI_InitStructure.SPI_NSS = SPI_NSS_Soft;
    }

    // Choose SPI interface according to the given pins
    obj->Spi = ( SPI_TypeDef* )SPI1_BASE;
    RCC_APB2PeriphClockCmd( RCC_APB2Periph_SPI1, ENABLE );

    if( nss == NC )
    {
        // 8 bits, CPOL = 0, CPHA = 0, MASTER
        SpiFormat( obj, 8, 0, 0, 0 );
    }
    else
    {
        // 8 bits, CPOL = 0, CPHA = 0, SLAVE
        SpiFormat( obj, 8, 0, 0, 1 );
    }
    SpiFrequency( obj, 10000000 );

    SPI_Cmd( obj->Spi, ENABLE );
}

void SpiDeInit( Spi_t *obj )
{
    SPI_Cmd( obj->Spi, DISABLE );
    SPI_I2S_DeInit( obj->Spi );

    GpioInit( &obj->Mosi, obj->Mosi.pin, PIN_OUTPUT, PIN_PUSH_PULL, PIN_NO_PULL, 0 );
    GpioInit( &obj->Miso, obj->Miso.pin, PIN_OUTPUT, PIN_PUSH_PULL, PIN_PULL_DOWN, 0 );
    GpioInit( &obj->Sclk, obj->Sclk.pin, PIN_OUTPUT, PIN_PUSH_PULL, PIN_NO_PULL, 0 );
    GpioInit( &obj->Nss, obj->Nss.pin, PIN_OUTPUT, PIN_PUSH_PULL, PIN_NO_PULL, 1 );
}

void SpiFormat( Spi_t *obj, int8_t bits, int8_t cpol, int8_t cpha, int8_t slave )
{
    SPI_Cmd( obj->Spi, DISABLE );

    if( ( ( ( bits == 8 ) || ( bits == 16 ) ) == false ) ||
        ( ( ( cpol >= 0 ) && ( cpol <= 1 ) ) == false ) ||
        ( ( ( cpha >= 0 ) && ( cpha <= 1 ) ) == false ) )
    {
        // SPI error
        while( 1 );
    }

    SPI_InitStructure.SPI_Mode = ( slave == 0x01 ) ? SPI_Mode_Slave : SPI_Mode_Master;
    SPI_InitStructure.SPI_CPOL = ( cpol == 0x01 ) ? SPI_CPOL_High : SPI_CPOL_Low;
    SPI_InitStructure.SPI_CPHA = ( cpha == 0x01 ) ? SPI_CPHA_2Edge : SPI_CPHA_1Edge;
    SPI_InitStructure.SPI_DataSize = ( bits == 8 ) ? SPI_DataSize_8b : SPI_DataSize_16b;

    SPI_Init( obj->Spi, &SPI_InitStructure );
    SPI_Cmd( obj->Spi, ENABLE );
}

void SpiFrequency( Spi_t *obj, uint32_t hz )
{
    uint32_t divisor;

    SPI_Cmd( obj->Spi, DISABLE );

    divisor = SystemCoreClock / hz;

    // Find the nearest power-of-2
    divisor = divisor > 0 ? divisor - 1 : 0;
    divisor |= divisor >> 1;
    divisor |= divisor >> 2;
    divisor |= divisor >> 4;
    divisor |= divisor >> 8;
    divisor |= divisor >> 16;
    divisor++;

    divisor = __ffs( divisor ) - 1;
    divisor = ( divisor > 0x07 ) ? 0x07 : divisor;

    SPI_InitStructure.SPI_BaudRatePrescaler = divisor << 3;
    SPI_Init( obj->Spi, &SPI_InitStructure );
    SPI_Cmd( obj->Spi, ENABLE );
}

uint16_t SpiInOut( Spi_t *obj, uint16_t outData )
{
    if( ( obj == NULL ) || ( obj->Spi ) == NULL )
    {
        while( 1 );
    }

    while( SPI_I2S_GetFlagStatus( obj->Spi, SPI_I2S_FLAG_TXE ) == RESET );
    SPI_I2S_SendData( obj->Spi, outData );
    while( SPI_I2S_GetFlagStatus( obj->Spi, SPI_I2S_FLAG_RXNE ) == RESET );
    return SPI_I2S_ReceiveData( obj->Spi );
}
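To show how the driver above is meant to be used, here is a short, hypothetical sketch: reading one register of an SPI slave with software-managed chip select. The pin identifiers (SPI_MOSI, SPI_MISO, SPI_SCLK, SPI_NSS) and the GpioWrite() helper are assumed to come from the board support package and are not defined in this file; the register framing (MSB as write flag) is a common convention, not something the driver mandates.

```c
#include "board.h"
#include "spi-board.h"

static Spi_t Spi;

// Illustrative only: pin names and GpioWrite() are assumptions.
uint8_t ReadRegister( uint8_t addr )
{
    uint8_t value;

    // Pass NC so SpiInit() configures master mode with software NSS,
    // then drive the chip-select pin manually as a plain GPIO output.
    SpiInit( &Spi, SPI_MOSI, SPI_MISO, SPI_SCLK, NC );
    GpioInit( &Spi.Nss, SPI_NSS, PIN_OUTPUT, PIN_PUSH_PULL, PIN_PULL_UP, 1 );

    GpioWrite( &Spi.Nss, 0 );                     // assert chip select
    SpiInOut( &Spi, addr & 0x7F );                // address with read bit cleared
    value = ( uint8_t )SpiInOut( &Spi, 0x00 );    // dummy byte clocks the reply out
    GpioWrite( &Spi.Nss, 1 );                     // release chip select

    return value;
}
```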
280198.c
/* THIS FILE IS GENERATED. -*- buffer-read-only: t -*- vi:set ro: Original: x32-avx-linux.xml */ #include "defs.h" #include "osabi.h" #include "target-descriptions.h" struct target_desc *tdesc_x32_avx_linux; static void initialize_tdesc_x32_avx_linux (void) { struct target_desc *result = allocate_target_description (); struct tdesc_feature *feature; struct tdesc_type *field_type; struct tdesc_type *type; set_tdesc_architecture (result, bfd_scan_arch ("i386:x64-32")); set_tdesc_osabi (result, osabi_from_tdesc_string ("GNU/Linux")); feature = tdesc_create_feature (result, "org.gnu.gdb.i386.core"); field_type = tdesc_create_flags (feature, "i386_eflags", 4); tdesc_add_flag (field_type, 0, "CF"); tdesc_add_flag (field_type, 1, ""); tdesc_add_flag (field_type, 2, "PF"); tdesc_add_flag (field_type, 4, "AF"); tdesc_add_flag (field_type, 6, "ZF"); tdesc_add_flag (field_type, 7, "SF"); tdesc_add_flag (field_type, 8, "TF"); tdesc_add_flag (field_type, 9, "IF"); tdesc_add_flag (field_type, 10, "DF"); tdesc_add_flag (field_type, 11, "OF"); tdesc_add_flag (field_type, 14, "NT"); tdesc_add_flag (field_type, 16, "RF"); tdesc_add_flag (field_type, 17, "VM"); tdesc_add_flag (field_type, 18, "AC"); tdesc_add_flag (field_type, 19, "VIF"); tdesc_add_flag (field_type, 20, "VIP"); tdesc_add_flag (field_type, 21, "ID"); tdesc_create_reg (feature, "rax", 0, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "rbx", 1, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "rcx", 2, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "rdx", 3, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "rsi", 4, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "rdi", 5, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "rbp", 6, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "rsp", 7, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "r8", 8, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "r9", 9, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "r10", 10, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "r11", 11, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "r12", 12, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "r13", 13, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "r14", 14, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "r15", 15, 1, NULL, 64, "int64"); tdesc_create_reg (feature, "rip", 16, 1, NULL, 64, "uint64"); tdesc_create_reg (feature, "eflags", 17, 1, NULL, 32, "i386_eflags"); tdesc_create_reg (feature, "cs", 18, 1, NULL, 32, "int32"); tdesc_create_reg (feature, "ss", 19, 1, NULL, 32, "int32"); tdesc_create_reg (feature, "ds", 20, 1, NULL, 32, "int32"); tdesc_create_reg (feature, "es", 21, 1, NULL, 32, "int32"); tdesc_create_reg (feature, "fs", 22, 1, NULL, 32, "int32"); tdesc_create_reg (feature, "gs", 23, 1, NULL, 32, "int32"); tdesc_create_reg (feature, "st0", 24, 1, NULL, 80, "i387_ext"); tdesc_create_reg (feature, "st1", 25, 1, NULL, 80, "i387_ext"); tdesc_create_reg (feature, "st2", 26, 1, NULL, 80, "i387_ext"); tdesc_create_reg (feature, "st3", 27, 1, NULL, 80, "i387_ext"); tdesc_create_reg (feature, "st4", 28, 1, NULL, 80, "i387_ext"); tdesc_create_reg (feature, "st5", 29, 1, NULL, 80, "i387_ext"); tdesc_create_reg (feature, "st6", 30, 1, NULL, 80, "i387_ext"); tdesc_create_reg (feature, "st7", 31, 1, NULL, 80, "i387_ext"); tdesc_create_reg (feature, "fctrl", 32, 1, "float", 32, "int"); tdesc_create_reg (feature, "fstat", 33, 1, "float", 32, "int"); tdesc_create_reg (feature, "ftag", 34, 1, "float", 32, "int"); tdesc_create_reg (feature, "fiseg", 35, 1, "float", 32, "int"); 
tdesc_create_reg (feature, "fioff", 36, 1, "float", 32, "int"); tdesc_create_reg (feature, "foseg", 37, 1, "float", 32, "int"); tdesc_create_reg (feature, "fooff", 38, 1, "float", 32, "int"); tdesc_create_reg (feature, "fop", 39, 1, "float", 32, "int"); feature = tdesc_create_feature (result, "org.gnu.gdb.i386.sse"); field_type = tdesc_named_type (feature, "ieee_single"); tdesc_create_vector (feature, "v4f", field_type, 4); field_type = tdesc_named_type (feature, "ieee_double"); tdesc_create_vector (feature, "v2d", field_type, 2); field_type = tdesc_named_type (feature, "int8"); tdesc_create_vector (feature, "v16i8", field_type, 16); field_type = tdesc_named_type (feature, "int16"); tdesc_create_vector (feature, "v8i16", field_type, 8); field_type = tdesc_named_type (feature, "int32"); tdesc_create_vector (feature, "v4i32", field_type, 4); field_type = tdesc_named_type (feature, "int64"); tdesc_create_vector (feature, "v2i64", field_type, 2); type = tdesc_create_union (feature, "vec128"); field_type = tdesc_named_type (feature, "v4f"); tdesc_add_field (type, "v4_float", field_type); field_type = tdesc_named_type (feature, "v2d"); tdesc_add_field (type, "v2_double", field_type); field_type = tdesc_named_type (feature, "v16i8"); tdesc_add_field (type, "v16_int8", field_type); field_type = tdesc_named_type (feature, "v8i16"); tdesc_add_field (type, "v8_int16", field_type); field_type = tdesc_named_type (feature, "v4i32"); tdesc_add_field (type, "v4_int32", field_type); field_type = tdesc_named_type (feature, "v2i64"); tdesc_add_field (type, "v2_int64", field_type); field_type = tdesc_named_type (feature, "uint128"); tdesc_add_field (type, "uint128", field_type); field_type = tdesc_create_flags (feature, "i386_mxcsr", 4); tdesc_add_flag (field_type, 0, "IE"); tdesc_add_flag (field_type, 1, "DE"); tdesc_add_flag (field_type, 2, "ZE"); tdesc_add_flag (field_type, 3, "OE"); tdesc_add_flag (field_type, 4, "UE"); tdesc_add_flag (field_type, 5, "PE"); tdesc_add_flag (field_type, 6, "DAZ"); tdesc_add_flag (field_type, 7, "IM"); tdesc_add_flag (field_type, 8, "DM"); tdesc_add_flag (field_type, 9, "ZM"); tdesc_add_flag (field_type, 10, "OM"); tdesc_add_flag (field_type, 11, "UM"); tdesc_add_flag (field_type, 12, "PM"); tdesc_add_flag (field_type, 15, "FZ"); tdesc_create_reg (feature, "xmm0", 40, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "xmm1", 41, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "xmm2", 42, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "xmm3", 43, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "xmm4", 44, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "xmm5", 45, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "xmm6", 46, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "xmm7", 47, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "xmm8", 48, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "xmm9", 49, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "xmm10", 50, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "xmm11", 51, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "xmm12", 52, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "xmm13", 53, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "xmm14", 54, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "xmm15", 55, 1, NULL, 128, "vec128"); tdesc_create_reg (feature, "mxcsr", 56, 1, "vector", 32, "i386_mxcsr"); feature = tdesc_create_feature (result, "org.gnu.gdb.i386.linux"); tdesc_create_reg (feature, "orig_rax", 57, 1, NULL, 64, "int"); feature = tdesc_create_feature 
(result, "org.gnu.gdb.i386.avx"); tdesc_create_reg (feature, "ymm0h", 58, 1, NULL, 128, "uint128"); tdesc_create_reg (feature, "ymm1h", 59, 1, NULL, 128, "uint128"); tdesc_create_reg (feature, "ymm2h", 60, 1, NULL, 128, "uint128"); tdesc_create_reg (feature, "ymm3h", 61, 1, NULL, 128, "uint128"); tdesc_create_reg (feature, "ymm4h", 62, 1, NULL, 128, "uint128"); tdesc_create_reg (feature, "ymm5h", 63, 1, NULL, 128, "uint128"); tdesc_create_reg (feature, "ymm6h", 64, 1, NULL, 128, "uint128"); tdesc_create_reg (feature, "ymm7h", 65, 1, NULL, 128, "uint128"); tdesc_create_reg (feature, "ymm8h", 66, 1, NULL, 128, "uint128"); tdesc_create_reg (feature, "ymm9h", 67, 1, NULL, 128, "uint128"); tdesc_create_reg (feature, "ymm10h", 68, 1, NULL, 128, "uint128"); tdesc_create_reg (feature, "ymm11h", 69, 1, NULL, 128, "uint128"); tdesc_create_reg (feature, "ymm12h", 70, 1, NULL, 128, "uint128"); tdesc_create_reg (feature, "ymm13h", 71, 1, NULL, 128, "uint128"); tdesc_create_reg (feature, "ymm14h", 72, 1, NULL, 128, "uint128"); tdesc_create_reg (feature, "ymm15h", 73, 1, NULL, 128, "uint128"); tdesc_x32_avx_linux = result; }
709549.c
/* * Debugging routines * * Copyright (C) 2006-2010, Brainspark B.V. * * This file is part of PolarSSL (http://www.polarssl.org) * Lead Maintainer: Paul Bakker <polarssl_maintainer at polarssl.org> * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "polarssl/config.h" #if defined(POLARSSL_DEBUG_C) #include "polarssl/debug.h" #include <stdarg.h> #include <stdlib.h> #if defined _MSC_VER && !defined snprintf #define snprintf _snprintf #endif #if defined _MSC_VER && !defined vsnprintf #define vsnprintf _vsnprintf #endif char *debug_fmt( const char *format, ... ) { va_list argp; static char str[512]; int maxlen = sizeof( str ) - 1; va_start( argp, format ); vsnprintf( str, maxlen, format, argp ); va_end( argp ); str[maxlen] = '\0'; return( str ); } void debug_print_msg( const ssl_context *ssl, int level, const char *file, int line, const char *text ) { char str[512]; int maxlen = sizeof( str ) - 1; if( ssl->f_dbg == NULL ) return; snprintf( str, maxlen, "%s(%04d): %s\n", file, line, text ); str[maxlen] = '\0'; ssl->f_dbg( ssl->p_dbg, level, str ); } void debug_print_ret( const ssl_context *ssl, int level, const char *file, int line, const char *text, int ret ) { char str[512]; int maxlen = sizeof( str ) - 1; if( ssl->f_dbg == NULL ) return; snprintf( str, maxlen, "%s(%04d): %s() returned %d (0x%x)\n", file, line, text, ret, ret ); str[maxlen] = '\0'; ssl->f_dbg( ssl->p_dbg, level, str ); } void debug_print_buf( const ssl_context *ssl, int level, const char *file, int line, const char *text, unsigned char *buf, int len ) { char str[512]; int i, maxlen = sizeof( str ) - 1; if( ssl->f_dbg == NULL || len < 0 ) return; snprintf( str, maxlen, "%s(%04d): dumping '%s' (%d bytes)\n", file, line, text, len ); str[maxlen] = '\0'; ssl->f_dbg( ssl->p_dbg, level, str ); for( i = 0; i < len; i++ ) { if( i >= 4096 ) break; if( i % 16 == 0 ) { if( i > 0 ) ssl->f_dbg( ssl->p_dbg, level, "\n" ); snprintf( str, maxlen, "%s(%04d): %04x: ", file, line, i ); str[maxlen] = '\0'; ssl->f_dbg( ssl->p_dbg, level, str ); } snprintf( str, maxlen, " %02x", (unsigned int) buf[i] ); str[maxlen] = '\0'; ssl->f_dbg( ssl->p_dbg, level, str ); } if( len > 0 ) ssl->f_dbg( ssl->p_dbg, level, "\n" ); } void debug_print_mpi( const ssl_context *ssl, int level, const char *file, int line, const char *text, const mpi *X ) { char str[512]; int i, j, k, n, maxlen = sizeof( str ) - 1; if( ssl->f_dbg == NULL || X == NULL ) return; for( n = X->n - 1; n >= 0; n-- ) if( X->p[n] != 0 ) break; snprintf( str, maxlen, "%s(%04d): value of '%s' (%lu bits) is:\n", file, line, text, (unsigned long) ((n + 1) * sizeof( t_int )) << 3 ); str[maxlen] = '\0'; ssl->f_dbg( ssl->p_dbg, level, str ); for( i = n, j = 0; i >= 0; i--, j++ ) { if( j % ( 16 / sizeof( t_int ) ) == 0 ) { if( j > 0 ) ssl->f_dbg( ssl->p_dbg, level, "\n" ); snprintf( str, maxlen, "%s(%04d): ", file, line ); 
str[maxlen] = '\0'; ssl->f_dbg( ssl->p_dbg, level, str ); } for( k = sizeof( t_int ) - 1; k >= 0; k-- ) { snprintf( str, maxlen, " %02x", (unsigned int) ( X->p[i] >> (k << 3) ) & 0xFF ); str[maxlen] = '\0'; ssl->f_dbg( ssl->p_dbg, level, str ); } } ssl->f_dbg( ssl->p_dbg, level, "\n" ); } void debug_print_crt( const ssl_context *ssl, int level, const char *file, int line, const char *text, const x509_cert *crt ) { char str[1024], prefix[64]; int i = 0, maxlen = sizeof( prefix ) - 1; if( ssl->f_dbg == NULL || crt == NULL ) return; snprintf( prefix, maxlen, "%s(%04d): ", file, line ); prefix[maxlen] = '\0'; maxlen = sizeof( str ) - 1; while( crt != NULL ) { char buf[1024]; x509parse_cert_info( buf, sizeof( buf ) - 1, prefix, crt ); snprintf( str, maxlen, "%s(%04d): %s #%d:\n%s", file, line, text, ++i, buf ); str[maxlen] = '\0'; ssl->f_dbg( ssl->p_dbg, level, str ); debug_print_mpi( ssl, level, file, line, "crt->rsa.N", &crt->rsa.N ); debug_print_mpi( ssl, level, file, line, "crt->rsa.E", &crt->rsa.E ); crt = crt->next; } } #endif
507997.c
// This file is a part of Julia. License is MIT: https://julialang.org/license /* modules and top-level bindings */ #include "julia.h" #include "julia_internal.h" #include "julia_assert.h" #ifdef __cplusplus extern "C" { #endif jl_module_t *jl_main_module = NULL; jl_module_t *jl_core_module = NULL; jl_module_t *jl_base_module = NULL; jl_module_t *jl_top_module = NULL; extern jl_function_t *jl_append_any_func; JL_DLLEXPORT jl_module_t *jl_new_module(jl_sym_t *name) { jl_ptls_t ptls = jl_get_ptls_states(); const jl_uuid_t uuid_zero = {0, 0}; jl_module_t *m = (jl_module_t*)jl_gc_alloc(ptls, sizeof(jl_module_t), jl_module_type); JL_GC_PUSH1(&m); assert(jl_is_symbol(name)); m->name = name; m->parent = NULL; m->istopmod = 0; m->uuid = uuid_zero; static unsigned int mcounter; // simple counter backup, in case hrtime is not incrementing m->build_id = jl_hrtime() + (++mcounter); if (!m->build_id) m->build_id++; // build id 0 is invalid m->primary_world = 0; m->counter = 0; htable_new(&m->bindings, 0); arraylist_new(&m->usings, 0); if (jl_core_module) { jl_module_using(m, jl_core_module); } // export own name, so "using Foo" makes "Foo" itself visible jl_set_const(m, name, (jl_value_t*)m); jl_module_export(m, name); JL_GC_POP(); return m; } uint32_t jl_module_next_counter(jl_module_t *m) { return ++(m->counter); } JL_DLLEXPORT jl_value_t *jl_f_new_module(jl_sym_t *name, uint8_t std_imports) { jl_module_t *m = jl_new_module(name); JL_GC_PUSH1(&m); m->parent = jl_main_module; jl_gc_wb(m, m->parent); if (std_imports) jl_add_standard_imports(m); JL_GC_POP(); return (jl_value_t*)m; } JL_DLLEXPORT void jl_set_istopmod(jl_module_t *self, uint8_t isprimary) { self->istopmod = 1; if (isprimary) { jl_top_module = self; jl_append_any_func = NULL; } } JL_DLLEXPORT uint8_t jl_istopmod(jl_module_t *mod) { return mod->istopmod; } static jl_binding_t *new_binding(jl_sym_t *name) { jl_ptls_t ptls = jl_get_ptls_states(); assert(jl_is_symbol(name)); jl_binding_t *b = (jl_binding_t*)jl_gc_alloc_buf(ptls, sizeof(jl_binding_t)); b->name = name; b->value = NULL; b->owner = NULL; b->globalref = NULL; b->constp = 0; b->exportp = 0; b->imported = 0; b->deprecated = 0; return b; } // get binding for assignment JL_DLLEXPORT jl_binding_t *jl_get_binding_wr(jl_module_t *m, jl_sym_t *var, int error) { jl_binding_t **bp = (jl_binding_t**)ptrhash_bp(&m->bindings, var); jl_binding_t *b = *bp; if (b != HT_NOTFOUND) { if (b->owner != m) { if (b->owner == NULL) { b->owner = m; } else if (error) { jl_errorf("cannot assign variable %s.%s from module %s", jl_symbol_name(b->owner->name), jl_symbol_name(var), jl_symbol_name(m->name)); } } return *bp; } b = new_binding(var); b->owner = m; *bp = b; jl_gc_wb_buf(m, b, sizeof(jl_binding_t)); return *bp; } // return module of binding JL_DLLEXPORT jl_module_t *jl_get_module_of_binding(jl_module_t *m, jl_sym_t *var) { jl_binding_t *b = jl_get_binding(m, var); if (b == NULL) return NULL; return b->owner; } // get binding for adding a method // like jl_get_binding_wr, but has different error paths JL_DLLEXPORT jl_binding_t *jl_get_binding_for_method_def(jl_module_t *m, jl_sym_t *var) { jl_binding_t **bp = (jl_binding_t**)ptrhash_bp(&m->bindings, var); jl_binding_t *b = *bp; if (b != HT_NOTFOUND) { if (b->owner != m) { if (b->owner == NULL) { b->owner = m; } else { jl_binding_t *b2 = jl_get_binding(b->owner, var); if (b2 == NULL || b2->value == NULL) jl_errorf("invalid method definition: imported function %s.%s does not exist", jl_symbol_name(b->owner->name), jl_symbol_name(var)); // TODO: we might 
want to require explicitly importing types to add constructors if (!b->imported && !jl_is_type(b2->value)) { jl_errorf("error in method definition: function %s.%s must be explicitly imported to be extended", jl_symbol_name(b->owner->name), jl_symbol_name(var)); } return b2; } } return b; } b = new_binding(var); b->owner = m; *bp = b; jl_gc_wb_buf(m, b, sizeof(jl_binding_t)); return *bp; } static void module_import_(jl_module_t *to, jl_module_t *from, jl_sym_t *s, int explici); typedef struct _modstack_t { jl_module_t *m; struct _modstack_t *prev; } modstack_t; static jl_binding_t *jl_get_binding_(jl_module_t *m, jl_sym_t *var, modstack_t *st); // find a binding from a module's `usings` list static jl_binding_t *using_resolve_binding(jl_module_t *m, jl_sym_t *var, modstack_t *st, int warn) { jl_binding_t *b = NULL; jl_module_t *owner = NULL; for(int i=(int)m->usings.len-1; i >= 0; --i) { jl_module_t *imp = (jl_module_t*)m->usings.items[i]; jl_binding_t *tempb = (jl_binding_t*)ptrhash_get(&imp->bindings, var); if (tempb != HT_NOTFOUND && tempb->exportp) { tempb = jl_get_binding_(imp, var, st); if (tempb == NULL || tempb->owner == NULL) // couldn't resolve; try next using (see issue #6105) continue; if (owner != NULL && tempb->owner != b->owner && !tempb->deprecated && !b->deprecated && !(tempb->constp && tempb->value && b->constp && b->value == tempb->value)) { if (warn) { jl_printf(JL_STDERR, "WARNING: both %s and %s export \"%s\"; uses of it in module %s must be qualified\n", jl_symbol_name(owner->name), jl_symbol_name(imp->name), jl_symbol_name(var), jl_symbol_name(m->name)); // mark this binding resolved, to avoid repeating the warning (void)jl_get_binding_wr(m, var, 0); } return NULL; } if (owner == NULL || !tempb->deprecated) { owner = imp; b = tempb; } } } return b; } // get binding for reading. might return NULL for unbound. static jl_binding_t *jl_get_binding_(jl_module_t *m, jl_sym_t *var, modstack_t *st) { modstack_t top = { m, st }; modstack_t *tmp = st; while (tmp != NULL) { if (tmp->m == m) { // import cycle without finding actual location return NULL; } tmp = tmp->prev; } jl_binding_t *b = (jl_binding_t*)ptrhash_get(&m->bindings, var); if (b == HT_NOTFOUND || b->owner == NULL) { b = using_resolve_binding(m, var, &top, 1); if (b != NULL) { // do a full import to prevent the result of this lookup // from changing, for example if this var is assigned to // later. 
module_import_(m, b->owner, var, 0); return b; } return NULL; } if (b->owner != m) return jl_get_binding_(b->owner, var, &top); return b; } // get owner of binding when accessing m.var, without resolving the binding JL_DLLEXPORT jl_value_t *jl_binding_owner(jl_module_t *m, jl_sym_t *var) { jl_binding_t *b = (jl_binding_t*)ptrhash_get(&m->bindings, var); if (b == HT_NOTFOUND || b->owner == NULL) { b = using_resolve_binding(m, var, NULL, 0); if (b == NULL || b->owner == NULL) return jl_nothing; } return (jl_value_t*)b->owner; } JL_DLLEXPORT jl_binding_t *jl_get_binding(jl_module_t *m, jl_sym_t *var) { return jl_get_binding_(m, var, NULL); } void jl_binding_deprecation_warning(jl_module_t *m, jl_binding_t *b); JL_DLLEXPORT jl_binding_t *jl_get_binding_or_error(jl_module_t *m, jl_sym_t *var) { jl_binding_t *b = jl_get_binding(m, var); if (b == NULL) jl_undefined_var_error(var); if (b->deprecated) jl_binding_deprecation_warning(m, b); return b; } JL_DLLEXPORT jl_value_t *jl_module_globalref(jl_module_t *m, jl_sym_t *var) { jl_binding_t *b = (jl_binding_t*)ptrhash_get(&m->bindings, var); if (b == HT_NOTFOUND) { return jl_new_struct(jl_globalref_type, m, var); } if (b->globalref == NULL) { b->globalref = jl_new_struct(jl_globalref_type, m, var); jl_gc_wb(m, b->globalref); } return b->globalref; } static int eq_bindings(jl_binding_t *a, jl_binding_t *b) { if (a==b) return 1; if (a->name == b->name && a->owner == b->owner) return 1; if (a->constp && a->value && b->constp && b->value == a->value) return 1; return 0; } // does module m explicitly import s? JL_DLLEXPORT int jl_is_imported(jl_module_t *m, jl_sym_t *s) { jl_binding_t *b = (jl_binding_t*)ptrhash_get(&m->bindings, s); return (b != HT_NOTFOUND && b->imported); } // NOTE: we use explici since explicit is a C++ keyword static void module_import_(jl_module_t *to, jl_module_t *from, jl_sym_t *s, int explici) { jl_binding_t *b = jl_get_binding(from, s); if (b == NULL) { jl_printf(JL_STDERR, "WARNING: could not import %s.%s into %s\n", jl_symbol_name(from->name), jl_symbol_name(s), jl_symbol_name(to->name)); } else { if (b->deprecated) { if (b->value == jl_nothing) { return; } else if (to != jl_main_module && to != jl_base_module && jl_options.depwarn != JL_OPTIONS_DEPWARN_OFF) { /* with #22763, external packages wanting to replace deprecated Base bindings should simply export the new binding */ jl_printf(JL_STDERR, "WARNING: importing deprecated binding %s.%s into %s.\n", jl_symbol_name(from->name), jl_symbol_name(s), jl_symbol_name(to->name)); } } jl_binding_t **bp = (jl_binding_t**)ptrhash_bp(&to->bindings, s); jl_binding_t *bto = *bp; if (bto != HT_NOTFOUND) { if (bto == b) { // importing a binding on top of itself. harmless. 
} else if (bto->owner == b->owner) { // already imported bto->imported = (explici!=0); } else if (bto->owner != to && bto->owner != NULL) { // already imported from somewhere else jl_binding_t *bval = jl_get_binding(to, s); if (bval->constp && bval->value && b->constp && b->value == bval->value) { // equivalent binding bto->imported = (explici!=0); return; } jl_printf(JL_STDERR, "WARNING: ignoring conflicting import of %s.%s into %s\n", jl_symbol_name(from->name), jl_symbol_name(s), jl_symbol_name(to->name)); } else if (bto->constp || bto->value) { // conflict with name owned by destination module assert(bto->owner == to); if (bto->constp && bto->value && b->constp && b->value == bto->value) { // equivalent binding return; } jl_printf(JL_STDERR, "WARNING: import of %s.%s into %s conflicts with an existing identifier; ignored.\n", jl_symbol_name(from->name), jl_symbol_name(s), jl_symbol_name(to->name)); } else { bto->owner = b->owner; bto->imported = (explici!=0); } } else { jl_binding_t *nb = new_binding(s); nb->owner = b->owner; nb->imported = (explici!=0); nb->deprecated = b->deprecated; *bp = nb; jl_gc_wb_buf(to, nb, sizeof(jl_binding_t)); } } } JL_DLLEXPORT void jl_module_import(jl_module_t *to, jl_module_t *from, jl_sym_t *s) { module_import_(to, from, s, 1); } JL_DLLEXPORT void jl_module_use(jl_module_t *to, jl_module_t *from, jl_sym_t *s) { module_import_(to, from, s, 0); } JL_DLLEXPORT void jl_module_importall(jl_module_t *to, jl_module_t *from) { void **table = from->bindings.table; for(size_t i=1; i < from->bindings.size; i+=2) { if (table[i] != HT_NOTFOUND) { jl_binding_t *b = (jl_binding_t*)table[i]; if (b->exportp && (b->owner==from || b->imported)) jl_module_import(to, from, b->name); } } } JL_DLLEXPORT void jl_module_using(jl_module_t *to, jl_module_t *from) { if (to == from) return; for(size_t i=0; i < to->usings.len; i++) { if (from == to->usings.items[i]) return; } // print a warning if something visible via this "using" conflicts with // an existing identifier. note that an identifier added later may still // silently override a "using" name. see issue #2054. void **table = from->bindings.table; for(size_t i=1; i < from->bindings.size; i+=2) { if (table[i] != HT_NOTFOUND) { jl_binding_t *b = (jl_binding_t*)table[i]; if (b->exportp && (b->owner==from || b->imported)) { jl_sym_t *var = (jl_sym_t*)table[i-1]; jl_binding_t **tobp = (jl_binding_t**)ptrhash_bp(&to->bindings, var); if (*tobp != HT_NOTFOUND && (*tobp)->owner != NULL && // don't warn for conflicts with the module name itself. 
// see issue #4715 var != to->name && !eq_bindings(jl_get_binding(to,var), b)) { jl_printf(JL_STDERR, "WARNING: using %s.%s in module %s conflicts with an existing identifier.\n", jl_symbol_name(from->name), jl_symbol_name(var), jl_symbol_name(to->name)); } } } } arraylist_push(&to->usings, from); jl_gc_wb(to, from); } JL_DLLEXPORT void jl_module_export(jl_module_t *from, jl_sym_t *s) { jl_binding_t **bp = (jl_binding_t**)ptrhash_bp(&from->bindings, s); if (*bp == HT_NOTFOUND) { jl_binding_t *b = new_binding(s); // don't yet know who the owner is b->owner = NULL; *bp = b; jl_gc_wb_buf(from, b, sizeof(jl_binding_t)); } assert(*bp != HT_NOTFOUND); (*bp)->exportp = 1; } JL_DLLEXPORT int jl_boundp(jl_module_t *m, jl_sym_t *var) { jl_binding_t *b = jl_get_binding(m, var); return b && (b->value != NULL); } JL_DLLEXPORT int jl_defines_or_exports_p(jl_module_t *m, jl_sym_t *var) { jl_binding_t *b = (jl_binding_t*)ptrhash_get(&m->bindings, var); return b != HT_NOTFOUND && (b->exportp || b->owner==m); } JL_DLLEXPORT int jl_module_exports_p(jl_module_t *m, jl_sym_t *var) { jl_binding_t *b = (jl_binding_t*)ptrhash_get(&m->bindings, var); return b != HT_NOTFOUND && b->exportp; } JL_DLLEXPORT int jl_binding_resolved_p(jl_module_t *m, jl_sym_t *var) { jl_binding_t *b = (jl_binding_t*)ptrhash_get(&m->bindings, var); return b != HT_NOTFOUND && b->owner != NULL; } JL_DLLEXPORT jl_value_t *jl_get_global(jl_module_t *m, jl_sym_t *var) { jl_binding_t *b = jl_get_binding(m, var); if (b == NULL) return NULL; if (b->deprecated) jl_binding_deprecation_warning(m, b); return b->value; } JL_DLLEXPORT void jl_set_global(jl_module_t *m, jl_sym_t *var, jl_value_t *val) { jl_binding_t *bp = jl_get_binding_wr(m, var, 1); if (!bp->constp) { bp->value = val; jl_gc_wb(m, val); } } JL_DLLEXPORT void jl_set_const(jl_module_t *m, jl_sym_t *var, jl_value_t *val) { jl_binding_t *bp = jl_get_binding_wr(m, var, 1); if (!bp->constp) { bp->value = val; bp->constp = 1; jl_gc_wb(m, val); } } JL_DLLEXPORT int jl_is_const(jl_module_t *m, jl_sym_t *var) { jl_binding_t *b = jl_get_binding(m, var); return b && b->constp; } // set the deprecated flag for a binding: // 0=not deprecated, 1=renamed, 2=moved to another package JL_DLLEXPORT void jl_deprecate_binding(jl_module_t *m, jl_sym_t *var, int flag) { jl_binding_t *b = jl_get_binding(m, var); if (b) b->deprecated = flag; } JL_DLLEXPORT int jl_is_binding_deprecated(jl_module_t *m, jl_sym_t *var) { jl_binding_t *b = jl_get_binding(m, var); return b && b->deprecated; } extern const char *jl_filename; extern int jl_lineno; char dep_message_prefix[] = "_dep_message_"; jl_binding_t *jl_get_dep_message_binding(jl_module_t *m, jl_binding_t *deprecated_binding) { size_t prefix_len = strlen(dep_message_prefix); size_t name_len = strlen(jl_symbol_name(deprecated_binding->name)); char *dep_binding_name = (char*)alloca(prefix_len+name_len+1); memcpy(dep_binding_name, dep_message_prefix, prefix_len); memcpy(dep_binding_name + prefix_len, jl_symbol_name(deprecated_binding->name), name_len); dep_binding_name[prefix_len+name_len] = '\0'; return jl_get_binding(m, jl_symbol(dep_binding_name)); } void jl_binding_deprecation_warning(jl_module_t *m, jl_binding_t *b) { // Only print a warning for deprecated == 1 (renamed). // For deprecated == 2 (moved to a package) the binding is to a function // that throws an error, so we don't want to print a warning too. 
if (b->deprecated == 1 && jl_options.depwarn) { if (jl_options.depwarn != JL_OPTIONS_DEPWARN_ERROR) jl_printf(JL_STDERR, "WARNING: "); jl_binding_t *dep_message_binding = NULL; if (b->owner) { jl_printf(JL_STDERR, "%s.%s is deprecated", jl_symbol_name(b->owner->name), jl_symbol_name(b->name)); dep_message_binding = jl_get_dep_message_binding(b->owner, b); } else { jl_printf(JL_STDERR, "%s is deprecated", jl_symbol_name(b->name)); } if (dep_message_binding && dep_message_binding->value) { if (jl_isa(dep_message_binding->value, (jl_value_t*)jl_string_type)) { jl_uv_puts(JL_STDERR, jl_string_data(dep_message_binding->value), jl_string_len(dep_message_binding->value)); } else { jl_static_show(JL_STDERR, dep_message_binding->value); } } else { jl_value_t *v = b->value; if (v) { if (jl_is_type(v) || jl_is_module(v)) { jl_printf(JL_STDERR, ", use "); jl_static_show(JL_STDERR, v); jl_printf(JL_STDERR, " instead."); } else { jl_methtable_t *mt = jl_gf_mtable(v); if (mt != NULL && (mt->defs.unknown != jl_nothing || jl_isa(v, (jl_value_t*)jl_builtin_type))) { jl_printf(JL_STDERR, ", use "); if (mt->module != jl_core_module) { jl_static_show(JL_STDERR, (jl_value_t*)mt->module); jl_printf(JL_STDERR, "."); } jl_printf(JL_STDERR, "%s", jl_symbol_name(mt->name)); jl_printf(JL_STDERR, " instead."); } } } } jl_printf(JL_STDERR, "\n"); if (jl_options.depwarn != JL_OPTIONS_DEPWARN_ERROR) { if (jl_lineno == 0) { jl_printf(JL_STDERR, " in module %s\n", jl_symbol_name(m->name)); } else { jl_printf(JL_STDERR, " likely near %s:%d\n", jl_filename, jl_lineno); } } if (jl_options.depwarn == JL_OPTIONS_DEPWARN_ERROR) { if (b->owner) jl_errorf("deprecated binding: %s.%s", jl_symbol_name(b->owner->name), jl_symbol_name(b->name)); else jl_errorf("deprecated binding: %s", jl_symbol_name(b->name)); } } } JL_DLLEXPORT void jl_checked_assignment(jl_binding_t *b, jl_value_t *rhs) { if (b->constp && b->value != NULL) { if (!jl_egal(rhs, b->value)) { if (jl_typeof(rhs) != jl_typeof(b->value) || jl_is_type(rhs) /*|| jl_is_function(rhs)*/ || jl_is_module(rhs)) { jl_errorf("invalid redefinition of constant %s", jl_symbol_name(b->name)); } jl_printf(JL_STDERR, "WARNING: redefining constant %s\n", jl_symbol_name(b->name)); } } b->value = rhs; jl_gc_wb_binding(b, rhs); } JL_DLLEXPORT void jl_declare_constant(jl_binding_t *b) { if (b->value != NULL && !b->constp) { jl_errorf("cannot declare %s constant; it already has a value", jl_symbol_name(b->name)); } b->constp = 1; } JL_DLLEXPORT jl_value_t *jl_get_current_module(void) { jl_ptls_t ptls = jl_get_ptls_states(); return (jl_value_t*)ptls->current_module; } JL_DLLEXPORT void jl_set_current_module(jl_value_t *m) { jl_ptls_t ptls = jl_get_ptls_states(); assert(jl_typeis(m, jl_module_type)); ptls->current_module = (jl_module_t*)m; } JL_DLLEXPORT jl_value_t *jl_module_usings(jl_module_t *m) { jl_array_t *a = jl_alloc_array_1d(jl_array_any_type, 0); JL_GC_PUSH1(&a); for(int i=(int)m->usings.len-1; i >= 0; --i) { jl_array_grow_end(a, 1); jl_module_t *imp = (jl_module_t*)m->usings.items[i]; jl_array_ptr_set(a,jl_array_dim0(a)-1, (jl_value_t*)imp); } JL_GC_POP(); return (jl_value_t*)a; } JL_DLLEXPORT jl_value_t *jl_module_names(jl_module_t *m, int all, int imported) { jl_array_t *a = jl_alloc_array_1d(jl_array_symbol_type, 0); JL_GC_PUSH1(&a); size_t i; void **table = m->bindings.table; for (i = 1; i < m->bindings.size; i+=2) { if (table[i] != HT_NOTFOUND) { jl_binding_t *b = (jl_binding_t*)table[i]; int hidden = jl_symbol_name(b->name)[0]=='#'; if ((b->exportp || (imported && b->imported) || 
(b->owner == m && !b->imported && (all || m == jl_main_module))) && (all || (!b->deprecated && !hidden))) { jl_array_grow_end(a, 1); //XXX: change to jl_arrayset if array storage allocation for Array{Symbols,1} changes: jl_array_ptr_set(a, jl_array_dim0(a)-1, (jl_value_t*)b->name); } } } JL_GC_POP(); return (jl_value_t*)a; } JL_DLLEXPORT jl_sym_t *jl_module_name(jl_module_t *m) { return m->name; } JL_DLLEXPORT jl_module_t *jl_module_parent(jl_module_t *m) { return m->parent; } JL_DLLEXPORT uint64_t jl_module_build_id(jl_module_t *m) { return m->build_id; } JL_DLLEXPORT jl_uuid_t jl_module_uuid(jl_module_t* m) { return m->uuid; } // TODO: make this part of the module constructor and read-only? JL_DLLEXPORT void jl_set_module_uuid(jl_module_t *m, jl_uuid_t uuid) { m->uuid = uuid; } int jl_is_submodule(jl_module_t *child, jl_module_t *parent) { while (1) { if (parent == child) return 1; if (child == NULL || child == child->parent) return 0; child = child->parent; } } #ifdef __cplusplus } #endif
246010.c
/*
 * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * This example shows how to retrieve secret value from Greengrass.
 */
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include "greengrasssdk.h"

#define BUFFER_SIZE 512

/* loop read the request bytes into buffer. */
gg_error loop_request_read(gg_request ggreq, void *buffer, size_t buffer_size,
                           size_t *total_read) {
    gg_error err = GGE_SUCCESS;
    uint8_t *read_index = (uint8_t*)buffer;
    size_t remaining_buf_size = buffer_size;
    size_t amount_read = 0;

    do {
        err = gg_request_read(ggreq, read_index, remaining_buf_size, &amount_read);
        if(err) {
            gg_log(GG_LOG_ERROR, "gg_request_read had an error");
            goto cleanup;
        }
        *total_read += amount_read;
        read_index += amount_read;
        remaining_buf_size -= amount_read;
    } while(amount_read);

cleanup:
    return err;
}

gg_error get_secret_value() {
    gg_error err = GGE_SUCCESS;
    gg_request ggreq = NULL;
    size_t amount_read = 0;
    struct gg_request_result result;
    char read_buf[BUFFER_SIZE];
    const char secret_id[] = "foo"; // Replace with the actual secret id

    memset(read_buf, 0, BUFFER_SIZE);

    err = gg_request_init(&ggreq);
    if(err) {
        gg_log(GG_LOG_ERROR, "Failed to initialize request");
        goto done;
    }

    err = gg_get_secret_value(ggreq, secret_id, NULL, NULL, &result);
    if(err) {
        gg_log(GG_LOG_ERROR, "gg_get_secret_value failed with err %d", err);
        goto done;
    }

    gg_log(GG_LOG_INFO, "gg_get_secret_value had result request_status %d",
           result.request_status);

    if(result.request_status != GG_REQUEST_SUCCESS) {
        // get_secret_value failed, reads error response
        err = loop_request_read(ggreq, read_buf, BUFFER_SIZE, &amount_read);
        if(err) {
            gg_log(GG_LOG_ERROR,
                   "Failed to read get_secret_value error response. amount_read(%zu), READ_BUFFER_SIZE(%zu), err(%d)",
                   amount_read, BUFFER_SIZE, err);
            goto cleanup;
        }
        gg_log(GG_LOG_ERROR, "get_secret_value failed. error message: %.*s",
               (int)amount_read, read_buf);
    } else {
        // get_secret_value succeeded, reads success response
        err = loop_request_read(ggreq, read_buf, BUFFER_SIZE, &amount_read);
        if(err) {
            gg_log(GG_LOG_ERROR,
                   "Failed to read secret value. amount_read(%zu), READ_BUFFER_SIZE(%zu), err(%d)",
                   amount_read, BUFFER_SIZE, err);
            goto cleanup;
        }
        gg_log(GG_LOG_INFO, "get_secret_value succeeded. response: %.*s",
               (int)amount_read, read_buf);
    }

cleanup:
    gg_request_close(ggreq);
done:
    return err;
}

void handler(const gg_lambda_context *cxt) {
    /* cxt is not used. */
    (void) cxt;
    get_secret_value();
}

int main() {
    gg_error err = GGE_SUCCESS;

    err = gg_global_init(0);
    if(err) {
        gg_log(GG_LOG_ERROR, "gg_global_init failed %d", err);
        goto cleanup;
    }

    /* start the runtime in blocking mode. This blocks forever. */
    gg_runtime_start(handler, 0);

cleanup:
    return -1;
}
261756.c
/* Return 8 when the program receives 8 arguments, 1 when it receives 4,
 * and 0 for any other argument count. */
int check_args(int argc)
{
	if (argc == 8)
	{
		return (8);
	}
	else if (argc == 4)
	{
		return (1);
	}
	return (0);
}
498422.c
// Current libev version: 4.24
#ifdef _LIBEV

// libev produces many warnings which isn't really appropriate for us to
// address since it is 3rd party code that could be overwritten at any time
// with a new version
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4013 4068 4100 4101 4127 4133 4189 4244 4245 4456 4457 4706 4996)
#else
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcomment"
#pragma GCC diagnostic ignored "-Wold-style-declaration"
#pragma GCC diagnostic ignored "-Wparentheses"
#pragma GCC diagnostic ignored "-Wsign-compare"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wunused-value"
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wunused-result"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wimplicit-function-declaration"

#ifdef __MINGW__
#include <time.h>
struct timespec {
    time_t tv_sec;
    long int tv_nsec;
};
#endif // __MINGW__

#endif

//#define EV_STANDALONE 1
//config.h for ev.c is generated by cmake
#include "ev.c"

#ifdef _MSC_VER
#pragma warning(pop)
#else
#pragma GCC diagnostic pop
#endif

#endif // _LIBEV
397329.c
/* * FreeRTOS V202111.00 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * https://www.FreeRTOS.org * https://github.com/FreeRTOS * */ /*! @file binary_semaphore_utest.c */ #include "../queue_utest_common.h" #include "mock_fake_port.h" /* Queue includes */ #include "FreeRTOS.h" #include "FreeRTOSConfig.h" #include "semphr.h" /* =============================== CONSTANTS =============================== */ /* ============================ GLOBAL VARIABLES =========================== */ /* Used to share a semaphore handle between a test case and callback */ static SemaphoreHandle_t xSemaphoreHandleStatic; /* ========================== CALLBACK FUNCTIONS =========================== */ /* ============================= Unity Fixtures ============================= */ void setUp( void ) { commonSetUp(); } void tearDown( void ) { commonTearDown(); } void suiteSetUp() { commonSuiteSetUp(); } int suiteTearDown( int numFailures ) { return commonSuiteTearDown( numFailures ); } /* ========================== Helper functions =========================== */ /* ============================= Test Cases ============================== */ /** * @brief Test xSemaphoreTake with a Binary Semaphore * @details Create a binary semaphore using xSemaphoreCreateBinary * and verify that an immediate call to xSemaphoreTake fails. * @coverage xQueueSemaphoreTake */ void test_macro_xSemaphoreTake_xSemaphoreCreateBinary_fail( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); /* validate returned semaphore handle */ TEST_ASSERT_NOT_EQUAL( NULL, xSemaphore ); TEST_ASSERT_EQUAL( QUEUE_T_SIZE, getLastMallocSize() ); /* Verify that an immediate xSemaphoreTake operation fails */ TEST_ASSERT_EQUAL( pdFALSE, xSemaphoreTake( xSemaphore, 0 ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreGive with xSemaphoreCreateBinary * @details Create a binary semaphore using xSemaphoreCreateBinary * and verify that an immediate call to xSemaphoreGive succeeds. 
* @coverage xQueueGenericSend */ void test_macro_xSemaphoreGive_xSemaphoreCreateBinary_success( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); /* validate returned semaphore handle */ TEST_ASSERT_NOT_EQUAL( NULL, xSemaphore ); TEST_ASSERT_EQUAL( QUEUE_T_SIZE, getLastMallocSize() ); /* Verify that an immediate xSemaphoreGive operation succeeds */ TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreGive( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @deprecated * @brief Test xSemaphoreTake with vSemaphoreCreateBinary * @details Create a semaphore using vSemaphoreCreateBinary and verify that a * subsequent call to xSemaphoreTake succeeds. * @coverage xQueueSemaphoreTake */ void test_macro_xSemaphoreTake_vSemaphoreCreateBinary_success( void ) { SemaphoreHandle_t xSemaphore = NULL; vSemaphoreCreateBinary( xSemaphore ); /* validate returned semaphore handle */ TEST_ASSERT_NOT_EQUAL( NULL, xSemaphore ); TEST_ASSERT_EQUAL( QUEUE_T_SIZE, getLastMallocSize() ); /* Verify that an immediate xSemaphoreTake operation succeeds */ TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreTake( xSemaphore, 0 ) ); vSemaphoreDelete( xSemaphore ); } /** * @deprecated * @brief Test xSemaphoreGive with vSemaphoreCreateBinary * @details Create a semaphore using vSemaphoreCreateBinary and verify that a * subsequent call to xSemaphoreGive fails. * @coverage xQueueGenericSend */ void test_macro_xSemaphoreGive_vSemaphoreCreateBinary_fail( void ) { SemaphoreHandle_t xSemaphore = NULL; vSemaphoreCreateBinary( xSemaphore ); /* validate returned semaphore handle */ TEST_ASSERT_NOT_EQUAL( NULL, xSemaphore ); TEST_ASSERT_EQUAL( QUEUE_T_SIZE, getLastMallocSize() ); /* Verify that an immediate xSemaphoreGive operation fails */ TEST_ASSERT_EQUAL( pdFALSE, xSemaphoreGive( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreGive and xSemaphoreTake with xSemaphoreCreateBinary * @details Create a binary semaphore using xSemaphoreCreateBinary * and verify that an immediate call to xSemaphoreGive succeeds and a subsequent * call to xSemaphoreTake succeeds. * @coverage xQueueGenericSend xQueueSemaphoreTake */ void test_macro_xSemaphoreGive_xSemaphoreTake_success( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); /* validate returned semaphore handle */ TEST_ASSERT_NOT_EQUAL( NULL, xSemaphore ); TEST_ASSERT_EQUAL( QUEUE_T_SIZE, getLastMallocSize() ); /* Verify that an immediate xSemaphoreGive operation succeeds */ TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreGive( xSemaphore ) ); /* Verify that a subsequent xSemaphoreTake operation succeeds */ TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreTake( xSemaphore, 0 ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreGive multiple times on a Binary Semaphore * @details Create a binary semaphore using xSemaphoreCreateBinary * and verify that an immediate call to xSemaphoreGive succeeds and a subsequent * call to xSemaphoreGive fails. 
* @coverage xQueueGenericSend */ void test_macro_xSemaphoreGive_multiple_fail( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); /* validate returned semaphore handle */ TEST_ASSERT_NOT_EQUAL( NULL, xSemaphore ); TEST_ASSERT_EQUAL( QUEUE_T_SIZE, getLastMallocSize() ); /* Verify that an immediate xSemaphoreGive operation succeeds */ TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreGive( xSemaphore ) ); /* Verify that the second xSemaphoreGive operation fails */ TEST_ASSERT_EQUAL( pdFALSE, xSemaphoreGive( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreTake multiple times on a Binary Semaphore * @details Create a binary semaphore using xSemaphoreCreateBinary, * verify that an immediate call to xSemaphoreGive succeeds, a subsequent * call to xSemaphoreTake succeds, but a second call to xSemaphoreTake fails. * @coverage xQueueSemaphoreTake */ void test_macro_xSemaphoreTake_multiple_fail( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); /* validate returned semaphore handle */ TEST_ASSERT_NOT_EQUAL( NULL, xSemaphore ); TEST_ASSERT_EQUAL( QUEUE_T_SIZE, getLastMallocSize() ); /* Verify that an immediate xSemaphoreGive operation succeeds */ TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreGive( xSemaphore ) ); /* Verify that a subsequent xSemaphoreTake operation succeeds */ TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreTake( xSemaphore, 0 ) ); /* Verify that a second xSemaphoreTake operation fails */ TEST_ASSERT_EQUAL( pdFALSE, xSemaphoreTake( xSemaphore, 0 ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test uxSemaphoreGetCount with a Binary Semaphore * @details Create a binary semaphore using vSemaphoreCreateBinary. * validate the return value of uxSemaphoreGetCount(), * call xSemaphoreTake() and validate the return value of uxSemaphoreGetCount() * @coverage uxQueueMessagesWaiting */ void test_macro_uxSemaphoreGetCount( void ) { SemaphoreHandle_t xSemaphore = NULL; vSemaphoreCreateBinary( xSemaphore ); TEST_ASSERT_EQUAL( B_SEMPHR_AVAILABLE, uxSemaphoreGetCount( xSemaphore ) ); ( void ) xSemaphoreTake( xSemaphore, 0 ); TEST_ASSERT_EQUAL( B_SEMPHR_TAKEN, uxSemaphoreGetCount( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreTakeFromISR with a Binary Semaphore * @coverage xQueueReceiveFromISR **/ void test_macro_xSemaphoreTakeFromISR_success( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); vFakePortAssertIfInterruptPriorityInvalid_Expect(); /* Give the Binary Semaphore */ ( void ) xSemaphoreGive( xSemaphore ); TEST_ASSERT_EQUAL( B_SEMPHR_AVAILABLE, uxSemaphoreGetCount( xSemaphore ) ); TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreTakeFromISR( xSemaphore, NULL ) ); TEST_ASSERT_EQUAL( B_SEMPHR_TAKEN, uxSemaphoreGetCount( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief xSemaphoreGiveFromISR with an empty queue * @coverage xQueueGiveFromISR */ void test_macro_xSemaphoreGiveFromISR_success( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); vFakePortAssertIfInterruptPriorityInvalid_Expect(); TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreGiveFromISR( xSemaphore, NULL ) ); TEST_ASSERT_EQUAL( B_SEMPHR_AVAILABLE, uxSemaphoreGetCount( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /*! 
* @brief xSemaphoreGiveFromISR with a full queue * @coverage xQueueGiveFromISR */ void test_macro_xSemaphoreGiveFromISR_fail( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); vFakePortAssertIfInterruptPriorityInvalid_Expect(); TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreGiveFromISR( xSemaphore, NULL ) ); vFakePortAssertIfInterruptPriorityInvalid_Expect(); TEST_ASSERT_EQUAL( errQUEUE_FULL, xSemaphoreGiveFromISR( xSemaphore, NULL ) ); TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreTake( xSemaphore, 0 ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreGiveFromISR with a higher priority task waiting and a null pointer for pxHigherPriorityTaskWoken * @details Test xSemaphoreGiveFromISR with a higher priority task waiting and * verifies that a null pxHigherPriorityTaskWoken is handled correctly. * @coverage xQueueGiveFromISR */ void test_macro_xSemaphoreGiveFromISR_high_priority_pending_null_ptr( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); vFakePortAssertIfInterruptPriorityInvalid_Expect(); /* Insert an item into the event list */ td_task_setFakeTaskPriority( DEFAULT_PRIORITY + 1 ); td_task_addFakeTaskWaitingToReceiveFromQueue( xSemaphore ); /* Give the Binary Semaphore */ TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreGiveFromISR( xSemaphore, NULL ) ); TEST_ASSERT_EQUAL( pdTRUE, td_task_getYieldPending() ); TEST_ASSERT_EQUAL( B_SEMPHR_AVAILABLE, uxSemaphoreGetCount( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreGiveFromISR with a higher priority task waiting * @details Test xSemaphoreGiveFromISR with a higher priority task waiting and * verify that xHigherPriorityTaskWoken is set accordingly. * @coverage xQueueGiveFromISR */ void test_macro_xSemaphoreGiveFromISR_high_priority_pending( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); vFakePortAssertIfInterruptPriorityInvalid_Expect(); /* Insert an item into the event list */ td_task_setFakeTaskPriority( DEFAULT_PRIORITY + 1 ); td_task_addFakeTaskWaitingToReceiveFromQueue( xSemaphore ); BaseType_t xHigherPriorityTaskWoken = pdFALSE; /* Give the semaphore */ TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken ) ); TEST_ASSERT_EQUAL( pdTRUE, xHigherPriorityTaskWoken ); TEST_ASSERT_EQUAL( pdTRUE, td_task_getYieldPending() ); TEST_ASSERT_EQUAL( B_SEMPHR_AVAILABLE, uxSemaphoreGetCount( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreGiveFromISR with a lower priority task waiting * @details Test xSemaphoreGiveFromISR with a lower priority task waiting and * verify that xHigherPriorityTaskWoken is not modified. 
* @coverage xQueueGiveFromISR */ void test_macro_xSemaphoreGiveFromISR_low_priority_pending( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); vFakePortAssertIfInterruptPriorityInvalid_Expect(); /* Insert an item into the event list */ td_task_setFakeTaskPriority( DEFAULT_PRIORITY - 1 ); td_task_addFakeTaskWaitingToReceiveFromQueue( xSemaphore ); BaseType_t xHigherPriorityTaskWoken = pdFALSE; /* Give the semaphore */ TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken ) ); TEST_ASSERT_EQUAL( pdFALSE, xHigherPriorityTaskWoken ); TEST_ASSERT_EQUAL( pdFALSE, td_task_getYieldPending() ); TEST_ASSERT_EQUAL( B_SEMPHR_AVAILABLE, uxSemaphoreGetCount( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreGiveFromISR with no tasks waiting * @details Test xSemaphoreGiveFromISR with no tasks waiting and verify that xHigherPriorityTaskWoken is not modified. * @coverage xQueueGiveFromISR */ void test_macro_xSemaphoreGiveFromISR_no_pending( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); vFakePortAssertIfInterruptPriorityInvalid_Expect(); BaseType_t xHigherPriorityTaskWoken = pdFALSE; /* Give the semaphore */ TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken ) ); TEST_ASSERT_EQUAL( pdFALSE, xHigherPriorityTaskWoken ); TEST_ASSERT_EQUAL( pdFALSE, td_task_getYieldPending() ); TEST_ASSERT_EQUAL( B_SEMPHR_AVAILABLE, uxSemaphoreGetCount( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreGiveFromISR on a semaphore that is locked * @coverage xQueueGiveFromISR */ void test_xSemaphoreGiveFromISR_locked( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); /* Set private lock counters */ vSetQueueRxLock( xSemaphore, queueLOCKED_UNMODIFIED ); vSetQueueTxLock( xSemaphore, queueLOCKED_UNMODIFIED ); vFakePortAssertIfInterruptPriorityInvalid_Expect(); TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreGiveFromISR( xSemaphore, NULL ) ); /* Verify that the cRxLock counter has not changed */ TEST_ASSERT_EQUAL( queueLOCKED_UNMODIFIED, cGetQueueRxLock( xSemaphore ) ); /* Verify that the cTxLock counter has been incremented */ TEST_ASSERT_EQUAL( queueLOCKED_UNMODIFIED + 1, cGetQueueTxLock( xSemaphore ) ); TEST_ASSERT_EQUAL( B_SEMPHR_AVAILABLE, uxSemaphoreGetCount( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreGiveFromISR on a semaphore that is locked and cRxLock overflows. 
* @coverage xQueueGiveFromISR */ void test_xSemaphoreGiveFromISR_locked_overflow( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); /* Set private lock counters */ vSetQueueRxLock( xSemaphore, INT8_MAX ); vSetQueueTxLock( xSemaphore, INT8_MAX ); vFakePortAssertIfInterruptPriorityInvalid_Expect(); /* Expect an assertion since the cTxLock value has overflowed */ fakeAssertExpectFail(); TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreGiveFromISR( xSemaphore, NULL ) ); /* Verify that the cRxLock counter has not changed */ TEST_ASSERT_EQUAL( INT8_MAX, cGetQueueRxLock( xSemaphore ) ); /* Verify that the cTxLock counter has been incremented */ TEST_ASSERT_EQUAL( INT8_MIN, cGetQueueTxLock( xSemaphore ) ); TEST_ASSERT_EQUAL( true, fakeAssertGetFlagAndClear() ); TEST_ASSERT_EQUAL( B_SEMPHR_AVAILABLE, uxSemaphoreGetCount( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreTake with an occupied semaphore with higher priority tasks waiting * @coverage xQueueSemaphoreTake */ void test_xSemaphoreTake_tasks_waiting_higher_priority( void ) { /* Create a new binary semaphore */ SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); ( void ) xSemaphoreGive( xSemaphore ); /* Insert an item into the event list */ td_task_setFakeTaskPriority( DEFAULT_PRIORITY + 1 ); td_task_addFakeTaskWaitingToSendToQueue( xSemaphore ); /* take the semaphore */ TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreTake( xSemaphore, 0 ) ); TEST_ASSERT_EQUAL( 1, td_task_getYieldCount() ); TEST_ASSERT_EQUAL( 1, td_task_getCount_vPortYieldWithinAPI() ); TEST_ASSERT_EQUAL( B_SEMPHR_TAKEN, uxSemaphoreGetCount( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreTake with an occupied semaphore with an equal priority task waiting * @coverage xQueueSemaphoreTake */ void test_xSemaphoreTake_tasks_waiting_equal_priority( void ) { /* Create a new binary semaphore */ SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); ( void ) xSemaphoreGive( xSemaphore ); /* Insert an item into the event list */ td_task_setFakeTaskPriority( DEFAULT_PRIORITY ); td_task_addFakeTaskWaitingToSendToQueue( xSemaphore ); /* take the semaphore */ TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreTake( xSemaphore, 0 ) ); TEST_ASSERT_EQUAL( 0, td_task_getYieldCount() ); TEST_ASSERT_EQUAL( 0, td_task_getCount_vPortYieldWithinAPI() ); TEST_ASSERT_EQUAL( B_SEMPHR_TAKEN, uxSemaphoreGetCount( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreTake with an occupied semaphore with lower priority tasks waiting. * @coverage xQueueSemaphoreTake */ void test_xSemaphoreTake_tasks_waiting_lower_priority( void ) { /* Create a new binary semaphore */ SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); ( void ) xSemaphoreGive( xSemaphore ); /* Insert an item into the event list */ td_task_setFakeTaskPriority( DEFAULT_PRIORITY - 1 ); td_task_addFakeTaskWaitingToSendToQueue( xSemaphore ); /* take the semaphore */ TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreTake( xSemaphore, 0 ) ); TEST_ASSERT_EQUAL( 0, td_task_getYieldCount() ); TEST_ASSERT_EQUAL( 0, td_task_getCount_vPortYieldWithinAPI() ); TEST_ASSERT_EQUAL( B_SEMPHR_TAKEN, uxSemaphoreGetCount( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreTake with taskSCHEDULER_SUSPENDED and timeout=10 * @details This should cause xSemaphoreTake to configASSERT because it would * block forever when the semaphore is empty. 
* @coverage xQueueSemaphoreTake */ void test_xSemaphoreTake_blocking_suspended_assert( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); fakeAssertExpectFail(); vTaskSuspendAll_Stub( td_task_vTaskSuspendAllStubNoCheck ); td_task_setSchedulerState( taskSCHEDULER_SUSPENDED ); TEST_ASSERT_EQUAL( pdFALSE, xSemaphoreTake( xSemaphore, TICKS_TO_WAIT ) ); TEST_ASSERT_EQUAL( TICKS_TO_WAIT, td_task_getYieldCount() ); TEST_ASSERT_EQUAL( TICKS_TO_WAIT, td_task_getCount_vPortYieldWithinAPI() ); TEST_ASSERT_EQUAL( pdTRUE, fakeAssertGetFlagAndClear() ); td_task_setSchedulerState( taskSCHEDULER_RUNNING ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreTake with taskSCHEDULER_SUSPENDED and timeout=0 * @details This should not cause xSemaphoreTake to configASSERT because * xSemaphoreTake is non-blocking when timeout=0. * @coverage xQueueSemaphoreTake */ void test_xSemaphoreTake_nonblocking_suspended_noassert( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); td_task_setSchedulerState( taskSCHEDULER_SUSPENDED ); TEST_ASSERT_EQUAL( pdFALSE, xSemaphoreTake( xSemaphore, 0 ) ); td_task_setSchedulerState( taskSCHEDULER_RUNNING ); vSemaphoreDelete( xSemaphore ); } /** * @brief Callback which calls xSemaphoreGive on xSemaphoreHandleStatic */ static BaseType_t blocking_success_xTaskCheckForTimeOut_cb( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait, int cmock_num_calls ) { BaseType_t xReturnValue = td_task_xTaskCheckForTimeOutStub( pxTimeOut, pxTicksToWait, cmock_num_calls ); if( cmock_num_calls == NUM_CALLS_TO_INTERCEPT ) { ( void ) xSemaphoreGiveFromISR( xSemaphoreHandleStatic, NULL ); } return xReturnValue; } /** * @brief Test xSemaphoreTake in blocking mode with a taken Binary Semaphore * which becomes available while a call to xSemaphoreTake is blocking. * @coverage xQueueSemaphoreTake */ void test_xSemaphoreTake_blocking_success( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); /* Export for blocking_success_xTaskCheckForTimeOut_cb callback */ xSemaphoreHandleStatic = xSemaphore; vFakePortAssertIfInterruptPriorityInvalid_Ignore(); xTaskCheckForTimeOut_Stub( &blocking_success_xTaskCheckForTimeOut_cb ); TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreTake( xSemaphore, TICKS_TO_WAIT ) ); TEST_ASSERT_EQUAL( NUM_CALLS_TO_INTERCEPT, td_task_getYieldCount() ); TEST_ASSERT_EQUAL( NUM_CALLS_TO_INTERCEPT, td_task_getCount_vPortYieldWithinAPI() ); TEST_ASSERT_EQUAL( B_SEMPHR_TAKEN, uxSemaphoreGetCount( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Callback which calls xSemaphoreGive on xSemaphoreHandleStatic when * cmock_num_calls == TICKS_TO_WAIT */ static BaseType_t blocking_last_chance_xTaskCheckForTimeOut_cb( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait, int cmock_num_calls ) { BaseType_t xReturnValue = td_task_xTaskCheckForTimeOutStub( pxTimeOut, pxTicksToWait, cmock_num_calls ); if( cmock_num_calls == TICKS_TO_WAIT ) { ( void ) xSemaphoreGiveFromISR( xSemaphoreHandleStatic, NULL ); return pdTRUE; } return xReturnValue; } /** * @brief Test xSemaphoreTake in blocking mode with a Binary Semaphore that is initially taken, * but becomes available at the end of the blocking time period. 
* @coverage xQueueSemaphoreTake */ void test_xSemaphoreTake_blocking_success_last_chance( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); /* Export for blocking_success_xTaskCheckForTimeOut_cb callback */ xSemaphoreHandleStatic = xSemaphore; vFakePortAssertIfInterruptPriorityInvalid_Expect(); xTaskCheckForTimeOut_Stub( &blocking_last_chance_xTaskCheckForTimeOut_cb ); TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreTake( xSemaphore, TICKS_TO_WAIT ) ); TEST_ASSERT_EQUAL( TICKS_TO_WAIT, td_task_getYieldCount() ); TEST_ASSERT_EQUAL( TICKS_TO_WAIT, td_task_getCount_vPortYieldWithinAPI() ); TEST_ASSERT_EQUAL( B_SEMPHR_TAKEN, uxSemaphoreGetCount( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreTake in blocking mode with a taken binary semaphore * @coverage xQueueSemaphoreTake */ void test_xSemaphoreTake_blocking_timeout( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); TEST_ASSERT_EQUAL( pdFALSE, xSemaphoreTake( xSemaphore, TICKS_TO_WAIT ) ); TEST_ASSERT_EQUAL( TICKS_TO_WAIT, td_task_getYieldCount() ); TEST_ASSERT_EQUAL( TICKS_TO_WAIT, td_task_getCount_vPortYieldWithinAPI() ); vSemaphoreDelete( xSemaphore ); } /** * @brief Test xSemaphoreTake in blocking mode with a taken locked semaphore * @details This test case verifies a situation that should never occur * ( xSemaphoreTake called on a locked semaphore ). * @coverage xQueueSemaphoreTake */ void test_xSemaphoreTake_blocking_locked( void ) { /* Create a new binary semaphore */ SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); /* Set private lock counters */ vSetQueueRxLock( xSemaphore, queueLOCKED_UNMODIFIED ); vSetQueueTxLock( xSemaphore, queueLOCKED_UNMODIFIED ); /* Run xSemaphoreTake in blocking mode with the semaphore locked */ TEST_ASSERT_EQUAL( pdFALSE, xSemaphoreTake( xSemaphore, TICKS_TO_WAIT ) ); TEST_ASSERT_EQUAL( TICKS_TO_WAIT, td_task_getYieldCount() ); TEST_ASSERT_EQUAL( TICKS_TO_WAIT, td_task_getCount_vPortYieldWithinAPI() ); /* Verify that the semaphore is now unlocked */ TEST_ASSERT_EQUAL( queueUNLOCKED, cGetQueueRxLock( xSemaphore ) ); TEST_ASSERT_EQUAL( queueUNLOCKED, cGetQueueTxLock( xSemaphore ) ); vSemaphoreDelete( xSemaphore ); } /** * @brief Callback for test_xSemaphoreTake_blocking_success_locked_no_pending * which adds an item to it's test queue. */ static BaseType_t xSemaphoreTake_xTaskCheckForTimeOutCB( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait, int cmock_num_calls ) { BaseType_t xReturnValue = td_task_xTaskCheckForTimeOutStub( pxTimeOut, pxTicksToWait, cmock_num_calls ); if( cmock_num_calls == NUM_CALLS_TO_INTERCEPT ) { TEST_ASSERT_TRUE( xSemaphoreGiveFromISR( xSemaphoreHandleStatic, NULL ) ); TEST_ASSERT_EQUAL( 1, uxQueueMessagesWaiting( xSemaphoreHandleStatic ) ); } return xReturnValue; } /** * @brief Test a blocking call to xSemaphoreTake with a locked binary semaphore. * @details Test a blocking call to xSemaphoreTake with a locked binary semaphore with no * tasks in the binary semaphore WaitingToReceiveFrom event list. 
* @coverage xQueueSemaphoreTake prvUnlockQueue */ void test_xSemaphoreTake_blocking_success_locked_no_pending( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); vFakePortAssertIfInterruptPriorityInvalid_Ignore(); /* Export for callbacks */ xSemaphoreHandleStatic = xSemaphore; xTaskCheckForTimeOut_Stub( &xSemaphoreTake_xTaskCheckForTimeOutCB ); xTaskResumeAll_Stub( &td_task_xTaskResumeAllStub ); TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreTake( xSemaphore, TICKS_TO_WAIT ) ); TEST_ASSERT_EQUAL( 0, uxSemaphoreGetCount( xSemaphore ) ); TEST_ASSERT_EQUAL( NUM_CALLS_TO_INTERCEPT, td_task_getYieldCount() ); TEST_ASSERT_EQUAL( NUM_CALLS_TO_INTERCEPT, td_task_getCount_vPortYieldWithinAPI() ); vQueueDelete( xSemaphore ); } /** * @brief Callback for xTaskResumeAll used by tests for blocking calls to * xSemaphoreTake */ static BaseType_t xSemaphoreTake_xTaskResumeAllCallback( int cmock_num_calls ) { BaseType_t xReturnValue = td_task_xTaskResumeAllStub( cmock_num_calls ); /* If td_task_xTaskResumeAllStub returns pdTRUE, a higher priority task is pending * Receive from an ISR to block */ if( pdTRUE == xReturnValue ) { if( cmock_num_calls == NUM_CALLS_TO_INTERCEPT ) { TEST_ASSERT_EQUAL( 1, uxSemaphoreGetCount( xSemaphoreHandleStatic ) ); TEST_ASSERT_TRUE( xSemaphoreTakeFromISR( xSemaphoreHandleStatic, NULL ) ); } } return xReturnValue; } /** * @brief Test a blocking call to xSemaphoreTake with a locked binary semaphore. * @details Test a blocking call to xSemaphoreTake with a locked binary semaphore with a * higher priority task in the binary semaphore WaitingToReceiveFrom event list. * @coverage xQueueSemaphoreTake prvUnlockQueue */ void test_xSemaphoreTake_blocking_timeout_locked_high_prio_pending( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); vFakePortAssertIfInterruptPriorityInvalid_Ignore(); /* Export for callbacks */ xSemaphoreHandleStatic = xSemaphore; xTaskCheckForTimeOut_Stub( &xSemaphoreTake_xTaskCheckForTimeOutCB ); xTaskResumeAll_Stub( &xSemaphoreTake_xTaskResumeAllCallback ); td_task_setFakeTaskPriority( DEFAULT_PRIORITY + 1 ); td_task_addFakeTaskWaitingToReceiveFromQueue( xSemaphore ); TEST_ASSERT_EQUAL( pdFALSE, xSemaphoreTake( xSemaphore, TICKS_TO_WAIT ) ); TEST_ASSERT_EQUAL( 0, uxSemaphoreGetCount( xSemaphore ) ); TEST_ASSERT_EQUAL( TICKS_TO_WAIT, td_task_getYieldCount() ); TEST_ASSERT_EQUAL( NUM_CALLS_TO_INTERCEPT + 1, td_task_getCount_YieldFromTaskResumeAll() ); TEST_ASSERT_EQUAL( NUM_CALLS_TO_INTERCEPT - 1, td_task_getCount_vPortYieldWithinAPI() ); TEST_ASSERT_EQUAL( 1, td_task_getCount_vTaskMissedYield() ); vQueueDelete( xSemaphore ); } /** * @brief Test a blocking call to xSemaphoreTake with a locked binary semaphore. * @details Test a blocking call to xSemaphoreTake with a locked binary semaphore with a * lower priority task in the semaphore WaitingToReceiveFrom event list. 
* @coverage xQueueSemaphoreTake prvUnlockQueue */ void test_xSemaphoreTake_blocking_success_locked_low_prio_pending( void ) { SemaphoreHandle_t xSemaphore = xSemaphoreCreateBinary(); vFakePortAssertIfInterruptPriorityInvalid_Ignore(); /* Export for callbacks */ xSemaphoreHandleStatic = xSemaphore; xTaskCheckForTimeOut_Stub( &xSemaphoreTake_xTaskCheckForTimeOutCB ); xTaskResumeAll_Stub( &xSemaphoreTake_xTaskResumeAllCallback ); td_task_setFakeTaskPriority( DEFAULT_PRIORITY - 1 ); td_task_addFakeTaskWaitingToReceiveFromQueue( xSemaphore ); TEST_ASSERT_EQUAL( pdTRUE, xSemaphoreTake( xSemaphore, TICKS_TO_WAIT ) ); TEST_ASSERT_EQUAL( 0, uxSemaphoreGetCount( xSemaphore ) ); TEST_ASSERT_EQUAL( NUM_CALLS_TO_INTERCEPT, td_task_getYieldCount() ); TEST_ASSERT_EQUAL( NUM_CALLS_TO_INTERCEPT, td_task_getCount_vPortYieldWithinAPI() ); vQueueDelete( xSemaphore ); }
178939.c
/*
 * Copyright (c) 1989, 1990, 1991 by the University of Washington
 *
 * For copying and distribution information, please see the file
 * <copyright.h>.
 */

#include <copyright.h>
#include <pfs.h>

/*
 * vl_comp - compare the names of two virtual links
 *
 * VL_COMP compares the names of two links. It returns
 * 0 if they are equal, negative if vl1 < vl2, and positive if
 * vl1 > vl2.
 *
 *    ARGS:   vl1,vl2 - Virtual links to be compared
 *
 * RETURNS:   0 if equal, + if vl1 > vl2, - if vl1 < vl2
 *
 *   NOTES:   Order of significance is as follows. Existence,
 *            name. If names do not exist, then hosttype, host,
 *            native filenametype, native filename. The only time
 *            the name will not exist is if the link is a union link.
 */
int vl_comp(VLINK vl1, VLINK vl2)
{
    int retval;

    if(vl1->name && !vl2->name) return(1);
    if(!vl1->name && vl2->name) return(-1);

    if(vl1->name && vl2->name && (*(vl1->name) || *(vl2->name)))
        return(strcmp(vl1->name, vl2->name));

    retval = strcmp(vl1->hosttype, vl2->hosttype);
    if(!retval) retval = strcmp(vl1->host, vl2->host);
    if(!retval) retval = strcmp(vl1->nametype, vl2->nametype);
    if(!retval) retval = strcmp(vl1->filename, vl2->filename);

    return(retval);
}

/*
 * vl_equal - compare the values of two virtual links
 *
 * VL_EQUAL compares the values of two links. It returns
 * 1 if all important fields are the same, and 0 otherwise.
 *
 *    ARGS:   vl1,vl2 - Virtual links to be compared
 *
 * RETURNS:   1 if equal, 0 if not equal
 *
 */
int vl_equal(VLINK vl1, VLINK vl2)
{
    return strcmp(vl1->name, vl2->name) == 0 &&
           vl1->linktype == vl2->linktype &&
           strcmp(vl1->type, vl2->type) == 0 &&
           strcmp(vl1->hosttype, vl2->hosttype) == 0 &&
           strcmp(vl1->host, vl2->host) == 0 &&
           strcmp(vl1->nametype, vl2->nametype) == 0 &&
           strcmp(vl1->filename, vl2->filename) == 0 &&
           vl1->version == vl2->version &&
           vl1->f_magic_no == vl2->f_magic_no;
}
964990.c
// SPDX-License-Identifier: GPL-2.0
/*
 * N64 IRQ
 *
 * Copyright (C) 2021 Lauri Kasanen
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <asm/irq_cpu.h>

void __init arch_init_irq(void)
{
	mips_cpu_irq_init();
}
650974.c
/* * linux/arch/arm/mach-omap2/id.c * * OMAP2 CPU identification code * * Copyright (C) 2005 Nokia Corporation * Written by Tony Lindgren <[email protected]> * * Copyright (C) 2009-11 Texas Instruments * Added OMAP4 support - Santosh Shilimkar <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/random.h> #include <linux/slab.h> #ifdef CONFIG_SOC_BUS #include <linux/sys_soc.h> #endif #include <asm/cputype.h> #include "common.h" #include "id.h" #include "soc.h" #include "control.h" #define OMAP4_SILICON_TYPE_STANDARD 0x01 #define OMAP4_SILICON_TYPE_PERFORMANCE 0x02 #define OMAP_SOC_MAX_NAME_LENGTH 16 static unsigned int omap_revision; static char soc_name[OMAP_SOC_MAX_NAME_LENGTH]; static char soc_rev[OMAP_SOC_MAX_NAME_LENGTH]; u32 omap_features; unsigned int omap_rev(void) { return omap_revision; } EXPORT_SYMBOL(omap_rev); int omap_type(void) { static u32 val = OMAP2_DEVICETYPE_MASK; if (val < OMAP2_DEVICETYPE_MASK) return val; if (soc_is_omap24xx()) { val = omap_ctrl_readl(OMAP24XX_CONTROL_STATUS); } else if (soc_is_ti81xx()) { val = omap_ctrl_readl(TI81XX_CONTROL_STATUS); } else if (soc_is_am33xx() || soc_is_am43xx()) { val = omap_ctrl_readl(AM33XX_CONTROL_STATUS); } else if (soc_is_omap34xx()) { val = omap_ctrl_readl(OMAP343X_CONTROL_STATUS); } else if (soc_is_omap44xx()) { val = omap_ctrl_readl(OMAP4_CTRL_MODULE_CORE_STATUS); } else if (soc_is_omap54xx() || soc_is_dra7xx()) { val = omap_ctrl_readl(OMAP5XXX_CONTROL_STATUS); val &= OMAP5_DEVICETYPE_MASK; val >>= 6; goto out; } else { pr_err("Cannot detect omap type!\n"); goto out; } val &= OMAP2_DEVICETYPE_MASK; val >>= 8; out: return val; } EXPORT_SYMBOL(omap_type); /*----------------------------------------------------------------------------*/ #define OMAP_TAP_IDCODE 0x0204 #define OMAP_TAP_DIE_ID_0 0x0218 #define OMAP_TAP_DIE_ID_1 0x021C #define OMAP_TAP_DIE_ID_2 0x0220 #define OMAP_TAP_DIE_ID_3 0x0224 #define OMAP_TAP_DIE_ID_44XX_0 0x0200 #define OMAP_TAP_DIE_ID_44XX_1 0x0208 #define OMAP_TAP_DIE_ID_44XX_2 0x020c #define OMAP_TAP_DIE_ID_44XX_3 0x0210 #define read_tap_reg(reg) readl_relaxed(tap_base + (reg)) struct omap_id { u16 hawkeye; /* Silicon type (Hawkeye id) */ u8 dev; /* Device type from production_id reg */ u32 type; /* Combined type id copied to omap_revision */ }; /* Register values to detect the OMAP version */ static struct omap_id omap_ids[] __initdata = { { .hawkeye = 0xb5d9, .dev = 0x0, .type = 0x24200024 }, { .hawkeye = 0xb5d9, .dev = 0x1, .type = 0x24201024 }, { .hawkeye = 0xb5d9, .dev = 0x2, .type = 0x24202024 }, { .hawkeye = 0xb5d9, .dev = 0x4, .type = 0x24220024 }, { .hawkeye = 0xb5d9, .dev = 0x8, .type = 0x24230024 }, { .hawkeye = 0xb68a, .dev = 0x0, .type = 0x24300024 }, }; static void __iomem *tap_base; static u16 tap_prod_id; void omap_get_die_id(struct omap_die_id *odi) { if (soc_is_omap44xx() || soc_is_omap54xx() || soc_is_dra7xx()) { odi->id_0 = read_tap_reg(OMAP_TAP_DIE_ID_44XX_0); odi->id_1 = read_tap_reg(OMAP_TAP_DIE_ID_44XX_1); odi->id_2 = read_tap_reg(OMAP_TAP_DIE_ID_44XX_2); odi->id_3 = read_tap_reg(OMAP_TAP_DIE_ID_44XX_3); return; } odi->id_0 = read_tap_reg(OMAP_TAP_DIE_ID_0); odi->id_1 = read_tap_reg(OMAP_TAP_DIE_ID_1); odi->id_2 = read_tap_reg(OMAP_TAP_DIE_ID_2); odi->id_3 = read_tap_reg(OMAP_TAP_DIE_ID_3); } static int __init 
omap_feed_randpool(void) { struct omap_die_id odi; /* Throw the die ID into the entropy pool at boot */ omap_get_die_id(&odi); add_device_randomness(&odi, sizeof(odi)); return 0; } omap_device_initcall(omap_feed_randpool); void __init omap2xxx_check_revision(void) { int i, j; u32 idcode, prod_id; u16 hawkeye; u8 dev_type, rev; struct omap_die_id odi; idcode = read_tap_reg(OMAP_TAP_IDCODE); prod_id = read_tap_reg(tap_prod_id); hawkeye = (idcode >> 12) & 0xffff; rev = (idcode >> 28) & 0x0f; dev_type = (prod_id >> 16) & 0x0f; omap_get_die_id(&odi); pr_debug("OMAP_TAP_IDCODE 0x%08x REV %i HAWKEYE 0x%04x MANF %03x\n", idcode, rev, hawkeye, (idcode >> 1) & 0x7ff); pr_debug("OMAP_TAP_DIE_ID_0: 0x%08x\n", odi.id_0); pr_debug("OMAP_TAP_DIE_ID_1: 0x%08x DEV_REV: %i\n", odi.id_1, (odi.id_1 >> 28) & 0xf); pr_debug("OMAP_TAP_DIE_ID_2: 0x%08x\n", odi.id_2); pr_debug("OMAP_TAP_DIE_ID_3: 0x%08x\n", odi.id_3); pr_debug("OMAP_TAP_PROD_ID_0: 0x%08x DEV_TYPE: %i\n", prod_id, dev_type); /* Check hawkeye ids */ for (i = 0; i < ARRAY_SIZE(omap_ids); i++) { if (hawkeye == omap_ids[i].hawkeye) break; } if (i == ARRAY_SIZE(omap_ids)) { printk(KERN_ERR "Unknown OMAP CPU id\n"); return; } for (j = i; j < ARRAY_SIZE(omap_ids); j++) { if (dev_type == omap_ids[j].dev) break; } if (j == ARRAY_SIZE(omap_ids)) { pr_err("Unknown OMAP device type. Handling it as OMAP%04x\n", omap_ids[i].type >> 16); j = i; } sprintf(soc_name, "OMAP%04x", omap_rev() >> 16); sprintf(soc_rev, "ES%x", (omap_rev() >> 12) & 0xf); pr_info("%s", soc_name); if ((omap_rev() >> 8) & 0x0f) pr_cont("%s", soc_rev); pr_cont("\n"); } #define OMAP3_SHOW_FEATURE(feat) \ if (omap3_has_ ##feat()) \ n += scnprintf(buf + n, sizeof(buf) - n, #feat " "); static void __init omap3_cpuinfo(void) { const char *cpu_name; char buf[64]; int n = 0; memset(buf, 0, sizeof(buf)); /* * OMAP3430 and OMAP3530 are assumed to be same. * * OMAP3525, OMAP3515 and OMAP3503 can be detected only based * on available features. Upon detection, update the CPU id * and CPU class bits. */ if (soc_is_omap3630()) { if (omap3_has_iva() && omap3_has_sgx()) { cpu_name = (omap3_has_isp()) ? "OMAP3630/DM3730" : "OMAP3621"; } else if (omap3_has_iva()) { cpu_name = "DM3725"; } else if (omap3_has_sgx()) { cpu_name = "OMAP3615/AM3715"; } else { cpu_name = (omap3_has_isp()) ? "AM3703" : "OMAP3611"; } } else if (soc_is_am35xx()) { cpu_name = (omap3_has_sgx()) ? 
"AM3517" : "AM3505"; } else if (soc_is_ti816x()) { cpu_name = "TI816X"; } else if (soc_is_am335x()) { cpu_name = "AM335X"; } else if (soc_is_am437x()) { cpu_name = "AM437x"; } else if (soc_is_ti814x()) { cpu_name = "TI814X"; } else if (omap3_has_iva() && omap3_has_sgx()) { /* OMAP3430, OMAP3525, OMAP3515, OMAP3503 devices */ cpu_name = "OMAP3430/3530"; } else if (omap3_has_iva()) { cpu_name = "OMAP3525"; } else if (omap3_has_sgx()) { cpu_name = "OMAP3515"; } else { cpu_name = "OMAP3503"; } scnprintf(soc_name, sizeof(soc_name), "%s", cpu_name); /* Print verbose information */ n += scnprintf(buf, sizeof(buf) - n, "%s %s (", soc_name, soc_rev); OMAP3_SHOW_FEATURE(l2cache); OMAP3_SHOW_FEATURE(iva); OMAP3_SHOW_FEATURE(sgx); OMAP3_SHOW_FEATURE(neon); OMAP3_SHOW_FEATURE(isp); OMAP3_SHOW_FEATURE(192mhz_clk); if (*(buf + n - 1) == ' ') n--; n += scnprintf(buf + n, sizeof(buf) - n, ")\n"); pr_info("%s", buf); } #define OMAP3_CHECK_FEATURE(status,feat) \ if (((status & OMAP3_ ##feat## _MASK) \ >> OMAP3_ ##feat## _SHIFT) != FEAT_ ##feat## _NONE) { \ omap_features |= OMAP3_HAS_ ##feat; \ } void __init omap3xxx_check_features(void) { u32 status; omap_features = 0; status = omap_ctrl_readl(OMAP3_CONTROL_OMAP_STATUS); OMAP3_CHECK_FEATURE(status, L2CACHE); OMAP3_CHECK_FEATURE(status, IVA); OMAP3_CHECK_FEATURE(status, SGX); OMAP3_CHECK_FEATURE(status, NEON); OMAP3_CHECK_FEATURE(status, ISP); if (soc_is_omap3630()) omap_features |= OMAP3_HAS_192MHZ_CLK; if (soc_is_omap3430() || soc_is_omap3630()) omap_features |= OMAP3_HAS_IO_WAKEUP; if (soc_is_omap3630() || omap_rev() == OMAP3430_REV_ES3_1 || omap_rev() == OMAP3430_REV_ES3_1_2) omap_features |= OMAP3_HAS_IO_CHAIN_CTRL; omap_features |= OMAP3_HAS_SDRC; /* * am35x fixups: * - The am35x Chip ID register has bits 12, 7:5, and 3:2 marked as * reserved and therefore return 0 when read. Unfortunately, * OMAP3_CHECK_FEATURE() will interpret some of those zeroes to * mean that a feature is present even though it isn't so clear * the incorrectly set feature bits. */ if (soc_is_am35xx()) omap_features &= ~(OMAP3_HAS_IVA | OMAP3_HAS_ISP); /* * TODO: Get additional info (where applicable) * e.g. Size of L2 cache. */ omap3_cpuinfo(); } void __init omap4xxx_check_features(void) { u32 si_type; si_type = (read_tap_reg(OMAP4_CTRL_MODULE_CORE_STD_FUSE_PROD_ID_1) >> 16) & 0x03; if (si_type == OMAP4_SILICON_TYPE_PERFORMANCE) omap_features = OMAP4_HAS_PERF_SILICON; } void __init ti81xx_check_features(void) { omap_features = OMAP3_HAS_NEON; omap3_cpuinfo(); } void __init am33xx_check_features(void) { u32 status; omap_features = OMAP3_HAS_NEON; status = omap_ctrl_readl(AM33XX_DEV_FEATURE); if (status & AM33XX_SGX_MASK) omap_features |= OMAP3_HAS_SGX; omap3_cpuinfo(); } void __init omap3xxx_check_revision(void) { const char *cpu_rev; u32 cpuid, idcode; u16 hawkeye; u8 rev; /* * We cannot access revision registers on ES1.0. * If the processor type is Cortex-A8 and the revision is 0x0 * it means its Cortex r0p0 which is 3430 ES1.0. */ cpuid = read_cpuid_id(); if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) { omap_revision = OMAP3430_REV_ES1_0; cpu_rev = "1.0"; return; } /* * Detection for 34xx ES2.0 and above can be done with just * hawkeye and rev. See TRM 1.5.2 Device Identification. * Note that rev does not map directly to our defined processor * revision numbers as ES1.0 uses value 0. 
*/ idcode = read_tap_reg(OMAP_TAP_IDCODE); hawkeye = (idcode >> 12) & 0xffff; rev = (idcode >> 28) & 0xff; switch (hawkeye) { case 0xb7ae: /* Handle 34xx/35xx devices */ switch (rev) { case 0: /* Take care of early samples */ case 1: omap_revision = OMAP3430_REV_ES2_0; cpu_rev = "2.0"; break; case 2: omap_revision = OMAP3430_REV_ES2_1; cpu_rev = "2.1"; break; case 3: omap_revision = OMAP3430_REV_ES3_0; cpu_rev = "3.0"; break; case 4: omap_revision = OMAP3430_REV_ES3_1; cpu_rev = "3.1"; break; case 7: /* FALLTHROUGH */ default: /* Use the latest known revision as default */ omap_revision = OMAP3430_REV_ES3_1_2; cpu_rev = "3.1.2"; } break; case 0xb868: /* * Handle OMAP/AM 3505/3517 devices * * Set the device to be OMAP3517 here. Actual device * is identified later based on the features. */ switch (rev) { case 0: omap_revision = AM35XX_REV_ES1_0; cpu_rev = "1.0"; break; case 1: /* FALLTHROUGH */ default: omap_revision = AM35XX_REV_ES1_1; cpu_rev = "1.1"; } break; case 0xb891: /* Handle 36xx devices */ switch(rev) { case 0: /* Take care of early samples */ omap_revision = OMAP3630_REV_ES1_0; cpu_rev = "1.0"; break; case 1: omap_revision = OMAP3630_REV_ES1_1; cpu_rev = "1.1"; break; case 2: /* FALLTHROUGH */ default: omap_revision = OMAP3630_REV_ES1_2; cpu_rev = "1.2"; } break; case 0xb81e: switch (rev) { case 0: omap_revision = TI8168_REV_ES1_0; cpu_rev = "1.0"; break; case 1: omap_revision = TI8168_REV_ES1_1; cpu_rev = "1.1"; break; case 2: omap_revision = TI8168_REV_ES2_0; cpu_rev = "2.0"; break; case 3: /* FALLTHROUGH */ default: omap_revision = TI8168_REV_ES2_1; cpu_rev = "2.1"; } break; case 0xb944: switch (rev) { case 0: omap_revision = AM335X_REV_ES1_0; cpu_rev = "1.0"; break; case 1: omap_revision = AM335X_REV_ES2_0; cpu_rev = "2.0"; break; case 2: /* FALLTHROUGH */ default: omap_revision = AM335X_REV_ES2_1; cpu_rev = "2.1"; break; } break; case 0xb98c: switch (rev) { case 0: omap_revision = AM437X_REV_ES1_0; cpu_rev = "1.0"; break; case 1: omap_revision = AM437X_REV_ES1_1; cpu_rev = "1.1"; break; case 2: /* FALLTHROUGH */ default: omap_revision = AM437X_REV_ES1_2; cpu_rev = "1.2"; break; } break; case 0xb8f2: case 0xb968: switch (rev) { case 0: /* FALLTHROUGH */ case 1: omap_revision = TI8148_REV_ES1_0; cpu_rev = "1.0"; break; case 2: omap_revision = TI8148_REV_ES2_0; cpu_rev = "2.0"; break; case 3: /* FALLTHROUGH */ default: omap_revision = TI8148_REV_ES2_1; cpu_rev = "2.1"; break; } break; default: /* Unknown default to latest silicon rev as default */ omap_revision = OMAP3630_REV_ES1_2; cpu_rev = "1.2"; pr_warn("Warning: unknown chip type: hawkeye %04x, assuming OMAP3630ES1.2\n", hawkeye); } sprintf(soc_rev, "ES%s", cpu_rev); } void __init omap4xxx_check_revision(void) { u32 idcode; u16 hawkeye; u8 rev; /* * The IC rev detection is done with hawkeye and rev. * Note that rev does not map directly to defined processor * revision numbers as ES1.0 uses value 0. 
*/ idcode = read_tap_reg(OMAP_TAP_IDCODE); hawkeye = (idcode >> 12) & 0xffff; rev = (idcode >> 28) & 0xf; /* * Few initial 4430 ES2.0 samples IDCODE is same as ES1.0 * Use ARM register to detect the correct ES version */ if (!rev && (hawkeye != 0xb94e) && (hawkeye != 0xb975)) { idcode = read_cpuid_id(); rev = (idcode & 0xf) - 1; } switch (hawkeye) { case 0xb852: switch (rev) { case 0: omap_revision = OMAP4430_REV_ES1_0; break; case 1: default: omap_revision = OMAP4430_REV_ES2_0; } break; case 0xb95c: switch (rev) { case 3: omap_revision = OMAP4430_REV_ES2_1; break; case 4: omap_revision = OMAP4430_REV_ES2_2; break; case 6: default: omap_revision = OMAP4430_REV_ES2_3; } break; case 0xb94e: switch (rev) { case 0: omap_revision = OMAP4460_REV_ES1_0; break; case 2: default: omap_revision = OMAP4460_REV_ES1_1; break; } break; case 0xb975: switch (rev) { case 0: default: omap_revision = OMAP4470_REV_ES1_0; break; } break; default: /* Unknown default to latest silicon rev as default */ omap_revision = OMAP4430_REV_ES2_3; } sprintf(soc_name, "OMAP%04x", omap_rev() >> 16); sprintf(soc_rev, "ES%d.%d", (omap_rev() >> 12) & 0xf, (omap_rev() >> 8) & 0xf); pr_info("%s %s\n", soc_name, soc_rev); } void __init omap5xxx_check_revision(void) { u32 idcode; u16 hawkeye; u8 rev; idcode = read_tap_reg(OMAP_TAP_IDCODE); hawkeye = (idcode >> 12) & 0xffff; rev = (idcode >> 28) & 0xff; switch (hawkeye) { case 0xb942: switch (rev) { case 0: /* No support for ES1.0 Test chip */ BUG(); case 1: default: omap_revision = OMAP5430_REV_ES2_0; } break; case 0xb998: switch (rev) { case 0: /* No support for ES1.0 Test chip */ BUG(); case 1: default: omap_revision = OMAP5432_REV_ES2_0; } break; default: /* Unknown default to latest silicon rev as default*/ omap_revision = OMAP5430_REV_ES2_0; } sprintf(soc_name, "OMAP%04x", omap_rev() >> 16); sprintf(soc_rev, "ES%d.0", (omap_rev() >> 12) & 0xf); pr_info("%s %s\n", soc_name, soc_rev); } void __init dra7xxx_check_revision(void) { u32 idcode; u16 hawkeye; u8 rev, package; struct omap_die_id odi; omap_get_die_id(&odi); package = (odi.id_2 >> 16) & 0x3; idcode = read_tap_reg(OMAP_TAP_IDCODE); hawkeye = (idcode >> 12) & 0xffff; rev = (idcode >> 28) & 0xff; switch (hawkeye) { case 0xbb50: switch (rev) { case 0: default: switch (package) { case 0x2: omap_revision = DRA762_ABZ_REV_ES1_0; break; case 0x3: omap_revision = DRA762_ACD_REV_ES1_0; break; default: omap_revision = DRA762_REV_ES1_0; break; } break; } break; case 0xb990: switch (rev) { case 0: omap_revision = DRA752_REV_ES1_0; break; case 1: omap_revision = DRA752_REV_ES1_1; break; case 2: default: omap_revision = DRA752_REV_ES2_0; break; } break; case 0xb9bc: switch (rev) { case 0: omap_revision = DRA722_REV_ES1_0; break; case 1: omap_revision = DRA722_REV_ES2_0; break; case 2: default: omap_revision = DRA722_REV_ES2_1; break; } break; default: /* Unknown default to latest silicon rev as default*/ pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%x)\n", __func__, idcode, hawkeye, rev); omap_revision = DRA752_REV_ES2_0; } sprintf(soc_name, "DRA%03x", omap_rev() >> 16); sprintf(soc_rev, "ES%d.%d", (omap_rev() >> 12) & 0xf, (omap_rev() >> 8) & 0xf); pr_info("%s %s\n", soc_name, soc_rev); } /* * Set up things for map_io and processor detection later on. Gets called * pretty much first thing from board init. For multi-omap, this gets * cpu_is_omapxxxx() working accurately enough for map_io. Then we'll try to * detect the exact revision later on in omap2_detect_revision() once map_io * is done. 
*/ void __init omap2_set_globals_tap(u32 class, void __iomem *tap) { omap_revision = class; tap_base = tap; /* XXX What is this intended to do? */ if (soc_is_omap34xx()) tap_prod_id = 0x0210; else tap_prod_id = 0x0208; } #ifdef CONFIG_SOC_BUS static const char * const omap_types[] = { [OMAP2_DEVICE_TYPE_TEST] = "TST", [OMAP2_DEVICE_TYPE_EMU] = "EMU", [OMAP2_DEVICE_TYPE_SEC] = "HS", [OMAP2_DEVICE_TYPE_GP] = "GP", [OMAP2_DEVICE_TYPE_BAD] = "BAD", }; static const char * __init omap_get_family(void) { if (soc_is_omap24xx()) return kasprintf(GFP_KERNEL, "OMAP2"); else if (soc_is_omap34xx()) return kasprintf(GFP_KERNEL, "OMAP3"); else if (soc_is_omap44xx()) return kasprintf(GFP_KERNEL, "OMAP4"); else if (soc_is_omap54xx()) return kasprintf(GFP_KERNEL, "OMAP5"); else if (soc_is_am33xx() || soc_is_am335x()) return kasprintf(GFP_KERNEL, "AM33xx"); else if (soc_is_am43xx()) return kasprintf(GFP_KERNEL, "AM43xx"); else if (soc_is_dra7xx()) return kasprintf(GFP_KERNEL, "DRA7"); else return kasprintf(GFP_KERNEL, "Unknown"); } static ssize_t omap_get_type(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", omap_types[omap_type()]); } static struct device_attribute omap_soc_attr = __ATTR(type, S_IRUGO, omap_get_type, NULL); void __init omap_soc_device_init(void) { struct device *parent; struct soc_device *soc_dev; struct soc_device_attribute *soc_dev_attr; soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) return; soc_dev_attr->machine = soc_name; soc_dev_attr->family = omap_get_family(); soc_dev_attr->revision = soc_rev; soc_dev = soc_device_register(soc_dev_attr); if (IS_ERR(soc_dev)) { kfree(soc_dev_attr); return; } parent = soc_device_to_device(soc_dev); device_create_file(parent, &omap_soc_attr); } #endif /* CONFIG_SOC_BUS */
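/*
 * A standalone sketch (not part of id.c above) showing how the fields used by
 * the revision-detection routines are unpacked from a raw 32-bit TAP IDCODE
 * value, using the same shifts and masks as the debug output in
 * omap2xxx_check_revision(). The sample value below is made up for
 * illustration only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t idcode  = 0x1b7ae02f;              /* hypothetical sample value */
	uint16_t hawkeye = (idcode >> 12) & 0xffff; /* silicon type (hawkeye) id */
	uint8_t  rev     = (idcode >> 28) & 0x0f;   /* silicon revision field */
	uint16_t manf    = (idcode >>  1) & 0x7ff;  /* JEDEC manufacturer id */

	printf("hawkeye=0x%04x rev=%u manf=0x%03x\n", hawkeye, rev, manf);
	return 0;
}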
570299.c
/* * Rogue * * Advanced Rogue * Copyright (C) 1984, 1985 Michael Morgan, Ken Dalka and AT&T * All rights reserved. * * Based on "Rogue: Exploring the Dungeons of Doom" * Copyright (C) 1980, 1981 Michael Toy, Ken Arnold and Glenn Wichman * All rights reserved. * * See the file LICENSE.TXT for full copyright and licensing information. */ #include "curses.h" #include <fcntl.h> #include <sys/stat.h> #include <limits.h> #include <signal.h> #include <time.h> #include "mach_dep.h" #include "network.h" #include "rogue.h" #ifdef CHECKTIME static int num_checks; /* times we've gone over in checkout() */ #endif /* * fruits that you get at startup */ static char *funfruit[] = { "candleberry", "caprifig", "dewberry", "elderberry", "gooseberry", "guanabana", "hagberry", "ilama", "imbu", "jaboticaba", "jujube", "litchi", "mombin", "pitanga", "prickly pear", "rambutan", "sapodilla", "soursop", "sweetsop", "whortleberry", "jellybean", "apple", "strawberry", "blueberry", "peach", "banana" }; #define NFRUIT (sizeof(funfruit) / sizeof (char *)) main(argc, argv, envp) char **argv; char **envp; { register char *env; int lowtime; time_t now; char *roguedir = md_getroguedir(); md_init(); /* * get home and options from environment */ strncpy(home,md_gethomedir(),LINELEN); /* Get default save file */ strcpy(file_name, roguedir); strcat(file_name, "arogue58.save"); /* Get default score file */ strcpy(score_file, roguedir); if (*score_file) strcat(score_file,"/"); strcat(score_file, "arogue58.score"); if ((env = getenv("ROGUEOPTS")) != NULL) parse_opts(env); if (whoami[0] == '\0') strucpy(whoami, md_getusername(), strlen(md_getusername())); if (env == NULL || fruit[0] == '\0') { md_srand((long)(getpid()+time(0))); strcpy(fruit, funfruit[rnd(NFRUIT)]); } /* * check for print-score option */ if (argc == 2 && strcmp(argv[1], "-s") == 0) { waswizard = TRUE; score(0, SCOREIT, 0); exit(0); } #ifdef NUMNET /* * Check for a network update */ if (argc == 2 && strcmp(argv[1], "-u") == 0) { unsigned long netread(); int errcheck, errors = 0; unsigned long amount; short monster; /* Read in the amount and monster values to pass to score */ amount = netread(&errcheck, sizeof(unsigned long), stdin); if (errcheck) errors++; monster = (short) netread(&errcheck, sizeof(short), stdin); if (errcheck) errors++; /* Now do the update if there were no errors */ if (errors) exit(1); else { score(amount, UPDATE, monster); exit(0); } } #endif #ifdef WIZARD /* * Check to see if he is a wizard */ if (argc >= 2 && argv[1][0] == '\0') if (strcmp(PASSWD, md_crypt(md_getpass("Wizard's password: "), "Si")) == 0) { printf("Hail Mighty Wizard\n"); wizard = TRUE; argv++; argc--; } #endif #if MAXLOAD|MAXUSERS if (too_much() && !wizard && !author()) { printf("Sorry, %s, but the system is too loaded now.\n", whoami); printf("Try again later. Meanwhile, why not enjoy a%s %s?\n", vowelstr(fruit), fruit); exit(1); } #endif if (argc == 2) if (!restore(argv[1], envp)) /* Note: restore will never return */ exit(1); lowtime = (int) time(&now); dnum = (wizard && getenv("SEED") != NULL ? 
atoi(getenv("SEED")) : lowtime + getpid()); if (wizard) printf("Hello %s, welcome to dungeon #%d\n", whoami, dnum); else printf("Hello %s, just a moment while I dig the dungeon...\n", whoami); fflush(stdout); seed = dnum; md_srand(seed); init_things(); /* Set up probabilities of things */ init_colors(); /* Set up colors of potions */ init_stones(); /* Set up stone settings of rings */ init_materials(); /* Set up materials of wands */ initscr(); /* Start up cursor package */ init_names(); /* Set up names of scrolls */ init_misc(); /* Set up miscellaneous magic */ if (LINES < 24 || COLS < 80) { printf("\nERROR: screen size to small for rogue\n"); byebye(-1); } if ((whoami == NULL) || (*whoami == '\0') || (strcmp(whoami,"dosuser")==0)) { echo(); mvaddstr(23,2,"Rogue's Name? "); wgetnstr(stdscr,whoami,LINELEN); noecho(); } if ((whoami == NULL) || (*whoami == '\0')) strcpy(whoami,"Rodney"); setup(); /* * Set up windows */ cw = newwin(LINES, COLS, 0, 0); mw = newwin(LINES, COLS, 0, 0); hw = newwin(LINES, COLS, 0, 0); keypad(cw,1); init_player(); /* Roll up the rogue */ waswizard = wizard; new_level(NORMLEV); /* Draw current level */ /* * Start up daemons and fuses */ daemon(doctor, &player, AFTER); fuse(swander, 0, WANDERTIME, AFTER); daemon(stomach, 0, AFTER); daemon(runners, 0, AFTER); if (player.t_ctype == C_THIEF) daemon(trap_look, 0, AFTER); /* Choose a quest item */ quest_item = rnd(MAXRELIC); msg("You have been quested to retrieve the %s....", rel_magic[quest_item].mi_name); mpos = 0; playit(); } /* * endit: * Exit the program abnormally. */ void endit(int sig) { NOOP(sig); fatal("Ok, if you want to exit that badly, I'll have to allow it\n"); } /* * fatal: * Exit the program, printing a message. */ fatal(s) char *s; { clear(); move(LINES-2, 0); printw("%s", s); draw(stdscr); endwin(); printf("\n"); /* So the cursor doesn't stop at the end of the line */ exit(0); } /* * rnd: * Pick a very random number. */ rnd(range) register int range; { return(range == 0 ? 0 : md_rand() % range); } /* * roll: * roll a number of dice */ roll(number, sides) register int number, sides; { register int dtotal = 0; while(number--) dtotal += rnd(sides)+1; return dtotal; } # ifdef SIGTSTP /* * handle stop and start signals */ void tstp(int a) { mvcur(0, COLS - 1, LINES - 1, 0); endwin(); fflush(stdout); kill(0, SIGTSTP); signal(SIGTSTP, tstp); raw(); noecho(); keypad(cw,1); clearok(curscr, TRUE); touchwin(cw); draw(cw); md_flushinp(); } # endif setup() { #ifdef CHECKTIME int checkout(); #endif #ifndef DUMP #ifdef SIGHUP signal(SIGHUP, auto_save); #endif signal(SIGILL, bugkill); #ifdef SIGTRAP signal(SIGTRAP, bugkill); #endif #ifdef SIGIOT signal(SIGIOT, bugkill); #endif #if 0 signal(SIGEMT, bugkill); signal(SIGFPE, bugkill); signal(SIGBUS, bugkill); signal(SIGSEGV, bugkill); signal(SIGSYS, bugkill); signal(SIGPIPE, bugkill); #endif signal(SIGTERM, auto_save); #endif signal(SIGINT, quit); #ifndef DUMP #ifdef SIGQUIT signal(SIGQUIT, endit); #endif #endif #ifdef SIGTSTP signal(SIGTSTP, tstp); #endif #ifdef CHECKTIME if (!author()) { signal(SIGALRM, checkout); alarm(CHECKTIME * 60); num_checks = 0; } #endif crmode(); /* Cbreak mode */ noecho(); /* Echo off */ } /* * playit: * The main loop of the program. Loop until the game is over, * refreshing things and looking at the proper times. 
*/ playit() { register char *opts; /* * parse environment declaration of options */ if ((opts = getenv("ROGUEOPTS")) != NULL) parse_opts(opts); player.t_oldpos = hero; oldrp = roomin(&hero); after = TRUE; while (playing) command(); /* Command execution */ endit(0); } #if MAXLOAD|MAXUSERS /* * see if the system is being used too much for this game */ too_much() { #ifdef MAXLOAD double avec[3]; #endif #ifdef MAXLOAD loadav(avec); return (avec[2] > (MAXLOAD / 10.0)); #else return (ucount() > MAXUSERS); #endif } #endif /* * author: * See if a user is an author of the program */ author() { switch (md_getuid()) { #if AUTHOR case AUTHOR: #endif case 0: return TRUE; default: return FALSE; } } #ifdef CHECKTIME checkout() { static char *msgs[] = { "The system is too loaded for games. Please leave in %d minutes", "Please save your game. You have %d minutes", "This is your last chance. You had better leave in %d minutes", }; int checktime; signal(SIGALRM, checkout); if (!holiday() && !author()) { wclear(cw); mvwaddstr(cw, LINES / 2, 0, "Game time is over. Your game is being saved.\n\n"); draw(cw); auto_save(); /* NO RETURN */ } if (too_much()) { if (num_checks >= 3) fatal("You didn't listen, so now you are DEAD !!\n"); checktime = CHECKTIME / (num_checks + 1); chmsg(msgs[num_checks++], checktime); alarm(checktime * 60); } else { if (num_checks) { chmsg("The load has dropped. You have a reprieve."); num_checks = 0; } alarm(CHECKTIME * 60); } } /* * checkout()'s version of msg. If we are in the middle of a shell, do a * printf instead of a msg to avoid the refresh. */ chmsg(fmt, arg) char *fmt; int arg; { if (in_shell) { printf(fmt, arg); putchar('\n'); fflush(stdout); } else msg(fmt, arg); } #endif #ifdef LOADAV #include <nlist.h> struct nlist avenrun = { "_avenrun" }; loadav(avg) reg double *avg; { reg int kmem; if ((kmem = open("/dev/kmem", 0)) < 0) goto bad; nlist(NAMELIST, &avenrun); if (avenrun.n_type == 0) { bad: avg[0] = avg[1] = avg[2] = 0.0; return; } lseek(kmem, (long) avenrun.n_value, 0); read(kmem, avg, 3 * sizeof (double)); } #endif #ifdef UCOUNT /* * ucount: * Count the number of people on the system */ #include <sys/types.h> #include <utmp.h> struct utmp buf; ucount() { reg struct utmp *up; reg FILE *utmp; reg int count; if ((utmp = fopen(UTMP, "r")) == NULL) return 0; up = &buf; count = 0; while (fread(up, 1, sizeof (*up), utmp) > 0) if (buf.ut_type == USER_PROCESS) count++; fclose(utmp); return count; } #endif /* * holiday: * Returns TRUE when it is a good time to play rogue */ holiday() { time_t now; struct tm *localtime(); reg struct tm *ntime; time(&now); /* get the current time */ ntime = localtime(&now); if(ntime->tm_wday == 0 || ntime->tm_wday == 6) return TRUE; /* OK on Sat & Sun */ if(ntime->tm_hour < 8 || ntime->tm_hour >= 17) return TRUE; /* OK before 8AM & after 5PM */ if(ntime->tm_yday <= 7 || ntime->tm_yday >= 350) return TRUE; /* OK during Christmas */ #if 0 /* not for now */ if (access("/usr/tmp/.ryes",0) == 0) return TRUE; /* if author permission */ #endif return FALSE; /* All other times are bad */ }
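/*
 * A minimal sketch, separate from the game source above, of the rnd()/roll()
 * dice idiom used throughout Rogue: rnd(n) picks a value in [0, n) and
 * roll(number, sides) sums `number` dice of `sides` faces each. The C library
 * rand()/srand() stand in here for the game's md_rand()/md_srand().
 */
#include <stdio.h>
#include <stdlib.h>

static int rnd_demo(int range)
{
	return range == 0 ? 0 : rand() % range;
}

static int roll_demo(int number, int sides)
{
	int dtotal = 0;

	while (number--)
		dtotal += rnd_demo(sides) + 1;	/* each die yields 1..sides */
	return dtotal;
}

int main(void)
{
	srand(42);				/* fixed seed for a repeatable demo */
	printf("3d6 = %d\n", roll_demo(3, 6));	/* three six-sided dice: 3..18 */
	return 0;
}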
759901.c
/**
 * Function to reverse an array.
 *
 * Author: Paul McCarthy <[email protected]>
 */
#include <stdint.h>
#include <stdlib.h>

#include "reverse.h"

void reverse(void *src, void *dst, int len) {

  int      i;
  uint8_t  tmp;
  uint8_t *usrc = (uint8_t *)src;
  uint8_t *udst = (uint8_t *)dst;

  if (src == NULL) return;
  if (dst == NULL) return;
  if (len <= 0)    return;

  /*
   * Swap bytes from the two ends towards the middle. This also works
   * in place (src == dst), because each pair is read into tmp before
   * either element is overwritten.
   */
  for (i = 0; i < len/2; i++) {
    tmp           = usrc[i];
    udst[i]       = usrc[len-1-i];
    udst[len-1-i] = tmp;
  }

  /*
   * When reversing into a separate buffer and len is odd, the middle
   * element is not touched by the loop above, so copy it explicitly.
   */
  if (src != dst && (len & 1))
    udst[len/2] = usrc[len/2];
}
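/*
 * A minimal usage sketch for reverse(), assuming reverse.h declares the
 * function exactly as defined above. The hypothetical main() reverses a
 * five-byte array out of place and then in place.
 */
#include <stdio.h>
#include <stdint.h>

#include "reverse.h"

int main(void) {

  int     i;
  uint8_t in[5]  = { 1, 2, 3, 4, 5 };
  uint8_t out[5] = { 0 };

  reverse(in, out, 5);   /* out becomes 5 4 3 2 1 */
  reverse(in, in,  5);   /* in place: in becomes 5 4 3 2 1 */

  for (i = 0; i < 5; i++)
    printf("%u %u\n", out[i], in[i]);

  return 0;
}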
823769.c
/* * Implementing STK500/AVRISP protocol. * Documented in AVR068 application note. */ #include <stdio.h> #include <stdlib.h> #ifndef WIN32 # include <unistd.h> #endif #include <fcntl.h> #include <string.h> #include "avr.h" #include "stk500.h" #if defined(__WIN32__) || defined(WIN32) # include <winsock.h> #else # include <sys/ioctl.h> # include <termios.h> # include <errno.h> #endif extern int debug; struct _avr_t { char *name; unsigned char sequence_number; unsigned char have_fuse; // unsigned char have_checksum_cmd; // unsigned char vendor_code; // unsigned char part_family; // unsigned char part_number; unsigned char stk_flag; // unsigned char hardware_version; // unsigned char software_major; // unsigned char software_minor; // unsigned char sck_duration; // unsigned char topcard; // unsigned char target_voltage; // unsigned char aref_voltage; // unsigned char osc_pscale; // unsigned char osc_cmatch; unsigned char page_addr_fetched; u_int32_t flash_size; unsigned short page_size; unsigned short page_delay; u_int32_t page_addr; u_int32_t last_load_addr; unsigned char page [256]; #if defined(__WIN32__) || defined(WIN32) void *fd; DCB saved_mode; #else int fd; struct termios saved_mode; #endif }; #if 0 #define DEBUG(x) { if (debug) printf x; } #else #define DEBUG(x) /* void */ #endif #if defined(__WIN32__) || defined(WIN32) int usleep (u_int32_t usec); #endif char *avr_name (avr_t *avr) { if (! avr->name) return "Unknown"; return avr->name; } u_int32_t avr_flash_size (avr_t *avr) { return avr->flash_size; } /* * Receive data from device. */ static int avr_receive (avr_t *avr, unsigned char *data, int len) { #if defined(__WIN32__) || defined(WIN32) DWORD got; if (! ReadFile (avr->fd, data, len, &got, 0)) { fprintf (stderr, "avr-send: read error\n"); exit (-1); } #else struct timeval timeout, to2; long got; fd_set rfds; timeout.tv_sec = 5; timeout.tv_usec = 0; to2 = timeout; again: FD_ZERO (&rfds); FD_SET (avr->fd, &rfds); got = select (avr->fd + 1, &rfds, 0, 0, &to2); if (got < 0) { if (errno == EINTR || errno == EAGAIN) { if (debug > 1) printf ("avr-send: programmer is not responding\n"); goto again; } fprintf (stderr, "avr-send: select error: %s\n", strerror (errno)); exit (1); } #endif if (got == 0) { if (debug > 1) printf ("avr-send: programmer is not responding\n"); return 0; } #if ! defined(__WIN32__) && !defined(WIN32) got = read (avr->fd, data, (len > 1024) ? 1024 : len); if (got < 0) { fprintf (stderr, "avr-send: read error\n"); exit (-1); } #endif return got; } /* * Send the command sequence and get back a response. */ static int avr_send (avr_t *avr, unsigned char *cmd, int cmdlen, unsigned char *response, int reply_len) { unsigned char *p, sum, hdr [5]; int len, i, got, rlen; /* * Prepare header and checksum. */ hdr[0] = MESSAGE_START; hdr[1] = ++avr->sequence_number; hdr[2] = cmdlen >> 8; hdr[3] = cmdlen; hdr[4] = TOKEN; sum = hdr[0] ^ hdr[1] ^ hdr[2] ^ hdr[3] ^ hdr[4]; for (i=0; i<cmdlen; ++i) sum ^= cmd[i]; /* * Send command. */ if (debug > 1) { printf ("send [%d] %x-%x-%x-%x-%x", 5 + cmdlen + 1, hdr[0], hdr[1], hdr[2], hdr[3], hdr[4]); for (i=0; i<cmdlen; ++i) printf ("-%x", cmd[i]); printf ("-%x\n", sum); } #if defined(__WIN32__) || defined(WIN32) { DWORD written; if (! WriteFile (avr->fd, hdr, 5, &written, 0) || ! WriteFile (avr->fd, cmd, cmdlen, &written, 0) || ! 
WriteFile (avr->fd, &sum, 1, &written, 0)) { fprintf (stderr, "avr-send: write error\n"); exit (-1); } } #else if (write (avr->fd, hdr, 5) < 0 || write (avr->fd, cmd, cmdlen) < 0 || write (avr->fd, &sum, 1) < 0) { fprintf (stderr, "avr-send: write error\n"); exit (-1); } #endif /* * Get header. */ p = hdr; len = 0; while (len < 5) { got = avr_receive (avr, p, 5 - len); if (! got) return 0; p += got; len += got; } if (hdr[0] != MESSAGE_START || hdr[1] != avr->sequence_number || hdr[4] != TOKEN) { printf ("got invalid header: %x-%x-%x-%x-%x\n", hdr[0], hdr[1], hdr[2], hdr[3], hdr[4]); flush_input: { /* Skip all incoming data. */ unsigned char buf [300]; avr_receive (avr, buf, sizeof (buf)); } return 0; } rlen = hdr[2] << 8 | hdr[3]; if (rlen == 0 || rlen > reply_len) { printf ("invalid reply length=%d, expecting %d bytes\n", rlen, reply_len); goto flush_input; } /* * Get response. */ p = response; len = 0; while (len < rlen) { got = avr_receive (avr, p, rlen - len); if (! got) return 0; p += got; len += got; } /* * Get sum. */ p = &sum; len = 0; while (len < 1) { got = avr_receive (avr, p, 1); if (! got) return 0; ++len; } if (debug > 1) { printf (" got [%d] %x-%x-%x-%x-%x", 5 + rlen + 1, hdr[0], hdr[1], hdr[2], hdr[3], hdr[4]); for (i=0; i<rlen; ++i) printf ("-%x", response[i]); printf ("-%x\n", sum); } /* Check sum. */ sum ^= hdr[0] ^ hdr[1] ^ hdr[2] ^ hdr[3] ^ hdr[4]; for (i=0; i<rlen; ++i) sum ^= response[i]; if (sum != 0) { printf ("invalid reply checksum\n"); goto flush_input; } return 1; } static void avr_prog_enable (avr_t *avr) { unsigned char cmd [12] = { CMD_ENTER_PROGMODE_ISP, 200, /* timeout in msec */ 100, /* pin stabilization delay in msec */ 25, /* command execution delay in msec */ 32, /* number of synchronization loops */ 0, /* per byte delay */ 0x53, /* poll value, 53h for AVR, 69h for AT89xx */ 3, /* poll index, 3 for AVR, 4 for AT89xx */ 0xAC, 0x53, 0x00, 0x00 }; unsigned char response [2]; if (! avr_send (avr, cmd, 12, response, 2) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK) { fprintf (stderr, "Cannot enter programming mode.\n"); exit (-1); } } static void avr_prog_disable (avr_t *avr) { unsigned char cmd [3] = { CMD_LEAVE_PROGMODE_ISP, 1, /* pre-delay in msec */ 1, /* post-delay in msec */ }; unsigned char response [2]; if (! avr_send (avr, cmd, 3, response, 2) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK) { fprintf (stderr, "Cannot leave programming mode.\n"); exit (-1); } } #if 0 static unsigned char avr_read_signature (avr_t *avr, unsigned char addr) { unsigned char cmd [6] = { CMD_READ_FUSE_ISP, 4, 0x30, 0, addr, 0 }; unsigned char response [4]; if (! avr_send (avr, cmd, 6, response, 4) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK || response[3] != STATUS_CMD_OK) { fprintf (stderr, "Read signature failed.\n"); exit (-1); } return response[2]; } static unsigned char avr_get_param (avr_t *avr, unsigned char addr) { unsigned char cmd [2] = { CMD_GET_PARAMETER, addr }; unsigned char response [3]; if (avr_send (avr, cmd, 2, response, 3) && response[0] == cmd[0] && response[1] == STATUS_CMD_OK) return response [2]; return 0; } static void avr_set_param (avr_t *avr, unsigned char addr, unsigned char val) { unsigned char cmd [3] = { CMD_SET_PARAMETER, addr, val }; unsigned char response [2]; if (! 
avr_send (avr, cmd, 3, response, 2) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK) { fprintf (stderr, "Cannot set parameter %02x to %02x.\n", addr, val); exit (-1); } } #endif static int avr_detect (avr_t *avr) { int retry_count, ok_count; unsigned char response [12]; /* Synchronize */ retry_count = ok_count = 0; while (ok_count < 3 && retry_count < 7) { /* Send CMD_SIGN_ON. */ if (! avr_send (avr, (unsigned char*)"\1", 1, response, 11)) goto retry; if (memcmp (response, "\1\0\10STK500_2", 11) == 0) { avr->stk_flag = 1; detected: ++ok_count; if (debug > 1) printf ("avr-probe: ok %d\n", ok_count); continue; } if (memcmp (response, "\1\0\10AVRISP_2", 11) == 0) { avr->stk_flag = 0; goto detected; } retry: ++retry_count; if (debug > 1) printf ("avr-probe: error %d, ok %d\n", retry_count, ok_count); } if (retry_count >= 7) return 0; #if 0 avr_set_param (avr, PARAM_SCK_DURATION, 2); /* Get programmer parameters */ avr->hardware_version = avr_get_param (avr, PARAM_HW_VER); avr->software_major = avr_get_param (avr, PARAM_SW_MAJOR); avr->software_minor = avr_get_param (avr, PARAM_SW_MINOR); avr->sck_duration = avr_get_param (avr, PARAM_SCK_DURATION); if (avr->stk_flag) { /* For STK500 only, not for AVRISP. */ avr->topcard = avr_get_param (avr, PARAM_TOPCARD_DETECT); avr->target_voltage = avr_get_param (avr, PARAM_VTARGET); avr->aref_voltage = avr_get_param (avr, PARAM_VADJUST); avr->osc_pscale = avr_get_param (avr, PARAM_OSC_PSCALE); avr->osc_cmatch = avr_get_param (avr, PARAM_OSC_CMATCH); } #endif avr_prog_enable (avr); avr->last_load_addr = -1; #if 0 /* Get AVR Info */ avr->vendor_code = avr_read_signature (avr, 0); /* 0x1e is Atmel */ avr->part_family = avr_read_signature (avr, 1); avr->part_number = avr_read_signature (avr, 2); /* Assume checksum command is available. */ avr->have_checksum_cmd = 1; if (debug) { printf ("Hardware Version: %d\n", avr->hardware_version); printf ("Firmware Version: %d.%d\n", avr->software_major, avr->software_minor); printf (" SCK Period: %.1f usec\n", avr->sck_duration * 8.0e6 / STK500_XTAL + 0.05); if (avr->stk_flag) { char *topcard_name = "Unknown"; switch (avr->topcard) { case 0xFF: topcard_name = "Not installed"; break; case 0xAA: topcard_name = "STK501"; break; case 0x55: topcard_name = "STK502"; break; case 0xFA: topcard_name = "STK503"; break; case 0xEE: topcard_name = "STK504"; break; case 0xE4: topcard_name = "STK505"; break; case 0xDD: topcard_name = "STK520"; break; } printf (" Top Card: %s\n", topcard_name); printf (" Target Voltage: %.1f V\n", avr->target_voltage / 10.0); printf (" AREF Voltage: %.1f V\n", avr->aref_voltage / 10.0); printf (" Oscillator: "); if (avr->osc_pscale == 0) printf("Off\n"); else { double f; f = STK500_XTAL / 2 / (avr->osc_cmatch + 1); switch (avr->osc_pscale) { case 2: f /= 8; break; case 3: f /= 32; break; case 4: f /= 64; break; case 5: f /= 128; break; case 6: f /= 256; break; case 7: f /= 1024; break; } if (f > 1e6) { printf ("%.3f MHz\n", f / 1000000); } else if (f > 1e3) { printf ("%.3f kHz\n", f / 1000); } else printf ("%.3f Hz\n", f); } } printf ("Vendor Code = %x, Part Family = %x, Part Number = %x\n", avr->vendor_code, avr->part_family, avr->part_number); } #endif /* Identify device according to family. * Assume ATmega128. */ avr->flash_size = 0xfe00 * 2; // avr->have_fuse = 1; avr->page_delay = 5; avr->page_size = 256; #if 0 /* Identify device according to family. 
*/ switch (avr->part_family) { case 0x01: avr->have_fuse = 0; avr->flash_size = 0x20000; avr->page_delay = 56; avr->name = "Old ATmega103"; if (avr->vendor_code == 0 && avr->part_number == 2) { if (debug) printf ("Device is protected, assuming ATmega103.\n"); avr->name = "Protected"; } break; case 0x90: avr->flash_size = 1024; avr->page_size = 32; switch (avr->part_number) { case 0x01: avr->name = "AT90s1200"; avr->page_size = 0; break; case 0x04: avr->name = "ATtiny11"; avr->page_size = 0; break; case 0x05: avr->name = "ATtiny12"; avr->page_size = 0; break; case 0x06: avr->name = "ATtiny15"; avr->page_size = 0; break; case 0x07: avr->name = "ATtiny13"; break; default: if (debug) printf ("Unknown part number %#x, assuming ATtiny13.\n", avr->part_number); avr->name = "Unknown"; break; } break; case 0x91: avr->flash_size = 2048; avr->page_size = 32; switch (avr->part_number) { case 0x01: avr->name = "AT90s2313"; avr->page_size = 0; break; case 0x02: avr->name = "AT90s2323"; avr->page_size = 0; break; case 0x03: avr->name = "AT90s2343"; avr->page_size = 0; break; case 0x06: avr->name = "ATtiny22"; avr->page_size = 0; break; case 0x07: avr->name = "ATtiny28"; avr->page_size = 0; break; case 0x08: avr->name = "ATtiny25"; break; case 0x09: avr->name = "ATtiny26"; break; case 0x0A: avr->name = "ATtiny2313"; break; case 0x0B: avr->name = "ATtiny24"; break; case 0x0C: avr->name = "ATtiny261"; break; default: if (debug) printf ("Unknown part number %#x, assuming ATtiny24.\n", avr->part_number); avr->name = "Unknown"; break; } break; case 0x92: avr->flash_size = 4096; avr->page_size = 64; switch (avr->part_number) { case 0x01: avr->name = "AT90s4414"; avr->page_size = 0; break; case 0x03: avr->name = "AT90s4433"; avr->page_size = 0; break; case 0x05: avr->name = "ATmega48"; break; case 0x06: avr->name = "ATtiny45"; break; case 0x07: avr->name = "ATtiny44"; avr->page_size = 32; break; case 0x08: avr->name = "ATtiny461"; avr->page_size = 32; break; default: if (debug) printf ("Unknown part number %#x, assuming ATtiny44.\n", avr->part_number); avr->name = "Unknown"; break; } break; case 0x93: avr->flash_size = 0xf80*2; avr->page_size = 64; switch (avr->part_number) { case 0x01: avr->name = "AT90s8515"; avr->page_size = 0; break; case 0x03: avr->name = "AT90s8535"; avr->page_size = 0; break; case 0x06: avr->name = "ATmega8515"; break; case 0x07: avr->name = "ATmega8"; break; case 0x08: avr->name = "ATmega8535"; break; case 0x0A: avr->name = "ATmega88"; break; case 0x0B: avr->name = "ATtiny85"; break; case 0x0C: avr->name = "ATtiny84"; break; case 0x0D: avr->name = "ATtiny861"; break; default: if (debug) printf ("Unknown part number %#x, assuming ATmega8.\n", avr->part_number); avr->name = "Unknown"; break; } break; case 0x94: avr->flash_size = 0x1f80*2; avr->page_size = 128; switch (avr->part_number) { case 0x01: avr->name = "ATmega161"; break; case 0x02: avr->name = "ATmega163"; break; case 0x03: avr->name = "ATmega16"; break; case 0x04: avr->name = "ATmega162"; break; case 0x05: avr->name = "ATmega169"; break; case 0x06: avr->name = "ATmega168"; break; case 0x07: avr->name = "ATmega165"; break; default: if (debug) printf ("Unknown part number %#x, assuming ATmega16.\n", avr->part_number); avr->name = "Unknown"; break; } break; case 0x95: avr->flash_size = 0x3f00*2; avr->page_size = 128; switch (avr->part_number) { case 0x01: avr->name = "ATmega323"; break; case 0x02: avr->name = "ATmega32"; break; case 0x03: avr->name = "ATmega329"; break; case 0x04: avr->name = "ATmega3290"; break; case 0x05: 
avr->name = "ATmega325"; break; case 0x06: avr->name = "ATmega3250"; break; case 0x07: avr->name = "ATmega406"; break; default: if (debug) printf ("Unknown part number %#x, assuming ATmega32.\n", avr->part_number); avr->name = "Unknown"; break; } break; case 0x96: avr->flash_size = 0x7e00*2; switch (avr->part_number) { case 0x01: avr->name = "ATmega603"; avr->flash_size = 0x10000; avr->have_fuse = 0; avr->page_delay = 56; break; case 0x02: avr->name = "ATmega64"; break; case 0x03: avr->name = "ATmega649"; break; case 0x04: avr->name = "ATmega6490"; break; case 0x05: avr->name = "ATmega645"; break; case 0x06: avr->name = "ATmega6450"; break; case 0x08: avr->name = "ATmega640"; break; case 0x09: avr->name = "ATmega644"; break; default: if (debug) printf ("Unknown part number %#x, assuming ATmega64.\n", avr->part_number); avr->name = "Unknown"; break; } break; case 0x97: switch (avr->part_number) { case 0x01: avr->name = "ATmega103"; avr->flash_size = 0x20000; avr->have_fuse = 0; avr->page_delay = 56; break; case 0x02: avr->name = "ATmega128"; break; case 0x03: avr->name = "ATmega1280"; break; case 0x04: avr->name = "ATmega1281"; break; default: if (debug) printf ("Unknown part number %#x, assuming ATmega128.\n", avr->part_number); avr->name = "Unknown"; break; } break; case 0x98: avr->flash_size = 0x3E000; // LY: use only first 248K, see ERRATA; switch (avr->part_number) { case 0x01: avr->name = "ATmega2560"; break; case 0x02: avr->name = "ATmega2561"; break; default: if (debug) printf ("Unknown part number %#x, assuming ATmega256.\n", avr->part_number); avr->name = "Unknown"; break; } break; default: if (debug) printf ("Unknown part family %#x, assuming ATmega128.\n", avr->part_family); avr->name = "Unknown"; break; } #endif return 1; } /* * Open the device. */ avr_t *avr_open (char *devname) { avr_t *avr; #if defined(__WIN32__) || defined(WIN32) DCB new_mode; COMMTIMEOUTS ctmo; #else struct termios new_mode; #endif avr = calloc (1, sizeof (avr_t)); if (! avr) { fprintf (stderr, "Out of memory\n"); exit (-1); } #if defined(__WIN32__) || defined(WIN32) /* Open port */ avr->fd = CreateFile (devname, GENERIC_READ | GENERIC_WRITE, 0, 0, OPEN_EXISTING, 0, 0); if (avr->fd == INVALID_HANDLE_VALUE) { fprintf (stderr, "%s: Cannot open\n", devname); exit (-1); } /* Set serial attributes */ memset (&avr->saved_mode, 0, sizeof(avr->saved_mode)); if (! GetCommState (avr->fd, &avr->saved_mode)) { fprintf (stderr, "%s: Cannot get state\n", devname); exit (-1); } new_mode = avr->saved_mode; new_mode.BaudRate = CBR_9600; new_mode.ByteSize = 8; new_mode.StopBits = ONESTOPBIT; new_mode.Parity = 0; new_mode.fParity = FALSE; new_mode.fOutX = FALSE; new_mode.fInX = FALSE; new_mode.fOutxCtsFlow = FALSE; new_mode.fOutxDsrFlow = FALSE; new_mode.fRtsControl = RTS_CONTROL_ENABLE; new_mode.fNull = FALSE; new_mode.fAbortOnError = FALSE; new_mode.fBinary = TRUE; if (! SetCommState (avr->fd, &new_mode)) { fprintf (stderr, "%s: Cannot set state\n", devname); exit (-1); } memset (&ctmo, 0, sizeof(ctmo)); ctmo.ReadIntervalTimeout = 0; ctmo.ReadTotalTimeoutMultiplier = 0; ctmo.ReadTotalTimeoutConstant = 500; if (! 
SetCommTimeouts (avr->fd, &ctmo)) { fprintf (stderr, "%s: Cannot set timeouts\n", devname); exit (-1); } #else /* Open port */ avr->fd = open (devname, O_RDWR | O_NONBLOCK); if (avr->fd < 0) { perror (devname); exit (-1); } /* Set serial attributes */ memset (&avr->saved_mode, 0, sizeof(avr->saved_mode)); tcgetattr (avr->fd, &avr->saved_mode); new_mode = avr->saved_mode; cfmakeraw (&new_mode); cfsetispeed (&new_mode, 9600); cfsetospeed (&new_mode, 9600); new_mode.c_iflag |= IGNBRK; tcsetattr (avr->fd, TCSANOW, &new_mode); if (avr_detect (avr)) goto ok; #endif /* Try another baud rate. */ #if defined(__WIN32__) || defined(WIN32) new_mode.BaudRate = CBR_115200; SetCommState (avr->fd, &new_mode); #else cfsetispeed (&new_mode, 115200); cfsetospeed (&new_mode, 115200); tcsetattr (avr->fd, TCSANOW, &new_mode); #endif if (! avr_detect (avr)) { fprintf (stderr, "No response from device.\n"); exit (-1); } ok: #if defined(__WIN32__) || defined(WIN32) ctmo.ReadTotalTimeoutConstant = 5000; SetCommTimeouts (avr->fd, &ctmo); #endif return avr; } /* * Close the device. */ void avr_close (avr_t *avr) { if (avr->name) { avr_prog_disable (avr); avr->name = 0; } #if defined(__WIN32__) || defined(WIN32) SetCommState (avr->fd, &avr->saved_mode); CloseHandle (avr->fd); #else tcsetattr (avr->fd, TCSANOW, &avr->saved_mode); close (avr->fd); #endif } void avr_erase (avr_t *avr) { unsigned char cmd [7] = { CMD_CHIP_ERASE_ISP, 3 * avr->page_delay * 10, 0, 0xAC, 0x80, 0x00, 0x00 }; unsigned char response [2]; printf ("Erasing..."); fflush (stdout); if (! avr_send (avr, cmd, 7, response, 2) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK) { fprintf (stderr, "Chip erase failed.\n"); exit (-1); } printf (" done\n"); avr_detect (avr); } static void avr_load_address (avr_t *avr, u_int32_t addr) { unsigned char cmd [5] = { CMD_LOAD_ADDRESS, addr >> 24, addr >> 16, addr >> 8, addr }; unsigned char response [2]; if (avr->last_load_addr == addr) return; if (debug > 1) printf ("Load address: %#x\n", addr); /* Extended address flag. */ if (avr->flash_size > 0x10000) cmd[1] |= 0x80; if (! avr_send (avr, cmd, 5, response, 2) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK) { fprintf (stderr, "Load address failed.\n"); exit (-1); } avr->last_load_addr = addr; } /* * PAGE MODE PROGRAMMING: * Cache page address. When current address is out of the page address, * flush page buffer and continue programming. */ void avr_write_byte (avr_t *avr, u_int32_t addr, unsigned char byte) { int page_bytes = avr->page_size; if (page_bytes == 0) { /* Word mode. */ page_bytes = 256; } if (debug > 2) printf ("Loading to address: %#x (page_addr_fetched=%s)\n", addr, avr->page_addr_fetched ? "Yes" : "No"); if (avr->page_addr / page_bytes != addr / page_bytes) avr_flush_write_buffer (avr); if (! avr->page_addr_fetched) { avr->page_addr = addr / page_bytes * page_bytes; avr->page_addr_fetched = 1; } avr->page [addr % page_bytes] = byte; } void avr_flush_write_buffer (avr_t *avr) { unsigned char cmd [10+256] = { CMD_PROGRAM_FLASH_ISP, avr->page_size >> 8, avr->page_size, 0xA1, /* mode */ avr->page_delay * 3/2, /* delay */ 0x40, /* Load Page command */ 0x4c, /* Write Program Memory Page command */ 0x20, /* Read Program Memory command */ 0xFF, /* poll value 1 */ 0 }; /* poll value 2 (for EEPROM only) */ unsigned char response [2]; int page_bytes = avr->page_size; if (! 
avr->page_addr_fetched) return; avr_load_address (avr, avr->page_addr >> 1); if (debug > 1) printf ("Programming page: %#x\n", avr->page_addr); if (page_bytes == 0) { /* Word mode. */ page_bytes = 256; cmd [1] = page_bytes >> 8; cmd [2] = page_bytes; cmd [3] = 4; /* mode */ cmd [6] = 0x20; /* Read Program Memory command */ cmd [8] = 0x7F; /* poll value 1 */ } memcpy (cmd+10, avr->page, page_bytes); if (! avr_send (avr, cmd, 10+page_bytes, response, 2) || response[0] != cmd[0]) { fprintf (stderr, "Program flash failed.\n"); exit (-1); } if (response[1] != STATUS_CMD_OK) printf ("Programming flash: timeout at %#x\n", avr->page_addr); usleep (avr->page_delay * 1000L); avr->page_addr_fetched = 0; avr->last_load_addr += page_bytes / 2; } unsigned char avr_read_byte (avr_t *avr, u_int32_t addr) { unsigned char cmd [4] = { CMD_READ_FLASH_ISP, 0, 2, 0x20 }; unsigned char response [5]; avr_load_address (avr, addr >> 1); if (! avr_send (avr, cmd, 4, response, 5) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK || response[4] != STATUS_CMD_OK) { fprintf (stderr, "Read byte failed.\n"); exit (-1); } return (addr & 1) ? response[3] : response[2]; } void avr_write_block (avr_t *avr, u_int32_t addr, unsigned char *buf, u_int32_t bytes) { unsigned short i; for (i=0; i<bytes; ++i) { avr_write_byte (avr, addr+i, buf[i]); } avr_flush_write_buffer (avr); } #if 0 static int avr_get_checksum (avr_t *avr, u_int32_t addr, u_int32_t bytes, unsigned short *sum) { unsigned char cmd [4] = { CMD_READ_FLASH_ISP | 0x80, 1, 0, 0x20 }; unsigned char response [4]; avr_load_address (avr, addr >> 1); if (debug > 1) printf ("Get checksum: %#x\n", addr); if (! avr_send (avr, cmd, 4, response, 4) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK) { return 0; } *sum = response[2] << 8 | response[3]; avr->last_load_addr += 256 / 2; return 1; } /* * Calculate a new sum given the current sum and the new data. * Use 0xffff as the initial sum value. * Do not forget to invert the final checksum value. */ static unsigned short crc16 (unsigned const char *buf, unsigned short len) { static const unsigned short poly_tab [16] = { 0x0000, 0xCC01, 0xD801, 0x1400, 0xF001, 0x3C00, 0x2800, 0xE401, 0xA001, 0x6C00, 0x7800, 0xB401, 0x5000, 0x9C01, 0x8801, 0x4400, }; unsigned short sum; sum = 0; for (; len--; ++buf) { /* compute checksum of lower four bits of *buf */ sum = ((sum >> 4) & 0x0FFF) ^ poly_tab [sum & 0xF] ^ poly_tab [*buf & 0xF]; /* now compute checksum of upper four bits of *buf */ sum = ((sum >> 4) & 0x0FFF) ^ poly_tab [sum & 0xF] ^ poly_tab [(*buf >> 4) & 0xF]; } return sum; } #endif int avr_check_block (avr_t *avr, u_int32_t addr, unsigned char *buf, u_int32_t bytes) { unsigned short i; unsigned char page [256]; #if 0 unsigned short sum, memsum; if (avr->have_checksum_cmd && avr_get_checksum (avr, addr, bytes, &memsum)) { sum = crc16 (buf, bytes); if (memsum != sum) { printf ("\nchecksum error at address %#x: file=%#x, mem=%#x\n", addr, sum, memsum); return 0; } return 1; } /* No checksum command available. */ avr->have_checksum_cmd = 0; #endif avr_read_block (avr, addr, page, 256); for (i=0; i<bytes; ++i) { if (page[i] != buf[i]) { printf ("\nerror at address %#x: file=%#x, mem=%#x\n", addr+i, buf[i], page[i]); return 0; } } return 1; } void avr_read_block (avr_t *avr, u_int32_t addr, unsigned char *buf, u_int32_t bytes) { unsigned char cmd [4] = { CMD_READ_FLASH_ISP, 1, 0, 0x20 }; unsigned char response [3+256]; avr_load_address (avr, addr >> 1); again: if (debug > 1) printf ("Read page: %#x\n", addr); if (! 
avr_send (avr, cmd, 4, response, 3+256) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK || response[2+256] != STATUS_CMD_OK) { fprintf (stderr, "Read page failed.\n"); exit (-1); } memcpy (buf, response+2, 256); bytes -= 256; if (bytes > 0) { addr += 256; buf += 256; goto again; } avr->last_load_addr += 256 / 2; } #if 0 void avr_lock (avr_t *avr) { unsigned char cmd [6] = { CMD_READ_FUSE_ISP, 0, 0xAC, 0xF9, 0x00, 0x00 }; unsigned char response [4]; if (debug) printf ("Writing lock bits...\n"); if (! avr_send (avr, cmd, 6, response, 4) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK || response[3] != STATUS_CMD_OK) { fprintf (stderr, "Chip lock failed.\n"); exit (-1); } } #endif unsigned char avr_have_fuse (avr_t *avr) { return avr->have_fuse; } #if 0 unsigned char avr_read_fuse (avr_t *avr) { unsigned char cmd [6] = { CMD_READ_FUSE_ISP, 4, 0x50, 0, 0, 0 }; unsigned char response [4]; if (! avr->have_fuse) return 0xff; if (! avr_send (avr, cmd, 6, response, 4) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK || response[3] != STATUS_CMD_OK) { fprintf (stderr, "Read fuse failed.\n"); exit (-1); } return response[2]; } unsigned char avr_read_fuse_high (avr_t *avr) { unsigned char cmd [6] = { CMD_READ_FUSE_ISP, 4, 0x58, 0x08, 0, 0 }; unsigned char response [4]; if (! avr->have_fuse) return 0xff; if (! avr_send (avr, cmd, 6, response, 4) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK || response[3] != STATUS_CMD_OK) { fprintf (stderr, "Read fuse failed.\n"); exit (-1); } return response[2]; } unsigned char avr_read_fuse_extended (avr_t *avr) { unsigned char cmd [6] = { CMD_READ_FUSE_ISP, 4, 0x50, 0x08, 0, 0 }; unsigned char response [4]; if (! avr->have_fuse) return 0xff; if (! avr_send (avr, cmd, 6, response, 4) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK || response[3] != STATUS_CMD_OK) { fprintf (stderr, "Read fuse failed.\n"); exit (-1); } return response[2]; } void avr_write_fuse (avr_t *avr, unsigned char val) { unsigned char cmd [5] = { CMD_PROGRAM_FUSE_ISP, 0xAC, 0xA0, 0, val }; unsigned char response [3]; if (! avr->have_fuse) return; if (! avr_send (avr, cmd, 5, response, 3) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK || response[2] != STATUS_CMD_OK) { fprintf (stderr, "Write fuse failed.\n"); exit (-1); } } void avr_write_fuse_high (avr_t *avr, unsigned char val) { unsigned char cmd [5] = { CMD_PROGRAM_FUSE_ISP, 0xAC, 0xA8, 0, val }; unsigned char response [3]; if (! avr->have_fuse) return; if (! avr_send (avr, cmd, 5, response, 3) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK || response[2] != STATUS_CMD_OK) { fprintf (stderr, "Write fuse failed.\n"); exit (-1); } } void avr_write_fuse_extended (avr_t *avr, unsigned char val) { unsigned char cmd [5] = { CMD_PROGRAM_FUSE_ISP, 0xAC, 0xA4, 0, val }; unsigned char response [3]; if (! avr->have_fuse) return; if (! avr_send (avr, cmd, 5, response, 3) || response[0] != cmd[0] || response[1] != STATUS_CMD_OK || response[2] != STATUS_CMD_OK) { fprintf (stderr, "Write fuse failed.\n"); exit (-1); } } #endif
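/*
 * A minimal usage sketch for the programmer interface above, assuming that
 * "avr.h" declares avr_open(), avr_name(), avr_flash_size(), avr_erase(),
 * avr_write_block(), avr_check_block() and avr_close() as defined in this
 * file. The serial device path and the 256-byte test image are placeholders.
 */
#include <stdio.h>
#include <string.h>
#include "avr.h"

int debug = 0;				/* referenced by the driver above */

int main(void)
{
	unsigned char image[256];
	avr_t *avr;

	memset(image, 0xff, sizeof(image));	/* dummy page of erased flash */

	avr = avr_open("/dev/ttyS0");		/* placeholder device name */
	printf("Detected %s, flash size %u bytes\n",
		avr_name(avr), (unsigned) avr_flash_size(avr));

	avr_erase(avr);
	avr_write_block(avr, 0, image, sizeof(image));
	if (! avr_check_block(avr, 0, image, sizeof(image)))
		fprintf(stderr, "verify failed\n");

	avr_close(avr);
	return 0;
}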
591457.c
/* * dcssblk.c -- the S/390 block driver for dcss memory * * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer */ #define KMSG_COMPONENT "dcssblk" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ctype.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/completion.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/pfn_t.h> #include <asm/extmem.h> #include <asm/io.h> #define DCSSBLK_NAME "dcssblk" #define DCSSBLK_MINORS_PER_DISK 1 #define DCSSBLK_PARM_LEN 400 #define DCSS_BUS_ID_SIZE 20 static int dcssblk_open(struct block_device *bdev, fmode_t mode); static void dcssblk_release(struct gendisk *disk, fmode_t mode); static blk_qc_t dcssblk_make_request(struct request_queue *q, struct bio *bio); static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum, void **kaddr, pfn_t *pfn, long size); static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; static int dcssblk_major; static const struct block_device_operations dcssblk_devops = { .owner = THIS_MODULE, .open = dcssblk_open, .release = dcssblk_release, .direct_access = dcssblk_direct_access, }; struct dcssblk_dev_info { struct list_head lh; struct device dev; char segment_name[DCSS_BUS_ID_SIZE]; atomic_t use_count; struct gendisk *gd; unsigned long start; unsigned long end; int segment_type; unsigned char save_pending; unsigned char is_shared; struct request_queue *dcssblk_queue; int num_of_segments; struct list_head seg_list; }; struct segment_info { struct list_head lh; char segment_name[DCSS_BUS_ID_SIZE]; unsigned long start; unsigned long end; int segment_type; }; static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count); static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count); static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store); static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store); static struct device *dcssblk_root_dev; static LIST_HEAD(dcssblk_devices); static struct rw_semaphore dcssblk_devices_sem; /* * release function for segment device. */ static void dcssblk_release_segment(struct device *dev) { struct dcssblk_dev_info *dev_info; struct segment_info *entry, *temp; dev_info = container_of(dev, struct dcssblk_dev_info, dev); list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) { list_del(&entry->lh); kfree(entry); } kfree(dev_info); module_put(THIS_MODULE); } /* * get a minor number. needs to be called with * down_write(&dcssblk_devices_sem) and the * device needs to be enqueued before the semaphore is * freed. */ static int dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info) { int minor, found; struct dcssblk_dev_info *entry; if (dev_info == NULL) return -EINVAL; for (minor = 0; minor < (1<<MINORBITS); minor++) { found = 0; // test if minor available list_for_each_entry(entry, &dcssblk_devices, lh) if (minor == entry->gd->first_minor) found++; if (!found) break; // got unused minor } if (found) return -EBUSY; dev_info->gd->first_minor = minor; return 0; } /* * get the struct dcssblk_dev_info from dcssblk_devices * for the given name. * down_read(&dcssblk_devices_sem) must be held. 
*/ static struct dcssblk_dev_info * dcssblk_get_device_by_name(char *name) { struct dcssblk_dev_info *entry; list_for_each_entry(entry, &dcssblk_devices, lh) { if (!strcmp(name, entry->segment_name)) { return entry; } } return NULL; } /* * get the struct segment_info from seg_list * for the given name. * down_read(&dcssblk_devices_sem) must be held. */ static struct segment_info * dcssblk_get_segment_by_name(char *name) { struct dcssblk_dev_info *dev_info; struct segment_info *entry; list_for_each_entry(dev_info, &dcssblk_devices, lh) { list_for_each_entry(entry, &dev_info->seg_list, lh) { if (!strcmp(name, entry->segment_name)) return entry; } } return NULL; } /* * get the highest address of the multi-segment block. */ static unsigned long dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info) { unsigned long highest_addr; struct segment_info *entry; highest_addr = 0; list_for_each_entry(entry, &dev_info->seg_list, lh) { if (highest_addr < entry->end) highest_addr = entry->end; } return highest_addr; } /* * get the lowest address of the multi-segment block. */ static unsigned long dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info) { int set_first; unsigned long lowest_addr; struct segment_info *entry; set_first = 0; lowest_addr = 0; list_for_each_entry(entry, &dev_info->seg_list, lh) { if (set_first == 0) { lowest_addr = entry->start; set_first = 1; } else { if (lowest_addr > entry->start) lowest_addr = entry->start; } } return lowest_addr; } /* * Check continuity of segments. */ static int dcssblk_is_continuous(struct dcssblk_dev_info *dev_info) { int i, j, rc; struct segment_info *sort_list, *entry, temp; if (dev_info->num_of_segments <= 1) return 0; sort_list = kzalloc( sizeof(struct segment_info) * dev_info->num_of_segments, GFP_KERNEL); if (sort_list == NULL) return -ENOMEM; i = 0; list_for_each_entry(entry, &dev_info->seg_list, lh) { memcpy(&sort_list[i], entry, sizeof(struct segment_info)); i++; } /* sort segments */ for (i = 0; i < dev_info->num_of_segments; i++) for (j = 0; j < dev_info->num_of_segments; j++) if (sort_list[j].start > sort_list[i].start) { memcpy(&temp, &sort_list[i], sizeof(struct segment_info)); memcpy(&sort_list[i], &sort_list[j], sizeof(struct segment_info)); memcpy(&sort_list[j], &temp, sizeof(struct segment_info)); } /* check continuity */ for (i = 0; i < dev_info->num_of_segments - 1; i++) { if ((sort_list[i].end + 1) != sort_list[i+1].start) { pr_err("Adjacent DCSSs %s and %s are not " "contiguous\n", sort_list[i].segment_name, sort_list[i+1].segment_name); rc = -EINVAL; goto out; } /* EN and EW are allowed in a block device */ if (sort_list[i].segment_type != sort_list[i+1].segment_type) { if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) || (sort_list[i].segment_type == SEG_TYPE_ER) || !(sort_list[i+1].segment_type & SEGMENT_EXCLUSIVE) || (sort_list[i+1].segment_type == SEG_TYPE_ER)) { pr_err("DCSS %s and DCSS %s have " "incompatible types\n", sort_list[i].segment_name, sort_list[i+1].segment_name); rc = -EINVAL; goto out; } } } rc = 0; out: kfree(sort_list); return rc; } /* * Load a segment */ static int dcssblk_load_segment(char *name, struct segment_info **seg_info) { int rc; /* already loaded? 
*/ down_read(&dcssblk_devices_sem); *seg_info = dcssblk_get_segment_by_name(name); up_read(&dcssblk_devices_sem); if (*seg_info != NULL) return -EEXIST; /* get a struct segment_info */ *seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL); if (*seg_info == NULL) return -ENOMEM; strcpy((*seg_info)->segment_name, name); /* load the segment */ rc = segment_load(name, SEGMENT_SHARED, &(*seg_info)->start, &(*seg_info)->end); if (rc < 0) { segment_warning(rc, (*seg_info)->segment_name); kfree(*seg_info); } else { INIT_LIST_HEAD(&(*seg_info)->lh); (*seg_info)->segment_type = rc; } return rc; } /* * device attribute for switching shared/nonshared (exclusive) * operation (show + store) */ static ssize_t dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf) { struct dcssblk_dev_info *dev_info; dev_info = container_of(dev, struct dcssblk_dev_info, dev); return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n"); } static ssize_t dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count) { struct dcssblk_dev_info *dev_info; struct segment_info *entry, *temp; int rc; if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) return -EINVAL; down_write(&dcssblk_devices_sem); dev_info = container_of(dev, struct dcssblk_dev_info, dev); if (atomic_read(&dev_info->use_count)) { rc = -EBUSY; goto out; } if (inbuf[0] == '1') { /* reload segments in shared mode */ list_for_each_entry(entry, &dev_info->seg_list, lh) { rc = segment_modify_shared(entry->segment_name, SEGMENT_SHARED); if (rc < 0) { BUG_ON(rc == -EINVAL); if (rc != -EAGAIN) goto removeseg; } } dev_info->is_shared = 1; switch (dev_info->segment_type) { case SEG_TYPE_SR: case SEG_TYPE_ER: case SEG_TYPE_SC: set_disk_ro(dev_info->gd, 1); } } else if (inbuf[0] == '0') { /* reload segments in exclusive mode */ if (dev_info->segment_type == SEG_TYPE_SC) { pr_err("DCSS %s is of type SC and cannot be " "loaded as exclusive-writable\n", dev_info->segment_name); rc = -EINVAL; goto out; } list_for_each_entry(entry, &dev_info->seg_list, lh) { rc = segment_modify_shared(entry->segment_name, SEGMENT_EXCLUSIVE); if (rc < 0) { BUG_ON(rc == -EINVAL); if (rc != -EAGAIN) goto removeseg; } } dev_info->is_shared = 0; set_disk_ro(dev_info->gd, 0); } else { rc = -EINVAL; goto out; } rc = count; goto out; removeseg: pr_err("DCSS device %s is removed after a failed access mode " "change\n", dev_info->segment_name); temp = entry; list_for_each_entry(entry, &dev_info->seg_list, lh) { if (entry != temp) segment_unload(entry->segment_name); } list_del(&dev_info->lh); del_gendisk(dev_info->gd); blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; put_disk(dev_info->gd); up_write(&dcssblk_devices_sem); if (device_remove_file_self(dev, attr)) { device_unregister(dev); put_device(dev); } return rc; out: up_write(&dcssblk_devices_sem); return rc; } static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show, dcssblk_shared_store); /* * device attribute for save operation on current copy * of the segment. If the segment is busy, saving will * become pending until it gets released, which can be * undone by storing a non-true value to this entry. * (show + store) */ static ssize_t dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf) { struct dcssblk_dev_info *dev_info; dev_info = container_of(dev, struct dcssblk_dev_info, dev); return sprintf(buf, dev_info->save_pending ? 
"1\n" : "0\n"); } static ssize_t dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count) { struct dcssblk_dev_info *dev_info; struct segment_info *entry; if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0')) return -EINVAL; dev_info = container_of(dev, struct dcssblk_dev_info, dev); down_write(&dcssblk_devices_sem); if (inbuf[0] == '1') { if (atomic_read(&dev_info->use_count) == 0) { // device is idle => we save immediately pr_info("All DCSSs that map to device %s are " "saved\n", dev_info->segment_name); list_for_each_entry(entry, &dev_info->seg_list, lh) { if (entry->segment_type == SEG_TYPE_EN || entry->segment_type == SEG_TYPE_SN) pr_warn("DCSS %s is of type SN or EN" " and cannot be saved\n", entry->segment_name); else segment_save(entry->segment_name); } } else { // device is busy => we save it when it becomes // idle in dcssblk_release pr_info("Device %s is in use, its DCSSs will be " "saved when it becomes idle\n", dev_info->segment_name); dev_info->save_pending = 1; } } else if (inbuf[0] == '0') { if (dev_info->save_pending) { // device is busy & the user wants to undo his save // request dev_info->save_pending = 0; pr_info("A pending save request for device %s " "has been canceled\n", dev_info->segment_name); } } else { up_write(&dcssblk_devices_sem); return -EINVAL; } up_write(&dcssblk_devices_sem); return count; } static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show, dcssblk_save_store); /* * device attribute for showing all segments in a device */ static ssize_t dcssblk_seglist_show(struct device *dev, struct device_attribute *attr, char *buf) { int i; struct dcssblk_dev_info *dev_info; struct segment_info *entry; down_read(&dcssblk_devices_sem); dev_info = container_of(dev, struct dcssblk_dev_info, dev); i = 0; buf[0] = '\0'; list_for_each_entry(entry, &dev_info->seg_list, lh) { strcpy(&buf[i], entry->segment_name); i += strlen(entry->segment_name); buf[i] = '\n'; i++; } up_read(&dcssblk_devices_sem); return i; } static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL); static struct attribute *dcssblk_dev_attrs[] = { &dev_attr_shared.attr, &dev_attr_save.attr, &dev_attr_seglist.attr, NULL, }; static struct attribute_group dcssblk_dev_attr_group = { .attrs = dcssblk_dev_attrs, }; static const struct attribute_group *dcssblk_dev_attr_groups[] = { &dcssblk_dev_attr_group, NULL, }; /* * device attribute for adding devices */ static ssize_t dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int rc, i, j, num_of_segments; struct dcssblk_dev_info *dev_info; struct segment_info *seg_info, *temp; char *local_buf; unsigned long seg_byte_size; dev_info = NULL; seg_info = NULL; if (dev != dcssblk_root_dev) { rc = -EINVAL; goto out_nobuf; } if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) { rc = -ENAMETOOLONG; goto out_nobuf; } local_buf = kmalloc(count + 1, GFP_KERNEL); if (local_buf == NULL) { rc = -ENOMEM; goto out_nobuf; } /* * parse input */ num_of_segments = 0; for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) { for (j = i; j < count && (buf[j] != ':') && (buf[j] != '\0') && (buf[j] != '\n'); j++) { local_buf[j-i] = toupper(buf[j]); } local_buf[j-i] = '\0'; if (((j - i) == 0) || ((j - i) > 8)) { rc = -ENAMETOOLONG; goto seg_list_del; } rc = dcssblk_load_segment(local_buf, &seg_info); if (rc < 0) goto seg_list_del; /* * get a struct dcssblk_dev_info */ if (num_of_segments == 0) { dev_info = kzalloc(sizeof(struct dcssblk_dev_info), 
GFP_KERNEL); if (dev_info == NULL) { rc = -ENOMEM; goto out; } strcpy(dev_info->segment_name, local_buf); dev_info->segment_type = seg_info->segment_type; INIT_LIST_HEAD(&dev_info->seg_list); } list_add_tail(&seg_info->lh, &dev_info->seg_list); num_of_segments++; i = j; if ((buf[j] == '\0') || (buf[j] == '\n')) break; } /* no trailing colon at the end of the input */ if ((i > 0) && (buf[i-1] == ':')) { rc = -ENAMETOOLONG; goto seg_list_del; } strlcpy(local_buf, buf, i + 1); dev_info->num_of_segments = num_of_segments; rc = dcssblk_is_continuous(dev_info); if (rc < 0) goto seg_list_del; dev_info->start = dcssblk_find_lowest_addr(dev_info); dev_info->end = dcssblk_find_highest_addr(dev_info); dev_set_name(&dev_info->dev, "%s", dev_info->segment_name); dev_info->dev.release = dcssblk_release_segment; dev_info->dev.groups = dcssblk_dev_attr_groups; INIT_LIST_HEAD(&dev_info->lh); dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK); if (dev_info->gd == NULL) { rc = -ENOMEM; goto seg_list_del; } dev_info->gd->major = dcssblk_major; dev_info->gd->fops = &dcssblk_devops; dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL); dev_info->gd->queue = dev_info->dcssblk_queue; dev_info->gd->private_data = dev_info; blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue); seg_byte_size = (dev_info->end - dev_info->start + 1); set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors pr_info("Loaded %s with total size %lu bytes and capacity %lu " "sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9); dev_info->save_pending = 0; dev_info->is_shared = 1; dev_info->dev.parent = dcssblk_root_dev; /* *get minor, add to list */ down_write(&dcssblk_devices_sem); if (dcssblk_get_segment_by_name(local_buf)) { rc = -EEXIST; goto release_gd; } rc = dcssblk_assign_free_minor(dev_info); if (rc) goto release_gd; sprintf(dev_info->gd->disk_name, "dcssblk%d", dev_info->gd->first_minor); list_add_tail(&dev_info->lh, &dcssblk_devices); if (!try_module_get(THIS_MODULE)) { rc = -ENODEV; goto dev_list_del; } /* * register the device */ rc = device_register(&dev_info->dev); if (rc) goto put_dev; get_device(&dev_info->dev); device_add_disk(&dev_info->dev, dev_info->gd); switch (dev_info->segment_type) { case SEG_TYPE_SR: case SEG_TYPE_ER: case SEG_TYPE_SC: set_disk_ro(dev_info->gd,1); break; default: set_disk_ro(dev_info->gd,0); break; } up_write(&dcssblk_devices_sem); rc = count; goto out; put_dev: list_del(&dev_info->lh); blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; put_disk(dev_info->gd); list_for_each_entry(seg_info, &dev_info->seg_list, lh) { segment_unload(seg_info->segment_name); } put_device(&dev_info->dev); up_write(&dcssblk_devices_sem); goto out; dev_list_del: list_del(&dev_info->lh); release_gd: blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; put_disk(dev_info->gd); up_write(&dcssblk_devices_sem); seg_list_del: if (dev_info == NULL) goto out; list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) { list_del(&seg_info->lh); segment_unload(seg_info->segment_name); kfree(seg_info); } kfree(dev_info); out: kfree(local_buf); out_nobuf: return rc; } /* * device attribute for removing devices */ static ssize_t dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct dcssblk_dev_info *dev_info; struct segment_info *entry; int rc, i; char *local_buf; if (dev 
!= dcssblk_root_dev) { return -EINVAL; } local_buf = kmalloc(count + 1, GFP_KERNEL); if (local_buf == NULL) { return -ENOMEM; } /* * parse input */ for (i = 0; (i < count && (*(buf+i)!='\0') && (*(buf+i)!='\n')); i++) { local_buf[i] = toupper(buf[i]); } local_buf[i] = '\0'; if ((i == 0) || (i > 8)) { rc = -ENAMETOOLONG; goto out_buf; } down_write(&dcssblk_devices_sem); dev_info = dcssblk_get_device_by_name(local_buf); if (dev_info == NULL) { up_write(&dcssblk_devices_sem); pr_warn("Device %s cannot be removed because it is not a known device\n", local_buf); rc = -ENODEV; goto out_buf; } if (atomic_read(&dev_info->use_count) != 0) { up_write(&dcssblk_devices_sem); pr_warn("Device %s cannot be removed while it is in use\n", local_buf); rc = -EBUSY; goto out_buf; } list_del(&dev_info->lh); del_gendisk(dev_info->gd); blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; put_disk(dev_info->gd); /* unload all related segments */ list_for_each_entry(entry, &dev_info->seg_list, lh) segment_unload(entry->segment_name); up_write(&dcssblk_devices_sem); device_unregister(&dev_info->dev); put_device(&dev_info->dev); rc = count; out_buf: kfree(local_buf); return rc; } static int dcssblk_open(struct block_device *bdev, fmode_t mode) { struct dcssblk_dev_info *dev_info; int rc; dev_info = bdev->bd_disk->private_data; if (NULL == dev_info) { rc = -ENODEV; goto out; } atomic_inc(&dev_info->use_count); bdev->bd_block_size = 4096; rc = 0; out: return rc; } static void dcssblk_release(struct gendisk *disk, fmode_t mode) { struct dcssblk_dev_info *dev_info = disk->private_data; struct segment_info *entry; if (!dev_info) { WARN_ON(1); return; } down_write(&dcssblk_devices_sem); if (atomic_dec_and_test(&dev_info->use_count) && (dev_info->save_pending)) { pr_info("Device %s has become idle and is being saved " "now\n", dev_info->segment_name); list_for_each_entry(entry, &dev_info->seg_list, lh) { if (entry->segment_type == SEG_TYPE_EN || entry->segment_type == SEG_TYPE_SN) pr_warn("DCSS %s is of type SN or EN and cannot" " be saved\n", entry->segment_name); else segment_save(entry->segment_name); } dev_info->save_pending = 0; } up_write(&dcssblk_devices_sem); } static blk_qc_t dcssblk_make_request(struct request_queue *q, struct bio *bio) { struct dcssblk_dev_info *dev_info; struct bio_vec bvec; struct bvec_iter iter; unsigned long index; unsigned long page_addr; unsigned long source_addr; unsigned long bytes_done; blk_queue_split(q, &bio, q->bio_split); bytes_done = 0; dev_info = bio->bi_bdev->bd_disk->private_data; if (dev_info == NULL) goto fail; if ((bio->bi_iter.bi_sector & 7) != 0 || (bio->bi_iter.bi_size & 4095) != 0) /* Request is not page-aligned. */ goto fail; if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { /* Request beyond end of DCSS segment. */ goto fail; } /* verify data transfer direction */ if (dev_info->is_shared) { switch (dev_info->segment_type) { case SEG_TYPE_SR: case SEG_TYPE_ER: case SEG_TYPE_SC: /* cannot write to these segments */ if (bio_data_dir(bio) == WRITE) { pr_warn("Writing to %s failed because it is a read-only device\n", dev_name(&dev_info->dev)); goto fail; } } } index = (bio->bi_iter.bi_sector >> 3); bio_for_each_segment(bvec, bio, iter) { page_addr = (unsigned long) page_address(bvec.bv_page) + bvec.bv_offset; source_addr = dev_info->start + (index<<12) + bytes_done; if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0) // More paranoia. 
goto fail; if (bio_data_dir(bio) == READ) { memcpy((void*)page_addr, (void*)source_addr, bvec.bv_len); } else { memcpy((void*)source_addr, (void*)page_addr, bvec.bv_len); } bytes_done += bvec.bv_len; } bio_endio(bio); return BLK_QC_T_NONE; fail: bio_io_error(bio); return BLK_QC_T_NONE; } static long dcssblk_direct_access (struct block_device *bdev, sector_t secnum, void **kaddr, pfn_t *pfn, long size) { struct dcssblk_dev_info *dev_info; unsigned long offset, dev_sz; dev_info = bdev->bd_disk->private_data; if (!dev_info) return -ENODEV; dev_sz = dev_info->end - dev_info->start; offset = secnum * 512; *kaddr = (void *) dev_info->start + offset; *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), PFN_DEV); return dev_sz - offset; } static void dcssblk_check_params(void) { int rc, i, j, k; char buf[DCSSBLK_PARM_LEN + 1]; struct dcssblk_dev_info *dev_info; for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0'); i++) { for (j = i; (j < DCSSBLK_PARM_LEN) && (dcssblk_segments[j] != ',') && (dcssblk_segments[j] != '\0') && (dcssblk_segments[j] != '('); j++) { buf[j-i] = dcssblk_segments[j]; } buf[j-i] = '\0'; rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i); if ((rc >= 0) && (dcssblk_segments[j] == '(')) { for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++) buf[k] = toupper(buf[k]); buf[k] = '\0'; if (!strncmp(&dcssblk_segments[j], "(local)", 7)) { down_read(&dcssblk_devices_sem); dev_info = dcssblk_get_device_by_name(buf); up_read(&dcssblk_devices_sem); if (dev_info) dcssblk_shared_store(&dev_info->dev, NULL, "0\n", 2); } } while ((dcssblk_segments[j] != ',') && (dcssblk_segments[j] != '\0')) { j++; } if (dcssblk_segments[j] == '\0') break; i = j; } } /* * Suspend / Resume */ static int dcssblk_freeze(struct device *dev) { struct dcssblk_dev_info *dev_info; int rc = 0; list_for_each_entry(dev_info, &dcssblk_devices, lh) { switch (dev_info->segment_type) { case SEG_TYPE_SR: case SEG_TYPE_ER: case SEG_TYPE_SC: if (!dev_info->is_shared) rc = -EINVAL; break; default: rc = -EINVAL; break; } if (rc) break; } if (rc) pr_err("Suspending the system failed because DCSS device %s " "is writable\n", dev_info->segment_name); return rc; } static int dcssblk_restore(struct device *dev) { struct dcssblk_dev_info *dev_info; struct segment_info *entry; unsigned long start, end; int rc = 0; list_for_each_entry(dev_info, &dcssblk_devices, lh) { list_for_each_entry(entry, &dev_info->seg_list, lh) { segment_unload(entry->segment_name); rc = segment_load(entry->segment_name, SEGMENT_SHARED, &start, &end); if (rc < 0) { // TODO in_use check ? segment_warning(rc, entry->segment_name); goto out_panic; } if (start != entry->start || end != entry->end) { pr_err("The address range of DCSS %s changed " "while the system was suspended\n", entry->segment_name); goto out_panic; } } } return 0; out_panic: panic("fatal dcssblk resume error\n"); } static int dcssblk_thaw(struct device *dev) { return 0; } static const struct dev_pm_ops dcssblk_pm_ops = { .freeze = dcssblk_freeze, .thaw = dcssblk_thaw, .restore = dcssblk_restore, }; static struct platform_driver dcssblk_pdrv = { .driver = { .name = "dcssblk", .pm = &dcssblk_pm_ops, }, }; static struct platform_device *dcssblk_pdev; /* * The init/exit functions. 
*/ static void __exit dcssblk_exit(void) { platform_device_unregister(dcssblk_pdev); platform_driver_unregister(&dcssblk_pdrv); root_device_unregister(dcssblk_root_dev); unregister_blkdev(dcssblk_major, DCSSBLK_NAME); } static int __init dcssblk_init(void) { int rc; rc = platform_driver_register(&dcssblk_pdrv); if (rc) return rc; dcssblk_pdev = platform_device_register_simple("dcssblk", -1, NULL, 0); if (IS_ERR(dcssblk_pdev)) { rc = PTR_ERR(dcssblk_pdev); goto out_pdrv; } dcssblk_root_dev = root_device_register("dcssblk"); if (IS_ERR(dcssblk_root_dev)) { rc = PTR_ERR(dcssblk_root_dev); goto out_pdev; } rc = device_create_file(dcssblk_root_dev, &dev_attr_add); if (rc) goto out_root; rc = device_create_file(dcssblk_root_dev, &dev_attr_remove); if (rc) goto out_root; rc = register_blkdev(0, DCSSBLK_NAME); if (rc < 0) goto out_root; dcssblk_major = rc; init_rwsem(&dcssblk_devices_sem); dcssblk_check_params(); return 0; out_root: root_device_unregister(dcssblk_root_dev); out_pdev: platform_device_unregister(dcssblk_pdev); out_pdrv: platform_driver_unregister(&dcssblk_pdrv); return rc; } module_init(dcssblk_init); module_exit(dcssblk_exit); module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444); MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, " "comma-separated list, names in each set separated " "by commas are separated by colons, each set contains " "names of contiguous segments and each name max. 8 chars.\n" "Adding \"(local)\" to the end of each set equals echoing 0 " "to /sys/devices/dcssblk/<device name>/shared after loading " "the contiguous segments - \n" "e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\""); MODULE_LICENSE("GPL");
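The request path above maps 512-byte sector numbers onto byte offsets inside the loaded DCSS: dcssblk_make_request rejects anything that is not 4 KiB-aligned (`bi_sector & 7`, `bi_size & 4095`) and then computes `source_addr = dev_info->start + ((sector >> 3) << 12) + bytes_done`. Below is a minimal stand-alone sketch of that arithmetic — not part of the driver — where `DCSS_START` is a hypothetical stand-in for `dev_info->start`.

```c
#include <assert.h>
#include <stdio.h>

/* Hypothetical segment base address standing in for dev_info->start. */
#define DCSS_START 0x20000000UL

static unsigned long dcss_map_sector(unsigned long start, unsigned long sector,
				     unsigned long bytes_done)
{
	/* Same alignment rule as dcssblk_make_request: 8 sectors per 4 KiB page. */
	assert((sector & 7) == 0);
	return start + ((sector >> 3) << 12) + bytes_done;
}

int main(void)
{
	/* Sector 16 = third 4 KiB page of the segment = base + 0x2000. */
	unsigned long addr = dcss_map_sector(DCSS_START, 16, 0);
	printf("source_addr = 0x%lx\n", addr);
	return 0;
}
```

Sector 16 shifts down to page index 2 and back up to byte offset 0x2000, matching the `index << 12` step the driver uses before memcpy'ing between the bio page and the segment.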
350129.c
/* * ***** BEGIN GPL LICENSE BLOCK ***** * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * The Original Code is Copyright (C) 2005 Blender Foundation * All rights reserved. * * The Original Code is: all of this file. * * Contributor(s): Austin Benesh. Ton Roosendaal. * * ***** END GPL LICENSE BLOCK ***** */ /** \file blender/imbuf/intern/metadata.c * \ingroup imbuf */ #include <stdlib.h> #include <string.h> #include "BLI_utildefines.h" #include "BLI_string.h" #include "BKE_idprop.h" #include "MEM_guardedalloc.h" #include "IMB_imbuf_types.h" #include "IMB_imbuf.h" #include "IMB_metadata.h" void IMB_metadata_free(struct ImBuf *img) { if (!img) return; if (!img->metadata) { return; } IDP_FreeProperty(img->metadata); MEM_freeN(img->metadata); } bool IMB_metadata_get_field(struct ImBuf *img, const char *key, char *field, const size_t len) { IDProperty *prop; bool retval = false; if (!img) return false; if (!img->metadata) return false; prop = IDP_GetPropertyFromGroup(img->metadata, key); if (prop && prop->type == IDP_STRING) { BLI_strncpy(field, IDP_String(prop), len); retval = true; } return retval; } void IMB_metadata_copy(struct ImBuf *dimb, struct ImBuf *simb) { BLI_assert(dimb != simb); if (simb->metadata) { IMB_metadata_free(dimb); dimb->metadata = IDP_CopyProperty(simb->metadata); } } bool IMB_metadata_add_field(struct ImBuf *img, const char *key, const char *value) { IDProperty *prop; if (!img) return false; if (!img->metadata) { IDPropertyTemplate val; img->metadata = IDP_New(IDP_GROUP, &val, "metadata"); } prop = IDP_NewString(value, key, 512); return IDP_AddToGroup(img->metadata, prop); } bool IMB_metadata_del_field(struct ImBuf *img, const char *key) { IDProperty *prop; if ((!img) || (!img->metadata)) return false; prop = IDP_GetPropertyFromGroup(img->metadata, key); if (prop) { IDP_FreeFromGroup(img->metadata, prop); } return false; } bool IMB_metadata_change_field(struct ImBuf *img, const char *key, const char *field) { IDProperty *prop; if (!img) return false; prop = (img->metadata) ? IDP_GetPropertyFromGroup(img->metadata, key) : NULL; if (!prop) { return (IMB_metadata_add_field(img, key, field)); } else if (prop->type == IDP_STRING) { IDP_AssignString(prop, field, 1024); return true; } else { return false; } }
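A hedged usage sketch of the metadata helpers defined above, not taken from the Blender sources: `IMB_metadata_change_field()` falls back to `IMB_metadata_add_field()` when the key is absent and reassigns the string property otherwise. It assumes `IMB_allocImBuf()`/`IMB_freeImBuf()` from IMB_imbuf.h are available for a throw-away buffer; the key and value strings are illustrative only.

```c
#include <stdio.h>

#include "IMB_imbuf.h"        /* assumed: IMB_allocImBuf(), IMB_freeImBuf() */
#include "IMB_imbuf_types.h"
#include "IMB_metadata.h"

static void metadata_demo(void)
{
	char value[64];
	struct ImBuf *ibuf = IMB_allocImBuf(4, 4, 32, 0);  /* tiny scratch buffer */

	if (ibuf == NULL)
		return;

	/* First call adds the key, second call reassigns the existing property. */
	IMB_metadata_change_field(ibuf, "Note", "first");
	IMB_metadata_change_field(ibuf, "Note", "second");

	if (IMB_metadata_get_field(ibuf, "Note", value, sizeof(value)))
		printf("Note = %s\n", value);  /* expected: "second" */

	IMB_metadata_del_field(ibuf, "Note");
	IMB_freeImBuf(ibuf);
}
```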
478164.c
/* * Copyright (c) 2016 Intel Corporation * * SPDX-License-Identifier: Apache-2.0 */ #include <ztest.h> #include "test_mslab.h" /** TESTPOINT: Statically define and initialize a memory slab*/ K_MEM_SLAB_DEFINE(kmslab, BLK_SIZE, BLK_NUM, BLK_ALIGN); static char __aligned(BLK_ALIGN) tslab[BLK_SIZE * BLK_NUM]; static struct k_mem_slab mslab; void tmslab_alloc_free(void *data) { struct k_mem_slab *pslab = (struct k_mem_slab *)data; void *block[BLK_NUM]; memset(block, 0, sizeof(block)); /** * TESTPOINT: The memory slab's buffer contains @a slab_num_blocks * memory blocks that are @a slab_block_size bytes long. */ for (int i = 0; i < BLK_NUM; i++) { /** TESTPOINT: Allocate memory from a memory slab.*/ /** TESTPOINT: @retval 0 Memory allocated.*/ zassert_true(k_mem_slab_alloc(pslab, &block[i], K_NO_WAIT) == 0, NULL); /** * TESTPOINT: The block address area pointed at by @a mem is set * to the starting address of the memory block. */ zassert_not_null(block[i], NULL); } for (int i = 0; i < BLK_NUM; i++) { /** TESTPOINT: Free memory allocated from a memory slab.*/ k_mem_slab_free(pslab, &block[i]); } } static void tmslab_alloc_align(void *data) { struct k_mem_slab *pslab = (struct k_mem_slab *)data; void *block[BLK_NUM]; for (int i = 0; i < BLK_NUM; i++) { zassert_true(k_mem_slab_alloc(pslab, &block[i], K_NO_WAIT) == 0, NULL); /** * TESTPOINT: To ensure that each memory block is similarly * aligned to this boundary */ zassert_true((u32_t)block[i] % BLK_ALIGN == 0, NULL); } for (int i = 0; i < BLK_NUM; i++) { k_mem_slab_free(pslab, &block[i]); } } static void tmslab_alloc_timeout(void *data) { struct k_mem_slab *pslab = (struct k_mem_slab *)data; void *block[BLK_NUM], *block_fail; s64_t tms; for (int i = 0; i < BLK_NUM; i++) { zassert_true(k_mem_slab_alloc(pslab, &block[i], K_NO_WAIT) == 0, NULL); } /** TESTPOINT: Use K_NO_WAIT to return without waiting*/ /** TESTPOINT: -ENOMEM Returned without waiting.*/ zassert_equal(k_mem_slab_alloc(pslab, &block_fail, K_NO_WAIT), -ENOMEM, NULL); /** TESTPOINT: -EAGAIN Waiting period timed out*/ tms = k_uptime_get(); zassert_equal(k_mem_slab_alloc(pslab, &block_fail, TIMEOUT), -EAGAIN, NULL); /** * TESTPOINT: timeout Maximum time to wait for operation to * complete (in milliseconds) */ zassert_true(k_uptime_delta(&tms) >= TIMEOUT, NULL); for (int i = 0; i < BLK_NUM; i++) { k_mem_slab_free(pslab, &block[i]); } } static void tmslab_used_get(void *data) { struct k_mem_slab *pslab = (struct k_mem_slab *)data; void *block[BLK_NUM], *block_fail; for (int i = 0; i < BLK_NUM; i++) { zassert_true(k_mem_slab_alloc(pslab, &block[i], K_NO_WAIT) == 0, NULL); /** TESTPOINT: Get the number of used blocks in a memory slab.*/ zassert_equal(k_mem_slab_num_used_get(pslab), i + 1, NULL); /** * TESTPOINT: Get the number of unused blocks in a memory slab. 
*/ zassert_equal(k_mem_slab_num_free_get(pslab), BLK_NUM - 1 - i, NULL); } zassert_equal(k_mem_slab_alloc(pslab, &block_fail, K_NO_WAIT), -ENOMEM, NULL); /* free get on allocation failure*/ zassert_equal(k_mem_slab_num_free_get(pslab), 0, NULL); /* used get on allocation failure*/ zassert_equal(k_mem_slab_num_used_get(pslab), BLK_NUM, NULL); zassert_equal(k_mem_slab_alloc(pslab, &block_fail, TIMEOUT), -EAGAIN, NULL); zassert_equal(k_mem_slab_num_free_get(pslab), 0, NULL); zassert_equal(k_mem_slab_num_used_get(pslab), BLK_NUM, NULL); for (int i = 0; i < BLK_NUM; i++) { k_mem_slab_free(pslab, &block[i]); zassert_equal(k_mem_slab_num_free_get(pslab), i + 1, NULL); zassert_equal(k_mem_slab_num_used_get(pslab), BLK_NUM - 1 - i, NULL); } } /*test cases*/ /** * @brief Initialize the memory slab using k_mem_slab_init() * and allocates/frees blocks. * * @details Initialize 3 memory blocks of block size 8 bytes * using @see k_mem_slab_init() and check if number of used blocks * is 0 and free blocks is equal to number of blocks initialized. */ void test_mslab_kinit(void) { k_mem_slab_init(&mslab, tslab, BLK_SIZE, BLK_NUM); zassert_equal(k_mem_slab_num_used_get(&mslab), 0, NULL); zassert_equal(k_mem_slab_num_free_get(&mslab), BLK_NUM, NULL); } /** * @brief Verify K_MEM_SLAB_DEFINE() with allocates/frees blocks. * * @details Initialize 3 memory blocks of block size 8 bytes * using @see K_MEM_SLAB_DEFINE() and check if number of used blocks * is 0 and free blocks is equal to number of blocks initialized. */ void test_mslab_kdefine(void) { zassert_equal(k_mem_slab_num_used_get(&kmslab), 0, NULL); zassert_equal(k_mem_slab_num_free_get(&kmslab), BLK_NUM, NULL); } /** * @brief Verify alloc and free of blocks from mem_slab * */ void test_mslab_alloc_free_thread(void) { tmslab_alloc_free(&mslab); } /** * @brief Allocate memory blocks and check for alignment of 8 bytes * * @details Allocate 3 blocks of memory from 2 memory slabs * respectively and check if all blocks are aligned to 8 bytes * and free them. */ void test_mslab_alloc_align(void) { tmslab_alloc_align(&mslab); tmslab_alloc_align(&kmslab); } /** * @brief Verify allocation of memory blocks with timeouts * * @details Allocate 3 memory blocks from memory slab. Check * allocation of another memory block with NO_WAIT set, since * there are no blocks left to allocate in the memory slab, * the allocation fails with return value -ENOMEM. Then the * system up time is obtained, memory block allocation is * tried with timeout of 2000 ms. Now the allocation API * returns -EAGAIN as the waiting period is timeout. The * test case also checks if timeout has really happened by * checking delta period between the allocation request * was made and return of -EAGAIN. */ void test_mslab_alloc_timeout(void) { tmslab_alloc_timeout(&mslab); } /** * @brief Verify count of allocated blocks * * @details The test case allocates 3 blocks one after the * other by checking for used block and free blocks in the * memory slab - mslab. Once all 3 blocks are allocated, * one more block is tried to allocates, which fails with * return value -ENOMEM. It also checks the allocation with * timeout. Again checks for used block and free blocks * number using @see k_mem_slab_num_used_get() and * @see k_mem_slab_num_free_get(). */ void test_mslab_used_get(void) { tmslab_used_get(&mslab); tmslab_used_get(&kmslab); }
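A minimal stand-alone sketch of the slab API the tests above exercise, assuming the same Zephyr release as the test (where `k_mem_slab_free()` takes the address of the block pointer, as seen in `tmslab_alloc_free()`). The slab name, block size, count, and alignment are illustrative, not taken from the test configuration.

```c
#include <zephyr.h>

/* 4 blocks of 8 bytes, 8-byte aligned — arbitrary demo values. */
K_MEM_SLAB_DEFINE(demo_slab, 8, 4, 8);

void demo(void)
{
	void *block;

	/* Returns 0 on success; -ENOMEM immediately when K_NO_WAIT and the slab is full. */
	if (k_mem_slab_alloc(&demo_slab, &block, K_NO_WAIT) == 0) {
		/* ... use the 8-byte block ... */
		k_mem_slab_free(&demo_slab, &block);
	}
}
```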