sync smart & dfs (#8672)

Signed-off-by: xqyjlj <xqyjlj@126.com>
Signed-off-by: Shell <smokewood@qq.com>
Co-authored-by: xqyjlj <xqyjlj@126.com>
Committed by Shell on 2024-03-28 23:42:56 +08:00 (via GitHub)
parent 40e26f4909
commit 83e95bdff4
131 changed files with 14954 additions and 6478 deletions

View File

@ -81,7 +81,7 @@ CONFIG_RT_USING_CONSOLE=y
CONFIG_RT_CONSOLEBUF_SIZE=256
CONFIG_RT_CONSOLE_DEVICE_NAME="uart0"
CONFIG_RT_VER_NUM=0x50100
# CONFIG_RT_USING_STDC_ATOMIC is not set
CONFIG_RT_USING_STDC_ATOMIC=y
CONFIG_RT_BACKTRACE_LEVEL_MAX_NR=32
CONFIG_RT_USING_CACHE=y
CONFIG_RT_USING_HW_ATOMIC=y
@ -203,7 +203,7 @@ CONFIG_RT_USING_SOFT_RTC=y
CONFIG_RT_USING_SDIO=y
CONFIG_RT_SDIO_STACK_SIZE=4096
CONFIG_RT_SDIO_THREAD_PRIORITY=15
CONFIG_RT_MMCSD_STACK_SIZE=4096
CONFIG_RT_MMCSD_STACK_SIZE=16384
CONFIG_RT_MMCSD_THREAD_PREORITY=22
CONFIG_RT_MMCSD_MAX_PARTITION=16
# CONFIG_RT_SDIO_DEBUG is not set

View File

@ -54,6 +54,7 @@
#define RT_CONSOLEBUF_SIZE 256
#define RT_CONSOLE_DEVICE_NAME "uart0"
#define RT_VER_NUM 0x50100
#define RT_USING_STDC_ATOMIC
#define RT_BACKTRACE_LEVEL_MAX_NR 32
#define RT_USING_CACHE
#define RT_USING_HW_ATOMIC
@ -138,7 +139,7 @@
#define RT_USING_SDIO
#define RT_SDIO_STACK_SIZE 4096
#define RT_SDIO_THREAD_PRIORITY 15
#define RT_MMCSD_STACK_SIZE 4096
#define RT_MMCSD_STACK_SIZE 16384
#define RT_MMCSD_THREAD_PREORITY 22
#define RT_MMCSD_MAX_PARTITION 16
#define RT_USING_SPI

View File

@ -10,10 +10,6 @@
#include <rtthread.h>
#if defined(RT_USING_POSIX_DEVIO) && defined(RT_USING_SMART)
#include <console.h>
#endif
#include <virtio_console.h>
static int console_init()
@ -36,8 +32,6 @@ static int console_init()
}
INIT_ENV_EXPORT(console_init);
#ifdef FINSH_USING_MSH
static int console(int argc, char **argv)
{
rt_err_t result = RT_EOK;
@ -48,23 +42,6 @@ static int console(int argc, char **argv)
{
rt_kprintf("console change to %s\n", argv[2]);
rt_console_set_device(argv[2]);
#ifdef RT_USING_POSIX_DEVIO
{
rt_device_t dev = rt_device_find(argv[2]);
if (dev != RT_NULL)
{
#ifdef RT_USING_SMART
console_set_iodev(dev);
#else
rt_kprintf("TODO not supported\n");
#endif
}
}
#else
finsh_set_device(argv[2]);
#endif /* RT_USING_POSIX_DEVIO */
}
else
{
@ -81,5 +58,3 @@ static int console(int argc, char **argv)
return result;
}
MSH_CMD_EXPORT(console, set console name);
#endif /* FINSH_USING_MSH */

View File

@ -8,7 +8,6 @@
* 2020/10/7 bernard the first version
*/
#include <stdio.h>
#include <rtthread.h>
int main(void)

View File

@ -45,6 +45,7 @@ endif
config RT_USING_DFS_V2
bool "DFS v2.0"
select RT_USING_DEVICE_OPS
endchoice
if RT_USING_DFS_V1
@ -170,6 +171,13 @@ endif
depends on RT_USING_DFS_ROMFS
default n
if RT_USING_SMART
config RT_USING_DFS_PTYFS
bool "Using Pseudo-Teletype Filesystem (UNIX98 PTY)"
depends on RT_USING_DFS_DEVFS
default y
endif
config RT_USING_DFS_CROMFS
bool "Enable ReadOnly compressed file system on flash"
default n

File diff suppressed because it is too large

View File

@ -10,6 +10,9 @@
#ifndef __DEVICE_FS_H__
#define __DEVICE_FS_H__
int dfs_devfs_init(void);
const struct dfs_file_ops *dfs_devfs_fops(void);
mode_t dfs_devfs_device_to_mode(struct rt_device *device);
void dfs_devfs_device_add(rt_device_t device);
int dfs_devfs_update(void);
#endif

View File

@ -0,0 +1,678 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-10-24 flybreak the first version
* 2023-02-01 xqyjlj fix cannot open the same file repeatedly in 'w' mode
* 2023-09-20 zmq810150896 adds truncate functionality and standardized unlink adaptations
* 2023-12-02 Shell Support of dynamic device
*/
#include <rthw.h>
#include <rtthread.h>
#include <dfs.h>
#include <dfs_fs.h>
#include <dfs_dentry.h>
#include <dfs_file.h>
#include <dfs_mnt.h>
#include <devfs.h>
#include <unistd.h>
#define TMPFS_MAGIC 0x0B0B0B0B
#define TMPFS_TYPE_FILE 0x00
#define TMPFS_TYPE_DIR 0x01
#define TMPFS_TYPE_DYN_DEV 0x02 /* dynamic device */
struct devtmpfs_sb;
struct devtmpfs_file
{
char name[DIRENT_NAME_MAX]; /* file name */
rt_uint32_t type; /* file type */
rt_list_t subdirs; /* file subdir list */
rt_list_t sibling; /* file sibling list */
struct devtmpfs_sb *sb; /* superblock ptr */
rt_uint32_t mode;
char *link;
};
struct devtmpfs_sb
{
rt_uint32_t magic; /* TMPFS_MAGIC */
struct devtmpfs_file root; /* root dir */
rt_size_t df_size; /* df size */
rt_list_t sibling; /* sb sibling list */
struct rt_spinlock lock; /* tmpfs lock */
};
static struct dfs_file_ops _default_fops = { 0 };
static int _path_separate(const char *path, char *parent_path, char *file_name)
{
const char *path_p, *path_q;
RT_ASSERT(path[0] == '/');
file_name[0] = '\0';
path_p = path_q = &path[1];
__next_dir:
while (*path_q != '/' && *path_q != '\0')
{
path_q++;
}
if (path_q != path_p) /*sub dir*/
{
if (*path_q != '\0')
{
path_q++;
path_p = path_q;
goto __next_dir;
}
else /* Last level dir */
{
rt_memcpy(parent_path, path, path_p - path - 1);
parent_path[path_p - path - 1] = '\0';
rt_memcpy(file_name, path_p, path_q - path_p);
file_name[path_q - path_p] = '\0';
}
}
if (parent_path[0] == 0)
{
parent_path[0] = '/';
parent_path[1] = '\0';
}
//LOG_D("parent_path: %s", parent_path);
//LOG_D("file_name: %s", file_name);
return 0;
}
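/* Worked example (illustrative; not part of the original source):
 *   _path_separate("/dev/shm/foo", parent, name) -> parent = "/dev/shm", name = "foo"
 *   _path_separate("/uart0", parent, name)       -> parent = "/",        name = "uart0"
 * A bare "/" leaves name empty, which callers treat as "root, no file part". */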
static int _get_subdir(const char *path, char *name)
{
const char *subpath = path;
while (*subpath == '/' && *subpath)
subpath ++;
while (*subpath != '/' && *subpath)
{
*name = *subpath;
name ++;
subpath ++;
}
return 0;
}
#if 0
static int _free_subdir(struct devtmpfs_file *dfile)
{
struct devtmpfs_file *file;
rt_list_t *list, *temp_list;
struct devtmpfs_sb *superblock;
RT_ASSERT(dfile->type == TMPFS_TYPE_DIR);
rt_list_for_each_safe(list, temp_list, &dfile->subdirs)
{
file = rt_list_entry(list, struct devtmpfs_file, sibling);
if (file->type == TMPFS_TYPE_DIR)
{
_free_subdir(file);
}
if (file->link)
{
rt_free(file->link);
}
superblock = file->sb;
RT_ASSERT(superblock);
rt_spin_lock(&superblock->lock);
rt_list_remove(&(file->sibling));
rt_spin_unlock(&superblock->lock);
rt_free(file);
}
return 0;
}
#endif
static int devtmpfs_mount(struct dfs_mnt *mnt, unsigned long rwflag, const void *data)
{
struct devtmpfs_sb *superblock;
superblock = rt_calloc(1, sizeof(struct devtmpfs_sb));
if (superblock)
{
superblock->df_size = sizeof(struct devtmpfs_sb);
superblock->magic = TMPFS_MAGIC;
rt_list_init(&superblock->sibling);
superblock->root.name[0] = '/';
superblock->root.sb = superblock;
superblock->root.type = TMPFS_TYPE_DIR;
superblock->root.mode = S_IFDIR | (S_IRUSR | S_IRGRP | S_IROTH) | (S_IXUSR | S_IXGRP | S_IXOTH);
rt_list_init(&superblock->root.sibling);
rt_list_init(&superblock->root.subdirs);
rt_spin_lock_init(&superblock->lock);
mnt->data = superblock;
}
else
{
return -RT_ERROR;
}
return RT_EOK;
}
static int devtmpfs_unmount(struct dfs_mnt *mnt)
{
#if 0
struct devtmpfs_sb *superblock;
/* FIXME: don't unmount on busy. */
superblock = (struct devtmpfs_sb *)mnt->data;
RT_ASSERT(superblock != NULL);
mnt->data = NULL;
_free_subdir(&(superblock->root));
rt_free(superblock);
#endif
return -RT_ERROR;
}
static struct devtmpfs_file *devtmpfs_file_lookup(struct devtmpfs_sb *superblock, const char *path)
{
const char *subpath, *curpath, *filename = RT_NULL;
char subdir_name[DIRENT_NAME_MAX];
struct devtmpfs_file *file, *curfile;
rt_list_t *list;
subpath = path;
while (*subpath == '/' && *subpath)
subpath ++;
if (! *subpath) /* is root directory */
{
return &(superblock->root);
}
curpath = subpath;
curfile = &superblock->root;
find_subpath:
while (*subpath != '/' && *subpath)
subpath ++;
if (! *subpath) /* is last directory */
filename = curpath;
else
subpath ++; /* skip '/' */
memset(subdir_name, 0, DIRENT_NAME_MAX);
_get_subdir(curpath, subdir_name);
rt_spin_lock(&superblock->lock);
rt_list_for_each(list, &curfile->subdirs)
{
file = rt_list_entry(list, struct devtmpfs_file, sibling);
if (filename) /* find file */
{
if (rt_strcmp(file->name, filename) == 0)
{
rt_spin_unlock(&superblock->lock);
return file;
}
}
else if (rt_strcmp(file->name, subdir_name) == 0)
{
curpath = subpath;
curfile = file;
rt_spin_unlock(&superblock->lock);
goto find_subpath;
}
}
rt_spin_unlock(&superblock->lock);
/* not found */
return NULL;
}
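/* Worked example (illustrative): with this devtmpfs mounted on "/dev",
 * devtmpfs_file_lookup(sb, "/shm/foo") walks root -> "shm" -> "foo" and
 * returns the "foo" entry (NULL if any component is missing), while "/"
 * returns the root entry itself. */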
static int devtmpfs_statfs(struct dfs_mnt *mnt, struct statfs *buf)
{
struct devtmpfs_sb *superblock;
RT_ASSERT(mnt != NULL);
RT_ASSERT(buf != NULL);
superblock = (struct devtmpfs_sb *)mnt->data;
RT_ASSERT(superblock != NULL);
buf->f_bsize = 512;
buf->f_blocks = (superblock->df_size + 511) / 512;
buf->f_bfree = 1;
buf->f_bavail = buf->f_bfree;
return RT_EOK;
}
static int devtmpfs_stat(struct dfs_dentry *dentry, struct stat *st)
{
struct dfs_vnode *vnode;
if (dentry && dentry->vnode)
{
vnode = dentry->vnode;
st->st_dev = (dev_t)(long)(dentry->mnt->dev_id);
st->st_ino = (ino_t)dfs_dentry_full_path_crc32(dentry);
st->st_gid = vnode->gid;
st->st_uid = vnode->uid;
st->st_mode = vnode->mode;
st->st_nlink = vnode->nlink;
st->st_size = vnode->size;
st->st_mtim.tv_nsec = vnode->mtime.tv_nsec;
st->st_mtim.tv_sec = vnode->mtime.tv_sec;
st->st_ctim.tv_nsec = vnode->ctime.tv_nsec;
st->st_ctim.tv_sec = vnode->ctime.tv_sec;
st->st_atim.tv_nsec = vnode->atime.tv_nsec;
st->st_atim.tv_sec = vnode->atime.tv_sec;
}
return RT_EOK;
}
static int devtmpfs_getdents(struct dfs_file *file, struct dirent *dirp, uint32_t count)
{
struct devtmpfs_file *d_file;
struct devtmpfs_sb *superblock;
RT_ASSERT(file);
RT_ASSERT(file->dentry);
RT_ASSERT(file->dentry->mnt);
superblock = (struct devtmpfs_sb *)file->dentry->mnt->data;
RT_ASSERT(superblock);
d_file = devtmpfs_file_lookup(superblock, file->dentry->pathname);
if (d_file)
{
rt_size_t index, end;
struct dirent *d;
struct devtmpfs_file *n_file;
rt_list_t *list;
/* make integer count */
count = (count / sizeof(struct dirent));
if (count == 0)
{
return -EINVAL;
}
end = file->fpos + count;
index = 0;
count = 0;
rt_list_for_each(list, &d_file->subdirs)
{
if (index >= (rt_size_t)file->fpos)
{
n_file = rt_list_entry(list, struct devtmpfs_file, sibling);
d = dirp + count;
if (n_file->type == TMPFS_TYPE_FILE)
{
d->d_type = DT_REG;
}
if (n_file->type == TMPFS_TYPE_DIR)
{
d->d_type = DT_DIR;
}
d->d_reclen = (rt_uint16_t)sizeof(struct dirent);
rt_strncpy(d->d_name, n_file->name, DIRENT_NAME_MAX);
d->d_namlen = rt_strlen(d->d_name);
count += 1;
file->fpos += 1;
}
index += 1;
if (index >= end)
{
break;
}
}
}
return count * sizeof(struct dirent);
}
static int devtmpfs_symlink(struct dfs_dentry *parent_dentry, const char *target, const char *linkpath)
{
int ret = RT_EOK;
struct devtmpfs_file *p_file, *l_file;
struct devtmpfs_sb *superblock;
RT_ASSERT(parent_dentry);
RT_ASSERT(parent_dentry->mnt);
superblock = (struct devtmpfs_sb *)parent_dentry->mnt->data;
RT_ASSERT(superblock);
p_file = devtmpfs_file_lookup(superblock, parent_dentry->pathname);
if (p_file)
{
l_file = (struct devtmpfs_file *)rt_calloc(1, sizeof(struct devtmpfs_file));
if (l_file)
{
superblock->df_size += sizeof(struct devtmpfs_file);
strncpy(l_file->name, linkpath, DIRENT_NAME_MAX - 1);
rt_list_init(&(l_file->subdirs));
rt_list_init(&(l_file->sibling));
l_file->sb = superblock;
l_file->type = TMPFS_TYPE_FILE;
l_file->mode = p_file->mode;
l_file->mode &= ~S_IFMT;
l_file->mode |= S_IFLNK;
l_file->link = rt_strdup(target);
rt_spin_lock(&superblock->lock);
rt_list_insert_after(&(p_file->subdirs), &(l_file->sibling));
rt_spin_unlock(&superblock->lock);
}
}
return ret;
}
static int devtmpfs_readlink(struct dfs_dentry *dentry, char *buf, int len)
{
int ret = 0;
struct devtmpfs_file *d_file;
struct devtmpfs_sb *superblock;
RT_ASSERT(dentry);
RT_ASSERT(dentry->mnt);
superblock = (struct devtmpfs_sb *)dentry->mnt->data;
RT_ASSERT(superblock);
d_file = devtmpfs_file_lookup(superblock, dentry->pathname);
if (d_file)
{
if (d_file->link)
{
if (d_file->type == TMPFS_TYPE_DYN_DEV)
{
rt_device_t device = (void *)d_file->link;
buf[0] = '\0';
ret = device->readlink(device, buf, len);
if (ret == 0)
{
buf[len - 1] = '\0';
ret = rt_strlen(buf);
}
else
{
ret = 0;
}
}
else
{
rt_strncpy(buf, (const char *)d_file->link, len);
buf[len - 1] = '\0';
ret = rt_strlen(buf);
}
}
}
return ret;
}
static int devtmpfs_unlink(struct dfs_dentry *dentry)
{
struct devtmpfs_file *d_file;
struct devtmpfs_sb *superblock;
RT_ASSERT(dentry);
RT_ASSERT(dentry->mnt);
superblock = (struct devtmpfs_sb *)dentry->mnt->data;
RT_ASSERT(superblock);
d_file = devtmpfs_file_lookup(superblock, dentry->pathname);
if (d_file)
{
if (d_file->link && d_file->type != TMPFS_TYPE_DYN_DEV)
{
rt_free(d_file->link);
}
rt_spin_lock(&superblock->lock);
rt_list_remove(&(d_file->sibling));
rt_spin_unlock(&superblock->lock);
rt_free(d_file);
}
return RT_EOK;
}
static int devtmpfs_setattr(struct dfs_dentry *dentry, struct dfs_attr *attr)
{
struct devtmpfs_file *d_file;
struct devtmpfs_sb *superblock;
RT_ASSERT(dentry);
RT_ASSERT(dentry->mnt);
superblock = (struct devtmpfs_sb *)dentry->mnt->data;
RT_ASSERT(superblock);
d_file = devtmpfs_file_lookup(superblock, dentry->pathname);
if (d_file)
{
d_file->mode &= ~0xFFF;
d_file->mode |= attr->st_mode & 0xFFF;
return RT_EOK;
}
return -RT_ERROR;
}
static struct dfs_vnode *devtmpfs_create_vnode(struct dfs_dentry *dentry, int type, mode_t mode)
{
struct dfs_vnode *vnode = RT_NULL;
struct devtmpfs_sb *superblock;
struct devtmpfs_file *d_file, *p_file;
char parent_path[DFS_PATH_MAX], file_name[DIRENT_NAME_MAX];
if (dentry == NULL || dentry->mnt == NULL || dentry->mnt->data == NULL)
{
return NULL;
}
superblock = (struct devtmpfs_sb *)dentry->mnt->data;
RT_ASSERT(superblock != NULL);
vnode = dfs_vnode_create();
if (vnode)
{
/* find parent file */
_path_separate(dentry->pathname, parent_path, file_name);
if (file_name[0] == '\0') /* it's root dir */
{
dfs_vnode_destroy(vnode);
return NULL;
}
/* open parent directory */
p_file = devtmpfs_file_lookup(superblock, parent_path);
if (p_file == NULL)
{
dfs_vnode_destroy(vnode);
return NULL;
}
/* create a file entry */
d_file = (struct devtmpfs_file *)rt_calloc(1, sizeof(struct devtmpfs_file));
if (d_file == NULL)
{
dfs_vnode_destroy(vnode);
return NULL;
}
superblock->df_size += sizeof(struct devtmpfs_file);
strncpy(d_file->name, file_name, DIRENT_NAME_MAX);
rt_list_init(&(d_file->subdirs));
rt_list_init(&(d_file->sibling));
d_file->sb = superblock;
vnode->nlink = 1;
vnode->size = 0;
vnode->mode = mode;
vnode->mnt = dentry->mnt;
vnode->fops = &_default_fops;
if (type == FT_DIRECTORY)
{
d_file->type = TMPFS_TYPE_DIR;
vnode->type = FT_DIRECTORY;
vnode->mode &= ~S_IFMT;
vnode->mode |= S_IFDIR;
}
else
{
d_file->type = TMPFS_TYPE_FILE;
vnode->type = FT_DEVICE;
}
d_file->mode = vnode->mode;
rt_spin_lock(&superblock->lock);
rt_list_insert_after(&(p_file->subdirs), &(d_file->sibling));
rt_spin_unlock(&superblock->lock);
}
return vnode;
}
static struct dfs_vnode *devtmpfs_lookup(struct dfs_dentry *dentry)
{
struct dfs_vnode *vnode = RT_NULL;
struct devtmpfs_sb *superblock;
struct devtmpfs_file *d_file;
if (dentry == NULL || dentry->mnt == NULL || dentry->mnt->data == NULL)
{
return NULL;
}
superblock = (struct devtmpfs_sb *)dentry->mnt->data;
d_file = devtmpfs_file_lookup(superblock, dentry->pathname);
if (d_file)
{
vnode = dfs_vnode_create();
if (vnode)
{
vnode->nlink = 1;
vnode->size = 0;
vnode->mnt = dentry->mnt;
vnode->fops = &_default_fops;
vnode->mode = d_file->mode;
if (d_file->type == TMPFS_TYPE_DIR)
{
vnode->type = FT_DIRECTORY;
}
else if (d_file->link)
{
vnode->type = FT_SYMLINK;
}
else
{
vnode->type = FT_DEVICE;
}
}
}
else
{
rt_device_t device = RT_NULL;
device = rt_device_find(&dentry->pathname[1]);
if (device)
{
vnode = devtmpfs_create_vnode(dentry, FT_REGULAR, dfs_devfs_device_to_mode(device));
if (device->flag & RT_DEVICE_FLAG_DYNAMIC)
{
d_file = devtmpfs_file_lookup(superblock, dentry->pathname);
d_file->type = TMPFS_TYPE_DYN_DEV;
d_file->link = (char *)device;
}
}
}
return vnode;
}
static int devtmpfs_free_vnode(struct dfs_vnode *vnode)
{
return RT_EOK;
}
static const struct dfs_filesystem_ops _devtmpfs_ops =
{
.name = "devtmpfs",
.flags = DFS_FS_FLAG_DEFAULT,
.default_fops = &_default_fops,
.mount = devtmpfs_mount,
.umount = devtmpfs_unmount,
.symlink = devtmpfs_symlink,
.readlink = devtmpfs_readlink,
.unlink = devtmpfs_unlink,
.setattr = devtmpfs_setattr,
.statfs = devtmpfs_statfs,
.stat = devtmpfs_stat,
.lookup = devtmpfs_lookup,
.create_vnode = devtmpfs_create_vnode,
.free_vnode = devtmpfs_free_vnode
};
static struct dfs_filesystem_type _devtmpfs =
{
.fs_ops = &_devtmpfs_ops,
};
int dfs_devtmpfs_init(void)
{
_default_fops = *dfs_devfs_fops();
_default_fops.getdents = devtmpfs_getdents;
/* register file system */
dfs_register(&_devtmpfs);
dfs_mount(RT_NULL, "/dev", "devtmpfs", 0, RT_NULL);
dfs_devfs_update();
return 0;
}
INIT_COMPONENT_EXPORT(dfs_devtmpfs_init);
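/* Usage sketch (illustrative; assumes a device registered as "uart0"):
 * dfs_devtmpfs_init() runs as an INIT_COMPONENT and mounts devtmpfs on
 * "/dev", so a registered rt_device is reachable by path. Unknown paths
 * fall back to rt_device_find() in devtmpfs_lookup(), so the node shows
 * up on first access. */
#include <fcntl.h>

static void devtmpfs_open_sketch(void)
{
    int fd = open("/dev/uart0", O_RDWR);
    if (fd >= 0)
    {
        write(fd, "hi\n", 3);
        close(fd);
    }
}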

View File

@ -145,7 +145,7 @@ static struct dfs_vnode *dfs_mqueue_create_vnode(struct dfs_dentry *dentry, int
}
mq_file->msg_size = 8192;
mq_file->max_msgs = 10;
strncpy(mq_file->name, dentry->pathname + 1, RT_NAME_MAX);
strncpy(mq_file->name, dentry->pathname + 1, RT_NAME_MAX - 1);
dfs_mqueue_insert_after(&(mq_file->list));
}

View File

@ -0,0 +1,5 @@
# The Pseudo Terminal Filesystem
Devices registered on ptyfs are also registered in the device framework with `rt_device_register()`.
It's possible to mount a new ptyfs instance on another path; see the sketch below. Instances are isolated from each other and do not share the ID system. Generally, though, ptyfs has to be mounted under the `/dev` root, since all the file nodes in ptyfs are devices.
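For example, a second, isolated instance could be mounted like this (a minimal sketch; `/dev/pts2` is a hypothetical mount point, and it must live under `/dev/` because `ptyfs_ops_mount()` rejects anything else):

```c
#include <dfs_fs.h>

/* a fresh ptyfs instance with its own ptmx and its own pts numbering */
dfs_mount(RT_NULL, "/dev/pts2", "ptyfs", 0, RT_NULL);
```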

View File

@ -0,0 +1,11 @@
# RT-Thread building script for component
from building import *
cwd = GetCurrentDir()
src = Glob('*.c')
CPPPATH = [cwd]
group = DefineGroup('Filesystem', src, depend = ['RT_USING_DFS', 'RT_USING_DFS_PTYFS'], CPPPATH = CPPPATH)
Return('group')

View File

@ -0,0 +1,658 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-12-02 Shell init ver.
*/
#define DBG_TAG "filesystem.ptyfs"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "ptyfs.h"
#include <dfs.h>
#include <dfs_fs.h>
#include <dfs_dentry.h>
#include <dfs_file.h>
#include <dfs_mnt.h>
#include <devfs.h>
#include <rid_bitmap.h>
#include <rthw.h>
#include <rtthread.h>
#include <terminal/terminal.h>
#include <dirent.h>
#include <unistd.h>
#ifndef S_IRWXUGO
#define S_IRWXUGO (S_IRWXU | S_IRWXG | S_IRWXO)
#endif /* S_IRWXUGO */
#ifndef S_IALLUGO
#define S_IALLUGO (S_ISUID | S_ISGID | S_ISVTX | S_IRWXUGO)
#endif /* S_IALLUGO */
#ifndef S_IRUGO
#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
#endif /* S_IRUGO */
#ifndef S_IWUGO
#define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)
#endif /* S_IWUGO */
#ifndef S_IXUGO
#define S_IXUGO (S_IXUSR | S_IXGRP | S_IXOTH)
#endif /* S_IXUGO */
#define PTYFS_MAGIC 0x9D94A07D
#define PTYFS_TYPE_DIR 0x00
#define PTYFS_TYPE_FILE_PTMX 0x01
#define PTYFS_TYPE_FILE_SLAVE 0x02
/* TODO: use standard symbolic permissions, not our own */
#define PTMX_DEFAULT_FILE_MODE (S_IFCHR | 0666)
#define PTS_DEFAULT_FILE_MODE (S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP)
#define ROOT_DEFUALT_FILE_MODE (S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR)
struct ptyfs_sb;
struct ptyfs_file
{
char basename[DIRENT_NAME_MAX]; /* file name */
rt_uint32_t mode; /* file modes allowed */
rt_uint32_t type; /* file type */
rt_list_t subdirs; /* file subdir list */
rt_list_t ent_node; /* entry node in subdir list */
struct ptyfs_sb *sb; /* superblock ptr */
rt_device_t device; /* device binding on this file */
};
struct ptyfs_sb
{
struct rt_device ptmx_device; /* ptmx device */
struct rt_mutex lock; /* tmpfs lock */
struct ptyfs_file root_file; /* root dir */
struct ptyfs_file ptmx_file; /* `/ptmx` file */
struct rid_bitmap ptsno_pool; /* pts number pool */
rt_uint32_t magic; /* PTYFS_MAGIC */
rt_size_t df_size; /* df size */
rt_list_t sibling; /* sb sibling list */
struct dfs_mnt *mount; /* mount data */
/**
* Note: this upper limit is set to protect kernel memory from being
* drained by an application that keeps allocating pty devices.
*
* Still, the current bitmap implementation cannot use the memory
* efficiently
*/
rt_bitmap_t
ptsno_pool_bitset[LWP_PTY_MAX_PARIS_LIMIT / (sizeof(rt_bitmap_t) * 8)];
};
static struct dfs_file_ops _default_fops;
static int _split_out_subdir(const char *path, char *name)
{
const char *subpath = path;
while (*subpath == '/' && *subpath)
{
subpath++;
}
while (*subpath != '/' && *subpath)
{
*name++ = *subpath++;
}
*name = '\0';
return 0;
}
static rt_err_t ptyfile_init(struct ptyfs_file *file, struct ptyfs_sb *sb,
const char *name, rt_uint32_t type,
rt_uint32_t mode, rt_device_t device)
{
if (name)
strncpy(file->basename, name, sizeof(file->basename));
file->type = type;
file->mode = mode;
rt_list_init(&file->subdirs);
rt_list_init(&file->ent_node);
file->sb = sb;
file->device = device;
return 0;
}
static rt_err_t ptyfile_add_to_root(struct ptyfs_sb *sb,
struct ptyfs_file *new_file)
{
struct ptyfs_file *root_file = &sb->root_file;
/* update super block */
sb->df_size += sizeof(struct ptyfs_file);
rt_mutex_take(&sb->lock, RT_WAITING_FOREVER);
rt_list_insert_after(&(root_file->subdirs), &(new_file->ent_node));
rt_mutex_release(&sb->lock);
return 0;
}
static rt_err_t ptyfile_remove_from_root(struct ptyfs_sb *sb,
struct ptyfs_file *rm_file)
{
/* update super block */
sb->df_size -= sizeof(struct ptyfs_file);
rt_mutex_take(&sb->lock, RT_WAITING_FOREVER);
rt_list_remove(&(rm_file->ent_node));
rt_mutex_release(&sb->lock);
return 0;
}
static struct ptyfs_file *ptyfile_lookup(struct ptyfs_sb *superblock,
const char *path)
{
const char *subpath_iter, *curpath_iter, *basename = RT_NULL;
char subdir_name[DIRENT_NAME_MAX];
struct ptyfs_file *curfile, *found_file = RT_NULL;
rt_list_t *list;
int do_path_resolve = 1;
subpath_iter = path;
/* skip starting "/" */
while (*subpath_iter == '/') subpath_iter++;
if (!*subpath_iter)
{
return &(superblock->root_file);
}
curpath_iter = subpath_iter;
curfile = &superblock->root_file;
/* resolve the chain of path components split from the path, one by one */
while (do_path_resolve)
{
do_path_resolve = 0;
/* split out the sub-directory or the basename */
while (*subpath_iter != '/' && *subpath_iter) subpath_iter++;
if (!*subpath_iter)
{
basename = curpath_iter;
}
else
{
_split_out_subdir(curpath_iter, subdir_name);
/* skip "/" for next search */
subpath_iter++;
}
rt_mutex_take(&superblock->lock, RT_WAITING_FOREVER);
rt_list_for_each(list, &curfile->subdirs)
{
struct ptyfs_file *file_iter;
file_iter = rt_list_entry(list, struct ptyfs_file, ent_node);
if (basename)
{
if (strcmp(file_iter->basename, basename) == 0)
{
found_file = file_iter;
break;
}
}
else if (strcmp(file_iter->basename, subdir_name) == 0)
{
curpath_iter = subpath_iter;
curfile = file_iter;
do_path_resolve = 1;
break;
}
}
rt_mutex_release(&superblock->lock);
}
return found_file;
}
const char *ptyfs_get_rootpath(rt_device_t ptmx)
{
const char *rc;
struct ptyfs_sb *sb;
/* recover the superblock from the ptmx device */
sb = rt_container_of(ptmx, struct ptyfs_sb, ptmx_device);
if (sb->magic != PTYFS_MAGIC)
{
rc = 0;
}
else
{
/* fullpath always starts with /dev/ */
return sb->mount->fullpath + 5;
}
return rc;
}
ptsno_t ptyfs_register_pts(rt_device_t ptmx, rt_device_t pts)
{
ptsno_t rc;
struct ptyfs_sb *sb;
struct ptyfs_file *pts_file;
struct rid_bitmap *ptsno_pool;
/* allocate id for it and register file */
sb = rt_container_of(ptmx, struct ptyfs_sb, ptmx_device);
if (sb->magic != PTYFS_MAGIC)
{
rc = -1;
}
else
{
ptsno_pool = &sb->ptsno_pool;
rc = rid_bitmap_get(ptsno_pool);
if (rc >= 0)
{
pts_file = rt_calloc(1, sizeof(struct ptyfs_file));
if (pts_file)
{
snprintf(pts_file->basename, DIRENT_NAME_MAX, "%lu", rc);
ptyfile_init(pts_file, sb, 0, PTYFS_TYPE_FILE_SLAVE,
PTS_DEFAULT_FILE_MODE, pts);
ptyfile_add_to_root(sb, pts_file);
}
else
{
rid_bitmap_put(ptsno_pool, rc);
rc = -1;
}
}
/* else rc == -1 */
}
return rc;
}
rt_err_t ptyfs_unregister_pts(rt_device_t ptmx, ptsno_t ptsno)
{
ptsno_t rc;
struct ptyfs_sb *sb;
struct ptyfs_file *pts_file;
struct rid_bitmap *ptsno_pool;
char path_buf[DIRENT_NAME_MAX];
/* look up the pts file and release its id */
sb = rt_container_of(ptmx, struct ptyfs_sb, ptmx_device);
if (sb->magic != PTYFS_MAGIC || ptsno < 0)
{
rc = -EINVAL;
}
else
{
/* build the path and find the device */
snprintf(path_buf, sizeof(path_buf), "%lu", ptsno);
pts_file = ptyfile_lookup(sb, path_buf);
if (pts_file)
{
ptyfile_remove_from_root(sb, pts_file);
ptsno_pool = &sb->ptsno_pool;
rid_bitmap_put(ptsno_pool, ptsno);
rc = 0;
}
else
{
rc = -ENOENT;
}
}
return rc;
}
#define DEVFS_PREFIX "/dev/"
#define DEVFS_PREFIX_LEN (sizeof(DEVFS_PREFIX) - 1)
/**
* Create a new instance of ptyfs and mount it on the target point.
* Two basic files are created: root and ptmx.
*
* TODO: support mount options?
*/
static int ptyfs_ops_mount(struct dfs_mnt *mnt, unsigned long rwflag,
const void *data)
{
struct ptyfs_sb *sb;
rt_device_t ptmx_device;
rt_err_t rc;
if (strncmp(mnt->fullpath, DEVFS_PREFIX, DEVFS_PREFIX_LEN) != 0)
{
LOG_I("%s() Not mounted on `/dev/'", __func__);
return -EINVAL;
}
sb = rt_calloc(1, sizeof(struct ptyfs_sb));
if (sb)
{
rt_mutex_init(&sb->lock, "ptyfs", RT_IPC_FLAG_PRIO);
/* setup the ptmx device */
ptmx_device = &sb->ptmx_device;
rc = lwp_ptmx_init(ptmx_device, mnt->fullpath + DEVFS_PREFIX_LEN);
if (rc == RT_EOK)
{
/* setup 2 basic files */
ptyfile_init(&sb->root_file, sb, "/", PTYFS_TYPE_DIR,
ROOT_DEFUALT_FILE_MODE, 0);
ptyfile_init(&sb->ptmx_file, sb, "ptmx", PTYFS_TYPE_FILE_PTMX,
PTMX_DEFAULT_FILE_MODE, ptmx_device);
ptyfile_add_to_root(sb, &sb->ptmx_file);
/* setup rid */
rid_bitmap_init(&sb->ptsno_pool, 0, LWP_PTY_MAX_PARIS_LIMIT,
sb->ptsno_pool_bitset, &sb->lock);
/* setup properties and members */
sb->magic = PTYFS_MAGIC;
sb->df_size = sizeof(struct ptyfs_sb);
rt_list_init(&sb->sibling);
/* binding superblocks and mount point */
mnt->data = sb;
sb->mount = mnt;
rc = 0;
}
/* else just return rc */
}
else
{
rc = -ENOMEM;
}
return rc;
}
static int ptyfs_ops_umount(struct dfs_mnt *mnt)
{
/* Not supported yet */
return -1;
}
static int ptyfs_ops_setattr(struct dfs_dentry *dentry, struct dfs_attr *attr)
{
struct ptyfs_file *pty_file;
struct ptyfs_sb *superblock;
RT_ASSERT(dentry);
RT_ASSERT(dentry->mnt);
superblock = (struct ptyfs_sb *)dentry->mnt->data;
RT_ASSERT(superblock);
/* find the device related to current pts slave device */
pty_file = ptyfile_lookup(superblock, dentry->pathname);
if (pty_file && pty_file->type == PTYFS_TYPE_FILE_SLAVE)
{
pty_file->mode &= ~0xFFF;
pty_file->mode |= attr->st_mode & 0xFFF;
return 0;
}
return -1;
}
#define OPTIMAL_BSIZE 1024
static int ptyfs_ops_statfs(struct dfs_mnt *mnt, struct statfs *buf)
{
struct ptyfs_sb *superblock;
RT_ASSERT(mnt != NULL);
RT_ASSERT(buf != NULL);
superblock = (struct ptyfs_sb *)mnt->data;
RT_ASSERT(superblock != NULL);
buf->f_bsize = OPTIMAL_BSIZE;
buf->f_blocks = (superblock->df_size + OPTIMAL_BSIZE - 1) / OPTIMAL_BSIZE;
buf->f_bfree = 1;
buf->f_bavail = buf->f_bfree;
return RT_EOK;
}
static int ptyfs_ops_stat(struct dfs_dentry *dentry, struct stat *st)
{
struct dfs_vnode *vnode;
if (dentry && dentry->vnode)
{
vnode = dentry->vnode;
/* device id ? */
st->st_dev = (dev_t)(long)(dentry->mnt->dev_id);
st->st_ino = (ino_t)dfs_dentry_full_path_crc32(dentry);
st->st_gid = vnode->gid;
st->st_uid = vnode->uid;
st->st_mode = vnode->mode;
st->st_nlink = vnode->nlink;
st->st_size = vnode->size;
st->st_mtim.tv_nsec = vnode->mtime.tv_nsec;
st->st_mtim.tv_sec = vnode->mtime.tv_sec;
st->st_ctim.tv_nsec = vnode->ctime.tv_nsec;
st->st_ctim.tv_sec = vnode->ctime.tv_sec;
st->st_atim.tv_nsec = vnode->atime.tv_nsec;
st->st_atim.tv_sec = vnode->atime.tv_sec;
}
return 0;
}
static struct dfs_vnode *ptyfs_ops_lookup(struct dfs_dentry *dentry)
{
struct dfs_vnode *vnode = RT_NULL;
struct ptyfs_sb *superblock;
struct ptyfs_file *pty_file;
if (dentry == NULL || dentry->mnt == NULL || dentry->mnt->data == NULL)
{
return NULL;
}
superblock = (struct ptyfs_sb *)dentry->mnt->data;
pty_file = ptyfile_lookup(superblock, dentry->pathname);
if (pty_file)
{
vnode = dfs_vnode_create();
if (vnode)
{
vnode->data = pty_file->device;
vnode->nlink = 1;
vnode->size = 0;
vnode->mnt = dentry->mnt;
/* if it's root directory */
vnode->fops = &_default_fops;
vnode->mode = pty_file->mode;
vnode->type = pty_file->type == PTYFS_TYPE_DIR ? FT_DIRECTORY : FT_DEVICE;
}
}
return vnode;
}
static struct dfs_vnode *ptyfs_ops_create_vnode(struct dfs_dentry *dentry,
int type, mode_t mode)
{
struct dfs_vnode *vnode = RT_NULL;
struct ptyfs_sb *sb;
struct ptyfs_file *pty_file;
char *vnode_path;
if (dentry == NULL || dentry->mnt == NULL || dentry->mnt->data == NULL)
{
return NULL;
}
sb = (struct ptyfs_sb *)dentry->mnt->data;
RT_ASSERT(sb != NULL);
vnode = dfs_vnode_create();
if (vnode)
{
vnode_path = dentry->pathname;
/* Query if file existed. Filter out illegal open modes */
pty_file = ptyfile_lookup(sb, vnode_path);
if (!pty_file || (~pty_file->mode & mode))
{
dfs_vnode_destroy(vnode);
return NULL;
}
vnode->data = pty_file->device;
vnode->nlink = 1;
vnode->size = 0;
vnode->mnt = dentry->mnt;
vnode->fops = pty_file->device ? pty_file->device->fops : RT_NULL;
vnode->mode &= pty_file->mode;
if (type == FT_DIRECTORY)
{
vnode->mode |= S_IFDIR;
vnode->type = FT_DIRECTORY;
LOG_I("%s: S_IFDIR created", __func__);
}
else if (type == FT_REGULAR)
{
vnode->mode |= S_IFCHR;
vnode->type = FT_DEVICE;
LOG_I("%s: S_IFDIR created", __func__);
}
else
{
/* unsupported types */
dfs_vnode_destroy(vnode);
return NULL;
}
}
return vnode;
}
static int ptyfs_ops_free_vnode(struct dfs_vnode *vnode)
{
return RT_EOK;
}
static int devpty_deffops_getdents(struct dfs_file *file, struct dirent *dirp,
uint32_t count)
{
struct ptyfs_file *d_file;
struct ptyfs_sb *superblock;
RT_ASSERT(file);
RT_ASSERT(file->dentry);
RT_ASSERT(file->dentry->mnt);
superblock = (struct ptyfs_sb *)file->dentry->mnt->data;
RT_ASSERT(superblock);
d_file = ptyfile_lookup(superblock, file->dentry->pathname);
if (d_file)
{
rt_size_t index, end;
struct dirent *d;
struct ptyfs_file *n_file;
rt_list_t *list;
/* make integer count */
count = (count / sizeof(struct dirent));
if (count == 0)
{
return -EINVAL;
}
end = file->fpos + count;
index = 0;
count = 0;
rt_list_for_each(list, &d_file->subdirs)
{
if (index >= (rt_size_t)file->fpos)
{
n_file = rt_list_entry(list, struct ptyfs_file, ent_node);
d = dirp + count;
if (n_file->type == PTYFS_TYPE_DIR)
{
d->d_type = DT_DIR;
}
else
{
/* ptmx(5,2) or slave(136,[0,1048575]) device, on Linux */
d->d_type = DT_CHR;
}
d->d_reclen = (rt_uint16_t)sizeof(struct dirent);
rt_strncpy(d->d_name, n_file->basename, DIRENT_NAME_MAX);
d->d_namlen = rt_strlen(d->d_name);
count += 1;
file->fpos += 1;
}
index += 1;
if (index >= end)
{
break;
}
}
}
return count * sizeof(struct dirent);
}
static const struct dfs_filesystem_ops _ptyfs_ops = {
.name = "ptyfs",
.flags = DFS_FS_FLAG_DEFAULT,
.default_fops = &_default_fops,
.mount = ptyfs_ops_mount,
.umount = ptyfs_ops_umount,
/* don't allow to create symbolic link */
.symlink = RT_NULL,
.readlink = RT_NULL,
.unlink = RT_NULL,
.setattr = ptyfs_ops_setattr,
.statfs = ptyfs_ops_statfs,
.stat = ptyfs_ops_stat,
.lookup = ptyfs_ops_lookup,
.create_vnode = ptyfs_ops_create_vnode,
.free_vnode = ptyfs_ops_free_vnode,
};
static struct dfs_filesystem_type _devptyfs = {
.fs_ops = &_ptyfs_ops,
};
static int _ptyfs_init(void)
{
_default_fops = *dfs_devfs_fops();
_default_fops.getdents = devpty_deffops_getdents;
/* register file system */
dfs_register(&_devptyfs);
return 0;
}
INIT_COMPONENT_EXPORT(_ptyfs_init);

View File

@ -0,0 +1,22 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-12-02 Shell init ver.
*/
#ifndef __FS_PTYFS_H__
#define __FS_PTYFS_H__
#include <rtthread.h>
typedef rt_base_t ptsno_t;
ptsno_t ptyfs_register_pts(rt_device_t ptmx, rt_device_t pts);
rt_err_t ptyfs_unregister_pts(rt_device_t ptmx, ptsno_t ptsno);
const char *ptyfs_get_rootpath(rt_device_t ptmx);
#endif /* __FS_PTYFS_H__ */
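/* Usage sketch (illustrative; `ptmx_dev` and `pts_dev` stand for devices set
 * up by the pty driver, e.g. via lwp_ptmx_init()): a newly created slave is
 * published under the ptyfs root, named after its allocated number. */
static void pts_publish_sketch(rt_device_t ptmx_dev, rt_device_t pts_dev)
{
    ptsno_t no = ptyfs_register_pts(ptmx_dev, pts_dev);
    if (no >= 0)
    {
        /* e.g. root is "pts" when the instance is mounted on /dev/pts */
        rt_kprintf("slave at /dev/%s/%ld\n", ptyfs_get_rootpath(ptmx_dev), (long)no);
        /* ... and on teardown ... */
        ptyfs_unregister_pts(ptmx_dev, no);
    }
}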

View File

@ -29,6 +29,9 @@ rt_weak const struct romfs_dirent _root_dirent[] =
{
{ROMFS_DIRENT_DIR, "dev", RT_NULL, 0},
{ROMFS_DIRENT_DIR, "mnt", RT_NULL, 0},
{ROMFS_DIRENT_DIR, "proc", RT_NULL, 0},
{ROMFS_DIRENT_DIR, "etc", RT_NULL, 0},
{ROMFS_DIRENT_DIR, "bin", RT_NULL, 0},
{ROMFS_DIRENT_DIR, "dummy", (rt_uint8_t *)_dummy, sizeof(_dummy) / sizeof(_dummy[0])},
{ROMFS_DIRENT_FILE, "dummy.txt", _dummy_txt, sizeof(_dummy_txt)},
};

View File

@ -202,6 +202,7 @@ int dfs_tmpfs_ioctl(struct dfs_file *file, int cmd, void *args)
superblock = d_file->sb;
RT_ASSERT(superblock != NULL);
RT_UNUSED(superblock);
switch (cmd)
{
@ -317,26 +318,21 @@ static ssize_t dfs_tmpfs_read(struct dfs_file *file, void *buf, size_t count, of
return length;
}
static ssize_t dfs_tmpfs_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
static ssize_t _dfs_tmpfs_write(struct tmpfs_file *d_file, const void *buf, size_t count, off_t *pos)
{
struct tmpfs_file *d_file;
struct tmpfs_sb *superblock;
d_file = (struct tmpfs_file *)file->vnode->data;
RT_ASSERT(d_file != NULL);
superblock = d_file->sb;
RT_ASSERT(superblock != NULL);
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
if (count + *pos > file->vnode->size)
if (count + *pos > d_file->size)
{
rt_uint8_t *ptr;
ptr = rt_realloc(d_file->data, *pos + count);
if (ptr == NULL)
{
rt_mutex_release(&file->vnode->lock);
rt_set_errno(-ENOMEM);
return 0;
}
@ -347,7 +343,6 @@ static ssize_t dfs_tmpfs_write(struct dfs_file *file, const void *buf, size_t co
/* update d_file and file size */
d_file->data = ptr;
d_file->size = *pos + count;
file->vnode->size = d_file->size;
LOG_D("tmpfile ptr:%x, size:%d", ptr, d_file->size);
}
@ -356,6 +351,21 @@ static ssize_t dfs_tmpfs_write(struct dfs_file *file, const void *buf, size_t co
/* update file current position */
*pos += count;
return count;
}
static ssize_t dfs_tmpfs_write(struct dfs_file *file, const void *buf, size_t count, off_t *pos)
{
struct tmpfs_file *d_file;
d_file = (struct tmpfs_file *)file->vnode->data;
RT_ASSERT(d_file != NULL);
rt_mutex_take(&file->vnode->lock, RT_WAITING_FOREVER);
count = _dfs_tmpfs_write(d_file, buf, count, pos);
rt_mutex_release(&file->vnode->lock);
return count;
@ -504,6 +514,7 @@ static int dfs_tmpfs_getdents(struct dfs_file *file,
superblock = d_file->sb;
RT_ASSERT(superblock != RT_NULL);
RT_UNUSED(superblock);
/* make integer count */
count = (count / sizeof(struct dirent));
@ -797,6 +808,8 @@ static ssize_t dfs_tmp_page_read(struct dfs_file *file, struct dfs_page *page)
ssize_t dfs_tmp_page_write(struct dfs_page *page)
{
off_t pos;
size_t count = 0;
struct tmpfs_file *d_file;
if (page->aspace->vnode->type == FT_DIRECTORY)
@ -806,13 +819,16 @@ ssize_t dfs_tmp_page_write(struct dfs_page *page)
d_file = (struct tmpfs_file *)(page->aspace->vnode->data);
RT_ASSERT(d_file != RT_NULL);
rt_mutex_take(&page->aspace->vnode->lock, RT_WAITING_FOREVER);
if (page->len > 0)
memcpy(d_file->data + page->fpos, page->page, page->len);
{
pos = page->fpos;
count = _dfs_tmpfs_write(d_file, page->page, page->len, &pos);
}
rt_mutex_release(&page->aspace->vnode->lock);
return F_OK;
return count;
}
#endif

View File

@ -25,6 +25,10 @@
#include <rtatomic.h>
#include <rtdevice.h>
#ifndef ATTR_MODE_SET
#define ATTR_MODE_SET (1 << 6)
#endif
#ifndef ATTR_ATIME_SET
#define ATTR_ATIME_SET (1 << 7)
#endif
@ -33,6 +37,14 @@
#define ATTR_MTIME_SET (1 << 8)
#endif
#ifndef ATTR_UID_SET
#define ATTR_UID_SET (1 << 9)
#endif
#ifndef ATTR_GID_SET
#define ATTR_GID_SET (1 << 10)
#endif
#ifndef AT_SYMLINK_NOFOLLOW
#define AT_SYMLINK_NOFOLLOW 0x100
#endif

View File

@ -175,20 +175,25 @@ int dfs_file_isdir(const char *path);
int dfs_file_access(const char *path, mode_t mode);
int dfs_file_chdir(const char *path);
char *dfs_file_getcwd(char *buf, size_t size);
char *dfs_nolink_path(struct dfs_mnt **mnt, char *fullpath, int mode);
#ifdef RT_USING_SMART
int dfs_file_mmap2(struct dfs_file *file, struct dfs_mmap2_args *mmap2);
int dfs_file_mmap(struct dfs_file *file, struct dfs_mmap2_args *mmap2);
#endif
char *dfs_nolink_path(struct dfs_mnt **mnt, char *fullpath, int mode);
/* 0x5254 is just a magic number to make these relatively unique ("RT") */
#define RT_FIOFTRUNCATE 0x52540000U
#define RT_FIOGETADDR 0x52540001U
#define RT_FIOMMAP2 0x52540002U
/* dfs_file_realpath mode */
#define DFS_REALPATH_EXCEPT_LAST 0
#define DFS_REALPATH_EXCEPT_NONE 1
#define DFS_REALPATH_ONLY_LAST 3
char *dfs_file_realpath(struct dfs_mnt **mnt, const char *fullpath, int mode);
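/* Illustrative sketch (not part of the original header): with a symlink
 * "/mnt/lnk" -> "/mnt/dir" and a file "/mnt/dir/a.txt":
 *   DFS_REALPATH_EXCEPT_NONE resolves every component (stat-like):
 *       "/mnt/lnk/a.txt" -> "/mnt/dir/a.txt"
 *   DFS_REALPATH_EXCEPT_LAST resolves all but the final component
 *       (lstat-like), so a trailing symlink is kept as-is.
 *   DFS_REALPATH_ONLY_LAST resolves only the final component, which
 *       dfs_file_open() uses to follow a symlink it has already found.
 * The result is allocated with rt_malloc() and freed by the caller. */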
#ifdef __cplusplus
}
#endif

View File

@ -32,7 +32,9 @@ struct dfs_partition
struct dfs_attr
{
unsigned int ia_valid;
mode_t st_mode;
uid_t st_uid;
gid_t st_gid;
mode_t st_mode;
struct timespec ia_atime;
struct timespec ia_mtime;
};

View File

@ -23,6 +23,8 @@ extern "C"
{
#endif
struct rt_varea;
struct rt_aspace;
struct dfs_vnode;
struct dfs_dentry;
struct dfs_aspace;
@ -30,7 +32,8 @@ struct dfs_aspace;
struct dfs_mmap
{
rt_list_t mmap_node;
struct rt_varea *varea;
struct rt_aspace *aspace;
void *vaddr;
};
struct dfs_page

View File

@ -557,6 +557,111 @@ exit:
return newfd;
}
/**
* @brief Duplicate an fd from the current process into the designated fd table.
*
* @param oldfd is the fd in the current process.
*
* @param fdtab is the fd table to duplicate into; if NULL, the global table (_fdtab) is used.
*
* @return -1 on failure, or the newly allocated file descriptor.
*/
int dfs_dup_to(int oldfd, struct dfs_fdtable *fdtab)
{
int newfd = -1;
struct dfs_fdtable *fdt = NULL;
if (dfs_file_lock() != RT_EOK)
{
return -RT_ENOSYS;
}
if (fdtab == NULL)
{
fdtab = &_fdtab;
}
/* check old fd */
fdt = dfs_fdtable_get();
if ((oldfd < 0) || (oldfd >= fdt->maxfd))
{
goto exit;
}
if (!fdt->fds[oldfd])
{
goto exit;
}
/* get a new fd*/
newfd = _fdt_slot_alloc(fdtab, DFS_STDIO_OFFSET);
if (newfd >= 0)
{
fdtab->fds[newfd] = fdt->fds[oldfd];
/* inc ref_count */
rt_atomic_add(&(fdtab->fds[newfd]->ref_count), 1);
}
exit:
dfs_file_unlock();
return newfd;
}
/**
* @brief Duplicate an fd from the designated fd table into the current process.
*
* @param oldfd is the fd in the designated fd table.
*
* @param fdtab is the fd table containing oldfd; if NULL, the global table (_fdtab) is used.
*
* @return -1 on failure, or the newly allocated file descriptor.
*/
int dfs_dup_from(int oldfd, struct dfs_fdtable *fdtab)
{
int newfd = -1;
struct dfs_file *file;
if (dfs_file_lock() != RT_EOK)
{
return -RT_ENOSYS;
}
if (fdtab == NULL)
{
fdtab = &_fdtab;
}
/* check old fd */
if ((oldfd < 0) || (oldfd >= fdtab->maxfd))
{
goto exit;
}
if (!fdtab->fds[oldfd])
{
goto exit;
}
/* get a new fd*/
newfd = fd_new();
file = fd_get(newfd);
if (newfd >= 0 && file)
{
file->mode = fdtab->fds[oldfd]->mode;
file->flags = fdtab->fds[oldfd]->flags;
file->fops = fdtab->fds[oldfd]->fops;
file->dentry = dfs_dentry_ref(fdtab->fds[oldfd]->dentry);
file->vnode = fdtab->fds[oldfd]->vnode;
file->mmap_context = RT_NULL;
file->data = fdtab->fds[oldfd]->data;
}
dfs_file_close(fdtab->fds[oldfd]);
exit:
fdt_fd_release(fdtab, oldfd);
dfs_file_unlock();
return newfd;
}
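/* Usage sketch (illustrative; `child_fdt` is a hypothetical fd table of
 * another process): dfs_dup_to() shares the current stdin by bumping the
 * file's ref count, while dfs_dup_from() also closes and releases the slot
 * in the source table, so it behaves as a move rather than a copy. */
static void dup_to_sketch(struct dfs_fdtable *child_fdt)
{
    int fd_in_child = dfs_dup_to(0, child_fdt);
    if (fd_in_child < 0)
    {
        rt_kprintf("dup_to failed\n");
    }
}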
#ifdef RT_USING_SMART
sysret_t sys_dup(int oldfd)
#else
@ -856,7 +961,11 @@ int dfs_fd_dump(int argc, char** argv)
{
int index;
dfs_file_lock();
if (dfs_file_lock() != RT_EOK)
{
return -RT_ENOSYS;
}
for (index = 0; index < _fdtab.maxfd; index++)
{
struct dfs_file *file = _fdtab.fds[index];

View File

@ -29,22 +29,86 @@
#define MAX_RW_COUNT 0xfffc0000
rt_inline int _find_path_node(const char *path)
rt_inline int _first_path_len(const char *path)
{
int i = 0;
while (path[i] != '\0')
if (path[i] == '/')
{
if ('/' == path[i++])
i++;
while (path[i] != '\0' && path[i] != '/')
{
break;
i++;
}
}
/* return path-node length */
return i;
}
static int _get_parent_path(const char *fullpath, char *path)
{
int len = 0;
char *str = 0;
str = strrchr(fullpath, '/');
if (str)
{
len = str - fullpath;
if (len > 0)
{
rt_memcpy(path, fullpath, len);
path[len] = '\0';
}
}
return len;
}
static int _try_readlink(const char *path, struct dfs_mnt *mnt, char *link)
{
int ret = -1;
struct dfs_dentry *dentry = dfs_dentry_lookup(mnt, path, 0);
if (dentry && dentry->vnode->type == FT_SYMLINK)
{
if (mnt->fs_ops->readlink)
{
if (dfs_is_mounted(mnt) == 0)
{
ret = mnt->fs_ops->readlink(dentry, link, DFS_PATH_MAX);
}
}
}
dfs_dentry_unref(dentry);
return ret;
}
static int _insert_link_path(const char *link_fn, int link_len, char *tmp_path, int *index)
{
int ret = -1;
if (link_fn[0] != '/')
{
if (link_len + 1 <= *index)
{
*index -= link_len;
rt_memcpy(tmp_path + *index, link_fn, link_len);
*index -= 1;
tmp_path[*index] = '/';
ret = 0;
}
}
else if (link_len <= *index)
{
*index -= link_len;
rt_memcpy(tmp_path + *index, link_fn, link_len);
ret = 1;
}
return ret;
}
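/* Worked example (illustrative): tmp_path keeps the still-unresolved suffix
 * right-aligned at offset *index. For a relative link body "dir" found while
 * the remaining suffix is "/a.txt", the body is copied in front of the suffix
 * and a '/' re-inserted, leaving "dir/a.txt" (return 0). An absolute link
 * body replaces the resolved prefix instead, so the caller resets its
 * accumulated path length (return 1); -1 means the buffer ran out of room. */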
/*
* rw_verify_area doesn't like huge counts. We limit
* them to something that fits in "int" so that others
@ -149,166 +213,97 @@ static void dfs_file_unref(struct dfs_file *file)
}
}
struct dfs_dentry* dfs_file_follow_link(struct dfs_dentry *dentry)
{
int ret = 0;
struct dfs_dentry *tmp = dfs_dentry_ref(dentry);
if (dentry && dentry->vnode && dentry->vnode->type == FT_SYMLINK)
{
char *buf = NULL;
buf = (char *)rt_malloc(DFS_PATH_MAX);
if (buf)
{
do
{
if (dfs_is_mounted(tmp->mnt) == 0)
{
ret = tmp->mnt->fs_ops->readlink(tmp, buf, DFS_PATH_MAX);
}
if (ret > 0)
{
struct dfs_mnt *mnt = NULL;
if (buf[0] != '/')
{
char *dir = dfs_dentry_pathname(tmp);
/* is the relative directory */
if (dir)
{
char *fullpath = dfs_normalize_path(dir, buf);
if (fullpath)
{
strncpy(buf, fullpath, DFS_PATH_MAX);
rt_free(fullpath);
}
rt_free(dir);
}
}
mnt = dfs_mnt_lookup(buf);
if (mnt)
{
struct dfs_dentry *de = dfs_dentry_lookup(mnt, buf, 0);
/* release the old dentry */
dfs_dentry_unref(tmp);
tmp = de;
}
}
else
{
break;
}
} while (tmp && tmp->vnode->type == FT_SYMLINK);
}
rt_free(buf);
}
return tmp;
}
/*
* This function creates a no-link (fully resolved) path.
*
* @param mnt
* @param fullpath
* @param mode 0 middle path nolink; 1 all path nolink.
* @param mode
*
* @return new path.
*/
char *dfs_nolink_path(struct dfs_mnt **mnt, char *fullpath, int mode)
char *dfs_file_realpath(struct dfs_mnt **mnt, const char *fullpath, int mode)
{
int index = 0;
char *path = RT_NULL;
char *link_fn;
struct dfs_dentry *dentry = RT_NULL;
path = (char *)rt_malloc((DFS_PATH_MAX * 2) + 2); // path + \0 + link_fn + \0
if (!path)
{
return path;
}
link_fn = path + DFS_PATH_MAX + 1;
int path_len = 0, index = 0;
char *path = RT_NULL, *link_fn, *tmp_path;
struct dfs_mnt *tmp_mnt;
if (*mnt && fullpath)
{
int i = 0;
char *fp = fullpath;
int len, link_len;
while ((i = _find_path_node(fp)) > 0)
path = (char *)rt_malloc((DFS_PATH_MAX * 3) + 3); // path + \0 + link_fn + \0 + tmp_path + \0
if (!path)
{
if (i + index > DFS_PATH_MAX)
return RT_NULL;
}
link_fn = path + DFS_PATH_MAX + 1;
tmp_path = link_fn + (DFS_PATH_MAX + 1);
len = rt_strlen(fullpath);
if (len > DFS_PATH_MAX)
{
goto _ERR_RET;
}
index = (DFS_PATH_MAX - len);
rt_strcpy(tmp_path + index, fullpath);
if (mode == DFS_REALPATH_ONLY_LAST)
{
path_len = _get_parent_path(fullpath, path);
index += path_len;
}
while ((len = _first_path_len(tmp_path + index)) > 0)
{
if (len + path_len > DFS_PATH_MAX)
{
goto _ERR_RET;
}
rt_memcpy(path + index, fp, i);
path[index + i] = '\0';
rt_memcpy(path + path_len, tmp_path + index, len);
path[path_len + len] = '\0';
index += len;
tmp_mnt = dfs_mnt_lookup(path);
if (tmp_mnt == RT_NULL)
{
goto _ERR_RET;
}
*mnt = tmp_mnt;
/* the last component is processed according to mode. */
if ((fp[i] == '\0') && (!mode))
if ((tmp_path[index] == '\0') && (mode == DFS_REALPATH_EXCEPT_LAST))
{
break;
}
fp += i;
dentry = dfs_dentry_lookup(*mnt, path, 0);
if (dentry && dentry->vnode->type == FT_SYMLINK)
link_len = _try_readlink(path, *mnt, link_fn);
if (link_len > 0)
{
int ret = -1;
int ret = _insert_link_path(link_fn, link_len, tmp_path, &index);
if ((*mnt)->fs_ops->readlink)
if (ret == 1)
{
if (dfs_is_mounted((*mnt)) == 0)
{
ret = (*mnt)->fs_ops->readlink(dentry, link_fn, DFS_PATH_MAX);
}
/* link_fn[0] == '/' */
path_len = 0;
}
if (ret > 0)
{
int len = rt_strlen(link_fn);
if (link_fn[0] != '/')
{
path[index] = '/';
}
else
{
index = 0;
}
if (len + index + 1 >= DFS_PATH_MAX)
{
goto _ERR_RET;
}
rt_memcpy(path + index, link_fn, len);
index += len;
path[index] = '\0';
*mnt = dfs_mnt_lookup(path);
}
else
else if (ret < 0)
{
goto _ERR_RET;
}
}
else
{
index += i;
path_len += len;
}
dfs_dentry_unref(dentry);
}
}
else
{
return path;
_ERR_RET:
rt_free(path);
path = RT_NULL;
@ -349,7 +344,7 @@ int dfs_file_open(struct dfs_file *file, const char *path, int oflags, mode_t mo
mnt = dfs_mnt_lookup(fullpath);
if (mnt)
{
char *tmp = dfs_nolink_path(&mnt, fullpath, 0);
char *tmp = dfs_file_realpath(&mnt, fullpath, DFS_REALPATH_EXCEPT_LAST);
if (tmp)
{
rt_free(fullpath);
@ -370,9 +365,12 @@ int dfs_file_open(struct dfs_file *file, const char *path, int oflags, mode_t mo
else
{
struct dfs_dentry *target_dentry = RT_NULL;
/* follow symbol link */
target_dentry = dfs_file_follow_link(dentry);
char *path = dfs_file_realpath(&mnt, fullpath, DFS_REALPATH_ONLY_LAST);
if (path)
{
target_dentry = dfs_dentry_lookup(mnt, path, oflags);
rt_free(path);
}
dfs_dentry_unref(dentry);
dentry = target_dentry;
}
@ -507,7 +505,7 @@ int dfs_file_open(struct dfs_file *file, const char *path, int oflags, mode_t mo
if (ret < 0)
{
LOG_E("open %s failed in file system: %s", path, dentry->mnt->fs_ops->name);
LOG_I("open %s failed in file system: %s", path, dentry->mnt->fs_ops->name);
DLOG(msg, mnt->fs_ops->name, "dfs_file", DLOG_MSG_RET, "open failed.");
dfs_file_unref(file);
}
@ -791,7 +789,7 @@ int dfs_file_stat(const char *path, struct stat *buf)
mnt = dfs_mnt_lookup(fullpath);
if (mnt)
{
char *tmp = dfs_nolink_path(&mnt, fullpath, 1);
char *tmp = dfs_file_realpath(&mnt, fullpath, DFS_REALPATH_EXCEPT_NONE);
if (tmp)
{
rt_free(fullpath);
@ -845,7 +843,7 @@ int dfs_file_lstat(const char *path, struct stat *buf)
mnt = dfs_mnt_lookup(fullpath);
if (mnt)
{
char *tmp = dfs_nolink_path(&mnt, fullpath, 0);
char *tmp = dfs_file_realpath(&mnt, fullpath, DFS_REALPATH_EXCEPT_LAST);
if (tmp)
{
rt_free(fullpath);
@ -924,7 +922,7 @@ int dfs_file_setattr(const char *path, struct dfs_attr *attr)
mnt = dfs_mnt_lookup(fullpath);
if (mnt)
{
char *tmp = dfs_nolink_path(&mnt, fullpath, 0);
char *tmp = dfs_file_realpath(&mnt, fullpath, DFS_REALPATH_EXCEPT_LAST);
if (tmp)
{
rt_free(fullpath);
@ -1097,7 +1095,7 @@ int dfs_file_unlink(const char *path)
mnt = dfs_mnt_lookup(fullpath);
if (mnt)
{
char *tmp = dfs_nolink_path(&mnt, fullpath, 0);
char *tmp = dfs_file_realpath(&mnt, fullpath, DFS_REALPATH_EXCEPT_LAST);
if (tmp)
{
rt_free(fullpath);
@ -1199,7 +1197,7 @@ int dfs_file_link(const char *oldname, const char *newname)
return -1;
}
char *tmp = dfs_nolink_path(&mnt, old_fullpath, 0);
char *tmp = dfs_file_realpath(&mnt, old_fullpath, DFS_REALPATH_EXCEPT_LAST);
if (tmp)
{
rt_free(old_fullpath);
@ -1210,7 +1208,7 @@ int dfs_file_link(const char *oldname, const char *newname)
new_fullpath = dfs_normalize_path(NULL, newname);
if (new_fullpath)
{
char *tmp = dfs_nolink_path(&mnt, new_fullpath, 0);
char *tmp = dfs_file_realpath(&mnt, new_fullpath, DFS_REALPATH_EXCEPT_LAST);
if (tmp)
{
rt_free(new_fullpath);
@ -1310,7 +1308,7 @@ int dfs_file_symlink(const char *target, const char *linkpath)
mnt = dfs_mnt_lookup(parent);
if (mnt)
{
char *tmp = dfs_nolink_path(&mnt, parent, 0);
char *tmp = dfs_file_realpath(&mnt, parent, DFS_REALPATH_EXCEPT_LAST);
if (tmp)
{
rt_free(parent);
@ -1318,7 +1316,7 @@ int dfs_file_symlink(const char *target, const char *linkpath)
}
DLOG(msg, "dfs_file", "dentry", DLOG_MSG, "dfs_dentry_lookup(mnt, %s)", fullpath);
dentry = dfs_dentry_lookup(mnt, parent, 0);
dentry = dfs_dentry_lookup(mnt, parent, DFS_REALPATH_EXCEPT_LAST);
if (dentry)
{
if (dentry->mnt->fs_ops->symlink)
@ -1326,17 +1324,6 @@ int dfs_file_symlink(const char *target, const char *linkpath)
char *path = dfs_normalize_path(parent, target);
if (path)
{
char *tmp = dfs_nolink_path(&mnt, path, 0);
if (tmp)
{
rt_free(path);
path = tmp;
}
else
{
tmp = path;
}
ret = rt_strncmp(parent, path, strlen(parent));
if (ret == 0)
{
@ -1346,6 +1333,10 @@ int dfs_file_symlink(const char *target, const char *linkpath)
tmp ++;
}
}
else
{
tmp = path;
}
if (dfs_is_mounted(mnt) == 0)
{
@ -1401,7 +1392,7 @@ int dfs_file_readlink(const char *path, char *buf, int bufsize)
mnt = dfs_mnt_lookup(fullpath);
if (mnt)
{
char *tmp = dfs_nolink_path(&mnt, fullpath, 0);
char *tmp = dfs_file_realpath(&mnt, fullpath, DFS_REALPATH_EXCEPT_LAST);
if (tmp)
{
rt_free(fullpath);
@ -1466,7 +1457,7 @@ int dfs_file_rename(const char *old_file, const char *new_file)
return -1;
}
char *tmp = dfs_nolink_path(&mnt, old_fullpath, 0);
char *tmp = dfs_file_realpath(&mnt, old_fullpath, DFS_REALPATH_EXCEPT_LAST);
if (tmp)
{
rt_free(old_fullpath);
@ -1477,7 +1468,7 @@ int dfs_file_rename(const char *old_file, const char *new_file)
new_fullpath = dfs_normalize_path(NULL, new_file);
if (new_fullpath)
{
char *tmp = dfs_nolink_path(&mnt, new_fullpath, 0);
char *tmp = dfs_file_realpath(&mnt, new_fullpath, DFS_REALPATH_EXCEPT_LAST);
if (tmp)
{
rt_free(new_fullpath);
@ -1652,7 +1643,7 @@ int dfs_file_isdir(const char *path)
mnt = dfs_mnt_lookup(fullpath);
if (mnt)
{
char *tmp = dfs_nolink_path(&mnt, fullpath, 1);
char *tmp = dfs_file_realpath(&mnt, fullpath, DFS_REALPATH_EXCEPT_NONE);
if (tmp)
{
rt_free(fullpath);
@ -1853,7 +1844,7 @@ void ls(const char *pathname)
mnt = dfs_mnt_lookup(fullpath);
if (mnt)
{
char *tmp = dfs_nolink_path(&mnt, fullpath, 0);
char *tmp = dfs_file_realpath(&mnt, fullpath, DFS_REALPATH_EXCEPT_LAST);
if (tmp)
{
char *index;

View File

@ -269,7 +269,6 @@ static void dfs_pcache_thread(void *parameter)
{
page->len = page->size;
}
//rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, page->page, page->size);
if (aspace->ops->write)
{
aspace->ops->write(page);
@ -676,9 +675,14 @@ static int dfs_page_unmap(struct dfs_page *page)
if (map)
{
void *vaddr = dfs_aspace_vaddr(map->varea, page->fpos);
//rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, vaddr, ARCH_PAGE_SIZE);
rt_varea_unmap_page(map->varea, vaddr);
rt_varea_t varea;
void *vaddr;
varea = rt_aspace_query(map->aspace, map->vaddr);
RT_ASSERT(varea);
vaddr = dfs_aspace_vaddr(varea, page->fpos);
rt_varea_unmap_page(varea, vaddr);
rt_free(map);
}
@ -741,7 +745,6 @@ static void dfs_page_release(struct dfs_page *page)
{
page->len = page->size;
}
//rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, page->page, page->size);
if (aspace->ops->write)
{
aspace->ops->write(page);
@ -995,7 +998,6 @@ static struct dfs_page *dfs_aspace_load_page(struct dfs_file *file, off_t pos)
page->size = ARCH_PAGE_SIZE;
page->fpos = pos / ARCH_PAGE_SIZE * ARCH_PAGE_SIZE;
aspace->ops->read(file, page);
//rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, page->page, page->size);
page->ref_count ++;
dfs_page_insert(page);
@ -1105,7 +1107,6 @@ int dfs_aspace_read(struct dfs_file *file, void *buf, size_t count, off_t *pos)
len = count > len ? len : count;
if (len)
{
//rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, page->page, page->size);
rt_memcpy(ptr, page->page + *pos - page->fpos, len);
ptr += len;
*pos += len;
@ -1158,7 +1159,6 @@ int dfs_aspace_write(struct dfs_file *file, const void *buf, size_t count, off_t
len = page->fpos + ARCH_PAGE_SIZE - *pos;
len = count > len ? len : count;
rt_memcpy(page->page + *pos - page->fpos, ptr, len);
//rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, page->page, page->size);
ptr += len;
*pos += len;
count -= len;
@ -1225,7 +1225,7 @@ int dfs_aspace_flush(struct dfs_aspace *aspace)
{
page->len = page->size;
}
//rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, page->page, page->size);
if (aspace->ops->write)
{
aspace->ops->write(page);
@ -1277,6 +1277,7 @@ void *dfs_aspace_mmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr
void *ret = RT_NULL;
struct dfs_page *page;
struct dfs_aspace *aspace = file->vnode->aspace;
rt_aspace_t target_aspace = varea->aspace;
page = dfs_page_lookup(file, dfs_aspace_fpos(varea, vaddr));
if (page)
@ -1284,7 +1285,8 @@ void *dfs_aspace_mmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr
struct dfs_mmap *map = (struct dfs_mmap *)rt_calloc(1, sizeof(struct dfs_mmap));
if (map)
{
void *pg_paddr = rt_kmem_v2p(page->page);
void *pg_vaddr = page->page;
void *pg_paddr = rt_kmem_v2p(pg_vaddr);
int err = rt_varea_map_range(varea, vaddr, pg_paddr, page->size);
if (err == RT_EOK)
{
@ -1301,10 +1303,12 @@ void *dfs_aspace_mmap(struct dfs_file *file, struct rt_varea *varea, void *vaddr
* fetching of the next instruction can see the coherent data with the data cache,
* TLB, MMU, main memory, and all the other observers in the computer system.
*/
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, vaddr, ARCH_PAGE_SIZE);
rt_hw_cpu_icache_ops(RT_HW_CACHE_INVALIDATE, vaddr, ARCH_PAGE_SIZE);
ret = page->page;
map->varea = varea;
ret = pg_vaddr;
map->aspace = target_aspace;
map->vaddr = vaddr;
dfs_aspace_lock(aspace);
rt_list_insert_after(&page->mmap_head, &map->mmap_node);
dfs_page_release(page);
@ -1329,6 +1333,8 @@ int dfs_aspace_unmap(struct dfs_file *file, struct rt_varea *varea)
{
struct dfs_vnode *vnode = file->vnode;
struct dfs_aspace *aspace = vnode->aspace;
void *unmap_start = varea->start;
void *unmap_end = (char *)unmap_start + varea->size;
if (aspace)
{
@ -1347,20 +1353,32 @@ int dfs_aspace_unmap(struct dfs_file *file, struct rt_varea *varea)
{
rt_list_t *node, *tmp;
struct dfs_mmap *map;
rt_varea_t map_varea = RT_NULL;
node = page->mmap_head.next;
while (node != &page->mmap_head)
{
rt_aspace_t map_aspace;
map = rt_list_entry(node, struct dfs_mmap, mmap_node);
tmp = node;
node = node->next;
if (map && varea == map->varea)
if (map && varea->aspace == map->aspace
&& map->vaddr >= unmap_start && map->vaddr < unmap_end)
{
void *vaddr = dfs_aspace_vaddr(map->varea, page->fpos);
void *vaddr = map->vaddr;
map_aspace = map->aspace;
rt_varea_unmap_page(map->varea, vaddr);
if (!map_varea || map_varea->aspace != map_aspace ||
vaddr < map_varea->start ||
vaddr >= map_varea->start + map_varea->size)
{
/* lock the tree so we don't access uncompleted data */
map_varea = rt_aspace_query(map_aspace, vaddr);
}
rt_varea_unmap_page(map_varea, vaddr);
if (varea->attr == MMU_MAP_U_RWCB && page->fpos < page->aspace->vnode->size)
{
@ -1405,7 +1423,7 @@ int dfs_aspace_page_unmap(struct dfs_file *file, struct rt_varea *varea, void *v
tmp = node;
node = node->next;
if (map && varea == map->varea)
if (map && varea->aspace == map->aspace && vaddr == map->vaddr)
{
if (varea->attr == MMU_MAP_U_RWCB)
{

View File

@ -57,17 +57,6 @@ menuconfig RT_USING_SERIAL
default 64
endif
config RT_USING_TTY
bool "Using TTY SYSTEM"
depends on RT_USING_SMART
default y
if RT_USING_TTY
config RT_TTY_DEBUG
bool "Using TTY DEBUG"
default n
endif
config RT_USING_CAN
bool "Using CAN device drivers"
default n

View File

@ -29,6 +29,10 @@
#include <rtdevice.h> /* for wqueue_init */
#endif /* RT_USING_POSIX_DEVIO */
#ifdef RT_USING_DFS_V2
#include <devfs.h>
#endif /* RT_USING_DFS_V2 */
#ifdef RT_USING_DEVICE
#ifdef RT_USING_DEVICE_OPS
@ -78,6 +82,10 @@ rt_err_t rt_device_register(rt_device_t dev,
rt_wqueue_init(&(dev->wait_queue));
#endif /* RT_USING_POSIX_DEVIO */
#ifdef RT_USING_DFS_V2
dfs_devfs_device_add(dev);
#endif /* RT_USING_DFS_V2 */
return RT_EOK;
}
RTM_EXPORT(rt_device_register);

View File

@ -107,7 +107,7 @@ struct rt_pic_irq
rt_uint32_t mode;
rt_uint32_t priority;
RT_DECLARE_BITMAP(affinity, RT_CPUS_NR);
RT_BITMAP_DECLARE(affinity, RT_CPUS_NR);
rt_list_t list;
rt_list_t children_nodes;

View File

@ -179,4 +179,5 @@ rt_err_t rt_hw_serial_register(struct rt_serial_device *serial,
rt_uint32_t flag,
void *data);
rt_err_t rt_hw_serial_register_tty(struct rt_serial_device *serial);
#endif

View File

@ -191,4 +191,5 @@ rt_err_t rt_hw_serial_register(struct rt_serial_device *serial,
rt_uint32_t flag,
void *data);
rt_err_t rt_hw_serial_register_tty(struct rt_serial_device *serial);
#endif

View File

@ -33,5 +33,6 @@ void rt_completion_init(struct rt_completion *completion);
rt_err_t rt_completion_wait(struct rt_completion *completion,
rt_int32_t timeout);
void rt_completion_done(struct rt_completion *completion);
rt_err_t rt_completion_wakeup(struct rt_completion *completion);
#endif

View File

@ -0,0 +1,38 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-20 Shell Add cond var API in kernel
*/
#ifndef __LWP_TERMINAL_CONDVAR_H__
#define __LWP_TERMINAL_CONDVAR_H__
#include <rtthread.h>
typedef struct rt_condvar
{
#ifdef USING_RT_OBJECT
struct rt_object parent;
#endif
rt_atomic_t waiters_cnt;
rt_atomic_t waiting_mtx;
struct rt_wqueue event;
} *rt_condvar_t;
void rt_condvar_init(rt_condvar_t cv, char *name);
int rt_condvar_timedwait(rt_condvar_t cv, rt_mutex_t mtx, int suspend_flag,
rt_tick_t timeout);
int rt_condvar_signal(rt_condvar_t cv);
int rt_condvar_broadcast(rt_condvar_t cv);
rt_inline void rt_condvar_detach(rt_condvar_t cv)
{
RT_UNUSED(cv);
return ;
}
#endif /* __LWP_TERMINAL_CONDVAR_H__ */
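/* Usage sketch (illustrative; assumes RT_INTERRUPTIBLE is the desired suspend
 * flag and that lock/cv were set up with rt_mutex_init()/rt_condvar_init()):
 * the classic wait-loop pairing of a condition variable with a mutex. */
static struct rt_mutex lock;
static struct rt_condvar cv;
static int data_ready;

static void consumer(void)
{
    rt_mutex_take(&lock, RT_WAITING_FOREVER);
    while (!data_ready)
    {
        rt_condvar_timedwait(&cv, &lock, RT_INTERRUPTIBLE, RT_WAITING_FOREVER);
    }
    rt_mutex_release(&lock);
}

static void producer(void)
{
    rt_mutex_take(&lock, RT_WAITING_FOREVER);
    data_ready = 1;
    rt_condvar_signal(&cv);
    rt_mutex_release(&lock);
}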

View File

@ -12,6 +12,7 @@
#include <rtdef.h>
#include <rtconfig.h>
#include "condvar.h"
/**
* Pipe Device
@ -34,6 +35,7 @@ struct rt_pipe_device
int writer;
int reader;
struct rt_condvar waitfor_parter;
struct rt_mutex lock;
};
typedef struct rt_pipe_device rt_pipe_t;

View File

@ -49,6 +49,7 @@ int rt_wqueue_wait(rt_wqueue_t *queue, int condition, int timeout);
int rt_wqueue_wait_killable(rt_wqueue_t *queue, int condition, int timeout);
int rt_wqueue_wait_interruptible(rt_wqueue_t *queue, int condition, int timeout);
void rt_wqueue_wakeup(rt_wqueue_t *queue, void *key);
void rt_wqueue_wakeup_all(rt_wqueue_t *queue, void *key);
#define DEFINE_WAIT_FUNC(name, function) \
struct rt_wqueue_node name = { \

View File

@ -26,6 +26,7 @@
#include "ipc/completion.h"
#include "ipc/dataqueue.h"
#include "ipc/workqueue.h"
#include "ipc/condvar.h"
#include "ipc/waitqueue.h"
#include "ipc/pipe.h"
#include "ipc/poll.h"

View File

@ -134,7 +134,7 @@ RTM_EXPORT(rt_completion_wait);
*
* @param completion is a pointer to a completion object.
*/
void rt_completion_done(struct rt_completion *completion)
static int _completion_done(struct rt_completion *completion)
{
rt_base_t level;
rt_err_t error;
@ -145,7 +145,7 @@ void rt_completion_done(struct rt_completion *completion)
if (RT_COMPLETION_FLAG(completion) == RT_COMPLETED)
{
rt_spin_unlock_irqrestore(&_completion_lock, level);
return;
return -RT_EBUSY;
}
suspend_thread = RT_COMPLETION_THREAD(completion);
@ -160,10 +160,38 @@ void rt_completion_done(struct rt_completion *completion)
LOG_D("%s: failed to resume thread", __func__);
}
}
else
{
/* no thread waiting */
error = -RT_EEMPTY;
}
completion->susp_thread_n_flag = RT_COMPLETION_NEW_STAT(RT_NULL, RT_COMPLETED);
rt_spin_unlock_irqrestore(&_completion_lock, level);
return error;
}
/**
* @brief This function indicates a completion has done.
*
* @param completion is a pointer to a completion object.
*/
void rt_completion_done(struct rt_completion *completion)
{
_completion_done(completion);
}
RTM_EXPORT(rt_completion_done);
/**
* @brief This function indicates a completion has done and wakeup the thread
*
* @param completion is a pointer to a completion object.
*/
rt_err_t rt_completion_wakeup(struct rt_completion *completion)
{
return _completion_done(completion);
}
RTM_EXPORT(rt_completion_wakeup);
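The split gives callers a way to observe the outcome: per the code above, -RT_EEMPTY means no thread was waiting and -RT_EBUSY means the completion was already in the COMPLETED state. A sketch of how a caller might use that (the ISR shown is hypothetical):

    static void demo_isr(struct rt_completion *done)
    {
        rt_err_t err = rt_completion_wakeup(done);
        if (err == -RT_EEMPTY)
        {
            /* nobody was waiting; the COMPLETED flag stays latched for
             * the next rt_completion_wait() caller */
        }
        else if (err == -RT_EBUSY)
        {
            /* a previous done/wakeup had already completed this object */
        }
    }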

View File

@ -0,0 +1,173 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-20 Shell Support of condition variable
*/
#define DBG_TAG "ipc.condvar"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <rtdevice.h>
#include <rtatomic.h>
#include <rtthread.h>
static struct rt_spinlock _local_cv_queue_lock = RT_SPINLOCK_INIT;
#define CV_ASSERT_LOCKED(cv) \
RT_ASSERT(!(cv)->waiting_mtx || \
rt_mutex_get_owner((rt_mutex_t)(cv)->waiting_mtx) == \
rt_thread_self())
void rt_condvar_init(rt_condvar_t cv, char *name)
{
#ifdef USING_RT_OBJECT
/* TODO: support rt object */
rt_object_init();
#endif
rt_wqueue_init(&cv->event);
rt_atomic_store(&cv->waiters_cnt, 0);
rt_atomic_store(&cv->waiting_mtx, 0);
}
static int _waitq_inqueue(rt_wqueue_t *queue, struct rt_wqueue_node *node,
rt_tick_t timeout, int suspend_flag)
{
rt_thread_t tcb = node->polling_thread;
rt_timer_t timer = &(tcb->thread_timer);
rt_err_t ret;
if (queue->flag != RT_WQ_FLAG_WAKEUP)
{
ret = rt_thread_suspend_with_flag(tcb, suspend_flag);
if (ret == RT_EOK)
{
rt_wqueue_add(queue, node);
if (timeout != RT_WAITING_FOREVER)
{
rt_timer_control(timer, RT_TIMER_CTRL_SET_TIME, &timeout);
rt_timer_start(timer);
}
}
}
else
{
ret = RT_EOK;
}
return ret;
}
#define INIT_WAITQ_NODE(node) \
{ \
.polling_thread = rt_thread_self(), .key = 0, \
.wakeup = __wqueue_default_wake, .wqueue = &cv->event, \
.list = RT_LIST_OBJECT_INIT(node.list) \
}
int rt_condvar_timedwait(rt_condvar_t cv, rt_mutex_t mtx, int suspend_flag,
rt_tick_t timeout)
{
rt_err_t acq_mtx_succ, rc;
rt_atomic_t waiting_mtx;
struct rt_wqueue_node node = INIT_WAITQ_NODE(node);
/* not allowed in IRQ & critical section */
RT_DEBUG_SCHEDULER_AVAILABLE(1);
CV_ASSERT_LOCKED(cv);
/**
* in the worst case, this races with the later code that resets the field
* before the mutex is taken; the spinlock then comes to the rescue.
*/
rt_spin_lock(&_local_cv_queue_lock);
waiting_mtx = rt_atomic_load(&cv->waiting_mtx);
if (!waiting_mtx)
acq_mtx_succ = rt_atomic_compare_exchange_strong(
&cv->waiting_mtx, &waiting_mtx, (size_t)mtx);
else
acq_mtx_succ = 0;
rt_spin_unlock(&_local_cv_queue_lock);
if (acq_mtx_succ == 1 || waiting_mtx == (size_t)mtx)
{
rt_atomic_add(&cv->waiters_cnt, 1);
rt_enter_critical();
if (suspend_flag == RT_INTERRUPTIBLE)
rc = _waitq_inqueue(&cv->event, &node, timeout, RT_INTERRUPTIBLE);
else /* UNINTERRUPTIBLE is forbidden, since it's not safe for user space */
rc = _waitq_inqueue(&cv->event, &node, timeout, RT_KILLABLE);
acq_mtx_succ = rt_mutex_release(mtx);
RT_ASSERT(acq_mtx_succ == 0);
rt_exit_critical();
if (rc == RT_EOK)
{
rt_schedule();
rc = rt_get_errno();
rc = rc > 0 ? -rc : rc;
}
else
{
LOG_D("%s() failed to suspend", __func__);
}
rt_wqueue_remove(&node);
rt_spin_lock(&_local_cv_queue_lock);
if (rt_atomic_add(&cv->waiters_cnt, -1) == 1)
{
waiting_mtx = (size_t)mtx;
acq_mtx_succ = rt_atomic_compare_exchange_strong(&cv->waiting_mtx,
&waiting_mtx, 0);
RT_ASSERT(acq_mtx_succ == 1);
}
rt_spin_unlock(&_local_cv_queue_lock);
acq_mtx_succ = rt_mutex_take(mtx, RT_WAITING_FOREVER);
RT_ASSERT(acq_mtx_succ == 0);
}
else
{
LOG_D("%s: conflict waiting mutex", __func__);
rc = -EBUSY;
}
return rc;
}
/** Keep in mind that we are always operating with cv->waiting_mtx held */
int rt_condvar_signal(rt_condvar_t cv)
{
CV_ASSERT_LOCKED(cv);
/* to avoid spurious wakeups */
if (rt_atomic_load(&cv->waiters_cnt) > 0)
rt_wqueue_wakeup(&cv->event, 0);
cv->event.flag = 0;
return 0;
}
int rt_condvar_broadcast(rt_condvar_t cv)
{
CV_ASSERT_LOCKED(cv);
/* to avoid spurious wakeups */
if (rt_atomic_load(&cv->waiters_cnt) > 0)
rt_wqueue_wakeup_all(&cv->event, 0);
cv->event.flag = 0;
return 0;
}

View File

@ -9,11 +9,13 @@
* 2017-11-08 JasonJiaJie fix memory leak issue when close a pipe.
* 2023-06-28 shell return POLLHUP when writer closed its channel on poll()
* fix flag test on pipe_fops_open()
* 2023-12-02 shell Make read pipe operation interruptable.
*/
#include <rthw.h>
#include <rtdevice.h>
#include <stdint.h>
#include <sys/errno.h>
#include <ipc/condvar.h>
#if defined(RT_USING_POSIX_DEVIO) && defined(RT_USING_POSIX_PIPE)
#include <unistd.h>
@ -69,12 +71,12 @@ static int pipe_fops_open(struct dfs_file *fd)
if ((fd->flags & O_ACCMODE) == O_RDONLY)
{
pipe->reader = 1;
pipe->reader += 1;
}
if ((fd->flags & O_ACCMODE) == O_WRONLY)
{
pipe->writer = 1;
pipe->writer += 1;
}
if (fd->vnode->ref_count == 1)
{
@ -86,6 +88,21 @@ static int pipe_fops_open(struct dfs_file *fd)
}
}
if ((fd->flags & O_ACCMODE) == O_RDONLY && !pipe->writer)
{
/* wait for partner */
rc = rt_condvar_timedwait(&pipe->waitfor_parter, &pipe->lock,
RT_INTERRUPTIBLE, RT_WAITING_FOREVER);
if (rc != 0)
{
pipe->reader--;
}
}
else if ((fd->flags & O_ACCMODE) == O_WRONLY)
{
rt_condvar_broadcast(&pipe->waitfor_parter);
}
__exit:
rt_mutex_release(&pipe->lock);
@ -117,12 +134,12 @@ static int pipe_fops_close(struct dfs_file *fd)
if ((fd->flags & O_RDONLY) == O_RDONLY)
{
pipe->reader = 0;
pipe->reader -= 1;
}
if ((fd->flags & O_WRONLY) == O_WRONLY)
{
pipe->writer = 0;
pipe->writer -= 1;
while (!rt_list_isempty(&pipe->reader_queue.waiting_list))
{
rt_wqueue_wakeup(&pipe->reader_queue, (void*)POLLIN);
@ -234,7 +251,8 @@ static ssize_t pipe_fops_read(struct dfs_file *fd, void *buf, size_t count)
rt_mutex_release(&pipe->lock);
rt_wqueue_wakeup(&pipe->writer_queue, (void*)POLLOUT);
rt_wqueue_wait(&pipe->reader_queue, 0, -1);
if (rt_wqueue_wait_interruptible(&pipe->reader_queue, 0, -1) == -RT_EINTR)
return -EINTR;
rt_mutex_take(&pipe->lock, RT_WAITING_FOREVER);
}
}
@ -309,7 +327,8 @@ static ssize_t pipe_fops_write(struct dfs_file *fd, const void *buf, size_t coun
rt_mutex_release(&pipe->lock);
rt_wqueue_wakeup(&pipe->reader_queue, (void*)POLLIN);
/* pipe full, waiting on suspended write list */
rt_wqueue_wait(&pipe->writer_queue, 0, -1);
if (rt_wqueue_wait_interruptible(&pipe->writer_queue, 0, -1) == -RT_EINTR)
return -EINTR;
rt_mutex_take(&pipe->lock, -1);
}
rt_mutex_release(&pipe->lock);
@ -611,6 +630,8 @@ rt_pipe_t *rt_pipe_create(const char *name, int bufsz)
rt_mutex_init(&pipe->lock, name, RT_IPC_FLAG_FIFO);
rt_wqueue_init(&pipe->reader_queue);
rt_wqueue_init(&pipe->writer_queue);
rt_condvar_init(&pipe->waitfor_parter, "piwfp");
pipe->writer = 0;
pipe->reader = 0;
@ -674,6 +695,7 @@ int rt_pipe_delete(const char *name)
pipe = (rt_pipe_t *)device;
rt_condvar_detach(&pipe->waitfor_parter);
rt_mutex_detach(&pipe->lock);
#if defined(RT_USING_POSIX_DEVIO) && defined(RT_USING_POSIX_PIPE)
resource_id_put(&id_mgr, pipe->pipeno);
@ -736,11 +758,6 @@ int pipe(int fildes[2])
pipe->is_named = RT_FALSE; /* unnamed pipe */
pipe->pipeno = pipeno;
rt_snprintf(dev_name, sizeof(dev_name), "/dev/%s", dname);
fildes[0] = open(dev_name, O_RDONLY, 0);
if (fildes[0] < 0)
{
return -1;
}
fildes[1] = open(dev_name, O_WRONLY, 0);
if (fildes[1] < 0)
@ -749,6 +766,12 @@ int pipe(int fildes[2])
return -1;
}
fildes[0] = open(dev_name, O_RDONLY, 0);
if (fildes[0] < 0)
{
return -1;
}
return 0;
}
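The reordering of the two open() calls above is deliberate: with the partner-wait added to pipe_fops_open(), opening the read end while no writer exists blocks on the condvar, so pipe() must open the write end first or it would never return. Typical usage is unchanged; a minimal sketch (buffer contents are illustrative):

    #include <unistd.h>

    int demo_pipe(void)
    {
        int fds[2]; /* fds[0] = read end, fds[1] = write end */
        char buf[2];

        if (pipe(fds) != 0)
            return -1;

        write(fds[1], "ok", 2);
        read(fds[0], buf, 2);

        close(fds[0]);
        close(fds[1]);
        return 0;
    }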

View File

@ -9,7 +9,11 @@
* to blocked thread.
* 2022-01-24 THEWON let rt_wqueue_wait return thread->error when using signal
* 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable
* 2023-11-21 Shell Support wakeup_all
*/
#define DBG_TAG "ipc.waitqueue"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <stdint.h>
#include <rthw.h>
@ -64,7 +68,8 @@ int __wqueue_default_wake(struct rt_wqueue_node *wait, void *key)
}
/**
* @brief This function will wake up a pending thread on the specified waiting queue that meets the conditions.
* @brief This function will wake up a pending thread on the specified
* waiting queue that meets the conditions.
*
* @param queue is a pointer to the wait queue.
*
@ -94,17 +99,89 @@ void rt_wqueue_wakeup(rt_wqueue_t *queue, void *key)
entry = rt_list_entry(node, struct rt_wqueue_node, list);
if (entry->wakeup(entry, key) == 0)
{
rt_thread_resume(entry->polling_thread);
need_schedule = 1;
/**
* even though another thread may interrupt and wake this thread
* in the meantime, we can assume that the condition is ready
*/
entry->polling_thread->error = RT_EOK;
if (!rt_thread_resume(entry->polling_thread))
{
need_schedule = 1;
rt_list_remove(&(entry->list));
break;
rt_list_remove(&(entry->list));
break;
}
}
}
}
rt_spin_unlock_irqrestore(&(queue->spinlock), level);
if (need_schedule)
rt_schedule();
return;
}
/**
* @brief This function will wake up all pending thread on the specified
* waiting queue that meets the conditions.
*
* @param queue is a pointer to the wait queue.
*
* @param key is the wakeup conditions, but it is not effective now, because
* default wakeup function always return 0.
* If user wants to use it, user should define their own wakeup
* function.
*/
void rt_wqueue_wakeup_all(rt_wqueue_t *queue, void *key)
{
rt_base_t level;
int need_schedule = 0;
rt_list_t *queue_list;
struct rt_list_node *node;
struct rt_wqueue_node *entry;
queue_list = &(queue->waiting_list);
level = rt_spin_lock_irqsave(&(queue->spinlock));
/* set wakeup flag in the queue */
queue->flag = RT_WQ_FLAG_WAKEUP;
if (!(rt_list_isempty(queue_list)))
{
for (node = queue_list->next; node != queue_list; )
{
entry = rt_list_entry(node, struct rt_wqueue_node, list);
if (entry->wakeup(entry, key) == 0)
{
/**
* even though another thread may interrupt and wake this thread
* in the meantime, we can assume that the condition is ready
*/
entry->polling_thread->error = RT_EOK;
if (!rt_thread_resume(entry->polling_thread))
{
need_schedule = 1;
}
else
{
/* the wakeup arrived before the waiter had actually gone to sleep */
LOG_D("%s: Thread resume failed", __func__);
}
node = node->next;
}
else
{
node = node->next;
}
}
}
rt_spin_unlock_irqrestore(&(queue->spinlock), level);
if (need_schedule)
rt_schedule();
return;
}
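A sketch of the wakeup_all semantics with the default wake function (thread entries and names are illustrative, not part of the patch):

    static rt_wqueue_t wq; /* initialized elsewhere with rt_wqueue_init(&wq) */
    static volatile int ready;

    static void waiter_entry(void *arg)
    {
        /* blocks until broadcaster_entry() runs; returns 0 on wakeup,
         * a negative errno on interrupt (per the normalized return below) */
        rt_wqueue_wait_interruptible(&wq, 0, -1);
    }

    static void broadcaster_entry(void *arg)
    {
        ready = 1;
        rt_wqueue_wakeup_all(&wq, RT_NULL); /* releases every pending waiter */
    }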
/**
@ -184,7 +261,7 @@ __exit_wakeup:
rt_wqueue_remove(&__wait);
return tid->error;
return tid->error > 0 ? -tid->error : tid->error;
}
int rt_wqueue_wait(rt_wqueue_t *queue, int condition, int msec)

View File

@ -136,6 +136,7 @@ rt_inline void rt_ktime_hrtimer_keep_errno(rt_ktime_hrtimer_t timer, rt_err_t er
RT_ASSERT(timer != RT_NULL);
timer->error = err;
rt_set_errno(-err);
}
/**

View File

@ -1109,17 +1109,20 @@ struct rt_ofw_node *rt_ofw_get_alias_node(const char *tag, int id)
if (tag && id >= 0)
{
rt_list_for_each_entry(info, &_aliases_nodes, list)
if (!rt_list_isempty(&_aliases_nodes))
{
if (rt_strncmp(info->tag, tag, info->tag_len))
rt_list_for_each_entry(info, &_aliases_nodes, list)
{
continue;
}
if (rt_strncmp(info->tag, tag, info->tag_len))
{
continue;
}
if (info->id == id)
{
np = info->np;
break;
if (info->id == id)
{
np = info->np;
break;
}
}
}
}
@ -1130,18 +1133,20 @@ struct rt_ofw_node *rt_ofw_get_alias_node(const char *tag, int id)
int ofw_alias_node_id(struct rt_ofw_node *np)
{
int id;
struct alias_info *info;
struct alias_info *info = RT_NULL;
if (np)
{
id = -1;
rt_list_for_each_entry(info, &_aliases_nodes, list)
if (!rt_list_isempty(&_aliases_nodes))
{
if (info->np == np)
rt_list_for_each_entry(info, &_aliases_nodes, list)
{
id = info->id;
break;
if (info->np == np)
{
id = info->id;
break;
}
}
}
}
@ -1161,18 +1166,20 @@ int rt_ofw_get_alias_id(struct rt_ofw_node *np, const char *tag)
if (np && tag)
{
id = -1;
rt_list_for_each_entry(info, &_aliases_nodes, list)
if (!rt_list_isempty(&_aliases_nodes))
{
if (rt_strncmp(info->tag, tag, info->tag_len))
rt_list_for_each_entry(info, &_aliases_nodes, list)
{
continue;
}
if (rt_strncmp(info->tag, tag, info->tag_len))
{
continue;
}
if (info->np == np)
{
id = info->id;
break;
if (info->np == np)
{
id = info->id;
break;
}
}
}
}
@ -1192,17 +1199,19 @@ int rt_ofw_get_alias_last_id(const char *tag)
if (tag)
{
id = -1;
rt_list_for_each_entry(info, &_aliases_nodes, list)
if (!rt_list_isempty(&_aliases_nodes))
{
if (rt_strncmp(info->tag, tag, info->tag_len))
rt_list_for_each_entry(info, &_aliases_nodes, list)
{
continue;
}
if (rt_strncmp(info->tag, tag, info->tag_len))
{
continue;
}
if (info->id > id)
{
id = info->id;
if (info->id > id)
{
id = info->id;
}
}
}
}
@ -1465,7 +1474,7 @@ static const char *ofw_get_prop_fuzzy_name(const struct rt_ofw_node *np, const c
char *sf, split_field[64];
rt_size_t len = 0, max_ak = 0;
const char *str, *result = RT_NULL;
RT_DECLARE_BITMAP(ak, sizeof(split_field));
RT_BITMAP_DECLARE(ak, sizeof(split_field));
struct rt_ofw_prop *prop;
/*

View File

@ -8,6 +8,9 @@ src = []
if not GetDepend(['RT_USING_SERIAL']):
Return('group')
if GetDepend(['RT_USING_SMART']):
src += Glob('serial_tty.c')
if GetDepend(['RT_USING_SERIAL_V2']):
src += ['serial_v2.c']
else:

View File

@ -909,7 +909,7 @@ static rt_ssize_t rt_serial_write(struct rt_device *dev,
}
}
#if defined(RT_USING_POSIX_TERMIOS) && !defined(RT_USING_TTY)
#if defined(RT_USING_POSIX_TERMIOS) && !defined(RT_USING_SMART)
struct speed_baudrate_item
{
speed_t speed;
@ -1058,7 +1058,7 @@ static rt_err_t rt_serial_control(struct rt_device *dev,
}
break;
#ifdef RT_USING_POSIX_STDIO
#if defined(RT_USING_POSIX_TERMIOS) && !defined(RT_USING_TTY)
#if defined(RT_USING_POSIX_TERMIOS) && !defined(RT_USING_SMART)
case TCGETA:
{
struct termios *tio = (struct termios*)args;
@ -1315,9 +1315,21 @@ rt_err_t rt_hw_serial_register(struct rt_serial_device *serial,
device->fops = &_serial_fops;
#endif
#if defined(RT_USING_SMART)
rt_hw_serial_register_tty(serial);
#endif
return ret;
}
#if defined(RT_USING_SMART) && defined(LWP_DEBUG)
static volatile int _early_input = 0;
int lwp_startup_debug_request(void)
{
return _early_input;
}
#endif
/* ISR for serial interrupt */
void rt_hw_serial_isr(struct rt_serial_device *serial, int event)
{
@ -1360,8 +1372,17 @@ void rt_hw_serial_isr(struct rt_serial_device *serial, int event)
rt_spin_unlock_irqrestore(&(serial->spinlock), level);
}
/* invoke callback */
if (serial->parent.rx_indicate != RT_NULL)
/**
* Invoke callback.
* Try the notify hook first; when one is registered, rx_indicate()
* is not invoked. This separates the priorities and makes reusing the
* same serial device reasonable for the RT console.
*/
if (serial->rx_notify.notify)
{
serial->rx_notify.notify(serial->rx_notify.dev);
}
else if (serial->parent.rx_indicate != RT_NULL)
{
rt_size_t rx_length;
@ -1376,10 +1397,9 @@ void rt_hw_serial_isr(struct rt_serial_device *serial, int event)
serial->parent.rx_indicate(&serial->parent, rx_length);
}
}
if (serial->rx_notify.notify)
{
serial->rx_notify.notify(serial->rx_notify.dev);
}
#if defined(RT_USING_SMART) && defined(LWP_DEBUG)
_early_input = 1;
#endif
break;
}
case RT_SERIAL_EVENT_TX_DONE:
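Given the notify-over-rx_indicate precedence introduced above, a consumer that wants raw bytes routed to itself registers a notify hook instead of an rx_indicate callback. A sketch using the control interface seen elsewhere in this patch (the device name "uart0" is illustrative):

    static void my_rx_notify(struct rt_device *dev)
    {
        /* called from rt_hw_serial_isr(); defer real work to a thread */
    }

    static int demo_hook_uart(void)
    {
        struct rt_device_notify notify;
        rt_device_t uart = rt_device_find("uart0");

        if (!uart)
            return -1;

        notify.dev = uart;
        notify.notify = my_rx_notify;
        /* once set, rx_indicate() is no longer invoked for this device */
        return rt_device_control(uart, RT_DEVICE_CTRL_NOTIFY_SET, &notify);
    }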

View File

@ -0,0 +1,321 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-21 Shell init ver.
*/
#define DBG_TAG "drivers.serial"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#include <terminal/terminal.h>
#define TTY_NAME_PREFIX "S" /* (S)erial */
#define LWP_TTY_WORKQUEUE_PRIORITY 3
struct serial_tty_context
{
struct rt_serial_device *parent;
struct rt_device_notify backup_notify;
struct rt_work work;
};
static struct rt_workqueue *_ttyworkq; /* system work queue */
static rt_atomic_t _device_id_counter = 0;
static long get_dec_digits(rt_ubase_t val)
{
long result = 1;
while (1)
{
if (val < 10)
return result;
if (val < 100)
return result + 1;
if (val < 1000)
return result + 2;
if (val < 10000)
return result + 3;
val /= 10000U;
result += 4;
}
return result;
}
static char *alloc_device_name(void)
{
char *tty_dev_name;
unsigned int devid = rt_atomic_add(&_device_id_counter, 1);
long digits_len = (sizeof(TTY_NAME_PREFIX) - 1) /* raw prefix */
+ get_dec_digits(devid) + 1; /* tailing \0 */
tty_dev_name = rt_malloc(digits_len);
if (tty_dev_name)
rt_sprintf(tty_dev_name, "%s%u", TTY_NAME_PREFIX, devid);
return tty_dev_name;
}
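A quick check of the buffer arithmetic above (a worked example, not part of the patch): for devid = 42, get_dec_digits(42) returns 2, so digits_len = (sizeof("S") - 1) + 2 + 1 = 4, exactly enough for "S42" plus the terminating NUL.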
static void _tty_rx_notify(struct rt_device *device)
{
lwp_tty_t tp;
struct serial_tty_context *softc;
tp = rt_container_of(device, struct lwp_tty, parent);
RT_ASSERT(tp);
softc = tty_softc(tp);
if (_ttyworkq)
rt_workqueue_submit_work(_ttyworkq, &softc->work, 0);
}
static void _tty_rx_worker(struct rt_work *work, void *data)
{
char input;
rt_ssize_t readbytes;
lwp_tty_t tp = data;
struct serial_tty_context *softc;
struct rt_serial_device *serial;
tty_lock(tp);
while (1)
{
softc = tty_softc(tp);
serial = softc->parent;
readbytes = rt_device_read(&serial->parent, -1, &input, 1);
if (readbytes != 1)
{
break;
}
ttydisc_rint(tp, input, 0);
}
ttydisc_rint_done(tp);
tty_unlock(tp);
}
rt_inline void _setup_serial(struct rt_serial_device *serial, lwp_tty_t tp,
struct serial_tty_context *softc)
{
struct rt_device_notify notify;
softc->backup_notify = serial->rx_notify;
notify.dev = &tp->parent;
notify.notify = _tty_rx_notify;
rt_device_init(&serial->parent);
rt_work_init(&softc->work, _tty_rx_worker, tp);
rt_device_control(&serial->parent, RT_DEVICE_CTRL_NOTIFY_SET, &notify);
}
rt_inline void _restore_serial(struct rt_serial_device *serial, lwp_tty_t tp,
struct serial_tty_context *softc)
{
rt_device_control(&serial->parent, RT_DEVICE_CTRL_NOTIFY_SET, &softc->backup_notify);
}
static int _serial_isbusy(struct rt_serial_device *serial)
{
rt_thread_t user_thread = rt_console_current_user();
rt_thread_t self_thread = rt_thread_self();
return rt_console_get_device() == &serial->parent &&
(user_thread != RT_NULL && user_thread != self_thread);
}
static void serial_tty_outwakeup(struct lwp_tty *tp)
{
char out_char;
int len;
struct serial_tty_context *context = tty_softc(tp);
struct rt_serial_device *device;
if (!context || !context->parent)
{
LOG_E("%s: Data corruption", __func__);
return;
}
device = context->parent;
if (_serial_isbusy(device))
{
return ;
}
while ((len = ttydisc_getc(tp, &out_char, sizeof(out_char))) != 0)
{
device->ops->putc(device, out_char);
/* discard the remainder if emergency output happened */
if (_serial_isbusy(device))
{
break;
}
}
}
static int serial_tty_open(struct lwp_tty *tp)
{
struct serial_tty_context *softc;
struct rt_serial_device *serial;
rt_err_t error;
int oflags;
softc = tty_softc(tp);
serial = softc->parent;
LOG_D("%s", __func__);
rt_device_control(&serial->parent, RT_DEVICE_CTRL_CONSOLE_OFLAG, &oflags);
error = rt_device_open(&serial->parent, oflags);
if (!error)
{
/**
* to avoid the driver accessing null data,
* these are set up only after the tty is registered
*/
_setup_serial(serial, tp, softc);
}
return error;
}
static void serial_tty_close(struct lwp_tty *tp)
{
struct serial_tty_context *softc;
struct rt_serial_device *serial;
softc = tty_softc(tp);
serial = softc->parent;
LOG_D("%s", __func__);
_restore_serial(serial, tp, softc);
rt_device_close(&serial->parent);
}
static int serial_tty_ioctl(struct lwp_tty *tp, rt_ubase_t cmd, rt_caddr_t data,
struct rt_thread *td)
{
int error;
switch (cmd)
{
default:
/**
* Note: in most cases we don't let the serial layer handle ioctl,
* because it cannot act properly with respect to the process
* management system, of which it is unaware. So -ENOSYS is
* returned, which causes the TTY layer to handle the ioctl itself.
*/
error = -ENOSYS;
break;
}
return error;
}
static struct lwp_ttydevsw serial_ttydevsw = {
.tsw_open = serial_tty_open,
.tsw_close = serial_tty_close,
.tsw_ioctl = serial_tty_ioctl,
.tsw_outwakeup = serial_tty_outwakeup,
};
rt_err_t rt_hw_serial_register_tty(struct rt_serial_device *serial)
{
rt_err_t rc;
lwp_tty_t tty;
char *dev_name;
struct serial_tty_context *softc;
if (serial->rx_notify.dev)
{
return -RT_EBUSY;
}
softc = rt_malloc(sizeof(struct serial_tty_context));
if (softc)
{
dev_name = alloc_device_name();
if (dev_name)
{
softc->parent = serial;
tty = lwp_tty_create(&serial_ttydevsw, softc);
if (tty)
{
rc = lwp_tty_register(tty, dev_name);
if (rc != RT_EOK)
{
rt_free(tty);
rt_free(softc);
}
}
else
{
rt_free(softc);
rc = -RT_ENOMEM;
}
rt_free(dev_name);
}
else
{
rt_free(softc);
rc = -RT_ENOMEM;
}
}
else
{
rc = -RT_ENOMEM;
}
return rc;
}
rt_err_t rt_hw_serial_unregister_tty(struct rt_serial_device *serial)
{
rt_device_t tty_dev;
lwp_tty_t tp;
struct serial_tty_context *softc;
tty_dev = serial->rx_notify.dev;
tp = rt_container_of(tty_dev, struct lwp_tty, parent);
/* restore serial setting */
softc = tty_softc(tp);
serial->rx_notify = softc->backup_notify;
tty_rel_gone(tp);
/* device unregister? */
rt_device_destroy(&tp->parent);
/* resource free? */
lwp_tty_delete(tp);
return RT_EOK;
}
static int _tty_workqueue_init(void)
{
if (_ttyworkq != RT_NULL)
return RT_EOK;
_ttyworkq = rt_workqueue_create("ttyworkq", RT_SYSTEM_WORKQUEUE_STACKSIZE,
LWP_TTY_WORKQUEUE_PRIORITY);
RT_ASSERT(_ttyworkq != RT_NULL);
return RT_EOK;
}
INIT_PREV_EXPORT(_tty_workqueue_init);
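The RX path in this file is a standard deferral pattern: the ISR-level notify only queues work, and the workqueue thread drains the UART into the line discipline. A condensed restatement using the same APIs (names are illustrative, not part of the patch):

    static struct rt_workqueue *demo_wq;
    static struct rt_work demo_work;

    static void demo_worker(struct rt_work *work, void *data)
    {
        /* thread context: safe to block, take tty_lock(), run ttydisc_rint() */
    }

    static void demo_notify(struct rt_device *dev)
    {
        /* interrupt context: do nothing but schedule the worker */
        rt_workqueue_submit_work(demo_wq, &demo_work, 0);
    }

    static int demo_init(void)
    {
        demo_wq = rt_workqueue_create("demowq", RT_SYSTEM_WORKQUEUE_STACKSIZE, 3);
        rt_work_init(&demo_work, demo_worker, RT_NULL);
        return demo_wq ? RT_EOK : -RT_ENOMEM;
    }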

View File

@ -1,10 +0,0 @@
from building import *
# The set of source files associated with this SConscript file.
src = Glob('*.c')
cwd = GetCurrentDir()
CPPPATH = [cwd + "/include"]
group = DefineGroup('tty', src, depend = ['RT_USING_SMART', 'RT_USING_TTY'], CPPPATH = CPPPATH)
Return('group')

View File

@ -1,346 +0,0 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021.12.07 linzhenxing first version
*/
#include <rtthread.h>
#include <dfs_file.h>
#include <dfs_fs.h>
#include <tty.h>
#define DBG_TAG "CONSOLE"
#ifdef RT_TTY_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif /* RT_TTY_DEBUG */
#include <rtdbg.h>
#include <ipc/waitqueue.h>
#include <ipc/ringbuffer.h>
static struct tty_struct console_dev;
static struct rt_ringbuffer console_rx_ringbuffer;
static struct rt_wqueue console_rx_wqueue;
static rt_thread_t console_rx_thread;
static const size_t rb_bufsz = 0x1000;
static void console_rx_work(void *parameter)
{
int len;
char ch;
int lens;
static char buf[0x1000];
struct tty_struct *console;
console = &console_dev;
while (1)
{
rt_wqueue_wait(&console_rx_wqueue, 0, RT_WAITING_FOREVER);
lens = 0;
while (lens < sizeof(buf))
{
len = rt_ringbuffer_get(&console_rx_ringbuffer, (void *)&ch, sizeof(ch));
if (len == 0)
{
break;
}
lens += len;
buf[lens-1] = ch;
}
if (lens && console->ldisc->ops->receive_buf)
{
console->ldisc->ops->receive_buf((struct tty_struct *)console, buf, lens);
}
}
}
static int rx_thread_init(void)
{
void *rb_buffer;
rt_thread_t thread;
rb_buffer = rt_malloc(rb_bufsz);
rt_ringbuffer_init(&console_rx_ringbuffer, rb_buffer, rb_bufsz);
rt_wqueue_init(&console_rx_wqueue);
thread = rt_thread_create("console_rx", console_rx_work, &console_dev, rb_bufsz, 10, 10);
if (thread != RT_NULL)
{
rt_thread_startup(thread);
console_rx_thread = thread;
}
return 0;
}
INIT_COMPONENT_EXPORT(rx_thread_init);
static void console_rx_notify(struct rt_device *dev)
{
struct tty_struct *console = NULL;
int len = 0;
int lens = 0;
char ch = 0;
console = (struct tty_struct *)dev;
RT_ASSERT(console != RT_NULL);
while (1)
{
len = rt_device_read(console->io_dev, -1, &ch, 1);
if (len == 0)
{
break;
}
lens += len;
rt_ringbuffer_put(&console_rx_ringbuffer, (void *)&ch, sizeof(ch));
if (lens > rb_bufsz)
{
break;
}
}
if (console_rx_thread)
rt_wqueue_wakeup(&console_rx_wqueue, 0);
}
struct tty_struct *console_tty_get(void)
{
return &console_dev;
}
static void iodev_close(struct tty_struct *console)
{
struct rt_device_notify rx_notify;
rx_notify.notify = RT_NULL;
rx_notify.dev = RT_NULL;
/* clear notify */
rt_device_control(console->io_dev, RT_DEVICE_CTRL_NOTIFY_SET, &rx_notify);
rt_device_close(console->io_dev);
}
static rt_err_t iodev_open(struct tty_struct *console)
{
rt_err_t ret = RT_EOK;
struct rt_device_notify rx_notify;
rt_uint16_t oflags = 0;
rt_device_control(console->io_dev, RT_DEVICE_CTRL_CONSOLE_OFLAG, &oflags);
ret = rt_device_open(console->io_dev, oflags);
if (ret != RT_EOK)
{
return -RT_ERROR;
}
rx_notify.notify = console_rx_notify;
rx_notify.dev = (struct rt_device *)console;
rt_device_control(console->io_dev, RT_DEVICE_CTRL_NOTIFY_SET, &rx_notify);
return RT_EOK;
}
struct rt_device *console_get_iodev(void)
{
return console_dev.io_dev;
}
struct rt_device *console_set_iodev(struct rt_device *iodev)
{
rt_base_t level = 0;
struct rt_device *io_before = RT_NULL;
struct tty_struct *console = RT_NULL;
RT_ASSERT(iodev != RT_NULL);
console = &console_dev;
level = rt_spin_lock_irqsave(&console->spinlock);
RT_ASSERT(console->init_flag >= TTY_INIT_FLAG_REGED);
io_before = console->io_dev;
if (iodev == io_before)
{
goto exit;
}
if (console->init_flag >= TTY_INIT_FLAG_INITED)
{
/* close old device */
iodev_close(console);
}
console->io_dev = iodev;
if (console->init_flag >= TTY_INIT_FLAG_INITED)
{
rt_err_t ret;
/* open new device */
ret = iodev_open(console);
RT_ASSERT(ret == RT_EOK);
}
exit:
rt_spin_unlock_irqrestore(&console->spinlock, level);
return io_before;
}
/* RT-Thread Device Interface */
/*
* This function initializes console device.
*/
static rt_err_t rt_console_init(struct rt_device *dev)
{
rt_base_t level = 0;
rt_err_t result = RT_EOK;
struct tty_struct *console = RT_NULL;
RT_ASSERT(dev != RT_NULL);
console = (struct tty_struct *)dev;
level = rt_spin_lock_irqsave(&console->spinlock);
RT_ASSERT(console->init_flag == TTY_INIT_FLAG_REGED);
result = iodev_open(console);
if (result != RT_EOK)
{
goto exit;
}
console->init_flag = TTY_INIT_FLAG_INITED;
exit:
rt_spin_unlock_irqrestore(&console->spinlock, level);
return result;
}
static rt_err_t rt_console_open(struct rt_device *dev, rt_uint16_t oflag)
{
rt_err_t result = RT_EOK;
struct tty_struct *console = RT_NULL;
RT_ASSERT(dev != RT_NULL);
console = (struct tty_struct *)dev;
RT_ASSERT(console != RT_NULL);
RT_ASSERT(console->init_flag == TTY_INIT_FLAG_INITED);
return result;
}
static rt_err_t rt_console_close(struct rt_device *dev)
{
rt_err_t result = RT_EOK;
struct tty_struct *console = RT_NULL;
console = (struct tty_struct *)dev;
RT_ASSERT(console != RT_NULL);
RT_ASSERT(console->init_flag == TTY_INIT_FLAG_INITED);
return result;
}
static rt_ssize_t rt_console_read(struct rt_device *dev,
rt_off_t pos,
void *buffer,
rt_size_t size)
{
rt_size_t len = 0;
return len;
}
static rt_ssize_t rt_console_write(struct rt_device *dev,
rt_off_t pos,
const void *buffer,
rt_size_t size)
{
rt_size_t len = 0;
struct tty_struct *console = RT_NULL;
console = (struct tty_struct *)dev;
RT_ASSERT(console != RT_NULL);
RT_ASSERT(console->init_flag == TTY_INIT_FLAG_INITED);
len = rt_device_write((struct rt_device *)console->io_dev, -1, buffer, size);
return len;
}
static rt_err_t rt_console_control(rt_device_t dev, int cmd, void *args)
{
rt_size_t len = 0;
struct tty_struct *console = RT_NULL;
console = (struct tty_struct *)dev;
RT_ASSERT(console != RT_NULL);
RT_ASSERT(console->init_flag == TTY_INIT_FLAG_INITED);
len = rt_device_control((struct rt_device *)console->io_dev, cmd, args);
return len;
}
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops console_ops =
{
rt_console_init,
rt_console_open,
rt_console_close,
rt_console_read,
rt_console_write,
rt_console_control,
};
#endif
/*
* console register
*/
static struct dfs_file_ops con_fops;
rt_err_t console_register(const char *name, struct rt_device *iodev)
{
rt_err_t ret = RT_EOK;
struct rt_device *device = RT_NULL;
struct tty_struct *console = &console_dev;
RT_ASSERT(iodev != RT_NULL);
RT_ASSERT(console->init_flag == TTY_INIT_FLAG_NONE);
tty_init(console, TTY_DRIVER_TYPE_CONSOLE, SERIAL_TYPE_NORMAL, iodev);
console_ldata_init(console);
device = &(console->parent);
device->type = RT_Device_Class_Char;
#ifdef RT_USING_DEVICE_OPS
device->ops = &console_ops;
#else
device->init = rt_console_init;
device->open = rt_console_open;
device->close = rt_console_close;
device->read = rt_console_read;
device->write = rt_console_write;
device->control = rt_console_control;
#endif
/* register a character device */
ret = rt_device_register(device, name, 0);
if (ret != RT_EOK)
{
LOG_E("console driver register fail\n");
}
else
{
#ifdef RT_USING_POSIX_DEVIO
/* set fops */
memcpy(&con_fops, tty_get_fops(), sizeof(struct dfs_file_ops));
device->fops = &con_fops;
#endif
}
return ret;
}

View File

@ -1,19 +0,0 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021.12.07 linzhenxing first version
*/
#ifndef __CONSOLE_
#define __CONSOLE_
#include <rtthread.h>
#include "tty.h"
struct tty_struct *console_tty_get(void);
struct rt_device *console_get_iodev(void);
struct rt_device *console_set_iodev(struct rt_device *iodev);
rt_err_t console_register(const char *name, struct rt_device *iodev);
#endif

View File

@ -1,304 +0,0 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021.12.07 linzhenxing first version
*/
#ifndef __TTY_H__
#define __TTY_H__
#include <rthw.h>
#include <rtthread.h>
#include <tty_ldisc.h>
#ifdef RT_USING_SMART
#include <lwp.h>
#endif
#if defined(RT_USING_POSIX_TERMIOS)
#include <poll.h>
#include <termios.h>
#endif
#ifndef ENOIOCTLCMD
#define ENOIOCTLCMD (515) /* No ioctl command */
#endif
#define current lwp_self()
#define __DISABLED_CHAR '\0'
struct tty_node
{
struct rt_lwp *lwp;
struct tty_node *next;
};
void tty_initstack(struct tty_node *node);
int tty_push(struct tty_node **head, struct rt_lwp *lwp);
struct rt_lwp *tty_pop(struct tty_node **head, struct rt_lwp *target_lwp);
/*
* When a break, frame error, or parity error happens, these codes are
* stuffed into the flags buffer.
*/
#define TTY_NORMAL 0
#define TTY_BREAK 1
#define TTY_FRAME 2
#define TTY_PARITY 3
#define TTY_OVERRUN 4
#define INTR_CHAR(tty) ((tty)->init_termios.c_cc[VINTR])
#define QUIT_CHAR(tty) ((tty)->init_termios.c_cc[VQUIT])
#define ERASE_CHAR(tty) ((tty)->init_termios.c_cc[VERASE])
#define KILL_CHAR(tty) ((tty)->init_termios.c_cc[VKILL])
#define EOF_CHAR(tty) ((tty)->init_termios.c_cc[VEOF])
#define TIME_CHAR(tty) ((tty)->init_termios.c_cc[VTIME])
#define MIN_CHAR(tty) ((tty)->init_termios.c_cc[VMIN])
#define SWTC_CHAR(tty) ((tty)->init_termios.c_cc[VSWTC])
#define START_CHAR(tty) ((tty)->init_termios.c_cc[VSTART])
#define STOP_CHAR(tty) ((tty)->init_termios.c_cc[VSTOP])
#define SUSP_CHAR(tty) ((tty)->init_termios.c_cc[VSUSP])
#define EOL_CHAR(tty) ((tty)->init_termios.c_cc[VEOL])
#define REPRINT_CHAR(tty) ((tty)->init_termios.c_cc[VREPRINT])
#define DISCARD_CHAR(tty) ((tty)->init_termios.c_cc[VDISCARD])
#define WERASE_CHAR(tty) ((tty)->init_termios.c_cc[VWERASE])
#define LNEXT_CHAR(tty) ((tty)->init_termios.c_cc[VLNEXT])
#define EOL2_CHAR(tty) ((tty)->init_termios.c_cc[VEOL2])
#define _I_FLAG(tty,f) ((tty)->init_termios.c_iflag & (f))
#define _O_FLAG(tty,f) ((tty)->init_termios.c_oflag & (f))
#define _C_FLAG(tty,f) ((tty)->init_termios.c_cflag & (f))
#define _L_FLAG(tty,f) ((tty)->init_termios.c_lflag & (f))
#define I_IGNBRK(tty) _I_FLAG((tty),IGNBRK)
#define I_BRKINT(tty) _I_FLAG((tty),BRKINT)
#define I_IGNPAR(tty) _I_FLAG((tty),IGNPAR)
#define I_PARMRK(tty) _I_FLAG((tty),PARMRK)
#define I_INPCK(tty) _I_FLAG((tty),INPCK)
#define I_ISTRIP(tty) _I_FLAG((tty),ISTRIP)
#define I_INLCR(tty) _I_FLAG((tty),INLCR)
#define I_IGNCR(tty) _I_FLAG((tty),IGNCR)
#define I_ICRNL(tty) _I_FLAG((tty),ICRNL)
#define I_IUCLC(tty) _I_FLAG((tty),IUCLC)
#define I_IXON(tty) _I_FLAG((tty),IXON)
#define I_IXANY(tty) _I_FLAG((tty),IXANY)
#define I_IXOFF(tty) _I_FLAG((tty),IXOFF)
#define I_IMAXBEL(tty) _I_FLAG((tty),IMAXBEL)
#define I_IUTF8(tty) _I_FLAG((tty), IUTF8)
#define O_OPOST(tty) _O_FLAG((tty),OPOST)
#define O_OLCUC(tty) _O_FLAG((tty),OLCUC)
#define O_ONLCR(tty) _O_FLAG((tty),ONLCR)
#define O_OCRNL(tty) _O_FLAG((tty),OCRNL)
#define O_ONOCR(tty) _O_FLAG((tty),ONOCR)
#define O_ONLRET(tty) _O_FLAG((tty),ONLRET)
#define O_OFILL(tty) _O_FLAG((tty),OFILL)
#define O_OFDEL(tty) _O_FLAG((tty),OFDEL)
#define O_NLDLY(tty) _O_FLAG((tty),NLDLY)
#define O_CRDLY(tty) _O_FLAG((tty),CRDLY)
#define O_TABDLY(tty) _O_FLAG((tty),TABDLY)
#define O_BSDLY(tty) _O_FLAG((tty),BSDLY)
#define O_VTDLY(tty) _O_FLAG((tty),VTDLY)
#define O_FFDLY(tty) _O_FLAG((tty),FFDLY)
#define C_BAUD(tty) _C_FLAG((tty),CBAUD)
#define C_CSIZE(tty) _C_FLAG((tty),CSIZE)
#define C_CSTOPB(tty) _C_FLAG((tty),CSTOPB)
#define C_CREAD(tty) _C_FLAG((tty),CREAD)
#define C_PARENB(tty) _C_FLAG((tty),PARENB)
#define C_PARODD(tty) _C_FLAG((tty),PARODD)
#define C_HUPCL(tty) _C_FLAG((tty),HUPCL)
#define C_CLOCAL(tty) _C_FLAG((tty),CLOCAL)
#define C_CIBAUD(tty) _C_FLAG((tty),CIBAUD)
#define C_CRTSCTS(tty) _C_FLAG((tty),CRTSCTS)
#define L_ISIG(tty) _L_FLAG((tty),ISIG)
#define L_ICANON(tty) _L_FLAG((tty),ICANON)
#define L_XCASE(tty) _L_FLAG((tty),XCASE)
#define L_ECHO(tty) _L_FLAG((tty),ECHO)
#define L_ECHOE(tty) _L_FLAG((tty),ECHOE)
#define L_ECHOK(tty) _L_FLAG((tty),ECHOK)
#define L_ECHONL(tty) _L_FLAG((tty),ECHONL)
#define L_NOFLSH(tty) _L_FLAG((tty),NOFLSH)
#define L_TOSTOP(tty) _L_FLAG((tty),TOSTOP)
#define L_ECHOCTL(tty) _L_FLAG((tty),ECHOCTL)
#define L_ECHOPRT(tty) _L_FLAG((tty),ECHOPRT)
#define L_ECHOKE(tty) _L_FLAG((tty),ECHOKE)
#define L_FLUSHO(tty) _L_FLAG((tty),FLUSHO)
#define L_PENDIN(tty) _L_FLAG((tty),PENDIN)
#define L_IEXTEN(tty) _L_FLAG((tty),IEXTEN)
#define L_EXTPROC(tty) _L_FLAG((tty), EXTPROC)
/*
* Where all of the state associated with a tty is kept while the tty
* is open. Since the termios state should be kept even if the tty
* has been closed --- for things like the baud rate, etc --- it is
* not stored here, but rather a pointer to the real state is stored
* here. Possible the winsize structure should have the same
* treatment, but (1) the default 80x24 is usually right and (2) it's
* most often used by a windowing system, which will set the correct
* size each time the window is created or resized anyway.
* - TYT, 9/14/92
*/
struct tty_struct
{
struct rt_device parent;
int type;
int subtype;
int init_flag;
int index; //for pty
int pts_lock; //for pty
struct tty_struct *other_struct; //for pty
struct termios init_termios;
struct winsize winsize;
struct rt_mutex lock;
pid_t pgrp;
pid_t session;
struct rt_lwp *foreground;
struct tty_node *head;
struct tty_ldisc *ldisc;
void *disc_data;
struct rt_device *io_dev;
struct rt_wqueue wait_queue;
#define RT_TTY_BUF 1024
rt_list_t tty_drivers;
struct rt_spinlock spinlock;
};
enum
{
TTY_INIT_FLAG_NONE = 0,
TTY_INIT_FLAG_ALLOCED,
TTY_INIT_FLAG_REGED,
TTY_INIT_FLAG_INITED,
};
#define TTY_DRIVER_TYPE_SYSTEM 0x0001
#define TTY_DRIVER_TYPE_CONSOLE 0x0002
#define TTY_DRIVER_TYPE_SERIAL 0x0003
#define TTY_DRIVER_TYPE_PTY 0x0004
#define TTY_DRIVER_TYPE_SCC 0x0005 /* scc driver */
#define TTY_DRIVER_TYPE_SYSCONS 0x0006
/* tty magic number */
#define TTY_MAGIC 0x5401
/*
* These bits are used in the flags field of the tty structure.
*
* So that interrupts won't be able to mess up the queues,
* copy_to_cooked must be atomic with respect to itself, as must
* tty->write. Thus, you must use the inline functions set_bit() and
* clear_bit() to make things atomic.
*/
#define TTY_THROTTLED 0
#define TTY_IO_ERROR 1
#define TTY_OTHER_CLOSED 2
#define TTY_EXCLUSIVE 3
#define TTY_DEBUG 4
#define TTY_DO_WRITE_WAKEUP 5
#define TTY_PUSH 6
#define TTY_CLOSING 7
#define TTY_DONT_FLIP 8
#define TTY_HW_COOK_OUT 14
#define TTY_HW_COOK_IN 15
#define TTY_PTY_LOCK 16
#define TTY_NO_WRITE_SPLIT 17
#define NR_LDISCS 30
/* line disciplines */
#define N_TTY 0
#define N_SLIP 1
#define N_MOUSE 2
#define N_PPP 3
#define N_STRIP 4
#define N_AX25 5
#define N_X25 6 /* X.25 async */
#define N_6PACK 7
#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */
#define N_R3964 9 /* Reserved for Simatic R3964 module */
#define N_PROFIBUS_FDL 10 /* Reserved for Profibus */
#define N_IRDA 11 /* Linux IrDa - http://irda.sourceforge.net/ */
#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data */
/* cards about SMS messages */
#define N_HDLC 13 /* synchronous HDLC */
#define N_SYNC_PPP 14 /* synchronous PPP */
#define N_HCI 15 /* Bluetooth HCI UART */
#define N_GIGASET_M101 16 /* Siemens Gigaset M101 serial DECT adapter */
#define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */
#define N_PPS 18 /* Pulse per Second */
#define N_V253 19 /* Codec control over voice modem */
#define N_CAIF 20 /* CAIF protocol for talking to modems */
#define N_GSM0710 21 /* GSM 0710 Mux */
#define N_TI_WL 22 /* for TI's WL BT, FM, GPS combo chips */
#define N_TRACESINK 23 /* Trace data routing for MIPI P1149.7 */
#define N_TRACEROUTER 24 /* Trace data routing for MIPI P1149.7 */
#define N_NCI 25 /* NFC NCI UART */
/* Used for packet mode */
#define TIOCPKT_DATA 0
#define TIOCPKT_FLUSHREAD 1
#define TIOCPKT_FLUSHWRITE 2
#define TIOCPKT_STOP 4
#define TIOCPKT_START 8
#define TIOCPKT_NOSTOP 16
#define TIOCPKT_DOSTOP 32
/* pty subtypes */
#define PTY_TYPE_MASTER 0x0001
#define PTY_TYPE_SLAVE 0x0002
/* serial subtype definitions */
#define SERIAL_TYPE_NORMAL 1
#define max(a, b) ({\
typeof(a) _a = a;\
typeof(b) _b = b;\
_a > _b ? _a : _b; })
#define min(a, b) ({\
typeof(a) _a = a;\
typeof(b) _b = b;\
_a < _b ? _a : _b; })
void mutex_lock(rt_mutex_t mutex);
void mutex_unlock(rt_mutex_t mutex);
int __tty_check_change(struct tty_struct *tty, int sig);
int tty_check_change(struct tty_struct *tty);
rt_inline struct rt_wqueue *wait_queue_get(struct rt_lwp *lwp, struct tty_struct *tty)
{
if (lwp == RT_NULL)
{
return &tty->wait_queue;
}
return &lwp->wait_queue;
}
rt_inline struct rt_wqueue *wait_queue_current_get(struct rt_lwp *lwp, struct tty_struct *tty)
{
return wait_queue_get(lwp, tty);
}
rt_inline void tty_wakeup_check(struct tty_struct *tty)
{
struct rt_wqueue *wq = NULL;
wq = wait_queue_current_get(tty->foreground, tty);
rt_wqueue_wakeup(wq, (void*)POLLIN);
}
int tty_init(struct tty_struct *tty, int type, int subtype, struct rt_device *iodev);
const struct dfs_file_ops *tty_get_fops(void);
int n_tty_ioctl_extend(struct tty_struct *tty, int cmd, void *arg);
void console_ldata_init(struct tty_struct *tty);
int n_tty_receive_buf(struct tty_struct *tty, char *cp, int count);
#endif /*__TTY_H__*/

View File

@ -1,56 +0,0 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021.12.07 linzhenxing first version
*/
#ifndef __TTY_LDISC_
#define __TTY_LDISC_
#include <rtthread.h>
#include <dfs.h>
#include <dfs_fs.h>
#include <tty.h>
#if defined(RT_USING_POSIX_TERMIOS)
#include <termios.h>
#endif
struct tty_struct;
struct tty_ldisc_ops
{
char *name;
int num;
int (*open) (struct dfs_file *fd);
int (*close) (struct tty_struct *tty);
int (*ioctl) (struct dfs_file *fd, int cmd, void *args);
int (*read) (struct dfs_file *fd, void *buf, size_t count);
int (*write) (struct dfs_file *fd, const void *buf, size_t count);
int (*flush) (struct dfs_file *fd);
int (*lseek) (struct dfs_file *fd, off_t offset);
int (*getdents) (struct dfs_file *fd, struct dirent *dirp, uint32_t count);
int (*poll) (struct dfs_file *fd, struct rt_pollreq *req);
void (*set_termios) (struct tty_struct *tty, struct termios *old);
int (*receive_buf) (struct tty_struct *tty,char *cp, int count);
int refcount;
};
struct tty_ldisc
{
struct tty_ldisc_ops *ops;
struct tty_struct *tty;
};
#define TTY_LDISC_MAGIC 0x5403
int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
void tty_ldisc_kill(struct tty_struct *tty);
void tty_ldisc_init(struct tty_struct *tty);
void tty_ldisc_release(struct tty_struct *tty);
void n_tty_init(void);
#endif // __TTY_LDISC_

File diff suppressed because it is too large Load Diff

View File

@ -1,326 +0,0 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021.12.07 linzhenxing first version
*/
#include <rtthread.h>
#include <tty.h>
#include <tty_ldisc.h>
#define DBG_TAG "PTY"
#ifdef RT_TTY_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif /* RT_TTY_DEBUG */
#include <rtdbg.h>
#define PTY_PTS_SIZE 10
static struct tty_struct ptm_dev;
static struct tty_struct pts_devs[PTY_PTS_SIZE];
static int pts_index = 0;
static int pts_register(struct tty_struct *ptmx, struct tty_struct *pts, int pts_index);
/* check free pts device */
static struct tty_struct *find_freepts(void)
{
for(int i = 0; i < PTY_PTS_SIZE; i++)
{
if (pts_devs[i].init_flag == TTY_INIT_FLAG_NONE)
{
pts_devs[i].init_flag = TTY_INIT_FLAG_ALLOCED;
return &pts_devs[i];
}
}
return RT_NULL;
}
/* Set the lock flag on a pty */
static int pty_set_lock(struct tty_struct *tty, int *arg)
{
int val = *arg;
if (val)
{
tty->pts_lock = val;
}
else
{
tty->pts_lock = val;
}
return 0;
}
static int pty_get_lock(struct tty_struct *tty, int *arg)
{
*arg = tty->pts_lock;
return 0;
}
static int pty_get_index(struct tty_struct *tty, int *arg)
{
*arg = tty->index;
return 0;
}
/* RT-Thread Device Interface */
/*
* This function initializes console device.
*/
static rt_err_t pty_device_init(struct rt_device *dev)
{
rt_err_t result = RT_EOK;
struct tty_struct *tty = RT_NULL;
RT_ASSERT(dev != RT_NULL);
tty = (struct tty_struct *)dev;
RT_ASSERT(tty->init_flag == TTY_INIT_FLAG_REGED);
tty->init_flag = TTY_INIT_FLAG_INITED;
return result;
}
static rt_err_t pty_device_open(struct rt_device *dev, rt_uint16_t oflag)
{
rt_err_t result = RT_EOK;
return result;
}
static rt_err_t pty_device_close(struct rt_device *dev)
{
rt_err_t result = RT_EOK;
struct tty_struct *tty = (struct tty_struct*)dev;
//struct tty_struct *to = RT_NULL;
if (tty->subtype == PTY_TYPE_MASTER)
{
// to = tty->other_struct;
// to->init_flag = TTY_INIT_FLAG_NONE;
// to->other_struct = RT_NULL;
// to->foreground = RT_NULL;
// to->index = -1;
// tty_ldisc_kill(to);
// tty->other_struct = RT_NULL;
}
else
{
// to = tty->other_struct;
// to->other_struct = RT_NULL;
// tty->init_flag = TTY_INIT_FLAG_NONE;
// tty->other_struct = RT_NULL;
// tty->foreground = RT_NULL;
// tty->index = -1;
// tty->other_struct = RT_NULL;
// tty_ldisc_kill(tty);
}
return result;
}
static rt_ssize_t pty_device_read(struct rt_device *dev,
rt_off_t pos,
void *buffer,
rt_size_t size)
{
rt_size_t len = 0;
return len;
}
static rt_ssize_t pty_device_write(struct rt_device *dev,
rt_off_t pos,
const void *buffer,
rt_size_t size)
{
rt_size_t len = 0;
rt_base_t level = 0;
struct tty_struct *tty = RT_NULL;
struct tty_struct *to = RT_NULL;
tty = (struct tty_struct *)dev;
RT_ASSERT(tty != RT_NULL);
RT_ASSERT(tty->init_flag == TTY_INIT_FLAG_INITED);
to = tty->other_struct;
level = rt_spin_lock_irqsave(&tty->spinlock);
if (to->ldisc->ops->receive_buf)
{
len = to->ldisc->ops->receive_buf(to, (char *)buffer, size);
}
rt_spin_unlock_irqrestore(&tty->spinlock, level);
return len;
}
static rt_err_t pty_device_control(rt_device_t dev, int cmd, void *args)
{
struct tty_struct *tty = (struct tty_struct *)dev;
switch (cmd)
{
case TIOCSPTLCK: /* Set PT Lock (disallow slave open) */
return pty_set_lock(tty, (int *)args);
case TIOCGPTLCK: /* Get PT Lock status */
return pty_get_lock(tty, (int *)args);
case TIOCGPTN: /* Get PT Number */
return pty_get_index(tty, (int *)args);
}
return -ENOIOCTLCMD;
}
static int ptmx_open(struct dfs_file *fd)
{
int ret = 0;
struct tty_struct *tty = RT_NULL;
struct tty_struct *pts = RT_NULL;
struct tty_ldisc *ld = RT_NULL;
tty = (struct tty_struct *)fd->vnode->data;
RT_ASSERT(tty != RT_NULL);
pts = find_freepts();
if (pts == RT_NULL)
{
LOG_E("No free PTS device found.\n");
return -1;
}
ret = pts_register(tty, pts, pts_index);
if (ret < 0)
{
LOG_E("pts register fail\n");
rt_free(pts);
return -1;
}
pts_index++;
tty->other_struct = pts;
ld = tty->ldisc;
if (ld->ops->open)
{
ret = ld->ops->open(fd);
}
rt_device_t device = (rt_device_t)fd->vnode->data;
if(fd->vnode->ref_count == 1)
{
ret = rt_device_open(device, fd->flags);
}
return ret;
}
#ifdef RT_USING_DEVICE_OPS
const static struct rt_device_ops pty_device_ops =
{
pty_device_init,
pty_device_open,
pty_device_close,
pty_device_read,
pty_device_write,
pty_device_control,
};
#endif /* RT_USING_DEVICE_OPS */
static struct dfs_file_ops pts_fops;
static struct dfs_file_ops ptmx_fops;
static int pts_register(struct tty_struct *ptmx, struct tty_struct *pts, int pts_index)
{
char name[20];
rt_err_t ret = RT_EOK;
struct rt_device *device = RT_NULL;
RT_ASSERT(ptmx!=RT_NULL);
if (pts->init_flag != TTY_INIT_FLAG_ALLOCED)
{
LOG_E("pts%d has been registered\n", pts_index);
ret = (-RT_EBUSY);
}
else
{
tty_init(pts, TTY_DRIVER_TYPE_PTY, PTY_TYPE_SLAVE, NULL);
pts->index = pts_index;
pts->pts_lock = 1;
pts->other_struct = ptmx;
device = &pts->parent;
device->type = RT_Device_Class_Char;
#ifdef RT_USING_DEVICE_OPS
device->ops = &pty_device_ops;
#else
device->init = pty_device_init;
device->open = pty_device_open;
device->close = pty_device_close;
device->read = pty_device_read;
device->write = pty_device_write;
device->control = pty_device_control;
#endif /* RT_USING_DEVICE_OPS */
rt_snprintf(name, sizeof(name), "pts%d", pts_index);
ret = rt_device_register(device, name, RT_DEVICE_FLAG_RDWR);
if (ret != RT_EOK)
{
LOG_E("pts%d register failed\n", pts_index);
ret = -RT_EIO;
}
else
{
#ifdef RT_USING_POSIX_DEVIO
/* set fops */
memcpy(&pts_fops, tty_get_fops(), sizeof(struct dfs_file_ops));
device->fops = &pts_fops;
#endif
}
}
return ret;
}
static int ptmx_register(void)
{
rt_err_t ret = RT_EOK;
struct rt_device *device = RT_NULL;
struct tty_struct *ptmx = &ptm_dev;
RT_ASSERT(ptmx->init_flag == TTY_INIT_FLAG_NONE);
tty_init(ptmx, TTY_DRIVER_TYPE_PTY, PTY_TYPE_MASTER, NULL);
device = &(ptmx->parent);
device->type = RT_Device_Class_Char;
#ifdef RT_USING_DEVICE_OPS
device->ops = &pty_device_ops;
#else
device->init = pty_device_init;
device->open = pty_device_open;
device->close = pty_device_close;
device->read = pty_device_read;
device->write = pty_device_write;
device->control = pty_device_control;
#endif /* RT_USING_DEVICE_OPS */
ret = rt_device_register(device, "ptmx", RT_DEVICE_FLAG_RDWR);
if (ret != RT_EOK)
{
LOG_E("ptmx register fail\n");
ret = -RT_EIO;
}
else
{
#ifdef RT_USING_POSIX_DEVIO
/* set fops */
memcpy(&ptmx_fops, tty_get_fops(), sizeof(struct dfs_file_ops));
ptmx_fops.open = ptmx_open;
device->fops = &ptmx_fops;
#endif
}
return ret;
}
INIT_DEVICE_EXPORT(ptmx_register);

View File

@ -1,504 +0,0 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-12-07 linzhenxing first version
* 2023-06-26 WangXiaoyao fix bug on foreground app switch
*/
#include <dfs_file.h>
#include <dfs_fs.h>
#include <lwp.h>
#include <rtdevice.h>
#include <rtthread.h>
#include <tty.h>
#include <tty_ldisc.h>
#include <shell.h>
#if defined(RT_USING_POSIX_DEVIO)
#include <termios.h>
#endif
#define DBG_TAG "TTY"
#ifdef RT_TTY_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif /* RT_TTY_DEBUG */
#include <rtdbg.h>
const struct termios tty_std_termios = { /* for the benefit of tty drivers */
.c_iflag = IMAXBEL | IUCLC | INLCR | ICRNL | IGNPAR,
.c_oflag = OPOST,
.c_cflag = B38400 | CS8 | CREAD | HUPCL,
.c_lflag = ISIG | ECHOE | TOSTOP | NOFLSH,
RT_NULL,/* .c_line = N_TTY, */
.c_cc = INIT_C_CC,
.__c_ispeed = 38400,
.__c_ospeed = 38400
};
void tty_initstack(struct tty_node *node)
{
node->lwp = RT_NULL;
node->next = RT_NULL;
}
static struct tty_node tty_node_cache = { RT_NULL, RT_NULL };
static struct tty_node *_tty_node_alloc(void)
{
struct tty_node *node = tty_node_cache.next;
if (node == RT_NULL)
{
node = rt_calloc(1, sizeof(struct tty_node));
}
else
{
tty_node_cache.next = node->next;
}
return node;
}
static void _tty_node_free(struct tty_node *node)
{
node->next = tty_node_cache.next;
tty_node_cache.next = node;
}
int tty_push(struct tty_node **head, struct rt_lwp *lwp)
{
struct tty_node *node = _tty_node_alloc();
if (!node)
{
return -1;
}
node->lwp = lwp;
node->next = *head;
*head = node;
return 0;
}
struct rt_lwp *tty_pop(struct tty_node **head, struct rt_lwp *target_lwp)
{
struct tty_node *node;
struct rt_lwp *lwp = RT_NULL;
if (!head || !*head)
{
return RT_NULL;
}
node = *head;
if (target_lwp != RT_NULL && node->lwp != target_lwp)
{
struct tty_node *prev = RT_NULL;
while (node != RT_NULL && node->lwp != target_lwp)
{
prev = node;
node = node->next;
}
if (node != RT_NULL)
{
/* prev cannot be RT_NULL at this point */
prev->next = node->next;
lwp = target_lwp;
_tty_node_free(node);
}
}
else
{
lwp = (*head)->lwp;
*head = (*head)->next;
node->lwp = RT_NULL;
_tty_node_free(node);
}
return lwp;
}
/**
* tty_check_change - check for POSIX terminal changes
* @tty: tty to check
*
* If we try to write to, or set the state of, a terminal and we're
* not in the foreground, send a SIGTTOU. If the signal is blocked or
* ignored, go ahead and perform the operation. (POSIX 7.2)
*
* Locking: ctrl_lock
*/
int __tty_check_change(struct tty_struct *tty, int sig)
{
pid_t pgrp = 0, tty_pgrp = 0;
int ret = 0;
struct rt_lwp *lwp;
lwp = lwp_self();
if (lwp == RT_NULL)
{
return 0;
}
if (lwp->tty != tty)
{
return 0;
}
pgrp = lwp->__pgrp;
tty_pgrp = tty->pgrp;
if (tty_pgrp && (pgrp != tty->pgrp))
{
lwp_signal_kill(lwp, sig, SI_USER, 0);
}
if (!tty_pgrp)
{
LOG_D("sig=%d, tty->pgrp == -1!\n", sig);
}
return ret;
}
int tty_check_change(struct tty_struct *tty)
{
return __tty_check_change(tty, SIGTTOU);
}
static int tty_open(struct dfs_file *fd)
{
int ret = 0;
int noctty = 0;
struct tty_struct *tty = RT_NULL;
struct tty_ldisc *ld = RT_NULL;
tty = (struct tty_struct *)fd->vnode->data;
RT_ASSERT(tty != RT_NULL);
ld = tty->ldisc;
if (ld->ops->open)
{
ret = ld->ops->open(fd);
}
noctty = (fd->flags & O_NOCTTY);
rt_device_t device = (rt_device_t)fd->vnode->data;
if (fd->vnode->ref_count == 1)
{
ret = rt_device_open(device, fd->flags);
}
if (current == RT_NULL) //kernel mode not lwp
{
return ret;
}
if (!noctty &&
current->leader &&
!current->tty &&
tty->session == -1)
{
current->tty = tty;
current->tty_old_pgrp = 0;
tty->session = current->session;
tty->pgrp = current->__pgrp;
tty->foreground = current;
}
return ret;
}
static int tty_close(struct dfs_file *fd)
{
int ret = 0;
struct tty_struct *tty = RT_NULL;
struct tty_ldisc *ld = RT_NULL;
tty = (struct tty_struct *)fd->vnode->data;
RT_ASSERT(tty != RT_NULL);
ld = tty->ldisc;
if (ld->ops->close)
{
//ld->ops->close(tty);
}
if (fd->vnode->ref_count == 1)
{
ret = rt_device_close((rt_device_t)tty);
}
return ret;
}
static int tiocsctty(struct tty_struct *tty, int arg)
{
if (current->leader &&
(current->session == tty->session))
{
return 0;
}
/*
* The process must be a session leader and
* not have a controlling tty already.
*/
if (!current->leader || current->tty)
{
return -EPERM;
}
if (tty->session > 0)
{
LOG_E("this tty have control process\n");
}
current->tty = tty;
current->tty_old_pgrp = 0;
tty->session = current->session;
tty->pgrp = current->__pgrp;
tty->foreground = current;
if (tty->type == TTY_DRIVER_TYPE_PTY)
{
tty->other_struct->foreground = current;
}
return 0;
}
static int tiocswinsz(struct tty_struct *tty, struct winsize *p_winsize)
{
rt_kprintf("\x1b[8;%d;%dt", p_winsize->ws_col, p_winsize->ws_row);
return 0;
}
static int tiocgwinsz(struct tty_struct *tty, struct winsize *p_winsize)
{
if(rt_thread_self() != rt_thread_find(FINSH_THREAD_NAME))
{
/* can only be used in the tshell thread; otherwise return the default size */
p_winsize->ws_col = 80;
p_winsize->ws_row = 24;
}
else
{
#define _TIO_BUFLEN 20
char _tio_buf[_TIO_BUFLEN];
unsigned char cnt1, cnt2, cnt3, i;
char row_s[4], col_s[4];
char *p;
rt_memset(_tio_buf, 0, _TIO_BUFLEN);
/* send the command to terminal for getting the window size of the terminal */
rt_kprintf("\033[18t");
/* waiting for the response from the terminal */
i = 0;
while(i < _TIO_BUFLEN)
{
_tio_buf[i] = finsh_getchar();
if(_tio_buf[i] != 't')
{
i ++;
}
else
{
break;
}
}
if(i == _TIO_BUFLEN)
{
/* buffer overflowed; return the default size */
p_winsize->ws_col = 80;
p_winsize->ws_row = 24;
return 0;
}
/* interpreting data eg: "\033[8;1;15t" which means row is 1 and col is 15 (unit: size of ONE character) */
rt_memset(row_s,0,4);
rt_memset(col_s,0,4);
cnt1 = 0;
while(cnt1 < _TIO_BUFLEN && _tio_buf[cnt1] != ';')
{
cnt1++;
}
cnt2 = ++cnt1;
while(cnt2 < _TIO_BUFLEN && _tio_buf[cnt2] != ';')
{
cnt2++;
}
p = row_s;
while(cnt1 < cnt2)
{
*p++ = _tio_buf[cnt1++];
}
p = col_s;
cnt2++;
cnt3 = rt_strlen(_tio_buf) - 1;
while(cnt2 < cnt3)
{
*p++ = _tio_buf[cnt2++];
}
/* load the window size data */
p_winsize->ws_col = atoi(col_s);
p_winsize->ws_row = atoi(row_s);
#undef _TIO_BUFLEN
}
p_winsize->ws_xpixel = 0;/* unused */
p_winsize->ws_ypixel = 0;/* unused */
return 0;
}
static int tty_ioctl(struct dfs_file *fd, int cmd, void *args)
{
int ret = 0;
void *p = (void *)args;
struct tty_struct *tty = RT_NULL;
struct tty_struct *real_tty = RT_NULL;
struct tty_ldisc *ld = RT_NULL;
tty = (struct tty_struct *)fd->vnode->data;
RT_ASSERT(tty != RT_NULL);
if (tty->type == TTY_DRIVER_TYPE_PTY && tty->subtype == PTY_TYPE_MASTER)
{
real_tty = tty->other_struct;
}
else
{
real_tty = tty;
}
switch (cmd)
{
case TIOCSCTTY:
return tiocsctty(real_tty, 1);
case TIOCGWINSZ:
return tiocgwinsz(real_tty, p);
case TIOCSWINSZ:
return tiocswinsz(real_tty, p);
}
ld = tty->ldisc;
if (ld->ops->ioctl)
{
ret = ld->ops->ioctl(fd, cmd, args);
}
return ret;
}
#ifdef RT_USING_DFS_V2
static ssize_t tty_read(struct dfs_file *fd, void *buf, size_t count, off_t *pos)
#else
static ssize_t tty_read(struct dfs_file *fd, void *buf, size_t count)
#endif
{
int ret = 0;
struct tty_struct *tty = RT_NULL;
struct tty_ldisc *ld = RT_NULL;
tty = (struct tty_struct *)fd->vnode->data;
RT_ASSERT(tty != RT_NULL);
ld = tty->ldisc;
if (ld && ld->ops->read)
{
ret = ld->ops->read(fd, buf, count);
}
return ret;
}
#ifdef RT_USING_DFS_V2
static ssize_t tty_write(struct dfs_file *fd, const void *buf, size_t count, off_t *pos)
#else
static ssize_t tty_write(struct dfs_file *fd, const void *buf, size_t count )
#endif
{
int ret = 0;
struct tty_struct *tty = RT_NULL;
struct tty_ldisc *ld = RT_NULL;
tty = (struct tty_struct *)fd->vnode->data;
RT_ASSERT(tty != RT_NULL);
ld = tty->ldisc;
if (ld && ld->ops->write)
{
ret = ld->ops->write(fd, buf, count);
}
return ret;
}
static int tty_poll(struct dfs_file *fd, struct rt_pollreq *req)
{
int ret = 0;
struct tty_struct *tty = RT_NULL;
struct tty_ldisc *ld = RT_NULL;
tty = (struct tty_struct *)fd->vnode->data;
RT_ASSERT(tty != RT_NULL);
ld = tty->ldisc;
if (ld->ops->poll)
{
ret = ld->ops->poll(fd, req);
}
return ret;
}
static const struct dfs_file_ops tty_fops =
{
.open = tty_open,
.close = tty_close,
.ioctl = tty_ioctl,
.read = tty_read,
.write = tty_write,
.poll = tty_poll,
};
const struct dfs_file_ops *tty_get_fops(void)
{
return &tty_fops;
}
int tty_init(struct tty_struct *tty, int type, int subtype, struct rt_device *iodev)
{
if (tty)
{
struct tty_node *node = NULL;
node = rt_calloc(1, sizeof(struct tty_node));
if (node)
{
tty->type = type;
tty->subtype = subtype;
tty->io_dev = iodev;
tty->head = node;
tty_initstack(tty->head);
tty->pgrp = -1;
tty->session = -1;
tty->foreground = RT_NULL;
rt_mutex_init(&tty->lock, "ttyLock", RT_IPC_FLAG_PRIO);
rt_wqueue_init(&tty->wait_queue);
rt_spin_lock_init(&tty->spinlock);
tty_ldisc_init(tty);
tty->init_termios = tty_std_termios;
tty->init_flag = TTY_INIT_FLAG_REGED;
}
}
return 0;
}

View File

@ -1,122 +0,0 @@
#include <stddef.h>
#include <rtthread.h>
#include <tty.h>
#if defined(RT_USING_POSIX_DEVIO)
#include <termios.h>
#endif
#define DBG_TAG "TTY_IOCTL"
#ifdef RT_TTY_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif /* RT_TTY_DEBUG */
#include <rtdbg.h>
/*
* Internal flag options for termios setting behavior
*/
#define TERMIOS_FLUSH 1
#define TERMIOS_WAIT 2
#define TERMIOS_TERMIO 4
#define TERMIOS_OLD 8
/**
* set_termios - set termios values for a tty
* @tty: terminal device
* @arg: user data
* @opt: option information
*
* Helper function to prepare termios data and run other necessary
* functions before using tty_set_termios to apply the actual changes.
*
* Locking:
* Called functions take ldisc and termios_rwsem locks
*/
static int set_termios(struct tty_struct *tty, void *arg, int opt)
{
struct termios old_termios;
struct tty_ldisc *ld = RT_NULL;
struct termios *new_termios = (struct termios *)arg;
rt_base_t level = 0;
int retval = tty_check_change(tty);
if (retval)
{
return retval;
}
memcpy(&old_termios, &(tty->init_termios), sizeof(struct termios));
level = rt_spin_lock_irqsave(&tty->spinlock);
tty->init_termios = *new_termios;
rt_spin_unlock_irqrestore(&tty->spinlock, level);
ld = tty->ldisc;
if (ld != NULL)
{
if (ld->ops->set_termios)
{
ld->ops->set_termios(tty, &old_termios);
}
}
return 0;
}
int n_tty_ioctl_extend(struct tty_struct *tty, int cmd, void *args)
{
int ret = 0;
void *p = (void *)args;
struct tty_struct *real_tty = RT_NULL;
if (tty->type == TTY_DRIVER_TYPE_PTY && tty->subtype == PTY_TYPE_MASTER)
{
real_tty = tty->other_struct;
}
else
{
real_tty = tty;
}
switch(cmd)
{
case TCGETS:
case TCGETA:
{
struct termios *tio = (struct termios *)p;
if (tio == RT_NULL)
{
return -RT_EINVAL;
}
memcpy(tio, &real_tty->init_termios, sizeof(real_tty->init_termios));
return ret;
}
case TCSETSF:
{
return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT | TERMIOS_OLD);
}
case TCSETSW:
{
return set_termios(real_tty, p, TERMIOS_WAIT | TERMIOS_OLD);
}
case TCSETS:
{
return set_termios(real_tty, p, TERMIOS_OLD);
}
case TCSETAF:
{
return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT | TERMIOS_TERMIO);
}
case TCSETAW:
{
return set_termios(real_tty, p, TERMIOS_WAIT | TERMIOS_TERMIO);
}
case TCSETA:
{
return set_termios(real_tty, p, TERMIOS_TERMIO);
}
default:
break;
}
return -ENOIOCTLCMD;
}
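/*
 * A user-space sketch of the TCGETS/TCSETSW path handled above: tcgetattr()
 * and tcsetattr() map onto these commands (illustrative only; disable_echo
 * is not part of this file).
 */
#include <termios.h>
#include <unistd.h>

int disable_echo(int fd)
{
    struct termios tio;

    if (tcgetattr(fd, &tio) < 0)           /* issues TCGETS */
        return -1;
    tio.c_lflag &= ~(ECHO | ECHOCTL);
    return tcsetattr(fd, TCSADRAIN, &tio); /* issues TCSETSW: drain output first */
}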

View File

@ -1,178 +0,0 @@
#include <tty.h>
#include <tty_ldisc.h>
#define DBG_TAG "TTY_LDISC"
#ifdef RT_TTY_DEBUG
#define DBG_LVL DBG_LOG
#else
#define DBG_LVL DBG_INFO
#endif /* RT_TTY_DEBUG */
#include <rtdbg.h>
extern struct tty_ldisc_ops n_tty_ops;
static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS] = {
&n_tty_ops, /* N_TTY = 0 */
};
static struct rt_spinlock _spinlock = RT_SPINLOCK_INIT;
static struct tty_ldisc_ops *get_ldops(int disc)
{
struct tty_ldisc_ops *ldops = RT_NULL;
rt_base_t level = 0;
level = rt_spin_lock_irqsave(&_spinlock);
ldops = tty_ldiscs[disc];
if (ldops)
{
ldops->refcount++;
}
rt_spin_unlock_irqrestore(&_spinlock, level);
return ldops;
}
static void put_ldops(struct tty_ldisc_ops *ldops)
{
rt_base_t level = 0;
level = rt_spin_lock_irqsave(&_spinlock);
ldops->refcount--;
rt_spin_unlock_irqrestore(&_spinlock, level);
}
static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
{
struct tty_ldisc *ld = RT_NULL;
struct tty_ldisc_ops *ldops = RT_NULL;
if (disc < N_TTY || disc >= NR_LDISCS)
{
return RT_NULL;
}
ldops = get_ldops(disc);
if (ldops == RT_NULL)
{
LOG_E("tty ldisc get error\n");
return RT_NULL;
}
ld = rt_malloc(sizeof(struct tty_ldisc));
if (ld == RT_NULL)
{
ldops->refcount--;
return RT_NULL;
}
ld->ops = ldops;
ld->tty = tty;
return ld;
}
/**
* tty_ldisc_put - release the ldisc
*
* Complement of tty_ldisc_get().
*/
static void tty_ldisc_put(struct tty_ldisc *ld)
{
if (ld)
{
put_ldops(ld->ops);
rt_free(ld);
}
}
/**
* tty_ldisc_close - close a line discipline
* @tty: tty we are opening the ldisc on
* @ld: discipline to close
*
* A helper close method. Also a convenient debugging and check
* point.
*/
static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
{
if (ld && ld->ops->close)
{
ld->ops->close(tty);
}
}
/**
* tty_ldisc_kill - teardown ldisc
* @tty: tty being released
*
* Perform final close of the ldisc and reset tty->ldisc
*/
void tty_ldisc_kill(struct tty_struct *tty)
{
if (tty && tty->ldisc)
{
/*
* Now kill off the ldisc
*/
tty_ldisc_close(tty, tty->ldisc);
tty_ldisc_put(tty->ldisc);
/* Force an oops if we mess this up */
tty->ldisc = NULL;
}
}
int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
{
int ret = 0;
rt_base_t level = 0;
if (disc < N_TTY || disc >= NR_LDISCS)
{
return -EINVAL;
}
level = rt_spin_lock_irqsave(&_spinlock);
tty_ldiscs[disc] = new_ldisc;
new_ldisc->num = disc;
new_ldisc->refcount = 0;
rt_spin_unlock_irqrestore(&_spinlock, level);
return ret;
}
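/*
 * A hypothetical consumer of the tty_register_ldisc() API above (sketch
 * only; N_MYDISC, my_close and the slot value are illustrative names, not
 * part of this file).
 */
#define N_MYDISC 1 /* must satisfy N_TTY <= disc < NR_LDISCS */

static void my_close(struct tty_struct *tty)
{
    /* release any per-tty state of the discipline here */
}

static struct tty_ldisc_ops my_ldisc_ops = {
    .close = my_close,
    /* .open/.read/.write/.poll/.ioctl as needed */
};

static int my_ldisc_register(void)
{
    return tty_register_ldisc(N_MYDISC, &my_ldisc_ops);
}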
/**
* tty_ldisc_release - release line discipline
* @tty: tty being shut down (or one end of pty pair)
*
* Called during the final close of a tty or a pty pair in order to shut
* down the line discipline layer. On exit, each tty's ldisc is NULL.
*/
void tty_ldisc_release(struct tty_struct *tty)
{
struct tty_struct *o_tty = tty->other_struct;
/*
* Shutdown this line discipline. As this is the final close,
* it does not race with the set_ldisc code path.
*/
tty_ldisc_kill(tty);
if (o_tty)
{
tty_ldisc_kill(o_tty);
}
}
/**
* tty_ldisc_init - ldisc setup for new tty
* @tty: tty being allocated
*
* Set up the line discipline objects for a newly allocated tty. Note that
* the tty structure is not completely set up when this call is made.
*/
void tty_ldisc_init(struct tty_struct *tty)
{
if (tty)
{
tty->ldisc = tty_ldisc_get(tty, N_TTY);
}
}

View File

@ -22,6 +22,10 @@
#include <dfs_mnt.h>
#endif
#ifdef RT_USING_SMART
#include "lwp.h"
#endif /* RT_USING_SMART */
static int msh_readline(int fd, char *line_buf, int size)
{
char ch;
@ -159,7 +163,11 @@ static int cmd_ls(int argc, char **argv)
if (argc == 1)
{
#ifdef DFS_USING_WORKDIR
#ifdef RT_USING_SMART
ls(lwp_getcwd());
#else
ls(working_directory);
#endif
#else
ls("/");
#endif

View File

@ -25,6 +25,7 @@
* 2023-08-12 Meco Man re-implement RT-Thread lightweight timezone API
* 2023-09-15 xqyjlj perf rt_hw_interrupt_disable/enable
* 2023-10-23 Shell add lock for _g_timerid
* 2023-11-16 Shell Fixup of nanosleep if previous call was interrupted
*/
#include "sys/time.h"
@ -544,20 +545,24 @@ int nanosleep(const struct timespec *rqtp, struct timespec *rmtp)
unsigned long ns = rqtp->tv_sec * NANOSECOND_PER_SECOND + rqtp->tv_nsec;
rt_ktime_boottime_get_ns(&old_ts);
rt_ktime_hrtimer_ndelay(ns);
if (rt_get_errno() == -RT_EINTR)
if (rt_get_errno() == RT_EINTR)
{
if (rmtp)
{
rt_base_t rsec, rnsec;
rt_ktime_boottime_get_ns(&new_ts);
rmtp->tv_sec = 0;
rmtp->tv_nsec =
(old_ts.tv_nsec + ns) - ((new_ts.tv_sec - old_ts.tv_sec) * NANOSECOND_PER_SECOND + new_ts.tv_nsec);
if (rmtp->tv_nsec > NANOSECOND_PER_SECOND)
rsec = old_ts.tv_sec + rqtp->tv_sec - new_ts.tv_sec;
rnsec = old_ts.tv_nsec + rqtp->tv_nsec - new_ts.tv_nsec;
if (rnsec < 0)
{
rmtp->tv_nsec %= NANOSECOND_PER_SECOND;
rmtp->tv_sec += rmtp->tv_nsec / NANOSECOND_PER_SECOND;
rmtp->tv_sec = rsec - 1;
rmtp->tv_nsec = NANOSECOND_PER_SECOND + rnsec;
}
else
{
rmtp->tv_sec = rsec;
rmtp->tv_nsec = rnsec;
}
}
rt_set_errno(EINTR);
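/*
 * A minimal sketch of the remainder math above (illustrative only, not part
 * of the patch; timespec_remaining is a made-up helper name). It normalizes
 * both the borrow case handled above and the symmetric carry case, e.g.
 * old_ts = {100, 800000000}, rqtp = {2, 500000000}, new_ts = {102, 0}
 * yields {1, 300000000}.
 */
#include <time.h>

#define NSEC_PER_SEC 1000000000L

static void timespec_remaining(const struct timespec *old_ts,
                               const struct timespec *new_ts,
                               const struct timespec *rqtp,
                               struct timespec *rmtp)
{
    long rsec  = old_ts->tv_sec  + rqtp->tv_sec  - new_ts->tv_sec;
    long rnsec = old_ts->tv_nsec + rqtp->tv_nsec - new_ts->tv_nsec;

    if (rnsec < 0)                  /* borrow one second */
    {
        rsec  -= 1;
        rnsec += NSEC_PER_SEC;
    }
    else if (rnsec >= NSEC_PER_SEC) /* carry one second */
    {
        rsec  += 1;
        rnsec -= NSEC_PER_SEC;
    }
    rmtp->tv_sec  = rsec > 0 ? rsec : 0;
    rmtp->tv_nsec = rsec >= 0 ? rnsec : 0;
}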

View File

@ -32,9 +32,13 @@ extern "C" {
#define DT_UNKNOWN 0x00
#define DT_FIFO 0x01
#define DT_SYMLINK 0x03
#define DT_CHR 0x02
#define DT_DIR 0x04
#define DT_BLK 0x06
#define DT_REG 0x08
#define DT_LNK 0x0a
#define DT_SOCK 0x0c
#define DT_SYMLINK DT_LNK
#ifndef HAVE_DIR_STRUCTURE
#define HAVE_DIR_STRUCTURE

View File

@ -15,6 +15,10 @@
extern "C" {
#endif
#ifdef RT_USING_MUSLLIBC
#include_next <sys/ioctl.h>
#else
struct winsize
{
unsigned short ws_row;
@ -23,9 +27,6 @@ struct winsize
unsigned short ws_ypixel;
};
#ifdef RT_USING_MUSLLIBC
#include <bits/ioctl.h>
#else
/*
* Direction bits, which any architecture can choose to override
* before including this file.

View File

@ -19,6 +19,8 @@
extern "C" {
#endif
#define _POSIX_VDISABLE 0
#define STDIN_FILENO 0 /* standard input file descriptor */
#define STDOUT_FILENO 1 /* standard output file descriptor */
#define STDERR_FILENO 2 /* standard error file descriptor */

View File

@ -13,12 +13,37 @@
#include_next <fcntl.h>
#ifndef O_DIRECTORY
#define O_DIRECTORY 0x200000
#endif
#define O_CREAT 0100
#define O_EXCL 0200
#define O_NOCTTY 0400
#define O_TRUNC 01000
#define O_APPEND 02000
#define O_NONBLOCK 04000
#define O_DSYNC 010000
#define O_SYNC 04010000
#define O_RSYNC 04010000
#define O_DIRECTORY 040000
#define O_NOFOLLOW 0100000
#define O_CLOEXEC 02000000
#define O_ASYNC 020000
#define O_DIRECT 0200000
#define O_LARGEFILE 0400000
#define O_NOATIME 01000000
#define O_PATH 010000000
#define O_TMPFILE 020040000
#define O_NDELAY O_NONBLOCK
#ifndef O_BINARY
#define O_BINARY 0x10000
#define O_BINARY 00
#endif
#ifndef O_SEARCH
#define O_SEARCH O_PATH
#endif /* O_SEARCH */
#ifndef O_EXEC
#define O_EXEC O_PATH
#endif /* O_EXEC */
#endif
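/*
 * Sketch: the octal values above compose as ordinary bit flags, e.g.
 * O_CREAT | O_EXCL | O_CLOEXEC == 0100 | 0200 | 02000000 == 02000300.
 * The path below is illustrative only.
 */
#include <fcntl.h>

static int make_lockfile(void)
{
    return open("/tmp/lockfile", O_RDWR | O_CREAT | O_EXCL | O_CLOEXEC, 0600);
}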

View File

@ -27,7 +27,7 @@ if RT_USING_POSIX_FS
select RT_USING_POSIX_POLL
default y if RT_USING_SMART
default n
config RT_USING_POSIX_EVENTFD
bool "Enable I/O event eventfd <sys/eventfd.h>"
select RT_USING_POSIX_POLL

View File

@ -4,9 +4,14 @@
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-07-29 zmq810150896 first version
* 2024/03/26 TroyMitchelle Add comments for all functions, members within structure members and fix incorrect naming of triggered
* Date Author Notes
* 2023-07-29 zmq810150896 first version
* 2024/03/26 TroyMitchelle Add comments for all functions, members within structure members and fix incorrect naming of triggered
* 2023-12-14 Shell If poll goes to sleep before the waitqueue has added a
* record and finished enumerating all the fds, it may be
* woken up incorrectly, basically because the poll wakeup
* algorithm does not correctly distinguish the current
* wait state.
*/
#include <rtthread.h>
@ -27,6 +32,12 @@
struct rt_eventpoll;
enum rt_epoll_status {
RT_EPOLL_STAT_INIT,
RT_EPOLL_STAT_TRIG,
RT_EPOLL_STAT_WAITING,
};
/* Monitor queue */
struct rt_fd_list
{
@ -44,15 +55,15 @@ struct rt_fd_list
struct rt_eventpoll
{
rt_uint32_t triggered; /**< Indicates if the wait thread is triggered */
rt_wqueue_t epoll_read; /**< Epoll read queue */
rt_thread_t polling_thread; /**< Polling thread */
struct rt_mutex lock; /**< Mutex lock */
struct rt_fd_list *fdlist; /**< Monitor list */
int eventpoll_num; /**< Number of ready lists */
rt_pollreq_t req; /**< Poll request structure */
struct rt_spinlock spinlock;/**< Spin lock */
rt_slist_t rdl_head; /**< Ready list head */
rt_wqueue_t epoll_read; /**< Epoll read queue */
rt_thread_t polling_thread; /**< Polling thread */
struct rt_mutex lock; /**< Mutex lock */
struct rt_fd_list *fdlist; /**< Monitor list */
int eventpoll_num; /**< Number of ready lists */
rt_pollreq_t req; /**< Poll request structure */
struct rt_spinlock spinlock; /**< Spin lock */
rt_slist_t rdl_head; /**< Ready list head */
enum rt_epoll_status status; /* whether the waiting thread has been triggered */
};
static int epoll_close(struct dfs_file *file);
@ -85,8 +96,7 @@ static int epoll_close_fdlist(struct rt_fd_list *fdlist)
while (list->next != RT_NULL)
{
fre_node = list->next;
if (fre_node->wqn.wqueue)
rt_wqueue_remove(&fre_node->wqn);
rt_wqueue_remove(&fre_node->wqn);
list->next = fre_node->next;
rt_free(fre_node);
@ -188,13 +198,14 @@ static int epoll_wqueue_callback(struct rt_wqueue_node *wait, void *key)
struct rt_fd_list *fdlist;
struct rt_eventpoll *ep;
rt_base_t level;
int is_waiting = 0;
if (key && !((rt_ubase_t)key & wait->key))
return -1;
fdlist = rt_container_of(wait, struct rt_fd_list, wqn);
ep = fdlist->ep;
if (ep)
{
level = rt_spin_lock_irqsave(&ep->spinlock);
@ -203,16 +214,21 @@ static int epoll_wqueue_callback(struct rt_wqueue_node *wait, void *key)
rt_slist_append(&ep->rdl_head, &fdlist->rdl_node);
fdlist->exclusive = 0;
fdlist->is_rdl_node = RT_TRUE;
ep->triggered = 1;
ep->eventpoll_num++;
is_waiting = (ep->status == RT_EPOLL_STAT_WAITING);
ep->status = RT_EPOLL_STAT_TRIG;
rt_wqueue_wakeup(&ep->epoll_read, (void *)POLLIN);
}
rt_spin_unlock_irqrestore(&ep->spinlock, level);
}
return __wqueue_default_wake(wait, key);
}
if (is_waiting)
{
return __wqueue_default_wake(wait, key);
}
return -1;
}
/**
* @brief Adds a callback function to the wait queue associated with epoll.
@ -265,8 +281,8 @@ static void epoll_ctl_install(struct rt_fd_list *fdlist, struct rt_eventpoll *ep
rt_slist_append(&ep->rdl_head, &fdlist->rdl_node);
fdlist->exclusive = 0;
fdlist->is_rdl_node = RT_TRUE;
ep->triggered = 1;
ep->eventpoll_num++;
ep->status = RT_EPOLL_STAT_TRIG;
ep->eventpoll_num ++;
rt_spin_unlock_irqrestore(&ep->spinlock, level);
rt_mutex_release(&ep->lock);
}
@ -282,7 +298,7 @@ static void epoll_ctl_install(struct rt_fd_list *fdlist, struct rt_eventpoll *ep
*/
static void epoll_member_init(struct rt_eventpoll *ep)
{
ep->triggered = 0;
ep->status = RT_EPOLL_STAT_INIT;
ep->eventpoll_num = 0;
ep->polling_thread = rt_thread_self();
ep->fdlist = RT_NULL;
@ -678,7 +694,7 @@ static int epoll_wait_timeout(struct rt_eventpoll *ep, int msec)
level = rt_spin_lock_irqsave(&ep->spinlock);
if (timeout != 0 && !ep->triggered)
if (timeout != 0 && ep->status != RT_EPOLL_STAT_TRIG)
{
if (rt_thread_suspend_with_flag(thread, RT_KILLABLE) == RT_EOK)
{
@ -690,15 +706,19 @@ static int epoll_wait_timeout(struct rt_eventpoll *ep, int msec)
rt_timer_start(&(thread->thread_timer));
}
ep->status = RT_EPOLL_STAT_WAITING;
rt_spin_unlock_irqrestore(&ep->spinlock, level);
rt_schedule();
level = rt_spin_lock_irqsave(&ep->spinlock);
if (ep->status == RT_EPOLL_STAT_WAITING)
ep->status = RT_EPOLL_STAT_INIT;
}
}
ret = !ep->triggered;
ret = !(ep->status == RT_EPOLL_STAT_TRIG);
rt_spin_unlock_irqrestore(&ep->spinlock, level);
return ret;
@ -867,7 +887,7 @@ static int epoll_do(struct rt_eventpoll *ep, struct epoll_event *events, int max
if (event_num || istimeout)
{
level = rt_spin_lock_irqsave(&ep->spinlock);
ep->triggered = 0;
ep->status = RT_EPOLL_STAT_INIT;
rt_spin_unlock_irqrestore(&ep->spinlock, level);
if ((timeout >= 0) || (event_num > 0))
break;

View File

@ -7,6 +7,12 @@
* Date Author Notes
* 2016-12-28 Bernard first version
* 2018-03-09 Bernard Add protection for pt->triggered.
* 2023-12-04 Shell Fix return code and error verification
* 2023-12-14 Shell If poll goes to sleep before the waitqueue has added a
* record and finished enumerating all the fds, it may be
* woken up incorrectly, basically because the poll wakeup
* algorithm does not correctly distinguish the current
* wait state.
*/
#include <stdint.h>
@ -16,11 +22,16 @@
#include "poll.h"
struct rt_poll_node;
enum rt_poll_status {
RT_POLL_STAT_INIT,
RT_POLL_STAT_TRIG,
RT_POLL_STAT_WAITING,
};
struct rt_poll_table
{
rt_pollreq_t req;
rt_uint32_t triggered; /* the waited thread whether triggered */
enum rt_poll_status status; /* whether the waiting thread has been triggered */
rt_thread_t polling_thread;
struct rt_poll_node *nodes;
};
@ -36,15 +47,25 @@ static RT_DEFINE_SPINLOCK(_spinlock);
static int __wqueue_pollwake(struct rt_wqueue_node *wait, void *key)
{
rt_ubase_t level;
struct rt_poll_node *pn;
int is_waiting;
if (key && !((rt_ubase_t)key & wait->key))
return -1;
pn = rt_container_of(wait, struct rt_poll_node, wqn);
pn->pt->triggered = 1;
return __wqueue_default_wake(wait, key);
level = rt_spin_lock_irqsave(&_spinlock);
is_waiting = (pn->pt->status == RT_POLL_STAT_WAITING);
pn->pt->status = RT_POLL_STAT_TRIG;
rt_spin_unlock_irqrestore(&_spinlock, level);
if (is_waiting)
return __wqueue_default_wake(wait, key);
return -1;
}
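/*
 * The handshake above in miniature: a single-threaded model (sketch only,
 * not part of this file) showing why a wakeup that arrives before the
 * waiter has parked is swallowed instead of resuming a thread.
 */
#include <stdio.h>

enum model_status { STAT_INIT, STAT_TRIG, STAT_WAITING };

static enum model_status status = STAT_INIT;

/* waker side: only deliver a wakeup if the waiter has actually parked */
static int try_wake(void)
{
    int was_waiting = (status == STAT_WAITING);
    status = STAT_TRIG;
    return was_waiting; /* 0 == swallow the event, matching the "return -1" above */
}

int main(void)
{
    /* the event arrives while the poller is still enumerating fds ... */
    printf("early event resumes a thread: %d\n", try_wake()); /* prints 0 */
    /* ... so the poller later sees STAT_TRIG and never goes to sleep */
    printf("status is TRIG: %d\n", status == STAT_TRIG);      /* prints 1 */
    return 0;
}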
static void _poll_add(rt_wqueue_t *wq, rt_pollreq_t *req)
@ -71,7 +92,7 @@ static void _poll_add(rt_wqueue_t *wq, rt_pollreq_t *req)
static void poll_table_init(struct rt_poll_table *pt)
{
pt->req._proc = _poll_add;
pt->triggered = 0;
pt->status = RT_POLL_STAT_INIT;
pt->nodes = RT_NULL;
pt->polling_thread = rt_thread_self();
}
@ -89,7 +110,7 @@ static int poll_wait_timeout(struct rt_poll_table *pt, int msec)
level = rt_spin_lock_irqsave(&_spinlock);
if (timeout != 0 && !pt->triggered)
if (timeout != 0 && pt->status != RT_POLL_STAT_TRIG)
{
if (rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE) == RT_EOK)
{
@ -99,17 +120,31 @@ static int poll_wait_timeout(struct rt_poll_table *pt, int msec)
RT_TIMER_CTRL_SET_TIME,
&timeout);
rt_timer_start(&(thread->thread_timer));
rt_set_errno(RT_ETIMEOUT);
}
else
{
rt_set_errno(0);
}
pt->status = RT_POLL_STAT_WAITING;
rt_spin_unlock_irqrestore(&_spinlock, level);
rt_schedule();
level = rt_spin_lock_irqsave(&_spinlock);
if (pt->status == RT_POLL_STAT_WAITING)
pt->status = RT_POLL_STAT_INIT;
}
}
ret = !pt->triggered;
ret = rt_get_errno();
if (ret == RT_EINTR)
ret = -RT_EINTR;
else if (pt->status == RT_POLL_STAT_TRIG)
ret = RT_EOK;
else
ret = -RT_ETIMEOUT;
rt_spin_unlock_irqrestore(&_spinlock, level);
return ret;
@ -170,7 +205,7 @@ static int poll_do(struct pollfd *fds, nfds_t nfds, struct rt_poll_table *pt, in
{
pf = fds;
num = 0;
pt->triggered = 0;
pt->status = RT_POLL_STAT_INIT;
for (n = 0; n < nfds; n ++)
{
@ -194,8 +229,13 @@ static int poll_do(struct pollfd *fds, nfds_t nfds, struct rt_poll_table *pt, in
if (num || istimeout)
break;
if (poll_wait_timeout(pt, msec))
ret = poll_wait_timeout(pt, msec);
if (ret == -RT_EINTR)
return -EINTR;
else if (ret == -RT_ETIMEOUT)
istimeout = 1;
else
istimeout = 0;
}
return num;

View File

@ -36,6 +36,20 @@ struct termios {
speed_t __c_ospeed;
};
#ifndef NCC
#define NCC 8
struct termio
{
unsigned short c_iflag; /* input mode flags */
unsigned short c_oflag; /* output mode flags */
unsigned short c_cflag; /* control mode flags */
unsigned short c_lflag; /* local mode flags */
unsigned char c_line; /* line discipline */
unsigned char c_cc[NCC]; /* control characters */
};
#endif
/* c_cc characters */
#define VINTR 0
#define VQUIT 1

View File

@ -8,7 +8,7 @@ menuconfig RT_USING_LWP
if RT_USING_LWP
config LWP_DEBUG
bool "Enable debugging features of LwP"
default n
default y
config RT_LWP_MAX_NR
int "The max number of light-weight process"
@ -51,24 +51,6 @@ if RT_USING_LWP
default y
endif
config LWP_UNIX98_PTY
bool "The unix98 PTY support"
default n
if LWP_UNIX98_PTY
config LWP_PTY_INPUT_BFSZ
int "The unix98 PTY input buffer size"
default 1024
config LWP_PTY_PTS_SIZE
int "The unix98 PTY device max num"
default 3
config LWP_PTY_USING_DEBUG
bool "The unix98 PTY debug output"
default n
endif
menuconfig RT_USING_LDSO
bool "LDSO: dynamic load shared objects"
depends on RT_USING_DFS_V2
@ -85,5 +67,6 @@ if RT_USING_LWP
default n
endif
source "$RTT_DIR/components/lwp/terminal/Kconfig"
endif

View File

@ -22,11 +22,6 @@ if arch == 'risc-v':
if cpu in rv64:
cpu = 'rv64'
if GetDepend('LWP_UNIX98_PTY'):
# print("LWP_UNIX98_PTY")
src += Glob('unix98pty/*.c')
CPPPATH += ['unix98pty/']
if platform in platform_file.keys(): # support platforms
if arch in support_arch.keys() and cpu in support_arch[arch]:
asm_path = 'arch/' + arch + '/' + cpu + '/*_' + platform_file[platform]
@ -40,6 +35,12 @@ if platform in platform_file.keys(): # support platforms
CPPPATH = [cwd]
CPPPATH += [cwd + '/arch/' + arch + '/' + cpu]
# Terminal I/O Subsystem
termios_path = ['./terminal/', './terminal/freebsd/']
for item in termios_path:
src += Glob(item + '*.c')
CPPPATH += ['./terminal/']
group = DefineGroup('lwP', src, depend = ['RT_USING_SMART'], CPPPATH = CPPPATH)
Return('group')

View File

@ -8,6 +8,7 @@
* 2021-05-18 Jesven first version
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
* 2023-10-16 Shell Support a new backtrace framework
* 2023-08-03 Shell Support of syscall restart (SA_RESTART)
*/
#include <armv8.h>
@ -123,6 +124,7 @@ int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
#define ALGIN_BYTES (16)
/* the layout is part of ABI, dont change it */
struct signal_ucontext
{
rt_int64_t sigreturn;
@ -130,11 +132,62 @@ struct signal_ucontext
siginfo_t si;
rt_align(16)
rt_align(ALGIN_BYTES)
struct rt_hw_exp_stack frame;
};
void *arch_signal_ucontext_restore(rt_base_t user_sp)
RT_STATIC_ASSERT(abi_offset_compatible, offsetof(struct signal_ucontext, si) == UCTX_ABI_OFFSET_TO_SI);
void *arch_signal_ucontext_get_frame(struct signal_ucontext *uctx)
{
return &uctx->frame;
}
/* internal used only */
void arch_syscall_prepare_signal(rt_base_t rc, struct rt_hw_exp_stack *exp_frame)
{
long x0 = exp_frame->x0;
exp_frame->x0 = rc;
exp_frame->x7 = x0;
return ;
}
void arch_syscall_restart(void *sp, void *ksp);
void arch_syscall_set_errno(void *eframe, int expected, int code)
{
struct rt_hw_exp_stack *exp_frame = eframe;
if (exp_frame->x0 == -expected)
exp_frame->x0 = -code;
return ;
}
void arch_signal_check_erestart(void *eframe, void *ksp)
{
struct rt_hw_exp_stack *exp_frame = eframe;
long rc = exp_frame->x0;
long sys_id = exp_frame->x8;
(void)sys_id;
if (rc == -ERESTART)
{
LOG_D("%s(rc=%ld,sys_id=%ld,pid=%d)", __func__, rc, sys_id, lwp_self()->pid);
LOG_D("%s: restart rc = %ld", lwp_get_syscall_name(sys_id), rc);
exp_frame->x0 = exp_frame->x7;
arch_syscall_restart(eframe, ksp);
}
return ;
}
static void arch_signal_post_action(struct signal_ucontext *new_sp, rt_base_t kernel_sp)
{
arch_signal_check_erestart(&new_sp->frame, (void *)kernel_sp);
return ;
}
void *arch_signal_ucontext_restore(rt_base_t user_sp, rt_base_t kernel_sp)
{
struct signal_ucontext *new_sp;
new_sp = (void *)user_sp;
@ -142,6 +195,7 @@ void *arch_signal_ucontext_restore(rt_base_t user_sp)
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{
lwp_thread_signal_mask(rt_thread_self(), LWP_SIG_MASK_CMD_SET_MASK, &new_sp->save_sigmask, RT_NULL);
arch_signal_post_action(new_sp, kernel_sp);
}
else
{
@ -157,7 +211,7 @@ void *arch_signal_ucontext_save(rt_base_t user_sp, siginfo_t *psiginfo,
lwp_sigset_t *save_sig_mask)
{
struct signal_ucontext *new_sp;
new_sp = (void *)(user_sp - sizeof(struct signal_ucontext));
new_sp = (void *)((user_sp - sizeof(struct signal_ucontext)) & ~0xf);
if (lwp_user_accessable(new_sp, sizeof(*new_sp)))
{

View File

@ -11,8 +11,7 @@
#ifndef LWP_ARCH_H__
#define LWP_ARCH_H__
#include <lwp.h>
#include <lwp_arch_comm.h>
#include <rtconfig.h>
#ifdef ARCH_MM_MMU
@ -26,6 +25,13 @@
#define USER_VADDR_START 0x00200000UL
#define USER_LOAD_VADDR USER_VADDR_START
#define UCTX_ABI_OFFSET_TO_SI 16
#ifndef __ASSEMBLY__
#include <lwp.h>
#include <lwp_arch_comm.h>
#ifdef __cplusplus
extern "C" {
#endif
@ -34,7 +40,7 @@ unsigned long rt_hw_ffz(unsigned long x);
rt_inline void icache_invalid_all(void)
{
asm volatile ("ic ialluis\n\tisb sy":::"memory");
__asm__ volatile ("ic ialluis\n\tisb sy":::"memory");
}
/**
@ -57,11 +63,14 @@ void *arch_signal_ucontext_save(rt_base_t user_sp, siginfo_t *psiginfo,
* @param user_sp sp of user
* @return void*
*/
void *arch_signal_ucontext_restore(rt_base_t user_sp);
void *arch_signal_ucontext_restore(rt_base_t user_sp, rt_base_t kernel_sp);
void arch_syscall_restart(void *sp, void *ksp);
#ifdef __cplusplus
}
#endif
#endif /* __ASSEMBLY__ */
#endif
#endif /* ARCH_MM_MMU */
#endif /*LWP_ARCH_H__*/

View File

@ -7,6 +7,7 @@
* Date Author Notes
* 2021-05-18 Jesven first version
* 2023-07-16 Shell Move part of the codes to C from asm in signal handling
* 2023-08-03 Shell Support of syscall restart (SA_RESTART)
*/
#ifndef __ASSEMBLY__
@ -17,6 +18,7 @@
#include "asm-generic.h"
#include "asm-fpu.h"
#include "armv8.h"
#include "lwp_arch.h"
/*********************
* SPSR BIT *
@ -155,6 +157,14 @@ START_POINT_END(SVC_Handler)
.global arch_syscall_exit
arch_syscall_exit:
/**
* @brief back up former x0 which is required to restart syscall, then setup
* syscall return value in stack frame
*/
mov x1, sp
bl arch_syscall_prepare_signal
msr daifset, #3
ldp x2, x3, [sp], #0x10 /* SPSR and ELR. */
@ -177,7 +187,10 @@ arch_syscall_exit:
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
add sp, sp, #0x40
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
/* the sp is reset to the outer most level, irq and fiq are disabled */
@ -383,10 +396,53 @@ lwp_check_debug_quit:
ldp x29, x30, [sp], #0x10
ret
.global arch_syscall_restart
arch_syscall_restart:
msr daifset, 3
mov sp, x1
/* drop exception frame in user stack */
msr sp_el0, x0
/* restore previous exception frame */
msr spsel, #0
ldp x2, x3, [sp], #0x10
msr elr_el1, x2
msr spsr_el1, x3
ldp x29, x30, [sp], #0x10
ldp x28, x29, [sp], #0x10
msr fpcr, x28
msr fpsr, x29
ldp x28, x29, [sp], #0x10
ldp x26, x27, [sp], #0x10
ldp x24, x25, [sp], #0x10
ldp x22, x23, [sp], #0x10
ldp x20, x21, [sp], #0x10
ldp x18, x19, [sp], #0x10
ldp x16, x17, [sp], #0x10
ldp x14, x15, [sp], #0x10
ldp x12, x13, [sp], #0x10
ldp x10, x11, [sp], #0x10
ldp x8, x9, [sp], #0x10
ldp x6, x7, [sp], #0x10
ldp x4, x5, [sp], #0x10
ldp x2, x3, [sp], #0x10
ldp x0, x1, [sp], #0x10
RESTORE_FPU sp
msr spsel, #1
b vector_exception
arch_signal_quit:
/* drop current exception frame */
add sp, sp, #CONTEXT_SIZE
mov x1, sp
mrs x0, sp_el0
bl arch_signal_ucontext_restore
add x0, x0, #-CONTEXT_SIZE
@ -456,6 +512,11 @@ arch_thread_signal_enter:
/* arch_signal_ucontext_save(user_sp, psiginfo, exp_frame, save_sig_mask); */
bl arch_signal_ucontext_save
mov x22, x0
/* get and saved pointer to uframe */
bl arch_signal_ucontext_get_frame
mov x2, x0
mov x0, x22
dc cvau, x0
dsb sy
@ -483,12 +544,15 @@ arch_thread_signal_enter:
/** set the return address to the sigreturn */
mov x30, x0
cbnz x21, 1f
mov x21, x30
1:
/** set the entry address of signal handler */
msr elr_el1, x21
/* siginfo is above the return address */
add x2, x30, 16
add x1, x2, #CONTEXT_SIZE
add x1, x30, UCTX_ABI_OFFSET_TO_SI
/* uframe is saved in x2 */
mov x0, x19
/**

View File

@ -192,6 +192,12 @@ void *arch_signal_ucontext_save(rt_base_t lr, siginfo_t *psiginfo,
return new_sp;
}
void arch_syscall_set_errno(void *eframe, int expected, int code)
{
/* not supported on this architecture */
return ;
}
#ifdef LWP_ENABLE_ASID
#define MAX_ASID_BITS 8
#define MAX_ASID (1 << MAX_ASID_BITS)

View File

@ -35,7 +35,7 @@ rt_inline unsigned long rt_hw_ffz(unsigned long x)
rt_inline void icache_invalid_all(void)
{
asm volatile ("mcr p15, 0, r0, c7, c5, 0\ndsb\nisb":::"memory");//iciallu
__asm__ volatile ("mcr p15, 0, r0, c7, c5, 0\ndsb\nisb":::"memory");//iciallu
}
unsigned int arch_get_asid(struct rt_lwp *lwp);

View File

@ -242,6 +242,7 @@ int arch_set_thread_context(void (*exit)(void), void *new_thread_stack,
* | |
* +------------------------+ --> thread sp
*/
return 0;
}
#define ALGIN_BYTES (16)
@ -317,6 +318,12 @@ void *arch_signal_ucontext_save(int signo, siginfo_t *psiginfo,
return new_sp;
}
void arch_syscall_set_errno(void *eframe, int expected, int code)
{
/* not supported on this architecture */
return ;
}
/**
* void lwp_exec_user(void *args, void *kernel_stack, void *user_entry)
*/

View File

@ -28,9 +28,9 @@
#define FUTEX_CLOCK_REALTIME 256
#define FUTEX_WAITERS 0x80000000
#define FUTEX_OWNER_DIED 0x40000000
#define FUTEX_TID_MASK 0x3fffffff
#define FUTEX_WAITERS 0x80000000
#define FUTEX_OWNER_DIED 0x40000000
#define FUTEX_TID_MASK 0x3fffffff
struct robust_list
{

View File

@ -12,6 +12,8 @@
* 2023-02-20 wangxiaoyao inv icache before new app startup
* 2023-02-20 wangxiaoyao fix bug on foreground app switch
* 2023-10-16 Shell Support a new backtrace framework
* 2023-11-17 xqyjlj add process group and session support
* 2023-11-30 Shell add lwp_startup()
*/
#define DBG_TAG "lwp"
@ -39,7 +41,7 @@
#include "lwp_arch_comm.h"
#include "lwp_signal.h"
#include "lwp_dbg.h"
#include "console.h"
#include <terminal/terminal.h>
#ifdef ARCH_MM_MMU
#include <lwp_user_mm.h>
@ -59,16 +61,31 @@ static const char elf_magic[] = {0x7f, 'E', 'L', 'F'};
#ifdef DFS_USING_WORKDIR
extern char working_directory[];
#endif
static struct termios stdin_termios, old_stdin_termios;
int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);
struct termios *get_old_termios(void)
/**
* @brief The default console is only a backup device with the lowest priority.
* It is recommended to select the console from the boot arguments instead,
* and don't forget to register that device with a higher priority.
*/
static rt_err_t lwp_default_console_setup(void)
{
return &old_stdin_termios;
rt_device_t bakdev = rt_device_find("ttyS0");
rt_err_t rc;
if (bakdev)
{
lwp_console_register_backend(bakdev, LWP_CONSOLE_LOWEST_PRIOR);
rc = RT_EOK;
}
else
{
rc = -RT_EINVAL;
}
return rc;
}
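/*
 * Sketch: a board with a preferred console can override the ttyS0 backup
 * above by registering its own device as a backend. The "uart1" name and
 * the assumption that a larger value means a higher priority are
 * illustrative only.
 */
static rt_err_t board_console_setup(void)
{
    rt_device_t dev = rt_device_find("uart1");

    if (!dev)
        return -RT_EINVAL;
    lwp_console_register_backend(dev, LWP_CONSOLE_LOWEST_PRIOR + 1);
    return RT_EOK;
}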
int lwp_component_init(void)
static int lwp_component_init(void)
{
int rc;
if ((rc = lwp_tid_init()) != RT_EOK)
@ -83,10 +100,99 @@ int lwp_component_init(void)
{
LOG_E("%s: rt_channel_component_init failed", __func__);
}
else if ((rc = lwp_futex_init()) != RT_EOK)
{
LOG_E("%s: lwp_futex_init() failed", __func__);
}
else if ((rc = lwp_default_console_setup()) != RT_EOK)
{
LOG_E("%s: lwp_default_console_setup() failed", __func__);
}
return rc;
}
INIT_COMPONENT_EXPORT(lwp_component_init);
rt_weak int lwp_startup_debug_request(void)
{
return 0;
}
#define LATENCY_TIMES (3)
#define LATENCY_IN_MSEC (128)
#define LWP_CONSOLE_PATH "CONSOLE=/dev/console"
const char *init_search_path[] = {
"/sbin/init",
"/bin/init",
};
/**
* Start up process 0 and do the essential work.
* This is the "Hello World" point of RT-Smart.
*/
static int lwp_startup(void)
{
int error;
const char *init_path;
char *argv[] = {0, "&"};
char *envp[] = {LWP_CONSOLE_PATH, 0};
#ifdef LWP_DEBUG
int command;
int countdown = LATENCY_TIMES;
while (countdown)
{
command = lwp_startup_debug_request();
if (command)
{
return 0;
}
rt_kprintf("Press any key to stop init process startup ... %d\n", countdown);
countdown -= 1;
rt_thread_mdelay(LATENCY_IN_MSEC);
}
rt_kprintf("Starting init ...\n");
#endif
for (size_t i = 0; i < sizeof(init_search_path)/sizeof(init_search_path[0]); i++)
{
struct stat s;
init_path = init_search_path[i];
error = stat(init_path, &s);
if (error == 0)
{
argv[0] = (void *)init_path;
error = lwp_execve((void *)init_path, 0, sizeof(argv)/sizeof(argv[0]), argv, envp);
if (error < 0)
{
LOG_E("%s: failed to startup process 0 (init)\n"
"Switching to legacy mode...", __func__);
}
else if (error != 1)
{
LOG_E("%s: pid 1 is already allocated", __func__);
error = -EBUSY;
}
else
{
rt_lwp_t p = lwp_from_pid_locked(1);
p->sig_protected = 0;
error = 0;
}
break;
}
}
if (error)
{
LOG_E("%s: init program not found\n"
"Switching to legacy mode...", __func__);
}
return error;
}
INIT_APP_EXPORT(lwp_startup);
void lwp_setcwd(char *buf)
{
struct rt_lwp *lwp = RT_NULL;
@ -100,11 +206,11 @@ void lwp_setcwd(char *buf)
lwp = (struct rt_lwp *)rt_thread_self()->lwp;
if (lwp)
{
rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX);
rt_strncpy(lwp->working_directory, buf, DFS_PATH_MAX - 1);
}
else
{
rt_strncpy(working_directory, buf, DFS_PATH_MAX);
rt_strncpy(working_directory, buf, DFS_PATH_MAX - 1);
}
return ;
@ -114,8 +220,13 @@ char *lwp_getcwd(void)
{
char *dir_buf = RT_NULL;
struct rt_lwp *lwp = RT_NULL;
rt_thread_t thread = rt_thread_self();
if (thread)
{
lwp = (struct rt_lwp *)thread->lwp;
}
lwp = (struct rt_lwp *)rt_thread_self()->lwp;
if (lwp)
{
if(lwp->working_directory[0] != '/')
@ -1077,25 +1188,35 @@ void lwp_cleanup(struct rt_thread *tid)
return;
}
static void lwp_copy_stdio_fdt(struct rt_lwp *lwp)
static void lwp_execve_setup_stdio(struct rt_lwp *lwp)
{
struct dfs_file *d;
struct dfs_fdtable *lwp_fdt;
struct dfs_file *cons_file;
int cons_fd;
lwp_fdt = &lwp->fdt;
/* open console */
cons_fd = open("/dev/console", O_RDWR);
if (cons_fd < 0)
{
LOG_E("%s: Cannot open console tty", __func__);
return ;
}
LOG_D("%s: open console as fd %d", __func__, cons_fd);
/* init 4 fds */
lwp_fdt->fds = rt_calloc(4, sizeof(void *));
if (lwp_fdt->fds)
{
cons_file = fd_get(cons_fd);
lwp_fdt->maxfd = 4;
d = fd_get(0);
fdt_fd_associate_file(lwp_fdt, 0, d);
d = fd_get(1);
fdt_fd_associate_file(lwp_fdt, 1, d);
d = fd_get(2);
fdt_fd_associate_file(lwp_fdt, 2, d);
fdt_fd_associate_file(lwp_fdt, 0, cons_file);
fdt_fd_associate_file(lwp_fdt, 1, cons_file);
fdt_fd_associate_file(lwp_fdt, 2, cons_file);
}
close(cons_fd);
return;
}
@ -1197,11 +1318,8 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
int result;
struct rt_lwp *lwp;
char *thread_name;
char *argv_last = argv[argc - 1];
int bg = 0;
struct process_aux *aux;
int tid = 0;
int ret;
if (filename == RT_NULL)
{
@ -1213,7 +1331,7 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
return -EACCES;
}
lwp = lwp_create(LWP_CREATE_FLAG_ALLOC_PID);
lwp = lwp_create(LWP_CREATE_FLAG_ALLOC_PID | LWP_CREATE_FLAG_NOTRACE_EXEC);
if (lwp == RT_NULL)
{
@ -1236,12 +1354,6 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
}
#endif
if (argv_last[0] == '&' && argv_last[1] == '\0')
{
argc--;
bg = 1;
}
if ((aux = lwp_argscopy(lwp, argc, argv, envp)) == RT_NULL)
{
lwp_tid_put(tid);
@ -1263,7 +1375,7 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
rt_thread_t thread = RT_NULL;
rt_uint32_t priority = 25, tick = 200;
lwp_copy_stdio_fdt(lwp);
lwp_execve_setup_stdio(lwp);
/* obtain the base name */
thread_name = strrchr(filename, '/');
@ -1284,88 +1396,46 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
if (thread != RT_NULL)
{
struct rt_lwp *self_lwp;
rt_session_t session;
rt_processgroup_t group;
thread->tid = tid;
lwp_tid_set_thread(tid, thread);
LOG_D("lwp kernel => (0x%08x, 0x%08x)\n", (rt_size_t)thread->stack_addr,
(rt_size_t)thread->stack_addr + thread->stack_size);
self_lwp = lwp_self();
/* when create init, self_lwp == null */
if (self_lwp == RT_NULL && lwp_to_pid(lwp) != 1)
{
self_lwp = lwp_from_pid_and_lock(1);
}
if (self_lwp)
{
//lwp->tgroup_leader = &thread; //add thread group leader for lwp
lwp->__pgrp = tid;
lwp->session = self_lwp->session;
/* lwp add to children link */
lwp_children_register(self_lwp, lwp);
}
else
session = RT_NULL;
group = RT_NULL;
group = lwp_pgrp_create(lwp);
if (group)
{
//lwp->tgroup_leader = &thread; //add thread group leader for lwp
lwp->__pgrp = tid;
}
if (!bg)
{
if (lwp->session == -1)
lwp_pgrp_insert(group, lwp);
if (self_lwp == RT_NULL)
{
struct tty_struct *tty = RT_NULL;
struct rt_lwp *old_lwp;
tty = (struct tty_struct *)console_tty_get();
old_lwp = tty->foreground;
if (old_lwp)
{
rt_mutex_take(&tty->lock, RT_WAITING_FOREVER);
ret = tty_push(&tty->head, old_lwp);
rt_mutex_release(&tty->lock);
if (ret < 0)
{
lwp_tid_put(tid);
lwp_ref_dec(lwp);
LOG_E("malloc fail!\n");
return -ENOMEM;
}
}
lwp->tty = tty;
lwp->tty->pgrp = lwp->__pgrp;
lwp->tty->session = lwp->session;
lwp->tty->foreground = lwp;
tcgetattr(1, &stdin_termios);
old_stdin_termios = stdin_termios;
stdin_termios.c_lflag |= ICANON | ECHO | ECHOCTL;
tcsetattr(1, 0, &stdin_termios);
session = lwp_session_create(lwp);
lwp_session_insert(session, group);
}
else
{
if (self_lwp != RT_NULL)
{
rt_mutex_take(&self_lwp->tty->lock, RT_WAITING_FOREVER);
ret = tty_push(&self_lwp->tty->head, self_lwp);
rt_mutex_release(&self_lwp->tty->lock);
if (ret < 0)
{
lwp_tid_put(tid);
lwp_ref_dec(lwp);
LOG_E("malloc fail!\n");
return -ENOMEM;
}
lwp->tty = self_lwp->tty;
lwp->tty->pgrp = lwp->__pgrp;
lwp->tty->session = lwp->session;
lwp->tty->foreground = lwp;
}
else
{
lwp->tty = RT_NULL;
}
session = lwp_session_find(lwp_sid_get_byprocess(self_lwp));
lwp_session_insert(session, group);
}
}
else
{
lwp->background = RT_TRUE;
}
thread->lwp = lwp;
#ifndef ARCH_MM_MMU
struct lwp_app_head *app_head = (struct lwp_app_head*)lwp->text_entry;
@ -1381,6 +1451,8 @@ pid_t lwp_execve(char *filename, int debug, int argc, char **argv, char **envp)
#endif /* not defined ARCH_MM_MMU */
rt_list_insert_after(&lwp->t_grp, &thread->sibling);
lwp->did_exec = RT_TRUE;
if (debug && rt_dbg_ops)
{
lwp->debug = debug;
@ -1482,30 +1554,30 @@ rt_err_t lwp_backtrace_frame(rt_thread_t uthread, struct rt_hw_backtrace_frame *
char **argv;
rt_lwp_t lwp;
if (uthread->lwp)
if (uthread && uthread->lwp && rt_scheduler_is_available())
{
lwp = uthread->lwp;
argv = lwp_get_command_line_args(lwp);
if (argv)
{
LOG_RAW("please use: addr2line -e %s -a -f", argv[0]);
rt_kprintf("please use: addr2line -e %s -a -f", argv[0]);
lwp_free_command_line_args(argv);
}
else
{
LOG_RAW("please use: addr2line -e %s -a -f", lwp->cmd);
rt_kprintf("please use: addr2line -e %s -a -f", lwp->cmd);
}
while (nesting < RT_BACKTRACE_LEVEL_MAX_NR)
{
LOG_RAW(" 0x%lx", frame->pc);
rt_kprintf(" 0x%lx", frame->pc);
if (rt_hw_backtrace_frame_unwind(uthread, frame))
{
break;
}
nesting++;
}
LOG_RAW("\n");
rt_kprintf("\n");
rc = RT_EOK;
}
return rc;

View File

@ -9,6 +9,9 @@
* 2019-10-12 Jesven Add MMU and userspace support
* 2020-10-08 Bernard Architecture and code cleanup
* 2021-08-26 linzhenxing add lwp_setcwd\lwp_getcwd
* 2023-11-17 xqyjlj add process group and session support
* 2023-12-02 Shell Add macro to create lwp status and
* fix dead lock problem on pgrp
*/
/*
@ -48,10 +51,6 @@
#include <locale.h>
#endif /* RT_USING_MUSLLIBC */
#ifdef RT_USING_TTY
struct tty_struct;
#endif /* RT_USING_TTY */
#ifdef __cplusplus
extern "C" {
#endif
@ -76,12 +75,48 @@ struct rt_lwp_notify
rt_slist_t list_node;
};
struct lwp_tty;
#ifdef RT_USING_MUSLLIBC
#define LWP_CREATE_STAT(exit_code) (((exit_code) & 0xff) << 8)
#define LWP_COREDUMP_FLAG 0x80
#define LWP_CREATE_STAT_EXIT(exit_code) (((exit_code)&0xff) << 8)
#define LWP_CREATE_STAT_SIGNALED(signo, coredump) (((signo) & 0x7f) | (coredump ? LWP_COREDUMP_FLAG : 0))
#define LWP_CREATE_STAT_STOPPED(signo) (LWP_CREATE_STAT_EXIT(signo) | 0x7f)
#define LWP_CREATE_STAT_CONTINUED (0xffff)
#else
#error "No compatible lwp set status provided for this libc"
#endif
typedef struct rt_lwp *rt_lwp_t;
typedef struct rt_session *rt_session_t;
typedef struct rt_processgroup *rt_processgroup_t;
struct rt_session {
struct rt_object object;
rt_lwp_t leader;
rt_list_t processgroup;
pid_t sid;
pid_t foreground_pgid;
struct rt_mutex mutex;
struct lwp_tty *ctty;
};
struct rt_processgroup {
struct rt_object object;
rt_lwp_t leader;
rt_list_t process;
rt_list_t pgrp_list_node;
pid_t pgid;
pid_t sid;
struct rt_session *session;
struct rt_mutex mutex;
rt_atomic_t ref;
/* flags on process group */
unsigned int is_orphaned:1;
};
struct rt_lwp
{
#ifdef ARCH_MM_MMU
@ -100,14 +135,21 @@ struct rt_lwp
uint8_t lwp_type;
uint8_t reserv[3];
struct rt_lwp *parent;
struct rt_lwp *first_child;
struct rt_lwp *sibling;
/* flags */
unsigned int terminated:1;
unsigned int background:1;
unsigned int term_ctrlterm:1; /* have control terminal? */
unsigned int did_exec:1; /* Whether exec has been performed */
unsigned int jobctl_stopped:1; /* job control: current proc is stopped */
unsigned int wait_reap_stp:1; /* job control: has wait event for parent */
unsigned int sig_protected:1; /* signal: protected proc cannot be killed or stopped */
rt_list_t wait_list;
rt_bool_t terminated;
rt_bool_t background;
int lwp_ret;
struct rt_lwp *parent; /* parent process */
struct rt_lwp *first_child; /* first child process */
struct rt_lwp *sibling; /* sibling(child) process */
struct rt_wqueue waitpid_waiters;
lwp_status_t lwp_status;
void *text_entry;
uint32_t text_size;
@ -118,15 +160,16 @@ struct rt_lwp
void *args;
uint32_t args_length;
pid_t pid;
pid_t __pgrp; /*Accessed via process_group()*/
pid_t tty_old_pgrp;
pid_t session;
rt_list_t t_grp;
rt_list_t timer; /* POSIX timer object binding to a process */
pid_t sid; /* session ID */
pid_t pgid; /* process group ID */
struct rt_processgroup *pgrp;
rt_list_t pgrp_node; /* process group node */
rt_list_t t_grp; /* thread group */
rt_list_t timer; /* POSIX timer object binding to a process */
int leader; /* boolean value for session group_leader*/
struct dfs_fdtable fdt;
char cmd[RT_NAME_MAX];
char *exe_file; /* process file path */
/* POSIX signal */
struct lwp_signal signal;
@ -135,7 +178,7 @@ struct rt_lwp
struct rt_mutex object_mutex;
struct rt_user_context user_ctx;
struct rt_wqueue wait_queue; /*for console */
struct rt_wqueue wait_queue; /* for console */
struct tty_struct *tty; /* NULL if no tty */
struct lwp_avl_struct *address_search_head; /* for addressed object fast search */
@ -152,8 +195,9 @@ struct rt_lwp
uint64_t generation;
unsigned int asid;
#endif
struct rusage rt_rusage;
};
typedef struct rt_lwp *rt_lwp_t;
struct rt_lwp *lwp_self(void);
rt_err_t lwp_children_register(struct rt_lwp *parent, struct rt_lwp *child);
@ -182,7 +226,7 @@ void lwp_tid_put(int tid);
* @return rt_thread_t
*/
rt_thread_t lwp_tid_get_thread_and_inc_ref(int tid);
rt_thread_t lwp_tid_get_thread_raw(int tid);
/**
* @brief Decrease a reference count
*
@ -216,6 +260,78 @@ rt_err_t lwp_futex_init(void);
rt_err_t lwp_futex(struct rt_lwp *lwp, int *uaddr, int op, int val,
const struct timespec *timeout, int *uaddr2, int val3);
/* processgroup api */
rt_inline pid_t lwp_pgid_get_bypgrp(rt_processgroup_t group)
{
return group ? group->pgid : 0;
}
rt_inline pid_t lwp_pgid_get_byprocess(rt_lwp_t process)
{
return process ? process->pgid : 0;
}
rt_processgroup_t lwp_pgrp_find(pid_t pgid);
void lwp_pgrp_dec_ref(rt_processgroup_t pgrp);
rt_processgroup_t lwp_pgrp_find_and_inc_ref(pid_t pgid);
rt_processgroup_t lwp_pgrp_create(rt_lwp_t leader);
int lwp_pgrp_delete(rt_processgroup_t group);
/**
* Note: all pgrp operations that touch a process must be called in a context
* where the process lock is taken. This protects us from a possible deadlock
* condition.
*
* The order is mandatory in this case:
* PGRP_LOCK(pgrp);
* LWP_LOCK(p);
* ... business logic
* LWP_UNLOCK(p);
* PGRP_UNLOCK(pgrp);
*/
int lwp_pgrp_insert(rt_processgroup_t group, rt_lwp_t process);
int lwp_pgrp_remove(rt_processgroup_t group, rt_lwp_t process);
int lwp_pgrp_move(rt_processgroup_t group, rt_lwp_t process);
int lwp_pgrp_update_children_info(rt_processgroup_t group, pid_t sid, pid_t pgid);
/* session api */
rt_inline pid_t lwp_sid_get_bysession(rt_session_t session)
{
return session ? session->sid : 0;
}
rt_inline pid_t lwp_sid_get_bypgrp(rt_processgroup_t group)
{
return group ? group->sid : 0;
}
rt_inline pid_t lwp_sid_get_byprocess(rt_lwp_t process)
{
return process ? process->sid : 0;
}
rt_session_t lwp_session_find(pid_t sid);
rt_session_t lwp_session_create(struct rt_lwp *leader);
int lwp_session_delete(rt_session_t session);
/**
* Note: all session operations must be called in a context where the
* process lock is taken. This protects us from a possible deadlock
* condition.
*
* The order is mandatory in this case:
* PGRP_LOCK(pgrp);
* LWP_LOCK(p);
* ... business logic
* LWP_UNLOCK(p);
* PGRP_UNLOCK(pgrp);
*/
int lwp_session_insert(rt_session_t session, rt_processgroup_t group);
int lwp_session_remove(rt_session_t session, rt_processgroup_t group);
int lwp_session_move(rt_session_t session, rt_processgroup_t group);
int lwp_session_update_children_info(rt_session_t session, pid_t sid);
int lwp_session_set_foreground(rt_session_t session, pid_t pgid);
/* complete the job-control related business on process exit */
void lwp_jobctrl_on_exit(struct rt_lwp *lwp);
#ifdef __cplusplus
}
@ -301,6 +417,7 @@ int dbg_step_type(void);
void dbg_attach_req(void *pc);
int dbg_check_suspend(void);
void rt_hw_set_process_id(int pid);
void lwp_futex_exit_robust_list(rt_thread_t thread);
/* backtrace service */
rt_err_t lwp_backtrace_frame(rt_thread_t uthread, struct rt_hw_backtrace_frame *frame);

View File

@ -61,6 +61,10 @@ rt_noreturn void arch_thread_signal_enter(int signo, siginfo_t *psiginfo,
void *exp_frame, void *entry_uaddr,
lwp_sigset_t *save_sig_mask);
void arch_signal_check_erestart(void *eframe, void *ksp);
void arch_syscall_set_errno(void *eframe, int expected, int code);
int arch_backtrace_uthread(rt_thread_t thread);
#endif /* __LWP_ARCH_COMM__ */

View File

@ -797,6 +797,7 @@ int lwp_load(const char *filename, struct rt_lwp *lwp, uint8_t *load_addr, size_
/* copy file name to process name */
rt_strncpy(lwp->cmd, filename, RT_NAME_MAX);
lwp->exe_file = dfs_normalize_path(NULL, filename); // malloc
ret = elf_file_load(&load_info);
if (ret != RT_EOK)

View File

@ -475,6 +475,11 @@ static long _futex_wake(rt_futex_t futex, struct rt_lwp *lwp, int number,
{
number--;
woken_cnt++;
is_empty = RT_FALSE;
}
else
{
is_empty = RT_TRUE;
}
_futex_unlock(lwp, op_flags);
}
@ -512,23 +517,16 @@ static long _futex_requeue(rt_futex_t futex1, rt_futex_t futex2,
*/
while (nr_wake && !is_empty)
{
rt_sched_lock_level_t slvl;
rt_sched_lock(&slvl);
is_empty = rt_list_isempty(&(futex1->waiting_thread));
if (!is_empty)
if (rt_susp_list_dequeue(&futex1->waiting_thread, RT_EOK))
{
thread = RT_THREAD_LIST_NODE_ENTRY(futex1->waiting_thread.next);
/* remove from waiting list */
rt_list_remove(&RT_THREAD_LIST_NODE(thread));
thread->error = RT_EOK;
/* resume the suspended thread */
rt_thread_resume(thread);
nr_wake--;
woken_cnt++;
is_empty = RT_FALSE;
}
else
{
is_empty = RT_TRUE;
}
rt_sched_unlock(slvl);
}
rtn = woken_cnt;
@ -542,7 +540,10 @@ static long _futex_requeue(rt_futex_t futex1, rt_futex_t futex2,
{
rt_sched_lock_level_t slvl;
rt_sched_lock(&slvl);
/* moving from one susp list to another */
is_empty = rt_list_isempty(&(futex1->waiting_thread));
if (!is_empty)
{
thread = RT_THREAD_LIST_NODE_ENTRY(futex1->waiting_thread.next);
@ -914,7 +915,7 @@ void lwp_futex_exit_robust_list(rt_thread_t thread)
rc = _fetch_robust_entry(&next_entry, &entry->next, &next_pi);
if (entry != pending)
{
if (_handle_futex_death((void *)entry + futex_offset, thread, pi,
if (_handle_futex_death((int *)((size_t)entry + futex_offset), thread, pi,
RT_FALSE))
return;
}

View File

@ -6,6 +6,7 @@
* Change Logs:
* Date Author Notes
* 2023-07-25 Shell first version
* 2023-11-25 Shell Add pgrp, session lock API
*/
#define DBG_TAG "lwp.internal"
@ -15,7 +16,7 @@
#include <stdlib.h>
#include "lwp_internal.h"
static rt_err_t _mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t interruptable)
static rt_err_t _mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, int flags)
{
LWP_DEF_RETURN_CODE(rc);
int retry;
@ -45,15 +46,15 @@ static rt_err_t _mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t i
do {
retry = 0;
if (interruptable)
if (flags & LWP_MTX_FLAGS_INTR)
rc = rt_mutex_take_interruptible(mtx, effect_timeout);
else
rc = rt_mutex_take(mtx, effect_timeout);
rc = rt_mutex_take_killable(mtx, effect_timeout);
#ifdef LWP_DEBUG
if (rc == RT_EOK)
{
if (rt_mutex_get_hold(mtx) > 1)
if (!(flags & LWP_MTX_FALGS_NESTED) && rt_mutex_get_hold(mtx) > 1)
{
LOG_W("Already hold the lock");
rt_backtrace();
@ -88,6 +89,7 @@ static rt_err_t _mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t i
}
else
{
rc = -RT_EINVAL;
LOG_W("%s: mtx should not be NULL", __func__);
RT_ASSERT(0);
}
@ -95,10 +97,10 @@ static rt_err_t _mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t i
LWP_RETURN(rc);
}
rt_err_t lwp_mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t interruptable)
rt_err_t lwp_mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, int flags)
{
LWP_DEF_RETURN_CODE(rc);
rc = _mutex_take_safe(mtx, timeout, interruptable);
rc = _mutex_take_safe(mtx, timeout, flags);
LWP_RETURN(rc);
}
@ -116,18 +118,17 @@ rt_err_t lwp_mutex_release_safe(rt_mutex_t mtx)
LWP_RETURN(rc);
}
rt_err_t lwp_critical_enter(struct rt_lwp *lwp)
rt_err_t lwp_critical_enter(struct rt_lwp *lwp, int flags)
{
rt_err_t rc;
rc = lwp_mutex_take_safe(&lwp->lwp_lock, RT_WAITING_FOREVER, 0);
do {
rc = lwp_mutex_take_safe(&lwp->lwp_lock, RT_WAITING_FOREVER, flags);
} while (rc != RT_EOK && !(flags & LWP_MTX_FLAGS_INTR) && rc == -RT_EINTR);
/* if current process is force killed */
if (rc != RT_EOK)
if (rc != RT_EOK && rc != -RT_EINTR)
{
if (rc == -RT_EINTR && lwp_self() != RT_NULL)
sys_exit(EXIT_SUCCESS);
else
LOG_I("%s: unexpected return code = %ld", __func__, rc);
LOG_I("%s: unexpected return code = %ld", __func__, rc);
}
return rc;
@ -137,3 +138,45 @@ rt_err_t lwp_critical_exit(struct rt_lwp *lwp)
{
return lwp_mutex_release_safe(&lwp->lwp_lock);
}
rt_err_t lwp_pgrp_critical_enter(struct rt_processgroup *pgrp, int flags)
{
rt_err_t rc;
do {
rc = lwp_mutex_take_safe(&pgrp->mutex, RT_WAITING_FOREVER, flags);
} while (rc != RT_EOK && !(flags & LWP_MTX_FLAGS_INTR) && rc == -RT_EINTR);
/* if current process is force killed */
if (rc != RT_EOK && rc != -RT_EINTR)
{
LOG_I("%s: unexpected return code = %ld", __func__, rc);
}
return rc;
}
rt_err_t lwp_pgrp_critical_exit(struct rt_processgroup *pgrp)
{
return lwp_mutex_release_safe(&pgrp->mutex);
}
rt_err_t lwp_sess_critical_enter(struct rt_session *sess, int flags)
{
rt_err_t rc;
do {
rc = lwp_mutex_take_safe(&sess->mutex, RT_WAITING_FOREVER, flags);
} while (rc != RT_EOK && !(flags & LWP_MTX_FLAGS_INTR) && rc == -RT_EINTR);
/* if current process is force killed */
if (rc != RT_EOK && rc != -RT_EINTR)
{
LOG_I("%s: unexpected return code = %ld", __func__, rc);
}
return rc;
}
rt_err_t lwp_sess_critical_exit(struct rt_session *sess)
{
return lwp_mutex_release_safe(&sess->mutex);
}

View File

@ -6,21 +6,32 @@
* Change Logs:
* Date Author Notes
* 2023-07-25 Shell first version
* 2023-11-25 Shell Add pgrp, session lock API
*/
#ifndef __LWP_INTERNAL_H__
#define __LWP_INTERNAL_H__
#include "lwp.h"
#include "lwp_arch.h"
#include "lwp_user_mm.h"
#include "lwp_mm.h"
#include <rtthread.h>
#include "libc_musl.h"
struct rt_lwp;
rt_err_t lwp_mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, rt_bool_t interruptable);
#define LWP_MTX_FLAGS_INTR 0x1 /* interruptible waiting */
#define LWP_MTX_FALGS_NESTED 0x2 /* allow nested */
rt_err_t lwp_mutex_take_safe(rt_mutex_t mtx, rt_int32_t timeout, int flags);
rt_err_t lwp_mutex_release_safe(rt_mutex_t mtx);
rt_inline rt_bool_t lwp_in_user_space(const char *addr)
{
return (addr >= (char *)USER_VADDR_START && addr < (char *)USER_VADDR_TOP);
}
#ifdef RT_USING_SMP
#define LOCAL_IRQ_MASK() rt_hw_local_irq_disable()
#define LOCAL_IRQ_UNMASK(level) rt_hw_local_irq_enable(level)
@ -30,16 +41,34 @@ rt_err_t lwp_mutex_release_safe(rt_mutex_t mtx);
#endif
#ifndef LWP_USING_CPUS_LOCK
rt_err_t lwp_critical_enter(struct rt_lwp *lwp);
rt_err_t lwp_sess_critical_enter(struct rt_session *sess, int flags);
rt_err_t lwp_sess_critical_exit(struct rt_session *sess);
rt_err_t lwp_pgrp_critical_enter(struct rt_processgroup *pgrp, int flags);
rt_err_t lwp_pgrp_critical_exit(struct rt_processgroup *pgrp);
rt_err_t lwp_critical_enter(struct rt_lwp *lwp, int flags);
rt_err_t lwp_critical_exit(struct rt_lwp *lwp);
#define LWP_LOCK(lwp) \
do { \
RT_DEBUG_SCHEDULER_AVAILABLE(1); \
if (lwp_critical_enter(lwp) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
#define LWP_ASSERT_LOCKED(proc) RT_ASSERT(rt_mutex_get_owner(&(proc)->lwp_lock) == rt_thread_self())
#define PGRP_ASSERT_LOCKED(pgrp) RT_ASSERT(rt_mutex_get_owner(&(pgrp)->mutex) == rt_thread_self())
#define LWP_LOCK(lwp) \
do \
{ \
RT_DEBUG_SCHEDULER_AVAILABLE(1); \
if (lwp_critical_enter(lwp, 0) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define LWP_LOCK_NESTED(lwp) \
do \
{ \
RT_DEBUG_SCHEDULER_AVAILABLE(1); \
if (lwp_critical_enter(lwp, LWP_MTX_FALGS_NESTED) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define LWP_UNLOCK(lwp) \
@ -50,10 +79,72 @@ rt_err_t lwp_critical_exit(struct rt_lwp *lwp);
} \
} while (0)
#define PGRP_LOCK(pgrp) \
do \
{ \
RT_DEBUG_SCHEDULER_AVAILABLE(1); \
if (lwp_pgrp_critical_enter(pgrp, 0) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define PGRP_LOCK_NESTED(pgrp) \
do \
{ \
RT_DEBUG_SCHEDULER_AVAILABLE(1); \
if (lwp_pgrp_critical_enter(pgrp, LWP_MTX_FALGS_NESTED) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define PGRP_UNLOCK(pgrp) \
do \
{ \
if (lwp_pgrp_critical_exit(pgrp) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define SESS_LOCK(sess) \
do \
{ \
RT_DEBUG_SCHEDULER_AVAILABLE(1); \
if (lwp_sess_critical_enter(sess, 0) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define SESS_LOCK_NESTED(sess) \
do \
{ \
RT_DEBUG_SCHEDULER_AVAILABLE(1); \
if (lwp_sess_critical_enter(sess, LWP_MTX_FALGS_NESTED) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#define SESS_UNLOCK(sess) \
do \
{ \
if (lwp_sess_critical_exit(sess) != RT_EOK) \
{ \
RT_ASSERT(0); \
} \
} while (0)
#else
#define LWP_LOCK(lwp) rt_base_t level = rt_hw_interrupt_disable()
#define LWP_UNLOCK(lwp) rt_hw_interrupt_enable(level)
#define PGRP_LOCK(pgrp) rt_base_t level = rt_hw_interrupt_disable()
#define PGRP_UNLOCK(pgrp) rt_hw_interrupt_enable(level)
#define SESS_LOCK(sess) rt_base_t level = rt_hw_interrupt_disable()
#define SESS_UNLOCK(sess) rt_hw_interrupt_enable(level)
#endif /* LWP_USING_CPUS_LOCK */
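/*
 * A sketch of the mandated lock order when touching a process through its
 * group (see the ordering note in lwp.h; example_update is an illustrative
 * name, not part of this header).
 */
rt_inline int example_update(rt_processgroup_t pgrp, rt_lwp_t proc)
{
    int rc = 0;

    PGRP_LOCK(pgrp);        /* outer lock: the process group */
    LWP_LOCK_NESTED(proc);  /* inner lock: the process, nested form */

    if (proc->pgrp == pgrp) /* business logic under both locks */
        rc = 1;

    LWP_UNLOCK(proc);       /* release in reverse order */
    PGRP_UNLOCK(pgrp);
    return rc;
}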
@ -95,4 +186,6 @@ rt_err_t lwp_critical_exit(struct rt_lwp *lwp);
#define LWP_RETURN(name) {RT_ASSERT(name != _LWP_UNINITIALIZED_RC);return name;}
#endif /* LWP_DEBUG */
int load_ldso(struct rt_lwp *lwp, char *exec_name, char *const argv[], char *const envp[]);
#endif /* __LWP_INTERNAL_H__ */

View File

@ -0,0 +1,86 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#define DBG_TAG "lwp.tty"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <terminal/terminal.h>
#include "lwp_internal.h"
static void jobctrl_set_pgrp_orphaned(struct rt_processgroup *pgrp)
{
rt_lwp_t proc, nx_proc;
PGRP_LOCK(pgrp);
pgrp->is_orphaned = 1;
rt_list_for_each_entry(proc, &pgrp->process, pgrp_node)
{
LWP_LOCK(proc);
if (proc->jobctl_stopped)
{
LWP_UNLOCK(proc);
rt_list_for_each_entry_safe(proc, nx_proc, &pgrp->process, pgrp_node)
{
LWP_LOCK(proc);
lwp_signal_kill(proc, SIGHUP, SI_KERNEL, 0);
lwp_signal_kill(proc, SIGCONT, SI_KERNEL, 0);
LWP_UNLOCK(proc);
}
}
LWP_UNLOCK(proc);
}
PGRP_UNLOCK(pgrp);
}
void lwp_jobctrl_on_exit(struct rt_lwp *lwp)
{
rt_processgroup_t pgrp;
rt_session_t session;
lwp_tty_t tp;
pgrp = lwp->pgrp;
RT_ASSERT(pgrp);
session = pgrp->session;
RT_ASSERT(session);
/**
* As the session leader, we have to mark the tty as freed so that others
* can race to take it before we actually close and release that tty.
*/
SESS_LOCK(session);
if (session->sid == lwp->pid)
{
tp = session->ctty;
session->leader = 0;
/* signal to foreground group that modem is disconnected */
if (tp)
{
tty_lock(tp);
if (tp->t_session == session)
lwp_tty_signal_pgrp(tp, SIGHUP);
tty_unlock(tp);
}
/* revoke tty vnode ? */
rt_list_for_each_entry(pgrp, &session->processgroup, pgrp_list_node)
{
jobctrl_set_pgrp_orphaned(pgrp);
}
}
SESS_UNLOCK(session);
/* release tty */
/* allow tty stolen? */
}

components/lwp/lwp_pgrp.c Normal file
View File

@ -0,0 +1,542 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-17 xqyjlj the first version
* 2023-11-28 Shell Add reference management for pgrp;
* Using lwp lock API and fix the dead lock problem
*/
#include "lwp.h"
#include "lwp_internal.h"
#include "lwp_syscall.h"
#define DBG_TAG "lwp.pgrp"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>
void lwp_pgrp_dec_ref(rt_processgroup_t pgrp)
{
if (rt_atomic_add(&pgrp->ref, -1) == 1)
{
rt_mutex_detach(&(pgrp->mutex));
/* clear self pgid */
pgrp->pgid = 0;
rt_free(pgrp);
}
}
rt_processgroup_t lwp_pgrp_find_and_inc_ref(pid_t pgid)
{
rt_processgroup_t group;
group = lwp_pgrp_find(pgid);
if (group)
{
rt_atomic_add(&(group->ref), 1);
}
return group;
}
rt_processgroup_t lwp_pgrp_find(pid_t pgid)
{
rt_base_t level;
rt_processgroup_t group = RT_NULL;
rt_list_t *node = RT_NULL;
struct rt_object_information *information = RT_NULL;
information = rt_object_get_information(RT_Object_Class_ProcessGroup);
/* parameter check */
if ((pgid < 0) || (information == RT_NULL))
{
return RT_NULL;
}
if (pgid == 0)
{
pgid = lwp_getpid();
}
/* enter critical */
level = rt_spin_lock_irqsave(&(information->spinlock));
/* try to find process group */
rt_list_for_each(node, &(information->object_list))
{
group = (rt_processgroup_t)rt_list_entry(node, struct rt_object, list);
if (group->pgid == pgid)
{
rt_spin_unlock_irqrestore(&(information->spinlock), level);
return group;
}
}
rt_spin_unlock_irqrestore(&(information->spinlock), level);
LOG_I("cannot find(pgid:%d)() by (pid:%d, pgid:%d)", pgid, lwp_getpid(), lwp_pgid_get_byprocess(lwp_self()));
return RT_NULL;
}
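/*
 * Sketch of the reference-counted lookup pattern above: hold a reference
 * while using the group, then drop it; the last dropper frees the object
 * (example_query_sid is an illustrative name, not part of this file).
 */
static pid_t example_query_sid(pid_t pgid)
{
    pid_t sid = 0;
    rt_processgroup_t grp = lwp_pgrp_find_and_inc_ref(pgid);

    if (grp)
    {
        sid = lwp_sid_get_bypgrp(grp);
        lwp_pgrp_dec_ref(grp); /* may free grp if this was the last ref */
    }
    return sid;
}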
rt_processgroup_t lwp_pgrp_create(rt_lwp_t leader)
{
rt_processgroup_t group = RT_NULL;
/* parameter check */
if (leader == RT_NULL)
{
return RT_NULL;
}
group = rt_malloc(sizeof(struct rt_processgroup));
if (group != RT_NULL)
{
rt_object_init(&(group->object), RT_Object_Class_ProcessGroup, "pgrp");
rt_list_init(&(group->process));
rt_list_init(&(group->pgrp_list_node));
rt_mutex_init(&(group->mutex), "pgrp", RT_IPC_FLAG_PRIO);
group->leader = leader;
group->sid = 0;
group->session = RT_NULL;
group->is_orphaned = 0;
group->pgid = lwp_to_pid(leader);
rt_atomic_store(&group->ref, 1);
LOG_I("create(ptr:%p, pgid:%d) by pid:%d", group, group->pgid, lwp_getpid());
}
return group;
}
#include <terminal/terminal.h>
int lwp_pgrp_delete(rt_processgroup_t group)
{
int retry = 1;
rt_session_t session = RT_NULL;
int is_session_free = 0;
lwp_tty_t ctty;
/* parameter check */
if (group == RT_NULL)
{
return -EINVAL;
}
LOG_I("delete(ptr:%p, pgid:%d)() by pid:%d", group, group->pgid, lwp_getpid());
while (retry)
{
retry = 0;
session = lwp_session_find(lwp_sid_get_bypgrp(group));
if (session)
{
ctty = session->ctty;
if (ctty)
{
/**
* Note: it's safe to release the pgrp even if we do this multiple
* times; the necessary check is done before the tty actually detaches
*/
tty_lock(ctty);
tty_rel_pgrp(ctty, group); // tty_unlock
}
SESS_LOCK(session);
PGRP_LOCK_NESTED(group);
if (group->session == session && session->ctty == ctty)
{
rt_object_detach(&(group->object));
is_session_free = lwp_session_remove(session, group);
}
else
{
retry = 1;
}
PGRP_UNLOCK(group);
if (is_session_free != 1)
SESS_UNLOCK(session);
}
else
{
rt_object_detach(&(group->object));
}
}
lwp_pgrp_dec_ref(group);
return 0;
}
int lwp_pgrp_insert(rt_processgroup_t group, rt_lwp_t process)
{
/* parameter check */
if (group == RT_NULL || process == RT_NULL)
{
return -EINVAL;
}
PGRP_LOCK_NESTED(group);
LWP_LOCK_NESTED(process);
RT_ASSERT(rt_mutex_get_hold(&process->lwp_lock) <= rt_mutex_get_hold(&group->mutex));
process->pgid = group->pgid;
process->pgrp = group;
process->sid = group->sid;
rt_list_insert_after(&(group->process), &(process->pgrp_node));
LWP_UNLOCK(process);
PGRP_UNLOCK(group);
return 0;
}
int lwp_pgrp_remove(rt_processgroup_t group, rt_lwp_t process)
{
rt_bool_t is_empty = RT_FALSE;
/* parameter check */
if (group == RT_NULL || process == RT_NULL)
{
return -EINVAL;
}
PGRP_LOCK_NESTED(group);
LWP_LOCK_NESTED(process);
RT_ASSERT(rt_mutex_get_hold(&process->lwp_lock) <= rt_mutex_get_hold(&group->mutex));
rt_list_remove(&(process->pgrp_node));
/* clear children sid and pgid */
process->pgrp = RT_NULL;
process->pgid = 0;
process->sid = 0;
LWP_UNLOCK(process);
is_empty = rt_list_isempty(&(group->process));
PGRP_UNLOCK(group);
if (is_empty)
{
lwp_pgrp_delete(group);
return 1;
}
return 0;
}
int lwp_pgrp_move(rt_processgroup_t group, rt_lwp_t process)
{
int retry = 1;
rt_processgroup_t old_group;
/* parameter check */
if (group == RT_NULL || process == RT_NULL)
{
return -EINVAL;
}
if (lwp_pgid_get_bypgrp(group) == lwp_pgid_get_byprocess(process))
{
return 0;
}
PGRP_LOCK(group);
while (retry)
{
retry = 0;
old_group = lwp_pgrp_find_and_inc_ref(lwp_pgid_get_byprocess(process));
PGRP_LOCK(old_group);
LWP_LOCK(process);
if (process->pgrp == old_group)
{
lwp_pgrp_remove(old_group, process);
lwp_pgrp_insert(group, process);
}
else
{
retry = 1;
}
PGRP_UNLOCK(old_group);
LWP_UNLOCK(process);
lwp_pgrp_dec_ref(old_group);
}
PGRP_UNLOCK(group);
return 0;
}
int lwp_pgrp_update_children_info(rt_processgroup_t group, pid_t sid, pid_t pgid)
{
rt_list_t *node = RT_NULL;
rt_lwp_t process = RT_NULL;
if (group == RT_NULL)
{
return -EINVAL;
}
PGRP_LOCK_NESTED(group);
/* try to find process group */
rt_list_for_each(node, &(group->process))
{
process = (rt_lwp_t)rt_list_entry(node, struct rt_lwp, pgrp_node);
LWP_LOCK(process);
if (sid != -1)
{
process->sid = sid;
}
if (pgid != -1)
{
process->pgid = pgid;
process->pgrp = group;
}
LWP_UNLOCK(process);
}
PGRP_UNLOCK(group);
return 0;
}
/**
* setpgid() sets the PGID of the process specified by pid to pgid.
* If pid is zero, then the process ID of the calling process is used.
* If pgid is zero, then the PGID of the process specified by pid is made the same as its process ID.
* If setpgid() is used to move a process from one process group to another (as is done by some shells when
* creating pipelines), both process groups must be part of the same session (see setsid(2) and credentials(7)).
* In this case, the pgid specifies an existing process group to be joined and the session ID of that group must
* match the session ID of the joining process.
*/
sysret_t sys_setpgid(pid_t pid, pid_t pgid)
{
rt_lwp_t process, self_process;
pid_t sid;
rt_processgroup_t group;
rt_session_t session;
sysret_t err = 0;
if (pgid == 0)
{
pgid = pid;
}
if (pgid < 0)
{
return -EINVAL;
}
self_process = lwp_self();
if (pid == 0)
{
pid = self_process->pid;
process = self_process;
}
else
{
lwp_pid_lock_take();
process = lwp_from_pid_locked(pid);
lwp_pid_lock_release();
if (process == RT_NULL)
{
return -ESRCH;
}
}
LWP_LOCK(process);
if (process->parent == self_process)
{
/**
* change the process group ID of one of the children of the calling process and the child was in
* a different session
*/
if (lwp_sid_get_byprocess(process) != lwp_sid_get_byprocess(self_process))
{
err = -EPERM;
LWP_UNLOCK(process);
goto exit;
}
/**
* An attempt was made to change the process group ID of one of the children of the calling process
* and the child had already performed an execve(2)
*/
if (process->did_exec)
{
err = -EACCES;
LWP_UNLOCK(process);
goto exit;
}
}
else
{
/**
* pid is not the calling process and not a child of the calling process.
*/
if (process != self_process)
{
err = -ESRCH;
LWP_UNLOCK(process);
goto exit;
}
}
LWP_UNLOCK(process);
sid = lwp_sid_get_byprocess(self_process);
if (pgid != pid)
{
group = lwp_pgrp_find(pgid);
if (group == RT_NULL)
{
group = lwp_pgrp_create(process);
}
else
{
/**
* An attempt was made to move a process into a process group in a different session
*/
if (sid != lwp_sid_get_bypgrp(group))
{
err = -EPERM;
goto exit;
}
/**
* or to change the process group ID of a session leader
*/
if (sid == lwp_to_pid(process))
{
err = -EPERM;
goto exit;
}
lwp_pgrp_move(group, process);
}
}
else
{
group = lwp_pgrp_find(pgid);
if (group == RT_NULL)
{
group = lwp_pgrp_create(process);
lwp_pgrp_move(group, process);
session = lwp_session_find(sid);
if (session == RT_NULL)
{
LOG_E("the session of sid: %d cannot be found", sid);
err = -EPERM;
goto exit;
}
else
{
lwp_session_insert(session, group);
}
}
else // this represents repeated calls
{
/**
* or to change the process group ID of a session leader
*/
if (lwp_sid_get_bypgrp(group) == lwp_pgid_get_bypgrp(group))
{
err = -EPERM;
goto exit;
}
else
{
err = 0;
}
}
}
exit:
return err;
}
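/**
 * Illustrative user-space sketch (not part of this patch): how a shell would
 * typically use the setpgid() semantics documented above when spawning a
 * pipeline. The names below are hypothetical.
 *
 *   pid_t child = fork();
 *   if (child == 0)
 *   {
 *       setpgid(0, 0);          // child: join a new group keyed by its pid
 *       execvp(argv[0], argv);  // the exec'ed image keeps the new pgid
 *   }
 *   else if (child > 0)
 *   {
 *       setpgid(child, child);  // parent repeats the call to close the race;
 *                               // it fails with EACCES once the child execs
 *   }
 */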
/**
* getpgid() returns the PGID of the process specified by pid.
* If pid is zero, the process ID of the calling process is used. (Retrieving the PGID of a process other
* than the caller is rarely necessary, and the POSIX.1 getpgrp() is preferred for that task.)
*/
sysret_t sys_getpgid(pid_t pid)
{
rt_lwp_t process;
lwp_pid_lock_take();
process = lwp_from_pid_locked(pid);
lwp_pid_lock_release();
if (process == RT_NULL)
{
return -ESRCH;
}
return lwp_pgid_get_byprocess(process);
}
#ifdef RT_USING_FINSH
#include "finsh.h"
long list_processgroup(void)
{
int count = 0, index;
rt_processgroup_t *groups;
rt_processgroup_t group;
rt_thread_t thread;
char name[RT_NAME_MAX];
rt_kprintf("PGID SID leader process\n");
rt_kprintf("---- ---- ----------------\n");
count = rt_object_get_length(RT_Object_Class_ProcessGroup);
if (count > 0)
{
/* get pointers */
groups = (rt_processgroup_t *)rt_calloc(count, sizeof(rt_processgroup_t));
if (groups)
{
index = rt_object_get_pointers(RT_Object_Class_ProcessGroup, (rt_object_t *)groups, count);
if (index > 0)
{
for (index = 0; index < count; index++)
{
struct rt_processgroup pgrp;
group = groups[index];
PGRP_LOCK(group);
rt_memcpy(&pgrp, group, sizeof(struct rt_processgroup));
PGRP_UNLOCK(group);
if (pgrp.leader)
{
thread = rt_list_entry(pgrp.leader->t_grp.prev, struct rt_thread, sibling);
rt_strncpy(name, thread->parent.name, RT_NAME_MAX);
}
else
{
rt_strncpy(name, "nil", RT_NAME_MAX);
}
rt_kprintf("%4d %4d %-*.*s\n", pgrp.pgid, pgrp.sid, RT_NAME_MAX, RT_NAME_MAX, name);
}
}
rt_free(groups);
}
}
return 0;
}
MSH_CMD_EXPORT(list_processgroup, list process group);
#endif
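/**
 * Hypothetical msh session for the command above; the PIDs and names are
 * illustrative only:
 *
 *   msh /> list_processgroup
 *   PGID SID  leader process
 *   ---- ---- ----------------
 *      2    2 sh
 *      5    2 hello
 */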

View File

@ -14,32 +14,47 @@
* error
* 2023-10-27 shell Format codes of sys_exit(). Fix the data racing where lock is missed
* Add reference on pid/tid, so the resource is not freed while using.
* Add support for waitpid(options=WNOHANG)
* 2023-11-16 xqyjlj Fix the case where pid is 0
* 2023-11-17 xqyjlj add process group and session support
* 2023-11-24 shell Support of waitpid(options=WNOTRACED|WCONTINUED);
* Reimplement the waitpid with a wait queue method, and fix the problem
* with waitpid(pid=-1)/waitpid(pid=-pgid)/waitpid(pid=0) where only one
* process could be traced while the waiter was suspended
* 2024-01-25 shell porting to new sched API
*/
/* includes scheduler related API */
#define __RT_IPC_SOURCE__
#include <rthw.h>
#include <rtthread.h>
/* for waitpid, we are compatible to GNU extension */
#define _GNU_SOURCE
#define DBG_TAG "lwp.pid"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "lwp_internal.h"
#include <rthw.h>
#include <rtthread.h>
#include <dfs_file.h>
#include <unistd.h>
#include <stdio.h> /* rename() */
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/statfs.h> /* statfs() */
#include "lwp_internal.h"
#include "tty.h"
#include <stdatomic.h>
#ifdef ARCH_MM_MMU
#include "lwp_user_mm.h"
#endif
#ifdef RT_USING_DFS_PROCFS
#include "proc.h"
#include "procfs.h"
#endif
#define PID_MAX 10000
#define PID_CT_ASSERT(name, x) \
@ -147,8 +162,23 @@ static void lwp_pid_put_locked(pid_t pid)
}
}
#ifdef RT_USING_DFS_PROCFS
rt_inline void _free_proc_dentry(rt_lwp_t lwp)
{
char pid_str[64] = {0};
rt_snprintf(pid_str, 64, "%d", lwp->pid);
pid_str[63] = 0;
proc_remove_dentry(pid_str, 0);
}
#else
#define _free_proc_dentry(lwp)
#endif
void lwp_pid_put(struct rt_lwp *lwp)
{
_free_proc_dentry(lwp);
lwp_pid_lock_take();
lwp_pid_put_locked(lwp->pid);
lwp_pid_lock_release();
@ -168,6 +198,13 @@ static void lwp_pid_set_lwp_locked(pid_t pid, struct rt_lwp *lwp)
{
p->data = lwp;
lwp_ref_inc(lwp);
#ifdef RT_USING_DFS_PROCFS
if (pid)
{
proc_pid(pid);
}
#endif
}
}
@ -352,19 +389,23 @@ rt_lwp_t lwp_create(rt_base_t flags)
if (new_lwp)
{
/* minimal setup of lwp object */
new_lwp->session = -1;
new_lwp->ref = 1;
#ifdef RT_USING_SMP
new_lwp->bind_cpu = RT_CPUS_NR;
#endif
rt_list_init(&new_lwp->wait_list);
new_lwp->exe_file = RT_NULL;
rt_list_init(&new_lwp->t_grp);
rt_list_init(&new_lwp->pgrp_node);
rt_list_init(&new_lwp->timer);
lwp_user_object_lock_init(new_lwp);
rt_wqueue_init(&new_lwp->wait_queue);
rt_wqueue_init(&new_lwp->waitpid_waiters);
lwp_signal_init(&new_lwp->signal);
rt_mutex_init(&new_lwp->lwp_lock, "lwp_lock", RT_IPC_FLAG_PRIO);
if (flags & LWP_CREATE_FLAG_NOTRACE_EXEC)
new_lwp->did_exec = RT_TRUE;
/* lwp with pid */
if (flags & LWP_CREATE_FLAG_ALLOC_PID)
{
@ -375,7 +416,7 @@ rt_lwp_t lwp_create(rt_base_t flags)
lwp_user_object_lock_destroy(new_lwp);
rt_free(new_lwp);
new_lwp = RT_NULL;
LOG_E("pid slot fulled!\n");
LOG_E("%s: pid slot fulled", __func__);
}
else
{
@ -384,6 +425,20 @@ rt_lwp_t lwp_create(rt_base_t flags)
}
lwp_pid_lock_release();
}
rt_memset(&new_lwp->rt_rusage, 0, sizeof(new_lwp->rt_rusage));
if (flags & LWP_CREATE_FLAG_INIT_USPACE)
{
rt_err_t error = lwp_user_space_init(new_lwp, 0);
if (error)
{
lwp_pid_put(new_lwp);
lwp_user_object_lock_destroy(new_lwp);
rt_free(new_lwp);
new_lwp = RT_NULL;
LOG_E("%s: failed to initialize user space", __func__);
}
}
}
LOG_D("%s(pid=%d) => %p", __func__, new_lwp ? new_lwp->pid : -1, new_lwp);
@ -393,6 +448,8 @@ rt_lwp_t lwp_create(rt_base_t flags)
/** when reference is 0, a lwp can be released */
void lwp_free(struct rt_lwp* lwp)
{
rt_processgroup_t group = RT_NULL;
if (lwp == RT_NULL)
{
return;
@ -406,6 +463,10 @@ void lwp_free(struct rt_lwp* lwp)
* all the reference is clear)
*/
LOG_D("lwp free: %p", lwp);
rt_free(lwp->exe_file);
group = lwp_pgrp_find(lwp_pgid_get_byprocess(lwp));
if (group)
lwp_pgrp_remove(group, lwp);
LWP_LOCK(lwp);
@ -472,6 +533,15 @@ void lwp_free(struct rt_lwp* lwp)
rt_inline rt_noreturn
void _thread_exit(rt_lwp_t lwp, rt_thread_t thread)
{
LWP_LOCK(lwp);
lwp->rt_rusage.ru_stime.tv_sec += thread->system_time / RT_TICK_PER_SECOND;
lwp->rt_rusage.ru_stime.tv_usec += thread->system_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
lwp->rt_rusage.ru_utime.tv_sec += thread->user_time / RT_TICK_PER_SECOND;
lwp->rt_rusage.ru_utime.tv_usec += thread->user_time % RT_TICK_PER_SECOND * (1000000 / RT_TICK_PER_SECOND);
rt_list_remove(&thread->sibling);
LWP_UNLOCK(lwp);
lwp_futex_exit_robust_list(thread);
/**
* Note: the tid tree always hold a reference to thread, hence the tid must
* be release before cleanup of thread
@ -479,10 +549,6 @@ void _thread_exit(rt_lwp_t lwp, rt_thread_t thread)
lwp_tid_put(thread->tid);
thread->tid = 0;
LWP_LOCK(lwp);
rt_list_remove(&thread->sibling);
LWP_UNLOCK(lwp);
rt_thread_delete(thread);
rt_schedule();
while (1) ;
@ -497,11 +563,11 @@ rt_inline void _clear_child_tid(rt_thread_t thread)
thread->clear_child_tid = RT_NULL;
lwp_put_to_user(clear_child_tid, &t, sizeof t);
sys_futex(clear_child_tid, FUTEX_WAKE | FUTEX_PRIVATE, 1, RT_NULL, RT_NULL, 0);
sys_futex(clear_child_tid, FUTEX_WAKE, 1, RT_NULL, RT_NULL, 0);
}
}
void lwp_exit(rt_lwp_t lwp, rt_base_t status)
void lwp_exit(rt_lwp_t lwp, lwp_status_t status)
{
rt_thread_t thread;
@ -523,7 +589,7 @@ void lwp_exit(rt_lwp_t lwp, rt_base_t status)
* Brief: only one thread should calls exit_group(),
* but we can not ensured that during run-time
*/
lwp->lwp_ret = LWP_CREATE_STAT(status);
lwp->lwp_status = status;
LWP_UNLOCK(lwp);
lwp_terminate(lwp);
@ -550,7 +616,7 @@ void lwp_exit(rt_lwp_t lwp, rt_base_t status)
_thread_exit(lwp, thread);
}
void lwp_thread_exit(rt_thread_t thread, rt_base_t status)
void lwp_thread_exit(rt_thread_t thread, int status)
{
rt_thread_t header_thr;
struct rt_lwp *lwp;
@ -568,7 +634,11 @@ void lwp_thread_exit(rt_thread_t thread, rt_base_t status)
header_thr = rt_list_entry(lwp->t_grp.prev, struct rt_thread, sibling);
if (header_thr == thread && thread->sibling.prev == &lwp->t_grp)
{
lwp->lwp_ret = LWP_CREATE_STAT(status);
/**
* if thread exit, treated as process exit normally.
* This is reasonable since trap event is exited through lwp_exit()
*/
lwp->lwp_status = LWP_CREATE_STAT_EXIT(status);
LWP_UNLOCK(lwp);
lwp_terminate(lwp);
@ -582,38 +652,6 @@ void lwp_thread_exit(rt_thread_t thread, rt_base_t status)
_thread_exit(lwp, thread);
}
static void _pop_tty(rt_lwp_t lwp)
{
if (!lwp->background)
{
struct termios *old_stdin_termios = get_old_termios();
struct rt_lwp *old_lwp = NULL;
if (lwp->session == -1)
{
tcsetattr(1, 0, old_stdin_termios);
}
if (lwp->tty != RT_NULL)
{
rt_mutex_take(&lwp->tty->lock, RT_WAITING_FOREVER);
if (lwp->tty->foreground == lwp)
{
old_lwp = tty_pop(&lwp->tty->head, RT_NULL);
lwp->tty->foreground = old_lwp;
}
else
{
tty_pop(&lwp->tty->head, lwp);
}
rt_mutex_release(&lwp->tty->lock);
LWP_LOCK(lwp);
lwp->tty = RT_NULL;
LWP_UNLOCK(lwp);
}
}
}
/** @note the reference is not for synchronization, but for the release of resource. the synchronization is done through lwp & pid lock */
int lwp_ref_inc(struct rt_lwp *lwp)
{
@ -657,7 +695,7 @@ int lwp_ref_dec(struct rt_lwp *lwp)
return ref;
}
struct rt_lwp* lwp_from_pid_locked(pid_t pid)
struct rt_lwp* lwp_from_pid_raw_locked(pid_t pid)
{
struct lwp_avl_struct *p;
struct rt_lwp *lwp = RT_NULL;
@ -671,6 +709,13 @@ struct rt_lwp* lwp_from_pid_locked(pid_t pid)
return lwp;
}
struct rt_lwp* lwp_from_pid_locked(pid_t pid)
{
struct rt_lwp* lwp;
lwp = pid ? lwp_from_pid_raw_locked(pid) : lwp_self();
return lwp;
}
pid_t lwp_to_pid(struct rt_lwp* lwp)
{
if (!lwp)
@ -733,162 +778,377 @@ pid_t lwp_name2pid(const char *name)
int lwp_getpid(void)
{
return ((struct rt_lwp *)rt_thread_self()->lwp)->pid;
rt_lwp_t lwp = lwp_self();
return lwp ? lwp->pid : 1;
}
/**
* @brief Wait for a child lwp to terminate. Do the essential recycling. Setup
* status code for user
*/
static sysret_t _lwp_wait_and_recycle(struct rt_lwp *child, rt_thread_t cur_thr,
struct rt_lwp *self_lwp, int *status,
int options)
rt_inline void _update_ru(struct rt_lwp *child, struct rt_lwp *self_lwp, struct rusage *uru)
{
sysret_t error;
int lwp_stat;
int terminated;
if (!child)
struct rusage rt_rusage;
if (uru != RT_NULL)
{
error = -RT_ERROR;
rt_rusage.ru_stime.tv_sec = child->rt_rusage.ru_stime.tv_sec;
rt_rusage.ru_stime.tv_usec = child->rt_rusage.ru_stime.tv_usec;
rt_rusage.ru_utime.tv_sec = child->rt_rusage.ru_utime.tv_sec;
rt_rusage.ru_utime.tv_usec = child->rt_rusage.ru_utime.tv_usec;
lwp_data_put(self_lwp, uru, &rt_rusage, sizeof(*uru));
}
}
/* do statistical summary and reap the child if necessary */
static rt_err_t _stats_and_reap_child(rt_lwp_t child, rt_thread_t cur_thr,
struct rt_lwp *self_lwp, int *ustatus,
int options, struct rusage *uru)
{
int lwp_stat = child->lwp_status;
/* report statistical data to process */
_update_ru(child, self_lwp, uru);
if (child->terminated && !(options & WNOWAIT))
{
/** Reap the child process if it's exited */
LOG_D("func %s: child detached", __func__);
lwp_pid_put(child);
lwp_children_unregister(self_lwp, child);
}
if (ustatus)
lwp_data_put(self_lwp, ustatus, &lwp_stat, sizeof(*ustatus));
return RT_EOK;
}
#define HAS_CHILD_BUT_NO_EVT (-1024)
/* check if the process has already terminated */
static sysret_t _query_event_from_lwp(rt_lwp_t child, rt_thread_t cur_thr, rt_lwp_t self_lwp,
int options, int *status)
{
sysret_t rc;
LWP_LOCK(child);
if (child->terminated)
{
rc = child->pid;
}
else if ((options & WSTOPPED) && child->jobctl_stopped && !child->wait_reap_stp)
{
child->wait_reap_stp = 1;
rc = child->pid;
}
else
{
/**
* Note: Critical Section
* - child lwp (RW. This will modify its parent if valid)
*/
LWP_LOCK(child);
if (child->terminated)
{
error = child->pid;
}
else if (rt_list_isempty(&child->wait_list))
{
/**
* Note: only one thread can wait on wait_list.
* dont reschedule before mutex unlock
*/
rt_enter_critical();
error = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
if (error == 0)
{
rt_list_insert_before(&child->wait_list, &RT_THREAD_LIST_NODE(cur_thr));
LWP_UNLOCK(child);
rt_set_errno(RT_EINTR);
rt_exit_critical();
rt_schedule();
/**
* Since parent is holding a reference to children this lock will
* not be freed before parent dereference to it.
*/
LWP_LOCK(child);
error = rt_get_errno();
if (error == RT_EINTR)
{
error = -EINTR;
}
else if (error != RT_EOK)
{
LOG_W("%s: unexpected error code %ld", __func__, error);
}
else
{
error = child->pid;
}
}
else
rt_exit_critical();
}
else
error = -RT_EINTR;
lwp_stat = child->lwp_ret;
terminated = child->terminated;
LWP_UNLOCK(child);
if (error > 0)
{
if (terminated)
{
LOG_D("func %s: child detached", __func__);
/** Reap the child process if it's exited */
lwp_pid_put(child);
lwp_children_unregister(self_lwp, child);
}
if (status)
lwp_data_put(self_lwp, status, &lwp_stat, sizeof(*status));
}
rc = HAS_CHILD_BUT_NO_EVT;
}
LWP_UNLOCK(child);
return error;
LOG_D("%s(child_pid=%d ('%s'), stopped=%d) => %d", __func__, child->pid, child->cmd, child->jobctl_stopped, rc);
return rc;
}
pid_t waitpid(pid_t pid, int *status, int options) __attribute__((alias("lwp_waitpid")));
pid_t lwp_waitpid(const pid_t pid, int *status, int options)
/* verify that the process is a child of the caller, and reap it */
static pid_t _verify_child_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp,
pid_t wait_pid, int options, int *ustatus,
struct rusage *uru)
{
pid_t rc = -1;
struct rt_thread *thread;
sysret_t rc;
struct rt_lwp *child;
struct rt_lwp *self_lwp;
thread = rt_thread_self();
self_lwp = lwp_self();
if (!self_lwp)
{
rc = -RT_EINVAL;
}
/* check if pid is reference to a valid child */
lwp_pid_lock_take();
child = lwp_from_pid_locked(wait_pid);
if (!child)
rc = -EINVAL;
else if (child->parent != self_lwp)
rc = -ESRCH;
else
{
if (pid > 0)
{
lwp_pid_lock_take();
child = lwp_from_pid_locked(pid);
if (child->parent != self_lwp)
rc = -RT_ERROR;
else
rc = RT_EOK;
lwp_pid_lock_release();
rc = wait_pid;
if (rc == RT_EOK)
rc = _lwp_wait_and_recycle(child, thread, self_lwp, status, options);
}
else if (pid == -1)
{
LWP_LOCK(self_lwp);
child = self_lwp->first_child;
LWP_UNLOCK(self_lwp);
RT_ASSERT(!child || child->parent == self_lwp);
rc = _lwp_wait_and_recycle(child, thread, self_lwp, status, options);
}
else
{
/* not supported yet */
rc = -RT_EINVAL;
}
}
lwp_pid_lock_release();
if (rc > 0)
{
LOG_D("%s: recycle child id %ld (status=0x%x)", __func__, (long)rc, status ? *status : 0);
rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
if (rc > 0)
{
_stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
}
}
return rc;
}
/* try to reap any child */
static pid_t _reap_any_child_pid(rt_thread_t cur_thr, rt_lwp_t self_lwp, pid_t pair_pgid,
int options, int *ustatus, struct rusage *uru)
{
sysret_t rc = -ECHILD;
struct rt_lwp *child;
LWP_LOCK(self_lwp);
child = self_lwp->first_child;
/* find an exited child if any */
while (child)
{
/* note: advance unconditionally; a bare `continue` here would spin forever */
if (!pair_pgid || child->pgid == pair_pgid)
{
rc = _query_event_from_lwp(child, cur_thr, self_lwp, options, ustatus);
if (rc > 0)
break;
}
child = child->sibling;
}
LWP_UNLOCK(self_lwp);
if (rc > 0)
{
_stats_and_reap_child(child, cur_thr, self_lwp, ustatus, options, uru);
}
return rc;
}
rt_err_t lwp_waitpid_kick(rt_lwp_t parent, rt_lwp_t self_lwp)
{
/* waker provide the message mainly through its lwp_status */
rt_wqueue_wakeup(&parent->waitpid_waiters, self_lwp);
return RT_EOK;
}
struct waitpid_handle {
struct rt_wqueue_node wq_node;
int options;
rt_lwp_t waker_lwp;
};
/* the IPC message is setup and notify the parent */
static int _waitq_filter(struct rt_wqueue_node *wait_node, void *key)
{
int can_accept_evt = 0;
rt_thread_t waiter = wait_node->polling_thread;
pid_t destiny = (pid_t)wait_node->key;
rt_lwp_t waker_lwp = key;
struct waitpid_handle *handle;
rt_ubase_t options;
handle = rt_container_of(wait_node, struct waitpid_handle, wq_node);
RT_ASSERT(waiter != RT_NULL);
options = handle->options;
/* filter out if waker is not the one */
if (destiny > 0)
{
/**
* in the waitpid immediate-return path we have already checked
* that pid is one of the child processes of the waiting thread
*/
can_accept_evt = waker_lwp->pid == destiny;
}
else if (destiny == -1)
{
can_accept_evt = waker_lwp->parent == waiter->lwp;
}
else
{
RT_ASSERT(rc != 0);
LOG_D("%s: wait failed with code %ld", __func__, (long)rc);
/* destiny == 0 || destiny == -pgid */
pid_t waiter_pgid;
if (destiny == 0)
{
waiter_pgid = lwp_pgid_get_byprocess(waiter->lwp);
}
else
{
waiter_pgid = -destiny;
}
can_accept_evt = waiter_pgid == lwp_pgid_get_byprocess(waker_lwp);
}
/* filter out if event is not desired */
if (can_accept_evt)
{
if ((options & WEXITED) && waker_lwp->terminated)
can_accept_evt = 1;
else if ((options & WSTOPPED) && WIFSTOPPED(waker_lwp->lwp_status))
can_accept_evt = 1;
else if ((options & WCONTINUED) && WIFCONTINUED(waker_lwp->lwp_status))
can_accept_evt = 1;
else
can_accept_evt = 0;
}
/* setup message for waiter if accepted */
if (can_accept_evt)
handle->waker_lwp = waker_lwp;
/* 0 if event is accepted, otherwise discard */
return !can_accept_evt;
}
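/**
 * Recap of the pid-argument dispatch implemented by the filter above, which
 * mirrors the POSIX waitpid() convention:
 *   destiny > 0  : accept only the child whose pid equals destiny
 *   destiny == -1: accept any child of the waiter
 *   destiny == 0 : accept any child in the waiter's own process group
 *   destiny < -1 : accept any child in the process group -destiny
 * An accepted waker must additionally carry a desired event (WEXITED,
 * WSTOPPED or WCONTINUED) before the waiter is woken.
 */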
/* the waiter cleans up the IPC message and waits for the desired event here */
static rt_err_t _wait_for_event(rt_thread_t cur_thr, rt_lwp_t self_lwp,
struct waitpid_handle *handle, pid_t destiny)
{
rt_err_t ret;
/* current context checking */
RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);
handle->wq_node.polling_thread = cur_thr;
handle->wq_node.key = destiny;
handle->wq_node.wakeup = _waitq_filter;
handle->wq_node.wqueue = &self_lwp->waitpid_waiters;
rt_list_init(&handle->wq_node.list);
cur_thr->error = RT_EOK;
LOG_D("%s(self_lwp=%d) wait for event", __func__, self_lwp->pid);
rt_enter_critical();
ret = rt_thread_suspend_with_flag(cur_thr, RT_INTERRUPTIBLE);
if (ret == RT_EOK)
{
rt_wqueue_add(handle->wq_node.wqueue, &handle->wq_node);
rt_exit_critical();
rt_schedule();
ret = cur_thr->error;
/**
* cur_thr->error is a positive value, but some legacy implementations
* use a negative one, so normalize it here to avoid errors
*/
ret = ret > 0 ? -ret : ret;
/**
* we don't actually rely on this, but clean it up anyway since the
* wakeup API sets it during operation and a stale flag can cause
* messy conditions
*/
handle->wq_node.wqueue->flag = RT_WQ_FLAG_CLEAN;
rt_wqueue_remove(&handle->wq_node);
}
else
{
/* failed to suspend, return immediately with failure */
rt_exit_critical();
}
return ret;
}
/* wait for IPC event and do the cleanup if necessary */
static sysret_t _wait_and_reap(rt_thread_t cur_thr, rt_lwp_t self_lwp, const pid_t pid,
int options, int *ustatus, struct rusage *uru)
{
sysret_t rc;
struct waitpid_handle handle;
rt_lwp_t waker;
/* wait for SIGCHLD or other async events */
handle.options = options;
handle.waker_lwp = 0;
rc = _wait_for_event(cur_thr, self_lwp, &handle, pid);
waker = handle.waker_lwp;
if (waker != RT_NULL)
{
rc = waker->pid;
/* check out if any process exited */
LOG_D("%s: woken up by lwp=%d", __func__, waker->pid);
_stats_and_reap_child(waker, cur_thr, self_lwp, ustatus, options, uru);
}
/**
* else if (rc != RT_EOK): we were unable to suspend, or woke up
* unexpectedly -> return the failure as-is
*/
return rc;
}
pid_t lwp_waitpid(const pid_t pid, int *status, int options, struct rusage *ru)
{
pid_t rc = -1;
struct rt_thread *cur_thr;
struct rt_lwp *self_lwp;
cur_thr = rt_thread_self();
self_lwp = lwp_self();
if (!cur_thr || !self_lwp)
{
rc = -EINVAL;
}
else
{
/* check if able to reap desired child immediately */
if (pid > 0)
{
/* if pid is child then try to reap it */
rc = _verify_child_and_reap(cur_thr, self_lwp, pid, options, status, ru);
}
else if (pid == -1)
{
/* any terminated child */
rc = _reap_any_child_pid(cur_thr, self_lwp, 0, options, status, ru);
}
else
{
/**
* (pid < -1 || pid == 0)
* any terminated child with matched pgid
*/
pid_t pair_pgid;
if (pid == 0)
{
pair_pgid = lwp_pgid_get_byprocess(self_lwp);
}
else
{
pair_pgid = -pid;
}
rc = _reap_any_child_pid(cur_thr, self_lwp, pair_pgid, options, status, ru);
}
if (rc == HAS_CHILD_BUT_NO_EVT)
{
if (!(options & WNOHANG))
{
/* otherwise, arrange a suspend and wait for async event */
options |= WEXITED;
rc = _wait_and_reap(cur_thr, self_lwp, pid, options, status, ru);
}
else
{
/**
* POSIX.1: If waitpid() was invoked with WNOHANG set in options,
* it has at least one child process specified by pid for which
* status is not available, and status is not available for any
* process specified by pid, 0 is returned
*/
rc = 0;
}
}
else
{
RT_ASSERT(rc != 0);
}
}
LOG_D("waitpid() => %d, *status=0x%x", rc, status ? *status:0);
return rc;
}
pid_t waitpid(pid_t pid, int *status, int options)
{
return lwp_waitpid(pid, status, options, RT_NULL);
}
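/**
 * Illustrative user-space sketch (not part of this patch): the classic
 * non-blocking reaping loop enabled by the WNOHANG behaviour above, e.g.
 * run from a SIGCHLD handler through the libc waitpid() wrapper:
 *
 *   int status;
 *   pid_t pid;
 *   while ((pid = waitpid(-1, &status, WNOHANG)) > 0)
 *   {
 *       if (WIFEXITED(status))
 *           ;   // child exited with WEXITSTATUS(status)
 *       else if (WIFSIGNALED(status))
 *           ;   // child killed by WTERMSIG(status)
 *   }
 *   // pid == 0 means children exist but none has changed state yet
 */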
#ifdef RT_USING_FINSH
/* copy from components/finsh/cmd.c */
static void object_split(int len)
@ -910,7 +1170,7 @@ static void print_thread_info(struct rt_thread* thread, int maxlen)
else
rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
#else
rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, thread->current_priority);
rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
#endif /*RT_USING_SMP*/
stat = (RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK);
@ -1044,7 +1304,7 @@ static void cmd_kill(int argc, char** argv)
}
}
lwp_pid_lock_take();
lwp_signal_kill(lwp_from_pid_locked(pid), sig, SI_USER, 0);
lwp_signal_kill(lwp_from_pid_raw_locked(pid), sig, SI_USER, 0);
lwp_pid_lock_release();
}
MSH_CMD_EXPORT_ALIAS(cmd_kill, kill, send a signal to a process);
@ -1061,7 +1321,7 @@ static void cmd_killall(int argc, char** argv)
while((pid = lwp_name2pid(argv[1])) > 0)
{
lwp_pid_lock_take();
lwp_signal_kill(lwp_from_pid_locked(pid), SIGKILL, SI_USER, 0);
lwp_signal_kill(lwp_from_pid_raw_locked(pid), SIGKILL, SI_USER, 0);
lwp_pid_lock_release();
rt_thread_mdelay(100);
}
@ -1073,15 +1333,15 @@ MSH_CMD_EXPORT_ALIAS(cmd_killall, killall, kill processes by name);
int lwp_check_exit_request(void)
{
rt_thread_t thread = rt_thread_self();
rt_base_t expected = LWP_EXIT_REQUEST_TRIGGERED;
rt_size_t expected = LWP_EXIT_REQUEST_TRIGGERED;
if (!thread->lwp)
{
return 0;
}
return rt_atomic_compare_exchange_strong(&thread->exit_request, &expected,
LWP_EXIT_REQUEST_IN_PROCESS);
return atomic_compare_exchange_strong(&thread->exit_request, &expected,
LWP_EXIT_REQUEST_IN_PROCESS);
}
static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread);
@ -1119,7 +1379,7 @@ static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
rt_sched_lock_level_t slvl;
rt_list_t *list;
rt_thread_t thread;
rt_base_t expected = LWP_EXIT_REQUEST_NONE;
rt_size_t expected = LWP_EXIT_REQUEST_NONE;
/* broadcast exit request for sibling threads */
LWP_LOCK(lwp);
@ -1127,8 +1387,8 @@ static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
{
thread = rt_list_entry(list, struct rt_thread, sibling);
rt_atomic_compare_exchange_strong(&thread->exit_request, &expected,
LWP_EXIT_REQUEST_TRIGGERED);
atomic_compare_exchange_strong(&thread->exit_request, &expected,
LWP_EXIT_REQUEST_TRIGGERED);
rt_sched_lock(&slvl);
/* dont release, otherwise thread may have been freed */
@ -1213,8 +1473,43 @@ static void _wait_sibling_exit(rt_lwp_t lwp, rt_thread_t curr_thread)
}
}
static void _notify_parent(rt_lwp_t lwp)
{
int si_code;
int signo_or_exitcode;
lwp_siginfo_ext_t ext;
lwp_status_t lwp_status = lwp->lwp_status;
rt_lwp_t parent = lwp->parent;
if (WIFSIGNALED(lwp_status))
{
si_code = (lwp_status & LWP_COREDUMP_FLAG) ? CLD_DUMPED : CLD_KILLED;
signo_or_exitcode = WTERMSIG(lwp_status);
}
else
{
si_code = CLD_EXITED;
signo_or_exitcode = WEXITSTATUS(lwp->lwp_status);
}
lwp_waitpid_kick(parent, lwp);
ext = rt_malloc(sizeof(struct lwp_siginfo_ext));
if (ext)
{
rt_thread_t cur_thr = rt_thread_self();
ext->sigchld.status = signo_or_exitcode;
ext->sigchld.stime = cur_thr->system_time;
ext->sigchld.utime = cur_thr->user_time;
}
lwp_signal_kill(parent, SIGCHLD, si_code, ext);
}
static void _resr_cleanup(struct rt_lwp *lwp)
{
lwp_jobctrl_on_exit(lwp);
LWP_LOCK(lwp);
lwp_signal_detach(&lwp->signal);
@ -1254,8 +1549,6 @@ static void _resr_cleanup(struct rt_lwp *lwp)
}
LWP_UNLOCK(lwp);
_pop_tty(lwp);
/**
* @brief Wakeup parent if it's waiting for this lwp, otherwise a signal
* will be sent to parent
@ -1264,29 +1557,26 @@ static void _resr_cleanup(struct rt_lwp *lwp)
* - the parent lwp (RW.)
*/
LWP_LOCK(lwp);
if (lwp->parent)
if (lwp->parent &&
!lwp_sigismember(&lwp->parent->signal.sig_action_nocldwait, SIGCHLD))
{
struct rt_thread *thread;
/* if we successfully raced to set lwp->terminated before the parent detached */
LWP_UNLOCK(lwp);
if (!rt_list_isempty(&lwp->wait_list))
{
thread = RT_THREAD_LIST_NODE_ENTRY(lwp->wait_list.next);
thread->error = RT_EOK;
thread->msg_ret = (void*)(rt_size_t)lwp->lwp_ret;
rt_thread_resume(thread);
}
else
{
/* children cannot detach itself and must wait for parent to take care of it */
lwp_signal_kill(lwp->parent, SIGCHLD, CLD_EXITED, 0);
}
/**
* Note: a child cannot detach itself and must wait for its parent
* to take care of it
*/
_notify_parent(lwp);
}
else
{
LWP_UNLOCK(lwp);
/* INFO: orphan hasn't parents to do the reap of pid */
/**
* if the process is an orphan, it has no parent to do the recycling.
* Otherwise, its parent has set up a flag to mask out the recycling
* event
*/
lwp_pid_put(lwp);
}
@ -1315,10 +1605,8 @@ static int _lwp_setaffinity(pid_t pid, int cpu)
int ret = -1;
lwp_pid_lock_take();
if(pid == 0)
lwp = lwp_self();
else
lwp = lwp_from_pid_locked(pid);
lwp = lwp_from_pid_locked(pid);
if (lwp)
{
#ifdef RT_USING_SMP

View File

@ -11,14 +11,14 @@
#ifndef LWP_PID_H__
#define LWP_PID_H__
#include "lwp.h"
#ifdef __cplusplus
extern "C" {
#endif
#define LWP_CREATE_FLAG_NONE 0x0000
#define LWP_CREATE_FLAG_ALLOC_PID 0x0001 /* allocate pid on lwp object create */
#define LWP_CREATE_FLAG_NONE 0x0000
#define LWP_CREATE_FLAG_ALLOC_PID 0x0001 /* allocate pid on lwp object create */
#define LWP_CREATE_FLAG_INIT_USPACE 0x0002 /* do user space initialization */
#define LWP_CREATE_FLAG_NOTRACE_EXEC 0x0004 /* not trace if execve() after fork() */
struct rt_lwp;
@ -46,6 +46,7 @@ void lwp_free(struct rt_lwp* lwp);
int lwp_ref_inc(struct rt_lwp *lwp);
int lwp_ref_dec(struct rt_lwp *lwp);
struct rt_lwp* lwp_from_pid_raw_locked(pid_t pid);
struct rt_lwp* lwp_from_pid_locked(pid_t pid);
pid_t lwp_to_pid(struct rt_lwp* lwp);
@ -54,7 +55,29 @@ char* lwp_pid2name(int32_t pid);
int lwp_getpid(void);
pid_t lwp_waitpid(const pid_t pid, int *status, int options);
struct rusage
{
struct timeval ru_utime;
struct timeval ru_stime;
long ru_maxrss;
long ru_ixrss;
long ru_idrss;
long ru_isrss;
long ru_minflt;
long ru_majflt;
long ru_nswap;
long ru_inblock;
long ru_oublock;
long ru_msgsnd;
long ru_msgrcv;
long ru_nsignals;
long ru_nvcsw;
long ru_nivcsw;
long reserved[16];
};
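/**
 * Note: in this implementation only ru_utime and ru_stime are filled in by
 * the kernel (see _update_ru() in the companion lwp_pid.c); the remaining
 * fields are placeholders kept for layout compatibility with the POSIX
 * struct rusage.
 */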
pid_t lwp_waitpid(const pid_t pid, int *status, int options, struct rusage *ru);
rt_err_t lwp_waitpid_kick(struct rt_lwp *parent, struct rt_lwp *self_lwp);
pid_t waitpid(pid_t pid, int *status, int options);
long list_process(void);
@ -85,8 +108,9 @@ rt_inline void lwp_from_pid_release_lock(struct rt_lwp *lwp)
lwp_ref_dec(lwp);
}
void lwp_thread_exit(rt_thread_t thread, rt_base_t status);
void lwp_exit(struct rt_lwp *lwp, rt_base_t status);
typedef rt_base_t lwp_status_t;
void lwp_thread_exit(rt_thread_t thread, int status);
void lwp_exit(struct rt_lwp *lwp, lwp_status_t status);
#ifdef __cplusplus
}

View File

@ -1,459 +0,0 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021/01/02 bernard the first version
* 2022/12/18 bernard fix the _m_lock to tid in user land.
*/
#include "lwp_internal.h"
#include <rtthread.h>
#ifdef ARCH_MM_MMU
#include <lwp_user_mm.h>
#endif
#include <sys/time.h>
#include <syscall_generic.h>
#define PMUTEX_NORMAL 0 /* does not support recursive locking */
#define PMUTEX_RECURSIVE 1 /* supports recursive locking */
#define PMUTEX_ERRORCHECK 2 /* this type of mutex provides error checking */
struct rt_pmutex
{
union
{
rt_mutex_t kmutex;
rt_sem_t ksem; /* use sem to emulate the mutex without recursive */
} lock;
struct lwp_avl_struct node;
struct rt_object *custom_obj;
rt_uint8_t type; /* pmutex type */
};
/*
* userspace mutex definitions in musl
*/
struct rt_umutex
{
union
{
int __i[6];
volatile int __vi[6];
volatile void *volatile __p[6];
} __u;
};
#define _m_type __u.__i[0]
#define _m_lock __u.__vi[1]
#define _m_waiters __u.__vi[2]
#define _m_prev __u.__p[3]
#define _m_next __u.__p[4]
#define _m_count __u.__i[5]
static struct rt_mutex _pmutex_lock;
static int pmutex_system_init(void)
{
rt_mutex_init(&_pmutex_lock, "pmtxLock", RT_IPC_FLAG_FIFO);
return 0;
}
INIT_PREV_EXPORT(pmutex_system_init);
static rt_err_t pmutex_destory(void *data)
{
rt_err_t ret = -1;
struct rt_pmutex *pmutex = (struct rt_pmutex *)data;
if (pmutex)
{
lwp_mutex_take_safe(&_pmutex_lock, RT_WAITING_FOREVER, 0);
/* remove pmutex from pmutext avl */
lwp_avl_remove(&pmutex->node, (struct lwp_avl_struct **)pmutex->node.data);
lwp_mutex_release_safe(&_pmutex_lock);
if (pmutex->type == PMUTEX_NORMAL)
{
rt_sem_delete(pmutex->lock.ksem);
}
else
{
rt_mutex_delete(pmutex->lock.kmutex);
}
/* release object */
rt_free(pmutex);
ret = 0;
}
return ret;
}
static struct rt_pmutex* pmutex_create(void *umutex, struct rt_lwp *lwp)
{
struct rt_pmutex *pmutex = RT_NULL;
struct rt_object *obj = RT_NULL;
rt_ubase_t type;
if (!lwp)
{
return RT_NULL;
}
long *p = (long *)umutex;
/* umutex[0] bit[0-1] saved mutex type */
type = *p & 3;
if (type != PMUTEX_NORMAL && type != PMUTEX_RECURSIVE && type != PMUTEX_ERRORCHECK)
{
return RT_NULL;
}
pmutex = (struct rt_pmutex *)rt_malloc(sizeof(struct rt_pmutex));
if (!pmutex)
{
return RT_NULL;
}
if (type == PMUTEX_NORMAL)
{
pmutex->lock.ksem = rt_sem_create("pmutex", 1, RT_IPC_FLAG_PRIO);
if (!pmutex->lock.ksem)
{
rt_free(pmutex);
return RT_NULL;
}
}
else
{
pmutex->lock.kmutex = rt_mutex_create("pmutex", RT_IPC_FLAG_PRIO);
if (!pmutex->lock.kmutex)
{
rt_free(pmutex);
return RT_NULL;
}
}
obj = rt_custom_object_create("pmutex", (void *)pmutex, pmutex_destory);
if (!obj)
{
if (pmutex->type == PMUTEX_NORMAL)
{
rt_sem_delete(pmutex->lock.ksem);
}
else
{
rt_mutex_delete(pmutex->lock.kmutex);
}
rt_free(pmutex);
return RT_NULL;
}
pmutex->node.avl_key = (avl_key_t)umutex;
pmutex->node.data = &lwp->address_search_head;
pmutex->custom_obj = obj;
pmutex->type = type;
/* insert into pmutex head */
lwp_avl_insert(&pmutex->node, &lwp->address_search_head);
return pmutex;
}
static struct rt_pmutex* pmutex_get(void *umutex, struct rt_lwp *lwp)
{
struct rt_pmutex *pmutex = RT_NULL;
struct lwp_avl_struct *node = RT_NULL;
node = lwp_avl_find((avl_key_t)umutex, lwp->address_search_head);
if (!node)
{
return RT_NULL;
}
pmutex = rt_container_of(node, struct rt_pmutex, node);
return pmutex;
}
static int _pthread_mutex_init(void *umutex)
{
struct rt_lwp *lwp = RT_NULL;
struct rt_pmutex *pmutex = RT_NULL;
rt_err_t lock_ret = 0;
/* umutex union is 6 x (void *) */
if (!lwp_user_accessable(umutex, sizeof(void *) * 6))
{
rt_set_errno(EINVAL);
return -EINVAL;
}
lock_ret = rt_mutex_take_interruptible(&_pmutex_lock, RT_WAITING_FOREVER);
if (lock_ret != RT_EOK)
{
rt_set_errno(EAGAIN);
return -EAGAIN;
}
lwp = lwp_self();
pmutex = pmutex_get(umutex, lwp);
if (pmutex == RT_NULL)
{
/* create a pmutex according to this umutex */
pmutex = pmutex_create(umutex, lwp);
if (pmutex == RT_NULL)
{
rt_mutex_release(&_pmutex_lock);
rt_set_errno(ENOMEM);
return -ENOMEM;
}
if (lwp_user_object_add(lwp, pmutex->custom_obj) != 0)
{
rt_custom_object_destroy(pmutex->custom_obj);
rt_set_errno(ENOMEM);
return -ENOMEM;
}
}
else
{
lwp_mutex_take_safe(&_pmutex_lock, RT_WAITING_FOREVER, 1);
if (pmutex->type == PMUTEX_NORMAL)
{
pmutex->lock.ksem->value = 1;
}
else
{
pmutex->lock.kmutex->owner = RT_NULL;
pmutex->lock.kmutex->priority = 0xFF;
pmutex->lock.kmutex->hold = 0;
pmutex->lock.kmutex->ceiling_priority = 0xFF;
}
lwp_mutex_release_safe(&_pmutex_lock);
}
rt_mutex_release(&_pmutex_lock);
return 0;
}
static int _pthread_mutex_lock_timeout(void *umutex, struct timespec *timeout)
{
struct rt_lwp *lwp = RT_NULL;
struct rt_pmutex *pmutex = RT_NULL;
struct rt_umutex *umutex_p = (struct rt_umutex*)umutex;
rt_err_t lock_ret = 0;
rt_int32_t time = RT_WAITING_FOREVER;
if (!lwp_user_accessable((void *)umutex, sizeof(struct rt_umutex)))
{
rt_set_errno(EINVAL);
return -EINVAL;
}
if (timeout)
{
if (!lwp_user_accessable((void *)timeout, sizeof(struct timespec)))
{
rt_set_errno(EINVAL);
return -EINVAL;
}
time = rt_timespec_to_tick(timeout);
}
lock_ret = rt_mutex_take_interruptible(&_pmutex_lock, RT_WAITING_FOREVER);
if (lock_ret != RT_EOK)
{
rt_set_errno(EINTR);
return -EINTR;
}
lwp = lwp_self();
pmutex = pmutex_get(umutex, lwp);
if (pmutex == RT_NULL)
{
rt_mutex_release(&_pmutex_lock);
rt_set_errno(EINVAL);
return -ENOMEM; /* umutex not recorded in kernel */
}
rt_mutex_release(&_pmutex_lock);
switch (pmutex->type)
{
case PMUTEX_NORMAL:
lock_ret = rt_sem_take_interruptible(pmutex->lock.ksem, time);
break;
case PMUTEX_RECURSIVE:
lock_ret = rt_mutex_take_interruptible(pmutex->lock.kmutex, time);
if (lock_ret == RT_EOK)
{
umutex_p->_m_lock = rt_thread_self()->tid;
}
break;
case PMUTEX_ERRORCHECK:
lock_ret = lwp_mutex_take_safe(&_pmutex_lock, RT_WAITING_FOREVER, 1);
if (lock_ret != RT_EOK)
{
return -EINTR;
}
if (pmutex->lock.kmutex->owner == rt_thread_self())
{
lwp_mutex_release_safe(&_pmutex_lock);
return -EDEADLK;
}
lwp_mutex_release_safe(&_pmutex_lock);
lock_ret = rt_mutex_take_interruptible(pmutex->lock.kmutex, time);
if (lock_ret == RT_EOK)
{
umutex_p->_m_lock = rt_thread_self()->tid;
}
break;
default: /* unknown type */
return -EINVAL;
}
if (lock_ret != RT_EOK)
{
if (lock_ret == -RT_ETIMEOUT)
{
if (time == 0) /* timeout is 0, means try lock failed */
{
rt_set_errno(EBUSY);
return -EBUSY;
}
else
{
rt_set_errno(ETIMEDOUT);
return -ETIMEDOUT;
}
}
else if (lock_ret == -RT_EINTR)
{
rt_set_errno(EINTR);
return -EINTR;
}
else
{
rt_set_errno(EAGAIN);
return -EAGAIN;
}
}
return 0;
}
static int _pthread_mutex_unlock(void *umutex)
{
rt_err_t lock_ret = 0;
struct rt_lwp *lwp = RT_NULL;
struct rt_pmutex *pmutex = RT_NULL;
struct rt_umutex *umutex_p = (struct rt_umutex*)umutex;
lock_ret = rt_mutex_take_interruptible(&_pmutex_lock, RT_WAITING_FOREVER);
if (lock_ret != RT_EOK)
{
rt_set_errno(EAGAIN);
return -EAGAIN;
}
lwp = lwp_self();
pmutex = pmutex_get(umutex, lwp);
if (pmutex == RT_NULL)
{
rt_mutex_release(&_pmutex_lock);
rt_set_errno(EPERM);
return -EPERM; /* unlocking a static mutex that is already unlocked */
}
rt_mutex_release(&_pmutex_lock);
switch (pmutex->type)
{
case PMUTEX_NORMAL:
if (pmutex->lock.ksem->value >= 1)
{
rt_set_errno(EPERM);
return -EPERM; /* unlocking a dynamic mutex that is already unlocked */
}
else
{
lock_ret = rt_sem_release(pmutex->lock.ksem);
}
break;
case PMUTEX_RECURSIVE:
case PMUTEX_ERRORCHECK:
lock_ret = rt_mutex_release(pmutex->lock.kmutex);
if ((lock_ret == RT_EOK) && pmutex->lock.kmutex->owner == NULL)
{
umutex_p->_m_lock = 0;
}
break;
default: /* unknown type */
return -EINVAL;
}
if (lock_ret != RT_EOK)
{
rt_set_errno(EPERM);
return -EPERM;
}
return 0;
}
static int _pthread_mutex_destroy(void *umutex)
{
struct rt_lwp *lwp = RT_NULL;
struct rt_pmutex *pmutex = RT_NULL;
rt_err_t lock_ret = 0;
lock_ret = rt_mutex_take_interruptible(&_pmutex_lock, RT_WAITING_FOREVER);
if (lock_ret != RT_EOK)
{
rt_set_errno(EAGAIN);
return -EAGAIN;
}
lwp = lwp_self();
pmutex = pmutex_get(umutex, lwp);
if (pmutex == RT_NULL)
{
rt_mutex_release(&_pmutex_lock);
rt_set_errno(EINVAL);
return -EINVAL;
}
lwp_user_object_delete(lwp, pmutex->custom_obj);
rt_mutex_release(&_pmutex_lock);
return 0;
}
sysret_t sys_pmutex(void *umutex, int op, void *arg)
{
int ret = -EINVAL;
switch (op)
{
case PMUTEX_INIT:
ret = _pthread_mutex_init(umutex);
break;
case PMUTEX_LOCK:
ret = _pthread_mutex_lock_timeout(umutex, (struct timespec*)arg);
if (ret == -ENOMEM)
{
/* lock not init, try init it and lock again. */
ret = _pthread_mutex_init(umutex);
if (ret == 0)
{
ret = _pthread_mutex_lock_timeout(umutex, (struct timespec*)arg);
}
}
break;
case PMUTEX_UNLOCK:
ret = _pthread_mutex_unlock(umutex);
break;
case PMUTEX_DESTROY:
ret = _pthread_mutex_destroy(umutex);
break;
default:
rt_set_errno(EINVAL);
break;
}
return ret;
}

View File

@ -0,0 +1,432 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-17 xqyjlj the first version
* 2023-11-29 Shell Add direct reference of sess for group
*/
#include "lwp.h"
#include "lwp_internal.h"
#include "lwp_syscall.h"
#include "terminal/terminal.h"
#define DBG_TAG "lwp.session"
#define DBG_LVL DBG_WARNING
#include <rtdbg.h>
rt_session_t lwp_session_find(pid_t sid)
{
rt_base_t level;
rt_session_t session = RT_NULL;
rt_list_t *node = RT_NULL;
struct rt_object_information *information = RT_NULL;
information = rt_object_get_information(RT_Object_Class_Session);
/* parameter check */
if ((sid < 0) || (information == RT_NULL))
{
return RT_NULL;
}
if (sid == 0)
{
sid = lwp_getpid();
}
/* enter critical */
level = rt_spin_lock_irqsave(&(information->spinlock));
/* try to find session */
rt_list_for_each(node, &(information->object_list))
{
session = (rt_session_t)rt_list_entry(node, struct rt_object, list);
if (session->sid == sid)
{
rt_spin_unlock_irqrestore(&(information->spinlock), level);
return session;
}
}
rt_spin_unlock_irqrestore(&(information->spinlock), level);
return RT_NULL;
}
rt_session_t lwp_session_create(rt_lwp_t leader)
{
rt_session_t session = RT_NULL;
/* parameter check */
if (leader == RT_NULL)
{
return RT_NULL;
}
session = rt_malloc(sizeof(struct rt_session));
if (session != RT_NULL)
{
rt_object_init(&(session->object), RT_Object_Class_Session, "session");
rt_list_init(&(session->processgroup));
rt_mutex_init(&(session->mutex), "session", RT_IPC_FLAG_PRIO);
session->leader = leader;
session->sid = leader->pid;
lwp_pgrp_update_children_info(leader->pgrp, session->sid, leader->pgid);
session->foreground_pgid = session->sid;
session->ctty = RT_NULL;
}
return session;
}
int lwp_session_delete(rt_session_t session)
{
int retry = 1;
lwp_tty_t ctty;
/* parameter check */
if (session == RT_NULL)
{
return -EINVAL;
}
/* clear children sid */
lwp_session_update_children_info(session, 0);
while (retry)
{
retry = 0;
ctty = session->ctty;
SESS_LOCK_NESTED(session);
if (session->ctty == ctty)
{
if (ctty)
{
SESS_UNLOCK(session);
/**
* Note: it's safe to release the session lock now. Even if someone
* races to acquire the tty, it's safe under the protection of
* tty_lock() and the check inside
*/
tty_lock(ctty);
tty_rel_sess(ctty, session);
session->ctty = RT_NULL;
}
else
{
SESS_UNLOCK(session);
}
}
else
{
SESS_UNLOCK(session);
retry = 1;
}
}
rt_object_detach(&(session->object));
rt_mutex_detach(&(session->mutex));
rt_free(session);
return 0;
}
int lwp_session_insert(rt_session_t session, rt_processgroup_t group)
{
/* parameter check */
if (session == RT_NULL || group == RT_NULL)
{
return -EINVAL;
}
SESS_LOCK_NESTED(session);
PGRP_LOCK_NESTED(group);
group->sid = session->sid;
group->session = session;
lwp_pgrp_update_children_info(group, session->sid, group->pgid);
rt_list_insert_after(&(session->processgroup), &(group->pgrp_list_node));
PGRP_UNLOCK(group);
SESS_UNLOCK(session);
return 0;
}
int lwp_session_remove(rt_session_t session, rt_processgroup_t group)
{
rt_bool_t is_empty = RT_FALSE;
/* parameter check */
if (session == RT_NULL || group == RT_NULL)
{
return -EINVAL;
}
SESS_LOCK_NESTED(session);
PGRP_LOCK_NESTED(group);
rt_list_remove(&(group->pgrp_list_node));
/* clear children sid */
lwp_pgrp_update_children_info(group, 0, group->pgid);
group->sid = 0;
group->session = RT_NULL;
PGRP_UNLOCK(group);
is_empty = rt_list_isempty(&(session->processgroup));
SESS_UNLOCK(session);
if (is_empty)
{
lwp_session_delete(session);
return 1;
}
return 0;
}
int lwp_session_move(rt_session_t session, rt_processgroup_t group)
{
rt_session_t prev_session;
/* parameter check */
if (session == RT_NULL || group == RT_NULL)
{
return -EINVAL;
}
if (lwp_sid_get_bysession(session) == lwp_sid_get_bypgrp(group))
{
return 0;
}
SESS_LOCK(session);
prev_session = group->session;
if (prev_session)
{
SESS_LOCK(prev_session);
lwp_session_remove(prev_session, group);
SESS_UNLOCK(prev_session);
}
lwp_session_insert(session, group);
SESS_UNLOCK(session);
return 0;
}
int lwp_session_update_children_info(rt_session_t session, pid_t sid)
{
rt_list_t *node = RT_NULL;
rt_processgroup_t group = RT_NULL;
if (session == RT_NULL)
{
return -EINVAL;
}
SESS_LOCK_NESTED(session);
rt_list_for_each(node, &(session->processgroup))
{
group = (rt_processgroup_t)rt_list_entry(node, struct rt_processgroup, pgrp_list_node);
PGRP_LOCK_NESTED(group);
if (sid != -1)
{
group->sid = sid;
group->session = session;
lwp_pgrp_update_children_info(group, sid, group->pgid);
}
PGRP_UNLOCK(group);
}
SESS_UNLOCK(session);
return 0;
}
int lwp_session_set_foreground(rt_session_t session, pid_t pgid)
{
rt_processgroup_t group = RT_NULL;
rt_list_t *node = RT_NULL;
rt_bool_t is_contains = RT_FALSE;
/* parameter check */
if (session == RT_NULL || pgid <= 0)
{
return -EINVAL;
}
SESS_LOCK(session);
rt_list_for_each(node, &(session->processgroup))
{
group = (rt_processgroup_t)rt_list_entry(node, struct rt_processgroup, pgrp_list_node);
PGRP_LOCK(group);
if (group->pgid == pgid)
{
is_contains = RT_TRUE;
}
PGRP_UNLOCK(group);
}
if (is_contains)
{
session->foreground_pgid = pgid;
// TODO: maybe notify tty
}
SESS_UNLOCK(session);
return is_contains ? 0 : -EINVAL;
}
/**
* setsid() creates a new session if the calling process is not a process group leader.
* The calling process is the leader of the new session (i.e., its session ID is made the same as its process ID).
* The calling process also becomes the process group leader of a new process group in the session
* (i.e., its process group ID is made the same as its process ID).
*/
sysret_t sys_setsid(void)
{
rt_lwp_t process;
pid_t pid;
rt_processgroup_t group;
rt_session_t session;
sysret_t err = 0;
process = lwp_self();
pid = lwp_to_pid(process);
/**
* if the calling process is already a process group leader.
*/
if (lwp_pgrp_find(pid))
{
err = -EPERM;
goto exit;
}
group = lwp_pgrp_create(process);
if (group)
{
lwp_pgrp_move(group, process);
session = lwp_session_create(process);
if (session)
{
lwp_session_move(session, group);
err = lwp_sid_get_bysession(session);
}
else
{
/* creating the session failed: roll back the group and report */
lwp_pgrp_delete(group);
err = -ENOMEM;
}
}
else
{
err = -ENOMEM;
}
exit:
return err;
}
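/**
 * Illustrative user-space sketch (not part of this patch): the canonical
 * daemonizing step that relies on the semantics above. fork() first, so the
 * caller is no longer a process group leader and setsid() can succeed:
 *
 *   if (fork() > 0)
 *       _exit(0);         // parent leaves; the child is not a group leader
 *   if (setsid() < 0)     // child becomes session and group leader,
 *       perror("setsid"); // detached from any controlling terminal
 */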
/**
* getsid() returns the session ID of the process with process ID pid.
* If pid is 0, getsid() returns the session ID of the calling process.
*/
sysret_t sys_getsid(pid_t pid)
{
rt_lwp_t process, self_process;
pid_t sid;
lwp_pid_lock_take();
process = lwp_from_pid_locked(pid);
lwp_pid_lock_release();
if (process == RT_NULL)
{
return -ESRCH;
}
self_process = lwp_self();
sid = lwp_sid_get_byprocess(process);
if (sid != lwp_sid_get_byprocess(self_process))
{
/**
* A process with process ID pid exists, but it is not in the same session as the calling process,
* and the implementation considers this an error.
*
* Note: Linux does not return EPERM.
*/
return -EPERM;
}
return sid;
}
#ifdef RT_USING_FINSH
#include "finsh.h"
long list_session(void)
{
int count = 0, index;
rt_session_t *sessions;
rt_session_t session;
rt_thread_t thread;
char name[RT_NAME_MAX];
rt_kprintf("SID leader process\n");
rt_kprintf("---- ----------------\n");
count = rt_object_get_length(RT_Object_Class_Session);
if (count > 0)
{
/* get pointers */
sessions = (rt_session_t *)rt_calloc(count, sizeof(rt_session_t));
if (sessions)
{
index = rt_object_get_pointers(RT_Object_Class_Session, (rt_object_t *)sessions, count);
if (index > 0)
{
for (index = 0; index < count; index++)
{
struct rt_session se;
session = sessions[index];
SESS_LOCK(session);
rt_memcpy(&se, session, sizeof(struct rt_session));
SESS_UNLOCK(session);
if (se.leader)
{
thread = rt_list_entry(se.leader->t_grp.prev, struct rt_thread, sibling);
rt_strncpy(name, thread->parent.name, RT_NAME_MAX);
}
else
{
rt_strncpy(name, "nil", RT_NAME_MAX);
}
rt_kprintf("%4d %-*.*s\n", se.sid, RT_NAME_MAX, RT_NAME_MAX, name);
}
}
rt_free(sessions);
}
}
return 0;
}
MSH_CMD_EXPORT(list_session, list session);
#endif

View File

@ -1,27 +0,0 @@
#include <rthw.h>
#include <rtthread.h>
#include "lwp.h"
//#include "lwp_tid.h"
#include "lwp_pid.h"
int setsid(void)
{
int err = -EPERM;
struct rt_thread *current_thread = rt_thread_self();
struct rt_lwp *current_lwp = (struct rt_lwp *)rt_thread_self()->lwp;
if (current_lwp->session == current_thread->tid)
{
return err;
}
current_lwp->session = current_thread->tid;
current_lwp->__pgrp = current_thread->tid;
current_lwp->leader = 1;
current_lwp->tty = RT_NULL;
current_lwp->tty_old_pgrp = 0;
err = current_lwp->session;
return err;
}

View File

@ -10,6 +10,9 @@
* 2023-07-04 Shell Support siginfo, sigqueue
* remove lwp_signal_backup/restore() to reduce architecture codes
* update the generation, pending and delivery routines
* 2023-11-22 Shell Support for job control signal. Fixup of signal catch while
* some of the signals are blocked, but no further dequeue is applied.
* Add itimer support
*/
#define __RT_IPC_SOURCE__
#define DBG_TAG "lwp.signal"
@ -24,7 +27,7 @@
#include "sys/signal.h"
#include "syscall_generic.h"
static lwp_siginfo_t siginfo_create(int signo, int code, int value)
static lwp_siginfo_t siginfo_create(rt_thread_t current, int signo, int code, lwp_siginfo_ext_t ext)
{
lwp_siginfo_t siginfo;
struct rt_lwp *self_lwp;
@ -35,10 +38,10 @@ static lwp_siginfo_t siginfo_create(int signo, int code, int value)
{
siginfo->ksiginfo.signo = signo;
siginfo->ksiginfo.code = code;
siginfo->ksiginfo.value = value;
siginfo->ext = ext;
self_lwp = lwp_self();
self_thr = rt_thread_self();
self_thr = current;
self_lwp = current->lwp;
if (self_lwp)
{
siginfo->ksiginfo.from_pid = self_lwp->pid;
@ -56,6 +59,12 @@ static lwp_siginfo_t siginfo_create(int signo, int code, int value)
rt_inline void siginfo_delete(lwp_siginfo_t siginfo)
{
if (siginfo->ext)
{
rt_free(siginfo->ext);
siginfo->ext = RT_NULL;
}
rt_free(siginfo);
}
@ -296,10 +305,30 @@ static lwp_siginfo_t sigqueue_dequeue(lwp_sigqueue_t sigqueue, int signo)
return found;
}
/**
* Discard all the signals matching `signo` in the sigqueue
*/
static void sigqueue_discard(lwp_sigqueue_t sigqueue, int signo)
{
lwp_siginfo_t queuing_si;
while (!sigqueue_isempty(sigqueue))
while (sigqueue_ismember(sigqueue, signo))
{
queuing_si = sigqueue_dequeue(sigqueue, signo);
siginfo_delete(queuing_si);
}
}
/**
* Discard all the queuing signals in sigset
*/
static void sigqueue_discard_sigset(lwp_sigqueue_t sigqueue, lwp_sigset_t *sigset)
{
lwp_siginfo_t queuing_si;
lwp_sigset_t mask;
int signo;
_signotsets(&mask, sigset);
while ((signo = sigqueue_peek(sigqueue, &mask)) != 0)
{
queuing_si = sigqueue_dequeue(sigqueue, signo);
siginfo_delete(queuing_si);
@ -312,11 +341,21 @@ RT_STATIC_ASSERT(lp_width_same, sizeof(void *) == sizeof(long));
/** translate lwp siginfo to user siginfo_t */
rt_inline void siginfo_k2u(lwp_siginfo_t ksigi, siginfo_t *usigi)
{
int signo = ksigi->ksiginfo.signo;
usigi->si_code = ksigi->ksiginfo.code;
usigi->si_signo = ksigi->ksiginfo.signo;
usigi->si_value.sival_ptr = (void *)ksigi->ksiginfo.value;
usigi->si_signo = signo;
usigi->si_pid = ksigi->ksiginfo.from_pid;
if (ksigi->ext)
{
if (signo == SIGCHLD)
{
usigi->si_status = ksigi->ext->sigchld.status;
usigi->si_utime = ksigi->ext->sigchld.utime;
usigi->si_stime = ksigi->ext->sigchld.stime;
}
}
/* deprecated field */
usigi->si_errno = 0;
}
@ -417,7 +456,10 @@ rt_err_t lwp_signal_init(struct lwp_signal *sig)
memset(&sig->sig_action_onstack, 0, sizeof(sig->sig_action_onstack));
memset(&sig->sig_action_restart, 0, sizeof(sig->sig_action_restart));
memset(&sig->sig_action_siginfo, 0, sizeof(sig->sig_action_siginfo));
memset(&sig->sig_action_nocldstop, 0, sizeof(sig->sig_action_nocldstop));
memset(&sig->sig_action_nocldwait, 0, sizeof(sig->sig_action_nocldwait));
lwp_sigqueue_init(&sig->sig_queue);
return rc;
}
@ -433,17 +475,19 @@ rt_err_t lwp_signal_detach(struct lwp_signal *signal)
int lwp_thread_signal_suspend_check(rt_thread_t thread, int suspend_flag)
{
struct rt_lwp *lwp = (struct rt_lwp*)thread->lwp;
struct rt_lwp *lwp = (struct rt_lwp *)thread->lwp;
lwp_sigset_t sigmask = thread->signal.sigset_mask;
int ret = 0;
_sigaddset(&sigmask, SIGCONT);
switch (suspend_flag)
{
case RT_INTERRUPTIBLE:
if (!sigqueue_isempty(_SIGQ(thread)))
if (sigqueue_peek(_SIGQ(thread), &sigmask))
{
break;
}
if (thread->lwp && !sigqueue_isempty(_SIGQ(lwp)))
if (thread->lwp && sigqueue_peek(_SIGQ(lwp), &sigmask))
{
break;
}
@ -470,84 +514,202 @@ int lwp_thread_signal_suspend_check(rt_thread_t thread, int suspend_flag)
return ret;
}
void lwp_thread_signal_catch(void *exp_frame)
rt_inline rt_bool_t _is_jobctl_signal(rt_lwp_t lwp, int signo)
{
int signo = 0;
struct rt_thread *thread;
struct rt_lwp *lwp;
lwp_siginfo_t siginfo = 0;
lwp_sigqueue_t pending;
lwp_sigset_t *sig_mask;
lwp_sigset_t save_sig_mask;
lwp_sigset_t new_sig_mask;
lwp_sighandler_t handler = 0;
siginfo_t usiginfo;
siginfo_t *p_usi = RT_NULL;
lwp_sigset_t jobctl_sigset = lwp_sigset_init(LWP_SIG_JOBCTL_SET);
thread = rt_thread_self();
lwp = (struct rt_lwp*)thread->lwp;
return lwp_sigismember(&jobctl_sigset, signo);
}
RT_ASSERT(!!lwp);
LWP_LOCK(lwp);
rt_inline rt_bool_t _is_stop_signal(rt_lwp_t lwp, int signo)
{
lwp_sigset_t stop_sigset = lwp_sigset_init(LWP_SIG_STOP_SET);
/* check if signal exist */
if (!sigqueue_isempty(_SIGQ(thread)))
return lwp_sigismember(&stop_sigset, signo);
}
rt_inline rt_bool_t _need_notify_status_changed(rt_lwp_t lwp, int signo)
{
RT_ASSERT(lwp_sigismember(&lwp_sigset_init(LWP_SIG_JOBCTL_SET), signo));
return !lwp_sigismember(&lwp->signal.sig_action_nocldstop, SIGCHLD);
}
/**
* wakeup the waitpid_waiters if any, and try to generate SIGCHLD if it is
* not disabled explicitly by the user.
*
* TODO: This event is always per-process and doesn't make a whole lot of
* sense for ptracers, who shouldn't consume the state via wait(2) either,
* but, for backward compatibility, notify the ptracer of the group leader
* too unless it's gonna be a duplicate.
*/
static void _notify_parent_and_leader(rt_lwp_t child_lwp, rt_thread_t child_thr, int trig_signo, rt_bool_t is_stop)
{
int si_code;
lwp_siginfo_ext_t ext;
rt_lwp_t parent_lwp = child_lwp->parent;
if (!parent_lwp)
return ;
/* prepare the event data for parent to query */
if (is_stop)
{
pending = _SIGQ(thread);
sig_mask = &thread->signal.sigset_mask;
}
else if (!sigqueue_isempty(_SIGQ(lwp)))
{
pending = _SIGQ(lwp);
sig_mask = &thread->signal.sigset_mask;
si_code = CLD_STOPPED;
child_lwp->lwp_status = LWP_CREATE_STAT_STOPPED(trig_signo);
}
else
{
pending = RT_NULL;
si_code = CLD_CONTINUED;
child_lwp->lwp_status = LWP_CREATE_STAT_CONTINUED;
}
if (pending)
/* wakeup waiter on waitpid(2) */
lwp_waitpid_kick(parent_lwp, child_lwp);
if (_need_notify_status_changed(parent_lwp, trig_signo))
{
/* peek the pending signal */
signo = sigqueue_peek(pending, sig_mask);
if (signo)
ext = rt_malloc(sizeof(struct lwp_siginfo_ext));
if (ext)
{
siginfo = sigqueue_dequeue(pending, signo);
RT_ASSERT(siginfo != RT_NULL);
handler = _get_sighandler_locked(lwp, signo);
ext->sigchld.status = trig_signo;
/* IGN signal will never be queued */
RT_ASSERT(handler != LWP_SIG_ACT_IGN);
/* TODO: signal usage is not supported */
ext->sigchld.stime = child_thr->system_time;
ext->sigchld.utime = child_thr->user_time;
}
/* copy the blocked signal mask from the registered signal action */
memcpy(&new_sig_mask, &lwp->signal.sig_action_mask[signo - 1], sizeof(new_sig_mask));
/* generate SIGCHLD for parent */
lwp_signal_kill(parent_lwp, SIGCHLD, si_code, ext);
}
}
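For context, the event generated here is what the standard wait(2) family reports in user space. A minimal user-space sketch (plain POSIX, not part of this patch; child_pid is assumed to be a previously forked child) of a parent observing the stop/continue transitions this path produces:

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

/* hypothetical demo: observe CLD_STOPPED / CLD_CONTINUED via waitpid(2) */
void watch_child(pid_t child_pid)
{
    int status;

    kill(child_pid, SIGSTOP);                 /* ends up as CLD_STOPPED above */
    if (waitpid(child_pid, &status, WUNTRACED) > 0 && WIFSTOPPED(status))
        printf("stopped by signal %d\n", WSTOPSIG(status));

    kill(child_pid, SIGCONT);                 /* ends up as CLD_CONTINUED */
    if (waitpid(child_pid, &status, WCONTINUED) > 0 && WIFCONTINUED(status))
        printf("continued\n");
}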
if (!_sigismember(&lwp->signal.sig_action_nodefer, signo))
_sigaddset(&new_sig_mask, signo);
static int _do_signal_wakeup(rt_thread_t thread, int sig);
static rt_err_t _stop_thread_locked(rt_lwp_t self_lwp, rt_thread_t cur_thr, int signo,
lwp_siginfo_t si, lwp_sigqueue_t sq)
{
rt_err_t error;
int jobctl_stopped = self_lwp->jobctl_stopped;
rt_thread_t iter;
_thread_signal_mask(thread, LWP_SIG_MASK_CMD_BLOCK, &new_sig_mask, &save_sig_mask);
/* siginfo is needed for the signal action */
if (_sigismember(&lwp->signal.sig_action_siginfo, signo))
{
siginfo_k2u(siginfo, &usiginfo);
p_usi = &usiginfo;
}
else
p_usi = RT_NULL;
/* race to set up the jobctl stopped flags */
if (!jobctl_stopped)
{
self_lwp->jobctl_stopped = RT_TRUE;
self_lwp->wait_reap_stp = RT_FALSE;
rt_list_for_each_entry(iter, &self_lwp->t_grp, sibling)
{
if (iter != cur_thr)
_do_signal_wakeup(iter, signo);
}
}
/**
* raise the event again so siblings are able to catch it again.
* `si` will be discarded when SIGCONT is generated
*/
sigqueue_enqueue(sq, si);
/* release the lwp lock so we can happily suspend */
LWP_UNLOCK(self_lwp);
rt_set_errno(RT_EOK);
/* After suspension, only SIGKILL and SIGCONT will wake this thread up */
error = rt_thread_suspend_with_flag(cur_thr, RT_KILLABLE);
if (error == RT_EOK)
{
rt_schedule();
error = rt_get_errno();
error = error > 0 ? -error : error;
}
if (!jobctl_stopped &&
(sigqueue_ismember(_SIGQ(self_lwp), SIGCONT) ||
sigqueue_ismember(_SIGQ(cur_thr), SIGCONT)))
{
/**
* if we are resumed by a SIGCONT and we won the race,
* notify the parent of the incoming event
*/
_notify_parent_and_leader(self_lwp, cur_thr, SIGCONT, RT_FALSE);
}
/* reacquire the lock since we released it earlier */
LWP_LOCK(self_lwp);
return error;
}
static void _catch_signal_locked(rt_lwp_t lwp, rt_thread_t thread, int signo,
lwp_siginfo_t siginfo, lwp_sighandler_t handler,
void *exp_frame)
{
lwp_sigset_t new_sig_mask;
lwp_sigset_t save_sig_mask;
siginfo_t usiginfo;
siginfo_t *p_usi;
/* siginfo is needed for the signal action */
if (_sigismember(&lwp->signal.sig_action_siginfo, signo))
{
siginfo_k2u(siginfo, &usiginfo);
p_usi = &usiginfo;
}
else
{
p_usi = RT_NULL;
}
/**
* the lock is acquired by the caller. Release it so that we can happily go to the
* signal handler in user space
*/
LWP_UNLOCK(lwp);
if (pending && signo)
{
siginfo_delete(siginfo);
/* signal default handler */
if (handler == LWP_SIG_ACT_DFL)
{
lwp_sigset_t ign_sigset;
ign_sigset = lwp_sigset_init(LWP_SIG_IGNORE_SET);
if (signo == SIGCONT)
{
arch_syscall_set_errno(exp_frame, EINTR, ERESTART);
arch_thread_signal_enter(signo, p_usi, exp_frame, 0, &thread->signal.sigset_mask);
}
else if (!lwp_sigismember(&ign_sigset, signo) && !lwp->sig_protected)
{
/* for those whose default handler is to terminate the process */
LOG_D("%s: default handler; and exit", __func__);
sys_exit_group(0);
/* TODO: coredump if necessary */
lwp_exit(lwp, LWP_CREATE_STAT_SIGNALED(signo, 0));
}
/**
* otherwise the default action is to ignore the signal;
* reacquire the lock and return
*/
}
else if (handler == LWP_SIG_ACT_IGN)
{
/* do nothing */
}
else
{
/* copy the blocked signal mask from the registered signal action */
memcpy(&new_sig_mask, &lwp->signal.sig_action_mask[signo - 1], sizeof(new_sig_mask));
if (!_sigismember(&lwp->signal.sig_action_nodefer, signo))
_sigaddset(&new_sig_mask, signo);
_thread_signal_mask(thread, LWP_SIG_MASK_CMD_BLOCK, &new_sig_mask, &save_sig_mask);
if (_sigismember(&lwp->signal.sig_action_restart, signo))
{
arch_syscall_set_errno(exp_frame, EINTR, ERESTART);
}
/**
@ -557,18 +719,92 @@ void lwp_thread_signal_catch(void *exp_frame)
*/
LOG_D("%s: enter signal handler(signo=%d) at %p", __func__, signo, handler);
arch_thread_signal_enter(signo, p_usi, exp_frame, handler, &save_sig_mask);
/* arch_thread_signal_enter() never returns */
RT_ASSERT(0);
}
/* reacquire the lock because we released it earlier */
LWP_LOCK(lwp);
}
void lwp_thread_signal_catch(void *exp_frame)
{
struct rt_thread *thread;
struct rt_lwp *lwp;
lwp_sigqueue_t pending;
lwp_sigset_t *sig_mask;
int retry_signal_catch;
int signo;
thread = rt_thread_self();
lwp = (struct rt_lwp *)thread->lwp;
RT_ASSERT(!!lwp);
LWP_LOCK(lwp);
do {
/* if a stopped process resumes, we will retry catching the signal */
retry_signal_catch = 0;
signo = 0;
/* try to peek a signal which is pending and not blocked by this thread */
if (!sigqueue_isempty(_SIGQ(thread)))
{
pending = _SIGQ(thread);
sig_mask = &thread->signal.sigset_mask;
signo = sigqueue_peek(pending, sig_mask);
}
if (!signo && !sigqueue_isempty(_SIGQ(lwp)))
{
pending = _SIGQ(lwp);
sig_mask = &thread->signal.sigset_mask;
signo = sigqueue_peek(pending, sig_mask);
}
if (signo)
{
lwp_siginfo_t siginfo;
lwp_sighandler_t handler;
LOG_D("%s(signo=%d)", __func__, signo);
siginfo = sigqueue_dequeue(pending, signo);
RT_ASSERT(siginfo != RT_NULL);
handler = _get_sighandler_locked(lwp, signo);
if (_is_stop_signal(lwp, signo) && handler == LWP_SIG_ACT_DFL)
{
/* notify the parent process of the status update */
_notify_parent_and_leader(lwp, thread, signo, RT_TRUE);
LOG_D("%s: pid=%d stopped", __func__, lwp->pid);
_stop_thread_locked(lwp, thread, signo, siginfo, pending);
LOG_D("%s: pid=%d continued", __func__, lwp->pid);
/* wake up and retry catching signals sent to us */
retry_signal_catch = 1;
}
else
{
/* do normal, non-jobctl signal handling */
_catch_signal_locked(lwp, thread, signo, siginfo, handler, exp_frame);
}
}
} while (retry_signal_catch);
LWP_UNLOCK(lwp);
}
static int _do_signal_wakeup(rt_thread_t thread, int sig)
{
int need_schedule;
rt_sched_lock_level_t slvl;
if (!_sigismember(&thread->signal.sigset_mask, sig))
{
int stat;
rt_sched_lock(&slvl);
int stat = rt_sched_thread_get_stat(thread);
stat = rt_sched_thread_get_stat(thread);
if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK)
{
if ((stat & RT_SIGNAL_COMMON_WAKEUP_MASK) != RT_SIGNAL_COMMON_WAKEUP_MASK)
@ -599,6 +835,8 @@ static int _do_signal_wakeup(rt_thread_t thread, int sig)
rt_sched_unlock(slvl);
need_schedule = 0;
}
RT_SCHED_DEBUG_IS_UNLOCKED;
}
else
need_schedule = 0;
@ -689,7 +927,63 @@ rt_inline rt_bool_t _sighandler_cannot_caught(struct rt_lwp *lwp, int signo)
return signo == SIGKILL || signo == SIGSTOP;
}
rt_err_t lwp_signal_kill(struct rt_lwp *lwp, long signo, long code, long value)
/* called before a signal is delivered to the target process/thread */
static void _before_sending_jobctl_signal(int signo, rt_lwp_t target_lwp, lwp_siginfo_t si)
{
rt_thread_t thr_iter;
rt_sched_lock_level_t slvl;
lwp_sigset_t jobctl_sigset = lwp_sigset_init(LWP_SIG_JOBCTL_SET);
LWP_ASSERT_LOCKED(target_lwp);
/**
* dequeue all the pending jobctl signals (including
* the one we are adding, since we don't want to pend it)
*/
sigqueue_discard_sigset(_SIGQ(target_lwp), &jobctl_sigset);
if (signo == SIGCONT)
{
target_lwp->jobctl_stopped = RT_FALSE;
rt_list_for_each_entry(thr_iter, &target_lwp->t_grp, sibling)
{
rt_base_t stat;
sigqueue_discard_sigset(_SIGQ(thr_iter), &jobctl_sigset);
/**
* Note: all stopped threads will be resumed
*/
rt_sched_lock(&slvl);
stat = rt_sched_thread_get_stat(thr_iter);
if ((stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK &&
(stat & RT_SIGNAL_KILL_WAKEUP_MASK) == 0)
{
thr_iter->error = RT_EINTR;
/**
* it doesn't matter if we fail to resume the thread, since we
* only care about event delivery here, not ordering
*/
rt_sched_unlock(slvl);
rt_thread_wakeup(thr_iter);
}
else
{
rt_sched_unlock(slvl);
}
}
}
else
{
rt_list_for_each_entry(thr_iter, &target_lwp->t_grp, sibling)
{
sigqueue_discard_sigset(_SIGQ(thr_iter), &jobctl_sigset);
}
}
}
rt_err_t lwp_signal_kill(struct rt_lwp *lwp, long signo, long code, lwp_siginfo_ext_t value)
{
rt_err_t ret = -1;
@ -700,10 +994,15 @@ rt_err_t lwp_signal_kill(struct rt_lwp *lwp, long signo, long code, long value)
/** must be able to be suspended */
RT_DEBUG_SCHEDULER_AVAILABLE(RT_TRUE);
if (!lwp || signo <= 0 || signo > _LWP_NSIG)
if (!lwp || signo < 0 || signo > _LWP_NSIG)
{
ret = -RT_EINVAL;
}
else if (signo == 0)
{
/* the process exists and the current process has privileges */
ret = 0;
}
else
{
LOG_D("%s(lwp=%p \"%s\",signo=%ld,code=%ld,value=%ld)",
@ -715,19 +1014,25 @@ rt_err_t lwp_signal_kill(struct rt_lwp *lwp, long signo, long code, long value)
terminated = lwp->terminated;
/* short-circuit code for inactive task, ignored signals */
if (terminated || _sighandler_is_ignored(lwp, signo))
if (terminated)
{
/* no one relies on this, so free the resource */
if (value)
rt_free(value);
ret = 0;
}
else
{
siginfo = siginfo_create(signo, code, value);
siginfo = siginfo_create(rt_thread_self(), signo, code, value);
if (siginfo)
{
if (_is_jobctl_signal(lwp, signo))
_before_sending_jobctl_signal(signo, lwp, siginfo);
need_schedule = _siginfo_deliver_to_lwp(lwp, siginfo);
ret = 0;
lwp_signal_notify(&lwp->signalfd_notify_head, siginfo);
ret = 0;
}
else
{
@ -755,6 +1060,10 @@ static void _signal_action_flag_k2u(int signo, struct lwp_signal *signal, struct
flags |= SA_RESTART;
if (_sigismember(&signal->sig_action_siginfo, signo))
flags |= SA_SIGINFO;
if (_sigismember(&signal->sig_action_nocldstop, signo))
flags |= SA_NOCLDSTOP;
if (_sigismember(&signal->sig_action_nocldwait, signo))
flags |= SA_NOCLDWAIT;
act->sa_flags = flags;
}
@ -770,6 +1079,25 @@ static void _signal_action_flag_u2k(int signo, struct lwp_signal *signal, const
_sigaddset(&signal->sig_action_restart, signo);
if (flags & SA_SIGINFO)
_sigaddset(&signal->sig_action_siginfo, signo);
if (signo == SIGCHLD)
{
/* These flags are meaningful only when establishing a handler for SIGCHLD */
if (flags & SA_NOCLDSTOP)
_sigaddset(&signal->sig_action_nocldstop, signo);
if (flags & SA_NOCLDWAIT)
_sigaddset(&signal->sig_action_nocldwait, signo);
}
#define _HANDLE_FLAGS (SA_RESTORER | SA_NODEFER | SA_ONSTACK | SA_RESTART | SA_SIGINFO | SA_NOCLDSTOP | SA_NOCLDWAIT)
if (flags & ~_HANDLE_FLAGS)
LOG_W("Unhandled flags: 0x%lx", flags & ~_HANDLE_FLAGS);
}
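The user-space counterpart of the SA_NOCLDSTOP/SA_NOCLDWAIT plumbing above is an ordinary sigaction(2) call. A hedged sketch (standard POSIX, not taken from this patch):

#include <signal.h>
#include <stddef.h>
#include <string.h>

static void on_chld(int signo) { (void)signo; /* reap children in a real handler */ }

void install_sigchld_handler(void)
{
    struct sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = on_chld;
    sa.sa_flags = SA_NOCLDSTOP;  /* report only termination, not stop/continue */
    sigemptyset(&sa.sa_mask);
    sigaction(SIGCHLD, &sa, NULL);
}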
rt_bool_t lwp_sigisign(struct rt_lwp *lwp, int _sig)
{
unsigned long sig = _sig - 1;
return lwp->signal.sig_action[sig] == LWP_SIG_ACT_IGN;
}
rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
@ -793,6 +1121,7 @@ rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
oact->sa_restorer = RT_NULL;
_signal_action_flag_k2u(signo, &lwp->signal, oact);
}
if (act)
{
/**
@ -800,15 +1129,29 @@ rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
* argument succeed, even in the case of signals that cannot be caught or ignored
*/
if (_sighandler_cannot_caught(lwp, signo))
ret = -RT_EINVAL;
ret = -EINVAL;
else
{
prev_handler = _get_sighandler_locked(lwp, signo);
lwp->signal.sig_action_mask[signo - 1] = act->sa_mask;
if (act->__sa_handler._sa_handler == SIG_IGN)
lwp->signal.sig_action[signo - 1] = LWP_SIG_ACT_IGN;
{
lwp_sigset_t no_ign_set = lwp_sigset_init(LWP_SIG_NO_IGN_SET);
if (!lwp_sigismember(&no_ign_set, signo))
{
/* signals other than the unignorable ones are discarded for the process */
lwp->signal.sig_action[signo - 1] = LWP_SIG_ACT_IGN;
}
else
{
/* POSIX.1: SIG_IGN and SIG_DFL are equivalent for SIGCONT */
lwp->signal.sig_action[signo - 1] = LWP_SIG_ACT_DFL;
}
}
else
{
lwp->signal.sig_action[signo - 1] = act->__sa_handler._sa_handler;
}
_signal_action_flag_u2k(signo, &lwp->signal, act);
@ -837,12 +1180,12 @@ rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
LWP_UNLOCK(lwp);
}
else
ret = -RT_EINVAL;
ret = -EINVAL;
return ret;
}
rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code, long value)
rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code, lwp_siginfo_ext_t value)
{
rt_err_t ret = -1;
@ -855,10 +1198,15 @@ rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code, long
LOG_D("%s(signo=%d)", __func__, signo);
if (!thread || signo <= 0 || signo >= _LWP_NSIG)
if (!thread || signo < 0 || signo >= _LWP_NSIG)
{
ret = -RT_EINVAL;
}
else if (signo == 0)
{
/* the thread exists and the current thread has privileges */
ret = 0;
}
else
{
lwp = thread->lwp;
@ -874,7 +1222,7 @@ rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code, long
ret = 0;
else
{
siginfo = siginfo_create(signo, code, value);
siginfo = siginfo_create(rt_thread_self(), signo, code, value);
if (siginfo)
{
@ -919,7 +1267,7 @@ rt_err_t lwp_thread_signal_mask(rt_thread_t thread, lwp_sig_mask_cmd_t how,
if (thread)
{
lwp = (struct rt_lwp*)thread->lwp;
lwp = (struct rt_lwp *)thread->lwp;
LWP_LOCK(lwp);
if (!lwp)
@ -948,13 +1296,14 @@ static int _dequeue_signal(rt_thread_t thread, lwp_sigset_t *mask, siginfo_t *us
lwp_sigset_t *pending;
lwp_sigqueue_t sigqueue;
lwp = thread->lwp;
RT_ASSERT(lwp);
sigqueue = _SIGQ(thread);
pending = &sigqueue->sigset_pending;
signo = _next_signal(pending, mask);
if (!signo)
{
lwp = thread->lwp;
RT_ASSERT(lwp);
sigqueue = _SIGQ(lwp);
pending = &sigqueue->sigset_pending;
signo = _next_signal(pending, mask);
@ -978,6 +1327,7 @@ rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
rt_err_t ret;
lwp_sigset_t saved_sigset;
lwp_sigset_t blocked_sigset;
lwp_sigset_t dontwait_sigset;
int sig;
struct rt_lwp *lwp = thread->lwp;
@ -990,10 +1340,10 @@ rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
/* Create a mask of signals the user doesn't want or cannot catch */
_sigdelset(sigset, SIGKILL);
_sigdelset(sigset, SIGSTOP);
_signotsets(sigset, sigset);
_signotsets(&dontwait_sigset, sigset);
LWP_LOCK(lwp);
sig = _dequeue_signal(thread, sigset, usi);
sig = _dequeue_signal(thread, &dontwait_sigset, usi);
LWP_UNLOCK(lwp);
if (sig)
return sig;
@ -1007,13 +1357,12 @@ rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
* Note: if the pending signal arrives before the thread suspends, the
* suspend operation will return a failure
*/
_sigandsets(&blocked_sigset, &thread->signal.sigset_mask, sigset);
_sigandsets(&blocked_sigset, &thread->signal.sigset_mask, &dontwait_sigset);
_thread_signal_mask(thread, LWP_SIG_MASK_CMD_SET_MASK, &blocked_sigset, &saved_sigset);
if (timeout)
{
rt_uint32_t time;
time = rt_timespec_to_tick(timeout);
rt_tick_t time;
time = (timeout->tv_sec * RT_TICK_PER_SECOND) + ((timeout->tv_nsec * RT_TICK_PER_SECOND) / NANOSECOND_PER_SECOND);
/**
* Brief: POSIX
* If the timespec structure pointed to by timeout is zero-valued and
@ -1023,11 +1372,13 @@ rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
if (time == 0)
return -EAGAIN;
rt_enter_critical();
ret = rt_thread_suspend_with_flag(thread, RT_INTERRUPTIBLE);
rt_timer_control(&(thread->thread_timer),
RT_TIMER_CTRL_SET_TIME,
&time);
rt_timer_start(&(thread->thread_timer));
rt_exit_critical();
}
else
@ -1040,7 +1391,7 @@ rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
{
rt_schedule();
/* Is thread->error reliable? */
if (thread->error == -RT_EINTR)
if (thread->error == RT_EINTR)
ret = -EINTR;
else
ret = -EAGAIN;
@ -1049,7 +1400,7 @@ rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
_thread_signal_mask(thread, LWP_SIG_MASK_CMD_SET_MASK, &saved_sigset, RT_NULL);
LWP_LOCK(lwp);
sig = _dequeue_signal(thread, sigset, usi);
sig = _dequeue_signal(thread, &dontwait_sigset, usi);
LWP_UNLOCK(lwp);
return sig ? sig : ret;
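This kernel path serves sigtimedwait(2). A minimal user-space sketch of the call it backs (standard POSIX; the one-second timeout is illustrative):

#include <signal.h>
#include <stdio.h>
#include <time.h>

int wait_for_usr1(void)
{
    sigset_t set;
    siginfo_t info;
    struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    sigprocmask(SIG_BLOCK, &set, NULL);   /* the waited signal must be blocked */

    int sig = sigtimedwait(&set, &info, &ts);
    if (sig < 0)
        perror("sigtimedwait");           /* EAGAIN on timeout, EINTR if interrupted */
    return sig;
}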
@ -1072,3 +1423,22 @@ void lwp_thread_signal_pending(rt_thread_t thread, lwp_sigset_t *pending)
_sigandsets(pending, pending, &thread->signal.sigset_mask);
}
}
rt_err_t lwp_pgrp_signal_kill(rt_processgroup_t pgrp, long signo, long code,
lwp_siginfo_ext_t value)
{
struct rt_lwp *lwp;
rt_err_t rc = 0;
PGRP_ASSERT_LOCKED(pgrp);
if (pgrp)
{
rt_list_for_each_entry(lwp, &pgrp->process, pgrp_node)
{
lwp_signal_kill(lwp, signo, code, value);
}
}
return rc;
}
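From user space, per-group delivery is reached through kill(2) with a negative pid (or killpg(3)). For instance:

#include <signal.h>

/* send SIGTERM to every process in process group pgid */
int kill_group(pid_t pgid) { return kill(-pgid, SIGTERM); }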

View File

@ -7,6 +7,7 @@
* Date Author Notes
* 2020-02-23 Jesven first version.
* 2023-07-06 Shell update the generation, pending and delivery API
* 2023-11-22 Shell support for job control signal
*/
#ifndef __LWP_SIGNAL_H__
@ -17,23 +18,33 @@
#include <rtthread.h>
#include <sys/signal.h>
struct timespec;
struct itimerspec;
#ifdef __cplusplus
extern "C" {
#endif
#define _USIGNAL_SIGMASK(signo) (1u << ((signo)-1))
#define LWP_SIG_IGNORE_SET (_USIGNAL_SIGMASK(SIGCHLD) | _USIGNAL_SIGMASK(SIGURG))
#define LWP_SIG_ACT_DFL ((lwp_sighandler_t)0)
#define LWP_SIG_ACT_IGN ((lwp_sighandler_t)1)
#define LWP_SIG_USER_SA_FLAGS \
(SA_NOCLDSTOP | SA_NOCLDWAIT | SA_SIGINFO | SA_ONSTACK | SA_RESTART | \
#define LWP_SIG_NO_IGN_SET \
(_USIGNAL_SIGMASK(SIGCONT) | _USIGNAL_SIGMASK(SIGSTOP) | \
_USIGNAL_SIGMASK(SIGKILL))
#define LWP_SIG_IGNORE_SET \
(_USIGNAL_SIGMASK(SIGCHLD) | _USIGNAL_SIGMASK(SIGURG) | \
_USIGNAL_SIGMASK(SIGWINCH) /* from 4.3 BSD, not POSIX.1 */)
#define LWP_SIG_JOBCTL_SET \
(_USIGNAL_SIGMASK(SIGCONT) | _USIGNAL_SIGMASK(SIGSTOP) | \
_USIGNAL_SIGMASK(SIGTSTP) | _USIGNAL_SIGMASK(SIGTTIN) | \
_USIGNAL_SIGMASK(SIGTTOU))
#define LWP_SIG_STOP_SET \
(_USIGNAL_SIGMASK(SIGSTOP) | _USIGNAL_SIGMASK(SIGTSTP) | \
_USIGNAL_SIGMASK(SIGTTIN) | _USIGNAL_SIGMASK(SIGTTOU))
#define LWP_SIG_ACT_DFL ((lwp_sighandler_t)0)
#define LWP_SIG_ACT_IGN ((lwp_sighandler_t)1)
#define LWP_SIG_USER_SA_FLAGS \
(SA_NOCLDSTOP | SA_NOCLDWAIT | SA_SIGINFO | SA_ONSTACK | SA_RESTART | \
SA_NODEFER | SA_RESETHAND | SA_EXPOSE_TAGBITS)
#define LWP_SIG_INVALID_TIMER ((timer_t)-1)
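These sets are plain one-hot bit masks over the signal number. A quick sketch of the arithmetic (assuming the usual musl numbering, where SIGCONT == 18 and SIGSTOP == 19):

/* _USIGNAL_SIGMASK(SIGSTOP) == 1u << (19 - 1) == 0x40000, so a
 * membership test against any of the sets above is a single AND: */
int is_stop = !!(LWP_SIG_STOP_SET & _USIGNAL_SIGMASK(SIGSTOP));   /* == 1 */
int is_ign  = !!(LWP_SIG_IGNORE_SET & _USIGNAL_SIGMASK(SIGSTOP)); /* == 0 */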
typedef enum {
typedef enum
{
LWP_SIG_MASK_CMD_BLOCK,
LWP_SIG_MASK_CMD_UNBLOCK,
LWP_SIG_MASK_CMD_SET_MASK,
@ -43,7 +54,8 @@ typedef enum {
/**
* LwP implementation of POSIX signal
*/
struct lwp_signal {
struct lwp_signal
{
timer_t real_timer;
struct lwp_sigqueue sig_queue;
rt_thread_t sig_dispatch_thr[_LWP_NSIG];
@ -55,9 +67,12 @@ struct lwp_signal {
lwp_sigset_t sig_action_onstack;
lwp_sigset_t sig_action_restart;
lwp_sigset_t sig_action_siginfo;
lwp_sigset_t sig_action_nocldstop;
lwp_sigset_t sig_action_nocldwait;
};
struct rt_lwp;
struct rt_processgroup;
#ifndef ARCH_MM_MMU
void lwp_sighandler_set(int sig, lwp_sighandler_t func);
@ -93,12 +108,14 @@ rt_inline void lwp_thread_signal_detach(struct lwp_thread_signal *tsig)
* @param signo the signal number
* @param code as in siginfo
* @param value as in siginfo
* @return rt_err_t RT_EINVAL if the parameter is invalid, RT_EOK as successful
* @return rt_err_t RT_EINVAL if the parameter is invalid, RT_EOK on success
*
* @note the *signal_kill have the same definition of a successful return as
* kill() in IEEE Std 1003.1-2017
*/
rt_err_t lwp_signal_kill(struct rt_lwp *lwp, long signo, long code, long value);
rt_err_t lwp_signal_kill(struct rt_lwp *lwp, long signo, long code,
lwp_siginfo_ext_t value);
/**
* @brief set or examine the signal action of signo
@ -119,9 +136,11 @@ rt_err_t lwp_signal_action(struct rt_lwp *lwp, int signo,
* @param signo the signal number
* @param code as in siginfo
* @param value as in siginfo
* @return rt_err_t RT_EINVAL if the parameter is invalid, RT_EOK as successful
* @return rt_err_t RT_EINVAL if the parameter is invalid, RT_EOK on success
*/
rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code, long value);
rt_err_t lwp_thread_signal_kill(rt_thread_t thread, long signo, long code,
lwp_siginfo_ext_t value);
/**
* @brief set signal mask of target thread
@ -136,7 +155,8 @@ rt_err_t lwp_thread_signal_mask(rt_thread_t thread, lwp_sig_mask_cmd_t how,
const lwp_sigset_t *sigset, lwp_sigset_t *oset);
/**
* @brief Catch signal if exists and no return, otherwise return with no side effect
* @brief Catch a pending signal if one exists (does not return); otherwise
* return with no side effect
*
* @param exp_frame the exception frame on kernel stack
*/
@ -172,10 +192,43 @@ rt_err_t lwp_thread_signal_timedwait(rt_thread_t thread, lwp_sigset_t *sigset,
*/
void lwp_thread_signal_pending(rt_thread_t thread, lwp_sigset_t *sigset);
/**
* @brief send a signal to the process group
*
* @param pgrp target process group
* @param signo the signal number
* @param code as in siginfo
* @param value as in siginfo
* @return rt_err_t RT_EINVAL if the parameter is invalid, RT_EOK on success
*/
rt_err_t lwp_pgrp_signal_kill(struct rt_processgroup *pgrp, long signo,
long code, lwp_siginfo_ext_t value);
rt_inline int lwp_sigismember(lwp_sigset_t *set, int _sig)
{
unsigned long sig = _sig - 1;
if (_LWP_NSIG_WORDS == 1)
{
return 1 & (set->sig[0] >> sig);
}
else
{
return 1 & (set->sig[sig / _LWP_NSIG_BPW] >> (sig % _LWP_NSIG_BPW));
}
}
struct itimerspec;
rt_bool_t lwp_sigisign(struct rt_lwp *lwp, int _sig);
rt_err_t lwp_signal_setitimer(struct rt_lwp *lwp, int which,
const struct itimerspec *restrict new,
struct itimerspec *restrict old);
rt_bool_t lwp_signal_restart_syscall(struct rt_lwp *lwp, int error_code);
#ifdef __cplusplus
}
#endif

View File

@ -113,7 +113,7 @@ struct musl_ifreq
{
union
{
#define IFNAMSIZ 16
#define IFNAMSIZ 16
char ifrn_name[IFNAMSIZ];
} ifr_ifrn;
union
@ -133,4 +133,23 @@ struct musl_ifreq
} ifr_ifru;
};
struct musl_rtentry
{
unsigned long int rt_pad1;
struct musl_sockaddr rt_dst;
struct musl_sockaddr rt_gateway;
struct musl_sockaddr rt_genmask;
unsigned short int rt_flags;
short int rt_pad2;
unsigned long int rt_pad3;
unsigned char rt_tos;
unsigned char rt_class;
short int rt_pad4[sizeof(long)/2-1];
short int rt_metric;
char *rt_dev;
unsigned long int rt_mtu;
unsigned long int rt_window;
unsigned short int rt_irtt;
};
#endif /* __LWP_SYS_SOCKET_H__ */
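struct musl_rtentry mirrors musl's struct rtentry, the argument of the SIOCADDRT/SIOCDELRT ioctls. A hedged user-space sketch of the calling convention this layout serves (standard Linux/musl API, not part of this patch):

#include <net/route.h>
#include <string.h>
#include <sys/ioctl.h>

/* sketch: add a gateway route; dst/gw are prepared sockaddrs */
int add_route(int sockfd, const struct sockaddr *dst, const struct sockaddr *gw)
{
    struct rtentry rt;
    memset(&rt, 0, sizeof(rt));
    rt.rt_dst = *dst;
    rt.rt_gateway = *gw;
    rt.rt_flags = RTF_UP | RTF_GATEWAY;
    return ioctl(sockfd, SIOCADDRT, &rt);
}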

File diff suppressed because it is too large Load Diff

View File

@ -108,10 +108,15 @@ sysret_t sys_log(const char* log, int size);
#ifdef ARCH_MM_MMU
sysret_t sys_futex(int *uaddr, int op, int val, const struct timespec *timeout, int *uaddr2, int val3);
sysret_t sys_pmutex(void *umutex, int op, void *arg);
sysret_t sys_cacheflush(void *addr, int len, int cache);
#endif /* ARCH_MM_MMU */
sysret_t sys_setsid(void);
sysret_t sys_getsid(pid_t pid);
sysret_t sys_setpgid(pid_t pid, pid_t pgid);
sysret_t sys_getpgid(pid_t pid);
#ifdef __cplusplus
}
#endif

View File

@ -6,6 +6,7 @@
* Change Logs:
* Date Author Notes
* 2021-01-15 shaojinchun first version
* 2023-11-16 xqyjlj Fix the case where tid is 0
*/
#define DBG_TAG "lwp.tid"
@ -126,20 +127,28 @@ void lwp_tid_put(int tid)
lwp_mutex_release_safe(&tid_lock);
}
rt_thread_t lwp_tid_get_thread_and_inc_ref(int tid)
rt_thread_t lwp_tid_get_thread_raw(int tid)
{
struct lwp_avl_struct *p;
rt_thread_t thread = RT_NULL;
lwp_mutex_take_safe(&tid_lock, RT_WAITING_FOREVER, 0);
p = lwp_avl_find(tid, lwp_tid_root);
if (p)
{
thread = (rt_thread_t)p->data;
if (thread != RT_NULL)
{
thread->tid_ref_count += 1;
}
}
return thread;
}
rt_thread_t lwp_tid_get_thread_and_inc_ref(int tid)
{
rt_thread_t thread = RT_NULL;
lwp_mutex_take_safe(&tid_lock, RT_WAITING_FOREVER, 0);
thread = tid ? lwp_tid_get_thread_raw(tid) : rt_thread_self();
if (thread != RT_NULL)
{
thread->tid_ref_count += 1;
}
lwp_mutex_release_safe(&tid_lock);
return thread;

View File

@ -23,10 +23,7 @@
#ifdef ARCH_MM_MMU
#include <lwp.h>
#include <lwp_arch.h>
#include <lwp_mm.h>
#include <lwp_user_mm.h>
#include "lwp_internal.h"
#include <mm_aspace.h>
#include <mm_fault.h>
@ -161,10 +158,10 @@ void lwp_aspace_switch(struct rt_thread *thread)
void lwp_unmap_user_space(struct rt_lwp *lwp)
{
arch_user_space_free(lwp);
if (lwp->aspace)
arch_user_space_free(lwp);
}
static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size,
int text)
{
@ -566,6 +563,14 @@ int lwp_munmap(struct rt_lwp *lwp, void *addr, size_t length)
return lwp_errno_to_posix(ret);
}
void *lwp_mremap(struct rt_lwp *lwp, void *old_address, size_t old_size,
size_t new_size, int flags, void *new_address)
{
RT_ASSERT(lwp);
return rt_aspace_mremap_range(lwp->aspace, old_address, old_size, new_size, flags, new_address);
}
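lwp_mremap backs the mremap(2) system call. A minimal user-space sketch of the call it serves (standard Linux semantics; sizes are illustrative):

#define _GNU_SOURCE
#include <sys/mman.h>

void *grow_mapping(void *old, size_t old_size, size_t new_size)
{
    /* MREMAP_MAYMOVE lets the kernel relocate the range if it cannot grow in place */
    return mremap(old, old_size, new_size, MREMAP_MAYMOVE);
}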
size_t lwp_get_from_user(void *dst, void *src, size_t size)
{
struct rt_lwp *lwp = RT_NULL;
@ -621,11 +626,6 @@ size_t lwp_put_to_user(void *dst, void *src, size_t size)
return lwp_data_put(lwp, dst, src, size);
}
rt_inline rt_bool_t _in_user_space(const char *addr)
{
return (addr >= (char *)USER_VADDR_START && addr < (char *)USER_VADDR_TOP);
}
rt_inline rt_bool_t _can_unaligned_access(const char *addr)
{
return rt_kmem_v2p((char *)addr) - PV_OFFSET == addr;
@ -636,9 +636,9 @@ void *lwp_memcpy(void * __restrict dst, const void * __restrict src, size_t size
void *rc = dst;
long len;
if (_in_user_space(dst))
if (lwp_in_user_space(dst))
{
if (!_in_user_space(src))
if (!lwp_in_user_space(src))
{
len = lwp_put_to_user(dst, (void *)src, size);
if (!len)
@ -654,7 +654,7 @@ void *lwp_memcpy(void * __restrict dst, const void * __restrict src, size_t size
}
else
{
if (_in_user_space(src))
if (lwp_in_user_space(src))
{
len = lwp_get_from_user(dst, (void *)src, size);
if (!len)
@ -979,6 +979,13 @@ size_t lwp_user_strlen(const char *s)
return lwp_user_strlen_ext(lwp, s);
}
size_t lwp_strlen(struct rt_lwp *lwp, const char *s)
{
if (lwp_in_user_space(s))
return lwp_user_strlen_ext(lwp, s);
else
return strlen(s);
}
char** lwp_get_command_line_args(struct rt_lwp *lwp)
{

View File

@ -56,6 +56,9 @@ void* lwp_mmap2(struct rt_lwp *lwp, void *addr, size_t length, int prot, int fla
*/
int lwp_munmap(struct rt_lwp *lwp, void *addr, size_t length);
void *lwp_mremap(struct rt_lwp *lwp, void *old_address, size_t old_size,
size_t new_size, int flags, void *new_address);
/**
* @brief Test if address from user is accessible address by user
*
@ -163,6 +166,7 @@ rt_base_t lwp_brk(void *addr);
size_t lwp_user_strlen(const char *s);
size_t lwp_user_strlen_ext(struct rt_lwp *lwp, const char *s);
size_t lwp_strlen(struct rt_lwp *lwp, const char *s);
int lwp_fork_aspace(struct rt_lwp *dest_lwp, struct rt_lwp *src_lwp);

View File

@ -0,0 +1,13 @@
menuconfig LWP_USING_TERMINAL
bool "Terminal I/O Subsystem"
depends on RT_USING_SMART
default y
if LWP_USING_TERMINAL
config LWP_PTY_MAX_PARIS_LIMIT
int "Max number of pty devices registered at the same time"
default 64
help
This upper limit protects kernel memory from being drained by an
application that keeps allocating pty devices.
endif

View File

@ -0,0 +1,741 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell Add compatible layer for FreeBSD
*/
#ifndef __LWP_TTY_BSD_PORTING_H__
#define __LWP_TTY_BSD_PORTING_H__
#include <rtthread.h>
#include <lwp_internal.h>
#define _KERNEL
#ifndef __unused
#define __unused __attribute__((__unused__))
#endif
/* functionality of the bsd tty layer */
#if 0
#define USING_BSD_HOOK
#endif
/* Only for devfs d_close() flags. */
#define FLASTCLOSE O_DIRECTORY
#define FREVOKE 0x00200000
/*
* Output flags - software output processing
*/
#if !((OPOST | OLCUC | ONLCR) & 0x8)
#define ONOEOT 0x0008 /* discard EOT's (^D) on output */
#endif
/*
* Kernel encoding of open mode; separate read and write bits that are
* independently testable: 1 greater than the above.
*
* XXX
* FREAD and FWRITE are excluded from the #ifdef _KERNEL so that TIOCFLUSH,
* which was documented to use FREAD/FWRITE, continues to work.
*/
#define FREAD 0x0001
#define FWRITE 0x0002
/*
* Flags to memory allocation functions.
*/
#define M_NOWAIT 0x0001 /* do not block */
#define M_WAITOK 0x0002 /* ok to block */
#define M_NORECLAIM 0x0080 /* do not reclaim after failure */
#define M_ZERO 0x0100 /* bzero the allocation */
#define M_NOVM 0x0200 /* don't ask VM for pages */
#define M_USE_RESERVE 0x0400 /* can alloc out of reserve memory */
#define M_NODUMP 0x0800 /* don't dump pages in this allocation */
#define M_FIRSTFIT 0x1000 /* only for vmem, fast fit */
#define M_BESTFIT 0x2000 /* only for vmem, low fragmentation */
#define M_EXEC 0x4000 /* allocate executable space */
#define M_NEXTFIT 0x8000 /* only for vmem, follow cursor */
#define M_VERSION 2020110501
/*
* The INVARIANTS-enabled mtx_assert() functionality.
*
* The constants need to be defined for INVARIANT_SUPPORT infrastructure
* support as _mtx_assert() itself uses them and the latter implies that
* _mtx_assert() must build.
*/
#define MA_OWNED (1)
#define MA_NOTOWNED (2)
#define MA_RECURSED (4)
#define MA_NOTRECURSED (8)
/*
* Identification of modem control signals. These definitions match
* the TIOCMGET definitions in <sys/ttycom.h> shifted a bit down, and
* that identity is enforced with CTASSERT at the bottom of kern/tty.c
* Both the modem bits and delta bits must fit in 16 bit.
*/
#define SER_DTR 0x0001 /* data terminal ready */
#define SER_RTS 0x0002 /* request to send */
#define SER_STX 0x0004 /* secondary transmit */
#define SER_SRX 0x0008 /* secondary receive */
#define SER_CTS 0x0010 /* clear to send */
#define SER_DCD 0x0020 /* data carrier detect */
#define SER_RI 0x0040 /* ring indicate */
#define SER_DSR 0x0080 /* data set ready */
#define SER_MASK_STATE 0x00ff
/*
* Flags for ioflag. (high 16 bits used to ask for read-ahead and
* help with write clustering)
* NB: IO_NDELAY and IO_DIRECT are linked to fcntl.h
*/
#if 0
#define IO_UNIT 0x0001 /* do I/O as atomic unit */
#define IO_APPEND 0x0002 /* append write to end */
#endif /* not porting */
#define IO_NDELAY 0x0004 /* FNDELAY flag set in file table */
#if 0
#define IO_NODELOCKED 0x0008 /* underlying node already locked */
#define IO_ASYNC 0x0010 /* bawrite rather than bdwrite */
#define IO_VMIO 0x0020 /* data already in VMIO space */
#define IO_INVAL 0x0040 /* invalidate after I/O */
#define IO_SYNC 0x0080 /* do I/O synchronously */
#define IO_DIRECT 0x0100 /* attempt to bypass buffer cache */
#define IO_NOREUSE 0x0200 /* VMIO data won't be reused */
#define IO_EXT 0x0400 /* operate on external attributes */
#define IO_NORMAL 0x0800 /* operate on regular data */
#define IO_NOMACCHECK 0x1000 /* MAC checks unnecessary */
#define IO_BUFLOCKED 0x2000 /* ffs flag; indir buf is locked */
#define IO_RANGELOCKED 0x4000 /* range locked */
#define IO_DATASYNC 0x8000 /* do only data I/O synchronously */
#define IO_SEQMAX 0x7F /* seq heuristic max value */
#define IO_SEQSHIFT 16 /* seq heuristic in upper 16 bits */
#endif /* not porting */
/** Used to distinguish between normal, callout, lock and init devices.
* Note: this is not used in the Smart system.
*/
#define TTYUNIT_INIT 0x1
#define TTYUNIT_LOCK 0x2
#define TTYUNIT_CALLOUT 0x4
/*
* TTY privileges.
*/
#define PRIV_TTY_CONSOLE 250 /* Set console to tty. */
#define PRIV_TTY_DRAINWAIT 251 /* Set tty drain wait time. */
#define PRIV_TTY_DTRWAIT 252 /* Set DTR wait on tty. */
#define PRIV_TTY_EXCLUSIVE 253 /* Override tty exclusive flag. */
#define _PRIV_TTY_PRISON 254 /* Removed. */
#define PRIV_TTY_STI 255 /* Simulate input on another tty. */
#define PRIV_TTY_SETA 256 /* Set tty termios structure. */
#define MPASS(ex) RT_ASSERT(ex)
#if !defined(MIN)
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
#if !defined(MAX)
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif
#define curthread rt_thread_self()
#ifdef USING_BSD_HOOK
#define ttyhook_hashook(tp, hook) \
((tp)->t_hook != NULL && (tp)->t_hook->th_##hook != NULL)
#else
#define ttyhook_hashook(tp, hook) (RT_FALSE)
#endif
/* condvar API */
#include <rtdevice.h>
#define cv_init(cvp, name) rt_condvar_init(cvp, name)
#define cv_destroy(cvp) rt_condvar_detach(cvp)
#define cv_wait(cvp, mp) \
rt_condvar_timedwait(cvp, mp, RT_KILLABLE, RT_WAITING_FOREVER)
#define cv_wait_sig(cvp, mp) \
rt_condvar_timedwait(cvp, mp, RT_INTERRUPTIBLE, RT_WAITING_FOREVER)
#define cv_signal(cvp) rt_condvar_signal(cvp)
#define cv_broadcast(cvp) rt_condvar_broadcast(cvp)
#define cv_timedwait(cvp, mp, t) rt_condvar_timedwait(cvp, mp, RT_KILLABLE, t)
#define cv_timedwait_sig(cvp, mp, t) \
rt_condvar_timedwait(cvp, mp, RT_INTERRUPTIBLE, t)
struct lwp_tty;
struct uio;
/* TODO: just a placeholder, since devfs is currently not capable of doing this
*/
struct file
{
};
typedef rt_base_t sbintime_t;
typedef rt_ubase_t vm_offset_t;
typedef rt_base_t vm_ooffset_t;
typedef rt_ubase_t vm_paddr_t;
typedef rt_ubase_t vm_pindex_t;
typedef rt_ubase_t vm_size_t;
typedef char *rt_caddr_t;
/*
* The exact set of memory attributes is machine dependent. However,
* every machine is required to define VM_MEMATTR_DEFAULT and
* VM_MEMATTR_UNCACHEABLE.
*/
typedef char vm_memattr_t; /* memory attribute codes */
typedef int d_open_t(struct lwp_tty *tp, int oflags, int devtype,
struct rt_thread *td);
typedef int d_fdopen_t(struct lwp_tty *tp, int oflags, struct rt_thread *td,
struct file *fp);
typedef int d_close_t(struct lwp_tty *tp, int fflag, int devtype,
struct rt_thread *td);
#ifdef USING_BSD_DEVICE_STRATEGY
typedef void d_strategy_t(struct bio *bp);
#endif
typedef int d_ioctl_t(struct lwp_tty *tp, rt_ubase_t cmd, rt_caddr_t data,
int fflag, struct rt_thread *td);
typedef int d_read_t(struct lwp_tty *tp, struct uio *uio, int ioflag);
typedef int d_write_t(struct lwp_tty *tp, struct uio *uio, int ioflag);
typedef int d_poll_t(struct lwp_tty *tp, rt_pollreq_t *req,
struct rt_thread *td);
#ifdef USING_BSD_KNOTE
typedef int d_kqfilter_t(struct lwp_tty *tp, struct knote *kn);
#endif /* USING_BSD_KNOTE */
typedef int d_mmap_t(struct lwp_tty *tp, vm_ooffset_t offset, vm_paddr_t *paddr,
int nprot, vm_memattr_t *memattr);
#ifdef USING_BSD_MMAP_SINGLE
typedef int d_mmap_single_t(struct cdev *cdev, vm_ooffset_t *offset,
vm_size_t size, struct vm_object **object,
int nprot);
#endif /* USING_BSD_MMAP_SINGLE */
typedef void d_purge_t(struct lwp_tty *tp);
/*
* Character device switch table
*/
struct cdevsw
{
#ifdef USING_BSD_RAW_CDEVSW
int d_version;
u_int d_flags;
const char *d_name;
#endif /* USING_BSD_RAW_CDEVSW */
d_open_t *d_open;
d_fdopen_t *d_fdopen;
d_close_t *d_close;
d_read_t *d_read;
d_write_t *d_write;
d_ioctl_t *d_ioctl;
d_poll_t *d_poll;
d_mmap_t *d_mmap;
#ifdef USING_BSD_DEVICE_STRATEGY
d_strategy_t *d_strategy;
#endif /* USING_BSD_DEVICE_STRATEGY */
#ifdef USING_BSD_RAW_CDEVSW
void *d_spare0;
d_kqfilter_t *d_kqfilter;
d_purge_t *d_purge;
d_mmap_single_t *d_mmap_single;
int32_t d_spare1[3];
void *d_spare2[3];
/* These fields should not be messed with by drivers */
LIST_HEAD(, cdev) d_devs;
int d_spare3;
union
{
struct cdevsw *gianttrick;
SLIST_ENTRY(cdevsw) postfree_list;
} __d_giant;
#endif
};
struct iovec
{
void *iov_base; /* Base address. */
size_t iov_len; /* Length. */
};
enum uio_rw
{
UIO_READ,
UIO_WRITE
};
struct uio
{
struct iovec *uio_iov; /* scatter/gather list */
int uio_iovcnt; /* length of scatter/gather list */
off_t uio_offset; /* offset in target object */
ssize_t uio_resid; /* remaining bytes to process */
#ifdef USING_BSD_UIO
enum uio_seg uio_segflg; /* address space */
#endif
enum uio_rw uio_rw; /* operation */
#ifdef USING_BSD_UIO
struct rt_thread *uio_td; /* owner */
#endif /* USING_BSD_UIO */
};
#include <lwp_user_mm.h>
rt_inline int uiomove(void *operand, int n, struct uio *uio)
{
switch (uio->uio_rw)
{
case UIO_READ:
memcpy(uio->uio_iov->iov_base, operand, n);
break;
case UIO_WRITE:
memcpy(operand, uio->uio_iov->iov_base, n);
break;
default:
return -1;
}
uio->uio_iov->iov_base += n;
uio->uio_iov->iov_len--;
uio->uio_offset += n;
uio->uio_resid -= n;
return 0;
}
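uiomove consumes a scatter/gather descriptor one contiguous chunk at a time. A hedged sketch of wrapping a flat buffer in a single-segment uio, as the tty read/write paths do (names are local to this example):

/* sketch: build a one-element uio describing a read into `buf` */
rt_inline void uio_init_read(struct uio *uio, struct iovec *iov,
                             void *buf, size_t len)
{
    iov->iov_base = buf;
    iov->iov_len  = len;
    uio->uio_iov    = iov;
    uio->uio_iovcnt = 1;
    uio->uio_offset = 0;
    uio->uio_resid  = len;      /* uiomove() decrements this as data moves */
    uio->uio_rw     = UIO_READ; /* data flows from the device into buf */
}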
/* privileges checking: 0 if okay */
rt_inline int priv_check(struct rt_thread *td, int priv)
{
return 0;
}
/* Disable console redirection to a tty. */
rt_inline int constty_clear(struct lwp_tty *tp)
{
// rt_kprintf("\nTODO: %s unimplemented!\n", __func__);
return 0;
}
rt_inline int constty_set(struct lwp_tty *tp)
{
// rt_kprintf("\nTODO: %s unimplemented!\n", __func__);
return 0;
}
/**
* UMA (Universal Memory Allocator)
*/
#define UMA_ALIGN_PTR (sizeof(void *) - 1) /* Alignment fit for ptr */
typedef int (*uma_ctor)(void *mem, int size, void *arg, int flags);
typedef void (*uma_dtor)(void *mem, int size, void *arg);
typedef int (*uma_init)(void *mem, int size, int flags);
typedef void (*uma_fini)(void *mem, int size);
struct uma_zone
{
char *name;
int align;
int size;
};
/* Opaque type used as a handle to the zone */
typedef struct uma_zone *uma_zone_t;
rt_inline uma_zone_t uma_zcreate(char *name, int size, uma_ctor ctor,
uma_dtor dtor, uma_init zinit, uma_fini zfini,
int align, uint16_t flags)
{
uma_zone_t zone = rt_malloc(sizeof(struct uma_zone));
if (zone)
{
RT_ASSERT(ctor == RT_NULL);
RT_ASSERT(dtor == RT_NULL);
RT_ASSERT(zinit == RT_NULL);
RT_ASSERT(zfini == RT_NULL);
zone->size = size;
zone->name = name;
zone->align = align;
}
return zone;
}
rt_inline void *uma_zalloc(uma_zone_t zone, int flags)
{
void *buf = rt_malloc_align(zone->size, zone->align + 1);
if (buf)
rt_memset(buf, 0, zone->size); /* zero the whole object, not sizeof(zone->size) bytes */
return buf;
}
rt_inline void uma_zfree(uma_zone_t zone, void *item)
{
rt_free_align(item);
}
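Usage of the shim matches the FreeBSD call shape, minus ctor/dtor support (asserted away above). A hedged sketch:

/* sketch: a fixed-size allocation cache for 256-byte tty queue blocks */
static uma_zone_t blk_zone;

void blk_zone_setup(void)
{
    blk_zone = uma_zcreate("ttyq_blk", 256, RT_NULL, RT_NULL,
                           RT_NULL, RT_NULL, UMA_ALIGN_PTR, 0);
}

void *blk_get(void)    { return uma_zalloc(blk_zone, M_WAITOK | M_ZERO); }
void  blk_put(void *p) { uma_zfree(blk_zone, p); }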
/**
* bsd type of speed to linux type.
* Note: with switch blocks, compiler can generate the optimized version for us
*/
#include <termios.h>
rt_inline long bsd_speed_to_integer(speed_t speed)
{
long speed_value;
switch (speed)
{
case B0:
speed_value = 0;
break;
case B50:
speed_value = 50;
break;
case B75:
speed_value = 75;
break;
case B110:
speed_value = 110;
break;
case B134:
speed_value = 134;
break;
case B150:
speed_value = 150;
break;
case B200:
speed_value = 200;
break;
case B300:
speed_value = 300;
break;
case B600:
speed_value = 600;
break;
case B1200:
speed_value = 1200;
break;
case B1800:
speed_value = 1800;
break;
case B2400:
speed_value = 2400;
break;
case B4800:
speed_value = 4800;
break;
case B9600:
speed_value = 9600;
break;
case B19200:
speed_value = 19200;
break;
case B38400:
speed_value = 38400;
break;
case B57600:
speed_value = 57600;
break;
case B115200:
speed_value = 115200;
break;
case B230400:
speed_value = 230400;
break;
case B460800:
speed_value = 460800;
break;
case B500000:
speed_value = 500000;
break;
case B576000:
speed_value = 576000;
break;
case B921600:
speed_value = 921600;
break;
case B1000000:
speed_value = 1000000;
break;
case B1152000:
speed_value = 1152000;
break;
case B1500000:
speed_value = 1500000;
break;
case B2000000:
speed_value = 2000000;
break;
case B2500000:
speed_value = 2500000;
break;
case B3000000:
speed_value = 3000000;
break;
case B3500000:
speed_value = 3500000;
break;
case B4000000:
speed_value = 4000000;
break;
default:
speed_value = -1; // invalid speed
break;
}
return speed_value;
}
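Usage is a straight table lookup; -1 flags a constant the table does not know:

long baud = bsd_speed_to_integer(B115200);    /* -> 115200 */
if (baud < 0)
    rt_kprintf("unsupported termios speed\n");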
/* time.h */
/* Operations on timevals. */
#define timevalclear(tvp) ((tvp)->tv_sec = (tvp)->tv_usec = 0)
#define timevalisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec)
#define timevalcmp(tvp, uvp, cmp) \
(((tvp)->tv_sec == (uvp)->tv_sec) ? ((tvp)->tv_usec cmp(uvp)->tv_usec) \
: ((tvp)->tv_sec cmp(uvp)->tv_sec))
rt_inline void getmicrotime(struct timeval *now)
{
gettimeofday(now, RT_NULL);
}
rt_inline void timevalfix(struct timeval *tv)
{
if (tv->tv_usec < 0)
{
tv->tv_sec--;
tv->tv_usec += 1000000;
}
if (tv->tv_usec >= 1000000)
{
tv->tv_sec++;
tv->tv_usec -= 1000000;
}
}
rt_inline void timevaladd(struct timeval *op1, const struct timeval *op2)
{
op1->tv_sec += op2->tv_sec;
op1->tv_usec += op2->tv_usec;
timevalfix(op1);
}
rt_inline void timevalsub(struct timeval *op1, const struct timeval *op2)
{
op1->tv_sec -= op2->tv_sec;
op1->tv_usec -= op2->tv_usec;
timevalfix(op1);
}
rt_inline rt_tick_t tvtohz(struct timeval *tv)
{
rt_tick_t rc;
rc = tv->tv_sec * RT_TICK_PER_SECOND;
rc += tv->tv_usec * RT_TICK_PER_SECOND / MICROSECOND_PER_SECOND;
return rc;
}
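A worked conversion, assuming the common RT_TICK_PER_SECOND of 100 (one tick per 10 ms):

/* tv = 1 s + 500000 us
 * rc = 1 * 100 + 500000 * 100 / 1000000 = 100 + 50 = 150 ticks */
struct timeval tv = { .tv_sec = 1, .tv_usec = 500000 };
rt_tick_t ticks = tvtohz(&tv);   /* 150 when RT_TICK_PER_SECOND == 100 */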
/* ioctl */
#define _BSD_TIOCTL(val) ((val) << 16)
enum bsd_ioctl_cmd
{
BSD_TIOCDRAIN = 1,
BSD_TIOCFLUSH,
BSD_TIOCSTART,
BSD_TIOCSTOP,
BSD_TIOCSTAT,
BSD_TIOCGDRAINWAIT,
BSD_TIOCSDRAINWAIT,
BSD_TIOCSDTR,
BSD_TIOCCDTR,
};
#ifndef TIOCGETA /* get termios struct */
#define TIOCGETA TCGETS
#endif
#ifndef TIOCSETA /* set termios struct */
#define TIOCSETA TCSETS
#endif
#ifndef TIOCSETAW /* drain output, set */
#define TIOCSETAW TCSETSW
#endif
#ifndef TIOCSETAF /* drn out, fls in, set */
#define TIOCSETAF TCSETSF
#endif
#ifndef TIOCDRAIN /* wait till output drained */
#define TIOCDRAIN _BSD_TIOCTL(BSD_TIOCDRAIN)
#endif
#ifndef TIOCFLUSH /* flush buffers */
#define TIOCFLUSH _BSD_TIOCTL(BSD_TIOCFLUSH)
#endif
#ifndef TIOCSTART /* start output, like ^Q */
#define TIOCSTART _BSD_TIOCTL(BSD_TIOCSTART)
#endif
#ifndef TIOCSTOP /* stop output, like ^S */
#define TIOCSTOP _BSD_TIOCTL(BSD_TIOCSTOP)
#endif
#ifndef TIOCSTAT /* simulate ^T status message */
#define TIOCSTAT _BSD_TIOCTL(BSD_TIOCSTAT)
#endif
#ifndef TIOCGDRAINWAIT /* get ttywait timeout */
#define TIOCGDRAINWAIT _BSD_TIOCTL(BSD_TIOCGDRAINWAIT)
#endif
#ifndef TIOCSDRAINWAIT /* set ttywait timeout */
#define TIOCSDRAINWAIT _BSD_TIOCTL(BSD_TIOCSDRAINWAIT)
#endif
#ifndef TIOCSDTR /* set data terminal ready */
#define TIOCSDTR _BSD_TIOCTL(BSD_TIOCSDTR)
#endif
#ifndef TIOCCDTR /* clear data terminal ready */
#define TIOCCDTR _BSD_TIOCTL(BSD_TIOCCDTR)
#endif
#define ENOIOCTL ENOSYS
#define NO_PID -1
/* line discipline */
#define TTYDISC 0 /* termios tty line discipline */
#define SLIPDISC 4 /* serial IP discipline */
#define PPPDISC 5 /* PPP discipline */
#define NETGRAPHDISC 6 /* Netgraph tty node discipline */
#define H4DISC 7 /* Netgraph Bluetooth H4 discipline */
/*
* Control flags - hardware control of terminal
*/
#if __BSD_VISIBLE
#define CIGNORE 0x00000001 /* ignore control flags */
#define CCTS_OFLOW 0x00010000 /* CTS flow control of output */
#define CRTSCTS (CCTS_OFLOW | CRTS_IFLOW)
#define CRTS_IFLOW 0x00020000 /* RTS flow control of input */
#define CDTR_IFLOW 0x00040000 /* DTR flow control of input */
#define CDSR_OFLOW 0x00080000 /* DSR flow control of output */
#define CCAR_OFLOW 0x00100000 /* DCD flow control of output */
#define CNO_RTSDTR 0x00200000 /* Do not assert RTS or DTR automatically */
#else
#define CIGNORE 0 /* ignore control flags */
#define CCTS_OFLOW 0 /* CTS flow control of output */
#define CRTS_IFLOW 0 /* RTS flow control of input */
#define CDTR_IFLOW 0 /* DTR flow control of input */
#define CDSR_OFLOW 0 /* DSR flow control of output */
#define CCAR_OFLOW 0 /* DCD flow control of output */
#define CNO_RTSDTR 0 /* Do not assert RTS or DTR automatically */
#endif
#ifndef CRTSCTS
#define CRTSCTS (CCTS_OFLOW | CRTS_IFLOW)
#endif
#ifndef howmany
#define howmany(x, y) (((x) + ((y)-1)) / (y))
#endif
struct ucred
{
};
#define NOCRED ((struct ucred *)0) /* no credential available */
#define FSCRED ((struct ucred *)-1) /* filesystem credential */
/* convert from open() flags to/from fflags; convert O_RD/WR to FREAD/FWRITE */
#include <fcntl.h>
#define FFLAGS(oflags) ((oflags)&O_EXEC ? (oflags) : (oflags) + 1)
#define OFLAGS(fflags) \
(((fflags) & (O_EXEC | O_PATH)) != 0 ? (fflags) : (fflags)-1)
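The +1/-1 trick works because open(2) encodes O_RDONLY/O_WRONLY/O_RDWR as 0/1/2, while the kernel wants the independently testable bits FREAD=1 and FWRITE=2. Worked out:

/* FFLAGS(O_RDONLY) == 0 + 1 == FREAD
 * FFLAGS(O_WRONLY) == 1 + 1 == FWRITE
 * FFLAGS(O_RDWR)   == 2 + 1 == FREAD | FWRITE
 * OFLAGS() undoes the +1 for descriptors opened without O_EXEC/O_PATH */
int fflags = FFLAGS(O_RDWR);   /* == 3, readable and writable */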
typedef int fo_rdwr_t(struct lwp_tty *tp, struct uio *uio,
struct ucred *active_cred, int flags,
struct rt_thread *td);
typedef int fo_truncate_t(struct lwp_tty *tp, off_t length,
struct ucred *active_cred, struct rt_thread *td);
typedef int fo_ioctl_t(struct lwp_tty *tp, rt_ubase_t com, void *data,
struct ucred *active_cred, int fflags, struct rt_thread *td);
typedef int fo_poll_t(struct lwp_tty *tp, struct rt_pollreq *rq, struct ucred *active_cred,
struct rt_thread *td);
typedef int fo_stat_t(struct lwp_tty *tp, struct stat *sb,
struct ucred *active_cred);
typedef int fo_close_t(struct lwp_tty *tp, struct rt_thread *td);
#ifdef USING_BSD_FO_EXT
typedef int fo_chmod_t(struct file *fp, mode_t mode, struct ucred *active_cred,
struct rt_thread *td);
typedef int fo_chown_t(struct file *fp, uid_t uid, gid_t gid,
struct ucred *active_cred, struct rt_thread *td);
typedef int fo_sendfile_t(struct file *fp, int sockfd, struct uio *hdr_uio,
struct uio *trl_uio, off_t offset, size_t nbytes,
off_t *sent, int flags, struct rt_thread *td);
typedef int fo_seek_t(struct file *fp, off_t offset, int whence,
struct rt_thread *td);
typedef int fo_kqfilter_t(struct file *fp, struct knote *kn);
typedef int fo_fill_kinfo_t(struct file *fp, struct kinfo_file *kif,
struct filedesc *fdp);
typedef int fo_mmap_t(struct file *fp, vm_map_t map, vm_offset_t *addr,
vm_size_t size, vm_prot_t prot, vm_prot_t cap_maxprot,
int flags, vm_ooffset_t foff, struct rt_thread *td);
typedef int fo_aio_queue_t(struct file *fp, struct kaiocb *job);
typedef int fo_add_seals_t(struct file *fp, int flags);
typedef int fo_get_seals_t(struct file *fp, int *flags);
typedef int fo_fallocate_t(struct file *fp, off_t offset, off_t len,
struct rt_thread *td);
typedef int fo_fspacectl_t(struct file *fp, int cmd, off_t *offset,
off_t *length, int flags, struct ucred *active_cred,
struct rt_thread *td);
typedef int fo_spare_t(struct file *fp);
#endif /* USING_BSD_FO_EXT */
typedef int fo_flags_t;
struct bsd_fileops
{
fo_rdwr_t *fo_read;
fo_rdwr_t *fo_write;
fo_truncate_t *fo_truncate;
fo_ioctl_t *fo_ioctl;
fo_poll_t *fo_poll;
fo_stat_t *fo_stat;
fo_close_t *fo_close;
#ifdef USING_BSD_FO_EXT
fo_chmod_t *fo_chmod;
fo_chown_t *fo_chown;
fo_sendfile_t *fo_sendfile;
fo_seek_t *fo_seek;
fo_kqfilter_t *fo_kqfilter;
fo_fill_kinfo_t *fo_fill_kinfo;
fo_mmap_t *fo_mmap;
fo_aio_queue_t *fo_aio_queue;
fo_add_seals_t *fo_add_seals;
fo_get_seals_t *fo_get_seals;
fo_fallocate_t *fo_fallocate;
fo_fspacectl_t *fo_fspacectl;
fo_spare_t *fo_spares[8]; /* Spare slots */
#endif
fo_flags_t fo_flags; /* DFLAG_* below */
};
#define DFLAG_PASSABLE 0x01 /* may be passed via unix sockets. */
#define DFLAG_SEEKABLE 0x02 /* seekable / nonsequential */
#endif /* __LWP_TTY_BSD_PORTING_H__ */

View File

@ -0,0 +1,81 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _SYS_TTYDISC_H_
#define _SYS_TTYDISC_H_
#ifndef __LWP_TERMINAL_H__
#error "can only be included through <terminal.h>"
#endif /* !__LWP_TERMINAL_H__ */
#include <rtdef.h>
struct rt_wqueue;
struct rt_thread;
struct lwp_tty;
struct uio;
/* Top half routines. */
void ttydisc_open(struct lwp_tty *tp);
void ttydisc_close(struct lwp_tty *tp);
int ttydisc_read(struct lwp_tty *tp, struct uio *uio, int ioflag);
int ttydisc_write(struct lwp_tty *tp, struct uio *uio, int ioflag);
void ttydisc_optimize(struct lwp_tty *tp);
/* Bottom half routines. */
void ttydisc_modem(struct lwp_tty *tp, int open);
#define ttydisc_can_bypass(tp) ((tp)->t_flags & TF_BYPASS)
int ttydisc_rint(struct lwp_tty *tp, char c, int flags);
size_t ttydisc_rint_simple(struct lwp_tty *tp, const void *buf, size_t len);
size_t ttydisc_rint_bypass(struct lwp_tty *tp, const void *buf, size_t len);
void ttydisc_rint_done(struct lwp_tty *tp);
size_t ttydisc_rint_poll(struct lwp_tty *tp);
size_t ttydisc_getc(struct lwp_tty *tp, void *buf, size_t len);
int ttydisc_getc_uio(struct lwp_tty *tp, struct uio *uio);
size_t ttydisc_getc_poll(struct lwp_tty *tp);
/* Error codes for ttydisc_rint(). */
#define TRE_FRAMING 0x01
#define TRE_PARITY 0x02
#define TRE_OVERRUN 0x04
#define TRE_BREAK 0x08
#endif /* !_SYS_TTYDISC_H_ */

View File

@ -0,0 +1,180 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#include "bsd_porting.h"
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _SYS_TTYQUEUE_H_
#define _SYS_TTYQUEUE_H_
#ifndef __LWP_TERMINAL_H__
#error "can only be included through <teminal.h>"
#endif /* !__LWP_TERMINAL_H__ */
struct lwp_tty;
struct ttyinq_block;
struct ttyoutq_block;
struct uio;
/* Data input queue. */
struct ttyinq
{
struct ttyinq_block *ti_firstblock;
struct ttyinq_block *ti_startblock;
struct ttyinq_block *ti_reprintblock;
struct ttyinq_block *ti_lastblock;
unsigned int ti_begin;
unsigned int ti_linestart;
unsigned int ti_reprint;
unsigned int ti_end;
unsigned int ti_nblocks;
unsigned int ti_quota;
};
#define TTYINQ_DATASIZE 128
/* Data output queue. */
struct ttyoutq
{
struct ttyoutq_block *to_firstblock;
struct ttyoutq_block *to_lastblock;
unsigned int to_begin;
unsigned int to_end;
unsigned int to_nblocks;
unsigned int to_quota;
};
#define TTYOUTQ_DATASIZE (256 - sizeof(struct ttyoutq_block *))
/* Input queue handling routines. */
int ttyinq_setsize(struct ttyinq *ti, struct lwp_tty *tp, size_t len);
void ttyinq_free(struct ttyinq *ti);
int ttyinq_read_uio(struct ttyinq *ti, struct lwp_tty *tp, struct uio *uio,
size_t readlen, size_t flushlen);
size_t ttyinq_write(struct ttyinq *ti, const void *buf, size_t len, int quote);
int ttyinq_write_nofrag(struct ttyinq *ti, const void *buf, size_t len,
int quote);
void ttyinq_canonicalize(struct ttyinq *ti);
size_t ttyinq_findchar(struct ttyinq *ti, const char *breakc, size_t maxlen,
char *lastc);
void ttyinq_flush(struct ttyinq *ti);
int ttyinq_peekchar(struct ttyinq *ti, char *c, int *quote);
void ttyinq_unputchar(struct ttyinq *ti);
void ttyinq_reprintpos_set(struct ttyinq *ti);
void ttyinq_reprintpos_reset(struct ttyinq *ti);
rt_inline size_t ttyinq_getsize(struct ttyinq *ti)
{
return (ti->ti_nblocks * TTYINQ_DATASIZE);
}
rt_inline size_t ttyinq_getallocatedsize(struct ttyinq *ti)
{
return (ti->ti_quota * TTYINQ_DATASIZE);
}
rt_inline size_t ttyinq_bytesleft(struct ttyinq *ti)
{
size_t len;
/* Make sure the usage never exceeds the length. */
len = ti->ti_nblocks * TTYINQ_DATASIZE;
MPASS(len >= ti->ti_end);
return (len - ti->ti_end);
}
rt_inline size_t ttyinq_bytescanonicalized(struct ttyinq *ti)
{
MPASS(ti->ti_begin <= ti->ti_linestart);
return (ti->ti_linestart - ti->ti_begin);
}
rt_inline size_t ttyinq_bytesline(struct ttyinq *ti)
{
MPASS(ti->ti_linestart <= ti->ti_end);
return (ti->ti_end - ti->ti_linestart);
}
/* Input buffer iteration. */
typedef void ttyinq_line_iterator_t(void *data, char c, int flags);
void ttyinq_line_iterate_from_linestart(struct ttyinq *ti,
ttyinq_line_iterator_t *iterator,
void *data);
void ttyinq_line_iterate_from_reprintpos(struct ttyinq *ti,
ttyinq_line_iterator_t *iterator,
void *data);
/* Output queue handling routines. */
void ttyoutq_flush(struct ttyoutq *to);
int ttyoutq_setsize(struct ttyoutq *to, struct lwp_tty *tp, size_t len);
void ttyoutq_free(struct ttyoutq *to);
size_t ttyoutq_read(struct ttyoutq *to, void *buf, size_t len);
int ttyoutq_read_uio(struct ttyoutq *to, struct lwp_tty *tp, struct uio *uio);
size_t ttyoutq_write(struct ttyoutq *to, const void *buf, size_t len);
int ttyoutq_write_nofrag(struct ttyoutq *to, const void *buf, size_t len);
rt_inline size_t ttyoutq_getsize(struct ttyoutq *to)
{
return (to->to_nblocks * TTYOUTQ_DATASIZE);
}
rt_inline size_t ttyoutq_getallocatedsize(struct ttyoutq *to)
{
return (to->to_quota * TTYOUTQ_DATASIZE);
}
rt_inline size_t ttyoutq_bytesleft(struct ttyoutq *to)
{
size_t len;
/* Make sure the usage never exceeds the length. */
len = to->to_nblocks * TTYOUTQ_DATASIZE;
MPASS(len >= to->to_end);
return (len - to->to_end);
}
rt_inline size_t ttyoutq_bytesused(struct ttyoutq *to)
{
return (to->to_end - to->to_begin);
}
#endif /* !_SYS_TTYQUEUE_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,711 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* (tty_compat.c)
* The compatible layer which interacts with process management core (lwp)
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#define DBG_TAG "lwp.tty"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "../tty_config.h"
#include "../tty_internal.h"
#include "../terminal.h"
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 1994-1995 Søren Schmidt
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* are the tty and the session leader already bound? */
static rt_bool_t _is_already_binding(lwp_tty_t tp, rt_lwp_t p)
{
rt_bool_t rc;
rt_processgroup_t pgrp = p->pgrp;
/* lwp is already locked */
RT_ASSERT(pgrp);
/* Note: pgrp->session is constant after process group is created */
if (tp->t_session && tp->t_session == pgrp->session)
{
rc = RT_TRUE;
}
else
{
rc = RT_FALSE;
}
return rc;
}
static rt_bool_t _is_tty_or_sess_busy(lwp_tty_t tp, rt_lwp_t p)
{
rt_bool_t rc;
rt_session_t sess = p->pgrp->session;
SESS_LOCK(sess);
if (sess->ctty)
{
rc = RT_TRUE;
}
else if (tp->t_session)
{
/**
* TODO: allow the TTY to be stolen if the session leader is killed
* while its resources have not yet been collected
*/
if (tp->t_session->leader == RT_NULL)
rc = RT_FALSE;
else
rc = RT_TRUE;
}
else
{
rc = RT_FALSE;
}
SESS_UNLOCK(sess);
return rc;
}
int lwp_tty_bg_stop(struct lwp_tty *tp, struct rt_condvar *cv)
{
int error;
int revokecnt = tp->t_revokecnt;
rt_lwp_t self_lwp;
rt_thread_t header_thr;
rt_thread_t cur_thr = rt_thread_self();
int jobctl_stopped;
self_lwp = cur_thr->lwp;
RT_ASSERT(self_lwp);
jobctl_stopped = self_lwp->jobctl_stopped;
tty_lock_assert(tp, MA_OWNED | MA_NOTRECURSED);
MPASS(!tty_gone(tp));
LWP_LOCK(self_lwp);
header_thr = rt_list_entry(self_lwp->t_grp.prev, struct rt_thread, sibling);
if (!jobctl_stopped && header_thr == cur_thr &&
cur_thr->sibling.prev == &self_lwp->t_grp)
{
/* update lwp status */
jobctl_stopped = self_lwp->jobctl_stopped = RT_TRUE;
}
LWP_UNLOCK(self_lwp);
error = cv_wait(cv, tp->t_mtx);
if (jobctl_stopped)
{
self_lwp->jobctl_stopped = RT_FALSE;
}
/* Bail out when the device slipped away. */
if (tty_gone(tp))
return -ENXIO;
/* Restart the system call when we may have been revoked. */
if (tp->t_revokecnt != revokecnt)
return -ERESTART;
return error;
}
/* process management */
int lwp_tty_set_ctrl_proc(lwp_tty_t tp, rt_thread_t td)
{
int rc = -1;
struct rt_lwp *p = td->lwp;
tty_unlock(tp);
LWP_LOCK(p);
tty_lock(tp);
if (is_sess_leader(p))
{
if (_is_already_binding(tp, p))
{
rc = 0;
}
else if (_is_tty_or_sess_busy(tp, p))
{
rc = -EPERM;
}
else
{
/**
* Binding controlling process
* note: p->pgrp is protected by lwp lock;
* pgrp->session is always constant.
*/
tp->t_session = p->pgrp->session;
tp->t_session->ctty = tp;
tp->t_sessioncnt++;
/* Assign foreground process group */
tp->t_pgrp = p->pgrp;
p->term_ctrlterm = RT_TRUE;
LOG_D("%s(sid=%d)", __func__, tp->t_session->sid);
rc = 0;
}
}
else
{
rc = -EPERM;
}
LWP_UNLOCK(p);
return rc;
}
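/*
 * For reference, the classic POSIX userspace sequence that ends up here is
 * (sketch only; "/dev/ttyS0" is just an example path, and the dispatch
 * through the tty device methods is not shown in this file):
 *
 *     setsid();                            // become a session leader first
 *     int fd = open("/dev/ttyS0", O_RDWR);
 *     ioctl(fd, TIOCSCTTY, 0);             // bind the controlling terminal
 */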
int lwp_tty_assign_foreground(lwp_tty_t tp, rt_thread_t td, int pgid)
{
struct rt_processgroup *pg;
rt_lwp_t cur_lwp = td->lwp;
tty_unlock(tp);
pg = lwp_pgrp_find_and_inc_ref(pgid);
if (pg == NULL || cur_lwp == NULL)
{
/* drop the reference taken above if only the lwp lookup failed */
if (pg != NULL)
lwp_pgrp_dec_ref(pg);
tty_lock(tp);
return -EPERM;
}
else
{
PGRP_LOCK(pg);
if (pg->sid != cur_lwp->sid)
{
PGRP_UNLOCK(pg);
lwp_pgrp_dec_ref(pg);
LOG_D("%s: NoPerm current process (pid=%d, pgid=%d, sid=%d), "
"tagget group (pgid=%d, sid=%d)", __func__,
cur_lwp->pid, cur_lwp->pgid, cur_lwp->sid, pgid, pg->sid);
tty_lock(tp);
return -EPERM;
}
}
tty_lock(tp);
/**
* Determine if this TTY is the controlling TTY after
* relocking the TTY.
*/
if (!tty_is_ctty(tp, td->lwp))
{
PGRP_UNLOCK(pg);
LOG_D("%s: NoCTTY current process (pid=%d, pgid=%d, sid=%d), "
"target group (pgid=%d, sid=%d)", __func__,
cur_lwp->pid, cur_lwp->pgid, cur_lwp->sid, pgid, pg->sid);
lwp_pgrp_dec_ref(pg);
return -ENOTTY;
}
tp->t_pgrp = pg;
PGRP_UNLOCK(pg);
lwp_pgrp_dec_ref(pg);
/* Wake up the background process groups. */
cv_broadcast(&tp->t_bgwait);
LOG_D("%s: Foreground group %p (pgid=%d)", __func__, tp->t_pgrp,
tp->t_pgrp ? tp->t_pgrp->pgid : -1);
return 0;
}
/**
* Signalling processes.
*/
void lwp_tty_signal_sessleader(struct lwp_tty *tp, int sig)
{
struct rt_lwp *p;
struct rt_session *s;
tty_assert_locked(tp);
MPASS(sig >= 1 && sig < _LWP_NSIG);
/* Make signals start output again. */
tp->t_flags &= ~TF_STOPPED;
tp->t_termios.c_lflag &= ~FLUSHO;
/**
* Load s.leader exactly once to avoid race where s.leader is
* set to NULL by a concurrent invocation of killjobc() by the
* session leader. Note that we are not holding t_session's
* lock for the read.
*/
if ((s = tp->t_session) != NULL &&
(p = (void *)rt_atomic_load((rt_atomic_t *)&s->leader)) != NULL)
{
lwp_signal_kill(p, sig, SI_KERNEL, 0);
}
}
void lwp_tty_signal_pgrp(struct lwp_tty *tp, int sig)
{
tty_assert_locked(tp);
MPASS(sig >= 1 && sig < _LWP_NSIG);
/* Make signals start output again. */
tp->t_flags &= ~TF_STOPPED;
tp->t_termios.c_lflag &= ~FLUSHO;
#ifdef USING_BSD_SIGINFO
if (sig == SIGINFO && !(tp->t_termios.c_lflag & NOKERNINFO))
tty_info(tp);
#endif /* USING_BSD_SIGINFO */
if (tp->t_pgrp != NULL)
{
PGRP_LOCK(tp->t_pgrp);
lwp_pgrp_signal_kill(tp->t_pgrp, sig, SI_KERNEL, 0);
PGRP_UNLOCK(tp->t_pgrp);
}
}
/* bsd_ttydev_methods.d_ioctl */
/* These helpers return 0 on success or -EFAULT, hence the signed return type. */
rt_inline int _copy_to_user(void *to, void *from, size_t n)
{
return lwp_put_to_user(to, from, n) == n ? 0 : -EFAULT;
}
rt_inline int _copy_from_user(void *to, void *from, size_t n)
{
return lwp_get_from_user(to, from, n) == n ? 0 : -EFAULT;
}
static void termios_to_termio(struct termios *tios, struct termio *tio)
{
memset(tio, 0, sizeof(*tio));
tio->c_iflag = tios->c_iflag;
tio->c_oflag = tios->c_oflag;
tio->c_cflag = tios->c_cflag;
tio->c_lflag = tios->c_lflag;
tio->c_line = tios->c_line;
memcpy(tio->c_cc, tios->c_cc, NCC);
}
static void termio_to_termios(struct termio *tio, struct termios *tios)
{
int i;
tios->c_iflag = tio->c_iflag;
tios->c_oflag = tio->c_oflag;
tios->c_cflag = tio->c_cflag;
tios->c_lflag = tio->c_lflag;
for (i = NCC; i < NCCS; i++)
tios->c_cc[i] = _POSIX_VDISABLE;
memcpy(tios->c_cc, tio->c_cc, NCC);
}
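/*
 * Note: struct termio is the legacy interface and only carries NCC control
 * characters, while struct termios carries NCCS of them.  The conversion
 * above therefore pre-fills the surplus termios slots with _POSIX_VDISABLE
 * before copying the first NCC entries back in, so the extra control
 * characters stay disabled rather than inheriting stale values.
 */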
#define IOCTL(cmd, data, fflags, td) \
bsd_ttydev_methods.d_ioctl(tp, cmd, data, fflags, td)
int lwp_tty_ioctl_adapter(lwp_tty_t tp, int cmd, int oflags, void *args, rt_thread_t td)
{
long fflags = FFLAGS(oflags);
struct termios tios;
struct termio tio;
int error;
LOG_D("%s(cmd=0x%x, args=%p)", __func__, cmd, args);
switch (cmd & 0xffff)
{
case TCGETS:
error = IOCTL(TIOCGETA, (rt_caddr_t)&tios, fflags, td);
if (error)
break;
error = _copy_to_user(args, &tios, sizeof(tios));
break;
case TCSETS:
error = _copy_from_user(&tios, args, sizeof(tios));
if (error)
break;
error = (IOCTL(TIOCSETA, (rt_caddr_t)&tios, fflags, td));
break;
case TCSETSW:
error = _copy_from_user(&tios, args, sizeof(tios));
if (error)
break;
error = (IOCTL(TIOCSETAW, (rt_caddr_t)&tios, fflags, td));
break;
case TCSETSF:
error = _copy_from_user(&tios, args, sizeof(tios));
if (error)
break;
error = (IOCTL(TIOCSETAF, (rt_caddr_t)&tios, fflags, td));
break;
case TCGETA:
error = IOCTL(TIOCGETA, (rt_caddr_t)&tios, fflags, td);
if (error)
break;
termios_to_termio(&tios, &tio);
error = _copy_to_user((void *)args, &tio, sizeof(tio));
break;
case TCSETA:
error = _copy_from_user(&tio, (void *)args, sizeof(tio));
if (error)
break;
termio_to_termios(&tio, &tios);
error = (IOCTL(TIOCSETA, (rt_caddr_t)&tios, fflags, td));
break;
case TCSETAW:
error = _copy_from_user(&tio, (void *)args, sizeof(tio));
if (error)
break;
termio_to_termios(&tio, &tios);
error = (IOCTL(TIOCSETAW, (rt_caddr_t)&tios, fflags, td));
break;
case TCSETAF:
error = _copy_from_user(&tio, (void *)args, sizeof(tio));
if (error)
break;
termio_to_termios(&tio, &tios);
error = (IOCTL(TIOCSETAF, (rt_caddr_t)&tios, fflags, td));
break;
case TCSBRK:
if (args != 0)
{
/**
* Linux manual: SVr4, UnixWare, Solaris, and Linux treat
* tcsendbreak(fd,arg) with nonzero arg like tcdrain(fd).
*/
error = IOCTL(TIOCDRAIN, (rt_caddr_t)&tios, fflags, td);
}
else
{
/**
* Linux manual: If the terminal is using asynchronous serial
* data transmission, and arg is zero, then send a break (a
* stream of zero bits) for between 0.25 and 0.5 seconds.
*/
LOG_D("%s: ioctl TCSBRK arg 0 not implemented", __func__);
error = -ENOSYS;
}
break;
#ifdef USING_BSD_IOCTL_EXT
/* Software flow control */
case TCXONC: {
switch (args->arg)
{
case TCOOFF:
args->cmd = TIOCSTOP;
break;
case TCOON:
args->cmd = TIOCSTART;
break;
case TCIOFF:
case TCION: {
int c;
struct write_args wr;
error = IOCTL(TIOCGETA, (rt_caddr_t)&tios, fflags,
td);
if (error)
break;
fdrop(fp, td);
c = (args->arg == TCIOFF) ? VSTOP : VSTART;
c = tios.c_cc[c];
if (c != _POSIX_VDISABLE)
{
wr.fd = args->fd;
wr.buf = &c;
wr.nbyte = sizeof(c);
return (sys_write(td, &wr));
}
else
return 0;
}
default:
fdrop(fp, td);
return -EINVAL;
}
args->arg = 0;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
}
#endif /* USING_BSD_IOCTL_EXT */
case TCFLSH: {
int val;
error = 0;
switch ((rt_base_t)args)
{
case TCIFLUSH:
val = FREAD;
break;
case TCOFLUSH:
val = FWRITE;
break;
case TCIOFLUSH:
val = FREAD | FWRITE;
break;
default:
error = -EINVAL;
break;
}
if (!error)
error = (IOCTL(TIOCFLUSH, (rt_caddr_t)&val, fflags, td));
break;
}
#ifdef USING_BSD_IOCTL_EXT
case TIOCEXCL:
args->cmd = TIOCEXCL;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCNXCL:
args->cmd = TIOCNXCL;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
#endif /* USING_BSD_IOCTL_EXT */
/* Controlling terminal */
case TIOCSCTTY:
case TIOCNOTTY:
/* Process group and session ID */
case TIOCGPGRP:
case TIOCSPGRP:
case TIOCGSID:
/* TIOCOUTQ */
/* TIOCSTI */
case TIOCGWINSZ:
case TIOCSWINSZ:
error = IOCTL(cmd, (rt_caddr_t)args, fflags, td);
break;
#ifdef USING_BSD_IOCTL_EXT
case TIOCMGET:
args->cmd = TIOCMGET;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCMBIS:
args->cmd = TIOCMBIS;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCMBIC:
args->cmd = TIOCMBIC;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCMSET:
args->cmd = TIOCMSET;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
/* TIOCGSOFTCAR */
/* TIOCSSOFTCAR */
case FIONREAD: /* TIOCINQ */
args->cmd = FIONREAD;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
/* TIOCLINUX */
case TIOCCONS:
args->cmd = TIOCCONS;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCGSERIAL: {
struct linux_serial_struct lss;
bzero(&lss, sizeof(lss));
lss.type = PORT_16550A;
lss.flags = 0;
lss.close_delay = 0;
error = copyout(&lss, (void *)args->arg, sizeof(lss));
break;
}
case TIOCSSERIAL: {
struct linux_serial_struct lss;
error = copyin((void *)args->arg, &lss, sizeof(lss));
if (error)
break;
/* XXX - It really helps to have an implementation that
* does nothing. NOT!
*/
error = 0;
break;
}
case TIOCPKT:
args->cmd = TIOCPKT;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case FIONBIO:
args->cmd = FIONBIO;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCSETD: {
int line;
switch (args->arg)
{
case N_TTY:
line = TTYDISC;
break;
case N_SLIP:
line = SLIPDISC;
break;
case N_PPP:
line = PPPDISC;
break;
default:
fdrop(fp, td);
return -EINVAL;
}
error = (ioctl_emit(TIOCSETD, (rt_caddr_t)&line, fflags, td));
break;
}
case TIOCGETD: {
int linux_line;
int bsd_line = TTYDISC;
error =
ioctl_emit(TIOCGETD, (rt_caddr_t)&bsd_line, fflags, td);
if (error)
break;
switch (bsd_line)
{
case TTYDISC:
linux_line = N_TTY;
break;
case SLIPDISC:
linux_line = N_SLIP;
break;
case PPPDISC:
linux_line = N_PPP;
break;
default:
fdrop(fp, td);
return -EINVAL;
}
error = (copyout(&linux_line, (void *)args->arg, sizeof(int)));
break;
}
/* TCSBRKP */
/* TIOCTTYGSTRUCT */
case FIONCLEX:
args->cmd = FIONCLEX;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case FIOCLEX:
args->cmd = FIOCLEX;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case FIOASYNC:
args->cmd = FIOASYNC;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
/* TIOCSERCONFIG */
/* TIOCSERGWILD */
/* TIOCSERSWILD */
/* TIOCGLCKTRMIOS */
/* TIOCSLCKTRMIOS */
case TIOCSBRK:
args->cmd = TIOCSBRK;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCCBRK:
args->cmd = TIOCCBRK;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
case TIOCGPTN: {
int nb;
error = ioctl_emit(TIOCGPTN, (rt_caddr_t)&nb, fflags, td);
if (!error)
error = copyout(&nb, (void *)args->arg, sizeof(int));
break;
}
case TIOCGPTPEER:
linux_msg(td, "unsupported ioctl TIOCGPTPEER");
error = -ENOIOCTL;
break;
case TIOCSPTLCK:
/*
* Our unlockpt() does nothing. Check that fd refers
* to a pseudo-terminal master device.
*/
args->cmd = TIOCPTMASTER;
error = (sys_ioctl(td, (struct ioctl_args *)args));
break;
#endif /* USING_BSD_IOCTL_EXT */
/**
 * These commands are handled by the current devfs implementation, and we
 * don't want to log them.
 */
case F_DUPFD:
case F_DUPFD_CLOEXEC:
case F_GETFD:
case F_SETFD:
case F_GETFL:
case F_SETFL:
/* fall back to fs */
error = -ENOIOCTL;
break;
default:
LOG_I("%s: unhandled command 0x%x", __func__, cmd);
error = -ENOSYS;
break;
}
return (error);
}

View File

@ -0,0 +1,507 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#include "../bsd_porting.h"
#include "../terminal.h"
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* TTY input queue buffering.
*
* Unlike the output queue, the input queue has more features that are
* needed to properly implement various features offered by the TTY
* interface:
*
* - Data can be removed from the tail of the queue, which is used to
* implement backspace.
* - Once in a while, input has to be `canonicalized'. When ICANON is
* turned on, this will be done after a CR has been inserted.
* Otherwise, it should be done after any character has been inserted.
* - The input queue can store one bit per byte, called the quoting bit.
* This bit is used by TTYDISC to make backspace work on quoted
* characters.
*
* In most cases, there is probably less input than output, so unlike
* the outq, we'll stick to 128 byte blocks here.
*/
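/*
 * Illustrative sketch (kept out of the build): how a line discipline might
 * drive this queue, assuming the caller already holds the tty lock.  The
 * process_input() helper below is hypothetical and not part of this file.
 */
#if 0
static void process_input(struct ttyinq *ti, char c)
{
    char last;
    int quote;

    if (c == '\b')
    {
        /* Backspace: drop the last byte of the line being edited. */
        if (ttyinq_peekchar(ti, &last, &quote) == 0)
            ttyinq_unputchar(ti);
    }
    else
    {
        /* Store the byte unquoted; mark the line complete on CR. */
        ttyinq_write_nofrag(ti, &c, 1, 0);
        if (c == '\r')
            ttyinq_canonicalize(ti);
    }
}
#endif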
static int ttyinq_flush_secure = 1;
#define TTYINQ_QUOTESIZE (TTYINQ_DATASIZE / BMSIZE)
#define BMSIZE 32
#define GETBIT(tib, boff) \
((tib)->tib_quotes[(boff) / BMSIZE] & (1 << ((boff) % BMSIZE)))
#define SETBIT(tib, boff) \
((tib)->tib_quotes[(boff) / BMSIZE] |= (1 << ((boff) % BMSIZE)))
#define CLRBIT(tib, boff) \
((tib)->tib_quotes[(boff) / BMSIZE] &= ~(1 << ((boff) % BMSIZE)))
struct ttyinq_block
{
struct ttyinq_block *tib_prev;
struct ttyinq_block *tib_next;
uint32_t tib_quotes[TTYINQ_QUOTESIZE];
char tib_data[TTYINQ_DATASIZE];
};
static uma_zone_t ttyinq_zone;
#define TTYINQ_INSERT_TAIL(ti, tib) \
do \
{ \
if (ti->ti_end == 0) \
{ \
tib->tib_prev = NULL; \
tib->tib_next = ti->ti_firstblock; \
ti->ti_firstblock = tib; \
} \
else \
{ \
tib->tib_prev = ti->ti_lastblock; \
tib->tib_next = ti->ti_lastblock->tib_next; \
ti->ti_lastblock->tib_next = tib; \
} \
if (tib->tib_next != NULL) \
tib->tib_next->tib_prev = tib; \
ti->ti_nblocks++; \
} while (0)
#define TTYINQ_REMOVE_HEAD(ti) \
do \
{ \
ti->ti_firstblock = ti->ti_firstblock->tib_next; \
if (ti->ti_firstblock != NULL) \
ti->ti_firstblock->tib_prev = NULL; \
ti->ti_nblocks--; \
} while (0)
#define TTYINQ_RECYCLE(ti, tib) \
do \
{ \
if (ti->ti_quota <= ti->ti_nblocks) \
uma_zfree(ttyinq_zone, tib); \
else \
TTYINQ_INSERT_TAIL(ti, tib); \
} while (0)
int ttyinq_setsize(struct ttyinq *ti, struct lwp_tty *tp, size_t size)
{
struct ttyinq_block *tib;
ti->ti_quota = howmany(size, TTYINQ_DATASIZE);
while (ti->ti_quota > ti->ti_nblocks)
{
/*
* List is getting bigger.
* Add new blocks to the tail of the list.
*
* We must unlock the TTY temporarily, because we need
* to allocate memory. This won't be a problem, because
* in the worst case, another thread ends up here, which
* may cause us to allocate too many blocks, but this
* will be caught by the loop below.
*/
tty_unlock(tp);
tib = uma_zalloc(ttyinq_zone, M_WAITOK);
tty_lock(tp);
if (tty_gone(tp))
{
uma_zfree(ttyinq_zone, tib);
return -ENXIO;
}
TTYINQ_INSERT_TAIL(ti, tib);
}
return 0;
}
void ttyinq_free(struct ttyinq *ti)
{
struct ttyinq_block *tib;
ttyinq_flush(ti);
ti->ti_quota = 0;
while ((tib = ti->ti_firstblock) != NULL)
{
TTYINQ_REMOVE_HEAD(ti);
uma_zfree(ttyinq_zone, tib);
}
MPASS(ti->ti_nblocks == 0);
}
int ttyinq_read_uio(struct ttyinq *ti, struct lwp_tty *tp, struct uio *uio,
size_t rlen, size_t flen)
{
MPASS(rlen <= uio->uio_resid);
while (rlen > 0)
{
int error;
struct ttyinq_block *tib;
size_t cbegin, cend, clen;
/* See if there still is data. */
if (ti->ti_begin == ti->ti_linestart)
return 0;
tib = ti->ti_firstblock;
if (tib == NULL)
return 0;
/*
* The end address should be the lowest of these three:
* - The write pointer
* - The blocksize - we can't read beyond the block
* - The end address if we could perform the full read
*/
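/*
 * Worked instance (TTYINQ_DATASIZE is 128, per the note at the top of
 * this file): with ti_begin == 100, ti_linestart == 300 and rlen == 500,
 * cend = MIN(MIN(300, 600), 128) = 128, so this pass stops at the block
 * boundary and copies clen == 28 bytes.
 */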
cbegin = ti->ti_begin;
cend = MIN(MIN(ti->ti_linestart, ti->ti_begin + rlen), TTYINQ_DATASIZE);
clen = cend - cbegin;
MPASS(clen >= flen);
rlen -= clen;
/*
* We can prevent buffering in some cases:
* - We need to read the block until the end.
* - We don't need to read the block until the end, but
* there is no data beyond it, which allows us to move
* the write pointer to a new block.
*/
if (cend == TTYINQ_DATASIZE || cend == ti->ti_end)
{
/*
* Fast path: zero copy. Remove the first block,
* so we can unlock the TTY temporarily.
*/
TTYINQ_REMOVE_HEAD(ti);
ti->ti_begin = 0;
/*
* Because we remove the first block, we must
* fix up the block offsets.
*/
#define CORRECT_BLOCK(t) \
do \
{ \
if (t <= TTYINQ_DATASIZE) \
t = 0; \
else \
t -= TTYINQ_DATASIZE; \
} while (0)
CORRECT_BLOCK(ti->ti_linestart);
CORRECT_BLOCK(ti->ti_reprint);
CORRECT_BLOCK(ti->ti_end);
#undef CORRECT_BLOCK
/*
* Temporary unlock and copy the data to
* userspace. We may need to flush trailing
* bytes, like EOF characters.
*/
tty_unlock(tp);
error = uiomove(tib->tib_data + cbegin, clen - flen, uio);
tty_lock(tp);
/* Block can now be readded to the list. */
TTYINQ_RECYCLE(ti, tib);
}
else
{
char ob[TTYINQ_DATASIZE - 1];
/*
* Slow path: store data in a temporary buffer.
*/
memcpy(ob, tib->tib_data + cbegin, clen - flen);
ti->ti_begin += clen;
MPASS(ti->ti_begin < TTYINQ_DATASIZE);
/* Temporary unlock and copy the data to userspace. */
tty_unlock(tp);
error = uiomove(ob, clen - flen, uio);
tty_lock(tp);
}
if (error != 0)
return error;
if (tty_gone(tp))
return -ENXIO;
}
return 0;
}
rt_inline void ttyinq_set_quotes(struct ttyinq_block *tib, size_t offset,
size_t length, int value)
{
if (value)
{
/* Set the bits. */
for (; length > 0; length--, offset++) SETBIT(tib, offset);
}
else
{
/* Unset the bits. */
for (; length > 0; length--, offset++) CLRBIT(tib, offset);
}
}
size_t ttyinq_write(struct ttyinq *ti, const void *buf, size_t nbytes,
int quote)
{
const char *cbuf = buf;
struct ttyinq_block *tib;
unsigned int boff;
size_t l;
while (nbytes > 0)
{
boff = ti->ti_end % TTYINQ_DATASIZE;
if (ti->ti_end == 0)
{
/* First time we're being used or drained. */
MPASS(ti->ti_begin == 0);
tib = ti->ti_firstblock;
if (tib == NULL)
{
/* Queue has no blocks. */
break;
}
ti->ti_lastblock = tib;
}
else if (boff == 0)
{
/* We reached the end of this block on last write. */
tib = ti->ti_lastblock->tib_next;
if (tib == NULL)
{
/* We've reached the watermark. */
break;
}
ti->ti_lastblock = tib;
}
else
{
tib = ti->ti_lastblock;
}
/* Don't copy more than was requested. */
l = MIN(nbytes, TTYINQ_DATASIZE - boff);
MPASS(l > 0);
memcpy(tib->tib_data + boff, cbuf, l);
/* Set the quoting bits for the proper region. */
ttyinq_set_quotes(tib, boff, l, quote);
cbuf += l;
nbytes -= l;
ti->ti_end += l;
}
return (cbuf - (const char *)buf);
}
int ttyinq_write_nofrag(struct ttyinq *ti, const void *buf, size_t nbytes,
int quote)
{
size_t ret __unused;
if (ttyinq_bytesleft(ti) < nbytes)
return -1;
/* We should always be able to write it back. */
ret = ttyinq_write(ti, buf, nbytes, quote);
MPASS(ret == nbytes);
return 0;
}
void ttyinq_canonicalize(struct ttyinq *ti)
{
ti->ti_linestart = ti->ti_reprint = ti->ti_end;
ti->ti_startblock = ti->ti_reprintblock = ti->ti_lastblock;
}
size_t ttyinq_findchar(struct ttyinq *ti, const char *breakc, size_t maxlen,
char *lastc)
{
struct ttyinq_block *tib = ti->ti_firstblock;
unsigned int boff = ti->ti_begin;
unsigned int bend =
MIN(MIN(TTYINQ_DATASIZE, ti->ti_linestart), ti->ti_begin + maxlen);
MPASS(maxlen > 0);
if (tib == NULL)
return 0;
while (boff < bend)
{
if (strchr(breakc, tib->tib_data[boff]) && !GETBIT(tib, boff))
{
*lastc = tib->tib_data[boff];
return (boff - ti->ti_begin + 1);
}
boff++;
}
/* Not found - just process the entire block. */
return (bend - ti->ti_begin);
}
void ttyinq_flush(struct ttyinq *ti)
{
struct ttyinq_block *tib;
ti->ti_begin = 0;
ti->ti_linestart = 0;
ti->ti_reprint = 0;
ti->ti_end = 0;
/* Zero all data in the input queue to get rid of passwords. */
if (ttyinq_flush_secure)
{
for (tib = ti->ti_firstblock; tib != NULL; tib = tib->tib_next)
memset(&tib->tib_data, 0, sizeof tib->tib_data);
}
}
int ttyinq_peekchar(struct ttyinq *ti, char *c, int *quote)
{
unsigned int boff;
struct ttyinq_block *tib = ti->ti_lastblock;
if (ti->ti_linestart == ti->ti_end)
return -1;
MPASS(ti->ti_end > 0);
boff = (ti->ti_end - 1) % TTYINQ_DATASIZE;
*c = tib->tib_data[boff];
*quote = GETBIT(tib, boff);
return 0;
}
void ttyinq_unputchar(struct ttyinq *ti)
{
MPASS(ti->ti_linestart < ti->ti_end);
if (--ti->ti_end % TTYINQ_DATASIZE == 0)
{
/* Roll back to the previous block. */
ti->ti_lastblock = ti->ti_lastblock->tib_prev;
/*
* This can only fail if we are unputchar()'ing the
* first character in the queue.
*/
MPASS((ti->ti_lastblock == NULL) == (ti->ti_end == 0));
}
}
void ttyinq_reprintpos_set(struct ttyinq *ti)
{
ti->ti_reprint = ti->ti_end;
ti->ti_reprintblock = ti->ti_lastblock;
}
void ttyinq_reprintpos_reset(struct ttyinq *ti)
{
ti->ti_reprint = ti->ti_linestart;
ti->ti_reprintblock = ti->ti_startblock;
}
static void ttyinq_line_iterate(struct ttyinq *ti,
ttyinq_line_iterator_t *iterator, void *data,
unsigned int offset, struct ttyinq_block *tib)
{
unsigned int boff;
/* Use the proper block when we're at the queue head. */
if (offset == 0)
tib = ti->ti_firstblock;
/* Iterate all characters and call the iterator function. */
for (; offset < ti->ti_end; offset++)
{
boff = offset % TTYINQ_DATASIZE;
MPASS(tib != NULL);
/* Call back the iterator function. */
iterator(data, tib->tib_data[boff], GETBIT(tib, boff));
/* Last byte iterated - go to the next block. */
if (boff == TTYINQ_DATASIZE - 1)
tib = tib->tib_next;
}
}
void ttyinq_line_iterate_from_linestart(struct ttyinq *ti,
ttyinq_line_iterator_t *iterator,
void *data)
{
ttyinq_line_iterate(ti, iterator, data, ti->ti_linestart,
ti->ti_startblock);
}
void ttyinq_line_iterate_from_reprintpos(struct ttyinq *ti,
ttyinq_line_iterator_t *iterator,
void *data)
{
ttyinq_line_iterate(ti, iterator, data, ti->ti_reprint,
ti->ti_reprintblock);
}
static int ttyinq_startup(void)
{
ttyinq_zone = uma_zcreate("ttyinq", sizeof(struct ttyinq_block), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, 0);
return 0;
}
INIT_PREV_EXPORT(ttyinq_startup);
#if 0
SYSINIT(ttyinq, SI_SUB_DRIVERS, SI_ORDER_FIRST, ttyinq_startup, NULL);
#endif

View File

@ -0,0 +1,370 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-11-13 Shell init ver.
*/
#include "../bsd_porting.h"
#include "../terminal.h"
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* TTY output queue buffering.
*
* The previous design of the TTY layer offered the so-called clists.
* These clists were used for both the input queues and the output
* queue. We don't use certain features on the output side, like quoting
* bits for parity marking and such. This mechanism is similar to the
* old clists, but only contains the features we need to buffer the
* output.
*/
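/*
 * Illustrative sketch (kept out of the build): a producer/consumer round
 * trip, assuming the caller holds the tty lock.  echo_pending_output() is
 * a hypothetical helper, not part of this file.
 */
#if 0
static void echo_pending_output(struct ttyoutq *to)
{
    char buf[64];
    size_t len;

    /* Refuse to fragment: either all three bytes are queued or none. */
    if (ttyoutq_write_nofrag(to, "ok\n", 3) == 0)
    {
        /* Drain whatever currently fits into the local buffer. */
        len = ttyoutq_read(to, buf, sizeof(buf));
        (void)len;
    }
}
#endif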
struct ttyoutq_block
{
struct ttyoutq_block *tob_next;
char tob_data[TTYOUTQ_DATASIZE];
};
static uma_zone_t ttyoutq_zone;
#define TTYOUTQ_INSERT_TAIL(to, tob) \
do \
{ \
if (to->to_end == 0) \
{ \
tob->tob_next = to->to_firstblock; \
to->to_firstblock = tob; \
} \
else \
{ \
tob->tob_next = to->to_lastblock->tob_next; \
to->to_lastblock->tob_next = tob; \
} \
to->to_nblocks++; \
} while (0)
#define TTYOUTQ_REMOVE_HEAD(to) \
do \
{ \
to->to_firstblock = to->to_firstblock->tob_next; \
to->to_nblocks--; \
} while (0)
#define TTYOUTQ_RECYCLE(to, tob) \
do \
{ \
if (to->to_quota <= to->to_nblocks) \
uma_zfree(ttyoutq_zone, tob); \
else \
TTYOUTQ_INSERT_TAIL(to, tob); \
} while (0)
void ttyoutq_flush(struct ttyoutq *to)
{
to->to_begin = 0;
to->to_end = 0;
}
int ttyoutq_setsize(struct ttyoutq *to, struct lwp_tty *tp, size_t size)
{
struct ttyoutq_block *tob;
to->to_quota = howmany(size, TTYOUTQ_DATASIZE);
while (to->to_quota > to->to_nblocks)
{
/*
* List is getting bigger.
* Add new blocks to the tail of the list.
*
* We must unlock the TTY temporarily, because we need
* to allocate memory. This won't be a problem, because
* in the worst case, another thread ends up here, which
* may cause us to allocate too many blocks, but this
* will be caught by the loop below.
*/
tty_unlock(tp);
tob = uma_zalloc(ttyoutq_zone, M_WAITOK);
tty_lock(tp);
if (tty_gone(tp))
{
uma_zfree(ttyoutq_zone, tob);
return -ENXIO;
}
TTYOUTQ_INSERT_TAIL(to, tob);
}
return 0;
}
void ttyoutq_free(struct ttyoutq *to)
{
struct ttyoutq_block *tob;
ttyoutq_flush(to);
to->to_quota = 0;
while ((tob = to->to_firstblock) != NULL)
{
TTYOUTQ_REMOVE_HEAD(to);
uma_zfree(ttyoutq_zone, tob);
}
MPASS(to->to_nblocks == 0);
}
size_t ttyoutq_read(struct ttyoutq *to, void *buf, size_t len)
{
char *cbuf = buf;
while (len > 0)
{
struct ttyoutq_block *tob;
size_t cbegin, cend, clen;
/* See if there still is data. */
if (to->to_begin == to->to_end)
break;
tob = to->to_firstblock;
if (tob == NULL)
break;
/*
* The end address should be the lowest of these three:
* - The write pointer
* - The blocksize - we can't read beyond the block
* - The end address if we could perform the full read
*/
cbegin = to->to_begin;
cend = MIN(MIN(to->to_end, to->to_begin + len), TTYOUTQ_DATASIZE);
clen = cend - cbegin;
/* Copy the data out of the buffers. */
memcpy(cbuf, tob->tob_data + cbegin, clen);
cbuf += clen;
len -= clen;
if (cend == to->to_end)
{
/* Read the complete queue. */
to->to_begin = 0;
to->to_end = 0;
}
else if (cend == TTYOUTQ_DATASIZE)
{
/* Read the block until the end. */
TTYOUTQ_REMOVE_HEAD(to);
to->to_begin = 0;
to->to_end -= TTYOUTQ_DATASIZE;
TTYOUTQ_RECYCLE(to, tob);
}
else
{
/* Read the block partially. */
to->to_begin += clen;
}
}
return cbuf - (char *)buf;
}
/*
* An optimized version of ttyoutq_read() which can be used in pseudo
* TTY drivers to directly copy data from the outq to userspace, instead
* of buffering it.
*
* We can only copy data directly if we need to read the entire block
* back to the user, because we temporarily remove the block from the
* queue. Otherwise we need to copy it to a temporary buffer first, to
* make sure data remains in the correct order.
*/
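/*
 * Worked instance of the rule above: with to_begin == 0, at least one
 * completely filled block queued, and uio_resid >= TTYOUTQ_DATASIZE, cend
 * reaches TTYOUTQ_DATASIZE, so the block is unlinked and uiomove() copies
 * straight out of tob_data (zero copy).  If instead to_begin > 0 and more
 * data follows the block, cend hits neither bound, so the bytes are staged
 * in the on-stack buffer first to keep their order stable while the tty
 * lock is dropped.
 */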
int ttyoutq_read_uio(struct ttyoutq *to, struct lwp_tty *tp, struct uio *uio)
{
while (uio->uio_resid > 0)
{
int error;
struct ttyoutq_block *tob;
size_t cbegin, cend, clen;
/* See if there still is data. */
if (to->to_begin == to->to_end)
return 0;
tob = to->to_firstblock;
if (tob == NULL)
return 0;
/*
* The end address should be the lowest of these three:
* - The write pointer
* - The blocksize - we can't read beyond the block
* - The end address if we could perform the full read
*/
cbegin = to->to_begin;
cend = MIN(MIN(to->to_end, to->to_begin + uio->uio_resid),
TTYOUTQ_DATASIZE);
clen = cend - cbegin;
/*
* We can prevent buffering in some cases:
* - We need to read the block until the end.
* - We don't need to read the block until the end, but
* there is no data beyond it, which allows us to move
* the write pointer to a new block.
*/
if (cend == TTYOUTQ_DATASIZE || cend == to->to_end)
{
/*
* Fast path: zero copy. Remove the first block,
* so we can unlock the TTY temporarily.
*/
TTYOUTQ_REMOVE_HEAD(to);
to->to_begin = 0;
if (to->to_end <= TTYOUTQ_DATASIZE)
to->to_end = 0;
else
to->to_end -= TTYOUTQ_DATASIZE;
/* Temporary unlock and copy the data to userspace. */
tty_unlock(tp);
error = uiomove(tob->tob_data + cbegin, clen, uio);
tty_lock(tp);
/* Block can now be readded to the list. */
TTYOUTQ_RECYCLE(to, tob);
}
else
{
char ob[TTYOUTQ_DATASIZE - 1];
/*
* Slow path: store data in a temporary buffer.
*/
memcpy(ob, tob->tob_data + cbegin, clen);
to->to_begin += clen;
MPASS(to->to_begin < TTYOUTQ_DATASIZE);
/* Temporary unlock and copy the data to userspace. */
tty_unlock(tp);
error = uiomove(ob, clen, uio);
tty_lock(tp);
}
if (error != 0)
return error;
}
return 0;
}
size_t ttyoutq_write(struct ttyoutq *to, const void *buf, size_t nbytes)
{
const char *cbuf = buf;
struct ttyoutq_block *tob;
unsigned int boff;
size_t l;
while (nbytes > 0)
{
boff = to->to_end % TTYOUTQ_DATASIZE;
if (to->to_end == 0)
{
/* First time we're being used or drained. */
MPASS(to->to_begin == 0);
tob = to->to_firstblock;
if (tob == NULL)
{
/* Queue has no blocks. */
break;
}
to->to_lastblock = tob;
}
else if (boff == 0)
{
/* We reached the end of this block on last write. */
tob = to->to_lastblock->tob_next;
if (tob == NULL)
{
/* We've reached the watermark. */
break;
}
to->to_lastblock = tob;
}
else
{
tob = to->to_lastblock;
}
/* Don't copy more than was requested. */
l = MIN(nbytes, TTYOUTQ_DATASIZE - boff);
MPASS(l > 0);
memcpy(tob->tob_data + boff, cbuf, l);
cbuf += l;
nbytes -= l;
to->to_end += l;
}
return (cbuf - (const char *)buf);
}
int ttyoutq_write_nofrag(struct ttyoutq *to, const void *buf, size_t nbytes)
{
size_t ret __unused;
if (ttyoutq_bytesleft(to) < nbytes)
return -1;
/* We should always be able to write it back. */
ret = ttyoutq_write(to, buf, nbytes);
MPASS(ret == nbytes);
return 0;
}
static int ttyoutq_startup(void)
{
ttyoutq_zone = uma_zcreate("ttyoutq", sizeof(struct ttyoutq_block), NULL,
NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
return 0;
}
INIT_PREV_EXPORT(ttyoutq_startup);
#if 0
SYSINIT(ttyoutq, SI_SUB_DRIVERS, SI_ORDER_FIRST, ttyoutq_startup, NULL);
#endif

Some files were not shown because too many files have changed in this diff