// System call stubs.
#include <parlib.h>
#include <vcore.h>
#include <errno.h>	/* errno/ENOMEM, set by sys_proc_create() */
int sys_proc_destroy(int pid, int exitcode)
{
return ros_syscall(SYS_proc_destroy, pid, exitcode, 0, 0, 0, 0);
}
int sys_getpid(void)
{
return ros_syscall(SYS_getpid, 0, 0, 0, 0, 0, 0);
}
size_t sys_getpcoreid(void)
{
return ros_syscall(SYS_getpcoreid, 0, 0, 0, 0, 0, 0);
}
ssize_t sys_cputs(const uint8_t *s, size_t len)
{
return ros_syscall(SYS_cputs, s, len, 0, 0, 0, 0);
}
uint16_t sys_cgetc(void)
{
return ros_syscall(SYS_cgetc, 0, 0, 0, 0, 0, 0);
}
int sys_null(void)
{
return ros_syscall(SYS_null, 0, 0, 0, 0, 0, 0);
}
ssize_t sys_shared_page_alloc(void **addr, pid_t p2,
                              int p1_flags, int p2_flags)
{
	return ros_syscall(SYS_shared_page_alloc, addr, p2, p1_flags, p2_flags,
	                   0, 0);
}
ssize_t sys_shared_page_free(void* addr, pid_t p2)
{
return ros_syscall(SYS_shared_page_free, addr, p2, 0, 0, 0, 0);
}
// Write a buffer out the serial port
ssize_t sys_serial_write(void* buf, size_t len)
{
return ros_syscall(SYS_serial_write, buf, len, 0, 0, 0, 0);
}
// Read from the serial port into a buffer
ssize_t sys_serial_read(void* buf, size_t len)
{
return ros_syscall(SYS_serial_read, buf, len, 0, 0, 0, 0);
}
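
/* Usage sketch (illustrative, not part of the original stubs): echo one chunk
 * from the serial port back out. The buffer size is arbitrary.
 *
 *	char buf[128];
 *	ssize_t ret = sys_serial_read(buf, sizeof(buf));
 *	if (ret > 0)
 *		sys_serial_write(buf, ret);
 */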
// Write a buffer out over ethernet
ssize_t sys_eth_write(void* buf, size_t len)
{
if (len == 0)
return 0;
return ros_syscall(SYS_eth_write, buf, len, 0, 0, 0, 0);
}
// Read from ethernet into a buffer
ssize_t sys_eth_read(void* buf, size_t len)
{
if (len == 0)
return 0;
return ros_syscall(SYS_eth_read, buf, len, 0, 0, 0, 0);
}
void sys_reboot(void)
{
ros_syscall(SYS_reboot, 0, 0, 0, 0, 0, 0);
}
void sys_yield(bool being_nice)
{
ros_syscall(SYS_yield, being_nice, 0, 0, 0, 0, 0);
}
int sys_proc_create(char *path, size_t path_l, char *argv[], char *envp[])
{
struct procinfo pi;
if (procinfo_pack_args(&pi, argv, envp)) {
errno = ENOMEM;
return -1;
}
return ros_syscall(SYS_proc_create, path, path_l, &pi, 0, 0, 0);
}
int sys_proc_run(int pid)
{
return ros_syscall(SYS_proc_run, pid, 0, 0, 0, 0, 0);
}
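
/* Usage sketch (illustrative): the typical pairing of the two stubs above,
 * creating a process and then running it. The path and the arg/env vectors
 * are hypothetical.
 *
 *	char *p_argv[] = {"hello", 0};
 *	char *p_envp[] = {"LD_LIBRARY_PATH=/lib", 0};
 *	char path[] = "/bin/hello";
 *	int pid = sys_proc_create(path, strlen(path), p_argv, p_envp);
 *	if (pid >= 0)
 *		sys_proc_run(pid);
 *	// on failure, errno is set (e.g. ENOMEM if arg packing failed)
 */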
void *CT(length) sys_mmap(void *SNT addr, size_t length, int prot, int flags,
int fd, size_t offset)
{
return (void*)ros_syscall(SYS_mmap, addr, length, prot, flags, fd, offset);
}
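
/* Usage sketch (illustrative): an anonymous, private mapping through the stub
 * above. This assumes the usual PROT_ and MAP_ flag constants and PGSIZE are
 * available to the caller; fd is -1 and offset is 0 since nothing backs the
 * mapping.
 *
 *	void *page = sys_mmap(0, PGSIZE, PROT_READ | PROT_WRITE,
 *	                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 */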
int sys_provision(int pid, unsigned int res_type, long res_val)
{
return ros_syscall(SYS_provision, pid, res_type, res_val, 0, 0, 0);
}
int sys_notify(int pid, unsigned int ev_type, struct event_msg *u_msg)
{
return ros_syscall(SYS_notify, pid, ev_type, u_msg, 0, 0, 0);
}
int sys_self_notify(uint32_t vcoreid, unsigned int ev_type,
struct event_msg *u_msg, bool priv)
{
return ros_syscall(SYS_self_notify, vcoreid, ev_type, u_msg, priv, 0, 0);
}
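
/* Usage sketch (illustrative): post an event message to our own vcore via the
 * stub above. The event type is a stand-in for whatever EV_ constant the
 * caller handles; the final TRUE sets the 'priv' argument.
 *
 *	struct event_msg msg = {0};
 *	msg.ev_type = ev_type;		// some EV_ constant (hypothetical)
 *	sys_self_notify(vcore_id(), msg.ev_type, &msg, TRUE);
 */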
int sys_halt_core(unsigned int usec)
{
return ros_syscall(SYS_halt_core, usec, 0, 0, 0, 0, 0);
}
void *sys_init_arsc(void)
{
return (void*)ros_syscall(SYS_init_arsc, 0, 0, 0, 0, 0, 0);
}
int sys_block(unsigned int usec)
{
return ros_syscall(SYS_block, usec, 0, 0, 0, 0, 0);
}
/* enable_my_notif tells the kernel whether or not it is okay to turn on notifs
* when our calling vcore 'yields'. This controls whether or not the vcore will
* get started from vcore_entry() or not, and whether or not remote cores need
* to sys_change_vcore to preempt-recover the calling vcore. Only set this to
* FALSE if you are unable to handle starting fresh at vcore_entry(). One
* example of this is in mcs_pdr_locks.
*
 * Will return:
 *	0 if we successfully changed to the target vcore.
 *	-EBUSY if the target vcore is already mapped (a good kind of failure)
 *	-EAGAIN if we failed for some other reason and need to try again. For
 *	example, the caller could be preempted, and we never even attempted to
 *	change.
 *	-EINVAL if there was some userspace bug */
int sys_change_vcore(uint32_t vcoreid, bool enable_my_notif)
{
/* Since we might be asking to start up on a fresh stack (if
* enable_my_notif), we need to use some non-stack memory for the struct
* sysc. Our vcore could get restarted before the syscall finishes (after
* unlocking the proc, before finish_sysc()), and the act of finishing would
* write onto our stack. Thus we use the per-vcore struct. */
int flags;
	/* Wait until any previous syscall on this struct is done and no longer
	 * kernel-locked. Since this should only be called from VC ctx, we'll
	 * just spin. Should be extremely rare. Note flags is initialized to
	 * SC_DONE. */
do {
cpu_relax();
flags = atomic_read(&__vcore_one_sysc.flags);
} while (!(flags & SC_DONE) || flags & SC_K_LOCK);
__vcore_one_sysc.num = SYS_change_vcore;
__vcore_one_sysc.arg0 = vcoreid;
__vcore_one_sysc.arg1 = enable_my_notif;
/* keep in sync with glibc sysdeps/ros/syscall.c */
__ros_arch_syscall((long)&__vcore_one_sysc, 1);
	/* If we returned, either we wanted to (!enable_my_notif) or we failed.
	 * We need to wait until the sysc is finished to find out why. Again,
	 * it's okay to just spin. */
do {
cpu_relax();
flags = atomic_read(&__vcore_one_sysc.flags);
} while (!(flags & SC_DONE) || flags & SC_K_LOCK);
return __vcore_one_sysc.retval;
}
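
/* Usage sketch (illustrative): change to another vcore, retrying per the
 * return values documented above. target_vc is hypothetical. Passing FALSE
 * means we must resume exactly where we left off rather than restarting at
 * vcore_entry(), as in mcs_pdr_locks; for that use, -EBUSY (already mapped)
 * is fine too.
 *
 *	int ret;
 *	do {
 *		ret = sys_change_vcore(target_vc, FALSE);
 *	} while (ret == -EAGAIN);
 */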
int sys_change_to_m(void)
{
return ros_syscall(SYS_change_to_m, 0, 0, 0, 0, 0, 0);
}
int sys_poke_ksched(int pid, unsigned int res_type)
{
return ros_syscall(SYS_poke_ksched, pid, res_type, 0, 0, 0, 0);
}
int sys_abort_sysc(struct syscall *sysc)
{
return ros_syscall(SYS_abort_sysc, sysc, 0, 0, 0, 0, 0);
}