/*
 * Supplementary group handling (derived from Linux kernel/groups.c).
 * Unrelated pasted text removed.
 */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize){

struct group_info *group_info;

int nblocks;

int i;



nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;

/* Make sure we always allocate at least one indirect block pointer */

nblocks = nblocks ? : 1;

group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);

if (!group_info)

return NULL;

group_info->ngroups = gidsetsize;

group_info->nblocks = nblocks;

atomic_set(&group_info->usage, 1);


struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

/*
 * Allocate a group_info able to hold @gidsetsize supplementary gids.
 * Small sets use the embedded small_block; larger sets get one page per
 * NGROUPS_PER_BLOCK gids.  Returns the new group_info (usage == 1) or
 * NULL on allocation failure.
 */
struct group_info *groups_alloc(int gidsetsize){
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);	/* caller holds the first reference */

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			/* Fix: the index was lost — store into slot i,
			 * not the pointer array itself (cf. blocks[0] use
			 * in the small-set branch above). */
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	/* i indexes the block that failed; free the ones before it */
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}



EXPORT_SYMBOL(groups_alloc);



/*
 * Release a group_info allocated by groups_alloc(): free the per-block
 * pages (unless the embedded small_block was used) and the struct itself.
 */
void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			/* Fix: free page i, not the pointer array itself */
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}



EXPORT_SYMBOL(groups_free);



/* export the group_info to a user-space array */

/* export the group_info to a user-space array */
/*
 * Copy all gids, block by block, into the user buffer @grouplist.
 * Returns 0 on success, -EFAULT if any copy to user space faults.
 * (This copy was truncated in the source — closing brace restored and
 * trailing pasted residue removed.)
 */
static int groups_to_user(gid_t __user *grouplist,
			  const struct group_info *group_info)
{
	int i;
	unsigned int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		/* last block may be partially filled */
		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
		unsigned int len = cp_count * sizeof(*grouplist);

		/* Fix: copy from blocks[i], not the pointer array itself */
		if (copy_to_user(grouplist, group_info->blocks[i], len))
			return -EFAULT;

		grouplist += NGROUPS_PER_BLOCK;
		count -= cp_count;
	}
	return 0;
}
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

/*
 * Allocate a group_info able to hold @gidsetsize supplementary gids.
 * Small sets use the embedded small_block; larger sets get one page per
 * NGROUPS_PER_BLOCK gids.  Returns the new group_info (usage == 1) or
 * NULL on allocation failure.
 */
struct group_info *groups_alloc(int gidsetsize){
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);	/* caller holds the first reference */

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			/* Fix: the index was lost — store into slot i,
			 * not the pointer array itself. */
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	/* i indexes the block that failed; free the ones before it */
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}



EXPORT_SYMBOL(groups_alloc);



/*
 * Release a group_info allocated by groups_alloc(): free the per-block
 * pages (unless the embedded small_block was used) and the struct itself.
 */
void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			/* Fix: free page i, not the pointer array itself */
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}



EXPORT_SYMBOL(groups_free);



/* export the group_info to a user-space array */

/*
 * Copy all gids, block by block, into the user buffer @grouplist.
 * Returns 0 on success, -EFAULT if any copy to user space faults.
 */
static int groups_to_user(gid_t __user *grouplist,
			  const struct group_info *group_info)
{
	int i;
	unsigned int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		/* last block may be partially filled */
		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
		unsigned int len = cp_count * sizeof(*grouplist);

		/* Fix: copy from blocks[i], not the pointer array itself */
		if (copy_to_user(grouplist, group_info->blocks[i], len))
			return -EFAULT;

		grouplist += NGROUPS_PER_BLOCK;
		count -= cp_count;
	}
	return 0;
}



/* fill a group_info from a user-space array - it must be allocated already */

/* fill a group_info from a user-space array - it must be allocated already */
/*
 * Mirror of groups_to_user(): copies gids block by block from the user
 * buffer @grouplist.  Returns 0 on success, -EFAULT on fault.
 */
static int groups_from_user(struct group_info *group_info,
			    gid_t __user *grouplist)
{
	int i;
	unsigned int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		/* last block may be partially filled */
		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
		unsigned int len = cp_count * sizeof(*grouplist);

		/* Fix: copy into blocks[i], not the pointer array itself */
		if (copy_from_user(group_info->blocks[i], grouplist, len))
			return -EFAULT;

		grouplist += NGROUPS_PER_BLOCK;
		count -= cp_count;
	}
	return 0;
}



/* a simple Shell sort */

/*
 * Sort the gid set in place with a Shell sort using the 3x+1 gap
 * sequence.  In-place, no allocation; gids are accessed through the
 * GROUP_AT() accessor.
 */
static void groups_sort(struct group_info *group_info)
{
	int gap, idx, limit;
	int ngroups = group_info->ngroups;

	/* Largest 3x+1 gap not exceeding the set size. */
	for (gap = 1; gap < ngroups; gap = 3 * gap + 1)
		; /* nothing */
	gap /= 3;

	while (gap) {
		limit = ngroups - gap;
		for (idx = 0; idx < limit; idx++) {
			int lo = idx;
			int hi = lo + gap;
			gid_t val = GROUP_AT(group_info, hi);

			/* Gapped insertion: shift larger entries up. */
			while (lo >= 0 && GROUP_AT(group_info, lo) > val) {
				GROUP_AT(group_info, hi) =
					GROUP_AT(group_info, lo);
				hi = lo;
				lo -= gap;
			}
			GROUP_AT(group_info, hi) = val;
		}
		gap /= 3;
	}
}



/* a simple bsearch */

/* a simple bsearch */
/*
 * Binary search for @grp in a sorted group_info (see groups_sort()).
 * Returns 1 if the gid is a member, 0 otherwise (including NULL input).
 * The source was truncated mid-expression here; the loop body is
 * restored using the overflow-safe midpoint already begun on the
 * truncated line.
 */
int groups_search(const struct group_info *group_info, gid_t grp)
{
	unsigned int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		/* overflow-safe midpoint */
		unsigned int mid = left + (right - left)/2;
		if (grp > GROUP_AT(group_info, mid))
			left = mid + 1;
		else if (grp < GROUP_AT(group_info, mid))
			right = mid;
		else
			return 1;
	}
	return 0;
}
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

/*
 * Allocate a group_info able to hold @gidsetsize supplementary gids.
 * Small sets use the embedded small_block; larger sets get one page per
 * NGROUPS_PER_BLOCK gids.  Returns the new group_info (usage == 1) or
 * NULL on allocation failure.
 */
struct group_info *groups_alloc(int gidsetsize){
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);	/* caller holds the first reference */

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			/* Fix: the index was lost — store into slot i,
			 * not the pointer array itself. */
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	/* i indexes the block that failed; free the ones before it */
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}



EXPORT_SYMBOL(groups_alloc);



/*
 * Release a group_info allocated by groups_alloc(): free the per-block
 * pages (unless the embedded small_block was used) and the struct itself.
 */
void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			/* Fix: free page i, not the pointer array itself */
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}



EXPORT_SYMBOL(groups_free);



/* export the group_info to a user-space array */

/*
 * Copy all gids, block by block, into the user buffer @grouplist.
 * Returns 0 on success, -EFAULT if any copy to user space faults.
 * (This copy was truncated mid-loop in the source; restored to the
 * complete function, matching the earlier full copies.)
 */
static int groups_to_user(gid_t __user *grouplist,
			  const struct group_info *group_info)
{
	int i;
	unsigned int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		/* last block may be partially filled */
		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
		unsigned int len = cp_count * sizeof(*grouplist);

		/* Fix: copy from blocks[i], not the pointer array itself */
		if (copy_to_user(grouplist, group_info->blocks[i], len))
			return -EFAULT;

		grouplist += NGROUPS_PER_BLOCK;
		count -= cp_count;
	}
	return 0;
}
/* Unrelated pasted text removed. */