Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
C
CS3214 Project 2 - A Fork-Join Framework
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Wiki
Code
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Model registry
Operate
Environments
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
maxb24
CS3214 Project 2 - A Fork-Join Framework
Commits
d398e1f3
Commit
d398e1f3
authored
1 year ago
by
Max Barrett
Browse files
Options
Downloads
Patches
Plain Diff
work stealing done with cpu pinning
parent
a751c037
No related branches found
Branches containing commit
No related tags found
No related merge requests found
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
tests/threadpool.c
+90
-22
90 additions, 22 deletions
tests/threadpool.c
with
90 additions
and
22 deletions
tests/threadpool.c
+
90
−
22
View file @
d398e1f3
#define _GNU_SOURCE
#include
"threadpool.h"
#include
"list.h"
#include
<stdlib.h>
#include
<stdio.h>
#include
<assert.h>
#include
<pthread.h>
#include
<sched.h>
#include
<unistd.h>
typedef
enum
{
NOT_STARTED
,
...
...
@@ -19,6 +23,7 @@ struct thread_pool {
int
threads
;
struct
worker
*
workers
;
pthread_barrier_t
sync
;
char
padding
[
50
];
};
struct
future
{
...
...
@@ -29,52 +34,106 @@ struct future {
struct
thread_pool
*
threadpool
;
state
status
;
pthread_cond_t
cond
;
char
padding
[
50
];
};
// One worker thread's bookkeeping record; the pool owns an array of these.
struct
worker
{
// Back-pointer to the pool that owns this worker (set in thread_pool_new).
struct
thread_pool
*
worker_threadpool
;
// pthread handle of this worker's thread.
pthread_t
internal
;
// Per-worker queue of futures; run_future pops the front locally and
// other workers steal from the back.
struct
list
worker_queue
;
// NOTE(review): presumably padding to keep adjacent workers on separate
// cache lines and avoid false sharing — 50 bytes is an odd choice since a
// cache line is typically 64; TODO confirm intent.
char
padding
[
50
];
};
static
__thread
struct
worker
*
future_worker
;
static
void
*
start_routine
(
void
*
arg
);
static
void
*
run_future
(
struct
thread_pool
*
threadpool
);
static
bool
check_for_futures
(
struct
thread_pool
*
threadpool
);
/*
 * Worker thread entry point.
 *
 * arg points at this thread's `struct worker` slot inside the pool's
 * workers array.  The thread sets a CPU affinity mask, waits on the pool
 * barrier so thread_pool_new can finish initialization, publishes itself
 * in the thread-local `future_worker`, then loops executing futures via
 * run_future() until threadpool->shutdown is observed.
 *
 * Returns NULL when the pool shuts down.
 */
static void * start_routine(void * arg) {
    // Get the worker and the associated threadpool.
    struct worker * worker = (struct worker *) arg;
    struct thread_pool * threadpool = worker->worker_threadpool;

    // CPU pinning: allow this thread to run on cores 0..threads-1
    // (modulo the number of online cores).
    // NOTE(review): every worker builds the same mask, so this restricts
    // all workers to the same core set rather than pinning each worker to
    // its own core — confirm that is the intended policy.
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    long num_cores = sysconf(_SC_NPROCESSORS_ONLN);
    if (num_cores < 1) {
        num_cores = 1;  // sysconf can fail (-1); avoid % 0 / negative modulus
    }
    for (int i = 0; i < threadpool->threads; i++) {
        CPU_SET(i % num_cores, &cpuset);
    }
    pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);

    // Use the barrier so thread_pool_new (the +1 party) finishes setting up
    // every worker before any worker starts pulling work.
    pthread_barrier_wait(&threadpool->sync);

    // Set up the thread-local worker.  `arg` already is this thread's own
    // worker slot, so no lookup is needed.  (The original scanned the
    // workers array comparing pthread_self() against pthread_t values with
    // `==`, which is not portable — pthread_equal() is the defined way to
    // compare pthread_t; here the comparison can be avoided entirely.)
    future_worker = worker;

    // Make worker continue to loop until shutdown.
    while (1) {
        pthread_mutex_lock(&threadpool->lock);

        // If no future is runnable anywhere and we are not shutting down, wait.
        while (!check_for_futures(threadpool) && !threadpool->shutdown) {
            pthread_cond_wait(&threadpool->cond, &threadpool->lock);
        }

        // If shutdown, unlock and exit.
        if (threadpool->shutdown) {
            pthread_mutex_unlock(&threadpool->lock);
            return NULL;
        }

        // Pick a future (local queue, then global queue, then steal) and run
        // it; run_future drops the lock while the task executes and holds it
        // again on return.
        run_future(threadpool);
        pthread_mutex_unlock(&threadpool->lock);
    }
    return NULL;
}
/*
 * Select one runnable future and execute it.
 *
 * Must be called with threadpool->lock held; the lock is released while the
 * task runs and re-acquired before returning, so the caller still holds it
 * afterwards.  Selection order: this worker's own queue (front), then the
 * global queue (front), then steal from the back of another worker's queue.
 *
 * Returns NULL.
 */
static void * run_future(struct thread_pool * threadpool) {
    struct future * curr_future = NULL;

    if (!list_empty(&future_worker->worker_queue)) {
        // First check this worker's own queue.
        curr_future = list_entry(list_pop_front(&future_worker->worker_queue),
                                 struct future, elem);
    } else if (!list_empty(&threadpool->global_queue)) {
        // If the worker queue is empty, check the global queue.
        curr_future = list_entry(list_pop_front(&threadpool->global_queue),
                                 struct future, elem);
    } else {
        // Both empty: look to steal a future from the tail of another
        // worker's queue.
        for (int i = 0; i < threadpool->threads; i++) {
            if (!list_empty(&threadpool->workers[i].worker_queue)) {
                curr_future = list_entry(
                    list_pop_back(&threadpool->workers[i].worker_queue),
                    struct future, elem);
                break;
            }
        }
    }

    // FIX: every queue may be empty by the time we scan (another thread can
    // drain them between the caller's check_for_futures() and here); the
    // original dereferenced a NULL curr_future in that case.
    if (curr_future == NULL) {
        return NULL;
    }

    curr_future->status = WORKING;

    // Drop the pool lock while the (possibly long-running) task executes.
    pthread_mutex_unlock(&threadpool->lock);
    curr_future->result = curr_future->task(threadpool, curr_future->args);
    pthread_mutex_lock(&threadpool->lock);

    curr_future->status = DONE;
    // Wake any thread blocked in future_get() on this future.
    pthread_cond_signal(&curr_future->cond);
    return NULL;
}
static
bool
check_for_futures
(
struct
thread_pool
*
threadpool
)
{
for
(
int
i
=
0
;
i
<
threadpool
->
threads
;
i
++
)
{
if
(
!
list_empty
(
&
threadpool
->
workers
[
i
].
worker_queue
))
{
return
true
;
}
}
return
!
list_empty
(
&
threadpool
->
global_queue
);
}
struct
thread_pool
*
thread_pool_new
(
int
nthreads
)
{
//create new threadpool and init variables
struct
thread_pool
*
threadpool
=
(
struct
thread_pool
*
)
malloc
(
sizeof
(
struct
thread_pool
));
pthread_mutex_init
(
&
threadpool
->
lock
,
NULL
);
pthread_cond_init
(
&
threadpool
->
cond
,
NULL
);
pthread_mutex_lock
(
&
threadpool
->
lock
);
list_init
(
&
threadpool
->
global_queue
);
threadpool
->
shutdown
=
false
;
threadpool
->
threads
=
nthreads
;
...
...
@@ -82,9 +141,11 @@ struct thread_pool * thread_pool_new(int nthreads) {
//create threads and wait until they are all made
pthread_barrier_init
(
&
threadpool
->
sync
,
NULL
,
nthreads
+
1
);
for
(
int
i
=
0
;
i
<
nthreads
;
i
++
)
{
list_init
(
&
threadpool
->
workers
[
i
].
worker_queue
);
threadpool
->
workers
[
i
].
worker_threadpool
=
threadpool
;
pthread_create
(
&
threadpool
->
workers
[
i
].
internal
,
NULL
,
start_routine
,
&
threadpool
->
workers
[
i
]);
}
pthread_mutex_unlock
(
&
threadpool
->
lock
);
pthread_barrier_wait
(
&
threadpool
->
sync
);
return
threadpool
;
}
...
...
@@ -100,8 +161,10 @@ void thread_pool_shutdown_and_destroy(struct thread_pool *threadpool) {
pthread_join
(
threadpool
->
workers
[
i
].
internal
,
NULL
);
}
//clean up memory
free
(
future_worker
);
pthread_mutex_destroy
(
&
threadpool
->
lock
);
pthread_cond_destroy
(
&
threadpool
->
cond
);
pthread_barrier_destroy
(
&
threadpool
->
sync
);
free
(
threadpool
->
workers
);
free
(
threadpool
);
}
...
...
@@ -116,7 +179,7 @@ void * future_get(struct future *future) {
future
->
result
=
future
->
task
(
future
->
threadpool
,
future
->
args
);
pthread_mutex_lock
(
&
future
->
threadpool
->
lock
);
future
->
status
=
DONE
;
}
else
{
// if it has been started, wait for it to finish and get the result
}
else
{
// if the task is still being completed, wait for it
while
(
future
->
status
!=
DONE
)
{
pthread_cond_wait
(
&
future
->
cond
,
&
future
->
threadpool
->
lock
);
}
...
...
@@ -126,10 +189,10 @@ void * future_get(struct future *future) {
return
future
->
result
;
}
void
future_free
(
struct
future
*
future
)
{
free
(
future
);
}
struct
future
*
thread_pool_submit
(
struct
thread_pool
*
pool
,
fork_join_task_t
task
,
void
*
data
)
{
//create future and set values
struct
future
*
future
=
malloc
(
sizeof
(
struct
future
));
...
...
@@ -140,10 +203,15 @@ struct future * thread_pool_submit(struct thread_pool *pool, fork_join_task_t ta
future
->
status
=
NOT_STARTED
;
pthread_cond_init
(
&
future
->
cond
,
NULL
);
pthread_mutex_lock
(
&
pool
->
lock
);
//add to global queue
list_push_back
(
&
pool
->
global_queue
,
&
future
->
elem
);
//if there is an associated worker then it is an internal submission and it goes to the worker queue
if
(
future_worker
!=
NULL
)
{
list_push_front
(
&
future_worker
->
worker_queue
,
&
future
->
elem
);
}
else
{
//if there is no worker then it gets added to its global queue
list_push_back
(
&
pool
->
global_queue
,
&
future
->
elem
);
}
//signal to the threads that a future has been added to the queue
pthread_cond_signal
(
&
pool
->
cond
);
pthread_mutex_unlock
(
&
pool
->
lock
);
return
future
;
}
}
\ No newline at end of file
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment