//------------------------------------------------------------------------------
// LG_FREE_WORK: free temporary workspace (W and D), but not the result P
//------------------------------------------------------------------------------

// LAGraph_Free frees the block and sets the caller's pointer to NULL, so this
// macro is safe to invoke more than once on the same variables (as happens on
// the error paths below, where W may already have been freed).

#define LG_FREE_WORK \
{ \
LAGraph_Free ((void **) &W, NULL) ; \
LAGraph_Free ((void **) &D, NULL) ; \
}
//------------------------------------------------------------------------------
// LG_FREE_ALL: free all allocations, including the result P
//------------------------------------------------------------------------------

// Invoked on the error paths by the LG_TRY / GRB_TRY / LG_ASSERT_MSG macros
// (see LG_internal.h), so a failure at any point releases everything,
// including the not-yet-returned output permutation P.

#define LG_FREE_ALL \
{ \
LG_FREE_WORK ; \
LAGraph_Free ((void **) &P, NULL) ; \
}
#include "LG_internal.h"
//------------------------------------------------------------------------------
// LAGr_SortByDegree: sort the nodes of a graph by their degree
//------------------------------------------------------------------------------

// On success, (*P_handle) points to a newly allocated array of size n, where
// P [k] = i means node i is the kth node in the sorted order.  The sort key
// is the out-degree (byout true) or in-degree (byout false); for an
// undirected graph, or a directed graph whose structure is known to be
// symmetric, the out-degree is always used.  The order is ascending if
// ascending is true, descending otherwise.  The caller owns (*P_handle) and
// must free it via LAGraph_Free.  On error, a GraphBLAS/LAGraph error code is
// returned, (*P_handle) is NULL, and all allocations are freed.

int LAGr_SortByDegree
(
// output: P_handle ; inputs: G, byout, ascending ; error buffer: msg
int64_t **P_handle, const LAGraph_Graph G, bool byout, bool ascending, char *msg
)
{

//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------

LG_CLEAR_MSG ;
int64_t *P = NULL ;     // result permutation (ownership passes to caller)
int64_t *W = NULL ;     // workspace: tuples extracted from the degree vector
int64_t *D = NULL ;     // workspace: per-node sort key (signed degree)
LG_ASSERT_MSG (P_handle != NULL, GrB_NULL_POINTER, "&P != NULL") ;
(*P_handle) = NULL ;
// NOTE: LG_TRY / GRB_TRY return on failure after invoking LG_FREE_ALL,
// which frees P, W, and D (see LG_internal.h)
LG_TRY (LAGraph_CheckGraph (G, msg)) ;

//--------------------------------------------------------------------------
// pick the degree vector to sort by
//--------------------------------------------------------------------------

GrB_Vector Degree ;
// for an undirected graph, or a directed graph with symmetric structure,
// in-degree and out-degree coincide, so the cached out-degree is used and
// byout is ignored
if (G->kind == LAGraph_ADJACENCY_UNDIRECTED ||
(G->kind == LAGraph_ADJACENCY_DIRECTED &&
G->is_symmetric_structure == LAGraph_TRUE))
{
Degree = G->out_degree ;
}
else
{
Degree = (byout) ? G->out_degree : G->in_degree ;
}
// the required degree property must already be cached in G
LG_ASSERT_MSG (Degree != NULL, LAGRAPH_NOT_CACHED, "degree unknown") ;

//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------

GrB_Index n ;
GRB_TRY (GrB_Vector_size (&n, Degree)) ;
// cap the thread count so each thread gets at least CHUNK entries of work
#define CHUNK (64*1024)
int nthreads = LG_nthreads_outer * LG_nthreads_inner ;
nthreads = LAGRAPH_MIN (nthreads, n/CHUNK) ;
nthreads = LAGRAPH_MAX (nthreads, 1) ;

//--------------------------------------------------------------------------
// allocate the result and workspace
//--------------------------------------------------------------------------

LG_TRY (LAGraph_Malloc ((void **) &P, n, sizeof (int64_t), msg)) ;
LG_TRY (LAGraph_Malloc ((void **) &D, n, sizeof (int64_t), msg)) ;
LG_TRY (LAGraph_Malloc ((void **) &W, 2*n, sizeof (int64_t), msg)) ;
// split W into two halves: W0 receives the indices of the entries of the
// degree vector, W1 the corresponding degree values
int64_t *W0 = W ;
int64_t *W1 = W + n ;

//--------------------------------------------------------------------------
// initialize the sort keys and the identity permutation
//--------------------------------------------------------------------------

int64_t k;
// D is cleared first because the degree vector may be sparse (nvals < n):
// nodes with no explicit entry keep an implicit degree of zero
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < n ; k++)
{
D [k] = 0 ;
P [k] = k ;
}

//--------------------------------------------------------------------------
// extract the degrees and scatter them into D
//--------------------------------------------------------------------------

// on output, nvals is the actual number of entries in Degree
GrB_Index nvals = n ;
GRB_TRY (GrB_Vector_extractTuples ((GrB_Index *) W0, W1, &nvals, Degree)) ;
if (ascending)
{
// sort key is the degree itself
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < nvals ; k++)
{
D [W0 [k]] = W1 [k] ;
}
}
else
{
// negate the degrees so that the ascending sort below produces a
// descending order of degree
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < nvals ; k++)
{
D [W0 [k]] = -W1 [k] ;
}
}

//--------------------------------------------------------------------------
// sort: P becomes the permutation ordering the nodes by D
//--------------------------------------------------------------------------

// free W first to reduce peak memory use during the sort
LG_TRY (LAGraph_Free ((void **) &W, NULL)) ;
// LG_msort2 sorts the pairs (D,P), keyed on D; ties in D are presumably
// broken by the node id carried in P -- see LG_msort2 for the contract
LG_TRY (LG_msort2 (D, P, n, msg)) ;

//--------------------------------------------------------------------------
// free workspace and return the result
//--------------------------------------------------------------------------

LG_FREE_WORK ;
(*P_handle) = P ;
return (GrB_SUCCESS) ;
}