/*
 * lib/krb5/ccache/ccbase.c
 *
 * Copyright 1990,2004,2008 by the Massachusetts Institute of Technology.
 * All Rights Reserved.
 *
 * Export of this software from the United States of America may
 * require a specific license from the United States Government.
 * It is the responsibility of any person or organization contemplating
 * export to obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of M.I.T. not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission. Furthermore if you modify this software you must label
 * your software as modified software and not distribute it in such a
 * fashion that it might be confused with the original M.I.T. software.
 * M.I.T. makes no representations about the suitability of
 * this software for any purpose. It is provided "as is" without express
 * or implied warranty.
 *
 *
 * Registration functions for ccache.
 */

#include "k5-int.h"
#include "k5-thread.h"

#include "fcc.h"
#include "cc-int.h"

struct krb5_cc_typelist {
    const krb5_cc_ops *ops;
    struct krb5_cc_typelist *next;
};

struct krb5_cc_typecursor {
    struct krb5_cc_typelist *tptr;
};
/* typedef krb5_cc_typecursor in k5-int.h */

extern const krb5_cc_ops krb5_mcc_ops;

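/*
 * Build the initial type list statically at compile time.  Each entry below
 * is linked to the previous one through the NEXT macro, which is redefined
 * after every entry, so INITIAL_TYPEHEAD ends up naming the head of a singly
 * linked list containing all of the built-in ccache types.
 */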
#define NEXT NULL

#ifdef _WIN32
extern const krb5_cc_ops krb5_lcc_ops;
static struct krb5_cc_typelist cc_lcc_entry = { &krb5_lcc_ops, NEXT };
#undef NEXT
#define NEXT &cc_lcc_entry
#endif

#ifdef USE_CCAPI_V3
extern const krb5_cc_ops krb5_cc_stdcc_ops;
static struct krb5_cc_typelist cc_stdcc_entry = { &krb5_cc_stdcc_ops, NEXT };
#undef NEXT
#define NEXT &cc_stdcc_entry
#endif

static struct krb5_cc_typelist cc_mcc_entry = { &krb5_mcc_ops, NEXT };
#undef NEXT
#define NEXT &cc_mcc_entry

#ifndef NO_FILE_CCACHE
static struct krb5_cc_typelist cc_fcc_entry = { &krb5_cc_file_ops, NEXT };
#undef NEXT
#define NEXT &cc_fcc_entry
#endif

#ifdef USE_KEYRING_CCACHE
extern const krb5_cc_ops krb5_krcc_ops;
static struct krb5_cc_typelist cc_krcc_entry = { &krb5_krcc_ops, NEXT };
#undef NEXT
#define NEXT &cc_krcc_entry
#endif /* USE_KEYRING_CCACHE */


#define INITIAL_TYPEHEAD (NEXT)
static struct krb5_cc_typelist *cc_typehead = INITIAL_TYPEHEAD;
static k5_mutex_t cc_typelist_lock = K5_MUTEX_PARTIAL_INITIALIZER;

/* mutex for krb5_cccol_[un]lock */
static k5_cc_mutex cccol_lock = K5_CC_MUTEX_PARTIAL_INITIALIZER;

static krb5_error_code
krb5int_cc_getops(krb5_context, const char *, const krb5_cc_ops **);

int
krb5int_cc_initialize(void)
{
    int err;

    err = k5_cc_mutex_finish_init(&cccol_lock);
    if (err)
        return err;
    err = k5_cc_mutex_finish_init(&krb5int_mcc_mutex);
    if (err)
        return err;
    err = k5_mutex_finish_init(&cc_typelist_lock);
    if (err)
        return err;
#ifndef NO_FILE_CCACHE
    err = k5_cc_mutex_finish_init(&krb5int_cc_file_mutex);
    if (err)
        return err;
#endif
#ifdef USE_KEYRING_CCACHE
    err = k5_cc_mutex_finish_init(&krb5int_krcc_mutex);
    if (err)
        return err;
#endif
    return 0;
}

void
krb5int_cc_finalize(void)
{
    struct krb5_cc_typelist *t, *t_next;
    k5_cccol_force_unlock();
    k5_cc_mutex_destroy(&cccol_lock);
    k5_mutex_destroy(&cc_typelist_lock);
#ifndef NO_FILE_CCACHE
    k5_cc_mutex_destroy(&krb5int_cc_file_mutex);
#endif
    k5_cc_mutex_destroy(&krb5int_mcc_mutex);
#ifdef USE_KEYRING_CCACHE
    k5_cc_mutex_destroy(&krb5int_krcc_mutex);
#endif
    for (t = cc_typehead; t != INITIAL_TYPEHEAD; t = t_next) {
        t_next = t->next;
        free(t);
    }
}


/*
 * Register a new credentials cache type.
 * If override is set, replace any existing registration of a type with the
 * same prefix; otherwise return KRB5_CC_TYPE_EXISTS.
 */

krb5_error_code KRB5_CALLCONV
krb5_cc_register(krb5_context context, const krb5_cc_ops *ops,
                 krb5_boolean override)
{
    struct krb5_cc_typelist *t;
    krb5_error_code err;

    err = k5_mutex_lock(&cc_typelist_lock);
    if (err)
        return err;
    for (t = cc_typehead; t && strcmp(t->ops->prefix, ops->prefix); t = t->next)
        ;
    if (t) {
        if (override) {
            t->ops = ops;
            k5_mutex_unlock(&cc_typelist_lock);
            return 0;
        } else {
            k5_mutex_unlock(&cc_typelist_lock);
            return KRB5_CC_TYPE_EXISTS;
        }
    }
    if (!(t = (struct krb5_cc_typelist *) malloc(sizeof(*t)))) {
        k5_mutex_unlock(&cc_typelist_lock);
        return ENOMEM;
    }
    t->next = cc_typehead;
    t->ops = ops;
    cc_typehead = t;
    k5_mutex_unlock(&cc_typelist_lock);
    return 0;
}
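
/*
 * Illustrative sketch (editorial addition, not part of the library): how an
 * application might register an externally supplied ccache type via
 * krb5_cc_register() and treat an already-registered prefix as success.
 * my_custom_ops is a hypothetical ops vector defined elsewhere, and
 * CCBASE_USAGE_EXAMPLES is a hypothetical guard macro that keeps the sketch
 * out of normal builds.
 */
#ifdef CCBASE_USAGE_EXAMPLES
extern const krb5_cc_ops my_custom_ops;        /* hypothetical */

static krb5_error_code
example_register_type(krb5_context context)
{
    krb5_error_code ret;

    /* Do not override an existing registration with the same prefix. */
    ret = krb5_cc_register(context, &my_custom_ops, FALSE);
    if (ret == KRB5_CC_TYPE_EXISTS)
        ret = 0;        /* an existing registration is acceptable here */
    return ret;
}
#endif /* CCBASE_USAGE_EXAMPLES */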

/*
 * Resolve a credential cache name into a cred. cache object.
 *
 * The name is currently constrained to be of the form "type:residual".
 *
 * The "type" portion corresponds to one of the predefined credential
 * cache types, while the "residual" portion is specific to the
 * particular cache type.
 */

#include <ctype.h>
krb5_error_code KRB5_CALLCONV
krb5_cc_resolve(krb5_context context, const char *name, krb5_ccache *cache)
{
    char *pfx, *cp;
    const char *resid;
    unsigned int pfxlen;
    krb5_error_code err;
    const krb5_cc_ops *ops;

    if (name == NULL)
        return KRB5_CC_BADNAME;
    pfx = NULL;
    cp = strchr(name, ':');
    if (!cp) {
        if (krb5_cc_dfl_ops)
            return (*krb5_cc_dfl_ops->resolve)(context, cache, name);
        else
            return KRB5_CC_BADNAME;
    }

    pfxlen = cp - name;

    if (pfxlen == 1 && isalpha((unsigned char) name[0])) {
        /* We found a drive letter, not a prefix - use FILE */
        pfx = strdup("FILE");
        if (!pfx)
            return ENOMEM;

        resid = name;
    } else {
        resid = name + pfxlen + 1;

        pfx = malloc(pfxlen + 1);
        if (!pfx)
            return ENOMEM;

        memcpy(pfx, name, pfxlen);
        pfx[pfxlen] = '\0';
    }

    *cache = (krb5_ccache) 0;

    err = krb5int_cc_getops(context, pfx, &ops);
    if (pfx != NULL)
        free(pfx);
    if (err)
        return err;

    return ops->resolve(context, cache, resid);
}
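
/*
 * Illustrative sketch (editorial addition, not part of the library): resolving
 * a "type:residual" name with krb5_cc_resolve() and reading back the client
 * principal.  The cache name is a placeholder, and CCBASE_USAGE_EXAMPLES is a
 * hypothetical guard macro.
 */
#ifdef CCBASE_USAGE_EXAMPLES
static krb5_error_code
example_resolve_cache(krb5_context context)
{
    krb5_error_code ret;
    krb5_ccache cache = NULL;
    krb5_principal princ = NULL;

    ret = krb5_cc_resolve(context, "FILE:/tmp/krb5cc_example", &cache);
    if (ret)
        return ret;

    /* A resolved cache may not be initialized yet; this can fail benignly. */
    ret = krb5_cc_get_principal(context, cache, &princ);
    if (!ret)
        krb5_free_principal(context, princ);

    krb5_cc_close(context, cache);
    return ret;
}
#endif /* CCBASE_USAGE_EXAMPLES */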

/*
 * cc_getops
 *
 * Internal function to return the ops vector for a given ccache
 * prefix string.
 */
static krb5_error_code
krb5int_cc_getops(krb5_context context,
                  const char *pfx,
                  const krb5_cc_ops **ops)
{
    krb5_error_code err;
    struct krb5_cc_typelist *tlist;

    err = k5_mutex_lock(&cc_typelist_lock);
    if (err)
        return err;

    for (tlist = cc_typehead; tlist; tlist = tlist->next) {
        if (strcmp(tlist->ops->prefix, pfx) == 0) {
            *ops = tlist->ops;
            k5_mutex_unlock(&cc_typelist_lock);
            return 0;
        }
    }
    k5_mutex_unlock(&cc_typelist_lock);
    if (krb5_cc_dfl_ops && !strcmp(pfx, krb5_cc_dfl_ops->prefix)) {
        *ops = krb5_cc_dfl_ops;
        return 0;
    }
    return KRB5_CC_UNKNOWN_TYPE;
}

/*
 * cc_new_unique
 *
 * Generate a new unique ccache, given a ccache type and a hint
 * string.  Ignores the hint string for now.
 */
krb5_error_code KRB5_CALLCONV
krb5_cc_new_unique(
    krb5_context context,
    const char *type,
    const char *hint,
    krb5_ccache *id)
{
    const krb5_cc_ops *ops;
    krb5_error_code err;

    *id = NULL;

    err = krb5int_cc_getops(context, type, &ops);
    if (err)
        return err;

    return ops->gen_new(context, id);
}
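
/*
 * Illustrative sketch (editorial addition, not part of the library): creating
 * a unique cache of the MEMORY type with krb5_cc_new_unique(), initializing it
 * for a client principal, and destroying it again.  The hint argument is
 * currently ignored, so NULL is passed.  CCBASE_USAGE_EXAMPLES is a
 * hypothetical guard macro.
 */
#ifdef CCBASE_USAGE_EXAMPLES
static krb5_error_code
example_new_unique_cache(krb5_context context, krb5_principal client)
{
    krb5_error_code ret;
    krb5_ccache cache = NULL;

    ret = krb5_cc_new_unique(context, "MEMORY", NULL, &cache);
    if (ret)
        return ret;

    ret = krb5_cc_initialize(context, cache, client);

    /* Discard the example cache whether or not initialization succeeded. */
    krb5_cc_destroy(context, cache);
    return ret;
}
#endif /* CCBASE_USAGE_EXAMPLES */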

/*
 * cc_typecursor
 *
 * Note: to avoid copying the typelist at cursor creation time, among
 * other things, we assume that entries are only ever added to the
 * typelist, never removed or reordered.
 */
krb5_error_code
krb5int_cc_typecursor_new(krb5_context context, krb5_cc_typecursor *t)
{
    krb5_error_code err = 0;
    krb5_cc_typecursor n = NULL;

    *t = NULL;
    n = malloc(sizeof(*n));
    if (n == NULL)
        return ENOMEM;

    err = k5_mutex_lock(&cc_typelist_lock);
    if (err)
        goto errout;
    n->tptr = cc_typehead;
    err = k5_mutex_unlock(&cc_typelist_lock);
    if (err)
        goto errout;

    *t = n;
errout:
    if (err)
        free(n);
    return err;
}

krb5_error_code
krb5int_cc_typecursor_next(krb5_context context,
                           krb5_cc_typecursor t,
                           const krb5_cc_ops **ops)
{
    krb5_error_code err = 0;

    *ops = NULL;
    if (t->tptr == NULL)
        return 0;

    err = k5_mutex_lock(&cc_typelist_lock);
    if (err)
        goto errout;
    *ops = t->tptr->ops;
    t->tptr = t->tptr->next;
    err = k5_mutex_unlock(&cc_typelist_lock);
    if (err)
        goto errout;

errout:
    return err;
}

krb5_error_code
krb5int_cc_typecursor_free(krb5_context context, krb5_cc_typecursor *t)
{
    free(*t);
    *t = NULL;
    return 0;
}
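
/*
 * Illustrative sketch (editorial addition, not part of the library): walking
 * the registered ccache types with the internal typecursor interface defined
 * above.  Iteration ends when *ops comes back NULL.  CCBASE_USAGE_EXAMPLES is
 * a hypothetical guard macro.
 */
#ifdef CCBASE_USAGE_EXAMPLES
static krb5_error_code
example_list_types(krb5_context context)
{
    krb5_error_code ret;
    krb5_cc_typecursor cur = NULL;
    const krb5_cc_ops *ops;

    ret = krb5int_cc_typecursor_new(context, &cur);
    if (ret)
        return ret;

    for (;;) {
        ret = krb5int_cc_typecursor_next(context, cur, &ops);
        if (ret || ops == NULL)
            break;
        /* ops->prefix is the type tag, e.g. "FILE" or "MEMORY". */
    }
    krb5int_cc_typecursor_free(context, &cur);
    return ret;
}
#endif /* CCBASE_USAGE_EXAMPLES */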

krb5_error_code KRB5_CALLCONV
krb5_cc_move(krb5_context context, krb5_ccache src, krb5_ccache dst)
{
    krb5_error_code ret = 0;
    krb5_principal princ = NULL;

    ret = krb5_cccol_lock(context);
    if (ret) {
        return ret;
    }

    ret = krb5_cc_lock(context, src);
    if (ret) {
        krb5_cccol_unlock(context);
        return ret;
    }

    ret = krb5_cc_get_principal(context, src, &princ);
    if (!ret) {
        ret = krb5_cc_initialize(context, dst, princ);
    }
    if (!ret) {
        ret = krb5_cc_lock(context, dst);
    }
    if (!ret) {
        ret = krb5_cc_copy_creds(context, src, dst);
        krb5_cc_unlock(context, dst);
    }

    krb5_cc_unlock(context, src);
    if (!ret) {
        ret = krb5_cc_destroy(context, src);
    }
    krb5_cccol_unlock(context);
    if (princ) {
        krb5_free_principal(context, princ);
        princ = NULL;
    }

    return ret;
}
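
/*
 * Illustrative sketch (editorial addition, not part of the library): moving
 * the contents of a temporary cache into a destination cache with
 * krb5_cc_move().  On success krb5_cc_move() has already destroyed the source
 * cache, so it must not be closed again; on failure the source is still the
 * caller's to clean up.  CCBASE_USAGE_EXAMPLES is a hypothetical guard macro.
 */
#ifdef CCBASE_USAGE_EXAMPLES
static krb5_error_code
example_move_cache(krb5_context context, krb5_ccache tmp, krb5_ccache dest)
{
    krb5_error_code ret;

    ret = krb5_cc_move(context, tmp, dest);
    if (ret)
        krb5_cc_close(context, tmp);    /* still valid on failure */
    return ret;
}
#endif /* CCBASE_USAGE_EXAMPLES */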

krb5_error_code
k5_cc_mutex_init(k5_cc_mutex *m)
{
    krb5_error_code ret = 0;

    ret = k5_mutex_init(&m->lock);
    if (ret)
        return ret;
    m->owner = NULL;
    m->refcount = 0;

    return ret;
}

krb5_error_code
k5_cc_mutex_finish_init(k5_cc_mutex *m)
{
    krb5_error_code ret = 0;

    ret = k5_mutex_finish_init(&m->lock);
    if (ret)
        return ret;
    m->owner = NULL;
    m->refcount = 0;

    return ret;
}

void
k5_cc_mutex_assert_locked(krb5_context context, k5_cc_mutex *m)
{
#ifdef DEBUG_THREADS
    assert(m->refcount > 0);
    assert(m->owner == context);
#endif
    k5_assert_locked(&m->lock);
}

void
k5_cc_mutex_assert_unlocked(krb5_context context, k5_cc_mutex *m)
{
#ifdef DEBUG_THREADS
    assert(m->refcount == 0);
    assert(m->owner == NULL);
#endif
    k5_assert_unlocked(&m->lock);
}

krb5_error_code
k5_cc_mutex_lock(krb5_context context, k5_cc_mutex *m)
{
    krb5_error_code ret = 0;

    /* not locked, or locked by another context */
    if (m->owner != context) {
        /* acquire the lock, blocking until it is available */
        ret = k5_mutex_lock(&m->lock);
        if (ret)
            return ret;
        m->owner = context;
        m->refcount = 1;
    }
    /* already locked by this context; just increase the refcount */
    else {
        m->refcount++;
    }
    return ret;
}

krb5_error_code
k5_cc_mutex_unlock(krb5_context context, k5_cc_mutex *m)
{
    krb5_error_code ret = 0;

    /* verify the owner and sanity-check the refcount */
    if ((m->owner != context) || (m->refcount < 1)) {
        return ret;
    }
    /* decrement; unlock when the count reaches zero */
    m->refcount--;
    if (m->refcount == 0) {
        m->owner = NULL;
        k5_mutex_unlock(&m->lock);
    }
    return ret;
}
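
/*
 * Illustrative sketch (editorial addition, not part of the library): the
 * k5_cc_mutex above is reentrant per krb5_context, so nested lock calls from
 * the same context only bump the refcount, and every lock call must be paired
 * with an unlock call.  example_reentrant_lock is hypothetical and
 * CCBASE_USAGE_EXAMPLES is a hypothetical guard macro.
 */
#ifdef CCBASE_USAGE_EXAMPLES
static void
example_reentrant_lock(krb5_context context, k5_cc_mutex *m)
{
    k5_cc_mutex_lock(context, m);      /* first acquisition, refcount -> 1 */
    k5_cc_mutex_lock(context, m);      /* same context, refcount -> 2 */
    k5_cc_mutex_unlock(context, m);    /* refcount -> 1, still held */
    k5_cc_mutex_unlock(context, m);    /* refcount -> 0, mutex released */
}
#endif /* CCBASE_USAGE_EXAMPLES */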

/* necessary to make reentrant locks play nice with krb5int_cc_finalize */
krb5_error_code
k5_cc_mutex_force_unlock(k5_cc_mutex *m)
{
    krb5_error_code ret = 0;

    /* release the underlying mutex only if it is actually held */
    if (m->refcount > 0) {
        k5_mutex_unlock(&m->lock);
    }
    m->refcount = 0;
    m->owner = NULL;
    return ret;
}

/*
 * holds on to all per-type global locks as well as the typelist lock
 */

krb5_error_code KRB5_CALLCONV
krb5_cccol_lock(krb5_context context)
{
    krb5_error_code ret = 0;

    ret = k5_cc_mutex_lock(context, &cccol_lock);
    if (ret) {
        return ret;
    }
    ret = k5_mutex_lock(&cc_typelist_lock);
    if (ret) {
        k5_cc_mutex_unlock(context, &cccol_lock);
        return ret;
    }
    ret = k5_cc_mutex_lock(context, &krb5int_cc_file_mutex);
    if (ret) {
        k5_mutex_unlock(&cc_typelist_lock);
        k5_cc_mutex_unlock(context, &cccol_lock);
        return ret;
    }
    ret = k5_cc_mutex_lock(context, &krb5int_mcc_mutex);
    if (ret) {
        k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
        k5_mutex_unlock(&cc_typelist_lock);
        k5_cc_mutex_unlock(context, &cccol_lock);
        return ret;
    }
#ifdef USE_CCAPI_V3
    ret = krb5_stdccv3_context_lock(context);
#endif
#ifdef USE_KEYRING_CCACHE
    ret = k5_cc_mutex_lock(context, &krb5int_krcc_mutex);
#endif
    if (ret) {
        k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
        k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
        k5_mutex_unlock(&cc_typelist_lock);
        k5_cc_mutex_unlock(context, &cccol_lock);
        return ret;
    }
    k5_mutex_unlock(&cc_typelist_lock);
    return ret;
}

krb5_error_code KRB5_CALLCONV
krb5_cccol_unlock(krb5_context context)
{
    krb5_error_code ret = 0;

    /* sanity check */
    k5_cc_mutex_assert_locked(context, &cccol_lock);

    ret = k5_mutex_lock(&cc_typelist_lock);
    if (ret) {
        k5_cc_mutex_unlock(context, &cccol_lock);
        return ret;
    }

    /* unlock each type in the opposite order */
#ifdef USE_KEYRING_CCACHE
    k5_cc_mutex_assert_locked(context, &krb5int_krcc_mutex);
    k5_cc_mutex_unlock(context, &krb5int_krcc_mutex);
#endif
#ifdef USE_CCAPI_V3
    krb5_stdccv3_context_unlock(context);
#endif
    k5_cc_mutex_assert_locked(context, &krb5int_mcc_mutex);
    k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
    k5_cc_mutex_assert_locked(context, &krb5int_cc_file_mutex);
    k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
    k5_mutex_assert_locked(&cc_typelist_lock);

    k5_mutex_unlock(&cc_typelist_lock);
    k5_cc_mutex_unlock(context, &cccol_lock);

    return ret;
}
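
/*
 * Illustrative sketch (editorial addition, not part of the library):
 * bracketing a multi-cache operation with krb5_cccol_lock()/krb5_cccol_unlock()
 * so no per-type cache state changes underneath it.  CCBASE_USAGE_EXAMPLES is
 * a hypothetical guard macro.
 */
#ifdef CCBASE_USAGE_EXAMPLES
static krb5_error_code
example_collection_operation(krb5_context context)
{
    krb5_error_code ret;

    ret = krb5_cccol_lock(context);
    if (ret)
        return ret;

    /* ... operate on several caches while the collection is locked ... */

    return krb5_cccol_unlock(context);
}
#endif /* CCBASE_USAGE_EXAMPLES */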

/* necessary to make reentrant locks play nice with krb5int_cc_finalize */
krb5_error_code
k5_cccol_force_unlock(void)
{
    krb5_error_code ret = 0;

    /* sanity check */
    if (cccol_lock.refcount == 0) {
        return 0;
    }

    ret = k5_mutex_lock(&cc_typelist_lock);
    if (ret) {
        cccol_lock.refcount = 0;
        cccol_lock.owner = NULL;
        k5_mutex_unlock(&cccol_lock.lock);
        return ret;
    }

    /* unlock each type in the opposite order */
#ifdef USE_KEYRING_CCACHE
    k5_cc_mutex_force_unlock(&krb5int_krcc_mutex);
#endif
#ifdef USE_CCAPI_V3
    krb5_stdccv3_context_unlock(NULL);
#endif
    k5_cc_mutex_force_unlock(&krb5int_mcc_mutex);
    k5_cc_mutex_force_unlock(&krb5int_cc_file_mutex);

    k5_mutex_unlock(&cc_typelist_lock);
    k5_cc_mutex_force_unlock(&cccol_lock);

    return ret;
}