// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include "runtime.h" #include "hashmap.h" #include "type.h" /* Return a pointer to the struct/union of type "type" whose "field" field is addressed by pointer "p". */ struct hash { /* a hash table; initialize with hash_init() */ uint32 count; /* elements in table - must be first */ uint8 datasize; /* amount of data to store in entry */ uint8 max_power; /* max power of 2 to create sub-tables */ uint8 max_probes; /* max entries to probe before rehashing */ uint8 indirectval; /* storing pointers to values */ int32 changes; /* inc'ed whenever a subtable is created/grown */ hash_hash_t (*data_hash) (uint32, void *a); /* return hash of *a */ uint32 (*data_eq) (uint32, void *a, void *b); /* return whether *a == *b */ void (*data_del) (uint32, void *arg, void *data); /* invoked on deletion */ struct hash_subtable *st; /* first-level table */ uint32 keysize; uint32 valsize; uint32 datavo; // three sets of offsets: the digit counts how many // of key, value are passed as inputs: // 0 = func() (key, value) // 1 = func(key) (value) // 2 = func(key, value) uint32 ko0; uint32 vo0; uint32 ko1; uint32 vo1; uint32 po1; uint32 ko2; uint32 vo2; uint32 po2; Alg* keyalg; Alg* valalg; }; struct hash_entry { hash_hash_t hash; /* hash value of data */ byte data[1]; /* user data has "datasize" bytes */ }; struct hash_subtable { uint8 power; /* bits used to index this table */ uint8 used; /* bits in hash used before reaching this table */ uint8 datasize; /* bytes of client data in an entry */ uint8 max_probes; /* max number of probes when searching */ int16 limit_bytes; /* max_probes * (datasize+sizeof (hash_hash_t)) */ struct hash_entry *end; /* points just past end of entry[] */ struct hash_entry entry[1]; /* 2**power+max_probes-1 elements of elemsize bytes */ }; #define HASH_DATA_EQ(h,x,y) ((*h->data_eq) (h->keysize, (x), (y))) 
#define HASH_REHASH 0x2 /* an internal flag */ /* the number of bits used is stored in the flags word too */ #define HASH_USED(x) ((x) >> 2) #define HASH_MAKE_USED(x) ((x) << 2) #define HASH_LOW 6 #define HASH_ONE (((hash_hash_t)1) << HASH_LOW) #define HASH_MASK (HASH_ONE - 1) #define HASH_ADJUST(x) (((x) < HASH_ONE) << HASH_LOW) #define HASH_BITS (sizeof (hash_hash_t) * 8) #define HASH_SUBHASH HASH_MASK #define HASH_NIL 0 #define HASH_NIL_MEMSET 0 #define HASH_OFFSET(base, byte_offset) \ ((struct hash_entry *) (((byte *) (base)) + (byte_offset))) /* return a hash layer with 2**power empty entries */ static struct hash_subtable * hash_subtable_new (struct hash *h, int32 power, int32 used) { int32 elemsize = h->datasize + offsetof (struct hash_entry, data[0]); int32 bytes = elemsize << power; struct hash_subtable *st; int32 limit_bytes = h->max_probes * elemsize; int32 max_probes = h->max_probes; if (bytes < limit_bytes) { limit_bytes = bytes; max_probes = 1 << power; } bytes += limit_bytes - elemsize; st = malloc (offsetof (struct hash_subtable, entry[0]) + bytes); st->power = power; st->used = used; st->datasize = h->datasize; st->max_probes = max_probes; st->limit_bytes = limit_bytes; st->end = HASH_OFFSET (st->entry, bytes); memset (st->entry, HASH_NIL_MEMSET, bytes); return (st); } static void init_sizes (int64 hint, int32 *init_power, int32 *max_power) { int32 log = 0; int32 i; for (i = 32; i != 0; i >>= 1) { if ((hint >> (log + i)) != 0) { log += i; } } log += 1 + (((hint << 3) >> log) >= 11); /* round up for utilization */ if (log <= 14) { *init_power = log; } else { *init_power = 12; } *max_power = 12; } static void hash_init (struct hash *h, int32 datasize, hash_hash_t (*data_hash) (uint32, void *), uint32 (*data_eq) (uint32, void *, void *), void (*data_del) (uint32, void *, void *), int64 hint) { int32 init_power; int32 max_power; if(datasize < sizeof (void *)) datasize = sizeof (void *); datasize = rnd(datasize, sizeof (void *)); init_sizes (hint, 
&init_power, &max_power);
	h->datasize = datasize;
	h->max_power = max_power;
	h->max_probes = 15;
	/* the struct fields are narrow (uint8); verify nothing was truncated */
	assert (h->datasize == datasize);
	assert (h->max_power == max_power);
	assert (sizeof (void *) <= h->datasize || h->max_power == 255);
	h->count = 0;
	h->changes = 0;
	h->data_hash = data_hash;
	h->data_eq = data_eq;
	h->data_del = data_del;
	h->st = hash_subtable_new (h, init_power, 0);
}

/* remove "n" contiguous entries starting at "dst_e" in subtable "st",
   sliding later entries of the probe run back toward their natural slots
   and zeroing the vacated space */
static void
hash_remove_n (struct hash_subtable *st, struct hash_entry *dst_e, int32 n)
{
	int32 elemsize = st->datasize + offsetof (struct hash_entry, data[0]);
	struct hash_entry *src_e = HASH_OFFSET (dst_e, n * elemsize);
	struct hash_entry *end_e = st->end;
	int32 shift = HASH_BITS - (st->power + st->used);
	int32 index_mask = (((hash_hash_t)1) << st->power) - 1;
	int32 dst_i = (((byte *) dst_e) - ((byte *) st->entry)) / elemsize;
	int32 src_i = dst_i + n;
	hash_hash_t hash;
	int32 skip;
	int32 bytes;

	while (dst_e != src_e) {
		if (src_e != end_e) {
			struct hash_entry *cp_e = src_e;
			int32 save_dst_i = dst_i;

			/* find the run of entries that still land at or past their
			   natural slot after being shifted back by n */
			while (cp_e != end_e && (hash = cp_e->hash) != HASH_NIL &&
			     ((hash >> shift) & index_mask) <= dst_i) {
				cp_e = HASH_OFFSET (cp_e, elemsize);
				dst_i++;
			}
			bytes = ((byte *) cp_e) - (byte *) src_e;
			memmove (dst_e, src_e, bytes);
			dst_e = HASH_OFFSET (dst_e, bytes);
			src_e = cp_e;
			src_i += dst_i - save_dst_i;
			/* leave a gap up to the next entry's natural position */
			if (src_e != end_e && (hash = src_e->hash) != HASH_NIL) {
				skip = ((hash >> shift) & index_mask) - dst_i;
			} else {
				skip = src_i - dst_i;
			}
		} else {
			skip = src_i - dst_i;
		}
		bytes = skip * elemsize;
		memset (dst_e, HASH_NIL_MEMSET, bytes);
		dst_e = HASH_OFFSET (dst_e, bytes);
		dst_i += skip;
	}
}

static int32 hash_insert_internal (struct hash_subtable **pst, int32 flags, hash_hash_t hash,
	struct hash *h, void *data, void **pres);

/* convert the overflowing probe run around "e" in subtable "st" into child
   subtables; "hash" is the value whose insertion triggered the conversion */
static void
hash_conv (struct hash *h,
		struct hash_subtable *st, int32 flags,
		hash_hash_t hash,
		struct hash_entry *e)
{
	int32 new_flags = (flags + HASH_MAKE_USED (st->power)) | HASH_REHASH;
	int32 shift = HASH_BITS - HASH_USED (new_flags);
	hash_hash_t prefix_mask = (-(hash_hash_t)1)
<< shift;
	int32 elemsize = h->datasize + offsetof (struct hash_entry, data[0]);
	void *dummy_result;
	struct hash_entry *de;
	int32 index_mask = (1 << st->power) - 1;
	hash_hash_t e_hash;
	struct hash_entry *pe = HASH_OFFSET (e, -elemsize);

	/* back up to the start of the contiguous run containing e */
	while (e != st->entry && (e_hash = pe->hash) != HASH_NIL && (e_hash & HASH_MASK) != HASH_SUBHASH) {
		e = pe;
		pe = HASH_OFFSET (pe, -elemsize);
	}

	de = e;		/* de trails e: the compacted write position */
	while (e != st->end &&
	    (e_hash = e->hash) != HASH_NIL &&
	    (e_hash & HASH_MASK) != HASH_SUBHASH) {
		struct hash_entry *target_e = HASH_OFFSET (st->entry, ((e_hash >> shift) & index_mask) * elemsize);
		struct hash_entry *ne = HASH_OFFSET (e, elemsize);
		hash_hash_t current = e_hash & prefix_mask;

		/* clear any gap up to this entry's natural slot */
		if (de < target_e) {
			memset (de, HASH_NIL_MEMSET, ((byte *) target_e) - (byte *) de);
			de = target_e;
		}
		/* push into a child subtable when the colliding hash or the next
		   entry shares this entry's hash prefix */
		if ((hash & prefix_mask) == current ||
		   (ne != st->end && (e_hash = ne->hash) != HASH_NIL &&
		   (e_hash & prefix_mask) == current)) {
			struct hash_subtable *new_st = hash_subtable_new (h, 1, HASH_USED (new_flags));
			int32 rc = hash_insert_internal (&new_st, new_flags, e->hash,
				h, e->data, &dummy_result);
			assert (rc == 0);
			memcpy(dummy_result, e->data, h->datasize);
			e = ne;
			/* move the rest of the run sharing the prefix too */
			while (e != st->end && (e_hash = e->hash) != HASH_NIL && (e_hash & prefix_mask) == current) {
				assert ((e_hash & HASH_MASK) != HASH_SUBHASH);
				rc = hash_insert_internal (&new_st, new_flags, e_hash, h, e->data, &dummy_result);
				assert (rc == 0);
				memcpy(dummy_result, e->data, h->datasize);
				e = HASH_OFFSET (e, elemsize);
			}
			/* the vacated entry now holds the subtable pointer */
			memset (de->data, HASH_NIL_MEMSET, h->datasize);
			*(struct hash_subtable **)de->data = new_st;
			de->hash = current | HASH_SUBHASH;
		} else {
			/* no collision on this prefix: just compact the entry down */
			if (e != de) {
				memcpy (de, e, elemsize);
			}
			e = HASH_OFFSET (e, elemsize);
		}
		de = HASH_OFFSET (de, elemsize);
	}
	/* clear the tail left behind by compaction */
	if (e != de) {
		hash_remove_n (st, de, (((byte *) e) - (byte *) de) / elemsize);
	}
}

/* replace *pst with a subtable of twice the size, rehashing every entry
   into it and freeing the old table */
static void
hash_grow (struct hash *h, struct hash_subtable **pst, int32 flags)
{
	struct hash_subtable *old_st = *pst;
	int32 elemsize = h->datasize + offsetof (struct hash_entry, data[0]);
	*pst
= hash_subtable_new (h, old_st->power + 1, HASH_USED (flags));
	struct hash_entry *end_e = old_st->end;
	struct hash_entry *e;
	void *dummy_result;
	int32 used = 0;

	flags |= HASH_REHASH;	/* stored hashes are already adjusted */
	for (e = old_st->entry; e != end_e; e = HASH_OFFSET (e, elemsize)) {
		hash_hash_t hash = e->hash;
		if (hash != HASH_NIL) {
			int32 rc = hash_insert_internal (pst, flags, e->hash,
				h, e->data, &dummy_result);
			assert (rc == 0);
			memcpy(dummy_result, e->data, h->datasize);
			used++;
		}
	}
	free (old_st);
}

/* look up "data"; on hit set *pres to the entry's data area and return 1,
   otherwise set *pres to 0 and return 0 */
int32
hash_lookup (struct hash *h, void *data, void **pres)
{
	int32 elemsize = h->datasize + offsetof (struct hash_entry, data[0]);
	hash_hash_t hash = (*h->data_hash) (h->keysize, data) & ~HASH_MASK;
	struct hash_subtable *st = h->st;
	int32 used = 0;
	hash_hash_t e_hash;
	struct hash_entry *e;
	struct hash_entry *end_e;

	hash += HASH_ADJUST (hash);	/* keep hash out of the reserved range */
	/* descend through subtables to the leaf holding this hash prefix */
	for (;;) {
		int32 shift = HASH_BITS - (st->power + used);
		int32 index_mask = (1 << st->power) - 1;
		int32 i = (hash >> shift) & index_mask;	   /* i is the natural position of hash */

		e = HASH_OFFSET (st->entry, i * elemsize); /* e points to element i */
		e_hash = e->hash;
		if ((e_hash & HASH_MASK) != HASH_SUBHASH) {	/* not a subtable; stop descending */
			break;
		}
		used += st->power;
		st = *(struct hash_subtable **)e->data;
	}
	end_e = HASH_OFFSET (e, st->limit_bytes);
	/* entries in a probe run are ordered by hash; skip smaller ones */
	while (e != end_e && (e_hash = e->hash) != HASH_NIL && e_hash < hash) {
		e = HASH_OFFSET (e, elemsize);
	}
	/* candidates differ only in the low (collision-adjust) bits */
	while (e != end_e && ((e_hash = e->hash) ^ hash) < HASH_SUBHASH) {
		if (HASH_DATA_EQ (h, data, e->data)) {	  /* a match */
			*pres = e->data;
			return (1);
		}
		e = HASH_OFFSET (e, elemsize);
	}
	USED(e_hash);
	*pres = 0;
	return (0);
}

/* remove "data" from the table, invoking data_del(h->datavo, arg, entry)
   on the entry if found; return 1 if removed, 0 if not present */
int32
hash_remove (struct hash *h, void *data, void *arg)
{
	int32 elemsize = h->datasize + offsetof (struct hash_entry, data[0]);
	hash_hash_t hash = (*h->data_hash) (h->keysize, data) & ~HASH_MASK;
	struct hash_subtable *st = h->st;
	int32 used = 0;
	hash_hash_t e_hash;
	struct hash_entry *e;
	struct hash_entry *end_e;

	hash += HASH_ADJUST (hash);
	/* descend through subtables exactly as in hash_lookup */
	for (;;) {
		int32 shift = HASH_BITS - (st->power + used);
		int32
index_mask = (1 << st->power) - 1;
		int32 i = (hash >> shift) & index_mask;	   /* i is the natural position of hash */

		e = HASH_OFFSET (st->entry, i * elemsize); /* e points to element i */
		e_hash = e->hash;
		if ((e_hash & HASH_MASK) != HASH_SUBHASH) {	/* not a subtable; stop descending */
			break;
		}
		used += st->power;
		st = *(struct hash_subtable **)e->data;
	}
	end_e = HASH_OFFSET (e, st->limit_bytes);
	while (e != end_e && (e_hash = e->hash) != HASH_NIL && e_hash < hash) {
		e = HASH_OFFSET (e, elemsize);
	}
	while (e != end_e && ((e_hash = e->hash) ^ hash) < HASH_SUBHASH) {
		if (HASH_DATA_EQ (h, data, e->data)) {	  /* a match */
			(*h->data_del) (h->datavo, arg, e->data);
			hash_remove_n (st, e, 1);
			h->count--;
			return (1);
		}
		e = HASH_OFFSET (e, elemsize);
	}
	USED(e_hash);
	return (0);
}

/* insert an entry with the given hash; on success set *pres to the new
   entry's data area (caller fills it) and return 0; if an equal key is
   already present set *pres to that entry and return 1.  Grows or converts
   subtables as needed and retries until the insert fits. */
static int32
hash_insert_internal (struct hash_subtable **pst, int32 flags, hash_hash_t hash,
		struct hash *h, void *data, void **pres)
{
	int32 elemsize = h->datasize + offsetof (struct hash_entry, data[0]);

	/* during rehash the stored hash is already adjusted */
	if ((flags & HASH_REHASH) == 0) {
		hash += HASH_ADJUST (hash);
		hash &= ~HASH_MASK;
	}
	for (;;) {
		struct hash_subtable *st = *pst;
		int32 shift = HASH_BITS - (st->power + HASH_USED (flags));
		int32 index_mask = (1 << st->power) - 1;
		int32 i = (hash >> shift) & index_mask;	   /* i is the natural position of hash */
		struct hash_entry *start_e =
			HASH_OFFSET (st->entry, i * elemsize);	  /* start_e is the pointer to element i */
		struct hash_entry *e = start_e;		   /* e is going to range over [start_e, end_e) */
		struct hash_entry *end_e;
		hash_hash_t e_hash = e->hash;

		if ((e_hash & HASH_MASK) == HASH_SUBHASH) {	/* a subtable; descend */
			pst = (struct hash_subtable **) e->data;
			flags += HASH_MAKE_USED (st->power);
			continue;
		}
		end_e = HASH_OFFSET (start_e, st->limit_bytes);
		while (e != end_e && (e_hash = e->hash) != HASH_NIL && e_hash < hash) {
			e = HASH_OFFSET (e, elemsize);
			i++;
		}
		if (e != end_e && e_hash != HASH_NIL) {
			/* ins_e ranges over the elements that may match */
			struct hash_entry *ins_e = e;
			int32 ins_i = i;
			hash_hash_t ins_e_hash;

			while
(ins_e != end_e && ((e_hash = ins_e->hash) ^ hash) < HASH_SUBHASH) {
				if (HASH_DATA_EQ (h, data, ins_e->data)) {    /* a match */
					*pres = ins_e->data;
					return (1);
				}
				assert (e_hash != hash || (flags & HASH_REHASH) == 0);
				hash += (e_hash == hash);	   /* adjust hash if it collides */
				ins_e = HASH_OFFSET (ins_e, elemsize);
				ins_i++;
				if (e_hash <= hash) {	       /* set e to insertion point */
					e = ins_e;
					i = ins_i;
				}
			}
			/* set ins_e to the insertion point for the new element */
			ins_e = e;
			ins_i = i;
			ins_e_hash = 0;
			/* move ins_e to point at the end of the contiguous block, but
			   stop if any element can't be moved by one up */
			while (ins_e != st->end && (ins_e_hash = ins_e->hash) != HASH_NIL &&
			    ins_i + 1 - ((ins_e_hash >> shift) & index_mask) < st->max_probes &&
			    (ins_e_hash & HASH_MASK) != HASH_SUBHASH) {
				ins_e = HASH_OFFSET (ins_e, elemsize);
				ins_i++;
			}
			if (e == end_e || ins_e == st->end || ins_e_hash != HASH_NIL) {
				e = end_e;    /* can't insert; must grow or convert to subtable */
			} else {	      /* make space for element */
				memmove (HASH_OFFSET (e, elemsize), e, ((byte *) ins_e) - (byte *) e);
			}
		}
		if (e != end_e) {
			e->hash = hash;
			*pres = e->data;
			return (0);
		}
		/* no room: restructure, then retry the insert from the top */
		h->changes++;
		if (st->power < h->max_power) {
			hash_grow (h, pst, flags);
		} else {
			hash_conv (h, st, flags, hash, start_e);
		}
	}
}

/* public insert; return 0 (new entry, *pres points at its data area for the
   caller to fill) or 1 (equal key already present, *pres points at it) */
int32
hash_insert (struct hash *h, void *data, void **pres)
{
	int32 rc = hash_insert_internal (&h->st, 0, (*h->data_hash) (h->keysize, data),
		h, data, pres);

	h->count += (rc == 0);	  /* increment count if element didn't previously exist */
	return (rc);
}

/* number of elements currently in the table */
uint32
hash_count (struct hash *h)
{
	return (h->count);
}

/* rebuild the iterator's subtable stack after the table's structure
   changed, resuming just past it->last_hash */
static void
iter_restart (struct hash_iter *it, struct hash_subtable *st, int32 used)
{
	int32 elemsize = it->elemsize;
	hash_hash_t last_hash = it->last_hash;
	struct hash_entry *e;
	hash_hash_t e_hash;
	struct hash_iter_sub *sub = &it->subtable_state[it->i];
	struct hash_entry *end;

	for (;;) {
		int32 shift = HASH_BITS - (st->power + used);
		int32 index_mask = (1 << st->power) - 1;
		int32 i = (last_hash >> shift) &
index_mask;

		end = st->end;
		e = HASH_OFFSET (st->entry, i * elemsize);
		sub->start = st->entry;
		sub->end = end;
		if ((e->hash & HASH_MASK) != HASH_SUBHASH) {
			/* reached a leaf entry; stop descending */
			break;
		}
		/* record resume point at this level, then descend */
		sub->e = HASH_OFFSET (e, elemsize);
		sub = &it->subtable_state[++(it->i)];
		used += st->power;
		st = *(struct hash_subtable **)e->data;
	}
	/* skip entries already yielded (hash <= last_hash) */
	while (e != end && ((e_hash = e->hash) == HASH_NIL || e_hash <= last_hash)) {
		e = HASH_OFFSET (e, elemsize);
	}
	sub->e = e;
}

/* yield the next element's data in hash order, or 0 when iteration is done */
void *
hash_next (struct hash_iter *it)
{
	int32 elemsize = it->elemsize;
	struct hash_iter_sub *sub = &it->subtable_state[it->i];
	struct hash_entry *e = sub->e;
	struct hash_entry *end = sub->end;
	hash_hash_t e_hash = 0;

	if (it->changes != it->h->changes) {	/* hash table's structure changed; recompute */
		it->changes = it->h->changes;
		it->i = 0;
		iter_restart (it, it->h->st, 0);
		sub = &it->subtable_state[it->i];
		e = sub->e;
		end = sub->end;
	}
	if (e != sub->start && it->last_hash != HASH_OFFSET (e, -elemsize)->hash) {
		/* a removal shifted entries under us; re-locate our position
		   within max_probes entries of where we were */
		struct hash_entry *start = HASH_OFFSET (e, -(elemsize * it->h->max_probes));
		struct hash_entry *pe = HASH_OFFSET (e, -elemsize);
		hash_hash_t last_hash = it->last_hash;

		if (start < sub->start) {
			start = sub->start;
		}
		while (e != start && ((e_hash = pe->hash) == HASH_NIL || last_hash < e_hash)) {
			e = pe;
			pe = HASH_OFFSET (pe, -elemsize);
		}
		while (e != end && ((e_hash = e->hash) == HASH_NIL || e_hash <= last_hash)) {
			e = HASH_OFFSET (e, elemsize);
		}
	}
	for (;;) {
		while (e != end && (e_hash = e->hash) == HASH_NIL) {
			e = HASH_OFFSET (e, elemsize);
		}
		if (e == end) {
			if (it->i == 0) {
				/* fully exhausted */
				it->last_hash = HASH_OFFSET (e, -elemsize)->hash;
				sub->e = e;
				return (0);
			} else {
				/* pop one level of the subtable stack */
				it->i--;
				sub = &it->subtable_state[it->i];
				e = sub->e;
				end = sub->end;
			}
		} else if ((e_hash & HASH_MASK) != HASH_SUBHASH) {
			it->last_hash = e->hash;
			sub->e = HASH_OFFSET (e, elemsize);
			return (e->data);
		} else {
			/* descend into a subtable */
			struct hash_subtable *st = *(struct hash_subtable **)e->data;

			sub->e = HASH_OFFSET (e, elemsize);
			it->i++;
			assert (it->i < sizeof (it->subtable_state) /
				sizeof
(it->subtable_state[0]));
			sub = &it->subtable_state[it->i];
			sub->e = e = st->entry;
			sub->start = st->entry;
			sub->end = end = st->end;
		}
	}
}

/* initialize iterator "it" to walk table "h" from the beginning */
void
hash_iter_init (struct hash *h, struct hash_iter *it)
{
	it->elemsize = h->datasize + offsetof (struct hash_entry, data[0]);
	it->changes = h->changes;
	it->i = 0;
	it->h = h;
	it->last_hash = 0;
	it->subtable_state[0].e = h->st->entry;
	it->subtable_state[0].start = h->st->entry;
	it->subtable_state[0].end = h->st->end;
}

/* recursively free subtable "st", accumulating total slot and used counts */
static void
clean_st (struct hash_subtable *st, int32 *slots, int32 *used)
{
	int32 elemsize = st->datasize + offsetof (struct hash_entry, data[0]);
	struct hash_entry *e = st->entry;
	struct hash_entry *end = st->end;
	int32 lslots = (((byte *) end) - (byte *) e) / elemsize;
	int32 lused = 0;

	while (e != end) {
		hash_hash_t hash = e->hash;
		if ((hash & HASH_MASK) == HASH_SUBHASH) {
			clean_st (*(struct hash_subtable **)e->data, slots, used);
		} else {
			lused += (hash != HASH_NIL);
		}
		e = HASH_OFFSET (e, elemsize);
	}
	free (st);
	*slots += lslots;
	*used += lused;
}

/* free all memory owned by table "h", including "h" itself */
void
hash_destroy (struct hash *h)
{
	int32 slots = 0;
	int32 used = 0;

	clean_st (h->st, &slots, &used);
	free (h);
}

/* invoke data_visit on every entry (including subtable-pointer entries),
   asserting the probe-distance invariants along the way */
static void
hash_visit_internal (struct hash_subtable *st,
		int32 used, int32 level,
		void (*data_visit) (void *arg, int32 level, void *data),
		void *arg)
{
	int32 elemsize = st->datasize + offsetof (struct hash_entry, data[0]);
	struct hash_entry *e = st->entry;
	int32 shift = HASH_BITS - (used + st->power);
	int32 i = 0;

	while (e != st->end) {
		/* double shift avoids undefined behaviour when shift == 0 */
		int32 index = ((e->hash >> (shift - 1)) >> 1) & ((1 << st->power) - 1);
		if ((e->hash & HASH_MASK) == HASH_SUBHASH) {
			(*data_visit) (arg, level, e->data);
			hash_visit_internal (*(struct hash_subtable **)e->data,
				used + st->power, level + 1, data_visit, arg);
		} else {
			(*data_visit) (arg, level, e->data);
		}
		if (e->hash != HASH_NIL) {
			assert (i < index + st->max_probes);
			assert (index <= i);
		}
		e = HASH_OFFSET (e, elemsize);
		i++;
	}
}

/* walk the whole table, calling data_visit on every entry */
void
hash_visit (struct hash *h,
		void (*data_visit) (void *arg, int32 level, void *data),
void *arg)
{
	hash_visit_internal (h->st, 0, 0, data_visit, arg);
}

//
/// interfaces to go runtime
//

// hash requires < 256 bytes of data (key+value) stored inline.
// Only basic types can be key - biggest is complex128 (16 bytes).
// Leave some room to grow, just in case.
enum {
	MaxValsize = 256 - 64
};

/* deletion callback used when values are stored inline: nothing to free */
static void
donothing(uint32 s, void *a, void *b)
{
	USED(s);
	USED(a);
	USED(b);
}

/* deletion callback for indirect values: free the out-of-line value block */
static void
freedata(uint32 datavo, void *a, void *b)
{
	void *p;

	USED(a);
	p = *(void**)((byte*)b + datavo);
	free(p);
}

/* return the value pointer, following one level of indirection when values
   are stored out of line */
static void**
hash_indirect(Hmap *h, void *p)
{
	if(h->indirectval)
		p = *(void**)p;
	return p;
}

static	int32	debug	= 0;

// makemap(key, val *Type, hint uint32) (hmap *map[any]any);
Hmap*
makemap(Type *key, Type *val, int64 hint)
{
	Hmap *h;
	int32 keyalg, valalg, keysize, valsize, valsize_in_hash;
	void (*data_del)(uint32, void*, void*);

	if(hint < 0 || (int32)hint != hint)
		panicstring("makemap: size out of range");

	keyalg = key->alg;
	valalg = val->alg;
	keysize = key->size;
	valsize = val->size;

	if(keyalg >= nelem(algarray) || algarray[keyalg].hash == nohash) {
		printf("map(keyalg=%d)\n", keyalg);
		throw("runtime.makemap: unsupported map key type");
	}

	if(valalg >= nelem(algarray)) {
		printf("map(valalg=%d)\n", valalg);
		throw("runtime.makemap: unsupported map value type");
	}

	h = mal(sizeof(*h));

	/* large values are stored out of line: the entry holds a pointer and
	   freedata releases the value on deletion */
	valsize_in_hash = valsize;
	data_del = donothing;
	if (valsize > MaxValsize) {
		h->indirectval = 1;
		data_del = freedata;
		valsize_in_hash = sizeof(void*);
	}

	// align value inside data so that mark-sweep gc can find it.
	// might remove in the future and just assume datavo == keysize.
	h->datavo = keysize;
	if(valsize_in_hash >= sizeof(void*))
		h->datavo = rnd(keysize, sizeof(void*));

	hash_init(h, h->datavo+valsize_in_hash,
		algarray[keyalg].hash,
		algarray[keyalg].equal,
		data_del,
		hint);

	h->keysize = keysize;
	h->valsize = valsize;
	h->keyalg = &algarray[keyalg];
	h->valalg = &algarray[valalg];

	// these calculations are compiler dependent.
	// figure out offsets of map call arguments.
// func() (key, val) h->ko0 = rnd(sizeof(h), Structrnd); h->vo0 = rnd(h->ko0+keysize, val->align); // func(key) (val[, pres]) h->ko1 = rnd(sizeof(h), key->align); h->vo1 = rnd(h->ko1+keysize, Structrnd); h->po1 = rnd(h->vo1+valsize, 1); // func(key, val[, pres]) h->ko2 = rnd(sizeof(h), key->align); h->vo2 = rnd(h->ko2+keysize, val->align); h->po2 = rnd(h->vo2+valsize, 1); if(debug) { printf("makemap: map=%p; keysize=%d; valsize=%d; keyalg=%d; valalg=%d; offsets=%d,%d; %d,%d,%d; %d,%d,%d\n", h, keysize, valsize, keyalg, valalg, h->ko0, h->vo0, h->ko1, h->vo1, h->po1, h->ko2, h->vo2, h->po2); } return h; } // makemap(key, val *Type, hint int64) (hmap *map[any]any); void ·makemap(Type *key, Type *val, int64 hint, Hmap *ret) { ret = makemap(key, val, hint); FLUSH(&ret); } void mapaccess(Hmap *h, byte *ak, byte *av, bool *pres) { byte *res; if(gcwaiting) gosched(); res = nil; if(hash_lookup(h, ak, (void**)&res)) { *pres = true; h->valalg->copy(h->valsize, av, hash_indirect(h, res+h->datavo)); } else { *pres = false; h->valalg->copy(h->valsize, av, nil); } } // mapaccess1(hmap *map[any]any, key any) (val any); #pragma textflag 7 void ·mapaccess1(Hmap *h, ...) { byte *ak, *av; bool pres; ak = (byte*)&h + h->ko1; av = (byte*)&h + h->vo1; mapaccess(h, ak, av, &pres); if(debug) { prints("runtime.mapaccess1: map="); ·printpointer(h); prints("; key="); h->keyalg->print(h->keysize, ak); prints("; val="); h->valalg->print(h->valsize, av); prints("; pres="); ·printbool(pres); prints("\n"); } } // mapaccess2(hmap *map[any]any, key any) (val any, pres bool); #pragma textflag 7 void ·mapaccess2(Hmap *h, ...) 
{
	byte *ak, *av, *ap;

	/* key, value, and pres results at compiler-computed stack offsets */
	ak = (byte*)&h + h->ko1;
	av = (byte*)&h + h->vo1;
	ap = (byte*)&h + h->po1;

	mapaccess(h, ak, av, ap);

	if(debug) {
		prints("runtime.mapaccess2: map=");
		·printpointer(h);
		prints("; key=");
		h->keyalg->print(h->keysize, ak);
		prints("; val=");
		h->valalg->print(h->valsize, av);
		prints("; pres=");
		·printbool(*ap);
		prints("\n");
	}
}

/* store *av under key *ak; av == nil means delete the key */
void
mapassign(Hmap *h, byte *ak, byte *av)
{
	byte *res;
	int32 hit;

	if(gcwaiting)
		gosched();

	res = nil;
	if(av == nil) {
		hash_remove(h, ak, (void**)&res);
		return;
	}

	hit = hash_insert(h, ak, (void**)&res);
	/* freshly inserted indirect values need their out-of-line block */
	if(!hit && h->indirectval)
		*(void**)(res+h->datavo) = mal(h->valsize);
	h->keyalg->copy(h->keysize, res, ak);
	h->valalg->copy(h->valsize, hash_indirect(h, res+h->datavo), av);

	if(debug) {
		prints("mapassign: map=");
		·printpointer(h);
		prints("; key=");
		h->keyalg->print(h->keysize, ak);
		prints("; val=");
		h->valalg->print(h->valsize, av);
		prints("; hit=");
		·printint(hit);
		prints("; res=");
		·printpointer(res);
		prints("\n");
	}
}

// mapassign1(hmap *map[any]any, key any, val any);
#pragma textflag 7
void
·mapassign1(Hmap *h, ...)
{
	byte *ak, *av;

	ak = (byte*)&h + h->ko2;
	av = (byte*)&h + h->vo2;

	mapassign(h, ak, av);
}

// mapassign2(hmap *map[any]any, key any, val any, pres bool);
#pragma textflag 7
void
·mapassign2(Hmap *h, ...)
{
	byte *ak, *av, *ap;

	ak = (byte*)&h + h->ko2;
	av = (byte*)&h + h->vo2;
	ap = (byte*)&h + h->po2;

	if(*ap == false)
		av = nil;	// delete

	mapassign(h, ak, av);

	if(debug) {
		prints("mapassign2: map=");
		·printpointer(h);
		prints("; key=");
		h->keyalg->print(h->keysize, ak);
		prints("\n");
	}
}

/* advance the iterator, following the value indirection for out-of-line
   values */
void*
hash_next_and_deref(struct hash_iter *it)
{
	void *p;

	p = hash_next(it);
	if(it->h->indirectval)
		p = *(void**)p;
	return p;
}

// mapiterinit(hmap *map[any]any, hiter *any);
void
·mapiterinit(Hmap *h, struct hash_iter *it)
{
	if(h == nil) {
		it->data = nil;
		return;
	}
	hash_iter_init(h, it);
	it->data = hash_next_and_deref(it);
	if(debug) {
		prints("runtime.mapiterinit: map=");
		·printpointer(h);
		prints("; iter=");
		·printpointer(it);
		prints("; data=");
		·printpointer(it->data);
		prints("\n");
	}
}

/* allocating wrapper around ·mapiterinit for non-Go callers */
struct hash_iter*
mapiterinit(Hmap *h)
{
	struct hash_iter *it;

	it = mal(sizeof *it);
	·mapiterinit(h, it);
	return it;
}

// mapiternext(hiter *any);
void
·mapiternext(struct hash_iter *it)
{
	if(gcwaiting)
		gosched();

	it->data = hash_next_and_deref(it);
	if(debug) {
		prints("runtime.mapiternext: iter=");
		·printpointer(it);
		prints("; data=");
		·printpointer(it->data);
		prints("\n");
	}
}

void
mapiternext(struct hash_iter *it)
{
	·mapiternext(it);
}

// mapiter1(hiter *any) (key any);
#pragma textflag 7
void
·mapiter1(struct hash_iter *it, ...)
{
	Hmap *h;
	byte *ak, *res;

	h = it->h;
	ak = (byte*)&it + h->ko0;

	res = it->data;
	if(res == nil)
		throw("runtime.mapiter1: key:val nil pointer");

	h->keyalg->copy(h->keysize, ak, res);

	if(debug) {
		prints("mapiter2: iter=");
		·printpointer(it);
		prints("; map=");
		·printpointer(h);
		prints("\n");
	}
}

/* copy the current iteration key into *ak; return false when exhausted */
bool
mapiterkey(struct hash_iter *it, void *ak)
{
	Hmap *h;
	byte *res;

	h = it->h;
	res = it->data;
	if(res == nil)
		return false;
	h->keyalg->copy(h->keysize, ak, res);
	return true;
}

// mapiter2(hiter *any) (key any, val any);
#pragma textflag 7
void
·mapiter2(struct hash_iter *it, ...)
{
	Hmap *h;
	byte *ak, *av, *res;

	h = it->h;
	/* key and value results at compiler-computed stack offsets */
	ak = (byte*)&it + h->ko0;
	av = (byte*)&it + h->vo0;

	res = it->data;
	if(res == nil)
		throw("runtime.mapiter2: key:val nil pointer");

	h->keyalg->copy(h->keysize, ak, res);
	h->valalg->copy(h->valsize, av, hash_indirect(h, res+h->datavo));

	if(debug) {
		prints("mapiter2: iter=");
		·printpointer(it);
		prints("; map=");
		·printpointer(h);
		prints("\n");
	}
}