/**
	Utility functions for array processing

	Copyright: © 2012 RejectedSoftware e.K.
	License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file.
	Authors: Sönke Ludwig
*/
module vibe.utils.array;

import vibe.internal.utilallocator;

import std.algorithm;
import std.range : isInputRange, isOutputRange;
import std.traits;
static import std.utf;


void removeFromArray(T)(ref T[] array, T item)
{
	foreach( i; 0 .. array.length )
		if( array[i] is item ){
			removeFromArrayIdx(array, i);
			return;
		}
}

void removeFromArrayIdx(T)(ref T[] array, size_t idx)
{
	foreach( j; idx+1 .. array.length)
		array[j-1] = array[j];
	array.length = array.length-1;
}
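// Usage sketch for the two helpers above: removeFromArray removes the first
// element identical (`is`) to the given item, removeFromArrayIdx removes by index.
unittest {
	int[] arr = [1, 2, 3, 4];
	arr.removeFromArray(3);
	assert(arr == [1, 2, 4]);
	arr.removeFromArrayIdx(0);
	assert(arr == [2, 4]);
}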
enum AppenderResetMode {
	keepData,
	freeData,
	reuseData
}

/** Output range that appends to a buffer obtained from an `IAllocator`.

	An optional caller supplied initial buffer is used until it is exhausted,
	after which memory is requested from the allocator.
*/
struct AllocAppender(ArrayType : E[], E) {
	alias ElemType = Unqual!E;

	static assert(!hasIndirections!E && !hasElaborateDestructor!E);

	private {
		ElemType[] m_data;
		ElemType[] m_remaining;
		IAllocator m_alloc;
		bool m_allocatedBuffer = false;
	}

	this(IAllocator alloc, ElemType[] initial_buffer = null)
	{
		m_alloc = alloc;
		m_data = initial_buffer;
		m_remaining = initial_buffer;
	}

	@disable this(this);

	@property ArrayType data() { return cast(ArrayType)m_data[0 .. m_data.length - m_remaining.length]; }

	void reset(AppenderResetMode reset_mode = AppenderResetMode.keepData)
	{
		if (reset_mode == AppenderResetMode.keepData) m_data = null;
		else if (reset_mode == AppenderResetMode.freeData) { if (m_allocatedBuffer) m_alloc.deallocate(m_data); m_data = null; }
		m_remaining = m_data;
	}

	/** Grows the capacity of the internal buffer so that it can hold a minimum number of additional elements.

		Params:
			amount = The minimum number of elements that shall be appendable without
				triggering a re-allocation.

	*/
	void reserve(size_t amount)
	@trusted {
		size_t nelems = m_data.length - m_remaining.length;
		if (!m_data.length) {
			m_data = cast(ElemType[])m_alloc.allocate(amount*E.sizeof);
			m_remaining = m_data;
			m_allocatedBuffer = true;
		}
		if (m_remaining.length < amount) {
			debug {
				import std.digest.crc;
				auto checksum = crc32Of(m_data[0 .. nelems]);
			}
			if (m_allocatedBuffer) {
				void[] vdata = m_data;
				m_alloc.reallocate(vdata, (nelems+amount)*E.sizeof);
				m_data = () @trusted { return cast(ElemType[])vdata; } ();
			} else {
				auto newdata = cast(ElemType[])m_alloc.allocate((nelems+amount)*E.sizeof);
				newdata[0 .. nelems] = m_data[0 .. nelems];
				m_data = newdata;
				m_allocatedBuffer = true;
			}
			debug assert(crc32Of(m_data[0 .. nelems]) == checksum);
		}
		m_remaining = m_data[nelems .. m_data.length];
	}

	void put(E el)
	@safe {
		if( m_remaining.length == 0 ) grow(1);
		m_remaining[0] = el;
		m_remaining = m_remaining[1 .. $];
	}

	void put(ArrayType arr)
	@safe {
		if (m_remaining.length < arr.length) grow(arr.length);
		m_remaining[0 .. arr.length] = arr[];
		m_remaining = m_remaining[arr.length .. $];
	}

	static if( !hasAliasing!E ){
		void put(in ElemType[] arr)
		@trusted
		{
			put(cast(ArrayType)arr);
		}
	}

	static if( is(ElemType == char) ){
		void put(dchar el)
		@safe
		{
			if( el < 128 ) put(cast(char)el);
			else {
				char[4] buf;
				auto len = std.utf.encode(buf, el);
				put(() @trusted { return cast(ArrayType)buf[0 .. len]; }());
			}
		}
	}

	static if( is(ElemType == wchar) ){
		void put(dchar el)
		@safe
		{
			if( el < 128 ) put(cast(wchar)el);
			else {
				wchar[3] buf;
				auto len = std.utf.encode(buf, el);
				put(() @trusted { return cast(ArrayType)buf[0 .. len]; } ());
			}
		}
	}

	static if (!is(E == immutable) || !hasAliasing!E) {
		/** Appends a number of elements in-place.

			The delegate receives the slice of memory that follows the already
			written data. Use `reserve` to ensure that this slice has enough
			room. The delegate should overwrite as much of the slice as
			desired and then return the number of elements that should be
			appended (counting from the start of the slice).
		*/
		void append(scope size_t delegate(scope ElemType[] dst) @safe del)
		{
			auto n = del(m_remaining);
			assert(n <= m_remaining.length);
			m_remaining = m_remaining[n .. $];
		}
	}

	void grow(size_t min_free)
	{
		if( !m_data.length && min_free < 16 ) min_free = 16;

		auto min_size = m_data.length + min_free - m_remaining.length;
		auto new_size = max(m_data.length, 16);
		while( new_size < min_size )
			new_size = (new_size * 3) / 2;
		reserve(new_size - m_data.length + m_remaining.length);
	}
}

unittest {
	auto a = AllocAppender!string(theAllocator());
	a.put("Hello");
	a.put(' ');
	a.put("World");
	assert(a.data == "Hello World");
	a.reset();
	assert(a.data == "");
}

unittest {
	char[4] buf;
	auto a = AllocAppender!string(theAllocator(), buf);
	a.put("He");
	assert(a.data == "He");
	assert(a.data.ptr == buf.ptr);
	a.put("ll");
	assert(a.data == "Hell");
	assert(a.data.ptr == buf.ptr);
	a.put('o');
	assert(a.data == "Hello");
	assert(a.data.ptr != buf.ptr);
}

unittest {
	char[4] buf;
	auto a = AllocAppender!string(theAllocator(), buf);
	a.put("Hello");
	assert(a.data == "Hello");
	assert(a.data.ptr != buf.ptr);
}

unittest {
	auto app = AllocAppender!(int[])(theAllocator);
	app.reserve(2);
	app.append((scope mem) {
		assert(mem.length >= 2);
		mem[0] = 1;
		mem[1] = 2;
		return size_t(2);
	});
	assert(app.data == [1, 2]);
}

unittest {
	auto app = AllocAppender!string(theAllocator);
	app.reserve(3);
	app.append((scope mem) {
		assert(mem.length >= 3);
		mem[0] = 'f';
		mem[1] = 'o';
		mem[2] = 'o';
		return size_t(3);
	});
	assert(app.data == "foo");
}


/// Output range that appends into a fixed-size internal buffer of NELEM elements.
struct FixedAppender(ArrayType : E[], size_t NELEM, E) {
	alias ElemType = Unqual!E;
	private {
		ElemType[NELEM] m_data;
		size_t m_fill;
	}

	void clear()
	{
		m_fill = 0;
	}

	void put(E el)
	{
		m_data[m_fill++] = el;
	}

	static if( is(ElemType == char) ){
		void put(dchar el)
		{
			if( el < 128 ) put(cast(char)el);
			else {
				char[4] buf;
				auto len = std.utf.encode(buf, el);
				put(cast(ArrayType)buf[0 .. len]);
			}
		}
	}

	static if( is(ElemType == wchar) ){
		void put(dchar el)
		{
			if( el < 128 ) put(cast(wchar)el);
			else {
				wchar[3] buf;
				auto len = std.utf.encode(buf, el);
				put(cast(ArrayType)buf[0 .. len]);
			}
		}
	}

	void put(ArrayType arr)
	{
		m_data[m_fill .. m_fill+arr.length] = (cast(ElemType[])arr)[];
		m_fill += arr.length;
	}

	@property ArrayType data() { return cast(ArrayType)m_data[0 .. m_fill]; }

	static if (!is(E == immutable)) {
		void reset() { m_fill = 0; }
	}
}
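// Usage sketch for FixedAppender: same interface as AllocAppender, but backed
// by a fixed-size stack buffer of NELEM elements instead of an allocator.
unittest {
	FixedAppender!(string, 16) app;
	app.put("Hello");
	app.put(' ');
	app.put("World");
	assert(app.data == "Hello World");

	app.clear();
	dchar uml = 'ä';
	app.put(uml); // non-ASCII code points are UTF-8 encoded into the buffer
	assert(app.data == "ä");
}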
/**
	Ring buffer with either a statically sized (N > 0) or a dynamically allocated (N == 0) backing buffer.

	TODO: clear ring buffer fields upon removal (to run struct destructors, if T is a struct)
*/
struct FixedRingBuffer(T, size_t N = 0, bool INITIALIZE = true) {
	private {
		static if( N > 0 ) {
			static if (INITIALIZE) T[N] m_buffer;
			else T[N] m_buffer = void;
		} else T[] m_buffer;
		size_t m_start = 0;
		size_t m_fill = 0;
	}

	static if( N == 0 ){
		this(size_t capacity) { m_buffer = new T[capacity]; }
	}

	@property bool empty() const { return m_fill == 0; }

	@property bool full() const { return m_fill == m_buffer.length; }

	@property size_t length() const { return m_fill; }

	@property size_t freeSpace() const { return m_buffer.length - m_fill; }

	@property size_t capacity() const { return m_buffer.length; }

	static if( N == 0 ){
		/// Resets the capacity to zero and explicitly frees the memory for the buffer.
		void dispose()
		{
			delete m_buffer;
			m_buffer = null;
			m_start = m_fill = 0;
		}

		@property void capacity(size_t new_size)
		{
			if( m_buffer.length ){
				auto newbuffer = new T[new_size];
				auto dst = newbuffer;
				auto newfill = min(m_fill, new_size);
				read(dst[0 .. newfill]);
				m_buffer = newbuffer;
				m_start = 0;
				m_fill = newfill;
			} else {
				m_buffer = new T[new_size];
			}
		}
	}

	@property ref inout(T) front() inout { assert(!empty); return m_buffer[m_start]; }

	@property ref inout(T) back() inout { assert(!empty); return m_buffer[mod(m_start+m_fill-1)]; }

	void clear()
	{
		popFrontN(length);
		assert(m_fill == 0);
		m_start = 0;
	}

	void put()(T itm) { assert(m_fill < m_buffer.length); m_buffer[mod(m_start + m_fill++)] = itm; }
	void put(TC : T)(TC[] itms)
	{
		if( !itms.length ) return;
		assert(m_fill+itms.length <= m_buffer.length);
		if( mod(m_start+m_fill) >= mod(m_start+m_fill+itms.length) ){
			size_t chunk1 = m_buffer.length - (m_start+m_fill);
			size_t chunk2 = itms.length - chunk1;
			m_buffer[m_start+m_fill .. m_buffer.length] = itms[0 .. chunk1];
			m_buffer[0 .. chunk2] = itms[chunk1 .. $];
		} else {
			m_buffer[mod(m_start+m_fill) .. mod(m_start+m_fill)+itms.length] = itms[];
		}
		m_fill += itms.length;
	}
	void putN(size_t n) { assert(m_fill+n <= m_buffer.length); m_fill += n; }

	void popFront() { assert(!empty); m_start = mod(m_start+1); m_fill--; }
	void popFrontN(size_t n) { assert(length >= n); m_start = mod(m_start + n); m_fill -= n; }

	void popBack() { assert(!empty); m_fill--; }
	void popBackN(size_t n) { assert(length >= n); m_fill -= n; }

	void removeAt(Range r)
	{
		assert(r.m_buffer is m_buffer);
		if (r.m_start == m_start) { popFront(); return; }
		if( m_start + m_fill > m_buffer.length ){
			assert(r.m_start > m_start && r.m_start < m_buffer.length || r.m_start < mod(m_start+m_fill));
			if( r.m_start > m_start ){
				foreach(i; r.m_start .. m_buffer.length-1)
					m_buffer[i] = m_buffer[i+1];
				m_buffer[$-1] = m_buffer[0];
				foreach(i; 0 .. mod(m_start + m_fill - 1))
					m_buffer[i] = m_buffer[i+1];
			} else {
				foreach(i; r.m_start .. mod(m_start + m_fill - 1))
					m_buffer[i] = m_buffer[i+1];
			}
		} else {
			assert(r.m_start >= m_start && r.m_start < m_start+m_fill);
			foreach(i; r.m_start .. m_start+m_fill-1)
				m_buffer[i] = m_buffer[i+1];
		}
		m_fill--;
		destroy(m_buffer[mod(m_start+m_fill)]); // TODO: only call destroy for non-POD T
	}

	inout(T)[] peek() inout { return m_buffer[m_start .. min(m_start+m_fill, m_buffer.length)]; }
	T[] peekDst() {
		if( m_start + m_fill < m_buffer.length ) return m_buffer[m_start+m_fill .. $];
		else return m_buffer[mod(m_start+m_fill) .. m_start];
	}

	void read(T[] dst)
	{
		assert(dst.length <= length);
		if( !dst.length ) return;
		if( mod(m_start) >= mod(m_start+dst.length) ){
			size_t chunk1 = m_buffer.length - m_start;
			size_t chunk2 = dst.length - chunk1;
			dst[0 .. chunk1] = m_buffer[m_start .. $];
			dst[chunk1 .. $] = m_buffer[0 .. chunk2];
		} else {
			dst[] = m_buffer[m_start .. m_start+dst.length];
		}
		popFrontN(dst.length);
	}

	int opApply(scope int delegate(ref T itm) @safe del)
	{
		if( m_start+m_fill > m_buffer.length ){
			foreach(i; m_start .. m_buffer.length)
				if( auto ret = del(m_buffer[i]) )
					return ret;
			foreach(i; 0 .. mod(m_start+m_fill))
				if( auto ret = del(m_buffer[i]) )
					return ret;
		} else {
			foreach(i; m_start .. m_start+m_fill)
				if( auto ret = del(m_buffer[i]) )
					return ret;
		}
		return 0;
	}

	/// iterate through elements with index
	int opApply(scope int delegate(size_t i, ref T itm) @safe del)
	{
		if( m_start+m_fill > m_buffer.length ){
			foreach(i; m_start .. m_buffer.length)
				if( auto ret = del(i - m_start, m_buffer[i]) )
					return ret;
			foreach(i; 0 .. mod(m_start+m_fill))
				if( auto ret = del(i + m_buffer.length - m_start, m_buffer[i]) )
					return ret;
		} else {
			foreach(i; m_start .. m_start+m_fill)
				if( auto ret = del(i - m_start, m_buffer[i]) )
					return ret;
		}
		return 0;
	}

	ref inout(T) opIndex(size_t idx) inout { assert(idx < length); return m_buffer[mod(m_start+idx)]; }

	Range opSlice() { return Range(m_buffer, m_start, m_fill); }

	Range opSlice(size_t from, size_t to)
	{
		assert(from <= to);
		assert(to <= m_fill);
		return Range(m_buffer, mod(m_start+from), to-from);
	}

	size_t opDollar(size_t dim)() const if(dim == 0) { return length; }

	private size_t mod(size_t n)
	const {
		static if( N == 0 ){
			/*static if(PotOnly){
				return x & (m_buffer.length-1);
			} else {*/
				return n % m_buffer.length;
			//}
		} else static if( ((N - 1) & N) == 0 ){
			return n & (N - 1);
		} else return n % N;
	}

	static struct Range {
		private {
			T[] m_buffer;
			size_t m_start;
			size_t m_length;
		}

		private this(T[] buffer, size_t start, size_t length)
		{
			m_buffer = buffer;
			m_start = start;
			m_length = length;
		}

		@property bool empty() const { return m_length == 0; }

		@property inout(T) front() inout { assert(!empty); return m_buffer[m_start]; }

		void popFront()
		{
			assert(!empty);
			m_start++;
			m_length--;
			if( m_start >= m_buffer.length )
				m_start = 0;
		}
	}
}
unittest {
	static assert(isInputRange!(FixedRingBuffer!int) && isOutputRange!(FixedRingBuffer!int, int));

	FixedRingBuffer!(int, 5) buf;
	assert(buf.length == 0 && buf.freeSpace == 5); buf.put(1); // |1 . . . .
	assert(buf.length == 1 && buf.freeSpace == 4); buf.put(2); // |1 2 . . .
	assert(buf.length == 2 && buf.freeSpace == 3); buf.put(3); // |1 2 3 . .
	assert(buf.length == 3 && buf.freeSpace == 2); buf.put(4); // |1 2 3 4 .
	assert(buf.length == 4 && buf.freeSpace == 1); buf.put(5); // |1 2 3 4 5
	assert(buf.length == 5 && buf.freeSpace == 0);
	assert(buf.front == 1);
	buf.popFront(); // .|2 3 4 5
	assert(buf.front == 2);
	buf.popFrontN(2); // . . .|4 5
	assert(buf.front == 4);
	assert(buf.length == 2 && buf.freeSpace == 3);
	buf.put([6, 7, 8]); // 6 7 8|4 5
	assert(buf.length == 5 && buf.freeSpace == 0);
	int[5] dst;
	buf.read(dst); // . . .|. .
	assert(dst == [4, 5, 6, 7, 8]);
	assert(buf.length == 0 && buf.freeSpace == 5);
	buf.put([1, 2]); // . . .|1 2
	assert(buf.length == 2 && buf.freeSpace == 3);
	buf.read(dst[0 .. 2]); //|. . . . .
	assert(dst[0 .. 2] == [1, 2]);

	buf.put([0, 0, 0, 1, 2]); //|0 0 0 1 2
	buf.popFrontN(2); //. .|0 1 2
	buf.put([3, 4]); // 3 4|0 1 2
	foreach(i, item; buf)
	{
		assert(i == item);
	}

	assert(buf.front == 0);
	assert(buf.full);
	buf.removeAt(buf[0..1]); //4 .|1 2 3
	foreach(i, item; buf)
	{
		assert(i == item - 1);
	}

	buf.put(5); // 4 5|1 2 3
	buf.removeAt(buf[3..4]); // 5 .|1 2 3
	assert(buf.front == 1); buf.popFront();
	assert(buf.front == 2); buf.popFront();
	assert(buf.front == 3); buf.popFront();
	assert(buf.front == 5); buf.popFront();
	assert(buf.empty);
}
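// Usage sketch for the dynamically sized variant (N == 0): the capacity is
// passed at construction time and can later be grown via the capacity property.
unittest {
	auto buf = FixedRingBuffer!int(4);
	assert(buf.capacity == 4 && buf.empty);
	buf.put([1, 2, 3]);
	assert(buf.length == 3);
	buf.capacity = 8; // existing contents are preserved when growing
	assert(buf.capacity == 8 && buf.front == 1);
	int[3] dst;
	buf.read(dst);
	assert(dst == [1, 2, 3] && buf.empty);
}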
/** Simple set of keys that stores up to four entries inline and keeps any
	further entries in reference-counted, allocator-backed storage.
*/
struct ArraySet(Key)
{
	import std.experimental.allocator : makeArray, expandArray, dispose;
	import std.experimental.allocator.building_blocks.affix_allocator : AffixAllocator;

	private {
		static if (__VERSION__ < 2074) {
			struct AW { // work around AffixAllocator limitations
				IAllocator alloc;
				alias alloc this;
				enum alignment = max(Key.alignof, int.alignof);
				void[] resolveInternalPointer(void* p) { void[] ret; alloc.resolveInternalPointer(p, ret); return ret; }
			}
			alias AllocatorType = AffixAllocator!(AW, int);
		} else {
			IAllocator AW(IAllocator a) { return a; }
			alias AllocatorType = AffixAllocator!(IAllocator, int);
		}
		Key[4] m_staticEntries;
		Key[] m_entries;
		AllocatorType m_allocator;
	}

	~this()
	@trusted {
		static if (__VERSION__ <= 2071)
			scope (failure) assert(false);
		if (m_entries.ptr) {
			if (--allocator.prefix(m_entries) <= 0) {
				try allocator.dispose(m_entries);
				catch (Exception e) assert(false, e.msg); // should never happen
			}
		}
	}

	this(this)
	@trusted {
		static if (__VERSION__ <= 2071)
			scope (failure) assert(false);
		if (m_entries.ptr) {
			allocator.prefix(m_entries)++;
		}
	}

	@property ArraySet dup()
	{
		static if (__VERSION__ <= 2071)
			scope (failure) assert(false);
		ArraySet ret;
		ret.m_staticEntries = m_staticEntries;
		ret.m_allocator = m_allocator;

		if (m_entries.length) {
			Key[] duped;
			() @trusted {
				try duped = allocator.makeArray!(Key)(m_entries.length);
				catch (Exception e) assert(false, e.msg);
				if (!duped.length)
					assert(false, "Failed to allocate memory for duplicated "~ArraySet.stringof);
				allocator.prefix(duped) = 1;
			} ();
			duped[] = m_entries[];
			ret.m_entries = duped;
		}

		return ret;
	}

	void setAllocator(IAllocator allocator)
	in { assert(m_entries.ptr is null, "Cannot set allocator after elements have been inserted."); }
	body {
		m_allocator = AllocatorType(AW(allocator));
	}

	bool opBinaryRight(string op)(Key key) if (op == "in") { return contains(key); }

	int opApply(int delegate(ref Key) @safe del)
	{
		foreach (ref k; m_staticEntries)
			if (k != Key.init)
				if (auto ret = del(k))
					return ret;
		foreach (ref k; m_entries)
			if (k != Key.init)
				if (auto ret = del(k))
					return ret;
		return 0;
	}

	bool contains(Key key)
	const {
		foreach (ref k; m_staticEntries) if (k == key) return true;
		foreach (ref k; m_entries) if (k == key) return true;
		return false;
	}

	void insert(Key key)
	{
		if (contains(key)) return;

		foreach (ref k; m_staticEntries)
			if (k == Key.init) {
				k = key;
				return;
			}

		foreach (ref k; m_entries)
			if (k == Key.init) {
				k = key;
				return;
			}

		size_t oldlen = m_entries.length;

		() @trusted {
			try {
				if (!oldlen) {
					m_entries = allocator.makeArray!Key(64);
					assert(m_entries.length, "Failed to allocate memory for "~ArraySet.stringof);
					allocator.prefix(m_entries) = 1;
				} else {
					int oldrc = allocator.prefix(m_entries);
					if (!allocator.expandArray(m_entries, max(64, oldlen * 3 / 4)))
						assert(false, "Failed to allocate memory for "~ArraySet.stringof);
					allocator.prefix(m_entries) = oldrc;
				}
			} catch (Exception e) assert(false, e.msg);
		} ();

		m_entries[oldlen] = key;
	}

	void remove(Key key)
	{
		foreach (ref k; m_staticEntries) if (k == key) { k = Key.init; return; }
		foreach (ref k; m_entries) if (k == key) { k = Key.init; return; }
	}

	ref allocator()
	nothrow @trusted {
		try {
			static if (__VERSION__ < 2074) auto palloc = m_allocator.parent;
			else auto palloc = m_allocator._parent;
			if (!palloc) {
				assert(vibeThreadAllocator !is null, "No theAllocator set!?");
				m_allocator = AllocatorType(AW(vibeThreadAllocator));
			}
		} catch (Exception e) assert(false, e.msg); // should never throw
		return m_allocator;
	}
}

@safe nothrow unittest {
	import std.experimental.allocator : allocatorObject;
	import std.experimental.allocator.mallocator : Mallocator;

	ArraySet!int s;
	s.setAllocator(() @trusted { return Mallocator.instance.allocatorObject; } ());

	ArraySet!int t;
	t = s;

	s.insert(1);
	s.insert(2);
	s.insert(3);
	s.insert(4);
	assert(s.contains(1));
	assert(s.contains(2));
	assert(s.contains(3));
	assert(s.contains(4));
	assert(!t.contains(1));

	s.insert(5);
	assert(s.contains(5));

	t = s;
	assert(t.contains(5));
	assert(t.contains(1));

	s.insert(6);
	assert(s.contains(6));
	assert(t.contains(6));

	s = ArraySet!int.init;
	assert(!s.contains(1));
	assert(t.contains(1));
	assert(t.contains(6));

	s = t.dup;
	assert(s.contains(1));
	assert(s.contains(6));

	t.remove(1);
	assert(!t.contains(1));
	assert(s.contains(1));
	assert(t.contains(2));
	assert(t.contains(6));

	t.remove(6);
	assert(!t.contains(6));
	assert(s.contains(6));
	assert(t.contains(5));
}