Usage of the com.carrotsearch.hppc.LongHashSet.size() method, with code examples


This article collects a number of Java code examples for the com.carrotsearch.hppc.LongHashSet.size() method and shows how LongHashSet.size() is used in practice. The examples come from selected open-source projects found on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the LongHashSet.size() method are as follows:
Package: com.carrotsearch.hppc
Class: LongHashSet
Method: size

About LongHashSet.size

Returns the number of distinct long keys currently stored in the set; the reserved empty key (0L) is counted when it has been added.
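
For orientation, here is a minimal usage sketch written for this article (it is not taken from the projects referenced below): size() counts distinct keys and ignores duplicate insertions.

import com.carrotsearch.hppc.LongHashSet;

public class LongHashSetSizeDemo {
  public static void main(String[] args) {
    LongHashSet set = new LongHashSet();
    set.add(1L);
    set.add(2L);
    set.add(2L);                          // duplicate key, ignored
    System.out.println(set.size());       // 2
    System.out.println(set.isEmpty());    // false
    System.out.println(set.contains(2L)); // true
  }
}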

Code examples

Code example source: carrotsearch/hppc

/**
 * {@inheritDoc}
 */
@Override
public boolean isEmpty() {
 return size() == 0;
}

Code example source: carrotsearch/hppc

/**
 * {@inheritDoc}
 */
@Override
public long [] toArray() {
 final long[] cloned = (new long [size()]);
 int j = 0;
 if (hasEmptyKey) {
  cloned[j++] = 0L;
 }
 final long[] keys =  this.keys;
 for (int slot = 0, max = mask; slot <= max; slot++) {
  long existing;
  if (!((existing = keys[slot]) == 0)) {
   cloned[j++] = existing;
  }
 }
 return cloned;
}
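
toArray() allocates its result with new long[size()] and then copies the empty-key sentinel (if present) followed by every non-zero key, so the returned array's length always equals size(). A small caller-side sketch with illustrative values:

import com.carrotsearch.hppc.LongHashSet;

public class ToArrayDemo {
  public static void main(String[] args) {
    LongHashSet set = LongHashSet.from(5L, 7L, 0L); // 0L occupies the reserved empty-key slot
    long[] values = set.toArray();
    System.out.println(values.length == set.size()); // true (both are 3)
  }
}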

Code example source: carrotsearch/hppc

/**
 * Return true if all keys of some other container exist in this container.
 */
private boolean sameKeys(LongSet other) {
 if (other.size() != size()) {
  return false;
 }
 for (LongCursor c : other) {
  if (!contains( c.value)) {
   return false;
  }
 }
 return true;
}
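
sameKeys uses size() as a cheap early exit: containers of different sizes cannot hold the same keys, so the per-key contains() loop is skipped entirely. This helper backs the set's equals() implementation, so the effect is observable from calling code; a short sketch with illustrative values, assuming the standard HPPC equals() behaviour for hash sets:

import com.carrotsearch.hppc.LongHashSet;

public class SameKeysDemo {
  public static void main(String[] args) {
    LongHashSet a = LongHashSet.from(1L, 2L, 3L);
    LongHashSet b = LongHashSet.from(3L, 2L, 1L); // same keys, different insertion order
    LongHashSet c = LongHashSet.from(1L, 2L);     // smaller size fails the size() pre-check
    System.out.println(a.equals(b)); // true
    System.out.println(a.equals(c)); // false
  }
}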

Code example source: carrotsearch/hppc

/**
 * {@inheritDoc}
 */
@Override
public int removeAll(LongPredicate predicate) {
 int before = size();
 if (hasEmptyKey) {
  if (predicate.apply(0L)) {
   hasEmptyKey = false;
  }
 }
 final long[] keys =  this.keys;
 for (int slot = 0, max = this.mask; slot <= max;) {
  long existing;
  if (!((existing = keys[slot]) == 0)) {
   if (predicate.apply(existing)) {
    shiftConflictingKeys(slot);
    continue; // Repeat the check for the same slot (shifted).
   }
  }
  slot++;
 }
 return before - size();
}
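
The value returned by removeAll is simply size() before the scan minus size() afterwards. A caller-side sketch follows (values and predicate are illustrative only); note that com.carrotsearch.hppc.predicates.LongPredicate has a single apply(long) method, so a lambda can be passed on Java 8 and later:

import com.carrotsearch.hppc.LongHashSet;

public class RemoveAllDemo {
  public static void main(String[] args) {
    LongHashSet set = LongHashSet.from(1L, 2L, 3L, 4L);
    // Drop every even key; the return value is the number of keys removed.
    int removed = set.removeAll(value -> value % 2 == 0);
    System.out.println(removed);    // 2
    System.out.println(set.size()); // 2
  }
}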

Code example source: carrotsearch/hppc

/**
 * Allocate new internal buffers. This method attempts to allocate
 * and assign internal buffers atomically (either allocations succeed or not).
 */
protected void allocateBuffers(int arraySize) {
 assert Integer.bitCount(arraySize) == 1;
 // Compute new hash mixer candidate before expanding.
 final int newKeyMixer = this.orderMixer.newKeyMixer(arraySize);
 // Ensure no change is done if we hit an OOM.
 long[] prevKeys =  this.keys;
 try {
  int emptyElementSlot = 1;
  this.keys = (new long [arraySize + emptyElementSlot]);
 } catch (OutOfMemoryError e) {
  this.keys = prevKeys;
  throw new BufferAllocationException(
    "Not enough memory to allocate buffers for rehashing: %,d -> %,d", 
    e,
    this.keys == null ? 0 : size(), 
    arraySize);
 }
 this.resizeAt = expandAtCount(arraySize, loadFactor);
 this.keyMixer = newKeyMixer;
 this.mask = arraySize - 1;
}

Code example source: carrotsearch/hppc

/**
 * This method is invoked when there is a new key to be inserted into
 * the buffer but there are not enough empty slots to do so.
 * 
 * New buffers are allocated. If this succeeds, we know we can proceed
 * with rehashing so we assign the pending element to the previous buffer
 * (possibly violating the invariant of having at least one empty slot)
 * and rehash all keys, substituting new buffers at the end.  
 */
protected void allocateThenInsertThenRehash(int slot, long pendingKey) {
 assert assigned == resizeAt 
     && (( keys[slot]) == 0)
     && !((pendingKey) == 0);
 // Try to allocate new buffers first. If we OOM, we leave in a consistent state.
 final long[] prevKeys =  this.keys;
 allocateBuffers(nextBufferSize(mask + 1, size(), loadFactor));
 assert this.keys.length > prevKeys.length;
 // We have succeeded at allocating new data so insert the pending key/value at
 // the free slot in the old arrays before rehashing.
 prevKeys[slot] = pendingKey;
 // Rehash old keys, including the pending key.
 rehash(prevKeys);
}

Code example source: harbby/presto-connectors

/**
 * {@inheritDoc}
 */
@Override
public boolean isEmpty() {
 return size() == 0;
}

Code example source: sirensolutions/siren-join

@Override
public int size() {
 return this.set.size();
}

Code example source: harbby/presto-connectors

/**
 * {@inheritDoc}
 */
@Override
public long [] toArray() {
 final long[] cloned = (new long [size()]);
 int j = 0;
 if (hasEmptyKey) {
  cloned[j++] = 0L;
 }
 final long[] keys =  this.keys;
 for (int slot = 0, max = mask; slot <= max; slot++) {
  long existing;
  if (!((existing = keys[slot]) == 0)) {
   cloned[j++] = existing;
  }
 }
 return cloned;
}

Code example source: neo4j-contrib/neo4j-graph-algorithms

public static long intersection(LongHashSet targets1, LongHashSet targets2) {
  LongHashSet intersectionSet = new LongHashSet(targets1);
  intersectionSet.retainAll(targets2);
  return intersectionSet.size();
}
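
The helper copies the first set, calls retainAll to keep only the keys shared with the second set, and reports the overlap via size(). A brief sketch of the same pattern with illustrative values:

import com.carrotsearch.hppc.LongHashSet;

public class IntersectionDemo {
  public static void main(String[] args) {
    LongHashSet targets1 = LongHashSet.from(1L, 2L, 3L);
    LongHashSet targets2 = LongHashSet.from(2L, 3L, 4L);
    // Copy-then-retainAll leaves {2, 3}, so size() reports an overlap of 2.
    LongHashSet intersectionSet = new LongHashSet(targets1);
    intersectionSet.retainAll(targets2);
    System.out.println(intersectionSet.size()); // 2
  }
}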

Code example source: org.neo4j/graph-algorithms-core

public static long intersection(LongHashSet targets1, LongHashSet targets2) {
  LongHashSet intersectionSet = new LongHashSet(targets1);
  intersectionSet.retainAll(targets2);
  return intersectionSet.size();
}

Code example source: harbby/presto-connectors

/**
 * Return true if all keys of some other container exist in this container.
 */
private boolean sameKeys(LongSet other) {
 if (other.size() != size()) {
  return false;
 }
 for (LongCursor c : other) {
  if (!contains( c.value)) {
   return false;
  }
 }
 return true;
}

Code example source: harbby/presto-connectors

/**
 * {@inheritDoc}
 */
@Override
public int removeAll(LongPredicate predicate) {
 int before = size();
 if (hasEmptyKey) {
  if (predicate.apply(0L)) {
   hasEmptyKey = false;
  }
 }
 final long[] keys =  this.keys;
 for (int slot = 0, max = this.mask; slot <= max;) {
  long existing;
  if (!((existing = keys[slot]) == 0)) {
   if (predicate.apply(existing)) {
    shiftConflictingKeys(slot);
    continue; // Repeat the check for the same slot (shifted).
   }
  }
  slot++;
 }
 return before - size();
}

Code example source: neo4j-contrib/neo4j-graph-algorithms

public static long intersection2(long[] targets1, long[] targets2) {
  LongHashSet intersectionSet = LongHashSet.from(targets1);
  intersectionSet.retainAll(LongHashSet.from(targets2));
  return intersectionSet.size();
}

Code example source: org.neo4j/graph-algorithms-core

public static long intersection2(long[] targets1, long[] targets2) {
  LongHashSet intersectionSet = LongHashSet.from(targets1);
  intersectionSet.retainAll(LongHashSet.from(targets2));
  return intersectionSet.size();
}

Code example source: harbby/presto-connectors

/**
 * Allocate new internal buffers. This method attempts to allocate
 * and assign internal buffers atomically (either allocations succeed or not).
 */
protected void allocateBuffers(int arraySize) {
 assert Integer.bitCount(arraySize) == 1;
 // Compute new hash mixer candidate before expanding.
 final int newKeyMixer = this.orderMixer.newKeyMixer(arraySize);
 // Ensure no change is done if we hit an OOM.
 long[] prevKeys =  this.keys;
 try {
  int emptyElementSlot = 1;
  this.keys = (new long [arraySize + emptyElementSlot]);
 } catch (OutOfMemoryError e) {
  this.keys = prevKeys;
  throw new BufferAllocationException(
    "Not enough memory to allocate buffers for rehashing: %,d -> %,d", 
    e,
    this.keys == null ? 0 : size(), 
    arraySize);
 }
 this.resizeAt = expandAtCount(arraySize, loadFactor);
 this.keyMixer = newKeyMixer;
 this.mask = arraySize - 1;
}

Code example source: harbby/presto-connectors

/**
 * This method is invoked when there is a new key to be inserted into
 * the buffer but there are not enough empty slots to do so.
 * 
 * New buffers are allocated. If this succeeds, we know we can proceed
 * with rehashing so we assign the pending element to the previous buffer
 * (possibly violating the invariant of having at least one empty slot)
 * and rehash all keys, substituting new buffers at the end.  
 */
protected void allocateThenInsertThenRehash(int slot, long pendingKey) {
 assert assigned == resizeAt 
     && (( keys[slot]) == 0)
     && !((pendingKey) == 0);
 // Try to allocate new buffers first. If we OOM, we leave in a consistent state.
 final long[] prevKeys =  this.keys;
 allocateBuffers(nextBufferSize(mask + 1, size(), loadFactor));
 assert this.keys.length > prevKeys.length;
 // We have succeeded at allocating new data so insert the pending key/value at
 // the free slot in the old arrays before rehashing.
 prevKeys[slot] = pendingKey;
 // Rehash old keys, including the pending key.
 rehash(prevKeys);
}

Code example source: sirensolutions/siren-join

@Override
public BytesRef writeToBytes() {
 long start = System.nanoTime();
 int size = set.size();
 BytesRef bytes = new BytesRef(new byte[HEADER_SIZE + 8 * size]);
 // Encode encoding type
 Bytes.writeInt(bytes, this.getEncoding().ordinal());
 // Encode flag
 bytes.bytes[bytes.offset++] = (byte) (this.isPruned() ? 1 : 0);
 // Encode size of the set
 Bytes.writeInt(bytes, size);
 // Encode longs
 for (LongCursor i : set) {
  Bytes.writeLong(bytes, i.value);
 }
 logger.debug("Serialized {} terms - took {} ms", this.size(), (System.nanoTime() - start) / 1000000);
 bytes.length = bytes.offset;
 bytes.offset = 0;
 return bytes;
}

Code example source: sirensolutions/siren-join

/**
 * Serialize the list of terms to the {@link StreamOutput}.
 * <br>
 * Given the low performance of {@link org.elasticsearch.common.io.stream.BytesStreamOutput} when writing a large number
 * of longs (5 to 10 times slower than writing directly to a byte[]), we use a small buffer of 8kb
 * to optimise the throughput. 8kb seems to be the optimal buffer size, larger buffer size did not improve
 * the throughput.
 *
 * @param out the output
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
 // Encode flag
 out.writeBoolean(this.isPruned());
 // Encode size of list
 out.writeInt(set.size());
 // Encode longs
 BytesRef buffer = new BytesRef(new byte[1024 * 8]);
 Iterator<LongCursor> it = set.iterator();
 while (it.hasNext()) {
  Bytes.writeLong(buffer, it.next().value);
  if (buffer.offset == buffer.length) {
   out.write(buffer.bytes, 0, buffer.offset);
   buffer.offset = 0;
  }
 }
 // flush the remaining bytes from the buffer
 out.write(buffer.bytes, 0, buffer.offset);
}
