Source file src/runtime/malloc_generated.go

// Code generated by mkmalloc.go; DO NOT EDIT.
// See overview in malloc_stubs.go.

package runtime

import (
	"internal/goarch"
	"internal/goexperiment"
	"internal/runtime/sys"
	"unsafe"
)

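// mallocgcSmallScanNoHeaderSC1 is a generated specialization of the
// small-object allocation path for pointer-bearing objects without a
// malloc header, with sizeclass and elemsize fixed for size class 1
// (8-byte slots) so the compiler can fold the constants below. needzero
// is unused here; the span's needzero flag decides whether the slot must
// be cleared. The commentary below mirrors the hand-written paths this
// code is generated from (mallocgc, nextFreeFast, writeHeapBitsSmall).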
func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

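	// Assist the GC if needed.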
	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 1
	const elemsize = 8

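	// Set mp.mallocing to keep from being preempted by GC.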
	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

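	// Inlined nextFreeFast: grab the next free object from the span's
	// allocation cache, if one is quickly available.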
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*8 + span.base())
			}
		}
	}
	v := nextFreeFastResult
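	// No object in the cache; fall back to the slower path, refilling the
	// mcache from the mcentral if necessary.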
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
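		// initHeapBits already set the pointer bits for the 8-byte size
		// class on 64-bit platforms.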
		c.scanAlloc += 8
	} else {
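		// Inlined heapSetTypeNoHeader/writeHeapBitsSmall: record typ's
		// pointer/scalar bitmap in the span's heap bits.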
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(8)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 8

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

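		// Since this never writes more than one uintptr's worth of bits,
		// the update below is either one or two writes.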
		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

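	// Ensure that the stores above that initialize x to type-safe memory
	// and set the heap bits occur before the caller can make x observable
	// to the garbage collector. Otherwise, on weakly ordered machines, the
	// garbage collector could follow a pointer to x but see uninitialized
	// memory or stale heap bits.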
	publicationBarrier()

	if writeBarrier.enabled {
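		// Allocate black during GC. All slots hold nil so no scanning is
		// necessary. This may be racing with GC, so mark the bit
		// atomically if there can be a race.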
		gcmarknewobject(span, uintptr(x))
	} else {
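		// Track the last free index before the mark phase. This field is
		// only used by the garbage collector: during the mark phase, the
		// conservative scanner uses it to filter out addresses in the
		// span that have not been allocated.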
		span.freeIndexForScan = span.freeindex
	}

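	// Note that cache c is only valid while m is acquired; see #47302.
	// N.B. Sample with elemsize, the full slot size, because that matches
	// how the GC updates the mem profile on the "free" side.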
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

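	// Account for internal fragmentation in the assist debt now that the
	// full slot size is known.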
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

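// mallocgcSmallScanNoHeaderSC2 is the same generated allocation path,
// specialized for size class 2 (16-byte slots); see SC1 for commentary.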
func mallocgcSmallScanNoHeaderSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 2
	const elemsize = 16

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(16)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 16

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

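// mallocgcSmallScanNoHeaderSC3 is the same generated allocation path,
// specialized for size class 3 (24-byte slots); see SC1 for commentary.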
func mallocgcSmallScanNoHeaderSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 3
	const elemsize = 24

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*24 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(24)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 24

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

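// mallocgcSmallScanNoHeaderSC4 is the same generated allocation path,
// specialized for size class 4 (32-byte slots); see SC1 for commentary.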
func mallocgcSmallScanNoHeaderSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 4
	const elemsize = 32

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*32 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(32)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 32

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

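// mallocgcSmallScanNoHeaderSC5 is the same generated allocation path,
// specialized for size class 5 (48-byte slots); see SC1 for commentary.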
func mallocgcSmallScanNoHeaderSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 5
	const elemsize = 48

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*48 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(48)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 48

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

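// mallocgcSmallScanNoHeaderSC6 is the same generated allocation path,
// specialized for size class 6 (64-byte slots); see SC1 for commentary.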
func mallocgcSmallScanNoHeaderSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 6
	const elemsize = 64

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*64 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(64)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 64

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

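// mallocgcSmallScanNoHeaderSC7 is the same generated allocation path,
// specialized for size class 7 (80-byte slots); see SC1 for commentary.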
func mallocgcSmallScanNoHeaderSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 7
	const elemsize = 80

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*80 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(80)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 80

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

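// mallocgcSmallScanNoHeaderSC8 is the same generated allocation path,
// specialized for size class 8 (96-byte slots); see SC1 for commentary.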
func mallocgcSmallScanNoHeaderSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 8
	const elemsize = 96

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*96 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(96)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 96

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

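// mallocgcSmallScanNoHeaderSC9 is the same generated allocation path,
// specialized for size class 9 (112-byte slots); see SC1 for commentary.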
func mallocgcSmallScanNoHeaderSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 9
	const elemsize = 112

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*112 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(112)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 112

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

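// mallocgcSmallScanNoHeaderSC10 is the same generated allocation path,
// specialized for size class 10 (128-byte slots); see SC1 for commentary.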
func mallocgcSmallScanNoHeaderSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 10
	const elemsize = 128

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*128 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(128)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 128

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

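// mallocgcSmallScanNoHeaderSC11 is the same generated allocation path,
// specialized for size class 11 (144-byte slots); see SC1 for commentary.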
func mallocgcSmallScanNoHeaderSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 11
	const elemsize = 144

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallScanNoHeader(size, typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(0)
	span := c.alloc[spc]

	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*144 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}
	if goarch.PtrSize == 8 && sizeclass == 1 {
		c.scanAlloc += 8
	} else {
		dataSize := size
		x := uintptr(x)

		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(144)) {
			throw("tried to write heap bits, but no heap bits in span")
		}

		src0 := readUintptr(getGCMask(typ))

		const elemsize = 144

		scanSize := typ.PtrBytes
		src := src0
		if typ.Size_ == goarch.PtrSize {
			src = (1 << (dataSize / goarch.PtrSize)) - 1
		} else {
			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
			}
			for i := typ.Size_; i < dataSize; i += typ.Size_ {
				src |= src0 << (i / goarch.PtrSize)
				scanSize += typ.Size_
			}
		}

		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
		dst := unsafe.Pointer(dstBase)
		o := (x - span.base()) / goarch.PtrSize
		i := o / ptrBits
		j := o % ptrBits
		const bits uintptr = elemsize / goarch.PtrSize
		const bitsIsPowerOfTwo = bits&(bits-1) == 0
		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
			bits0 := ptrBits - j
			bits1 := bits - bits0
			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
		} else {
			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
		}

		const doubleCheck = false
		if doubleCheck {
			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
		}
		if doubleCheckHeapSetType {
			doubleCheckHeapType(x, dataSize, typ, nil, span)
		}
		c.scanAlloc += scanSize
	}

	publicationBarrier()

	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

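// mallocgcSmallScanNoHeaderSC12 is the same generated allocation path,
// specialized for size class 12 (160-byte slots); see SC1 for commentary.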
  1828  func mallocgcSmallScanNoHeaderSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  1829  
  1830  	if doubleCheckMalloc {
  1831  		if gcphase == _GCmarktermination {
  1832  			throw("mallocgc called with gcphase == _GCmarktermination")
  1833  		}
  1834  	}
  1835  
  1836  	lockRankMayQueueFinalizer()
  1837  
  1838  	if debug.malloc {
  1839  		if x := preMallocgcDebug(size, typ); x != nil {
  1840  			return x
  1841  		}
  1842  	}
  1843  
  1844  	if gcBlackenEnabled != 0 {
  1845  		deductAssistCredit(size)
  1846  	}
  1847  
  1848  	const sizeclass = 12
  1849  
  1850  	const elemsize = 160
  1851  
  1852  	mp := acquirem()
  1853  	if doubleCheckMalloc {
  1854  		doubleCheckSmallScanNoHeader(size, typ, mp)
  1855  	}
  1856  	mp.mallocing = 1
  1857  
  1858  	checkGCTrigger := false
  1859  	c := getMCache(mp)
  1860  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  1861  	span := c.alloc[spc]
  1862  
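        	// Fast path: take the next free slot straight from the span's allocCache
        	// bitmap (an inlined copy of the nextFreeFast logic); fall back to
        	// (*mcache).nextFree below when the cache is exhausted.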
  1863  	var nextFreeFastResult gclinkptr
  1864  	if span.allocCache != 0 {
  1865  		theBit := sys.TrailingZeros64(span.allocCache)
  1866  		result := span.freeindex + uint16(theBit)
  1867  		if result < span.nelems {
  1868  			freeidx := result + 1
  1869  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  1870  				span.allocCache >>= uint(theBit + 1)
  1871  				span.freeindex = freeidx
  1872  				span.allocCount++
  1873  				nextFreeFastResult = gclinkptr(uintptr(result)*160 + span.base())
  1876  			}
  1877  		}
  1878  	}
  1879  	v := nextFreeFastResult
  1880  	if v == 0 {
  1881  		v, span, checkGCTrigger = c.nextFree(spc)
  1882  	}
  1883  	x := unsafe.Pointer(v)
  1884  	if span.needzero != 0 {
  1885  		memclrNoHeapPointers(x, elemsize)
  1886  	}
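        	// Write the GC bitmap for the new object. The size class 1 case below is
        	// statically false in this specialization (sizeclass == 12); it is emitted
        	// because every size class shares one template, and the compiler is
        	// expected to eliminate the dead branch.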
  1887  	if goarch.PtrSize == 8 && sizeclass == 1 {
  1888  
  1889  		c.scanAlloc += 8
  1890  	} else {
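        		// Inlined heap-bits write, mirroring (*mspan).writeHeapBitsSmall (see
        		// the throw message below): expand the type's GC mask across the
        		// allocation and merge it into the span's heap bitmap.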
  1891  		dataSize := size
  1892  		x := uintptr(x)
  1893  
  1894  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(160)) {
  1895  			throw("tried to write heap bits, but no heap bits in span")
  1896  		}
  1897  
  1898  		src0 := readUintptr(getGCMask(typ))
  1899  
  1900  		const elemsize = 160
  1901  
  1902  		scanSize := typ.PtrBytes
  1903  		src := src0
  1904  		if typ.Size_ == goarch.PtrSize {
  1905  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  1906  		} else {
  1907  
  1908  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  1909  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  1910  			}
  1911  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  1912  				src |= src0 << (i / goarch.PtrSize)
  1913  				scanSize += typ.Size_
  1914  			}
  1915  		}
  1916  
  1917  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  1918  		dst := unsafe.Pointer(dstBase)
  1919  		o := (x - span.base()) / goarch.PtrSize
  1920  		i := o / ptrBits
  1921  		j := o % ptrBits
  1922  		const bits uintptr = elemsize / goarch.PtrSize
  1923  
  1924  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  1925  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  1926  
  1927  			bits0 := ptrBits - j
  1928  			bits1 := bits - bits0
  1929  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  1930  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  1931  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  1932  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  1933  		} else {
  1934  
  1935  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  1936  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  1937  		}
  1938  
  1939  		const doubleCheck = false
  1940  		if doubleCheck {
  1941  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  1942  		}
  1943  		if doubleCheckHeapSetType {
  1944  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  1945  		}
  1946  		c.scanAlloc += scanSize
  1947  	}
  1948  
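        	// Ensure the zeroing and heap-bit stores above become visible before the
        	// new pointer can be observed by the GC or other goroutines.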
  1949  	publicationBarrier()
  1950  
  1951  	if writeBarrier.enabled {
  1952  
  1953  		gcmarknewobject(span, uintptr(x))
  1954  	} else {
  1955  
  1956  		span.freeIndexForScan = span.freeindex
  1957  	}
  1958  
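        	// Charge this allocation against the heap-profiling sample budget and
        	// sample it if the budget is exhausted or the profiling rate changed.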
  1959  	c.nextSample -= int64(elemsize)
  1960  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  1961  		profilealloc(mp, x, elemsize)
  1962  	}
  1963  	mp.mallocing = 0
  1964  	releasem(mp)
  1965  
  1966  	if checkGCTrigger {
  1967  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  1968  			gcStart(t)
  1969  		}
  1970  	}
  1971  	gp := getg()
  1972  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  1973  
  1974  		addSecret(x, size)
  1975  	}
  1976  
  1977  	if valgrindenabled {
  1978  		valgrindMalloc(x, size)
  1979  	}
  1980  
  1981  	if gcBlackenEnabled != 0 && elemsize != 0 {
  1982  		if assistG := getg().m.curg; assistG != nil {
  1983  			assistG.gcAssistBytes -= int64(elemsize - size)
  1984  		}
  1985  	}
  1986  
  1987  	if debug.malloc {
  1988  		postMallocgcDebug(x, elemsize, typ)
  1989  	}
  1990  	return x
  1991  }
  1992  
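        // mallocgcSmallScanNoHeaderSC13 is like mallocgcSmallScanNoHeaderSC12, but
        // for size class 13 (176-byte objects).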
  1993  func mallocgcSmallScanNoHeaderSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  1994  
  1995  	if doubleCheckMalloc {
  1996  		if gcphase == _GCmarktermination {
  1997  			throw("mallocgc called with gcphase == _GCmarktermination")
  1998  		}
  1999  	}
  2000  
  2001  	lockRankMayQueueFinalizer()
  2002  
  2003  	if debug.malloc {
  2004  		if x := preMallocgcDebug(size, typ); x != nil {
  2005  			return x
  2006  		}
  2007  	}
  2008  
  2009  	if gcBlackenEnabled != 0 {
  2010  		deductAssistCredit(size)
  2011  	}
  2012  
  2013  	const sizeclass = 13
  2014  
  2015  	const elemsize = 176
  2016  
  2017  	mp := acquirem()
  2018  	if doubleCheckMalloc {
  2019  		doubleCheckSmallScanNoHeader(size, typ, mp)
  2020  	}
  2021  	mp.mallocing = 1
  2022  
  2023  	checkGCTrigger := false
  2024  	c := getMCache(mp)
  2025  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  2026  	span := c.alloc[spc]
  2027  
  2028  	var nextFreeFastResult gclinkptr
  2029  	if span.allocCache != 0 {
  2030  		theBit := sys.TrailingZeros64(span.allocCache)
  2031  		result := span.freeindex + uint16(theBit)
  2032  		if result < span.nelems {
  2033  			freeidx := result + 1
  2034  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  2035  				span.allocCache >>= uint(theBit + 1)
  2036  				span.freeindex = freeidx
  2037  				span.allocCount++
  2038  				nextFreeFastResult = gclinkptr(uintptr(result)*176 + span.base())
  2041  			}
  2042  		}
  2043  	}
  2044  	v := nextFreeFastResult
  2045  	if v == 0 {
  2046  		v, span, checkGCTrigger = c.nextFree(spc)
  2047  	}
  2048  	x := unsafe.Pointer(v)
  2049  	if span.needzero != 0 {
  2050  		memclrNoHeapPointers(x, elemsize)
  2051  	}
  2052  	if goarch.PtrSize == 8 && sizeclass == 1 {
  2053  
  2054  		c.scanAlloc += 8
  2055  	} else {
  2056  		dataSize := size
  2057  		x := uintptr(x)
  2058  
  2059  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(176)) {
  2060  			throw("tried to write heap bits, but no heap bits in span")
  2061  		}
  2062  
  2063  		src0 := readUintptr(getGCMask(typ))
  2064  
  2065  		const elemsize = 176
  2066  
  2067  		scanSize := typ.PtrBytes
  2068  		src := src0
  2069  		if typ.Size_ == goarch.PtrSize {
  2070  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  2071  		} else {
  2072  
  2073  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  2074  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  2075  			}
  2076  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  2077  				src |= src0 << (i / goarch.PtrSize)
  2078  				scanSize += typ.Size_
  2079  			}
  2080  		}
  2081  
  2082  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  2083  		dst := unsafe.Pointer(dstBase)
  2084  		o := (x - span.base()) / goarch.PtrSize
  2085  		i := o / ptrBits
  2086  		j := o % ptrBits
  2087  		const bits uintptr = elemsize / goarch.PtrSize
  2088  
  2089  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  2090  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  2091  
  2092  			bits0 := ptrBits - j
  2093  			bits1 := bits - bits0
  2094  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  2095  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  2096  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  2097  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  2098  		} else {
  2099  
  2100  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  2101  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  2102  		}
  2103  
  2104  		const doubleCheck = false
  2105  		if doubleCheck {
  2106  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  2107  		}
  2108  		if doubleCheckHeapSetType {
  2109  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  2110  		}
  2111  		c.scanAlloc += scanSize
  2112  	}
  2113  
  2114  	publicationBarrier()
  2115  
  2116  	if writeBarrier.enabled {
  2117  
  2118  		gcmarknewobject(span, uintptr(x))
  2119  	} else {
  2120  
  2121  		span.freeIndexForScan = span.freeindex
  2122  	}
  2123  
  2124  	c.nextSample -= int64(elemsize)
  2125  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  2126  		profilealloc(mp, x, elemsize)
  2127  	}
  2128  	mp.mallocing = 0
  2129  	releasem(mp)
  2130  
  2131  	if checkGCTrigger {
  2132  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  2133  			gcStart(t)
  2134  		}
  2135  	}
  2136  	gp := getg()
  2137  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  2138  
  2139  		addSecret(x, size)
  2140  	}
  2141  
  2142  	if valgrindenabled {
  2143  		valgrindMalloc(x, size)
  2144  	}
  2145  
  2146  	if gcBlackenEnabled != 0 && elemsize != 0 {
  2147  		if assistG := getg().m.curg; assistG != nil {
  2148  			assistG.gcAssistBytes -= int64(elemsize - size)
  2149  		}
  2150  	}
  2151  
  2152  	if debug.malloc {
  2153  		postMallocgcDebug(x, elemsize, typ)
  2154  	}
  2155  	return x
  2156  }
  2157  
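        // mallocgcSmallScanNoHeaderSC14 is like mallocgcSmallScanNoHeaderSC12, but
        // for size class 14 (192-byte objects).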
  2158  func mallocgcSmallScanNoHeaderSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  2159  
  2160  	if doubleCheckMalloc {
  2161  		if gcphase == _GCmarktermination {
  2162  			throw("mallocgc called with gcphase == _GCmarktermination")
  2163  		}
  2164  	}
  2165  
  2166  	lockRankMayQueueFinalizer()
  2167  
  2168  	if debug.malloc {
  2169  		if x := preMallocgcDebug(size, typ); x != nil {
  2170  			return x
  2171  		}
  2172  	}
  2173  
  2174  	if gcBlackenEnabled != 0 {
  2175  		deductAssistCredit(size)
  2176  	}
  2177  
  2178  	const sizeclass = 14
  2179  
  2180  	const elemsize = 192
  2181  
  2182  	mp := acquirem()
  2183  	if doubleCheckMalloc {
  2184  		doubleCheckSmallScanNoHeader(size, typ, mp)
  2185  	}
  2186  	mp.mallocing = 1
  2187  
  2188  	checkGCTrigger := false
  2189  	c := getMCache(mp)
  2190  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  2191  	span := c.alloc[spc]
  2192  
  2193  	var nextFreeFastResult gclinkptr
  2194  	if span.allocCache != 0 {
  2195  		theBit := sys.TrailingZeros64(span.allocCache)
  2196  		result := span.freeindex + uint16(theBit)
  2197  		if result < span.nelems {
  2198  			freeidx := result + 1
  2199  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  2200  				span.allocCache >>= uint(theBit + 1)
  2201  				span.freeindex = freeidx
  2202  				span.allocCount++
  2203  				nextFreeFastResult = gclinkptr(uintptr(result)*192 + span.base())
  2206  			}
  2207  		}
  2208  	}
  2209  	v := nextFreeFastResult
  2210  	if v == 0 {
  2211  		v, span, checkGCTrigger = c.nextFree(spc)
  2212  	}
  2213  	x := unsafe.Pointer(v)
  2214  	if span.needzero != 0 {
  2215  		memclrNoHeapPointers(x, elemsize)
  2216  	}
  2217  	if goarch.PtrSize == 8 && sizeclass == 1 {
  2218  
  2219  		c.scanAlloc += 8
  2220  	} else {
  2221  		dataSize := size
  2222  		x := uintptr(x)
  2223  
  2224  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(192)) {
  2225  			throw("tried to write heap bits, but no heap bits in span")
  2226  		}
  2227  
  2228  		src0 := readUintptr(getGCMask(typ))
  2229  
  2230  		const elemsize = 192
  2231  
  2232  		scanSize := typ.PtrBytes
  2233  		src := src0
  2234  		if typ.Size_ == goarch.PtrSize {
  2235  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  2236  		} else {
  2237  
  2238  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  2239  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  2240  			}
  2241  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  2242  				src |= src0 << (i / goarch.PtrSize)
  2243  				scanSize += typ.Size_
  2244  			}
  2245  		}
  2246  
  2247  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  2248  		dst := unsafe.Pointer(dstBase)
  2249  		o := (x - span.base()) / goarch.PtrSize
  2250  		i := o / ptrBits
  2251  		j := o % ptrBits
  2252  		const bits uintptr = elemsize / goarch.PtrSize
  2253  
  2254  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  2255  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  2256  
  2257  			bits0 := ptrBits - j
  2258  			bits1 := bits - bits0
  2259  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  2260  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  2261  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  2262  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  2263  		} else {
  2264  
  2265  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  2266  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  2267  		}
  2268  
  2269  		const doubleCheck = false
  2270  		if doubleCheck {
  2271  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  2272  		}
  2273  		if doubleCheckHeapSetType {
  2274  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  2275  		}
  2276  		c.scanAlloc += scanSize
  2277  	}
  2278  
  2279  	publicationBarrier()
  2280  
  2281  	if writeBarrier.enabled {
  2282  
  2283  		gcmarknewobject(span, uintptr(x))
  2284  	} else {
  2285  
  2286  		span.freeIndexForScan = span.freeindex
  2287  	}
  2288  
  2289  	c.nextSample -= int64(elemsize)
  2290  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  2291  		profilealloc(mp, x, elemsize)
  2292  	}
  2293  	mp.mallocing = 0
  2294  	releasem(mp)
  2295  
  2296  	if checkGCTrigger {
  2297  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  2298  			gcStart(t)
  2299  		}
  2300  	}
  2301  	gp := getg()
  2302  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  2303  
  2304  		addSecret(x, size)
  2305  	}
  2306  
  2307  	if valgrindenabled {
  2308  		valgrindMalloc(x, size)
  2309  	}
  2310  
  2311  	if gcBlackenEnabled != 0 && elemsize != 0 {
  2312  		if assistG := getg().m.curg; assistG != nil {
  2313  			assistG.gcAssistBytes -= int64(elemsize - size)
  2314  		}
  2315  	}
  2316  
  2317  	if debug.malloc {
  2318  		postMallocgcDebug(x, elemsize, typ)
  2319  	}
  2320  	return x
  2321  }
  2322  
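        // mallocgcSmallScanNoHeaderSC15 is like mallocgcSmallScanNoHeaderSC12, but
        // for size class 15 (208-byte objects).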
  2323  func mallocgcSmallScanNoHeaderSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  2324  
  2325  	if doubleCheckMalloc {
  2326  		if gcphase == _GCmarktermination {
  2327  			throw("mallocgc called with gcphase == _GCmarktermination")
  2328  		}
  2329  	}
  2330  
  2331  	lockRankMayQueueFinalizer()
  2332  
  2333  	if debug.malloc {
  2334  		if x := preMallocgcDebug(size, typ); x != nil {
  2335  			return x
  2336  		}
  2337  	}
  2338  
  2339  	if gcBlackenEnabled != 0 {
  2340  		deductAssistCredit(size)
  2341  	}
  2342  
  2343  	const sizeclass = 15
  2344  
  2345  	const elemsize = 208
  2346  
  2347  	mp := acquirem()
  2348  	if doubleCheckMalloc {
  2349  		doubleCheckSmallScanNoHeader(size, typ, mp)
  2350  	}
  2351  	mp.mallocing = 1
  2352  
  2353  	checkGCTrigger := false
  2354  	c := getMCache(mp)
  2355  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  2356  	span := c.alloc[spc]
  2357  
  2358  	var nextFreeFastResult gclinkptr
  2359  	if span.allocCache != 0 {
  2360  		theBit := sys.TrailingZeros64(span.allocCache)
  2361  		result := span.freeindex + uint16(theBit)
  2362  		if result < span.nelems {
  2363  			freeidx := result + 1
  2364  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  2365  				span.allocCache >>= uint(theBit + 1)
  2366  				span.freeindex = freeidx
  2367  				span.allocCount++
  2368  				nextFreeFastResult = gclinkptr(uintptr(result)*208 + span.base())
  2371  			}
  2372  		}
  2373  	}
  2374  	v := nextFreeFastResult
  2375  	if v == 0 {
  2376  		v, span, checkGCTrigger = c.nextFree(spc)
  2377  	}
  2378  	x := unsafe.Pointer(v)
  2379  	if span.needzero != 0 {
  2380  		memclrNoHeapPointers(x, elemsize)
  2381  	}
  2382  	if goarch.PtrSize == 8 && sizeclass == 1 {
  2383  
  2384  		c.scanAlloc += 8
  2385  	} else {
  2386  		dataSize := size
  2387  		x := uintptr(x)
  2388  
  2389  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(208)) {
  2390  			throw("tried to write heap bits, but no heap bits in span")
  2391  		}
  2392  
  2393  		src0 := readUintptr(getGCMask(typ))
  2394  
  2395  		const elemsize = 208
  2396  
  2397  		scanSize := typ.PtrBytes
  2398  		src := src0
  2399  		if typ.Size_ == goarch.PtrSize {
  2400  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  2401  		} else {
  2402  
  2403  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  2404  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  2405  			}
  2406  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  2407  				src |= src0 << (i / goarch.PtrSize)
  2408  				scanSize += typ.Size_
  2409  			}
  2410  		}
  2411  
  2412  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  2413  		dst := unsafe.Pointer(dstBase)
  2414  		o := (x - span.base()) / goarch.PtrSize
  2415  		i := o / ptrBits
  2416  		j := o % ptrBits
  2417  		const bits uintptr = elemsize / goarch.PtrSize
  2418  
  2419  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  2420  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  2421  
  2422  			bits0 := ptrBits - j
  2423  			bits1 := bits - bits0
  2424  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  2425  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  2426  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  2427  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  2428  		} else {
  2429  
  2430  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  2431  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  2432  		}
  2433  
  2434  		const doubleCheck = false
  2435  		if doubleCheck {
  2436  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  2437  		}
  2438  		if doubleCheckHeapSetType {
  2439  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  2440  		}
  2441  		c.scanAlloc += scanSize
  2442  	}
  2443  
  2444  	publicationBarrier()
  2445  
  2446  	if writeBarrier.enabled {
  2447  
  2448  		gcmarknewobject(span, uintptr(x))
  2449  	} else {
  2450  
  2451  		span.freeIndexForScan = span.freeindex
  2452  	}
  2453  
  2454  	c.nextSample -= int64(elemsize)
  2455  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  2456  		profilealloc(mp, x, elemsize)
  2457  	}
  2458  	mp.mallocing = 0
  2459  	releasem(mp)
  2460  
  2461  	if checkGCTrigger {
  2462  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  2463  			gcStart(t)
  2464  		}
  2465  	}
  2466  	gp := getg()
  2467  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  2468  
  2469  		addSecret(x, size)
  2470  	}
  2471  
  2472  	if valgrindenabled {
  2473  		valgrindMalloc(x, size)
  2474  	}
  2475  
  2476  	if gcBlackenEnabled != 0 && elemsize != 0 {
  2477  		if assistG := getg().m.curg; assistG != nil {
  2478  			assistG.gcAssistBytes -= int64(elemsize - size)
  2479  		}
  2480  	}
  2481  
  2482  	if debug.malloc {
  2483  		postMallocgcDebug(x, elemsize, typ)
  2484  	}
  2485  	return x
  2486  }
  2487  
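        // mallocgcSmallScanNoHeaderSC16 is like mallocgcSmallScanNoHeaderSC12, but
        // for size class 16 (224-byte objects).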
  2488  func mallocgcSmallScanNoHeaderSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  2489  
  2490  	if doubleCheckMalloc {
  2491  		if gcphase == _GCmarktermination {
  2492  			throw("mallocgc called with gcphase == _GCmarktermination")
  2493  		}
  2494  	}
  2495  
  2496  	lockRankMayQueueFinalizer()
  2497  
  2498  	if debug.malloc {
  2499  		if x := preMallocgcDebug(size, typ); x != nil {
  2500  			return x
  2501  		}
  2502  	}
  2503  
  2504  	if gcBlackenEnabled != 0 {
  2505  		deductAssistCredit(size)
  2506  	}
  2507  
  2508  	const sizeclass = 16
  2509  
  2510  	const elemsize = 224
  2511  
  2512  	mp := acquirem()
  2513  	if doubleCheckMalloc {
  2514  		doubleCheckSmallScanNoHeader(size, typ, mp)
  2515  	}
  2516  	mp.mallocing = 1
  2517  
  2518  	checkGCTrigger := false
  2519  	c := getMCache(mp)
  2520  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  2521  	span := c.alloc[spc]
  2522  
  2523  	var nextFreeFastResult gclinkptr
  2524  	if span.allocCache != 0 {
  2525  		theBit := sys.TrailingZeros64(span.allocCache)
  2526  		result := span.freeindex + uint16(theBit)
  2527  		if result < span.nelems {
  2528  			freeidx := result + 1
  2529  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  2530  				span.allocCache >>= uint(theBit + 1)
  2531  				span.freeindex = freeidx
  2532  				span.allocCount++
  2533  				nextFreeFastResult = gclinkptr(uintptr(result)*224 + span.base())
  2536  			}
  2537  		}
  2538  	}
  2539  	v := nextFreeFastResult
  2540  	if v == 0 {
  2541  		v, span, checkGCTrigger = c.nextFree(spc)
  2542  	}
  2543  	x := unsafe.Pointer(v)
  2544  	if span.needzero != 0 {
  2545  		memclrNoHeapPointers(x, elemsize)
  2546  	}
  2547  	if goarch.PtrSize == 8 && sizeclass == 1 {
  2548  
  2549  		c.scanAlloc += 8
  2550  	} else {
  2551  		dataSize := size
  2552  		x := uintptr(x)
  2553  
  2554  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(224)) {
  2555  			throw("tried to write heap bits, but no heap bits in span")
  2556  		}
  2557  
  2558  		src0 := readUintptr(getGCMask(typ))
  2559  
  2560  		const elemsize = 224
  2561  
  2562  		scanSize := typ.PtrBytes
  2563  		src := src0
  2564  		if typ.Size_ == goarch.PtrSize {
  2565  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  2566  		} else {
  2567  
  2568  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  2569  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  2570  			}
  2571  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  2572  				src |= src0 << (i / goarch.PtrSize)
  2573  				scanSize += typ.Size_
  2574  			}
  2575  		}
  2576  
  2577  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  2578  		dst := unsafe.Pointer(dstBase)
  2579  		o := (x - span.base()) / goarch.PtrSize
  2580  		i := o / ptrBits
  2581  		j := o % ptrBits
  2582  		const bits uintptr = elemsize / goarch.PtrSize
  2583  
  2584  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  2585  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  2586  
  2587  			bits0 := ptrBits - j
  2588  			bits1 := bits - bits0
  2589  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  2590  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  2591  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  2592  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  2593  		} else {
  2594  
  2595  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  2596  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  2597  		}
  2598  
  2599  		const doubleCheck = false
  2600  		if doubleCheck {
  2601  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  2602  		}
  2603  		if doubleCheckHeapSetType {
  2604  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  2605  		}
  2606  		c.scanAlloc += scanSize
  2607  	}
  2608  
  2609  	publicationBarrier()
  2610  
  2611  	if writeBarrier.enabled {
  2612  
  2613  		gcmarknewobject(span, uintptr(x))
  2614  	} else {
  2615  
  2616  		span.freeIndexForScan = span.freeindex
  2617  	}
  2618  
  2619  	c.nextSample -= int64(elemsize)
  2620  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  2621  		profilealloc(mp, x, elemsize)
  2622  	}
  2623  	mp.mallocing = 0
  2624  	releasem(mp)
  2625  
  2626  	if checkGCTrigger {
  2627  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  2628  			gcStart(t)
  2629  		}
  2630  	}
  2631  	gp := getg()
  2632  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  2633  
  2634  		addSecret(x, size)
  2635  	}
  2636  
  2637  	if valgrindenabled {
  2638  		valgrindMalloc(x, size)
  2639  	}
  2640  
  2641  	if gcBlackenEnabled != 0 && elemsize != 0 {
  2642  		if assistG := getg().m.curg; assistG != nil {
  2643  			assistG.gcAssistBytes -= int64(elemsize - size)
  2644  		}
  2645  	}
  2646  
  2647  	if debug.malloc {
  2648  		postMallocgcDebug(x, elemsize, typ)
  2649  	}
  2650  	return x
  2651  }
  2652  
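        // mallocgcSmallScanNoHeaderSC17 is like mallocgcSmallScanNoHeaderSC12, but
        // for size class 17 (240-byte objects).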
  2653  func mallocgcSmallScanNoHeaderSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  2654  
  2655  	if doubleCheckMalloc {
  2656  		if gcphase == _GCmarktermination {
  2657  			throw("mallocgc called with gcphase == _GCmarktermination")
  2658  		}
  2659  	}
  2660  
  2661  	lockRankMayQueueFinalizer()
  2662  
  2663  	if debug.malloc {
  2664  		if x := preMallocgcDebug(size, typ); x != nil {
  2665  			return x
  2666  		}
  2667  	}
  2668  
  2669  	if gcBlackenEnabled != 0 {
  2670  		deductAssistCredit(size)
  2671  	}
  2672  
  2673  	const sizeclass = 17
  2674  
  2675  	const elemsize = 240
  2676  
  2677  	mp := acquirem()
  2678  	if doubleCheckMalloc {
  2679  		doubleCheckSmallScanNoHeader(size, typ, mp)
  2680  	}
  2681  	mp.mallocing = 1
  2682  
  2683  	checkGCTrigger := false
  2684  	c := getMCache(mp)
  2685  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  2686  	span := c.alloc[spc]
  2687  
  2688  	var nextFreeFastResult gclinkptr
  2689  	if span.allocCache != 0 {
  2690  		theBit := sys.TrailingZeros64(span.allocCache)
  2691  		result := span.freeindex + uint16(theBit)
  2692  		if result < span.nelems {
  2693  			freeidx := result + 1
  2694  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  2695  				span.allocCache >>= uint(theBit + 1)
  2696  				span.freeindex = freeidx
  2697  				span.allocCount++
  2698  				nextFreeFastResult = gclinkptr(uintptr(result)*240 + span.base())
  2701  			}
  2702  		}
  2703  	}
  2704  	v := nextFreeFastResult
  2705  	if v == 0 {
  2706  		v, span, checkGCTrigger = c.nextFree(spc)
  2707  	}
  2708  	x := unsafe.Pointer(v)
  2709  	if span.needzero != 0 {
  2710  		memclrNoHeapPointers(x, elemsize)
  2711  	}
  2712  	if goarch.PtrSize == 8 && sizeclass == 1 {
  2713  
  2714  		c.scanAlloc += 8
  2715  	} else {
  2716  		dataSize := size
  2717  		x := uintptr(x)
  2718  
  2719  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(240)) {
  2720  			throw("tried to write heap bits, but no heap bits in span")
  2721  		}
  2722  
  2723  		src0 := readUintptr(getGCMask(typ))
  2724  
  2725  		const elemsize = 240
  2726  
  2727  		scanSize := typ.PtrBytes
  2728  		src := src0
  2729  		if typ.Size_ == goarch.PtrSize {
  2730  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  2731  		} else {
  2732  
  2733  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  2734  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  2735  			}
  2736  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  2737  				src |= src0 << (i / goarch.PtrSize)
  2738  				scanSize += typ.Size_
  2739  			}
  2740  		}
  2741  
  2742  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  2743  		dst := unsafe.Pointer(dstBase)
  2744  		o := (x - span.base()) / goarch.PtrSize
  2745  		i := o / ptrBits
  2746  		j := o % ptrBits
  2747  		const bits uintptr = elemsize / goarch.PtrSize
  2748  
  2749  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  2750  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  2751  
  2752  			bits0 := ptrBits - j
  2753  			bits1 := bits - bits0
  2754  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  2755  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  2756  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  2757  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  2758  		} else {
  2759  
  2760  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  2761  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  2762  		}
  2763  
  2764  		const doubleCheck = false
  2765  		if doubleCheck {
  2766  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  2767  		}
  2768  		if doubleCheckHeapSetType {
  2769  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  2770  		}
  2771  		c.scanAlloc += scanSize
  2772  	}
  2773  
  2774  	publicationBarrier()
  2775  
  2776  	if writeBarrier.enabled {
  2777  
  2778  		gcmarknewobject(span, uintptr(x))
  2779  	} else {
  2780  
  2781  		span.freeIndexForScan = span.freeindex
  2782  	}
  2783  
  2784  	c.nextSample -= int64(elemsize)
  2785  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  2786  		profilealloc(mp, x, elemsize)
  2787  	}
  2788  	mp.mallocing = 0
  2789  	releasem(mp)
  2790  
  2791  	if checkGCTrigger {
  2792  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  2793  			gcStart(t)
  2794  		}
  2795  	}
  2796  	gp := getg()
  2797  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  2798  
  2799  		addSecret(x, size)
  2800  	}
  2801  
  2802  	if valgrindenabled {
  2803  		valgrindMalloc(x, size)
  2804  	}
  2805  
  2806  	if gcBlackenEnabled != 0 && elemsize != 0 {
  2807  		if assistG := getg().m.curg; assistG != nil {
  2808  			assistG.gcAssistBytes -= int64(elemsize - size)
  2809  		}
  2810  	}
  2811  
  2812  	if debug.malloc {
  2813  		postMallocgcDebug(x, elemsize, typ)
  2814  	}
  2815  	return x
  2816  }
  2817  
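        // mallocgcSmallScanNoHeaderSC18 is like mallocgcSmallScanNoHeaderSC12, but
        // for size class 18 (256-byte objects).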
  2818  func mallocgcSmallScanNoHeaderSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  2819  
  2820  	if doubleCheckMalloc {
  2821  		if gcphase == _GCmarktermination {
  2822  			throw("mallocgc called with gcphase == _GCmarktermination")
  2823  		}
  2824  	}
  2825  
  2826  	lockRankMayQueueFinalizer()
  2827  
  2828  	if debug.malloc {
  2829  		if x := preMallocgcDebug(size, typ); x != nil {
  2830  			return x
  2831  		}
  2832  	}
  2833  
  2834  	if gcBlackenEnabled != 0 {
  2835  		deductAssistCredit(size)
  2836  	}
  2837  
  2838  	const sizeclass = 18
  2839  
  2840  	const elemsize = 256
  2841  
  2842  	mp := acquirem()
  2843  	if doubleCheckMalloc {
  2844  		doubleCheckSmallScanNoHeader(size, typ, mp)
  2845  	}
  2846  	mp.mallocing = 1
  2847  
  2848  	checkGCTrigger := false
  2849  	c := getMCache(mp)
  2850  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  2851  	span := c.alloc[spc]
  2852  
  2853  	var nextFreeFastResult gclinkptr
  2854  	if span.allocCache != 0 {
  2855  		theBit := sys.TrailingZeros64(span.allocCache)
  2856  		result := span.freeindex + uint16(theBit)
  2857  		if result < span.nelems {
  2858  			freeidx := result + 1
  2859  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  2860  				span.allocCache >>= uint(theBit + 1)
  2861  				span.freeindex = freeidx
  2862  				span.allocCount++
  2863  				nextFreeFastResult = gclinkptr(uintptr(result)*256 + span.base())
  2866  			}
  2867  		}
  2868  	}
  2869  	v := nextFreeFastResult
  2870  	if v == 0 {
  2871  		v, span, checkGCTrigger = c.nextFree(spc)
  2872  	}
  2873  	x := unsafe.Pointer(v)
  2874  	if span.needzero != 0 {
  2875  		memclrNoHeapPointers(x, elemsize)
  2876  	}
  2877  	if goarch.PtrSize == 8 && sizeclass == 1 {
  2878  
  2879  		c.scanAlloc += 8
  2880  	} else {
  2881  		dataSize := size
  2882  		x := uintptr(x)
  2883  
  2884  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(256)) {
  2885  			throw("tried to write heap bits, but no heap bits in span")
  2886  		}
  2887  
  2888  		src0 := readUintptr(getGCMask(typ))
  2889  
  2890  		const elemsize = 256
  2891  
  2892  		scanSize := typ.PtrBytes
  2893  		src := src0
  2894  		if typ.Size_ == goarch.PtrSize {
  2895  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  2896  		} else {
  2897  
  2898  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  2899  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  2900  			}
  2901  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  2902  				src |= src0 << (i / goarch.PtrSize)
  2903  				scanSize += typ.Size_
  2904  			}
  2905  		}
  2906  
  2907  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  2908  		dst := unsafe.Pointer(dstBase)
  2909  		o := (x - span.base()) / goarch.PtrSize
  2910  		i := o / ptrBits
  2911  		j := o % ptrBits
  2912  		const bits uintptr = elemsize / goarch.PtrSize
  2913  
  2914  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  2915  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  2916  
  2917  			bits0 := ptrBits - j
  2918  			bits1 := bits - bits0
  2919  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  2920  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  2921  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  2922  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  2923  		} else {
  2924  
  2925  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  2926  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  2927  		}
  2928  
  2929  		const doubleCheck = false
  2930  		if doubleCheck {
  2931  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  2932  		}
  2933  		if doubleCheckHeapSetType {
  2934  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  2935  		}
  2936  		c.scanAlloc += scanSize
  2937  	}
  2938  
  2939  	publicationBarrier()
  2940  
  2941  	if writeBarrier.enabled {
  2942  
  2943  		gcmarknewobject(span, uintptr(x))
  2944  	} else {
  2945  
  2946  		span.freeIndexForScan = span.freeindex
  2947  	}
  2948  
  2949  	c.nextSample -= int64(elemsize)
  2950  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  2951  		profilealloc(mp, x, elemsize)
  2952  	}
  2953  	mp.mallocing = 0
  2954  	releasem(mp)
  2955  
  2956  	if checkGCTrigger {
  2957  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  2958  			gcStart(t)
  2959  		}
  2960  	}
  2961  	gp := getg()
  2962  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  2963  
  2964  		addSecret(x, size)
  2965  	}
  2966  
  2967  	if valgrindenabled {
  2968  		valgrindMalloc(x, size)
  2969  	}
  2970  
  2971  	if gcBlackenEnabled != 0 && elemsize != 0 {
  2972  		if assistG := getg().m.curg; assistG != nil {
  2973  			assistG.gcAssistBytes -= int64(elemsize - size)
  2974  		}
  2975  	}
  2976  
  2977  	if debug.malloc {
  2978  		postMallocgcDebug(x, elemsize, typ)
  2979  	}
  2980  	return x
  2981  }
  2982  
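        // mallocgcSmallScanNoHeaderSC19 is like mallocgcSmallScanNoHeaderSC12, but
        // for size class 19 (288-byte objects).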
  2983  func mallocgcSmallScanNoHeaderSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  2984  
  2985  	if doubleCheckMalloc {
  2986  		if gcphase == _GCmarktermination {
  2987  			throw("mallocgc called with gcphase == _GCmarktermination")
  2988  		}
  2989  	}
  2990  
  2991  	lockRankMayQueueFinalizer()
  2992  
  2993  	if debug.malloc {
  2994  		if x := preMallocgcDebug(size, typ); x != nil {
  2995  			return x
  2996  		}
  2997  	}
  2998  
  2999  	if gcBlackenEnabled != 0 {
  3000  		deductAssistCredit(size)
  3001  	}
  3002  
  3003  	const sizeclass = 19
  3004  
  3005  	const elemsize = 288
  3006  
  3007  	mp := acquirem()
  3008  	if doubleCheckMalloc {
  3009  		doubleCheckSmallScanNoHeader(size, typ, mp)
  3010  	}
  3011  	mp.mallocing = 1
  3012  
  3013  	checkGCTrigger := false
  3014  	c := getMCache(mp)
  3015  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  3016  	span := c.alloc[spc]
  3017  
  3018  	var nextFreeFastResult gclinkptr
  3019  	if span.allocCache != 0 {
  3020  		theBit := sys.TrailingZeros64(span.allocCache)
  3021  		result := span.freeindex + uint16(theBit)
  3022  		if result < span.nelems {
  3023  			freeidx := result + 1
  3024  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  3025  				span.allocCache >>= uint(theBit + 1)
  3026  				span.freeindex = freeidx
  3027  				span.allocCount++
  3028  				nextFreeFastResult = gclinkptr(uintptr(result)*288 + span.base())
  3031  			}
  3032  		}
  3033  	}
  3034  	v := nextFreeFastResult
  3035  	if v == 0 {
  3036  		v, span, checkGCTrigger = c.nextFree(spc)
  3037  	}
  3038  	x := unsafe.Pointer(v)
  3039  	if span.needzero != 0 {
  3040  		memclrNoHeapPointers(x, elemsize)
  3041  	}
  3042  	if goarch.PtrSize == 8 && sizeclass == 1 {
  3043  
  3044  		c.scanAlloc += 8
  3045  	} else {
  3046  		dataSize := size
  3047  		x := uintptr(x)
  3048  
  3049  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(288)) {
  3050  			throw("tried to write heap bits, but no heap bits in span")
  3051  		}
  3052  
  3053  		src0 := readUintptr(getGCMask(typ))
  3054  
  3055  		const elemsize = 288
  3056  
  3057  		scanSize := typ.PtrBytes
  3058  		src := src0
  3059  		if typ.Size_ == goarch.PtrSize {
  3060  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  3061  		} else {
  3062  
  3063  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  3064  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  3065  			}
  3066  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  3067  				src |= src0 << (i / goarch.PtrSize)
  3068  				scanSize += typ.Size_
  3069  			}
  3070  		}
  3071  
  3072  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  3073  		dst := unsafe.Pointer(dstBase)
  3074  		o := (x - span.base()) / goarch.PtrSize
  3075  		i := o / ptrBits
  3076  		j := o % ptrBits
  3077  		const bits uintptr = elemsize / goarch.PtrSize
  3078  
  3079  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  3080  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  3081  
  3082  			bits0 := ptrBits - j
  3083  			bits1 := bits - bits0
  3084  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  3085  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  3086  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  3087  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  3088  		} else {
  3089  
  3090  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  3091  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  3092  		}
  3093  
  3094  		const doubleCheck = false
  3095  		if doubleCheck {
  3096  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  3097  		}
  3098  		if doubleCheckHeapSetType {
  3099  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  3100  		}
  3101  		c.scanAlloc += scanSize
  3102  	}
  3103  
  3104  	publicationBarrier()
  3105  
  3106  	if writeBarrier.enabled {
  3107  
  3108  		gcmarknewobject(span, uintptr(x))
  3109  	} else {
  3110  
  3111  		span.freeIndexForScan = span.freeindex
  3112  	}
  3113  
  3114  	c.nextSample -= int64(elemsize)
  3115  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  3116  		profilealloc(mp, x, elemsize)
  3117  	}
  3118  	mp.mallocing = 0
  3119  	releasem(mp)
  3120  
  3121  	if checkGCTrigger {
  3122  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  3123  			gcStart(t)
  3124  		}
  3125  	}
  3126  	gp := getg()
  3127  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  3128  
  3129  		addSecret(x, size)
  3130  	}
  3131  
  3132  	if valgrindenabled {
  3133  		valgrindMalloc(x, size)
  3134  	}
  3135  
  3136  	if gcBlackenEnabled != 0 && elemsize != 0 {
  3137  		if assistG := getg().m.curg; assistG != nil {
  3138  			assistG.gcAssistBytes -= int64(elemsize - size)
  3139  		}
  3140  	}
  3141  
  3142  	if debug.malloc {
  3143  		postMallocgcDebug(x, elemsize, typ)
  3144  	}
  3145  	return x
  3146  }
  3147  
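        // mallocgcSmallScanNoHeaderSC20 is like mallocgcSmallScanNoHeaderSC12, but
        // for size class 20 (320-byte objects).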
  3148  func mallocgcSmallScanNoHeaderSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  3149  
  3150  	if doubleCheckMalloc {
  3151  		if gcphase == _GCmarktermination {
  3152  			throw("mallocgc called with gcphase == _GCmarktermination")
  3153  		}
  3154  	}
  3155  
  3156  	lockRankMayQueueFinalizer()
  3157  
  3158  	if debug.malloc {
  3159  		if x := preMallocgcDebug(size, typ); x != nil {
  3160  			return x
  3161  		}
  3162  	}
  3163  
  3164  	if gcBlackenEnabled != 0 {
  3165  		deductAssistCredit(size)
  3166  	}
  3167  
  3168  	const sizeclass = 20
  3169  
  3170  	const elemsize = 320
  3171  
  3172  	mp := acquirem()
  3173  	if doubleCheckMalloc {
  3174  		doubleCheckSmallScanNoHeader(size, typ, mp)
  3175  	}
  3176  	mp.mallocing = 1
  3177  
  3178  	checkGCTrigger := false
  3179  	c := getMCache(mp)
  3180  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  3181  	span := c.alloc[spc]
  3182  
  3183  	var nextFreeFastResult gclinkptr
  3184  	if span.allocCache != 0 {
  3185  		theBit := sys.TrailingZeros64(span.allocCache)
  3186  		result := span.freeindex + uint16(theBit)
  3187  		if result < span.nelems {
  3188  			freeidx := result + 1
  3189  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  3190  				span.allocCache >>= uint(theBit + 1)
  3191  				span.freeindex = freeidx
  3192  				span.allocCount++
  3193  				nextFreeFastResult = gclinkptr(uintptr(result)*320 + span.base())
  3196  			}
  3197  		}
  3198  	}
  3199  	v := nextFreeFastResult
  3200  	if v == 0 {
  3201  		v, span, checkGCTrigger = c.nextFree(spc)
  3202  	}
  3203  	x := unsafe.Pointer(v)
  3204  	if span.needzero != 0 {
  3205  		memclrNoHeapPointers(x, elemsize)
  3206  	}
  3207  	if goarch.PtrSize == 8 && sizeclass == 1 {
  3208  
  3209  		c.scanAlloc += 8
  3210  	} else {
  3211  		dataSize := size
  3212  		x := uintptr(x)
  3213  
  3214  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(320)) {
  3215  			throw("tried to write heap bits, but no heap bits in span")
  3216  		}
  3217  
  3218  		src0 := readUintptr(getGCMask(typ))
  3219  
  3220  		const elemsize = 320
  3221  
  3222  		scanSize := typ.PtrBytes
  3223  		src := src0
  3224  		if typ.Size_ == goarch.PtrSize {
  3225  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  3226  		} else {
  3227  
  3228  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  3229  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  3230  			}
  3231  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  3232  				src |= src0 << (i / goarch.PtrSize)
  3233  				scanSize += typ.Size_
  3234  			}
  3235  		}
  3236  
  3237  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  3238  		dst := unsafe.Pointer(dstBase)
  3239  		o := (x - span.base()) / goarch.PtrSize
  3240  		i := o / ptrBits
  3241  		j := o % ptrBits
  3242  		const bits uintptr = elemsize / goarch.PtrSize
  3243  
  3244  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  3245  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  3246  
  3247  			bits0 := ptrBits - j
  3248  			bits1 := bits - bits0
  3249  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  3250  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  3251  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  3252  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  3253  		} else {
  3254  
  3255  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  3256  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  3257  		}
  3258  
  3259  		const doubleCheck = false
  3260  		if doubleCheck {
  3261  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  3262  		}
  3263  		if doubleCheckHeapSetType {
  3264  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  3265  		}
  3266  		c.scanAlloc += scanSize
  3267  	}
  3268  
  3269  	publicationBarrier()
  3270  
  3271  	if writeBarrier.enabled {
  3272  
  3273  		gcmarknewobject(span, uintptr(x))
  3274  	} else {
  3275  
  3276  		span.freeIndexForScan = span.freeindex
  3277  	}
  3278  
  3279  	c.nextSample -= int64(elemsize)
  3280  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  3281  		profilealloc(mp, x, elemsize)
  3282  	}
  3283  	mp.mallocing = 0
  3284  	releasem(mp)
  3285  
  3286  	if checkGCTrigger {
  3287  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  3288  			gcStart(t)
  3289  		}
  3290  	}
  3291  	gp := getg()
  3292  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  3293  
  3294  		addSecret(x, size)
  3295  	}
  3296  
  3297  	if valgrindenabled {
  3298  		valgrindMalloc(x, size)
  3299  	}
  3300  
  3301  	if gcBlackenEnabled != 0 && elemsize != 0 {
  3302  		if assistG := getg().m.curg; assistG != nil {
  3303  			assistG.gcAssistBytes -= int64(elemsize - size)
  3304  		}
  3305  	}
  3306  
  3307  	if debug.malloc {
  3308  		postMallocgcDebug(x, elemsize, typ)
  3309  	}
  3310  	return x
  3311  }
  3312  
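        // mallocgcSmallScanNoHeaderSC21 is like mallocgcSmallScanNoHeaderSC12, but
        // for size class 21 (352-byte objects).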
  3313  func mallocgcSmallScanNoHeaderSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  3314  
  3315  	if doubleCheckMalloc {
  3316  		if gcphase == _GCmarktermination {
  3317  			throw("mallocgc called with gcphase == _GCmarktermination")
  3318  		}
  3319  	}
  3320  
  3321  	lockRankMayQueueFinalizer()
  3322  
  3323  	if debug.malloc {
  3324  		if x := preMallocgcDebug(size, typ); x != nil {
  3325  			return x
  3326  		}
  3327  	}
  3328  
  3329  	if gcBlackenEnabled != 0 {
  3330  		deductAssistCredit(size)
  3331  	}
  3332  
  3333  	const sizeclass = 21
  3334  
  3335  	const elemsize = 352
  3336  
  3337  	mp := acquirem()
  3338  	if doubleCheckMalloc {
  3339  		doubleCheckSmallScanNoHeader(size, typ, mp)
  3340  	}
  3341  	mp.mallocing = 1
  3342  
  3343  	checkGCTrigger := false
  3344  	c := getMCache(mp)
  3345  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  3346  	span := c.alloc[spc]
  3347  
  3348  	var nextFreeFastResult gclinkptr
  3349  	if span.allocCache != 0 {
  3350  		theBit := sys.TrailingZeros64(span.allocCache)
  3351  		result := span.freeindex + uint16(theBit)
  3352  		if result < span.nelems {
  3353  			freeidx := result + 1
  3354  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  3355  				span.allocCache >>= uint(theBit + 1)
  3356  				span.freeindex = freeidx
  3357  				span.allocCount++
  3358  				nextFreeFastResult = gclinkptr(uintptr(result)*352 + span.base())
  3361  			}
  3362  		}
  3363  	}
  3364  	v := nextFreeFastResult
  3365  	if v == 0 {
  3366  		v, span, checkGCTrigger = c.nextFree(spc)
  3367  	}
  3368  	x := unsafe.Pointer(v)
  3369  	if span.needzero != 0 {
  3370  		memclrNoHeapPointers(x, elemsize)
  3371  	}
  3372  	if goarch.PtrSize == 8 && sizeclass == 1 {
  3373  
  3374  		c.scanAlloc += 8
  3375  	} else {
  3376  		dataSize := size
  3377  		x := uintptr(x)
  3378  
  3379  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(352)) {
  3380  			throw("tried to write heap bits, but no heap bits in span")
  3381  		}
  3382  
  3383  		src0 := readUintptr(getGCMask(typ))
  3384  
  3385  		const elemsize = 352
  3386  
  3387  		scanSize := typ.PtrBytes
  3388  		src := src0
  3389  		if typ.Size_ == goarch.PtrSize {
  3390  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  3391  		} else {
  3392  
  3393  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  3394  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  3395  			}
  3396  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  3397  				src |= src0 << (i / goarch.PtrSize)
  3398  				scanSize += typ.Size_
  3399  			}
  3400  		}
  3401  
  3402  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  3403  		dst := unsafe.Pointer(dstBase)
  3404  		o := (x - span.base()) / goarch.PtrSize
  3405  		i := o / ptrBits
  3406  		j := o % ptrBits
  3407  		const bits uintptr = elemsize / goarch.PtrSize
  3408  
  3409  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  3410  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  3411  
  3412  			bits0 := ptrBits - j
  3413  			bits1 := bits - bits0
  3414  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  3415  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  3416  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  3417  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  3418  		} else {
  3419  
  3420  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  3421  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  3422  		}
  3423  
  3424  		const doubleCheck = false
  3425  		if doubleCheck {
  3426  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  3427  		}
  3428  		if doubleCheckHeapSetType {
  3429  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  3430  		}
  3431  		c.scanAlloc += scanSize
  3432  	}
  3433  
  3434  	publicationBarrier()
  3435  
  3436  	if writeBarrier.enabled {
  3437  
  3438  		gcmarknewobject(span, uintptr(x))
  3439  	} else {
  3440  
  3441  		span.freeIndexForScan = span.freeindex
  3442  	}
  3443  
  3444  	c.nextSample -= int64(elemsize)
  3445  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  3446  		profilealloc(mp, x, elemsize)
  3447  	}
  3448  	mp.mallocing = 0
  3449  	releasem(mp)
  3450  
  3451  	if checkGCTrigger {
  3452  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  3453  			gcStart(t)
  3454  		}
  3455  	}
  3456  	gp := getg()
  3457  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  3458  
  3459  		addSecret(x, size)
  3460  	}
  3461  
  3462  	if valgrindenabled {
  3463  		valgrindMalloc(x, size)
  3464  	}
  3465  
  3466  	if gcBlackenEnabled != 0 && elemsize != 0 {
  3467  		if assistG := getg().m.curg; assistG != nil {
  3468  			assistG.gcAssistBytes -= int64(elemsize - size)
  3469  		}
  3470  	}
  3471  
  3472  	if debug.malloc {
  3473  		postMallocgcDebug(x, elemsize, typ)
  3474  	}
  3475  	return x
  3476  }
  3477  
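        // mallocgcSmallScanNoHeaderSC22 is like mallocgcSmallScanNoHeaderSC12, but
        // for size class 22 (384-byte objects).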
  3478  func mallocgcSmallScanNoHeaderSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  3479  
  3480  	if doubleCheckMalloc {
  3481  		if gcphase == _GCmarktermination {
  3482  			throw("mallocgc called with gcphase == _GCmarktermination")
  3483  		}
  3484  	}
  3485  
  3486  	lockRankMayQueueFinalizer()
  3487  
  3488  	if debug.malloc {
  3489  		if x := preMallocgcDebug(size, typ); x != nil {
  3490  			return x
  3491  		}
  3492  	}
  3493  
  3494  	if gcBlackenEnabled != 0 {
  3495  		deductAssistCredit(size)
  3496  	}
  3497  
  3498  	const sizeclass = 22
  3499  
  3500  	const elemsize = 384
  3501  
  3502  	mp := acquirem()
  3503  	if doubleCheckMalloc {
  3504  		doubleCheckSmallScanNoHeader(size, typ, mp)
  3505  	}
  3506  	mp.mallocing = 1
  3507  
  3508  	checkGCTrigger := false
  3509  	c := getMCache(mp)
  3510  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  3511  	span := c.alloc[spc]
  3512  
  3513  	var nextFreeFastResult gclinkptr
  3514  	if span.allocCache != 0 {
  3515  		theBit := sys.TrailingZeros64(span.allocCache)
  3516  		result := span.freeindex + uint16(theBit)
  3517  		if result < span.nelems {
  3518  			freeidx := result + 1
  3519  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  3520  				span.allocCache >>= uint(theBit + 1)
  3521  				span.freeindex = freeidx
  3522  				span.allocCount++
  3523  				nextFreeFastResult = gclinkptr(uintptr(result)*384 + span.base())
  3526  			}
  3527  		}
  3528  	}
  3529  	v := nextFreeFastResult
  3530  	if v == 0 {
  3531  		v, span, checkGCTrigger = c.nextFree(spc)
  3532  	}
  3533  	x := unsafe.Pointer(v)
  3534  	if span.needzero != 0 {
  3535  		memclrNoHeapPointers(x, elemsize)
  3536  	}
  3537  	if goarch.PtrSize == 8 && sizeclass == 1 {
  3538  
  3539  		c.scanAlloc += 8
  3540  	} else {
  3541  		dataSize := size
  3542  		x := uintptr(x)
  3543  
  3544  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(384)) {
  3545  			throw("tried to write heap bits, but no heap bits in span")
  3546  		}
  3547  
  3548  		src0 := readUintptr(getGCMask(typ))
  3549  
  3550  		const elemsize = 384
  3551  
  3552  		scanSize := typ.PtrBytes
  3553  		src := src0
  3554  		if typ.Size_ == goarch.PtrSize {
  3555  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  3556  		} else {
  3557  
  3558  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  3559  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  3560  			}
  3561  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  3562  				src |= src0 << (i / goarch.PtrSize)
  3563  				scanSize += typ.Size_
  3564  			}
  3565  		}
  3566  
  3567  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  3568  		dst := unsafe.Pointer(dstBase)
  3569  		o := (x - span.base()) / goarch.PtrSize
  3570  		i := o / ptrBits
  3571  		j := o % ptrBits
  3572  		const bits uintptr = elemsize / goarch.PtrSize
  3573  
  3574  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  3575  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  3576  
  3577  			bits0 := ptrBits - j
  3578  			bits1 := bits - bits0
  3579  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  3580  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  3581  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  3582  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  3583  		} else {
  3584  
  3585  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  3586  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  3587  		}
  3588  
  3589  		const doubleCheck = false
  3590  		if doubleCheck {
  3591  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  3592  		}
  3593  		if doubleCheckHeapSetType {
  3594  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  3595  		}
  3596  		c.scanAlloc += scanSize
  3597  	}
  3598  
  3599  	publicationBarrier()
  3600  
  3601  	if writeBarrier.enabled {
  3602  
  3603  		gcmarknewobject(span, uintptr(x))
  3604  	} else {
  3605  
  3606  		span.freeIndexForScan = span.freeindex
  3607  	}
  3608  
  3609  	c.nextSample -= int64(elemsize)
  3610  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  3611  		profilealloc(mp, x, elemsize)
  3612  	}
  3613  	mp.mallocing = 0
  3614  	releasem(mp)
  3615  
  3616  	if checkGCTrigger {
  3617  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  3618  			gcStart(t)
  3619  		}
  3620  	}
  3621  	gp := getg()
  3622  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  3623  
  3624  		addSecret(x, size)
  3625  	}
  3626  
  3627  	if valgrindenabled {
  3628  		valgrindMalloc(x, size)
  3629  	}
  3630  
  3631  	if gcBlackenEnabled != 0 && elemsize != 0 {
  3632  		if assistG := getg().m.curg; assistG != nil {
  3633  			assistG.gcAssistBytes -= int64(elemsize - size)
  3634  		}
  3635  	}
  3636  
  3637  	if debug.malloc {
  3638  		postMallocgcDebug(x, elemsize, typ)
  3639  	}
  3640  	return x
  3641  }
  3642  
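        // mallocgcSmallScanNoHeaderSC23 is like mallocgcSmallScanNoHeaderSC12, but
        // for size class 23 (416-byte objects).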
  3643  func mallocgcSmallScanNoHeaderSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  3644  
  3645  	if doubleCheckMalloc {
  3646  		if gcphase == _GCmarktermination {
  3647  			throw("mallocgc called with gcphase == _GCmarktermination")
  3648  		}
  3649  	}
  3650  
  3651  	lockRankMayQueueFinalizer()
  3652  
  3653  	if debug.malloc {
  3654  		if x := preMallocgcDebug(size, typ); x != nil {
  3655  			return x
  3656  		}
  3657  	}
  3658  
  3659  	if gcBlackenEnabled != 0 {
  3660  		deductAssistCredit(size)
  3661  	}
  3662  
  3663  	const sizeclass = 23
  3664  
  3665  	const elemsize = 416
  3666  
  3667  	mp := acquirem()
  3668  	if doubleCheckMalloc {
  3669  		doubleCheckSmallScanNoHeader(size, typ, mp)
  3670  	}
  3671  	mp.mallocing = 1
  3672  
  3673  	checkGCTrigger := false
  3674  	c := getMCache(mp)
  3675  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  3676  	span := c.alloc[spc]
  3677  
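        	// Inlined nextFreeFast: pop the next free slot from the span's cached
        	// allocation bitmap; on a miss, fall back to c.nextFree below.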
  3678  	var nextFreeFastResult gclinkptr
  3679  	if span.allocCache != 0 {
  3680  		theBit := sys.TrailingZeros64(span.allocCache)
  3681  		result := span.freeindex + uint16(theBit)
  3682  		if result < span.nelems {
  3683  			freeidx := result + 1
  3684  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  3685  				span.allocCache >>= uint(theBit + 1)
  3686  				span.freeindex = freeidx
  3687  				span.allocCount++
  3688  				nextFreeFastResult = gclinkptr(uintptr(result)*416 + span.base())
  3691  			}
  3692  		}
  3693  	}
  3694  	v := nextFreeFastResult
  3695  	if v == 0 {
  3696  		v, span, checkGCTrigger = c.nextFree(spc)
  3697  	}
  3698  	x := unsafe.Pointer(v)
  3699  	if span.needzero != 0 {
  3700  		memclrNoHeapPointers(x, elemsize)
  3701  	}
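        	// sizeclass is a constant (23) here, so the compiler statically selects
        	// the else branch; the 8-byte special case survives from the shared template.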
  3702  	if goarch.PtrSize == 8 && sizeclass == 1 {
  3703  
  3704  		c.scanAlloc += 8
  3705  	} else {
  3706  		dataSize := size
  3707  		x := uintptr(x)
  3708  
  3709  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(416)) {
  3710  			throw("tried to write heap bits, but no heap bits in span")
  3711  		}
  3712  
  3713  		src0 := readUintptr(getGCMask(typ))
  3714  
  3715  		const elemsize = 416
  3716  
  3717  		scanSize := typ.PtrBytes
  3718  		src := src0
  3719  		if typ.Size_ == goarch.PtrSize {
  3720  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  3721  		} else {
  3722  
  3723  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  3724  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  3725  			}
  3726  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  3727  				src |= src0 << (i / goarch.PtrSize)
  3728  				scanSize += typ.Size_
  3729  			}
  3730  		}
  3731  
  3732  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  3733  		dst := unsafe.Pointer(dstBase)
  3734  		o := (x - span.base()) / goarch.PtrSize
  3735  		i := o / ptrBits
  3736  		j := o % ptrBits
  3737  		const bits uintptr = elemsize / goarch.PtrSize
  3738  
  3739  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  3740  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  3741  
  3742  			bits0 := ptrBits - j
  3743  			bits1 := bits - bits0
  3744  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  3745  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  3746  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  3747  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  3748  		} else {
  3749  
  3750  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  3751  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  3752  		}
  3753  
  3754  		const doubleCheck = false
  3755  		if doubleCheck {
  3756  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  3757  		}
  3758  		if doubleCheckHeapSetType {
  3759  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  3760  		}
  3761  		c.scanAlloc += scanSize
  3762  	}
  3763  
  3764  	publicationBarrier()
  3765  
  3766  	if writeBarrier.enabled {
  3767  
  3768  		gcmarknewobject(span, uintptr(x))
  3769  	} else {
  3770  
  3771  		span.freeIndexForScan = span.freeindex
  3772  	}
  3773  
  3774  	c.nextSample -= int64(elemsize)
  3775  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  3776  		profilealloc(mp, x, elemsize)
  3777  	}
  3778  	mp.mallocing = 0
  3779  	releasem(mp)
  3780  
  3781  	if checkGCTrigger {
  3782  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  3783  			gcStart(t)
  3784  		}
  3785  	}
  3786  	gp := getg()
  3787  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  3788  
  3789  		addSecret(x, size)
  3790  	}
  3791  
  3792  	if valgrindenabled {
  3793  		valgrindMalloc(x, size)
  3794  	}
  3795  
  3796  	if gcBlackenEnabled != 0 && elemsize != 0 {
  3797  		if assistG := getg().m.curg; assistG != nil {
  3798  			assistG.gcAssistBytes -= int64(elemsize - size)
  3799  		}
  3800  	}
  3801  
  3802  	if debug.malloc {
  3803  		postMallocgcDebug(x, elemsize, typ)
  3804  	}
  3805  	return x
  3806  }
  3807  
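        // mallocgcSmallScanNoHeaderSC24 is the size class 24 (448-byte element)
        // specialization of the small scan, no-header allocation path.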
  3808  func mallocgcSmallScanNoHeaderSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  3809  
  3810  	if doubleCheckMalloc {
  3811  		if gcphase == _GCmarktermination {
  3812  			throw("mallocgc called with gcphase == _GCmarktermination")
  3813  		}
  3814  	}
  3815  
  3816  	lockRankMayQueueFinalizer()
  3817  
  3818  	if debug.malloc {
  3819  		if x := preMallocgcDebug(size, typ); x != nil {
  3820  			return x
  3821  		}
  3822  	}
  3823  
  3824  	if gcBlackenEnabled != 0 {
  3825  		deductAssistCredit(size)
  3826  	}
  3827  
  3828  	const sizeclass = 24
  3829  
  3830  	const elemsize = 448
  3831  
  3832  	mp := acquirem()
  3833  	if doubleCheckMalloc {
  3834  		doubleCheckSmallScanNoHeader(size, typ, mp)
  3835  	}
  3836  	mp.mallocing = 1
  3837  
  3838  	checkGCTrigger := false
  3839  	c := getMCache(mp)
  3840  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  3841  	span := c.alloc[spc]
  3842  
  3843  	var nextFreeFastResult gclinkptr
  3844  	if span.allocCache != 0 {
  3845  		theBit := sys.TrailingZeros64(span.allocCache)
  3846  		result := span.freeindex + uint16(theBit)
  3847  		if result < span.nelems {
  3848  			freeidx := result + 1
  3849  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  3850  				span.allocCache >>= uint(theBit + 1)
  3851  				span.freeindex = freeidx
  3852  				span.allocCount++
  3853  				nextFreeFastResult = gclinkptr(uintptr(result)*448 + span.base())
  3856  			}
  3857  		}
  3858  	}
  3859  	v := nextFreeFastResult
  3860  	if v == 0 {
  3861  		v, span, checkGCTrigger = c.nextFree(spc)
  3862  	}
  3863  	x := unsafe.Pointer(v)
  3864  	if span.needzero != 0 {
  3865  		memclrNoHeapPointers(x, elemsize)
  3866  	}
  3867  	if goarch.PtrSize == 8 && sizeclass == 1 {
  3868  
  3869  		c.scanAlloc += 8
  3870  	} else {
  3871  		dataSize := size
  3872  		x := uintptr(x)
  3873  
  3874  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(448)) {
  3875  			throw("tried to write heap bits, but no heap bits in span")
  3876  		}
  3877  
  3878  		src0 := readUintptr(getGCMask(typ))
  3879  
  3880  		const elemsize = 448
  3881  
  3882  		scanSize := typ.PtrBytes
  3883  		src := src0
  3884  		if typ.Size_ == goarch.PtrSize {
  3885  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  3886  		} else {
  3887  
  3888  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  3889  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  3890  			}
  3891  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  3892  				src |= src0 << (i / goarch.PtrSize)
  3893  				scanSize += typ.Size_
  3894  			}
  3895  		}
  3896  
  3897  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  3898  		dst := unsafe.Pointer(dstBase)
  3899  		o := (x - span.base()) / goarch.PtrSize
  3900  		i := o / ptrBits
  3901  		j := o % ptrBits
  3902  		const bits uintptr = elemsize / goarch.PtrSize
  3903  
  3904  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  3905  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  3906  
  3907  			bits0 := ptrBits - j
  3908  			bits1 := bits - bits0
  3909  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  3910  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  3911  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  3912  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  3913  		} else {
  3914  
  3915  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  3916  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  3917  		}
  3918  
  3919  		const doubleCheck = false
  3920  		if doubleCheck {
  3921  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  3922  		}
  3923  		if doubleCheckHeapSetType {
  3924  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  3925  		}
  3926  		c.scanAlloc += scanSize
  3927  	}
  3928  
  3929  	publicationBarrier()
  3930  
  3931  	if writeBarrier.enabled {
  3932  
  3933  		gcmarknewobject(span, uintptr(x))
  3934  	} else {
  3935  
  3936  		span.freeIndexForScan = span.freeindex
  3937  	}
  3938  
  3939  	c.nextSample -= int64(elemsize)
  3940  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  3941  		profilealloc(mp, x, elemsize)
  3942  	}
  3943  	mp.mallocing = 0
  3944  	releasem(mp)
  3945  
  3946  	if checkGCTrigger {
  3947  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  3948  			gcStart(t)
  3949  		}
  3950  	}
  3951  	gp := getg()
  3952  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  3953  
  3954  		addSecret(x, size)
  3955  	}
  3956  
  3957  	if valgrindenabled {
  3958  		valgrindMalloc(x, size)
  3959  	}
  3960  
  3961  	if gcBlackenEnabled != 0 && elemsize != 0 {
  3962  		if assistG := getg().m.curg; assistG != nil {
  3963  			assistG.gcAssistBytes -= int64(elemsize - size)
  3964  		}
  3965  	}
  3966  
  3967  	if debug.malloc {
  3968  		postMallocgcDebug(x, elemsize, typ)
  3969  	}
  3970  	return x
  3971  }
  3972  
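        // mallocgcSmallScanNoHeaderSC25 is the size class 25 (480-byte element)
        // specialization of the small scan, no-header allocation path.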
  3973  func mallocgcSmallScanNoHeaderSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  3974  
  3975  	if doubleCheckMalloc {
  3976  		if gcphase == _GCmarktermination {
  3977  			throw("mallocgc called with gcphase == _GCmarktermination")
  3978  		}
  3979  	}
  3980  
  3981  	lockRankMayQueueFinalizer()
  3982  
  3983  	if debug.malloc {
  3984  		if x := preMallocgcDebug(size, typ); x != nil {
  3985  			return x
  3986  		}
  3987  	}
  3988  
  3989  	if gcBlackenEnabled != 0 {
  3990  		deductAssistCredit(size)
  3991  	}
  3992  
  3993  	const sizeclass = 25
  3994  
  3995  	const elemsize = 480
  3996  
  3997  	mp := acquirem()
  3998  	if doubleCheckMalloc {
  3999  		doubleCheckSmallScanNoHeader(size, typ, mp)
  4000  	}
  4001  	mp.mallocing = 1
  4002  
  4003  	checkGCTrigger := false
  4004  	c := getMCache(mp)
  4005  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  4006  	span := c.alloc[spc]
  4007  
  4008  	var nextFreeFastResult gclinkptr
  4009  	if span.allocCache != 0 {
  4010  		theBit := sys.TrailingZeros64(span.allocCache)
  4011  		result := span.freeindex + uint16(theBit)
  4012  		if result < span.nelems {
  4013  			freeidx := result + 1
  4014  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  4015  				span.allocCache >>= uint(theBit + 1)
  4016  				span.freeindex = freeidx
  4017  				span.allocCount++
  4018  				nextFreeFastResult = gclinkptr(uintptr(result)*480 + span.base())
  4021  			}
  4022  		}
  4023  	}
  4024  	v := nextFreeFastResult
  4025  	if v == 0 {
  4026  		v, span, checkGCTrigger = c.nextFree(spc)
  4027  	}
  4028  	x := unsafe.Pointer(v)
  4029  	if span.needzero != 0 {
  4030  		memclrNoHeapPointers(x, elemsize)
  4031  	}
  4032  	if goarch.PtrSize == 8 && sizeclass == 1 {
  4033  
  4034  		c.scanAlloc += 8
  4035  	} else {
  4036  		dataSize := size
  4037  		x := uintptr(x)
  4038  
  4039  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(480)) {
  4040  			throw("tried to write heap bits, but no heap bits in span")
  4041  		}
  4042  
  4043  		src0 := readUintptr(getGCMask(typ))
  4044  
  4045  		const elemsize = 480
  4046  
  4047  		scanSize := typ.PtrBytes
  4048  		src := src0
  4049  		if typ.Size_ == goarch.PtrSize {
  4050  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  4051  		} else {
  4052  
  4053  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  4054  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  4055  			}
  4056  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  4057  				src |= src0 << (i / goarch.PtrSize)
  4058  				scanSize += typ.Size_
  4059  			}
  4060  		}
  4061  
  4062  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  4063  		dst := unsafe.Pointer(dstBase)
  4064  		o := (x - span.base()) / goarch.PtrSize
  4065  		i := o / ptrBits
  4066  		j := o % ptrBits
  4067  		const bits uintptr = elemsize / goarch.PtrSize
  4068  
  4069  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  4070  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  4071  
  4072  			bits0 := ptrBits - j
  4073  			bits1 := bits - bits0
  4074  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  4075  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  4076  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  4077  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  4078  		} else {
  4079  
  4080  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  4081  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  4082  		}
  4083  
  4084  		const doubleCheck = false
  4085  		if doubleCheck {
  4086  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  4087  		}
  4088  		if doubleCheckHeapSetType {
  4089  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  4090  		}
  4091  		c.scanAlloc += scanSize
  4092  	}
  4093  
  4094  	publicationBarrier()
  4095  
  4096  	if writeBarrier.enabled {
  4097  
  4098  		gcmarknewobject(span, uintptr(x))
  4099  	} else {
  4100  
  4101  		span.freeIndexForScan = span.freeindex
  4102  	}
  4103  
  4104  	c.nextSample -= int64(elemsize)
  4105  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  4106  		profilealloc(mp, x, elemsize)
  4107  	}
  4108  	mp.mallocing = 0
  4109  	releasem(mp)
  4110  
  4111  	if checkGCTrigger {
  4112  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  4113  			gcStart(t)
  4114  		}
  4115  	}
  4116  	gp := getg()
  4117  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  4118  
  4119  		addSecret(x, size)
  4120  	}
  4121  
  4122  	if valgrindenabled {
  4123  		valgrindMalloc(x, size)
  4124  	}
  4125  
  4126  	if gcBlackenEnabled != 0 && elemsize != 0 {
  4127  		if assistG := getg().m.curg; assistG != nil {
  4128  			assistG.gcAssistBytes -= int64(elemsize - size)
  4129  		}
  4130  	}
  4131  
  4132  	if debug.malloc {
  4133  		postMallocgcDebug(x, elemsize, typ)
  4134  	}
  4135  	return x
  4136  }
  4137  
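        // mallocgcSmallScanNoHeaderSC26 is the size class 26 (512-byte element)
        // specialization of the small scan, no-header allocation path.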
  4138  func mallocgcSmallScanNoHeaderSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  4139  
  4140  	if doubleCheckMalloc {
  4141  		if gcphase == _GCmarktermination {
  4142  			throw("mallocgc called with gcphase == _GCmarktermination")
  4143  		}
  4144  	}
  4145  
  4146  	lockRankMayQueueFinalizer()
  4147  
  4148  	if debug.malloc {
  4149  		if x := preMallocgcDebug(size, typ); x != nil {
  4150  			return x
  4151  		}
  4152  	}
  4153  
  4154  	if gcBlackenEnabled != 0 {
  4155  		deductAssistCredit(size)
  4156  	}
  4157  
  4158  	const sizeclass = 26
  4159  
  4160  	const elemsize = 512
  4161  
  4162  	mp := acquirem()
  4163  	if doubleCheckMalloc {
  4164  		doubleCheckSmallScanNoHeader(size, typ, mp)
  4165  	}
  4166  	mp.mallocing = 1
  4167  
  4168  	checkGCTrigger := false
  4169  	c := getMCache(mp)
  4170  	const spc = spanClass(sizeclass<<1) | spanClass(0)
  4171  	span := c.alloc[spc]
  4172  
  4173  	var nextFreeFastResult gclinkptr
  4174  	if span.allocCache != 0 {
  4175  		theBit := sys.TrailingZeros64(span.allocCache)
  4176  		result := span.freeindex + uint16(theBit)
  4177  		if result < span.nelems {
  4178  			freeidx := result + 1
  4179  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  4180  				span.allocCache >>= uint(theBit + 1)
  4181  				span.freeindex = freeidx
  4182  				span.allocCount++
  4183  				nextFreeFastResult = gclinkptr(uintptr(result)*512 + span.base())
  4186  			}
  4187  		}
  4188  	}
  4189  	v := nextFreeFastResult
  4190  	if v == 0 {
  4191  		v, span, checkGCTrigger = c.nextFree(spc)
  4192  	}
  4193  	x := unsafe.Pointer(v)
  4194  	if span.needzero != 0 {
  4195  		memclrNoHeapPointers(x, elemsize)
  4196  	}
  4197  	if goarch.PtrSize == 8 && sizeclass == 1 {
  4198  
  4199  		c.scanAlloc += 8
  4200  	} else {
  4201  		dataSize := size
  4202  		x := uintptr(x)
  4203  
  4204  		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(512)) {
  4205  			throw("tried to write heap bits, but no heap bits in span")
  4206  		}
  4207  
  4208  		src0 := readUintptr(getGCMask(typ))
  4209  
  4210  		const elemsize = 512
  4211  
  4212  		scanSize := typ.PtrBytes
  4213  		src := src0
  4214  		if typ.Size_ == goarch.PtrSize {
  4215  			src = (1 << (dataSize / goarch.PtrSize)) - 1
  4216  		} else {
  4217  
  4218  			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
  4219  				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
  4220  			}
  4221  			for i := typ.Size_; i < dataSize; i += typ.Size_ {
  4222  				src |= src0 << (i / goarch.PtrSize)
  4223  				scanSize += typ.Size_
  4224  			}
  4225  		}
  4226  
  4227  		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
  4228  		dst := unsafe.Pointer(dstBase)
  4229  		o := (x - span.base()) / goarch.PtrSize
  4230  		i := o / ptrBits
  4231  		j := o % ptrBits
  4232  		const bits uintptr = elemsize / goarch.PtrSize
  4233  
  4234  		const bitsIsPowerOfTwo = bits&(bits-1) == 0
  4235  		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
  4236  
  4237  			bits0 := ptrBits - j
  4238  			bits1 := bits - bits0
  4239  			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
  4240  			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
  4241  			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
  4242  			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
  4243  		} else {
  4244  
  4245  			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
  4246  			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
  4247  		}
  4248  
  4249  		const doubleCheck = false
  4250  		if doubleCheck {
  4251  			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
  4252  		}
  4253  		if doubleCheckHeapSetType {
  4254  			doubleCheckHeapType(x, dataSize, typ, nil, span)
  4255  		}
  4256  		c.scanAlloc += scanSize
  4257  	}
  4258  
  4259  	publicationBarrier()
  4260  
  4261  	if writeBarrier.enabled {
  4262  
  4263  		gcmarknewobject(span, uintptr(x))
  4264  	} else {
  4265  
  4266  		span.freeIndexForScan = span.freeindex
  4267  	}
  4268  
  4269  	c.nextSample -= int64(elemsize)
  4270  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  4271  		profilealloc(mp, x, elemsize)
  4272  	}
  4273  	mp.mallocing = 0
  4274  	releasem(mp)
  4275  
  4276  	if checkGCTrigger {
  4277  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  4278  			gcStart(t)
  4279  		}
  4280  	}
  4281  	gp := getg()
  4282  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  4283  
  4284  		addSecret(x, size)
  4285  	}
  4286  
  4287  	if valgrindenabled {
  4288  		valgrindMalloc(x, size)
  4289  	}
  4290  
  4291  	if gcBlackenEnabled != 0 && elemsize != 0 {
  4292  		if assistG := getg().m.curg; assistG != nil {
  4293  			assistG.gcAssistBytes -= int64(elemsize - size)
  4294  		}
  4295  	}
  4296  
  4297  	if debug.malloc {
  4298  		postMallocgcDebug(x, elemsize, typ)
  4299  	}
  4300  	return x
  4301  }
  4302  
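        // mallocgcTinySize1 is the tiny-allocator specialization for 1-byte noscan
        // allocations; objects are packed into shared 16-byte blocks.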
  4303  func mallocgcTinySize1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  4304  
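        	// Goroutines holding runtime secrets bypass the tiny allocator, likely
        	// because tiny blocks co-locate unrelated objects; they take the small
        	// no-scan path instead.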
  4305  	gp := getg()
  4306  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  4307  		return mallocgcSmallNoScanSC2(size, typ, needzero)
  4308  	}
  4309  
  4310  	if doubleCheckMalloc {
  4311  		if gcphase == _GCmarktermination {
  4312  			throw("mallocgc called with gcphase == _GCmarktermination")
  4313  		}
  4314  	}
  4315  
  4316  	lockRankMayQueueFinalizer()
  4317  
  4318  	if debug.malloc {
  4319  		if x := preMallocgcDebug(size, typ); x != nil {
  4320  			return x
  4321  		}
  4322  	}
  4323  
  4324  	if gcBlackenEnabled != 0 {
  4325  		deductAssistCredit(size)
  4326  	}
  4327  
  4328  	const constsize = 1
  4329  
  4330  	const elemsize = 16
  4331  
  4332  	mp := acquirem()
  4333  	if doubleCheckMalloc {
  4334  		doubleCheckTiny(constsize, typ, mp)
  4335  	}
  4336  	mp.mallocing = 1
  4337  
  4338  	c := getMCache(mp)
  4339  	off := c.tinyoffset
  4340  
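        	// Align the offset into the current tiny block to the allocation's
        	// natural alignment. constsize is fixed, so all but one branch folds away.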
  4341  	if constsize&7 == 0 {
  4342  		off = alignUp(off, 8)
  4343  	} else if goarch.PtrSize == 4 && constsize == 12 {
  4344  
  4345  		off = alignUp(off, 8)
  4346  	} else if constsize&3 == 0 {
  4347  		off = alignUp(off, 4)
  4348  	} else if constsize&1 == 0 {
  4349  		off = alignUp(off, 2)
  4350  	}
  4351  	if off+constsize <= maxTinySize && c.tiny != 0 {
  4352  
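        		// Fast path: the object fits in the existing tiny block.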
  4353  		x := unsafe.Pointer(c.tiny + off)
  4354  		c.tinyoffset = off + constsize
  4355  		c.tinyAllocs++
  4356  		mp.mallocing = 0
  4357  		releasem(mp)
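        		// A suballocation from an existing block consumes no new span memory,
        		// so elemsize is 0 here and the assist-debt adjustment below is skipped.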
  4358  		const elemsize = 0
  4359  		{
  4360  
  4361  			if valgrindenabled {
  4362  				valgrindMalloc(x, size)
  4363  			}
  4364  
  4365  			if gcBlackenEnabled != 0 && elemsize != 0 {
  4366  				if assistG := getg().m.curg; assistG != nil {
  4367  					assistG.gcAssistBytes -= int64(elemsize - size)
  4368  				}
  4369  			}
  4370  
  4371  			if debug.malloc {
  4372  				postMallocgcDebug(x, elemsize, typ)
  4373  			}
  4374  			return x
  4375  		}
  4376  
  4377  	}
  4378  
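        	// Slow path: carve a fresh 16-byte block out of the tiny span.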
  4379  	checkGCTrigger := false
  4380  	span := c.alloc[tinySpanClass]
  4381  
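        	// Tiny spans are a single 8KiB page; nelems appears to discount space the
        	// span reserves for its inline mark bits.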
  4382  	const nbytes = 8192
  4383  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  4386  	var nextFreeFastResult gclinkptr
  4387  	if span.allocCache != 0 {
  4388  		theBit := sys.TrailingZeros64(span.allocCache)
  4389  		result := span.freeindex + uint16(theBit)
  4390  		if result < nelems {
  4391  			freeidx := result + 1
  4392  			if !(freeidx%64 == 0 && freeidx != nelems) {
  4393  				span.allocCache >>= uint(theBit + 1)
  4394  				span.freeindex = freeidx
  4395  				span.allocCount++
  4396  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  4399  			}
  4400  		}
  4401  	}
  4402  	v := nextFreeFastResult
  4403  	if v == 0 {
  4404  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  4405  	}
  4406  	x := unsafe.Pointer(v)
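        	// The two stores below zero the entire 16-byte block.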
  4407  	(*[2]uint64)(x)[0] = 0
  4408  	(*[2]uint64)(x)[1] = 0
  4409  
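        	// Adopt the new block as the current tiny block if it leaves more free
        	// space than the old one.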
  4410  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  4411  
  4412  		c.tiny = uintptr(x)
  4413  		c.tinyoffset = constsize
  4414  	}
  4415  
  4416  	publicationBarrier()
  4417  
  4418  	if writeBarrier.enabled {
  4419  
  4420  		gcmarknewobject(span, uintptr(x))
  4421  	} else {
  4422  
  4423  		span.freeIndexForScan = span.freeindex
  4424  	}
  4425  
  4426  	c.nextSample -= int64(elemsize)
  4427  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  4428  		profilealloc(mp, x, elemsize)
  4429  	}
  4430  	mp.mallocing = 0
  4431  	releasem(mp)
  4432  
  4433  	if checkGCTrigger {
  4434  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  4435  			gcStart(t)
  4436  		}
  4437  	}
  4438  
  4439  	if raceenabled {
  4440  
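        		// Pad the allocation to the end of the block so the race detector sees
        		// a distinct address for each tiny object.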
  4441  		x = add(x, elemsize-constsize)
  4442  	}
  4443  	if valgrindenabled {
  4444  		valgrindMalloc(x, size)
  4445  	}
  4446  
  4447  	if gcBlackenEnabled != 0 && elemsize != 0 {
  4448  		if assistG := getg().m.curg; assistG != nil {
  4449  			assistG.gcAssistBytes -= int64(elemsize - size)
  4450  		}
  4451  	}
  4452  
  4453  	if debug.malloc {
  4454  		postMallocgcDebug(x, elemsize, typ)
  4455  	}
  4456  	return x
  4457  }
  4458  
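        // mallocgcTinySize2 is the tiny-allocator specialization for 2-byte noscan
        // allocations; objects are packed into shared 16-byte blocks.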
  4459  func mallocgcTinySize2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  4460  
  4461  	gp := getg()
  4462  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  4463  		return mallocgcSmallNoScanSC2(size, typ, needzero)
  4464  	}
  4465  
  4466  	if doubleCheckMalloc {
  4467  		if gcphase == _GCmarktermination {
  4468  			throw("mallocgc called with gcphase == _GCmarktermination")
  4469  		}
  4470  	}
  4471  
  4472  	lockRankMayQueueFinalizer()
  4473  
  4474  	if debug.malloc {
  4475  		if x := preMallocgcDebug(size, typ); x != nil {
  4476  			return x
  4477  		}
  4478  	}
  4479  
  4480  	if gcBlackenEnabled != 0 {
  4481  		deductAssistCredit(size)
  4482  	}
  4483  
  4484  	const constsize = 2
  4485  
  4486  	const elemsize = 16
  4487  
  4488  	mp := acquirem()
  4489  	if doubleCheckMalloc {
  4490  		doubleCheckTiny(constsize, typ, mp)
  4491  	}
  4492  	mp.mallocing = 1
  4493  
  4494  	c := getMCache(mp)
  4495  	off := c.tinyoffset
  4496  
  4497  	if constsize&7 == 0 {
  4498  		off = alignUp(off, 8)
  4499  	} else if goarch.PtrSize == 4 && constsize == 12 {
  4500  
  4501  		off = alignUp(off, 8)
  4502  	} else if constsize&3 == 0 {
  4503  		off = alignUp(off, 4)
  4504  	} else if constsize&1 == 0 {
  4505  		off = alignUp(off, 2)
  4506  	}
  4507  	if off+constsize <= maxTinySize && c.tiny != 0 {
  4508  
  4509  		x := unsafe.Pointer(c.tiny + off)
  4510  		c.tinyoffset = off + constsize
  4511  		c.tinyAllocs++
  4512  		mp.mallocing = 0
  4513  		releasem(mp)
  4514  		const elemsize = 0
  4515  		{
  4516  
  4517  			if valgrindenabled {
  4518  				valgrindMalloc(x, size)
  4519  			}
  4520  
  4521  			if gcBlackenEnabled != 0 && elemsize != 0 {
  4522  				if assistG := getg().m.curg; assistG != nil {
  4523  					assistG.gcAssistBytes -= int64(elemsize - size)
  4524  				}
  4525  			}
  4526  
  4527  			if debug.malloc {
  4528  				postMallocgcDebug(x, elemsize, typ)
  4529  			}
  4530  			return x
  4531  		}
  4532  
  4533  	}
  4534  
  4535  	checkGCTrigger := false
  4536  	span := c.alloc[tinySpanClass]
  4537  
  4538  	const nbytes = 8192
  4539  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  4542  	var nextFreeFastResult gclinkptr
  4543  	if span.allocCache != 0 {
  4544  		theBit := sys.TrailingZeros64(span.allocCache)
  4545  		result := span.freeindex + uint16(theBit)
  4546  		if result < nelems {
  4547  			freeidx := result + 1
  4548  			if !(freeidx%64 == 0 && freeidx != nelems) {
  4549  				span.allocCache >>= uint(theBit + 1)
  4550  				span.freeindex = freeidx
  4551  				span.allocCount++
  4552  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  4555  			}
  4556  		}
  4557  	}
  4558  	v := nextFreeFastResult
  4559  	if v == 0 {
  4560  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  4561  	}
  4562  	x := unsafe.Pointer(v)
  4563  	(*[2]uint64)(x)[0] = 0
  4564  	(*[2]uint64)(x)[1] = 0
  4565  
  4566  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  4567  
  4568  		c.tiny = uintptr(x)
  4569  		c.tinyoffset = constsize
  4570  	}
  4571  
  4572  	publicationBarrier()
  4573  
  4574  	if writeBarrier.enabled {
  4575  
  4576  		gcmarknewobject(span, uintptr(x))
  4577  	} else {
  4578  
  4579  		span.freeIndexForScan = span.freeindex
  4580  	}
  4581  
  4582  	c.nextSample -= int64(elemsize)
  4583  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  4584  		profilealloc(mp, x, elemsize)
  4585  	}
  4586  	mp.mallocing = 0
  4587  	releasem(mp)
  4588  
  4589  	if checkGCTrigger {
  4590  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  4591  			gcStart(t)
  4592  		}
  4593  	}
  4594  
  4595  	if raceenabled {
  4596  
  4597  		x = add(x, elemsize-constsize)
  4598  	}
  4599  	if valgrindenabled {
  4600  		valgrindMalloc(x, size)
  4601  	}
  4602  
  4603  	if gcBlackenEnabled != 0 && elemsize != 0 {
  4604  		if assistG := getg().m.curg; assistG != nil {
  4605  			assistG.gcAssistBytes -= int64(elemsize - size)
  4606  		}
  4607  	}
  4608  
  4609  	if debug.malloc {
  4610  		postMallocgcDebug(x, elemsize, typ)
  4611  	}
  4612  	return x
  4613  }
  4614  
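        // mallocgcTinySize3 is the tiny-allocator specialization for 3-byte noscan
        // allocations; objects are packed into shared 16-byte blocks.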
  4615  func mallocgcTinySize3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  4616  
  4617  	gp := getg()
  4618  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  4619  		return mallocgcSmallNoScanSC2(size, typ, needzero)
  4620  	}
  4621  
  4622  	if doubleCheckMalloc {
  4623  		if gcphase == _GCmarktermination {
  4624  			throw("mallocgc called with gcphase == _GCmarktermination")
  4625  		}
  4626  	}
  4627  
  4628  	lockRankMayQueueFinalizer()
  4629  
  4630  	if debug.malloc {
  4631  		if x := preMallocgcDebug(size, typ); x != nil {
  4632  			return x
  4633  		}
  4634  	}
  4635  
  4636  	if gcBlackenEnabled != 0 {
  4637  		deductAssistCredit(size)
  4638  	}
  4639  
  4640  	const constsize = 3
  4641  
  4642  	const elemsize = 16
  4643  
  4644  	mp := acquirem()
  4645  	if doubleCheckMalloc {
  4646  		doubleCheckTiny(constsize, typ, mp)
  4647  	}
  4648  	mp.mallocing = 1
  4649  
  4650  	c := getMCache(mp)
  4651  	off := c.tinyoffset
  4652  
  4653  	if constsize&7 == 0 {
  4654  		off = alignUp(off, 8)
  4655  	} else if goarch.PtrSize == 4 && constsize == 12 {
  4656  
  4657  		off = alignUp(off, 8)
  4658  	} else if constsize&3 == 0 {
  4659  		off = alignUp(off, 4)
  4660  	} else if constsize&1 == 0 {
  4661  		off = alignUp(off, 2)
  4662  	}
  4663  	if off+constsize <= maxTinySize && c.tiny != 0 {
  4664  
  4665  		x := unsafe.Pointer(c.tiny + off)
  4666  		c.tinyoffset = off + constsize
  4667  		c.tinyAllocs++
  4668  		mp.mallocing = 0
  4669  		releasem(mp)
  4670  		const elemsize = 0
  4671  		{
  4672  
  4673  			if valgrindenabled {
  4674  				valgrindMalloc(x, size)
  4675  			}
  4676  
  4677  			if gcBlackenEnabled != 0 && elemsize != 0 {
  4678  				if assistG := getg().m.curg; assistG != nil {
  4679  					assistG.gcAssistBytes -= int64(elemsize - size)
  4680  				}
  4681  			}
  4682  
  4683  			if debug.malloc {
  4684  				postMallocgcDebug(x, elemsize, typ)
  4685  			}
  4686  			return x
  4687  		}
  4688  
  4689  	}
  4690  
  4691  	checkGCTrigger := false
  4692  	span := c.alloc[tinySpanClass]
  4693  
  4694  	const nbytes = 8192
  4695  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  4698  	var nextFreeFastResult gclinkptr
  4699  	if span.allocCache != 0 {
  4700  		theBit := sys.TrailingZeros64(span.allocCache)
  4701  		result := span.freeindex + uint16(theBit)
  4702  		if result < nelems {
  4703  			freeidx := result + 1
  4704  			if !(freeidx%64 == 0 && freeidx != nelems) {
  4705  				span.allocCache >>= uint(theBit + 1)
  4706  				span.freeindex = freeidx
  4707  				span.allocCount++
  4708  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  4711  			}
  4712  		}
  4713  	}
  4714  	v := nextFreeFastResult
  4715  	if v == 0 {
  4716  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  4717  	}
  4718  	x := unsafe.Pointer(v)
  4719  	(*[2]uint64)(x)[0] = 0
  4720  	(*[2]uint64)(x)[1] = 0
  4721  
  4722  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  4723  
  4724  		c.tiny = uintptr(x)
  4725  		c.tinyoffset = constsize
  4726  	}
  4727  
  4728  	publicationBarrier()
  4729  
  4730  	if writeBarrier.enabled {
  4731  
  4732  		gcmarknewobject(span, uintptr(x))
  4733  	} else {
  4734  
  4735  		span.freeIndexForScan = span.freeindex
  4736  	}
  4737  
  4738  	c.nextSample -= int64(elemsize)
  4739  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  4740  		profilealloc(mp, x, elemsize)
  4741  	}
  4742  	mp.mallocing = 0
  4743  	releasem(mp)
  4744  
  4745  	if checkGCTrigger {
  4746  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  4747  			gcStart(t)
  4748  		}
  4749  	}
  4750  
  4751  	if raceenabled {
  4752  
  4753  		x = add(x, elemsize-constsize)
  4754  	}
  4755  	if valgrindenabled {
  4756  		valgrindMalloc(x, size)
  4757  	}
  4758  
  4759  	if gcBlackenEnabled != 0 && elemsize != 0 {
  4760  		if assistG := getg().m.curg; assistG != nil {
  4761  			assistG.gcAssistBytes -= int64(elemsize - size)
  4762  		}
  4763  	}
  4764  
  4765  	if debug.malloc {
  4766  		postMallocgcDebug(x, elemsize, typ)
  4767  	}
  4768  	return x
  4769  }
  4770  
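        // mallocgcTinySize4 is the tiny-allocator specialization for 4-byte noscan
        // allocations; objects are packed into shared 16-byte blocks.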
  4771  func mallocgcTinySize4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  4772  
  4773  	gp := getg()
  4774  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  4775  		return mallocgcSmallNoScanSC2(size, typ, needzero)
  4776  	}
  4777  
  4778  	if doubleCheckMalloc {
  4779  		if gcphase == _GCmarktermination {
  4780  			throw("mallocgc called with gcphase == _GCmarktermination")
  4781  		}
  4782  	}
  4783  
  4784  	lockRankMayQueueFinalizer()
  4785  
  4786  	if debug.malloc {
  4787  		if x := preMallocgcDebug(size, typ); x != nil {
  4788  			return x
  4789  		}
  4790  	}
  4791  
  4792  	if gcBlackenEnabled != 0 {
  4793  		deductAssistCredit(size)
  4794  	}
  4795  
  4796  	const constsize = 4
  4797  
  4798  	const elemsize = 16
  4799  
  4800  	mp := acquirem()
  4801  	if doubleCheckMalloc {
  4802  		doubleCheckTiny(constsize, typ, mp)
  4803  	}
  4804  	mp.mallocing = 1
  4805  
  4806  	c := getMCache(mp)
  4807  	off := c.tinyoffset
  4808  
  4809  	if constsize&7 == 0 {
  4810  		off = alignUp(off, 8)
  4811  	} else if goarch.PtrSize == 4 && constsize == 12 {
  4812  
  4813  		off = alignUp(off, 8)
  4814  	} else if constsize&3 == 0 {
  4815  		off = alignUp(off, 4)
  4816  	} else if constsize&1 == 0 {
  4817  		off = alignUp(off, 2)
  4818  	}
  4819  	if off+constsize <= maxTinySize && c.tiny != 0 {
  4820  
  4821  		x := unsafe.Pointer(c.tiny + off)
  4822  		c.tinyoffset = off + constsize
  4823  		c.tinyAllocs++
  4824  		mp.mallocing = 0
  4825  		releasem(mp)
  4826  		const elemsize = 0
  4827  		{
  4828  
  4829  			if valgrindenabled {
  4830  				valgrindMalloc(x, size)
  4831  			}
  4832  
  4833  			if gcBlackenEnabled != 0 && elemsize != 0 {
  4834  				if assistG := getg().m.curg; assistG != nil {
  4835  					assistG.gcAssistBytes -= int64(elemsize - size)
  4836  				}
  4837  			}
  4838  
  4839  			if debug.malloc {
  4840  				postMallocgcDebug(x, elemsize, typ)
  4841  			}
  4842  			return x
  4843  		}
  4844  
  4845  	}
  4846  
  4847  	checkGCTrigger := false
  4848  	span := c.alloc[tinySpanClass]
  4849  
  4850  	const nbytes = 8192
  4851  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  4854  	var nextFreeFastResult gclinkptr
  4855  	if span.allocCache != 0 {
  4856  		theBit := sys.TrailingZeros64(span.allocCache)
  4857  		result := span.freeindex + uint16(theBit)
  4858  		if result < nelems {
  4859  			freeidx := result + 1
  4860  			if !(freeidx%64 == 0 && freeidx != nelems) {
  4861  				span.allocCache >>= uint(theBit + 1)
  4862  				span.freeindex = freeidx
  4863  				span.allocCount++
  4864  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  4867  			}
  4868  		}
  4869  	}
  4870  	v := nextFreeFastResult
  4871  	if v == 0 {
  4872  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  4873  	}
  4874  	x := unsafe.Pointer(v)
  4875  	(*[2]uint64)(x)[0] = 0
  4876  	(*[2]uint64)(x)[1] = 0
  4877  
  4878  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  4879  
  4880  		c.tiny = uintptr(x)
  4881  		c.tinyoffset = constsize
  4882  	}
  4883  
  4884  	publicationBarrier()
  4885  
  4886  	if writeBarrier.enabled {
  4887  
  4888  		gcmarknewobject(span, uintptr(x))
  4889  	} else {
  4890  
  4891  		span.freeIndexForScan = span.freeindex
  4892  	}
  4893  
  4894  	c.nextSample -= int64(elemsize)
  4895  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  4896  		profilealloc(mp, x, elemsize)
  4897  	}
  4898  	mp.mallocing = 0
  4899  	releasem(mp)
  4900  
  4901  	if checkGCTrigger {
  4902  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  4903  			gcStart(t)
  4904  		}
  4905  	}
  4906  
  4907  	if raceenabled {
  4908  
  4909  		x = add(x, elemsize-constsize)
  4910  	}
  4911  	if valgrindenabled {
  4912  		valgrindMalloc(x, size)
  4913  	}
  4914  
  4915  	if gcBlackenEnabled != 0 && elemsize != 0 {
  4916  		if assistG := getg().m.curg; assistG != nil {
  4917  			assistG.gcAssistBytes -= int64(elemsize - size)
  4918  		}
  4919  	}
  4920  
  4921  	if debug.malloc {
  4922  		postMallocgcDebug(x, elemsize, typ)
  4923  	}
  4924  	return x
  4925  }
  4926  
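        // mallocgcTinySize5 is the tiny-allocator specialization for 5-byte noscan
        // allocations; objects are packed into shared 16-byte blocks.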
  4927  func mallocgcTinySize5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  4928  
  4929  	gp := getg()
  4930  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  4931  		return mallocgcSmallNoScanSC2(size, typ, needzero)
  4932  	}
  4933  
  4934  	if doubleCheckMalloc {
  4935  		if gcphase == _GCmarktermination {
  4936  			throw("mallocgc called with gcphase == _GCmarktermination")
  4937  		}
  4938  	}
  4939  
  4940  	lockRankMayQueueFinalizer()
  4941  
  4942  	if debug.malloc {
  4943  		if x := preMallocgcDebug(size, typ); x != nil {
  4944  			return x
  4945  		}
  4946  	}
  4947  
  4948  	if gcBlackenEnabled != 0 {
  4949  		deductAssistCredit(size)
  4950  	}
  4951  
  4952  	const constsize = 5
  4953  
  4954  	const elemsize = 16
  4955  
  4956  	mp := acquirem()
  4957  	if doubleCheckMalloc {
  4958  		doubleCheckTiny(constsize, typ, mp)
  4959  	}
  4960  	mp.mallocing = 1
  4961  
  4962  	c := getMCache(mp)
  4963  	off := c.tinyoffset
  4964  
  4965  	if constsize&7 == 0 {
  4966  		off = alignUp(off, 8)
  4967  	} else if goarch.PtrSize == 4 && constsize == 12 {
  4968  
  4969  		off = alignUp(off, 8)
  4970  	} else if constsize&3 == 0 {
  4971  		off = alignUp(off, 4)
  4972  	} else if constsize&1 == 0 {
  4973  		off = alignUp(off, 2)
  4974  	}
  4975  	if off+constsize <= maxTinySize && c.tiny != 0 {
  4976  
  4977  		x := unsafe.Pointer(c.tiny + off)
  4978  		c.tinyoffset = off + constsize
  4979  		c.tinyAllocs++
  4980  		mp.mallocing = 0
  4981  		releasem(mp)
  4982  		const elemsize = 0
  4983  		{
  4984  
  4985  			if valgrindenabled {
  4986  				valgrindMalloc(x, size)
  4987  			}
  4988  
  4989  			if gcBlackenEnabled != 0 && elemsize != 0 {
  4990  				if assistG := getg().m.curg; assistG != nil {
  4991  					assistG.gcAssistBytes -= int64(elemsize - size)
  4992  				}
  4993  			}
  4994  
  4995  			if debug.malloc {
  4996  				postMallocgcDebug(x, elemsize, typ)
  4997  			}
  4998  			return x
  4999  		}
  5000  
  5001  	}
  5002  
  5003  	checkGCTrigger := false
  5004  	span := c.alloc[tinySpanClass]
  5005  
  5006  	const nbytes = 8192
  5007  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  5010  	var nextFreeFastResult gclinkptr
  5011  	if span.allocCache != 0 {
  5012  		theBit := sys.TrailingZeros64(span.allocCache)
  5013  		result := span.freeindex + uint16(theBit)
  5014  		if result < nelems {
  5015  			freeidx := result + 1
  5016  			if !(freeidx%64 == 0 && freeidx != nelems) {
  5017  				span.allocCache >>= uint(theBit + 1)
  5018  				span.freeindex = freeidx
  5019  				span.allocCount++
  5020  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  5023  			}
  5024  		}
  5025  	}
  5026  	v := nextFreeFastResult
  5027  	if v == 0 {
  5028  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  5029  	}
  5030  	x := unsafe.Pointer(v)
  5031  	(*[2]uint64)(x)[0] = 0
  5032  	(*[2]uint64)(x)[1] = 0
  5033  
  5034  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  5035  
  5036  		c.tiny = uintptr(x)
  5037  		c.tinyoffset = constsize
  5038  	}
  5039  
  5040  	publicationBarrier()
  5041  
  5042  	if writeBarrier.enabled {
  5043  
  5044  		gcmarknewobject(span, uintptr(x))
  5045  	} else {
  5046  
  5047  		span.freeIndexForScan = span.freeindex
  5048  	}
  5049  
  5050  	c.nextSample -= int64(elemsize)
  5051  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  5052  		profilealloc(mp, x, elemsize)
  5053  	}
  5054  	mp.mallocing = 0
  5055  	releasem(mp)
  5056  
  5057  	if checkGCTrigger {
  5058  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  5059  			gcStart(t)
  5060  		}
  5061  	}
  5062  
  5063  	if raceenabled {
  5064  
  5065  		x = add(x, elemsize-constsize)
  5066  	}
  5067  	if valgrindenabled {
  5068  		valgrindMalloc(x, size)
  5069  	}
  5070  
  5071  	if gcBlackenEnabled != 0 && elemsize != 0 {
  5072  		if assistG := getg().m.curg; assistG != nil {
  5073  			assistG.gcAssistBytes -= int64(elemsize - size)
  5074  		}
  5075  	}
  5076  
  5077  	if debug.malloc {
  5078  		postMallocgcDebug(x, elemsize, typ)
  5079  	}
  5080  	return x
  5081  }
  5082  
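        // mallocgcTinySize6 is the tiny-allocator specialization for 6-byte noscan
        // allocations; objects are packed into shared 16-byte blocks.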
  5083  func mallocgcTinySize6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  5084  
  5085  	gp := getg()
  5086  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  5087  		return mallocgcSmallNoScanSC2(size, typ, needzero)
  5088  	}
  5089  
  5090  	if doubleCheckMalloc {
  5091  		if gcphase == _GCmarktermination {
  5092  			throw("mallocgc called with gcphase == _GCmarktermination")
  5093  		}
  5094  	}
  5095  
  5096  	lockRankMayQueueFinalizer()
  5097  
  5098  	if debug.malloc {
  5099  		if x := preMallocgcDebug(size, typ); x != nil {
  5100  			return x
  5101  		}
  5102  	}
  5103  
  5104  	if gcBlackenEnabled != 0 {
  5105  		deductAssistCredit(size)
  5106  	}
  5107  
  5108  	const constsize = 6
  5109  
  5110  	const elemsize = 16
  5111  
  5112  	mp := acquirem()
  5113  	if doubleCheckMalloc {
  5114  		doubleCheckTiny(constsize, typ, mp)
  5115  	}
  5116  	mp.mallocing = 1
  5117  
  5118  	c := getMCache(mp)
  5119  	off := c.tinyoffset
  5120  
  5121  	if constsize&7 == 0 {
  5122  		off = alignUp(off, 8)
  5123  	} else if goarch.PtrSize == 4 && constsize == 12 {
  5124  
  5125  		off = alignUp(off, 8)
  5126  	} else if constsize&3 == 0 {
  5127  		off = alignUp(off, 4)
  5128  	} else if constsize&1 == 0 {
  5129  		off = alignUp(off, 2)
  5130  	}
  5131  	if off+constsize <= maxTinySize && c.tiny != 0 {
  5132  
  5133  		x := unsafe.Pointer(c.tiny + off)
  5134  		c.tinyoffset = off + constsize
  5135  		c.tinyAllocs++
  5136  		mp.mallocing = 0
  5137  		releasem(mp)
  5138  		const elemsize = 0
  5139  		{
  5140  
  5141  			if valgrindenabled {
  5142  				valgrindMalloc(x, size)
  5143  			}
  5144  
  5145  			if gcBlackenEnabled != 0 && elemsize != 0 {
  5146  				if assistG := getg().m.curg; assistG != nil {
  5147  					assistG.gcAssistBytes -= int64(elemsize - size)
  5148  				}
  5149  			}
  5150  
  5151  			if debug.malloc {
  5152  				postMallocgcDebug(x, elemsize, typ)
  5153  			}
  5154  			return x
  5155  		}
  5156  
  5157  	}
  5158  
  5159  	checkGCTrigger := false
  5160  	span := c.alloc[tinySpanClass]
  5161  
  5162  	const nbytes = 8192
  5163  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  5166  	var nextFreeFastResult gclinkptr
  5167  	if span.allocCache != 0 {
  5168  		theBit := sys.TrailingZeros64(span.allocCache)
  5169  		result := span.freeindex + uint16(theBit)
  5170  		if result < nelems {
  5171  			freeidx := result + 1
  5172  			if !(freeidx%64 == 0 && freeidx != nelems) {
  5173  				span.allocCache >>= uint(theBit + 1)
  5174  				span.freeindex = freeidx
  5175  				span.allocCount++
  5176  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  5179  			}
  5180  		}
  5181  	}
  5182  	v := nextFreeFastResult
  5183  	if v == 0 {
  5184  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  5185  	}
  5186  	x := unsafe.Pointer(v)
  5187  	(*[2]uint64)(x)[0] = 0
  5188  	(*[2]uint64)(x)[1] = 0
  5189  
  5190  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  5191  
  5192  		c.tiny = uintptr(x)
  5193  		c.tinyoffset = constsize
  5194  	}
  5195  
  5196  	publicationBarrier()
  5197  
  5198  	if writeBarrier.enabled {
  5199  
  5200  		gcmarknewobject(span, uintptr(x))
  5201  	} else {
  5202  
  5203  		span.freeIndexForScan = span.freeindex
  5204  	}
  5205  
  5206  	c.nextSample -= int64(elemsize)
  5207  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  5208  		profilealloc(mp, x, elemsize)
  5209  	}
  5210  	mp.mallocing = 0
  5211  	releasem(mp)
  5212  
  5213  	if checkGCTrigger {
  5214  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  5215  			gcStart(t)
  5216  		}
  5217  	}
  5218  
  5219  	if raceenabled {
  5220  
  5221  		x = add(x, elemsize-constsize)
  5222  	}
  5223  	if valgrindenabled {
  5224  		valgrindMalloc(x, size)
  5225  	}
  5226  
  5227  	if gcBlackenEnabled != 0 && elemsize != 0 {
  5228  		if assistG := getg().m.curg; assistG != nil {
  5229  			assistG.gcAssistBytes -= int64(elemsize - size)
  5230  		}
  5231  	}
  5232  
  5233  	if debug.malloc {
  5234  		postMallocgcDebug(x, elemsize, typ)
  5235  	}
  5236  	return x
  5237  }
  5238  
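        // mallocgcTinySize7 is the tiny-allocator specialization for 7-byte noscan
        // allocations; objects are packed into shared 16-byte blocks.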
  5239  func mallocgcTinySize7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  5240  
  5241  	gp := getg()
  5242  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  5243  		return mallocgcSmallNoScanSC2(size, typ, needzero)
  5244  	}
  5245  
  5246  	if doubleCheckMalloc {
  5247  		if gcphase == _GCmarktermination {
  5248  			throw("mallocgc called with gcphase == _GCmarktermination")
  5249  		}
  5250  	}
  5251  
  5252  	lockRankMayQueueFinalizer()
  5253  
  5254  	if debug.malloc {
  5255  		if x := preMallocgcDebug(size, typ); x != nil {
  5256  			return x
  5257  		}
  5258  	}
  5259  
  5260  	if gcBlackenEnabled != 0 {
  5261  		deductAssistCredit(size)
  5262  	}
  5263  
  5264  	const constsize = 7
  5265  
  5266  	const elemsize = 16
  5267  
  5268  	mp := acquirem()
  5269  	if doubleCheckMalloc {
  5270  		doubleCheckTiny(constsize, typ, mp)
  5271  	}
  5272  	mp.mallocing = 1
  5273  
  5274  	c := getMCache(mp)
  5275  	off := c.tinyoffset
  5276  
  5277  	if constsize&7 == 0 {
  5278  		off = alignUp(off, 8)
  5279  	} else if goarch.PtrSize == 4 && constsize == 12 {
  5280  
  5281  		off = alignUp(off, 8)
  5282  	} else if constsize&3 == 0 {
  5283  		off = alignUp(off, 4)
  5284  	} else if constsize&1 == 0 {
  5285  		off = alignUp(off, 2)
  5286  	}
  5287  	if off+constsize <= maxTinySize && c.tiny != 0 {
  5288  
  5289  		x := unsafe.Pointer(c.tiny + off)
  5290  		c.tinyoffset = off + constsize
  5291  		c.tinyAllocs++
  5292  		mp.mallocing = 0
  5293  		releasem(mp)
  5294  		const elemsize = 0
  5295  		{
  5296  
  5297  			if valgrindenabled {
  5298  				valgrindMalloc(x, size)
  5299  			}
  5300  
  5301  			if gcBlackenEnabled != 0 && elemsize != 0 {
  5302  				if assistG := getg().m.curg; assistG != nil {
  5303  					assistG.gcAssistBytes -= int64(elemsize - size)
  5304  				}
  5305  			}
  5306  
  5307  			if debug.malloc {
  5308  				postMallocgcDebug(x, elemsize, typ)
  5309  			}
  5310  			return x
  5311  		}
  5312  
  5313  	}
  5314  
  5315  	checkGCTrigger := false
  5316  	span := c.alloc[tinySpanClass]
  5317  
  5318  	const nbytes = 8192
  5319  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  5322  	var nextFreeFastResult gclinkptr
  5323  	if span.allocCache != 0 {
  5324  		theBit := sys.TrailingZeros64(span.allocCache)
  5325  		result := span.freeindex + uint16(theBit)
  5326  		if result < nelems {
  5327  			freeidx := result + 1
  5328  			if !(freeidx%64 == 0 && freeidx != nelems) {
  5329  				span.allocCache >>= uint(theBit + 1)
  5330  				span.freeindex = freeidx
  5331  				span.allocCount++
  5332  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  5335  			}
  5336  		}
  5337  	}
  5338  	v := nextFreeFastResult
  5339  	if v == 0 {
  5340  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  5341  	}
  5342  	x := unsafe.Pointer(v)
  5343  	(*[2]uint64)(x)[0] = 0
  5344  	(*[2]uint64)(x)[1] = 0
  5345  
  5346  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  5347  
  5348  		c.tiny = uintptr(x)
  5349  		c.tinyoffset = constsize
  5350  	}
  5351  
  5352  	publicationBarrier()
  5353  
  5354  	if writeBarrier.enabled {
  5355  
  5356  		gcmarknewobject(span, uintptr(x))
  5357  	} else {
  5358  
  5359  		span.freeIndexForScan = span.freeindex
  5360  	}
  5361  
  5362  	c.nextSample -= int64(elemsize)
  5363  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  5364  		profilealloc(mp, x, elemsize)
  5365  	}
  5366  	mp.mallocing = 0
  5367  	releasem(mp)
  5368  
  5369  	if checkGCTrigger {
  5370  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  5371  			gcStart(t)
  5372  		}
  5373  	}
  5374  
  5375  	if raceenabled {
  5376  
  5377  		x = add(x, elemsize-constsize)
  5378  	}
  5379  	if valgrindenabled {
  5380  		valgrindMalloc(x, size)
  5381  	}
  5382  
  5383  	if gcBlackenEnabled != 0 && elemsize != 0 {
  5384  		if assistG := getg().m.curg; assistG != nil {
  5385  			assistG.gcAssistBytes -= int64(elemsize - size)
  5386  		}
  5387  	}
  5388  
  5389  	if debug.malloc {
  5390  		postMallocgcDebug(x, elemsize, typ)
  5391  	}
  5392  	return x
  5393  }
  5394  
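        // mallocgcTinySize8 is the tiny-allocator specialization for 8-byte noscan
        // allocations; objects are packed into shared 16-byte blocks.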
  5395  func mallocgcTinySize8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  5396  
  5397  	gp := getg()
  5398  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  5399  		return mallocgcSmallNoScanSC2(size, typ, needzero)
  5400  	}
  5401  
  5402  	if doubleCheckMalloc {
  5403  		if gcphase == _GCmarktermination {
  5404  			throw("mallocgc called with gcphase == _GCmarktermination")
  5405  		}
  5406  	}
  5407  
  5408  	lockRankMayQueueFinalizer()
  5409  
  5410  	if debug.malloc {
  5411  		if x := preMallocgcDebug(size, typ); x != nil {
  5412  			return x
  5413  		}
  5414  	}
  5415  
  5416  	if gcBlackenEnabled != 0 {
  5417  		deductAssistCredit(size)
  5418  	}
  5419  
  5420  	const constsize = 8
  5421  
  5422  	const elemsize = 16
  5423  
  5424  	mp := acquirem()
  5425  	if doubleCheckMalloc {
  5426  		doubleCheckTiny(constsize, typ, mp)
  5427  	}
  5428  	mp.mallocing = 1
  5429  
  5430  	c := getMCache(mp)
  5431  	off := c.tinyoffset
  5432  
  5433  	if constsize&7 == 0 {
  5434  		off = alignUp(off, 8)
  5435  	} else if goarch.PtrSize == 4 && constsize == 12 {
  5436  
  5437  		off = alignUp(off, 8)
  5438  	} else if constsize&3 == 0 {
  5439  		off = alignUp(off, 4)
  5440  	} else if constsize&1 == 0 {
  5441  		off = alignUp(off, 2)
  5442  	}
  5443  	if off+constsize <= maxTinySize && c.tiny != 0 {
  5444  
  5445  		x := unsafe.Pointer(c.tiny + off)
  5446  		c.tinyoffset = off + constsize
  5447  		c.tinyAllocs++
  5448  		mp.mallocing = 0
  5449  		releasem(mp)
  5450  		const elemsize = 0
  5451  		{
  5452  
  5453  			if valgrindenabled {
  5454  				valgrindMalloc(x, size)
  5455  			}
  5456  
  5457  			if gcBlackenEnabled != 0 && elemsize != 0 {
  5458  				if assistG := getg().m.curg; assistG != nil {
  5459  					assistG.gcAssistBytes -= int64(elemsize - size)
  5460  				}
  5461  			}
  5462  
  5463  			if debug.malloc {
  5464  				postMallocgcDebug(x, elemsize, typ)
  5465  			}
  5466  			return x
  5467  		}
  5468  
  5469  	}
  5470  
  5471  	checkGCTrigger := false
  5472  	span := c.alloc[tinySpanClass]
  5473  
  5474  	const nbytes = 8192
  5475  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  5478  	var nextFreeFastResult gclinkptr
  5479  	if span.allocCache != 0 {
  5480  		theBit := sys.TrailingZeros64(span.allocCache)
  5481  		result := span.freeindex + uint16(theBit)
  5482  		if result < nelems {
  5483  			freeidx := result + 1
  5484  			if !(freeidx%64 == 0 && freeidx != nelems) {
  5485  				span.allocCache >>= uint(theBit + 1)
  5486  				span.freeindex = freeidx
  5487  				span.allocCount++
  5488  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  5491  			}
  5492  		}
  5493  	}
  5494  	v := nextFreeFastResult
  5495  	if v == 0 {
  5496  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  5497  	}
  5498  	x := unsafe.Pointer(v)
  5499  	(*[2]uint64)(x)[0] = 0
  5500  	(*[2]uint64)(x)[1] = 0
  5501  
  5502  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  5503  
  5504  		c.tiny = uintptr(x)
  5505  		c.tinyoffset = constsize
  5506  	}
  5507  
  5508  	publicationBarrier()
  5509  
  5510  	if writeBarrier.enabled {
  5511  
  5512  		gcmarknewobject(span, uintptr(x))
  5513  	} else {
  5514  
  5515  		span.freeIndexForScan = span.freeindex
  5516  	}
  5517  
  5518  	c.nextSample -= int64(elemsize)
  5519  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  5520  		profilealloc(mp, x, elemsize)
  5521  	}
  5522  	mp.mallocing = 0
  5523  	releasem(mp)
  5524  
  5525  	if checkGCTrigger {
  5526  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  5527  			gcStart(t)
  5528  		}
  5529  	}
  5530  
  5531  	if raceenabled {
  5532  
  5533  		x = add(x, elemsize-constsize)
  5534  	}
  5535  	if valgrindenabled {
  5536  		valgrindMalloc(x, size)
  5537  	}
  5538  
  5539  	if gcBlackenEnabled != 0 && elemsize != 0 {
  5540  		if assistG := getg().m.curg; assistG != nil {
  5541  			assistG.gcAssistBytes -= int64(elemsize - size)
  5542  		}
  5543  	}
  5544  
  5545  	if debug.malloc {
  5546  		postMallocgcDebug(x, elemsize, typ)
  5547  	}
  5548  	return x
  5549  }
  5550  
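        // mallocgcTinySize9 is the tiny-allocator specialization for 9-byte noscan
        // allocations; objects are packed into shared 16-byte blocks.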
  5551  func mallocgcTinySize9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  5552  
  5553  	gp := getg()
  5554  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  5555  		return mallocgcSmallNoScanSC2(size, typ, needzero)
  5556  	}
  5557  
  5558  	if doubleCheckMalloc {
  5559  		if gcphase == _GCmarktermination {
  5560  			throw("mallocgc called with gcphase == _GCmarktermination")
  5561  		}
  5562  	}
  5563  
  5564  	lockRankMayQueueFinalizer()
  5565  
  5566  	if debug.malloc {
  5567  		if x := preMallocgcDebug(size, typ); x != nil {
  5568  			return x
  5569  		}
  5570  	}
  5571  
  5572  	if gcBlackenEnabled != 0 {
  5573  		deductAssistCredit(size)
  5574  	}
  5575  
  5576  	const constsize = 9
  5577  
  5578  	const elemsize = 16
  5579  
  5580  	mp := acquirem()
  5581  	if doubleCheckMalloc {
  5582  		doubleCheckTiny(constsize, typ, mp)
  5583  	}
  5584  	mp.mallocing = 1
  5585  
  5586  	c := getMCache(mp)
  5587  	off := c.tinyoffset
  5588  
  5589  	if constsize&7 == 0 {
  5590  		off = alignUp(off, 8)
  5591  	} else if goarch.PtrSize == 4 && constsize == 12 {
  5592  
  5593  		off = alignUp(off, 8)
  5594  	} else if constsize&3 == 0 {
  5595  		off = alignUp(off, 4)
  5596  	} else if constsize&1 == 0 {
  5597  		off = alignUp(off, 2)
  5598  	}
  5599  	if off+constsize <= maxTinySize && c.tiny != 0 {
  5600  
  5601  		x := unsafe.Pointer(c.tiny + off)
  5602  		c.tinyoffset = off + constsize
  5603  		c.tinyAllocs++
  5604  		mp.mallocing = 0
  5605  		releasem(mp)
  5606  		const elemsize = 0
  5607  		{
  5608  
  5609  			if valgrindenabled {
  5610  				valgrindMalloc(x, size)
  5611  			}
  5612  
  5613  			if gcBlackenEnabled != 0 && elemsize != 0 {
  5614  				if assistG := getg().m.curg; assistG != nil {
  5615  					assistG.gcAssistBytes -= int64(elemsize - size)
  5616  				}
  5617  			}
  5618  
  5619  			if debug.malloc {
  5620  				postMallocgcDebug(x, elemsize, typ)
  5621  			}
  5622  			return x
  5623  		}
  5624  
  5625  	}
  5626  
  5627  	checkGCTrigger := false
  5628  	span := c.alloc[tinySpanClass]
  5629  
  5630  	const nbytes = 8192
  5631  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  5634  	var nextFreeFastResult gclinkptr
  5635  	if span.allocCache != 0 {
  5636  		theBit := sys.TrailingZeros64(span.allocCache)
  5637  		result := span.freeindex + uint16(theBit)
  5638  		if result < nelems {
  5639  			freeidx := result + 1
  5640  			if !(freeidx%64 == 0 && freeidx != nelems) {
  5641  				span.allocCache >>= uint(theBit + 1)
  5642  				span.freeindex = freeidx
  5643  				span.allocCount++
  5644  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  5647  			}
  5648  		}
  5649  	}
  5650  	v := nextFreeFastResult
  5651  	if v == 0 {
  5652  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  5653  	}
  5654  	x := unsafe.Pointer(v)
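        	// Zero the full 16-byte block up front: later tiny hits hand out
        	// sub-chunks of it and rely on the memory already being zeroed.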
  5655  	(*[2]uint64)(x)[0] = 0
  5656  	(*[2]uint64)(x)[1] = 0
  5657  
  5658  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  5659  
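        		// Adopt the new block as the tiny block if it has more room left
        		// than the old one (or there is no old one). Race mode disables
        		// tiny packing entirely.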
  5660  		c.tiny = uintptr(x)
  5661  		c.tinyoffset = constsize
  5662  	}
  5663  
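        	// Ensure the zeroing above is visible before the object can be
        	// observed by the garbage collector or other goroutines.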
  5664  	publicationBarrier()
  5665  
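        	// Allocate black during the mark phase so the new object is not
        	// collected in the current cycle.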
  5666  	if writeBarrier.enabled {
  5667  
  5668  		gcmarknewobject(span, uintptr(x))
  5669  	} else {
  5670  
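        		// Outside the mark phase, just publish the allocation frontier
        		// for the conservative scanner.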
  5671  		span.freeIndexForScan = span.freeindex
  5672  	}
  5673  
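        	// Heap-profiling sample check: charge the element size against the
        	// sampling budget and record the allocation when the budget runs out
        	// or the profile rate has changed.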
  5674  	c.nextSample -= int64(elemsize)
  5675  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  5676  		profilealloc(mp, x, elemsize)
  5677  	}
  5678  	mp.mallocing = 0
  5679  	releasem(mp)
  5680  
  5681  	if checkGCTrigger {
  5682  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  5683  			gcStart(t)
  5684  		}
  5685  	}
  5686  
  5687  	if raceenabled {
  5688  
  5689  		x = add(x, elemsize-constsize)
  5690  	}
  5691  	if valgrindenabled {
  5692  		valgrindMalloc(x, size)
  5693  	}
  5694  
  5695  	if gcBlackenEnabled != 0 && elemsize != 0 {
  5696  		if assistG := getg().m.curg; assistG != nil {
  5697  			assistG.gcAssistBytes -= int64(elemsize - size)
  5698  		}
  5699  	}
  5700  
  5701  	if debug.malloc {
  5702  		postMallocgcDebug(x, elemsize, typ)
  5703  	}
  5704  	return x
  5705  }
  5706  
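        // mallocgcTinySize10 is the 10-byte tiny-allocator specialization; aside
        // from constsize it matches mallocgcTinySize9, which carries the
        // commentary for this shared structure.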
  5707  func mallocgcTinySize10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  5708  
  5709  	gp := getg()
  5710  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  5711  		return mallocgcSmallNoScanSC2(size, typ, needzero)
  5712  	}
  5713  
  5714  	if doubleCheckMalloc {
  5715  		if gcphase == _GCmarktermination {
  5716  			throw("mallocgc called with gcphase == _GCmarktermination")
  5717  		}
  5718  	}
  5719  
  5720  	lockRankMayQueueFinalizer()
  5721  
  5722  	if debug.malloc {
  5723  		if x := preMallocgcDebug(size, typ); x != nil {
  5724  			return x
  5725  		}
  5726  	}
  5727  
  5728  	if gcBlackenEnabled != 0 {
  5729  		deductAssistCredit(size)
  5730  	}
  5731  
  5732  	const constsize = 10
  5733  
  5734  	const elemsize = 16
  5735  
  5736  	mp := acquirem()
  5737  	if doubleCheckMalloc {
  5738  		doubleCheckTiny(constsize, typ, mp)
  5739  	}
  5740  	mp.mallocing = 1
  5741  
  5742  	c := getMCache(mp)
  5743  	off := c.tinyoffset
  5744  
  5745  	if constsize&7 == 0 {
  5746  		off = alignUp(off, 8)
  5747  	} else if goarch.PtrSize == 4 && constsize == 12 {
  5748  
  5749  		off = alignUp(off, 8)
  5750  	} else if constsize&3 == 0 {
  5751  		off = alignUp(off, 4)
  5752  	} else if constsize&1 == 0 {
  5753  		off = alignUp(off, 2)
  5754  	}
  5755  	if off+constsize <= maxTinySize && c.tiny != 0 {
  5756  
  5757  		x := unsafe.Pointer(c.tiny + off)
  5758  		c.tinyoffset = off + constsize
  5759  		c.tinyAllocs++
  5760  		mp.mallocing = 0
  5761  		releasem(mp)
  5762  		const elemsize = 0
  5763  		{
  5764  
  5765  			if valgrindenabled {
  5766  				valgrindMalloc(x, size)
  5767  			}
  5768  
  5769  			if gcBlackenEnabled != 0 && elemsize != 0 {
  5770  				if assistG := getg().m.curg; assistG != nil {
  5771  					assistG.gcAssistBytes -= int64(elemsize - size)
  5772  				}
  5773  			}
  5774  
  5775  			if debug.malloc {
  5776  				postMallocgcDebug(x, elemsize, typ)
  5777  			}
  5778  			return x
  5779  		}
  5780  
  5781  	}
  5782  
  5783  	checkGCTrigger := false
  5784  	span := c.alloc[tinySpanClass]
  5785  
  5786  	const nbytes = 8192
  5787  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  5790  	var nextFreeFastResult gclinkptr
  5791  	if span.allocCache != 0 {
  5792  		theBit := sys.TrailingZeros64(span.allocCache)
  5793  		result := span.freeindex + uint16(theBit)
  5794  		if result < nelems {
  5795  			freeidx := result + 1
  5796  			if !(freeidx%64 == 0 && freeidx != nelems) {
  5797  				span.allocCache >>= uint(theBit + 1)
  5798  				span.freeindex = freeidx
  5799  				span.allocCount++
  5800  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  5803  			}
  5804  		}
  5805  	}
  5806  	v := nextFreeFastResult
  5807  	if v == 0 {
  5808  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  5809  	}
  5810  	x := unsafe.Pointer(v)
  5811  	(*[2]uint64)(x)[0] = 0
  5812  	(*[2]uint64)(x)[1] = 0
  5813  
  5814  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  5815  
  5816  		c.tiny = uintptr(x)
  5817  		c.tinyoffset = constsize
  5818  	}
  5819  
  5820  	publicationBarrier()
  5821  
  5822  	if writeBarrier.enabled {
  5823  
  5824  		gcmarknewobject(span, uintptr(x))
  5825  	} else {
  5826  
  5827  		span.freeIndexForScan = span.freeindex
  5828  	}
  5829  
  5830  	c.nextSample -= int64(elemsize)
  5831  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  5832  		profilealloc(mp, x, elemsize)
  5833  	}
  5834  	mp.mallocing = 0
  5835  	releasem(mp)
  5836  
  5837  	if checkGCTrigger {
  5838  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  5839  			gcStart(t)
  5840  		}
  5841  	}
  5842  
  5843  	if raceenabled {
  5844  
  5845  		x = add(x, elemsize-constsize)
  5846  	}
  5847  	if valgrindenabled {
  5848  		valgrindMalloc(x, size)
  5849  	}
  5850  
  5851  	if gcBlackenEnabled != 0 && elemsize != 0 {
  5852  		if assistG := getg().m.curg; assistG != nil {
  5853  			assistG.gcAssistBytes -= int64(elemsize - size)
  5854  		}
  5855  	}
  5856  
  5857  	if debug.malloc {
  5858  		postMallocgcDebug(x, elemsize, typ)
  5859  	}
  5860  	return x
  5861  }
  5862  
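        // mallocgcTinySize11 is the 11-byte tiny-allocator specialization; aside
        // from constsize it matches mallocgcTinySize9, which carries the
        // commentary for this shared structure.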
  5863  func mallocgcTinySize11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  5864  
  5865  	gp := getg()
  5866  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  5867  		return mallocgcSmallNoScanSC2(size, typ, needzero)
  5868  	}
  5869  
  5870  	if doubleCheckMalloc {
  5871  		if gcphase == _GCmarktermination {
  5872  			throw("mallocgc called with gcphase == _GCmarktermination")
  5873  		}
  5874  	}
  5875  
  5876  	lockRankMayQueueFinalizer()
  5877  
  5878  	if debug.malloc {
  5879  		if x := preMallocgcDebug(size, typ); x != nil {
  5880  			return x
  5881  		}
  5882  	}
  5883  
  5884  	if gcBlackenEnabled != 0 {
  5885  		deductAssistCredit(size)
  5886  	}
  5887  
  5888  	const constsize = 11
  5889  
  5890  	const elemsize = 16
  5891  
  5892  	mp := acquirem()
  5893  	if doubleCheckMalloc {
  5894  		doubleCheckTiny(constsize, typ, mp)
  5895  	}
  5896  	mp.mallocing = 1
  5897  
  5898  	c := getMCache(mp)
  5899  	off := c.tinyoffset
  5900  
  5901  	if constsize&7 == 0 {
  5902  		off = alignUp(off, 8)
  5903  	} else if goarch.PtrSize == 4 && constsize == 12 {
  5904  
  5905  		off = alignUp(off, 8)
  5906  	} else if constsize&3 == 0 {
  5907  		off = alignUp(off, 4)
  5908  	} else if constsize&1 == 0 {
  5909  		off = alignUp(off, 2)
  5910  	}
  5911  	if off+constsize <= maxTinySize && c.tiny != 0 {
  5912  
  5913  		x := unsafe.Pointer(c.tiny + off)
  5914  		c.tinyoffset = off + constsize
  5915  		c.tinyAllocs++
  5916  		mp.mallocing = 0
  5917  		releasem(mp)
  5918  		const elemsize = 0
  5919  		{
  5920  
  5921  			if valgrindenabled {
  5922  				valgrindMalloc(x, size)
  5923  			}
  5924  
  5925  			if gcBlackenEnabled != 0 && elemsize != 0 {
  5926  				if assistG := getg().m.curg; assistG != nil {
  5927  					assistG.gcAssistBytes -= int64(elemsize - size)
  5928  				}
  5929  			}
  5930  
  5931  			if debug.malloc {
  5932  				postMallocgcDebug(x, elemsize, typ)
  5933  			}
  5934  			return x
  5935  		}
  5936  
  5937  	}
  5938  
  5939  	checkGCTrigger := false
  5940  	span := c.alloc[tinySpanClass]
  5941  
  5942  	const nbytes = 8192
  5943  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  5946  	var nextFreeFastResult gclinkptr
  5947  	if span.allocCache != 0 {
  5948  		theBit := sys.TrailingZeros64(span.allocCache)
  5949  		result := span.freeindex + uint16(theBit)
  5950  		if result < nelems {
  5951  			freeidx := result + 1
  5952  			if !(freeidx%64 == 0 && freeidx != nelems) {
  5953  				span.allocCache >>= uint(theBit + 1)
  5954  				span.freeindex = freeidx
  5955  				span.allocCount++
  5956  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  5959  			}
  5960  		}
  5961  	}
  5962  	v := nextFreeFastResult
  5963  	if v == 0 {
  5964  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  5965  	}
  5966  	x := unsafe.Pointer(v)
  5967  	(*[2]uint64)(x)[0] = 0
  5968  	(*[2]uint64)(x)[1] = 0
  5969  
  5970  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  5971  
  5972  		c.tiny = uintptr(x)
  5973  		c.tinyoffset = constsize
  5974  	}
  5975  
  5976  	publicationBarrier()
  5977  
  5978  	if writeBarrier.enabled {
  5979  
  5980  		gcmarknewobject(span, uintptr(x))
  5981  	} else {
  5982  
  5983  		span.freeIndexForScan = span.freeindex
  5984  	}
  5985  
  5986  	c.nextSample -= int64(elemsize)
  5987  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  5988  		profilealloc(mp, x, elemsize)
  5989  	}
  5990  	mp.mallocing = 0
  5991  	releasem(mp)
  5992  
  5993  	if checkGCTrigger {
  5994  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  5995  			gcStart(t)
  5996  		}
  5997  	}
  5998  
  5999  	if raceenabled {
  6000  
  6001  		x = add(x, elemsize-constsize)
  6002  	}
  6003  	if valgrindenabled {
  6004  		valgrindMalloc(x, size)
  6005  	}
  6006  
  6007  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6008  		if assistG := getg().m.curg; assistG != nil {
  6009  			assistG.gcAssistBytes -= int64(elemsize - size)
  6010  		}
  6011  	}
  6012  
  6013  	if debug.malloc {
  6014  		postMallocgcDebug(x, elemsize, typ)
  6015  	}
  6016  	return x
  6017  }
  6018  
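        // mallocgcTinySize12 is the 12-byte tiny-allocator specialization; it is
        // the variant where the 32-bit 8-byte-alignment branch is live. See
        // mallocgcTinySize9 for commentary on the shared structure.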
  6019  func mallocgcTinySize12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6020  
  6021  	gp := getg()
  6022  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  6023  		return mallocgcSmallNoScanSC2(size, typ, needzero)
  6024  	}
  6025  
  6026  	if doubleCheckMalloc {
  6027  		if gcphase == _GCmarktermination {
  6028  			throw("mallocgc called with gcphase == _GCmarktermination")
  6029  		}
  6030  	}
  6031  
  6032  	lockRankMayQueueFinalizer()
  6033  
  6034  	if debug.malloc {
  6035  		if x := preMallocgcDebug(size, typ); x != nil {
  6036  			return x
  6037  		}
  6038  	}
  6039  
  6040  	if gcBlackenEnabled != 0 {
  6041  		deductAssistCredit(size)
  6042  	}
  6043  
  6044  	const constsize = 12
  6045  
  6046  	const elemsize = 16
  6047  
  6048  	mp := acquirem()
  6049  	if doubleCheckMalloc {
  6050  		doubleCheckTiny(constsize, typ, mp)
  6051  	}
  6052  	mp.mallocing = 1
  6053  
  6054  	c := getMCache(mp)
  6055  	off := c.tinyoffset
  6056  
  6057  	if constsize&7 == 0 {
  6058  		off = alignUp(off, 8)
  6059  	} else if goarch.PtrSize == 4 && constsize == 12 {
  6060  
  6061  		off = alignUp(off, 8)
  6062  	} else if constsize&3 == 0 {
  6063  		off = alignUp(off, 4)
  6064  	} else if constsize&1 == 0 {
  6065  		off = alignUp(off, 2)
  6066  	}
  6067  	if off+constsize <= maxTinySize && c.tiny != 0 {
  6068  
  6069  		x := unsafe.Pointer(c.tiny + off)
  6070  		c.tinyoffset = off + constsize
  6071  		c.tinyAllocs++
  6072  		mp.mallocing = 0
  6073  		releasem(mp)
  6074  		const elemsize = 0
  6075  		{
  6076  
  6077  			if valgrindenabled {
  6078  				valgrindMalloc(x, size)
  6079  			}
  6080  
  6081  			if gcBlackenEnabled != 0 && elemsize != 0 {
  6082  				if assistG := getg().m.curg; assistG != nil {
  6083  					assistG.gcAssistBytes -= int64(elemsize - size)
  6084  				}
  6085  			}
  6086  
  6087  			if debug.malloc {
  6088  				postMallocgcDebug(x, elemsize, typ)
  6089  			}
  6090  			return x
  6091  		}
  6092  
  6093  	}
  6094  
  6095  	checkGCTrigger := false
  6096  	span := c.alloc[tinySpanClass]
  6097  
  6098  	const nbytes = 8192
  6099  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  6102  	var nextFreeFastResult gclinkptr
  6103  	if span.allocCache != 0 {
  6104  		theBit := sys.TrailingZeros64(span.allocCache)
  6105  		result := span.freeindex + uint16(theBit)
  6106  		if result < nelems {
  6107  			freeidx := result + 1
  6108  			if !(freeidx%64 == 0 && freeidx != nelems) {
  6109  				span.allocCache >>= uint(theBit + 1)
  6110  				span.freeindex = freeidx
  6111  				span.allocCount++
  6112  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  6115  			}
  6116  		}
  6117  	}
  6118  	v := nextFreeFastResult
  6119  	if v == 0 {
  6120  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  6121  	}
  6122  	x := unsafe.Pointer(v)
  6123  	(*[2]uint64)(x)[0] = 0
  6124  	(*[2]uint64)(x)[1] = 0
  6125  
  6126  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  6127  
  6128  		c.tiny = uintptr(x)
  6129  		c.tinyoffset = constsize
  6130  	}
  6131  
  6132  	publicationBarrier()
  6133  
  6134  	if writeBarrier.enabled {
  6135  
  6136  		gcmarknewobject(span, uintptr(x))
  6137  	} else {
  6138  
  6139  		span.freeIndexForScan = span.freeindex
  6140  	}
  6141  
  6142  	c.nextSample -= int64(elemsize)
  6143  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  6144  		profilealloc(mp, x, elemsize)
  6145  	}
  6146  	mp.mallocing = 0
  6147  	releasem(mp)
  6148  
  6149  	if checkGCTrigger {
  6150  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  6151  			gcStart(t)
  6152  		}
  6153  	}
  6154  
  6155  	if raceenabled {
  6156  
  6157  		x = add(x, elemsize-constsize)
  6158  	}
  6159  	if valgrindenabled {
  6160  		valgrindMalloc(x, size)
  6161  	}
  6162  
  6163  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6164  		if assistG := getg().m.curg; assistG != nil {
  6165  			assistG.gcAssistBytes -= int64(elemsize - size)
  6166  		}
  6167  	}
  6168  
  6169  	if debug.malloc {
  6170  		postMallocgcDebug(x, elemsize, typ)
  6171  	}
  6172  	return x
  6173  }
  6174  
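        // mallocgcTinySize13 is the 13-byte tiny-allocator specialization; aside
        // from constsize it matches mallocgcTinySize9, which carries the
        // commentary for this shared structure.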
  6175  func mallocgcTinySize13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6176  
  6177  	gp := getg()
  6178  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  6179  		return mallocgcSmallNoScanSC2(size, typ, needzero)
  6180  	}
  6181  
  6182  	if doubleCheckMalloc {
  6183  		if gcphase == _GCmarktermination {
  6184  			throw("mallocgc called with gcphase == _GCmarktermination")
  6185  		}
  6186  	}
  6187  
  6188  	lockRankMayQueueFinalizer()
  6189  
  6190  	if debug.malloc {
  6191  		if x := preMallocgcDebug(size, typ); x != nil {
  6192  			return x
  6193  		}
  6194  	}
  6195  
  6196  	if gcBlackenEnabled != 0 {
  6197  		deductAssistCredit(size)
  6198  	}
  6199  
  6200  	const constsize = 13
  6201  
  6202  	const elemsize = 16
  6203  
  6204  	mp := acquirem()
  6205  	if doubleCheckMalloc {
  6206  		doubleCheckTiny(constsize, typ, mp)
  6207  	}
  6208  	mp.mallocing = 1
  6209  
  6210  	c := getMCache(mp)
  6211  	off := c.tinyoffset
  6212  
  6213  	if constsize&7 == 0 {
  6214  		off = alignUp(off, 8)
  6215  	} else if goarch.PtrSize == 4 && constsize == 12 {
  6216  
  6217  		off = alignUp(off, 8)
  6218  	} else if constsize&3 == 0 {
  6219  		off = alignUp(off, 4)
  6220  	} else if constsize&1 == 0 {
  6221  		off = alignUp(off, 2)
  6222  	}
  6223  	if off+constsize <= maxTinySize && c.tiny != 0 {
  6224  
  6225  		x := unsafe.Pointer(c.tiny + off)
  6226  		c.tinyoffset = off + constsize
  6227  		c.tinyAllocs++
  6228  		mp.mallocing = 0
  6229  		releasem(mp)
  6230  		const elemsize = 0
  6231  		{
  6232  
  6233  			if valgrindenabled {
  6234  				valgrindMalloc(x, size)
  6235  			}
  6236  
  6237  			if gcBlackenEnabled != 0 && elemsize != 0 {
  6238  				if assistG := getg().m.curg; assistG != nil {
  6239  					assistG.gcAssistBytes -= int64(elemsize - size)
  6240  				}
  6241  			}
  6242  
  6243  			if debug.malloc {
  6244  				postMallocgcDebug(x, elemsize, typ)
  6245  			}
  6246  			return x
  6247  		}
  6248  
  6249  	}
  6250  
  6251  	checkGCTrigger := false
  6252  	span := c.alloc[tinySpanClass]
  6253  
  6254  	const nbytes = 8192
  6255  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  6258  	var nextFreeFastResult gclinkptr
  6259  	if span.allocCache != 0 {
  6260  		theBit := sys.TrailingZeros64(span.allocCache)
  6261  		result := span.freeindex + uint16(theBit)
  6262  		if result < nelems {
  6263  			freeidx := result + 1
  6264  			if !(freeidx%64 == 0 && freeidx != nelems) {
  6265  				span.allocCache >>= uint(theBit + 1)
  6266  				span.freeindex = freeidx
  6267  				span.allocCount++
  6268  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  6271  			}
  6272  		}
  6273  	}
  6274  	v := nextFreeFastResult
  6275  	if v == 0 {
  6276  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  6277  	}
  6278  	x := unsafe.Pointer(v)
  6279  	(*[2]uint64)(x)[0] = 0
  6280  	(*[2]uint64)(x)[1] = 0
  6281  
  6282  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  6283  
  6284  		c.tiny = uintptr(x)
  6285  		c.tinyoffset = constsize
  6286  	}
  6287  
  6288  	publicationBarrier()
  6289  
  6290  	if writeBarrier.enabled {
  6291  
  6292  		gcmarknewobject(span, uintptr(x))
  6293  	} else {
  6294  
  6295  		span.freeIndexForScan = span.freeindex
  6296  	}
  6297  
  6298  	c.nextSample -= int64(elemsize)
  6299  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  6300  		profilealloc(mp, x, elemsize)
  6301  	}
  6302  	mp.mallocing = 0
  6303  	releasem(mp)
  6304  
  6305  	if checkGCTrigger {
  6306  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  6307  			gcStart(t)
  6308  		}
  6309  	}
  6310  
  6311  	if raceenabled {
  6312  
  6313  		x = add(x, elemsize-constsize)
  6314  	}
  6315  	if valgrindenabled {
  6316  		valgrindMalloc(x, size)
  6317  	}
  6318  
  6319  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6320  		if assistG := getg().m.curg; assistG != nil {
  6321  			assistG.gcAssistBytes -= int64(elemsize - size)
  6322  		}
  6323  	}
  6324  
  6325  	if debug.malloc {
  6326  		postMallocgcDebug(x, elemsize, typ)
  6327  	}
  6328  	return x
  6329  }
  6330  
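        // mallocgcTinySize14 is the 14-byte tiny-allocator specialization; aside
        // from constsize it matches mallocgcTinySize9, which carries the
        // commentary for this shared structure.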
  6331  func mallocgcTinySize14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6332  
  6333  	gp := getg()
  6334  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  6335  		return mallocgcSmallNoScanSC2(size, typ, needzero)
  6336  	}
  6337  
  6338  	if doubleCheckMalloc {
  6339  		if gcphase == _GCmarktermination {
  6340  			throw("mallocgc called with gcphase == _GCmarktermination")
  6341  		}
  6342  	}
  6343  
  6344  	lockRankMayQueueFinalizer()
  6345  
  6346  	if debug.malloc {
  6347  		if x := preMallocgcDebug(size, typ); x != nil {
  6348  			return x
  6349  		}
  6350  	}
  6351  
  6352  	if gcBlackenEnabled != 0 {
  6353  		deductAssistCredit(size)
  6354  	}
  6355  
  6356  	const constsize = 14
  6357  
  6358  	const elemsize = 16
  6359  
  6360  	mp := acquirem()
  6361  	if doubleCheckMalloc {
  6362  		doubleCheckTiny(constsize, typ, mp)
  6363  	}
  6364  	mp.mallocing = 1
  6365  
  6366  	c := getMCache(mp)
  6367  	off := c.tinyoffset
  6368  
  6369  	if constsize&7 == 0 {
  6370  		off = alignUp(off, 8)
  6371  	} else if goarch.PtrSize == 4 && constsize == 12 {
  6372  
  6373  		off = alignUp(off, 8)
  6374  	} else if constsize&3 == 0 {
  6375  		off = alignUp(off, 4)
  6376  	} else if constsize&1 == 0 {
  6377  		off = alignUp(off, 2)
  6378  	}
  6379  	if off+constsize <= maxTinySize && c.tiny != 0 {
  6380  
  6381  		x := unsafe.Pointer(c.tiny + off)
  6382  		c.tinyoffset = off + constsize
  6383  		c.tinyAllocs++
  6384  		mp.mallocing = 0
  6385  		releasem(mp)
  6386  		const elemsize = 0
  6387  		{
  6388  
  6389  			if valgrindenabled {
  6390  				valgrindMalloc(x, size)
  6391  			}
  6392  
  6393  			if gcBlackenEnabled != 0 && elemsize != 0 {
  6394  				if assistG := getg().m.curg; assistG != nil {
  6395  					assistG.gcAssistBytes -= int64(elemsize - size)
  6396  				}
  6397  			}
  6398  
  6399  			if debug.malloc {
  6400  				postMallocgcDebug(x, elemsize, typ)
  6401  			}
  6402  			return x
  6403  		}
  6404  
  6405  	}
  6406  
  6407  	checkGCTrigger := false
  6408  	span := c.alloc[tinySpanClass]
  6409  
  6410  	const nbytes = 8192
  6411  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  6414  	var nextFreeFastResult gclinkptr
  6415  	if span.allocCache != 0 {
  6416  		theBit := sys.TrailingZeros64(span.allocCache)
  6417  		result := span.freeindex + uint16(theBit)
  6418  		if result < nelems {
  6419  			freeidx := result + 1
  6420  			if !(freeidx%64 == 0 && freeidx != nelems) {
  6421  				span.allocCache >>= uint(theBit + 1)
  6422  				span.freeindex = freeidx
  6423  				span.allocCount++
  6424  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  6427  			}
  6428  		}
  6429  	}
  6430  	v := nextFreeFastResult
  6431  	if v == 0 {
  6432  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  6433  	}
  6434  	x := unsafe.Pointer(v)
  6435  	(*[2]uint64)(x)[0] = 0
  6436  	(*[2]uint64)(x)[1] = 0
  6437  
  6438  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  6439  
  6440  		c.tiny = uintptr(x)
  6441  		c.tinyoffset = constsize
  6442  	}
  6443  
  6444  	publicationBarrier()
  6445  
  6446  	if writeBarrier.enabled {
  6447  
  6448  		gcmarknewobject(span, uintptr(x))
  6449  	} else {
  6450  
  6451  		span.freeIndexForScan = span.freeindex
  6452  	}
  6453  
  6454  	c.nextSample -= int64(elemsize)
  6455  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  6456  		profilealloc(mp, x, elemsize)
  6457  	}
  6458  	mp.mallocing = 0
  6459  	releasem(mp)
  6460  
  6461  	if checkGCTrigger {
  6462  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  6463  			gcStart(t)
  6464  		}
  6465  	}
  6466  
  6467  	if raceenabled {
  6468  
  6469  		x = add(x, elemsize-constsize)
  6470  	}
  6471  	if valgrindenabled {
  6472  		valgrindMalloc(x, size)
  6473  	}
  6474  
  6475  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6476  		if assistG := getg().m.curg; assistG != nil {
  6477  			assistG.gcAssistBytes -= int64(elemsize - size)
  6478  		}
  6479  	}
  6480  
  6481  	if debug.malloc {
  6482  		postMallocgcDebug(x, elemsize, typ)
  6483  	}
  6484  	return x
  6485  }
  6486  
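        // mallocgcTinySize15 is the 15-byte tiny-allocator specialization; aside
        // from constsize it matches mallocgcTinySize9, which carries the
        // commentary for this shared structure.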
  6487  func mallocgcTinySize15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6488  
  6489  	gp := getg()
  6490  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  6491  		return mallocgcSmallNoScanSC2(size, typ, needzero)
  6492  	}
  6493  
  6494  	if doubleCheckMalloc {
  6495  		if gcphase == _GCmarktermination {
  6496  			throw("mallocgc called with gcphase == _GCmarktermination")
  6497  		}
  6498  	}
  6499  
  6500  	lockRankMayQueueFinalizer()
  6501  
  6502  	if debug.malloc {
  6503  		if x := preMallocgcDebug(size, typ); x != nil {
  6504  			return x
  6505  		}
  6506  	}
  6507  
  6508  	if gcBlackenEnabled != 0 {
  6509  		deductAssistCredit(size)
  6510  	}
  6511  
  6512  	const constsize = 15
  6513  
  6514  	const elemsize = 16
  6515  
  6516  	mp := acquirem()
  6517  	if doubleCheckMalloc {
  6518  		doubleCheckTiny(constsize, typ, mp)
  6519  	}
  6520  	mp.mallocing = 1
  6521  
  6522  	c := getMCache(mp)
  6523  	off := c.tinyoffset
  6524  
  6525  	if constsize&7 == 0 {
  6526  		off = alignUp(off, 8)
  6527  	} else if goarch.PtrSize == 4 && constsize == 12 {
  6528  
  6529  		off = alignUp(off, 8)
  6530  	} else if constsize&3 == 0 {
  6531  		off = alignUp(off, 4)
  6532  	} else if constsize&1 == 0 {
  6533  		off = alignUp(off, 2)
  6534  	}
  6535  	if off+constsize <= maxTinySize && c.tiny != 0 {
  6536  
  6537  		x := unsafe.Pointer(c.tiny + off)
  6538  		c.tinyoffset = off + constsize
  6539  		c.tinyAllocs++
  6540  		mp.mallocing = 0
  6541  		releasem(mp)
  6542  		const elemsize = 0
  6543  		{
  6544  
  6545  			if valgrindenabled {
  6546  				valgrindMalloc(x, size)
  6547  			}
  6548  
  6549  			if gcBlackenEnabled != 0 && elemsize != 0 {
  6550  				if assistG := getg().m.curg; assistG != nil {
  6551  					assistG.gcAssistBytes -= int64(elemsize - size)
  6552  				}
  6553  			}
  6554  
  6555  			if debug.malloc {
  6556  				postMallocgcDebug(x, elemsize, typ)
  6557  			}
  6558  			return x
  6559  		}
  6560  
  6561  	}
  6562  
  6563  	checkGCTrigger := false
  6564  	span := c.alloc[tinySpanClass]
  6565  
  6566  	const nbytes = 8192
  6567  	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
  6570  	var nextFreeFastResult gclinkptr
  6571  	if span.allocCache != 0 {
  6572  		theBit := sys.TrailingZeros64(span.allocCache)
  6573  		result := span.freeindex + uint16(theBit)
  6574  		if result < nelems {
  6575  			freeidx := result + 1
  6576  			if !(freeidx%64 == 0 && freeidx != nelems) {
  6577  				span.allocCache >>= uint(theBit + 1)
  6578  				span.freeindex = freeidx
  6579  				span.allocCount++
  6580  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  6583  			}
  6584  		}
  6585  	}
  6586  	v := nextFreeFastResult
  6587  	if v == 0 {
  6588  		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
  6589  	}
  6590  	x := unsafe.Pointer(v)
  6591  	(*[2]uint64)(x)[0] = 0
  6592  	(*[2]uint64)(x)[1] = 0
  6593  
  6594  	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
  6595  
  6596  		c.tiny = uintptr(x)
  6597  		c.tinyoffset = constsize
  6598  	}
  6599  
  6600  	publicationBarrier()
  6601  
  6602  	if writeBarrier.enabled {
  6603  
  6604  		gcmarknewobject(span, uintptr(x))
  6605  	} else {
  6606  
  6607  		span.freeIndexForScan = span.freeindex
  6608  	}
  6609  
  6610  	c.nextSample -= int64(elemsize)
  6611  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  6612  		profilealloc(mp, x, elemsize)
  6613  	}
  6614  	mp.mallocing = 0
  6615  	releasem(mp)
  6616  
  6617  	if checkGCTrigger {
  6618  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  6619  			gcStart(t)
  6620  		}
  6621  	}
  6622  
  6623  	if raceenabled {
  6624  
  6625  		x = add(x, elemsize-constsize)
  6626  	}
  6627  	if valgrindenabled {
  6628  		valgrindMalloc(x, size)
  6629  	}
  6630  
  6631  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6632  		if assistG := getg().m.curg; assistG != nil {
  6633  			assistG.gcAssistBytes -= int64(elemsize - size)
  6634  		}
  6635  	}
  6636  
  6637  	if debug.malloc {
  6638  		postMallocgcDebug(x, elemsize, typ)
  6639  	}
  6640  	return x
  6641  }
  6642  
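        // mallocgcSmallNoScanSC2 allocates pointer-free objects in size class 2
        // (16-byte elements). It tries a reusable freed slot first when freegc is
        // enabled, then the span's allocCache, and finally refills via c.nextFree.
        // It also backs the tiny allocator's RuntimeSecret fallback above.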
  6643  func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6644  
  6645  	if doubleCheckMalloc {
  6646  		if gcphase == _GCmarktermination {
  6647  			throw("mallocgc called with gcphase == _GCmarktermination")
  6648  		}
  6649  	}
  6650  
  6651  	lockRankMayQueueFinalizer()
  6652  
  6653  	if debug.malloc {
  6654  		if x := preMallocgcDebug(size, typ); x != nil {
  6655  			return x
  6656  		}
  6657  	}
  6658  
  6659  	if gcBlackenEnabled != 0 {
  6660  		deductAssistCredit(size)
  6661  	}
  6662  
  6663  	const sizeclass = 2
  6664  
  6665  	const elemsize = 16
  6666  
  6667  	mp := acquirem()
  6668  	if doubleCheckMalloc {
  6669  		doubleCheckSmallNoScan(typ, mp)
  6670  	}
  6671  	mp.mallocing = 1
  6672  
  6673  	checkGCTrigger := false
  6674  	c := getMCache(mp)
  6675  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  6676  	span := c.alloc[spc]
  6677  
  6678  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  6679  
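        		// freegc reuse path: recycle a previously freed noscan element of
        		// this size class from the mcache instead of advancing the span.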
  6680  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  6681  		mp.mallocing = 0
  6682  		releasem(mp)
  6683  		x := v
  6684  		{
  6685  
  6686  			gp := getg()
  6687  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  6688  
  6689  				addSecret(x, size)
  6690  			}
  6691  
  6692  			if valgrindenabled {
  6693  				valgrindMalloc(x, size)
  6694  			}
  6695  
  6696  			if gcBlackenEnabled != 0 && elemsize != 0 {
  6697  				if assistG := getg().m.curg; assistG != nil {
  6698  					assistG.gcAssistBytes -= int64(elemsize - size)
  6699  				}
  6700  			}
  6701  
  6702  			if debug.malloc {
  6703  				postMallocgcDebug(x, elemsize, typ)
  6704  			}
  6705  			return x
  6706  		}
  6707  
  6708  	}
  6709  
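        	// Inlined nextFreeFast, as in the tiny path above.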
  6710  	var nextFreeFastResult gclinkptr
  6711  	if span.allocCache != 0 {
  6712  		theBit := sys.TrailingZeros64(span.allocCache)
  6713  		result := span.freeindex + uint16(theBit)
  6714  		if result < span.nelems {
  6715  			freeidx := result + 1
  6716  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  6717  				span.allocCache >>= uint(theBit + 1)
  6718  				span.freeindex = freeidx
  6719  				span.allocCount++
  6720  				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
  6723  			}
  6724  		}
  6725  	}
  6726  	v := nextFreeFastResult
  6727  	if v == 0 {
  6728  		v, span, checkGCTrigger = c.nextFree(spc)
  6729  	}
  6730  	x := unsafe.Pointer(v)
  6731  	if needzero && span.needzero != 0 {
  6732  		memclrNoHeapPointers(x, elemsize)
  6733  	}
  6734  
  6735  	publicationBarrier()
  6736  
  6737  	if writeBarrier.enabled {
  6738  
  6739  		gcmarknewobject(span, uintptr(x))
  6740  	} else {
  6741  
  6742  		span.freeIndexForScan = span.freeindex
  6743  	}
  6744  
  6745  	c.nextSample -= int64(elemsize)
  6746  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  6747  		profilealloc(mp, x, elemsize)
  6748  	}
  6749  	mp.mallocing = 0
  6750  	releasem(mp)
  6751  
  6752  	if checkGCTrigger {
  6753  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  6754  			gcStart(t)
  6755  		}
  6756  	}
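        	// Under the RuntimeSecret experiment, register the new object as
        	// secret-bearing so the runtime can track (and presumably scrub) it.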
  6757  	gp := getg()
  6758  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  6759  
  6760  		addSecret(x, size)
  6761  	}
  6762  
  6763  	if valgrindenabled {
  6764  		valgrindMalloc(x, size)
  6765  	}
  6766  
  6767  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6768  		if assistG := getg().m.curg; assistG != nil {
  6769  			assistG.gcAssistBytes -= int64(elemsize - size)
  6770  		}
  6771  	}
  6772  
  6773  	if debug.malloc {
  6774  		postMallocgcDebug(x, elemsize, typ)
  6775  	}
  6776  	return x
  6777  }
  6778  
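        // mallocgcSmallNoScanSC3 is the size class 3 (24-byte) variant of
        // mallocgcSmallNoScanSC2, which carries the commentary.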
  6779  func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6780  
  6781  	if doubleCheckMalloc {
  6782  		if gcphase == _GCmarktermination {
  6783  			throw("mallocgc called with gcphase == _GCmarktermination")
  6784  		}
  6785  	}
  6786  
  6787  	lockRankMayQueueFinalizer()
  6788  
  6789  	if debug.malloc {
  6790  		if x := preMallocgcDebug(size, typ); x != nil {
  6791  			return x
  6792  		}
  6793  	}
  6794  
  6795  	if gcBlackenEnabled != 0 {
  6796  		deductAssistCredit(size)
  6797  	}
  6798  
  6799  	const sizeclass = 3
  6800  
  6801  	const elemsize = 24
  6802  
  6803  	mp := acquirem()
  6804  	if doubleCheckMalloc {
  6805  		doubleCheckSmallNoScan(typ, mp)
  6806  	}
  6807  	mp.mallocing = 1
  6808  
  6809  	checkGCTrigger := false
  6810  	c := getMCache(mp)
  6811  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  6812  	span := c.alloc[spc]
  6813  
  6814  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  6815  
  6816  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  6817  		mp.mallocing = 0
  6818  		releasem(mp)
  6819  		x := v
  6820  		{
  6821  
  6822  			gp := getg()
  6823  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  6824  
  6825  				addSecret(x, size)
  6826  			}
  6827  
  6828  			if valgrindenabled {
  6829  				valgrindMalloc(x, size)
  6830  			}
  6831  
  6832  			if gcBlackenEnabled != 0 && elemsize != 0 {
  6833  				if assistG := getg().m.curg; assistG != nil {
  6834  					assistG.gcAssistBytes -= int64(elemsize - size)
  6835  				}
  6836  			}
  6837  
  6838  			if debug.malloc {
  6839  				postMallocgcDebug(x, elemsize, typ)
  6840  			}
  6841  			return x
  6842  		}
  6843  
  6844  	}
  6845  
  6846  	var nextFreeFastResult gclinkptr
  6847  	if span.allocCache != 0 {
  6848  		theBit := sys.TrailingZeros64(span.allocCache)
  6849  		result := span.freeindex + uint16(theBit)
  6850  		if result < span.nelems {
  6851  			freeidx := result + 1
  6852  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  6853  				span.allocCache >>= uint(theBit + 1)
  6854  				span.freeindex = freeidx
  6855  				span.allocCount++
  6856  				nextFreeFastResult = gclinkptr(uintptr(result)*24 + span.base())
  6859  			}
  6860  		}
  6861  	}
  6862  	v := nextFreeFastResult
  6863  	if v == 0 {
  6864  		v, span, checkGCTrigger = c.nextFree(spc)
  6865  	}
  6866  	x := unsafe.Pointer(v)
  6867  	if needzero && span.needzero != 0 {
  6868  		memclrNoHeapPointers(x, elemsize)
  6869  	}
  6870  
  6871  	publicationBarrier()
  6872  
  6873  	if writeBarrier.enabled {
  6874  
  6875  		gcmarknewobject(span, uintptr(x))
  6876  	} else {
  6877  
  6878  		span.freeIndexForScan = span.freeindex
  6879  	}
  6880  
  6881  	c.nextSample -= int64(elemsize)
  6882  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  6883  		profilealloc(mp, x, elemsize)
  6884  	}
  6885  	mp.mallocing = 0
  6886  	releasem(mp)
  6887  
  6888  	if checkGCTrigger {
  6889  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  6890  			gcStart(t)
  6891  		}
  6892  	}
  6893  	gp := getg()
  6894  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  6895  
  6896  		addSecret(x, size)
  6897  	}
  6898  
  6899  	if valgrindenabled {
  6900  		valgrindMalloc(x, size)
  6901  	}
  6902  
  6903  	if gcBlackenEnabled != 0 && elemsize != 0 {
  6904  		if assistG := getg().m.curg; assistG != nil {
  6905  			assistG.gcAssistBytes -= int64(elemsize - size)
  6906  		}
  6907  	}
  6908  
  6909  	if debug.malloc {
  6910  		postMallocgcDebug(x, elemsize, typ)
  6911  	}
  6912  	return x
  6913  }
  6914  
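        // mallocgcSmallNoScanSC4 is the size class 4 (32-byte) variant of
        // mallocgcSmallNoScanSC2, which carries the commentary.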
  6915  func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  6916  
  6917  	if doubleCheckMalloc {
  6918  		if gcphase == _GCmarktermination {
  6919  			throw("mallocgc called with gcphase == _GCmarktermination")
  6920  		}
  6921  	}
  6922  
  6923  	lockRankMayQueueFinalizer()
  6924  
  6925  	if debug.malloc {
  6926  		if x := preMallocgcDebug(size, typ); x != nil {
  6927  			return x
  6928  		}
  6929  	}
  6930  
  6931  	if gcBlackenEnabled != 0 {
  6932  		deductAssistCredit(size)
  6933  	}
  6934  
  6935  	const sizeclass = 4
  6936  
  6937  	const elemsize = 32
  6938  
  6939  	mp := acquirem()
  6940  	if doubleCheckMalloc {
  6941  		doubleCheckSmallNoScan(typ, mp)
  6942  	}
  6943  	mp.mallocing = 1
  6944  
  6945  	checkGCTrigger := false
  6946  	c := getMCache(mp)
  6947  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  6948  	span := c.alloc[spc]
  6949  
  6950  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  6951  
  6952  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  6953  		mp.mallocing = 0
  6954  		releasem(mp)
  6955  		x := v
  6956  		{
  6957  
  6958  			gp := getg()
  6959  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  6960  
  6961  				addSecret(x, size)
  6962  			}
  6963  
  6964  			if valgrindenabled {
  6965  				valgrindMalloc(x, size)
  6966  			}
  6967  
  6968  			if gcBlackenEnabled != 0 && elemsize != 0 {
  6969  				if assistG := getg().m.curg; assistG != nil {
  6970  					assistG.gcAssistBytes -= int64(elemsize - size)
  6971  				}
  6972  			}
  6973  
  6974  			if debug.malloc {
  6975  				postMallocgcDebug(x, elemsize, typ)
  6976  			}
  6977  			return x
  6978  		}
  6979  
  6980  	}
  6981  
  6982  	var nextFreeFastResult gclinkptr
  6983  	if span.allocCache != 0 {
  6984  		theBit := sys.TrailingZeros64(span.allocCache)
  6985  		result := span.freeindex + uint16(theBit)
  6986  		if result < span.nelems {
  6987  			freeidx := result + 1
  6988  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  6989  				span.allocCache >>= uint(theBit + 1)
  6990  				span.freeindex = freeidx
  6991  				span.allocCount++
  6992  				nextFreeFastResult = gclinkptr(uintptr(result)*32 + span.base())
  6995  			}
  6996  		}
  6997  	}
  6998  	v := nextFreeFastResult
  6999  	if v == 0 {
  7000  		v, span, checkGCTrigger = c.nextFree(spc)
  7001  	}
  7002  	x := unsafe.Pointer(v)
  7003  	if needzero && span.needzero != 0 {
  7004  		memclrNoHeapPointers(x, elemsize)
  7005  	}
  7006  
  7007  	publicationBarrier()
  7008  
  7009  	if writeBarrier.enabled {
  7010  
  7011  		gcmarknewobject(span, uintptr(x))
  7012  	} else {
  7013  
  7014  		span.freeIndexForScan = span.freeindex
  7015  	}
  7016  
  7017  	c.nextSample -= int64(elemsize)
  7018  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7019  		profilealloc(mp, x, elemsize)
  7020  	}
  7021  	mp.mallocing = 0
  7022  	releasem(mp)
  7023  
  7024  	if checkGCTrigger {
  7025  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7026  			gcStart(t)
  7027  		}
  7028  	}
  7029  	gp := getg()
  7030  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  7031  
  7032  		addSecret(x, size)
  7033  	}
  7034  
  7035  	if valgrindenabled {
  7036  		valgrindMalloc(x, size)
  7037  	}
  7038  
  7039  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7040  		if assistG := getg().m.curg; assistG != nil {
  7041  			assistG.gcAssistBytes -= int64(elemsize - size)
  7042  		}
  7043  	}
  7044  
  7045  	if debug.malloc {
  7046  		postMallocgcDebug(x, elemsize, typ)
  7047  	}
  7048  	return x
  7049  }
  7050  
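        // mallocgcSmallNoScanSC5 is the size class 5 (48-byte) variant of
        // mallocgcSmallNoScanSC2, which carries the commentary.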
  7051  func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7052  
  7053  	if doubleCheckMalloc {
  7054  		if gcphase == _GCmarktermination {
  7055  			throw("mallocgc called with gcphase == _GCmarktermination")
  7056  		}
  7057  	}
  7058  
  7059  	lockRankMayQueueFinalizer()
  7060  
  7061  	if debug.malloc {
  7062  		if x := preMallocgcDebug(size, typ); x != nil {
  7063  			return x
  7064  		}
  7065  	}
  7066  
  7067  	if gcBlackenEnabled != 0 {
  7068  		deductAssistCredit(size)
  7069  	}
  7070  
  7071  	const sizeclass = 5
  7072  
  7073  	const elemsize = 48
  7074  
  7075  	mp := acquirem()
  7076  	if doubleCheckMalloc {
  7077  		doubleCheckSmallNoScan(typ, mp)
  7078  	}
  7079  	mp.mallocing = 1
  7080  
  7081  	checkGCTrigger := false
  7082  	c := getMCache(mp)
  7083  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7084  	span := c.alloc[spc]
  7085  
  7086  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  7087  
  7088  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  7089  		mp.mallocing = 0
  7090  		releasem(mp)
  7091  		x := v
  7092  		{
  7093  
  7094  			gp := getg()
  7095  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  7096  
  7097  				addSecret(x, size)
  7098  			}
  7099  
  7100  			if valgrindenabled {
  7101  				valgrindMalloc(x, size)
  7102  			}
  7103  
  7104  			if gcBlackenEnabled != 0 && elemsize != 0 {
  7105  				if assistG := getg().m.curg; assistG != nil {
  7106  					assistG.gcAssistBytes -= int64(elemsize - size)
  7107  				}
  7108  			}
  7109  
  7110  			if debug.malloc {
  7111  				postMallocgcDebug(x, elemsize, typ)
  7112  			}
  7113  			return x
  7114  		}
  7115  
  7116  	}
  7117  
  7118  	var nextFreeFastResult gclinkptr
  7119  	if span.allocCache != 0 {
  7120  		theBit := sys.TrailingZeros64(span.allocCache)
  7121  		result := span.freeindex + uint16(theBit)
  7122  		if result < span.nelems {
  7123  			freeidx := result + 1
  7124  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7125  				span.allocCache >>= uint(theBit + 1)
  7126  				span.freeindex = freeidx
  7127  				span.allocCount++
  7128  				nextFreeFastResult = gclinkptr(uintptr(result)*48 + span.base())
  7131  			}
  7132  		}
  7133  	}
  7134  	v := nextFreeFastResult
  7135  	if v == 0 {
  7136  		v, span, checkGCTrigger = c.nextFree(spc)
  7137  	}
  7138  	x := unsafe.Pointer(v)
  7139  	if needzero && span.needzero != 0 {
  7140  		memclrNoHeapPointers(x, elemsize)
  7141  	}
  7142  
  7143  	publicationBarrier()
  7144  
  7145  	if writeBarrier.enabled {
  7146  
  7147  		gcmarknewobject(span, uintptr(x))
  7148  	} else {
  7149  
  7150  		span.freeIndexForScan = span.freeindex
  7151  	}
  7152  
  7153  	c.nextSample -= int64(elemsize)
  7154  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7155  		profilealloc(mp, x, elemsize)
  7156  	}
  7157  	mp.mallocing = 0
  7158  	releasem(mp)
  7159  
  7160  	if checkGCTrigger {
  7161  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7162  			gcStart(t)
  7163  		}
  7164  	}
  7165  	gp := getg()
  7166  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  7167  
  7168  		addSecret(x, size)
  7169  	}
  7170  
  7171  	if valgrindenabled {
  7172  		valgrindMalloc(x, size)
  7173  	}
  7174  
  7175  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7176  		if assistG := getg().m.curg; assistG != nil {
  7177  			assistG.gcAssistBytes -= int64(elemsize - size)
  7178  		}
  7179  	}
  7180  
  7181  	if debug.malloc {
  7182  		postMallocgcDebug(x, elemsize, typ)
  7183  	}
  7184  	return x
  7185  }
  7186  
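        // mallocgcSmallNoScanSC6 is the size class 6 (64-byte) variant of
        // mallocgcSmallNoScanSC2, which carries the commentary.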
  7187  func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7188  
  7189  	if doubleCheckMalloc {
  7190  		if gcphase == _GCmarktermination {
  7191  			throw("mallocgc called with gcphase == _GCmarktermination")
  7192  		}
  7193  	}
  7194  
  7195  	lockRankMayQueueFinalizer()
  7196  
  7197  	if debug.malloc {
  7198  		if x := preMallocgcDebug(size, typ); x != nil {
  7199  			return x
  7200  		}
  7201  	}
  7202  
  7203  	if gcBlackenEnabled != 0 {
  7204  		deductAssistCredit(size)
  7205  	}
  7206  
  7207  	const sizeclass = 6
  7208  
  7209  	const elemsize = 64
  7210  
  7211  	mp := acquirem()
  7212  	if doubleCheckMalloc {
  7213  		doubleCheckSmallNoScan(typ, mp)
  7214  	}
  7215  	mp.mallocing = 1
  7216  
  7217  	checkGCTrigger := false
  7218  	c := getMCache(mp)
  7219  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7220  	span := c.alloc[spc]
  7221  
  7222  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  7223  
  7224  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  7225  		mp.mallocing = 0
  7226  		releasem(mp)
  7227  		x := v
  7228  		{
  7229  
  7230  			gp := getg()
  7231  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  7232  
  7233  				addSecret(x, size)
  7234  			}
  7235  
  7236  			if valgrindenabled {
  7237  				valgrindMalloc(x, size)
  7238  			}
  7239  
  7240  			if gcBlackenEnabled != 0 && elemsize != 0 {
  7241  				if assistG := getg().m.curg; assistG != nil {
  7242  					assistG.gcAssistBytes -= int64(elemsize - size)
  7243  				}
  7244  			}
  7245  
  7246  			if debug.malloc {
  7247  				postMallocgcDebug(x, elemsize, typ)
  7248  			}
  7249  			return x
  7250  		}
  7251  
  7252  	}
  7253  
  7254  	var nextFreeFastResult gclinkptr
  7255  	if span.allocCache != 0 {
  7256  		theBit := sys.TrailingZeros64(span.allocCache)
  7257  		result := span.freeindex + uint16(theBit)
  7258  		if result < span.nelems {
  7259  			freeidx := result + 1
  7260  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7261  				span.allocCache >>= uint(theBit + 1)
  7262  				span.freeindex = freeidx
  7263  				span.allocCount++
  7264  				nextFreeFastResult = gclinkptr(uintptr(result)*64 + span.base())
  7267  			}
  7268  		}
  7269  	}
  7270  	v := nextFreeFastResult
  7271  	if v == 0 {
  7272  		v, span, checkGCTrigger = c.nextFree(spc)
  7273  	}
  7274  	x := unsafe.Pointer(v)
  7275  	if needzero && span.needzero != 0 {
  7276  		memclrNoHeapPointers(x, elemsize)
  7277  	}
  7278  
  7279  	publicationBarrier()
  7280  
  7281  	if writeBarrier.enabled {
  7282  
  7283  		gcmarknewobject(span, uintptr(x))
  7284  	} else {
  7285  
  7286  		span.freeIndexForScan = span.freeindex
  7287  	}
  7288  
  7289  	c.nextSample -= int64(elemsize)
  7290  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7291  		profilealloc(mp, x, elemsize)
  7292  	}
  7293  	mp.mallocing = 0
  7294  	releasem(mp)
  7295  
  7296  	if checkGCTrigger {
  7297  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7298  			gcStart(t)
  7299  		}
  7300  	}
  7301  	gp := getg()
  7302  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  7303  
  7304  		addSecret(x, size)
  7305  	}
  7306  
  7307  	if valgrindenabled {
  7308  		valgrindMalloc(x, size)
  7309  	}
  7310  
  7311  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7312  		if assistG := getg().m.curg; assistG != nil {
  7313  			assistG.gcAssistBytes -= int64(elemsize - size)
  7314  		}
  7315  	}
  7316  
  7317  	if debug.malloc {
  7318  		postMallocgcDebug(x, elemsize, typ)
  7319  	}
  7320  	return x
  7321  }
  7322  
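        // mallocgcSmallNoScanSC7 is the size class 7 (80-byte) variant of
        // mallocgcSmallNoScanSC2, which carries the commentary.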
  7323  func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7324  
  7325  	if doubleCheckMalloc {
  7326  		if gcphase == _GCmarktermination {
  7327  			throw("mallocgc called with gcphase == _GCmarktermination")
  7328  		}
  7329  	}
  7330  
  7331  	lockRankMayQueueFinalizer()
  7332  
  7333  	if debug.malloc {
  7334  		if x := preMallocgcDebug(size, typ); x != nil {
  7335  			return x
  7336  		}
  7337  	}
  7338  
  7339  	if gcBlackenEnabled != 0 {
  7340  		deductAssistCredit(size)
  7341  	}
  7342  
  7343  	const sizeclass = 7
  7344  
  7345  	const elemsize = 80
  7346  
  7347  	mp := acquirem()
  7348  	if doubleCheckMalloc {
  7349  		doubleCheckSmallNoScan(typ, mp)
  7350  	}
  7351  	mp.mallocing = 1
  7352  
  7353  	checkGCTrigger := false
  7354  	c := getMCache(mp)
  7355  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7356  	span := c.alloc[spc]
  7357  
  7358  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  7359  
  7360  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  7361  		mp.mallocing = 0
  7362  		releasem(mp)
  7363  		x := v
  7364  		{
  7365  
  7366  			gp := getg()
  7367  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  7368  
  7369  				addSecret(x, size)
  7370  			}
  7371  
  7372  			if valgrindenabled {
  7373  				valgrindMalloc(x, size)
  7374  			}
  7375  
  7376  			if gcBlackenEnabled != 0 && elemsize != 0 {
  7377  				if assistG := getg().m.curg; assistG != nil {
  7378  					assistG.gcAssistBytes -= int64(elemsize - size)
  7379  				}
  7380  			}
  7381  
  7382  			if debug.malloc {
  7383  				postMallocgcDebug(x, elemsize, typ)
  7384  			}
  7385  			return x
  7386  		}
  7387  
  7388  	}
  7389  
  7390  	var nextFreeFastResult gclinkptr
  7391  	if span.allocCache != 0 {
  7392  		theBit := sys.TrailingZeros64(span.allocCache)
  7393  		result := span.freeindex + uint16(theBit)
  7394  		if result < span.nelems {
  7395  			freeidx := result + 1
  7396  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7397  				span.allocCache >>= uint(theBit + 1)
  7398  				span.freeindex = freeidx
  7399  				span.allocCount++
  7400  				nextFreeFastResult = gclinkptr(uintptr(result)*80 + span.base())
  7403  			}
  7404  		}
  7405  	}
  7406  	v := nextFreeFastResult
  7407  	if v == 0 {
  7408  		v, span, checkGCTrigger = c.nextFree(spc)
  7409  	}
  7410  	x := unsafe.Pointer(v)
  7411  	if needzero && span.needzero != 0 {
  7412  		memclrNoHeapPointers(x, elemsize)
  7413  	}
  7414  
  7415  	publicationBarrier()
  7416  
  7417  	if writeBarrier.enabled {
  7418  
  7419  		gcmarknewobject(span, uintptr(x))
  7420  	} else {
  7421  
  7422  		span.freeIndexForScan = span.freeindex
  7423  	}
  7424  
  7425  	c.nextSample -= int64(elemsize)
  7426  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7427  		profilealloc(mp, x, elemsize)
  7428  	}
  7429  	mp.mallocing = 0
  7430  	releasem(mp)
  7431  
  7432  	if checkGCTrigger {
  7433  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7434  			gcStart(t)
  7435  		}
  7436  	}
  7437  	gp := getg()
  7438  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  7439  
  7440  		addSecret(x, size)
  7441  	}
  7442  
  7443  	if valgrindenabled {
  7444  		valgrindMalloc(x, size)
  7445  	}
  7446  
  7447  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7448  		if assistG := getg().m.curg; assistG != nil {
  7449  			assistG.gcAssistBytes -= int64(elemsize - size)
  7450  		}
  7451  	}
  7452  
  7453  	if debug.malloc {
  7454  		postMallocgcDebug(x, elemsize, typ)
  7455  	}
  7456  	return x
  7457  }
  7458  
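        // mallocgcSmallNoScanSC8 is the size class 8 (96-byte) variant of
        // mallocgcSmallNoScanSC2, which carries the commentary.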
  7459  func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7460  
  7461  	if doubleCheckMalloc {
  7462  		if gcphase == _GCmarktermination {
  7463  			throw("mallocgc called with gcphase == _GCmarktermination")
  7464  		}
  7465  	}
  7466  
  7467  	lockRankMayQueueFinalizer()
  7468  
  7469  	if debug.malloc {
  7470  		if x := preMallocgcDebug(size, typ); x != nil {
  7471  			return x
  7472  		}
  7473  	}
  7474  
  7475  	if gcBlackenEnabled != 0 {
  7476  		deductAssistCredit(size)
  7477  	}
  7478  
  7479  	const sizeclass = 8
  7480  
  7481  	const elemsize = 96
  7482  
  7483  	mp := acquirem()
  7484  	if doubleCheckMalloc {
  7485  		doubleCheckSmallNoScan(typ, mp)
  7486  	}
  7487  	mp.mallocing = 1
  7488  
  7489  	checkGCTrigger := false
  7490  	c := getMCache(mp)
  7491  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7492  	span := c.alloc[spc]
  7493  
  7494  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  7495  
  7496  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  7497  		mp.mallocing = 0
  7498  		releasem(mp)
  7499  		x := v
  7500  		{
  7501  
  7502  			gp := getg()
  7503  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  7504  
  7505  				addSecret(x, size)
  7506  			}
  7507  
  7508  			if valgrindenabled {
  7509  				valgrindMalloc(x, size)
  7510  			}
  7511  
  7512  			if gcBlackenEnabled != 0 && elemsize != 0 {
  7513  				if assistG := getg().m.curg; assistG != nil {
  7514  					assistG.gcAssistBytes -= int64(elemsize - size)
  7515  				}
  7516  			}
  7517  
  7518  			if debug.malloc {
  7519  				postMallocgcDebug(x, elemsize, typ)
  7520  			}
  7521  			return x
  7522  		}
  7523  
  7524  	}
  7525  
  7526  	var nextFreeFastResult gclinkptr
  7527  	if span.allocCache != 0 {
  7528  		theBit := sys.TrailingZeros64(span.allocCache)
  7529  		result := span.freeindex + uint16(theBit)
  7530  		if result < span.nelems {
  7531  			freeidx := result + 1
  7532  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7533  				span.allocCache >>= uint(theBit + 1)
  7534  				span.freeindex = freeidx
  7535  				span.allocCount++
  7536  				nextFreeFastResult = gclinkptr(uintptr(result)*96 + span.base())
  7539  			}
  7540  		}
  7541  	}
  7542  	v := nextFreeFastResult
  7543  	if v == 0 {
  7544  		v, span, checkGCTrigger = c.nextFree(spc)
  7545  	}
  7546  	x := unsafe.Pointer(v)
  7547  	if needzero && span.needzero != 0 {
  7548  		memclrNoHeapPointers(x, elemsize)
  7549  	}
  7550  
  7551  	publicationBarrier()
  7552  
  7553  	if writeBarrier.enabled {
  7554  
  7555  		gcmarknewobject(span, uintptr(x))
  7556  	} else {
  7557  
  7558  		span.freeIndexForScan = span.freeindex
  7559  	}
  7560  
  7561  	c.nextSample -= int64(elemsize)
  7562  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7563  		profilealloc(mp, x, elemsize)
  7564  	}
  7565  	mp.mallocing = 0
  7566  	releasem(mp)
  7567  
  7568  	if checkGCTrigger {
  7569  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7570  			gcStart(t)
  7571  		}
  7572  	}
  7573  	gp := getg()
  7574  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  7575  
  7576  		addSecret(x, size)
  7577  	}
  7578  
  7579  	if valgrindenabled {
  7580  		valgrindMalloc(x, size)
  7581  	}
  7582  
  7583  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7584  		if assistG := getg().m.curg; assistG != nil {
  7585  			assistG.gcAssistBytes -= int64(elemsize - size)
  7586  		}
  7587  	}
  7588  
  7589  	if debug.malloc {
  7590  		postMallocgcDebug(x, elemsize, typ)
  7591  	}
  7592  	return x
  7593  }
  7594  
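        // mallocgcSmallNoScanSC9 is the size class 9 (112-byte) variant of
        // mallocgcSmallNoScanSC2, which carries the commentary.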
  7595  func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7596  
  7597  	if doubleCheckMalloc {
  7598  		if gcphase == _GCmarktermination {
  7599  			throw("mallocgc called with gcphase == _GCmarktermination")
  7600  		}
  7601  	}
  7602  
  7603  	lockRankMayQueueFinalizer()
  7604  
  7605  	if debug.malloc {
  7606  		if x := preMallocgcDebug(size, typ); x != nil {
  7607  			return x
  7608  		}
  7609  	}
  7610  
  7611  	if gcBlackenEnabled != 0 {
  7612  		deductAssistCredit(size)
  7613  	}
  7614  
  7615  	const sizeclass = 9
  7616  
  7617  	const elemsize = 112
  7618  
  7619  	mp := acquirem()
  7620  	if doubleCheckMalloc {
  7621  		doubleCheckSmallNoScan(typ, mp)
  7622  	}
  7623  	mp.mallocing = 1
  7624  
  7625  	checkGCTrigger := false
  7626  	c := getMCache(mp)
  7627  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7628  	span := c.alloc[spc]
  7629  
  7630  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  7631  
  7632  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  7633  		mp.mallocing = 0
  7634  		releasem(mp)
  7635  		x := v
  7636  		{
  7637  
  7638  			gp := getg()
  7639  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  7640  
  7641  				addSecret(x, size)
  7642  			}
  7643  
  7644  			if valgrindenabled {
  7645  				valgrindMalloc(x, size)
  7646  			}
  7647  
  7648  			if gcBlackenEnabled != 0 && elemsize != 0 {
  7649  				if assistG := getg().m.curg; assistG != nil {
  7650  					assistG.gcAssistBytes -= int64(elemsize - size)
  7651  				}
  7652  			}
  7653  
  7654  			if debug.malloc {
  7655  				postMallocgcDebug(x, elemsize, typ)
  7656  			}
  7657  			return x
  7658  		}
  7659  
  7660  	}
  7661  
  7662  	var nextFreeFastResult gclinkptr
  7663  	if span.allocCache != 0 {
  7664  		theBit := sys.TrailingZeros64(span.allocCache)
  7665  		result := span.freeindex + uint16(theBit)
  7666  		if result < span.nelems {
  7667  			freeidx := result + 1
  7668  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7669  				span.allocCache >>= uint(theBit + 1)
  7670  				span.freeindex = freeidx
  7671  				span.allocCount++
  7672  				nextFreeFastResult = gclinkptr(uintptr(result)*112 + span.base())
  7675  			}
  7676  		}
  7677  	}
  7678  	v := nextFreeFastResult
  7679  	if v == 0 {
  7680  		v, span, checkGCTrigger = c.nextFree(spc)
  7681  	}
  7682  	x := unsafe.Pointer(v)
  7683  	if needzero && span.needzero != 0 {
  7684  		memclrNoHeapPointers(x, elemsize)
  7685  	}
  7686  
  7687  	publicationBarrier()
  7688  
  7689  	if writeBarrier.enabled {
  7690  
  7691  		gcmarknewobject(span, uintptr(x))
  7692  	} else {
  7693  
  7694  		span.freeIndexForScan = span.freeindex
  7695  	}
  7696  
  7697  	c.nextSample -= int64(elemsize)
  7698  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7699  		profilealloc(mp, x, elemsize)
  7700  	}
  7701  	mp.mallocing = 0
  7702  	releasem(mp)
  7703  
  7704  	if checkGCTrigger {
  7705  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7706  			gcStart(t)
  7707  		}
  7708  	}
  7709  	gp := getg()
  7710  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  7711  
  7712  		addSecret(x, size)
  7713  	}
  7714  
  7715  	if valgrindenabled {
  7716  		valgrindMalloc(x, size)
  7717  	}
  7718  
  7719  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7720  		if assistG := getg().m.curg; assistG != nil {
  7721  			assistG.gcAssistBytes -= int64(elemsize - size)
  7722  		}
  7723  	}
  7724  
  7725  	if debug.malloc {
  7726  		postMallocgcDebug(x, elemsize, typ)
  7727  	}
  7728  	return x
  7729  }
  7730  
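        // The size-class variants below are stamped from the same template as
        // mallocgcSmallNoScanSC9 above; only sizeclass, elemsize, and the
        // element size inlined into the fast-path address arithmetic differ.
        //
        // A sketch of that shared fast path, with hypothetical values
        // freeindex=3, allocCache=0b100, elemsize=112:
        //
        //	theBit := sys.TrailingZeros64(span.allocCache) // 2
        //	result := span.freeindex + uint16(theBit)      // object index 5
        //	freeidx := result + 1                          // 6; not a cache-refill boundary
        //	v := gclinkptr(uintptr(result)*112 + span.base())
        //
        // allocCache is then shifted right by theBit+1, so the next allocation
        // resumes scanning at object index 6.

        // mallocgcSmallNoScanSC10 allocates noscan objects of size class 10 (128-byte elements).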
  7731  func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7732  
  7733  	if doubleCheckMalloc {
  7734  		if gcphase == _GCmarktermination {
  7735  			throw("mallocgc called with gcphase == _GCmarktermination")
  7736  		}
  7737  	}
  7738  
  7739  	lockRankMayQueueFinalizer()
  7740  
  7741  	if debug.malloc {
  7742  		if x := preMallocgcDebug(size, typ); x != nil {
  7743  			return x
  7744  		}
  7745  	}
  7746  
  7747  	if gcBlackenEnabled != 0 {
  7748  		deductAssistCredit(size)
  7749  	}
  7750  
  7751  	const sizeclass = 10
  7752  
  7753  	const elemsize = 128
  7754  
  7755  	mp := acquirem()
  7756  	if doubleCheckMalloc {
  7757  		doubleCheckSmallNoScan(typ, mp)
  7758  	}
  7759  	mp.mallocing = 1
  7760  
  7761  	checkGCTrigger := false
  7762  	c := getMCache(mp)
  7763  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7764  	span := c.alloc[spc]
  7765  
  7766  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  7767  
  7768  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  7769  		mp.mallocing = 0
  7770  		releasem(mp)
  7771  		x := v
  7772  		{
  7773  
  7774  			gp := getg()
  7775  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  7776  
  7777  				addSecret(x, size)
  7778  			}
  7779  
  7780  			if valgrindenabled {
  7781  				valgrindMalloc(x, size)
  7782  			}
  7783  
  7784  			if gcBlackenEnabled != 0 && elemsize != 0 {
  7785  				if assistG := getg().m.curg; assistG != nil {
  7786  					assistG.gcAssistBytes -= int64(elemsize - size)
  7787  				}
  7788  			}
  7789  
  7790  			if debug.malloc {
  7791  				postMallocgcDebug(x, elemsize, typ)
  7792  			}
  7793  			return x
  7794  		}
  7795  
  7796  	}
  7797  
  7798  	var nextFreeFastResult gclinkptr
  7799  	if span.allocCache != 0 {
  7800  		theBit := sys.TrailingZeros64(span.allocCache)
  7801  		result := span.freeindex + uint16(theBit)
  7802  		if result < span.nelems {
  7803  			freeidx := result + 1
  7804  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7805  				span.allocCache >>= uint(theBit + 1)
  7806  				span.freeindex = freeidx
  7807  				span.allocCount++
  7808  				nextFreeFastResult = gclinkptr(uintptr(result)*128 + span.base())
  7811  			}
  7812  		}
  7813  	}
  7814  	v := nextFreeFastResult
  7815  	if v == 0 {
  7816  		v, span, checkGCTrigger = c.nextFree(spc)
  7817  	}
  7818  	x := unsafe.Pointer(v)
  7819  	if needzero && span.needzero != 0 {
  7820  		memclrNoHeapPointers(x, elemsize)
  7821  	}
  7822  
  7823  	publicationBarrier()
  7824  
  7825  	if writeBarrier.enabled {
  7826  
  7827  		gcmarknewobject(span, uintptr(x))
  7828  	} else {
  7829  
  7830  		span.freeIndexForScan = span.freeindex
  7831  	}
  7832  
  7833  	c.nextSample -= int64(elemsize)
  7834  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7835  		profilealloc(mp, x, elemsize)
  7836  	}
  7837  	mp.mallocing = 0
  7838  	releasem(mp)
  7839  
  7840  	if checkGCTrigger {
  7841  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7842  			gcStart(t)
  7843  		}
  7844  	}
  7845  	gp := getg()
  7846  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  7847  
  7848  		addSecret(x, size)
  7849  	}
  7850  
  7851  	if valgrindenabled {
  7852  		valgrindMalloc(x, size)
  7853  	}
  7854  
  7855  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7856  		if assistG := getg().m.curg; assistG != nil {
  7857  			assistG.gcAssistBytes -= int64(elemsize - size)
  7858  		}
  7859  	}
  7860  
  7861  	if debug.malloc {
  7862  		postMallocgcDebug(x, elemsize, typ)
  7863  	}
  7864  	return x
  7865  }
  7866  
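        // mallocgcSmallNoScanSC11 allocates noscan objects of size class 11 (144-byte elements); same template as SC9.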
  7867  func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  7868  
  7869  	if doubleCheckMalloc {
  7870  		if gcphase == _GCmarktermination {
  7871  			throw("mallocgc called with gcphase == _GCmarktermination")
  7872  		}
  7873  	}
  7874  
  7875  	lockRankMayQueueFinalizer()
  7876  
  7877  	if debug.malloc {
  7878  		if x := preMallocgcDebug(size, typ); x != nil {
  7879  			return x
  7880  		}
  7881  	}
  7882  
  7883  	if gcBlackenEnabled != 0 {
  7884  		deductAssistCredit(size)
  7885  	}
  7886  
  7887  	const sizeclass = 11
  7888  
  7889  	const elemsize = 144
  7890  
  7891  	mp := acquirem()
  7892  	if doubleCheckMalloc {
  7893  		doubleCheckSmallNoScan(typ, mp)
  7894  	}
  7895  	mp.mallocing = 1
  7896  
  7897  	checkGCTrigger := false
  7898  	c := getMCache(mp)
  7899  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  7900  	span := c.alloc[spc]
  7901  
  7902  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  7903  
  7904  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  7905  		mp.mallocing = 0
  7906  		releasem(mp)
  7907  		x := v
  7908  		{
  7909  
  7910  			gp := getg()
  7911  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  7912  
  7913  				addSecret(x, size)
  7914  			}
  7915  
  7916  			if valgrindenabled {
  7917  				valgrindMalloc(x, size)
  7918  			}
  7919  
  7920  			if gcBlackenEnabled != 0 && elemsize != 0 {
  7921  				if assistG := getg().m.curg; assistG != nil {
  7922  					assistG.gcAssistBytes -= int64(elemsize - size)
  7923  				}
  7924  			}
  7925  
  7926  			if debug.malloc {
  7927  				postMallocgcDebug(x, elemsize, typ)
  7928  			}
  7929  			return x
  7930  		}
  7931  
  7932  	}
  7933  
  7934  	var nextFreeFastResult gclinkptr
  7935  	if span.allocCache != 0 {
  7936  		theBit := sys.TrailingZeros64(span.allocCache)
  7937  		result := span.freeindex + uint16(theBit)
  7938  		if result < span.nelems {
  7939  			freeidx := result + 1
  7940  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  7941  				span.allocCache >>= uint(theBit + 1)
  7942  				span.freeindex = freeidx
  7943  				span.allocCount++
  7944  				nextFreeFastResult = gclinkptr(uintptr(result)*144 + span.base())
  7947  			}
  7948  		}
  7949  	}
  7950  	v := nextFreeFastResult
  7951  	if v == 0 {
  7952  		v, span, checkGCTrigger = c.nextFree(spc)
  7953  	}
  7954  	x := unsafe.Pointer(v)
  7955  	if needzero && span.needzero != 0 {
  7956  		memclrNoHeapPointers(x, elemsize)
  7957  	}
  7958  
  7959  	publicationBarrier()
  7960  
  7961  	if writeBarrier.enabled {
  7962  
  7963  		gcmarknewobject(span, uintptr(x))
  7964  	} else {
  7965  
  7966  		span.freeIndexForScan = span.freeindex
  7967  	}
  7968  
  7969  	c.nextSample -= int64(elemsize)
  7970  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  7971  		profilealloc(mp, x, elemsize)
  7972  	}
  7973  	mp.mallocing = 0
  7974  	releasem(mp)
  7975  
  7976  	if checkGCTrigger {
  7977  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  7978  			gcStart(t)
  7979  		}
  7980  	}
  7981  	gp := getg()
  7982  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  7983  
  7984  		addSecret(x, size)
  7985  	}
  7986  
  7987  	if valgrindenabled {
  7988  		valgrindMalloc(x, size)
  7989  	}
  7990  
  7991  	if gcBlackenEnabled != 0 && elemsize != 0 {
  7992  		if assistG := getg().m.curg; assistG != nil {
  7993  			assistG.gcAssistBytes -= int64(elemsize - size)
  7994  		}
  7995  	}
  7996  
  7997  	if debug.malloc {
  7998  		postMallocgcDebug(x, elemsize, typ)
  7999  	}
  8000  	return x
  8001  }
  8002  
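        // mallocgcSmallNoScanSC12 allocates noscan objects of size class 12 (160-byte elements); same template as SC9.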
  8003  func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8004  
  8005  	if doubleCheckMalloc {
  8006  		if gcphase == _GCmarktermination {
  8007  			throw("mallocgc called with gcphase == _GCmarktermination")
  8008  		}
  8009  	}
  8010  
  8011  	lockRankMayQueueFinalizer()
  8012  
  8013  	if debug.malloc {
  8014  		if x := preMallocgcDebug(size, typ); x != nil {
  8015  			return x
  8016  		}
  8017  	}
  8018  
  8019  	if gcBlackenEnabled != 0 {
  8020  		deductAssistCredit(size)
  8021  	}
  8022  
  8023  	const sizeclass = 12
  8024  
  8025  	const elemsize = 160
  8026  
  8027  	mp := acquirem()
  8028  	if doubleCheckMalloc {
  8029  		doubleCheckSmallNoScan(typ, mp)
  8030  	}
  8031  	mp.mallocing = 1
  8032  
  8033  	checkGCTrigger := false
  8034  	c := getMCache(mp)
  8035  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8036  	span := c.alloc[spc]
  8037  
  8038  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  8039  
  8040  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  8041  		mp.mallocing = 0
  8042  		releasem(mp)
  8043  		x := v
  8044  		{
  8045  
  8046  			gp := getg()
  8047  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  8048  
  8049  				addSecret(x, size)
  8050  			}
  8051  
  8052  			if valgrindenabled {
  8053  				valgrindMalloc(x, size)
  8054  			}
  8055  
  8056  			if gcBlackenEnabled != 0 && elemsize != 0 {
  8057  				if assistG := getg().m.curg; assistG != nil {
  8058  					assistG.gcAssistBytes -= int64(elemsize - size)
  8059  				}
  8060  			}
  8061  
  8062  			if debug.malloc {
  8063  				postMallocgcDebug(x, elemsize, typ)
  8064  			}
  8065  			return x
  8066  		}
  8067  
  8068  	}
  8069  
  8070  	var nextFreeFastResult gclinkptr
  8071  	if span.allocCache != 0 {
  8072  		theBit := sys.TrailingZeros64(span.allocCache)
  8073  		result := span.freeindex + uint16(theBit)
  8074  		if result < span.nelems {
  8075  			freeidx := result + 1
  8076  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  8077  				span.allocCache >>= uint(theBit + 1)
  8078  				span.freeindex = freeidx
  8079  				span.allocCount++
  8080  				nextFreeFastResult = gclinkptr(uintptr(result)*160 + span.base())
  8083  			}
  8084  		}
  8085  	}
  8086  	v := nextFreeFastResult
  8087  	if v == 0 {
  8088  		v, span, checkGCTrigger = c.nextFree(spc)
  8089  	}
  8090  	x := unsafe.Pointer(v)
  8091  	if needzero && span.needzero != 0 {
  8092  		memclrNoHeapPointers(x, elemsize)
  8093  	}
  8094  
  8095  	publicationBarrier()
  8096  
  8097  	if writeBarrier.enabled {
  8098  
  8099  		gcmarknewobject(span, uintptr(x))
  8100  	} else {
  8101  
  8102  		span.freeIndexForScan = span.freeindex
  8103  	}
  8104  
  8105  	c.nextSample -= int64(elemsize)
  8106  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  8107  		profilealloc(mp, x, elemsize)
  8108  	}
  8109  	mp.mallocing = 0
  8110  	releasem(mp)
  8111  
  8112  	if checkGCTrigger {
  8113  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  8114  			gcStart(t)
  8115  		}
  8116  	}
  8117  	gp := getg()
  8118  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  8119  
  8120  		addSecret(x, size)
  8121  	}
  8122  
  8123  	if valgrindenabled {
  8124  		valgrindMalloc(x, size)
  8125  	}
  8126  
  8127  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8128  		if assistG := getg().m.curg; assistG != nil {
  8129  			assistG.gcAssistBytes -= int64(elemsize - size)
  8130  		}
  8131  	}
  8132  
  8133  	if debug.malloc {
  8134  		postMallocgcDebug(x, elemsize, typ)
  8135  	}
  8136  	return x
  8137  }
  8138  
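        // mallocgcSmallNoScanSC13 allocates noscan objects of size class 13 (176-byte elements); same template as SC9.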
  8139  func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8140  
  8141  	if doubleCheckMalloc {
  8142  		if gcphase == _GCmarktermination {
  8143  			throw("mallocgc called with gcphase == _GCmarktermination")
  8144  		}
  8145  	}
  8146  
  8147  	lockRankMayQueueFinalizer()
  8148  
  8149  	if debug.malloc {
  8150  		if x := preMallocgcDebug(size, typ); x != nil {
  8151  			return x
  8152  		}
  8153  	}
  8154  
  8155  	if gcBlackenEnabled != 0 {
  8156  		deductAssistCredit(size)
  8157  	}
  8158  
  8159  	const sizeclass = 13
  8160  
  8161  	const elemsize = 176
  8162  
  8163  	mp := acquirem()
  8164  	if doubleCheckMalloc {
  8165  		doubleCheckSmallNoScan(typ, mp)
  8166  	}
  8167  	mp.mallocing = 1
  8168  
  8169  	checkGCTrigger := false
  8170  	c := getMCache(mp)
  8171  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8172  	span := c.alloc[spc]
  8173  
  8174  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  8175  
  8176  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  8177  		mp.mallocing = 0
  8178  		releasem(mp)
  8179  		x := v
  8180  		{
  8181  
  8182  			gp := getg()
  8183  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  8184  
  8185  				addSecret(x, size)
  8186  			}
  8187  
  8188  			if valgrindenabled {
  8189  				valgrindMalloc(x, size)
  8190  			}
  8191  
  8192  			if gcBlackenEnabled != 0 && elemsize != 0 {
  8193  				if assistG := getg().m.curg; assistG != nil {
  8194  					assistG.gcAssistBytes -= int64(elemsize - size)
  8195  				}
  8196  			}
  8197  
  8198  			if debug.malloc {
  8199  				postMallocgcDebug(x, elemsize, typ)
  8200  			}
  8201  			return x
  8202  		}
  8203  
  8204  	}
  8205  
  8206  	var nextFreeFastResult gclinkptr
  8207  	if span.allocCache != 0 {
  8208  		theBit := sys.TrailingZeros64(span.allocCache)
  8209  		result := span.freeindex + uint16(theBit)
  8210  		if result < span.nelems {
  8211  			freeidx := result + 1
  8212  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  8213  				span.allocCache >>= uint(theBit + 1)
  8214  				span.freeindex = freeidx
  8215  				span.allocCount++
  8216  				nextFreeFastResult = gclinkptr(uintptr(result)*176 + span.base())
  8219  			}
  8220  		}
  8221  	}
  8222  	v := nextFreeFastResult
  8223  	if v == 0 {
  8224  		v, span, checkGCTrigger = c.nextFree(spc)
  8225  	}
  8226  	x := unsafe.Pointer(v)
  8227  	if needzero && span.needzero != 0 {
  8228  		memclrNoHeapPointers(x, elemsize)
  8229  	}
  8230  
  8231  	publicationBarrier()
  8232  
  8233  	if writeBarrier.enabled {
  8234  
  8235  		gcmarknewobject(span, uintptr(x))
  8236  	} else {
  8237  
  8238  		span.freeIndexForScan = span.freeindex
  8239  	}
  8240  
  8241  	c.nextSample -= int64(elemsize)
  8242  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  8243  		profilealloc(mp, x, elemsize)
  8244  	}
  8245  	mp.mallocing = 0
  8246  	releasem(mp)
  8247  
  8248  	if checkGCTrigger {
  8249  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  8250  			gcStart(t)
  8251  		}
  8252  	}
  8253  	gp := getg()
  8254  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  8255  
  8256  		addSecret(x, size)
  8257  	}
  8258  
  8259  	if valgrindenabled {
  8260  		valgrindMalloc(x, size)
  8261  	}
  8262  
  8263  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8264  		if assistG := getg().m.curg; assistG != nil {
  8265  			assistG.gcAssistBytes -= int64(elemsize - size)
  8266  		}
  8267  	}
  8268  
  8269  	if debug.malloc {
  8270  		postMallocgcDebug(x, elemsize, typ)
  8271  	}
  8272  	return x
  8273  }
  8274  
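        // mallocgcSmallNoScanSC14 allocates noscan objects of size class 14 (192-byte elements); same template as SC9.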
  8275  func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8276  
  8277  	if doubleCheckMalloc {
  8278  		if gcphase == _GCmarktermination {
  8279  			throw("mallocgc called with gcphase == _GCmarktermination")
  8280  		}
  8281  	}
  8282  
  8283  	lockRankMayQueueFinalizer()
  8284  
  8285  	if debug.malloc {
  8286  		if x := preMallocgcDebug(size, typ); x != nil {
  8287  			return x
  8288  		}
  8289  	}
  8290  
  8291  	if gcBlackenEnabled != 0 {
  8292  		deductAssistCredit(size)
  8293  	}
  8294  
  8295  	const sizeclass = 14
  8296  
  8297  	const elemsize = 192
  8298  
  8299  	mp := acquirem()
  8300  	if doubleCheckMalloc {
  8301  		doubleCheckSmallNoScan(typ, mp)
  8302  	}
  8303  	mp.mallocing = 1
  8304  
  8305  	checkGCTrigger := false
  8306  	c := getMCache(mp)
  8307  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8308  	span := c.alloc[spc]
  8309  
  8310  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  8311  
  8312  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  8313  		mp.mallocing = 0
  8314  		releasem(mp)
  8315  		x := v
  8316  		{
  8317  
  8318  			gp := getg()
  8319  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  8320  
  8321  				addSecret(x, size)
  8322  			}
  8323  
  8324  			if valgrindenabled {
  8325  				valgrindMalloc(x, size)
  8326  			}
  8327  
  8328  			if gcBlackenEnabled != 0 && elemsize != 0 {
  8329  				if assistG := getg().m.curg; assistG != nil {
  8330  					assistG.gcAssistBytes -= int64(elemsize - size)
  8331  				}
  8332  			}
  8333  
  8334  			if debug.malloc {
  8335  				postMallocgcDebug(x, elemsize, typ)
  8336  			}
  8337  			return x
  8338  		}
  8339  
  8340  	}
  8341  
  8342  	var nextFreeFastResult gclinkptr
  8343  	if span.allocCache != 0 {
  8344  		theBit := sys.TrailingZeros64(span.allocCache)
  8345  		result := span.freeindex + uint16(theBit)
  8346  		if result < span.nelems {
  8347  			freeidx := result + 1
  8348  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  8349  				span.allocCache >>= uint(theBit + 1)
  8350  				span.freeindex = freeidx
  8351  				span.allocCount++
  8352  				nextFreeFastResult = gclinkptr(uintptr(result)*192 + span.base())
  8355  			}
  8356  		}
  8357  	}
  8358  	v := nextFreeFastResult
  8359  	if v == 0 {
  8360  		v, span, checkGCTrigger = c.nextFree(spc)
  8361  	}
  8362  	x := unsafe.Pointer(v)
  8363  	if needzero && span.needzero != 0 {
  8364  		memclrNoHeapPointers(x, elemsize)
  8365  	}
  8366  
  8367  	publicationBarrier()
  8368  
  8369  	if writeBarrier.enabled {
  8370  
  8371  		gcmarknewobject(span, uintptr(x))
  8372  	} else {
  8373  
  8374  		span.freeIndexForScan = span.freeindex
  8375  	}
  8376  
  8377  	c.nextSample -= int64(elemsize)
  8378  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  8379  		profilealloc(mp, x, elemsize)
  8380  	}
  8381  	mp.mallocing = 0
  8382  	releasem(mp)
  8383  
  8384  	if checkGCTrigger {
  8385  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  8386  			gcStart(t)
  8387  		}
  8388  	}
  8389  	gp := getg()
  8390  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  8391  
  8392  		addSecret(x, size)
  8393  	}
  8394  
  8395  	if valgrindenabled {
  8396  		valgrindMalloc(x, size)
  8397  	}
  8398  
  8399  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8400  		if assistG := getg().m.curg; assistG != nil {
  8401  			assistG.gcAssistBytes -= int64(elemsize - size)
  8402  		}
  8403  	}
  8404  
  8405  	if debug.malloc {
  8406  		postMallocgcDebug(x, elemsize, typ)
  8407  	}
  8408  	return x
  8409  }
  8410  
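        // mallocgcSmallNoScanSC15 allocates noscan objects of size class 15 (208-byte elements); same template as SC9.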
  8411  func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8412  
  8413  	if doubleCheckMalloc {
  8414  		if gcphase == _GCmarktermination {
  8415  			throw("mallocgc called with gcphase == _GCmarktermination")
  8416  		}
  8417  	}
  8418  
  8419  	lockRankMayQueueFinalizer()
  8420  
  8421  	if debug.malloc {
  8422  		if x := preMallocgcDebug(size, typ); x != nil {
  8423  			return x
  8424  		}
  8425  	}
  8426  
  8427  	if gcBlackenEnabled != 0 {
  8428  		deductAssistCredit(size)
  8429  	}
  8430  
  8431  	const sizeclass = 15
  8432  
  8433  	const elemsize = 208
  8434  
  8435  	mp := acquirem()
  8436  	if doubleCheckMalloc {
  8437  		doubleCheckSmallNoScan(typ, mp)
  8438  	}
  8439  	mp.mallocing = 1
  8440  
  8441  	checkGCTrigger := false
  8442  	c := getMCache(mp)
  8443  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8444  	span := c.alloc[spc]
  8445  
  8446  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  8447  
  8448  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  8449  		mp.mallocing = 0
  8450  		releasem(mp)
  8451  		x := v
  8452  		{
  8453  
  8454  			gp := getg()
  8455  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  8456  
  8457  				addSecret(x, size)
  8458  			}
  8459  
  8460  			if valgrindenabled {
  8461  				valgrindMalloc(x, size)
  8462  			}
  8463  
  8464  			if gcBlackenEnabled != 0 && elemsize != 0 {
  8465  				if assistG := getg().m.curg; assistG != nil {
  8466  					assistG.gcAssistBytes -= int64(elemsize - size)
  8467  				}
  8468  			}
  8469  
  8470  			if debug.malloc {
  8471  				postMallocgcDebug(x, elemsize, typ)
  8472  			}
  8473  			return x
  8474  		}
  8475  
  8476  	}
  8477  
  8478  	var nextFreeFastResult gclinkptr
  8479  	if span.allocCache != 0 {
  8480  		theBit := sys.TrailingZeros64(span.allocCache)
  8481  		result := span.freeindex + uint16(theBit)
  8482  		if result < span.nelems {
  8483  			freeidx := result + 1
  8484  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  8485  				span.allocCache >>= uint(theBit + 1)
  8486  				span.freeindex = freeidx
  8487  				span.allocCount++
  8488  				nextFreeFastResult = gclinkptr(uintptr(result)*208 + span.base())
  8491  			}
  8492  		}
  8493  	}
  8494  	v := nextFreeFastResult
  8495  	if v == 0 {
  8496  		v, span, checkGCTrigger = c.nextFree(spc)
  8497  	}
  8498  	x := unsafe.Pointer(v)
  8499  	if needzero && span.needzero != 0 {
  8500  		memclrNoHeapPointers(x, elemsize)
  8501  	}
  8502  
  8503  	publicationBarrier()
  8504  
  8505  	if writeBarrier.enabled {
  8506  
  8507  		gcmarknewobject(span, uintptr(x))
  8508  	} else {
  8509  
  8510  		span.freeIndexForScan = span.freeindex
  8511  	}
  8512  
  8513  	c.nextSample -= int64(elemsize)
  8514  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  8515  		profilealloc(mp, x, elemsize)
  8516  	}
  8517  	mp.mallocing = 0
  8518  	releasem(mp)
  8519  
  8520  	if checkGCTrigger {
  8521  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  8522  			gcStart(t)
  8523  		}
  8524  	}
  8525  	gp := getg()
  8526  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  8527  
  8528  		addSecret(x, size)
  8529  	}
  8530  
  8531  	if valgrindenabled {
  8532  		valgrindMalloc(x, size)
  8533  	}
  8534  
  8535  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8536  		if assistG := getg().m.curg; assistG != nil {
  8537  			assistG.gcAssistBytes -= int64(elemsize - size)
  8538  		}
  8539  	}
  8540  
  8541  	if debug.malloc {
  8542  		postMallocgcDebug(x, elemsize, typ)
  8543  	}
  8544  	return x
  8545  }
  8546  
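        // mallocgcSmallNoScanSC16 allocates noscan objects of size class 16 (224-byte elements); same template as SC9.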
  8547  func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8548  
  8549  	if doubleCheckMalloc {
  8550  		if gcphase == _GCmarktermination {
  8551  			throw("mallocgc called with gcphase == _GCmarktermination")
  8552  		}
  8553  	}
  8554  
  8555  	lockRankMayQueueFinalizer()
  8556  
  8557  	if debug.malloc {
  8558  		if x := preMallocgcDebug(size, typ); x != nil {
  8559  			return x
  8560  		}
  8561  	}
  8562  
  8563  	if gcBlackenEnabled != 0 {
  8564  		deductAssistCredit(size)
  8565  	}
  8566  
  8567  	const sizeclass = 16
  8568  
  8569  	const elemsize = 224
  8570  
  8571  	mp := acquirem()
  8572  	if doubleCheckMalloc {
  8573  		doubleCheckSmallNoScan(typ, mp)
  8574  	}
  8575  	mp.mallocing = 1
  8576  
  8577  	checkGCTrigger := false
  8578  	c := getMCache(mp)
  8579  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8580  	span := c.alloc[spc]
  8581  
  8582  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  8583  
  8584  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  8585  		mp.mallocing = 0
  8586  		releasem(mp)
  8587  		x := v
  8588  		{
  8589  
  8590  			gp := getg()
  8591  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  8592  
  8593  				addSecret(x, size)
  8594  			}
  8595  
  8596  			if valgrindenabled {
  8597  				valgrindMalloc(x, size)
  8598  			}
  8599  
  8600  			if gcBlackenEnabled != 0 && elemsize != 0 {
  8601  				if assistG := getg().m.curg; assistG != nil {
  8602  					assistG.gcAssistBytes -= int64(elemsize - size)
  8603  				}
  8604  			}
  8605  
  8606  			if debug.malloc {
  8607  				postMallocgcDebug(x, elemsize, typ)
  8608  			}
  8609  			return x
  8610  		}
  8611  
  8612  	}
  8613  
  8614  	var nextFreeFastResult gclinkptr
  8615  	if span.allocCache != 0 {
  8616  		theBit := sys.TrailingZeros64(span.allocCache)
  8617  		result := span.freeindex + uint16(theBit)
  8618  		if result < span.nelems {
  8619  			freeidx := result + 1
  8620  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  8621  				span.allocCache >>= uint(theBit + 1)
  8622  				span.freeindex = freeidx
  8623  				span.allocCount++
  8624  				nextFreeFastResult = gclinkptr(uintptr(result)*224 + span.base())
  8627  			}
  8628  		}
  8629  	}
  8630  	v := nextFreeFastResult
  8631  	if v == 0 {
  8632  		v, span, checkGCTrigger = c.nextFree(spc)
  8633  	}
  8634  	x := unsafe.Pointer(v)
  8635  	if needzero && span.needzero != 0 {
  8636  		memclrNoHeapPointers(x, elemsize)
  8637  	}
  8638  
  8639  	publicationBarrier()
  8640  
  8641  	if writeBarrier.enabled {
  8642  
  8643  		gcmarknewobject(span, uintptr(x))
  8644  	} else {
  8645  
  8646  		span.freeIndexForScan = span.freeindex
  8647  	}
  8648  
  8649  	c.nextSample -= int64(elemsize)
  8650  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  8651  		profilealloc(mp, x, elemsize)
  8652  	}
  8653  	mp.mallocing = 0
  8654  	releasem(mp)
  8655  
  8656  	if checkGCTrigger {
  8657  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  8658  			gcStart(t)
  8659  		}
  8660  	}
  8661  	gp := getg()
  8662  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  8663  
  8664  		addSecret(x, size)
  8665  	}
  8666  
  8667  	if valgrindenabled {
  8668  		valgrindMalloc(x, size)
  8669  	}
  8670  
  8671  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8672  		if assistG := getg().m.curg; assistG != nil {
  8673  			assistG.gcAssistBytes -= int64(elemsize - size)
  8674  		}
  8675  	}
  8676  
  8677  	if debug.malloc {
  8678  		postMallocgcDebug(x, elemsize, typ)
  8679  	}
  8680  	return x
  8681  }
  8682  
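        // mallocgcSmallNoScanSC17 allocates noscan objects of size class 17 (240-byte elements); same template as SC9.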
  8683  func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8684  
  8685  	if doubleCheckMalloc {
  8686  		if gcphase == _GCmarktermination {
  8687  			throw("mallocgc called with gcphase == _GCmarktermination")
  8688  		}
  8689  	}
  8690  
  8691  	lockRankMayQueueFinalizer()
  8692  
  8693  	if debug.malloc {
  8694  		if x := preMallocgcDebug(size, typ); x != nil {
  8695  			return x
  8696  		}
  8697  	}
  8698  
  8699  	if gcBlackenEnabled != 0 {
  8700  		deductAssistCredit(size)
  8701  	}
  8702  
  8703  	const sizeclass = 17
  8704  
  8705  	const elemsize = 240
  8706  
  8707  	mp := acquirem()
  8708  	if doubleCheckMalloc {
  8709  		doubleCheckSmallNoScan(typ, mp)
  8710  	}
  8711  	mp.mallocing = 1
  8712  
  8713  	checkGCTrigger := false
  8714  	c := getMCache(mp)
  8715  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8716  	span := c.alloc[spc]
  8717  
  8718  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  8719  
  8720  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  8721  		mp.mallocing = 0
  8722  		releasem(mp)
  8723  		x := v
  8724  		{
  8725  
  8726  			gp := getg()
  8727  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  8728  
  8729  				addSecret(x, size)
  8730  			}
  8731  
  8732  			if valgrindenabled {
  8733  				valgrindMalloc(x, size)
  8734  			}
  8735  
  8736  			if gcBlackenEnabled != 0 && elemsize != 0 {
  8737  				if assistG := getg().m.curg; assistG != nil {
  8738  					assistG.gcAssistBytes -= int64(elemsize - size)
  8739  				}
  8740  			}
  8741  
  8742  			if debug.malloc {
  8743  				postMallocgcDebug(x, elemsize, typ)
  8744  			}
  8745  			return x
  8746  		}
  8747  
  8748  	}
  8749  
  8750  	var nextFreeFastResult gclinkptr
  8751  	if span.allocCache != 0 {
  8752  		theBit := sys.TrailingZeros64(span.allocCache)
  8753  		result := span.freeindex + uint16(theBit)
  8754  		if result < span.nelems {
  8755  			freeidx := result + 1
  8756  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  8757  				span.allocCache >>= uint(theBit + 1)
  8758  				span.freeindex = freeidx
  8759  				span.allocCount++
  8760  				nextFreeFastResult = gclinkptr(uintptr(result)*240 + span.base())
  8763  			}
  8764  		}
  8765  	}
  8766  	v := nextFreeFastResult
  8767  	if v == 0 {
  8768  		v, span, checkGCTrigger = c.nextFree(spc)
  8769  	}
  8770  	x := unsafe.Pointer(v)
  8771  	if needzero && span.needzero != 0 {
  8772  		memclrNoHeapPointers(x, elemsize)
  8773  	}
  8774  
  8775  	publicationBarrier()
  8776  
  8777  	if writeBarrier.enabled {
  8778  
  8779  		gcmarknewobject(span, uintptr(x))
  8780  	} else {
  8781  
  8782  		span.freeIndexForScan = span.freeindex
  8783  	}
  8784  
  8785  	c.nextSample -= int64(elemsize)
  8786  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  8787  		profilealloc(mp, x, elemsize)
  8788  	}
  8789  	mp.mallocing = 0
  8790  	releasem(mp)
  8791  
  8792  	if checkGCTrigger {
  8793  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  8794  			gcStart(t)
  8795  		}
  8796  	}
  8797  	gp := getg()
  8798  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  8799  
  8800  		addSecret(x, size)
  8801  	}
  8802  
  8803  	if valgrindenabled {
  8804  		valgrindMalloc(x, size)
  8805  	}
  8806  
  8807  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8808  		if assistG := getg().m.curg; assistG != nil {
  8809  			assistG.gcAssistBytes -= int64(elemsize - size)
  8810  		}
  8811  	}
  8812  
  8813  	if debug.malloc {
  8814  		postMallocgcDebug(x, elemsize, typ)
  8815  	}
  8816  	return x
  8817  }
  8818  
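        // mallocgcSmallNoScanSC18 allocates noscan objects of size class 18 (256-byte elements); same template as SC9.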
  8819  func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8820  
  8821  	if doubleCheckMalloc {
  8822  		if gcphase == _GCmarktermination {
  8823  			throw("mallocgc called with gcphase == _GCmarktermination")
  8824  		}
  8825  	}
  8826  
  8827  	lockRankMayQueueFinalizer()
  8828  
  8829  	if debug.malloc {
  8830  		if x := preMallocgcDebug(size, typ); x != nil {
  8831  			return x
  8832  		}
  8833  	}
  8834  
  8835  	if gcBlackenEnabled != 0 {
  8836  		deductAssistCredit(size)
  8837  	}
  8838  
  8839  	const sizeclass = 18
  8840  
  8841  	const elemsize = 256
  8842  
  8843  	mp := acquirem()
  8844  	if doubleCheckMalloc {
  8845  		doubleCheckSmallNoScan(typ, mp)
  8846  	}
  8847  	mp.mallocing = 1
  8848  
  8849  	checkGCTrigger := false
  8850  	c := getMCache(mp)
  8851  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8852  	span := c.alloc[spc]
  8853  
  8854  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  8855  
  8856  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  8857  		mp.mallocing = 0
  8858  		releasem(mp)
  8859  		x := v
  8860  		{
  8861  
  8862  			gp := getg()
  8863  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  8864  
  8865  				addSecret(x, size)
  8866  			}
  8867  
  8868  			if valgrindenabled {
  8869  				valgrindMalloc(x, size)
  8870  			}
  8871  
  8872  			if gcBlackenEnabled != 0 && elemsize != 0 {
  8873  				if assistG := getg().m.curg; assistG != nil {
  8874  					assistG.gcAssistBytes -= int64(elemsize - size)
  8875  				}
  8876  			}
  8877  
  8878  			if debug.malloc {
  8879  				postMallocgcDebug(x, elemsize, typ)
  8880  			}
  8881  			return x
  8882  		}
  8883  
  8884  	}
  8885  
  8886  	var nextFreeFastResult gclinkptr
  8887  	if span.allocCache != 0 {
  8888  		theBit := sys.TrailingZeros64(span.allocCache)
  8889  		result := span.freeindex + uint16(theBit)
  8890  		if result < span.nelems {
  8891  			freeidx := result + 1
  8892  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  8893  				span.allocCache >>= uint(theBit + 1)
  8894  				span.freeindex = freeidx
  8895  				span.allocCount++
  8896  				nextFreeFastResult = gclinkptr(uintptr(result)*256 + span.base())
  8899  			}
  8900  		}
  8901  	}
  8902  	v := nextFreeFastResult
  8903  	if v == 0 {
  8904  		v, span, checkGCTrigger = c.nextFree(spc)
  8905  	}
  8906  	x := unsafe.Pointer(v)
  8907  	if needzero && span.needzero != 0 {
  8908  		memclrNoHeapPointers(x, elemsize)
  8909  	}
  8910  
  8911  	publicationBarrier()
  8912  
  8913  	if writeBarrier.enabled {
  8914  
  8915  		gcmarknewobject(span, uintptr(x))
  8916  	} else {
  8917  
  8918  		span.freeIndexForScan = span.freeindex
  8919  	}
  8920  
  8921  	c.nextSample -= int64(elemsize)
  8922  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  8923  		profilealloc(mp, x, elemsize)
  8924  	}
  8925  	mp.mallocing = 0
  8926  	releasem(mp)
  8927  
  8928  	if checkGCTrigger {
  8929  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  8930  			gcStart(t)
  8931  		}
  8932  	}
  8933  	gp := getg()
  8934  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  8935  
  8936  		addSecret(x, size)
  8937  	}
  8938  
  8939  	if valgrindenabled {
  8940  		valgrindMalloc(x, size)
  8941  	}
  8942  
  8943  	if gcBlackenEnabled != 0 && elemsize != 0 {
  8944  		if assistG := getg().m.curg; assistG != nil {
  8945  			assistG.gcAssistBytes -= int64(elemsize - size)
  8946  		}
  8947  	}
  8948  
  8949  	if debug.malloc {
  8950  		postMallocgcDebug(x, elemsize, typ)
  8951  	}
  8952  	return x
  8953  }
  8954  
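        // mallocgcSmallNoScanSC19 allocates noscan objects of size class 19 (288-byte elements); same template as SC9.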
  8955  func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  8956  
  8957  	if doubleCheckMalloc {
  8958  		if gcphase == _GCmarktermination {
  8959  			throw("mallocgc called with gcphase == _GCmarktermination")
  8960  		}
  8961  	}
  8962  
  8963  	lockRankMayQueueFinalizer()
  8964  
  8965  	if debug.malloc {
  8966  		if x := preMallocgcDebug(size, typ); x != nil {
  8967  			return x
  8968  		}
  8969  	}
  8970  
  8971  	if gcBlackenEnabled != 0 {
  8972  		deductAssistCredit(size)
  8973  	}
  8974  
  8975  	const sizeclass = 19
  8976  
  8977  	const elemsize = 288
  8978  
  8979  	mp := acquirem()
  8980  	if doubleCheckMalloc {
  8981  		doubleCheckSmallNoScan(typ, mp)
  8982  	}
  8983  	mp.mallocing = 1
  8984  
  8985  	checkGCTrigger := false
  8986  	c := getMCache(mp)
  8987  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  8988  	span := c.alloc[spc]
  8989  
  8990  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  8991  
  8992  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  8993  		mp.mallocing = 0
  8994  		releasem(mp)
  8995  		x := v
  8996  		{
  8997  
  8998  			gp := getg()
  8999  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  9000  
  9001  				addSecret(x, size)
  9002  			}
  9003  
  9004  			if valgrindenabled {
  9005  				valgrindMalloc(x, size)
  9006  			}
  9007  
  9008  			if gcBlackenEnabled != 0 && elemsize != 0 {
  9009  				if assistG := getg().m.curg; assistG != nil {
  9010  					assistG.gcAssistBytes -= int64(elemsize - size)
  9011  				}
  9012  			}
  9013  
  9014  			if debug.malloc {
  9015  				postMallocgcDebug(x, elemsize, typ)
  9016  			}
  9017  			return x
  9018  		}
  9019  
  9020  	}
  9021  
  9022  	var nextFreeFastResult gclinkptr
  9023  	if span.allocCache != 0 {
  9024  		theBit := sys.TrailingZeros64(span.allocCache)
  9025  		result := span.freeindex + uint16(theBit)
  9026  		if result < span.nelems {
  9027  			freeidx := result + 1
  9028  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  9029  				span.allocCache >>= uint(theBit + 1)
  9030  				span.freeindex = freeidx
  9031  				span.allocCount++
  9032  				nextFreeFastResult = gclinkptr(uintptr(result)*288 + span.base())
  9035  			}
  9036  		}
  9037  	}
  9038  	v := nextFreeFastResult
  9039  	if v == 0 {
  9040  		v, span, checkGCTrigger = c.nextFree(spc)
  9041  	}
  9042  	x := unsafe.Pointer(v)
  9043  	if needzero && span.needzero != 0 {
  9044  		memclrNoHeapPointers(x, elemsize)
  9045  	}
  9046  
  9047  	publicationBarrier()
  9048  
  9049  	if writeBarrier.enabled {
  9050  
  9051  		gcmarknewobject(span, uintptr(x))
  9052  	} else {
  9053  
  9054  		span.freeIndexForScan = span.freeindex
  9055  	}
  9056  
  9057  	c.nextSample -= int64(elemsize)
  9058  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  9059  		profilealloc(mp, x, elemsize)
  9060  	}
  9061  	mp.mallocing = 0
  9062  	releasem(mp)
  9063  
  9064  	if checkGCTrigger {
  9065  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  9066  			gcStart(t)
  9067  		}
  9068  	}
  9069  	gp := getg()
  9070  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  9071  
  9072  		addSecret(x, size)
  9073  	}
  9074  
  9075  	if valgrindenabled {
  9076  		valgrindMalloc(x, size)
  9077  	}
  9078  
  9079  	if gcBlackenEnabled != 0 && elemsize != 0 {
  9080  		if assistG := getg().m.curg; assistG != nil {
  9081  			assistG.gcAssistBytes -= int64(elemsize - size)
  9082  		}
  9083  	}
  9084  
  9085  	if debug.malloc {
  9086  		postMallocgcDebug(x, elemsize, typ)
  9087  	}
  9088  	return x
  9089  }
  9090  
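        // mallocgcSmallNoScanSC20 allocates noscan objects of size class 20 (320-byte elements); same template as SC9.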
  9091  func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  9092  
  9093  	if doubleCheckMalloc {
  9094  		if gcphase == _GCmarktermination {
  9095  			throw("mallocgc called with gcphase == _GCmarktermination")
  9096  		}
  9097  	}
  9098  
  9099  	lockRankMayQueueFinalizer()
  9100  
  9101  	if debug.malloc {
  9102  		if x := preMallocgcDebug(size, typ); x != nil {
  9103  			return x
  9104  		}
  9105  	}
  9106  
  9107  	if gcBlackenEnabled != 0 {
  9108  		deductAssistCredit(size)
  9109  	}
  9110  
  9111  	const sizeclass = 20
  9112  
  9113  	const elemsize = 320
  9114  
  9115  	mp := acquirem()
  9116  	if doubleCheckMalloc {
  9117  		doubleCheckSmallNoScan(typ, mp)
  9118  	}
  9119  	mp.mallocing = 1
  9120  
  9121  	checkGCTrigger := false
  9122  	c := getMCache(mp)
  9123  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  9124  	span := c.alloc[spc]
  9125  
  9126  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  9127  
  9128  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  9129  		mp.mallocing = 0
  9130  		releasem(mp)
  9131  		x := v
  9132  		{
  9133  
  9134  			gp := getg()
  9135  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  9136  
  9137  				addSecret(x, size)
  9138  			}
  9139  
  9140  			if valgrindenabled {
  9141  				valgrindMalloc(x, size)
  9142  			}
  9143  
  9144  			if gcBlackenEnabled != 0 && elemsize != 0 {
  9145  				if assistG := getg().m.curg; assistG != nil {
  9146  					assistG.gcAssistBytes -= int64(elemsize - size)
  9147  				}
  9148  			}
  9149  
  9150  			if debug.malloc {
  9151  				postMallocgcDebug(x, elemsize, typ)
  9152  			}
  9153  			return x
  9154  		}
  9155  
  9156  	}
  9157  
  9158  	var nextFreeFastResult gclinkptr
  9159  	if span.allocCache != 0 {
  9160  		theBit := sys.TrailingZeros64(span.allocCache)
  9161  		result := span.freeindex + uint16(theBit)
  9162  		if result < span.nelems {
  9163  			freeidx := result + 1
  9164  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  9165  				span.allocCache >>= uint(theBit + 1)
  9166  				span.freeindex = freeidx
  9167  				span.allocCount++
  9168  				nextFreeFastResult = gclinkptr(uintptr(result)*320 + span.base())
  9171  			}
  9172  		}
  9173  	}
  9174  	v := nextFreeFastResult
  9175  	if v == 0 {
  9176  		v, span, checkGCTrigger = c.nextFree(spc)
  9177  	}
  9178  	x := unsafe.Pointer(v)
  9179  	if needzero && span.needzero != 0 {
  9180  		memclrNoHeapPointers(x, elemsize)
  9181  	}
  9182  
  9183  	publicationBarrier()
  9184  
  9185  	if writeBarrier.enabled {
  9186  
  9187  		gcmarknewobject(span, uintptr(x))
  9188  	} else {
  9189  
  9190  		span.freeIndexForScan = span.freeindex
  9191  	}
  9192  
  9193  	c.nextSample -= int64(elemsize)
  9194  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  9195  		profilealloc(mp, x, elemsize)
  9196  	}
  9197  	mp.mallocing = 0
  9198  	releasem(mp)
  9199  
  9200  	if checkGCTrigger {
  9201  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  9202  			gcStart(t)
  9203  		}
  9204  	}
  9205  	gp := getg()
  9206  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  9207  
  9208  		addSecret(x, size)
  9209  	}
  9210  
  9211  	if valgrindenabled {
  9212  		valgrindMalloc(x, size)
  9213  	}
  9214  
  9215  	if gcBlackenEnabled != 0 && elemsize != 0 {
  9216  		if assistG := getg().m.curg; assistG != nil {
  9217  			assistG.gcAssistBytes -= int64(elemsize - size)
  9218  		}
  9219  	}
  9220  
  9221  	if debug.malloc {
  9222  		postMallocgcDebug(x, elemsize, typ)
  9223  	}
  9224  	return x
  9225  }
  9226  
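        // mallocgcSmallNoScanSC21 allocates noscan objects of size class 21 (352-byte elements); same template as SC9.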
  9227  func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  9228  
  9229  	if doubleCheckMalloc {
  9230  		if gcphase == _GCmarktermination {
  9231  			throw("mallocgc called with gcphase == _GCmarktermination")
  9232  		}
  9233  	}
  9234  
  9235  	lockRankMayQueueFinalizer()
  9236  
  9237  	if debug.malloc {
  9238  		if x := preMallocgcDebug(size, typ); x != nil {
  9239  			return x
  9240  		}
  9241  	}
  9242  
  9243  	if gcBlackenEnabled != 0 {
  9244  		deductAssistCredit(size)
  9245  	}
  9246  
  9247  	const sizeclass = 21
  9248  
  9249  	const elemsize = 352
  9250  
  9251  	mp := acquirem()
  9252  	if doubleCheckMalloc {
  9253  		doubleCheckSmallNoScan(typ, mp)
  9254  	}
  9255  	mp.mallocing = 1
  9256  
  9257  	checkGCTrigger := false
  9258  	c := getMCache(mp)
  9259  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  9260  	span := c.alloc[spc]
  9261  
  9262  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  9263  
  9264  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  9265  		mp.mallocing = 0
  9266  		releasem(mp)
  9267  		x := v
  9268  		{
  9269  
  9270  			gp := getg()
  9271  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  9272  
  9273  				addSecret(x, size)
  9274  			}
  9275  
  9276  			if valgrindenabled {
  9277  				valgrindMalloc(x, size)
  9278  			}
  9279  
  9280  			if gcBlackenEnabled != 0 && elemsize != 0 {
  9281  				if assistG := getg().m.curg; assistG != nil {
  9282  					assistG.gcAssistBytes -= int64(elemsize - size)
  9283  				}
  9284  			}
  9285  
  9286  			if debug.malloc {
  9287  				postMallocgcDebug(x, elemsize, typ)
  9288  			}
  9289  			return x
  9290  		}
  9291  
  9292  	}
  9293  
  9294  	var nextFreeFastResult gclinkptr
  9295  	if span.allocCache != 0 {
  9296  		theBit := sys.TrailingZeros64(span.allocCache)
  9297  		result := span.freeindex + uint16(theBit)
  9298  		if result < span.nelems {
  9299  			freeidx := result + 1
  9300  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  9301  				span.allocCache >>= uint(theBit + 1)
  9302  				span.freeindex = freeidx
  9303  				span.allocCount++
  9304  				nextFreeFastResult = gclinkptr(uintptr(result)*352 + span.base())
  9307  			}
  9308  		}
  9309  	}
  9310  	v := nextFreeFastResult
  9311  	if v == 0 {
  9312  		v, span, checkGCTrigger = c.nextFree(spc)
  9313  	}
  9314  	x := unsafe.Pointer(v)
  9315  	if needzero && span.needzero != 0 {
  9316  		memclrNoHeapPointers(x, elemsize)
  9317  	}
  9318  
  9319  	publicationBarrier()
  9320  
  9321  	if writeBarrier.enabled {
  9322  
  9323  		gcmarknewobject(span, uintptr(x))
  9324  	} else {
  9325  
  9326  		span.freeIndexForScan = span.freeindex
  9327  	}
  9328  
  9329  	c.nextSample -= int64(elemsize)
  9330  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  9331  		profilealloc(mp, x, elemsize)
  9332  	}
  9333  	mp.mallocing = 0
  9334  	releasem(mp)
  9335  
  9336  	if checkGCTrigger {
  9337  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  9338  			gcStart(t)
  9339  		}
  9340  	}
  9341  	gp := getg()
  9342  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  9343  
  9344  		addSecret(x, size)
  9345  	}
  9346  
  9347  	if valgrindenabled {
  9348  		valgrindMalloc(x, size)
  9349  	}
  9350  
  9351  	if gcBlackenEnabled != 0 && elemsize != 0 {
  9352  		if assistG := getg().m.curg; assistG != nil {
  9353  			assistG.gcAssistBytes -= int64(elemsize - size)
  9354  		}
  9355  	}
  9356  
  9357  	if debug.malloc {
  9358  		postMallocgcDebug(x, elemsize, typ)
  9359  	}
  9360  	return x
  9361  }
  9362  
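        // mallocgcSmallNoScanSC22 allocates noscan objects of size class 22 (384-byte elements); same template as SC9.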
  9363  func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  9364  
  9365  	if doubleCheckMalloc {
  9366  		if gcphase == _GCmarktermination {
  9367  			throw("mallocgc called with gcphase == _GCmarktermination")
  9368  		}
  9369  	}
  9370  
  9371  	lockRankMayQueueFinalizer()
  9372  
  9373  	if debug.malloc {
  9374  		if x := preMallocgcDebug(size, typ); x != nil {
  9375  			return x
  9376  		}
  9377  	}
  9378  
  9379  	if gcBlackenEnabled != 0 {
  9380  		deductAssistCredit(size)
  9381  	}
  9382  
  9383  	const sizeclass = 22
  9384  
  9385  	const elemsize = 384
  9386  
  9387  	mp := acquirem()
  9388  	if doubleCheckMalloc {
  9389  		doubleCheckSmallNoScan(typ, mp)
  9390  	}
  9391  	mp.mallocing = 1
  9392  
  9393  	checkGCTrigger := false
  9394  	c := getMCache(mp)
  9395  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  9396  	span := c.alloc[spc]
  9397  
  9398  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  9399  
  9400  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  9401  		mp.mallocing = 0
  9402  		releasem(mp)
  9403  		x := v
  9404  		{
  9405  
  9406  			gp := getg()
  9407  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  9408  
  9409  				addSecret(x, size)
  9410  			}
  9411  
  9412  			if valgrindenabled {
  9413  				valgrindMalloc(x, size)
  9414  			}
  9415  
  9416  			if gcBlackenEnabled != 0 && elemsize != 0 {
  9417  				if assistG := getg().m.curg; assistG != nil {
  9418  					assistG.gcAssistBytes -= int64(elemsize - size)
  9419  				}
  9420  			}
  9421  
  9422  			if debug.malloc {
  9423  				postMallocgcDebug(x, elemsize, typ)
  9424  			}
  9425  			return x
  9426  		}
  9427  
  9428  	}
  9429  
  9430  	var nextFreeFastResult gclinkptr
  9431  	if span.allocCache != 0 {
  9432  		theBit := sys.TrailingZeros64(span.allocCache)
  9433  		result := span.freeindex + uint16(theBit)
  9434  		if result < span.nelems {
  9435  			freeidx := result + 1
  9436  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  9437  				span.allocCache >>= uint(theBit + 1)
  9438  				span.freeindex = freeidx
  9439  				span.allocCount++
  9440  				nextFreeFastResult = gclinkptr(uintptr(result)*384 + span.base())
  9443  			}
  9444  		}
  9445  	}
  9446  	v := nextFreeFastResult
  9447  	if v == 0 {
  9448  		v, span, checkGCTrigger = c.nextFree(spc)
  9449  	}
  9450  	x := unsafe.Pointer(v)
  9451  	if needzero && span.needzero != 0 {
  9452  		memclrNoHeapPointers(x, elemsize)
  9453  	}
  9454  
  9455  	publicationBarrier()
  9456  
  9457  	if writeBarrier.enabled {
  9458  
  9459  		gcmarknewobject(span, uintptr(x))
  9460  	} else {
  9461  
  9462  		span.freeIndexForScan = span.freeindex
  9463  	}
  9464  
  9465  	c.nextSample -= int64(elemsize)
  9466  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  9467  		profilealloc(mp, x, elemsize)
  9468  	}
  9469  	mp.mallocing = 0
  9470  	releasem(mp)
  9471  
  9472  	if checkGCTrigger {
  9473  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  9474  			gcStart(t)
  9475  		}
  9476  	}
  9477  	gp := getg()
  9478  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  9479  
  9480  		addSecret(x, size)
  9481  	}
  9482  
  9483  	if valgrindenabled {
  9484  		valgrindMalloc(x, size)
  9485  	}
  9486  
  9487  	if gcBlackenEnabled != 0 && elemsize != 0 {
  9488  		if assistG := getg().m.curg; assistG != nil {
  9489  			assistG.gcAssistBytes -= int64(elemsize - size)
  9490  		}
  9491  	}
  9492  
  9493  	if debug.malloc {
  9494  		postMallocgcDebug(x, elemsize, typ)
  9495  	}
  9496  	return x
  9497  }
  9498  
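        // mallocgcSmallNoScanSC23 allocates noscan objects of size class 23 (416-byte elements); same template as SC9.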
  9499  func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  9500  
  9501  	if doubleCheckMalloc {
  9502  		if gcphase == _GCmarktermination {
  9503  			throw("mallocgc called with gcphase == _GCmarktermination")
  9504  		}
  9505  	}
  9506  
  9507  	lockRankMayQueueFinalizer()
  9508  
  9509  	if debug.malloc {
  9510  		if x := preMallocgcDebug(size, typ); x != nil {
  9511  			return x
  9512  		}
  9513  	}
  9514  
  9515  	if gcBlackenEnabled != 0 {
  9516  		deductAssistCredit(size)
  9517  	}
  9518  
  9519  	const sizeclass = 23
  9520  
  9521  	const elemsize = 416
  9522  
  9523  	mp := acquirem()
  9524  	if doubleCheckMalloc {
  9525  		doubleCheckSmallNoScan(typ, mp)
  9526  	}
  9527  	mp.mallocing = 1
  9528  
  9529  	checkGCTrigger := false
  9530  	c := getMCache(mp)
  9531  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  9532  	span := c.alloc[spc]
  9533  
  9534  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  9535  
  9536  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  9537  		mp.mallocing = 0
  9538  		releasem(mp)
  9539  		x := v
  9540  		{
  9541  
  9542  			gp := getg()
  9543  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  9544  
  9545  				addSecret(x, size)
  9546  			}
  9547  
  9548  			if valgrindenabled {
  9549  				valgrindMalloc(x, size)
  9550  			}
  9551  
  9552  			if gcBlackenEnabled != 0 && elemsize != 0 {
  9553  				if assistG := getg().m.curg; assistG != nil {
  9554  					assistG.gcAssistBytes -= int64(elemsize - size)
  9555  				}
  9556  			}
  9557  
  9558  			if debug.malloc {
  9559  				postMallocgcDebug(x, elemsize, typ)
  9560  			}
  9561  			return x
  9562  		}
  9563  
  9564  	}
  9565  
  9566  	var nextFreeFastResult gclinkptr
  9567  	if span.allocCache != 0 {
  9568  		theBit := sys.TrailingZeros64(span.allocCache)
  9569  		result := span.freeindex + uint16(theBit)
  9570  		if result < span.nelems {
  9571  			freeidx := result + 1
  9572  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  9573  				span.allocCache >>= uint(theBit + 1)
  9574  				span.freeindex = freeidx
  9575  				span.allocCount++
  9576  				nextFreeFastResult = gclinkptr(uintptr(result)*416 + span.base())
  9579  			}
  9580  		}
  9581  	}
  9582  	v := nextFreeFastResult
  9583  	if v == 0 {
  9584  		v, span, checkGCTrigger = c.nextFree(spc)
  9585  	}
  9586  	x := unsafe.Pointer(v)
  9587  	if needzero && span.needzero != 0 {
  9588  		memclrNoHeapPointers(x, elemsize)
  9589  	}
  9590  
  9591  	publicationBarrier()
  9592  
  9593  	if writeBarrier.enabled {
  9594  
  9595  		gcmarknewobject(span, uintptr(x))
  9596  	} else {
  9597  
  9598  		span.freeIndexForScan = span.freeindex
  9599  	}
  9600  
  9601  	c.nextSample -= int64(elemsize)
  9602  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  9603  		profilealloc(mp, x, elemsize)
  9604  	}
  9605  	mp.mallocing = 0
  9606  	releasem(mp)
  9607  
  9608  	if checkGCTrigger {
  9609  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  9610  			gcStart(t)
  9611  		}
  9612  	}
  9613  	gp := getg()
  9614  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  9615  
  9616  		addSecret(x, size)
  9617  	}
  9618  
  9619  	if valgrindenabled {
  9620  		valgrindMalloc(x, size)
  9621  	}
  9622  
  9623  	if gcBlackenEnabled != 0 && elemsize != 0 {
  9624  		if assistG := getg().m.curg; assistG != nil {
  9625  			assistG.gcAssistBytes -= int64(elemsize - size)
  9626  		}
  9627  	}
  9628  
  9629  	if debug.malloc {
  9630  		postMallocgcDebug(x, elemsize, typ)
  9631  	}
  9632  	return x
  9633  }
  9634  
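        // mallocgcSmallNoScanSC24 allocates noscan objects of size class 24 (448-byte elements); same template as SC9.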
  9635  func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
  9636  
  9637  	if doubleCheckMalloc {
  9638  		if gcphase == _GCmarktermination {
  9639  			throw("mallocgc called with gcphase == _GCmarktermination")
  9640  		}
  9641  	}
  9642  
  9643  	lockRankMayQueueFinalizer()
  9644  
  9645  	if debug.malloc {
  9646  		if x := preMallocgcDebug(size, typ); x != nil {
  9647  			return x
  9648  		}
  9649  	}
  9650  
  9651  	if gcBlackenEnabled != 0 {
  9652  		deductAssistCredit(size)
  9653  	}
  9654  
  9655  	const sizeclass = 24
  9656  
  9657  	const elemsize = 448
  9658  
  9659  	mp := acquirem()
  9660  	if doubleCheckMalloc {
  9661  		doubleCheckSmallNoScan(typ, mp)
  9662  	}
  9663  	mp.mallocing = 1
  9664  
  9665  	checkGCTrigger := false
  9666  	c := getMCache(mp)
  9667  	const spc = spanClass(sizeclass<<1) | spanClass(1)
  9668  	span := c.alloc[spc]
  9669  
  9670  	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
  9671  
  9672  		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
  9673  		mp.mallocing = 0
  9674  		releasem(mp)
  9675  		x := v
  9676  		{
  9677  
  9678  			gp := getg()
  9679  			if goexperiment.RuntimeSecret && gp.secret > 0 {
  9680  
  9681  				addSecret(x, size)
  9682  			}
  9683  
  9684  			if valgrindenabled {
  9685  				valgrindMalloc(x, size)
  9686  			}
  9687  
  9688  			if gcBlackenEnabled != 0 && elemsize != 0 {
  9689  				if assistG := getg().m.curg; assistG != nil {
  9690  					assistG.gcAssistBytes -= int64(elemsize - size)
  9691  				}
  9692  			}
  9693  
  9694  			if debug.malloc {
  9695  				postMallocgcDebug(x, elemsize, typ)
  9696  			}
  9697  			return x
  9698  		}
  9699  
  9700  	}
  9701  
  9702  	var nextFreeFastResult gclinkptr
  9703  	if span.allocCache != 0 {
  9704  		theBit := sys.TrailingZeros64(span.allocCache)
  9705  		result := span.freeindex + uint16(theBit)
  9706  		if result < span.nelems {
  9707  			freeidx := result + 1
  9708  			if !(freeidx%64 == 0 && freeidx != span.nelems) {
  9709  				span.allocCache >>= uint(theBit + 1)
  9710  				span.freeindex = freeidx
  9711  				span.allocCount++
  9712  				nextFreeFastResult = gclinkptr(uintptr(result)*448 + span.base())
  9715  			}
  9716  		}
  9717  	}
  9718  	v := nextFreeFastResult
  9719  	if v == 0 {
  9720  		v, span, checkGCTrigger = c.nextFree(spc)
  9721  	}
  9722  	x := unsafe.Pointer(v)
  9723  	if needzero && span.needzero != 0 {
  9724  		memclrNoHeapPointers(x, elemsize)
  9725  	}
  9726  
  9727  	publicationBarrier()
  9728  
  9729  	if writeBarrier.enabled {
  9730  
  9731  		gcmarknewobject(span, uintptr(x))
  9732  	} else {
  9733  
  9734  		span.freeIndexForScan = span.freeindex
  9735  	}
  9736  
  9737  	c.nextSample -= int64(elemsize)
  9738  	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
  9739  		profilealloc(mp, x, elemsize)
  9740  	}
  9741  	mp.mallocing = 0
  9742  	releasem(mp)
  9743  
  9744  	if checkGCTrigger {
  9745  		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
  9746  			gcStart(t)
  9747  		}
  9748  	}
  9749  	gp := getg()
  9750  	if goexperiment.RuntimeSecret && gp.secret > 0 {
  9751  
  9752  		addSecret(x, size)
  9753  	}
  9754  
  9755  	if valgrindenabled {
  9756  		valgrindMalloc(x, size)
  9757  	}
  9758  
  9759  	if gcBlackenEnabled != 0 && elemsize != 0 {
  9760  		if assistG := getg().m.curg; assistG != nil {
  9761  			assistG.gcAssistBytes -= int64(elemsize - size)
  9762  		}
  9763  	}
  9764  
  9765  	if debug.malloc {
  9766  		postMallocgcDebug(x, elemsize, typ)
  9767  	}
  9768  	return x
  9769  }
  9770  
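        // mallocgcSmallNoScanSC25 allocates noscan objects of size class 25 (480-byte elements); same template as SC9.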
func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 25
	const elemsize = 480

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

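	// Fast path for the freegc experiment: if the mcache holds a freed,
	// reusable object of this noscan size class, hand it back out rather
	// than carving a new slot. The post-allocation hooks in the block
	// below mirror the ones on the normal path.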
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}
	}

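	// Inlined nextFreeFast: scan the span's 64-bit allocCache bitmap for
	// the next free object. If the cache is exhausted, or advancing past
	// this slot would require refreshing it, the result stays 0 and the
	// c.nextFree slow path below takes over.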
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*480 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

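	// Ensure that the stores above that initialize x occur before the
	// caller can make x observable to the garbage collector; otherwise,
	// on weakly ordered machines, the GC could observe uninitialized
	// memory.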
	publicationBarrier()

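	// Allocate black during GC: mark the new object right away so the GC
	// never scans it. Otherwise, publish the updated free index so that
	// conservative scanning treats x as an allocated object from this
	// point on.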
	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

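	// Heap profiling: nextSample counts down the bytes remaining until
	// the next sampled allocation (controlled by MemProfileRate).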
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

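	// c.nextFree may have handed out a fresh span; if so, check whether
	// the heap has grown enough that a new GC cycle should start.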
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

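	// Assist credit was deducted for size up front; account for the
	// internal fragmentation from rounding up to elemsize now that the
	// size class is known.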
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}

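// mallocgcSmallNoScanSC26 is the specialized allocator for noscan objects
// in size class 26 (512-byte elements); see the overview in malloc_stubs.go.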
func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if doubleCheckMalloc {
		if gcphase == _GCmarktermination {
			throw("mallocgc called with gcphase == _GCmarktermination")
		}
	}

	lockRankMayQueueFinalizer()

	if debug.malloc {
		if x := preMallocgcDebug(size, typ); x != nil {
			return x
		}
	}

	if gcBlackenEnabled != 0 {
		deductAssistCredit(size)
	}

	const sizeclass = 26
	const elemsize = 512

	mp := acquirem()
	if doubleCheckMalloc {
		doubleCheckSmallNoScan(typ, mp)
	}
	mp.mallocing = 1

	checkGCTrigger := false
	c := getMCache(mp)
	const spc = spanClass(sizeclass<<1) | spanClass(1)
	span := c.alloc[spc]

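	// Fast path for the freegc experiment: if the mcache holds a freed,
	// reusable object of this noscan size class, hand it back out rather
	// than carving a new slot. The post-allocation hooks in the block
	// below mirror the ones on the normal path.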
	if runtimeFreegcEnabled && c.hasReusableNoscan(spc) {
		v := mallocgcSmallNoscanReuse(c, span, spc, elemsize, needzero)
		mp.mallocing = 0
		releasem(mp)
		x := v
		{
			gp := getg()
			if goexperiment.RuntimeSecret && gp.secret > 0 {
				addSecret(x, size)
			}

			if valgrindenabled {
				valgrindMalloc(x, size)
			}

			if gcBlackenEnabled != 0 && elemsize != 0 {
				if assistG := getg().m.curg; assistG != nil {
					assistG.gcAssistBytes -= int64(elemsize - size)
				}
			}

			if debug.malloc {
				postMallocgcDebug(x, elemsize, typ)
			}
			return x
		}
	}

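	// Inlined nextFreeFast: scan the span's 64-bit allocCache bitmap for
	// the next free object. If the cache is exhausted, or advancing past
	// this slot would require refreshing it, the result stays 0 and the
	// c.nextFree slow path below takes over.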
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < span.nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != span.nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*512 + span.base())
			}
		}
	}
	v := nextFreeFastResult
	if v == 0 {
		v, span, checkGCTrigger = c.nextFree(spc)
	}
	x := unsafe.Pointer(v)
	if needzero && span.needzero != 0 {
		memclrNoHeapPointers(x, elemsize)
	}

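	// Ensure that the stores above that initialize x occur before the
	// caller can make x observable to the garbage collector; otherwise,
	// on weakly ordered machines, the GC could observe uninitialized
	// memory.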
	publicationBarrier()

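	// Allocate black during GC: mark the new object right away so the GC
	// never scans it. Otherwise, publish the updated free index so that
	// conservative scanning treats x as an allocated object from this
	// point on.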
	if writeBarrier.enabled {
		gcmarknewobject(span, uintptr(x))
	} else {
		span.freeIndexForScan = span.freeindex
	}

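	// Heap profiling: nextSample counts down the bytes remaining until
	// the next sampled allocation (controlled by MemProfileRate).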
	c.nextSample -= int64(elemsize)
	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
		profilealloc(mp, x, elemsize)
	}
	mp.mallocing = 0
	releasem(mp)

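	// c.nextFree may have handed out a fresh span; if so, check whether
	// the heap has grown enough that a new GC cycle should start.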
	if checkGCTrigger {
		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
			gcStart(t)
		}
	}
	gp := getg()
	if goexperiment.RuntimeSecret && gp.secret > 0 {
		addSecret(x, size)
	}

	if valgrindenabled {
		valgrindMalloc(x, size)
	}

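	// Assist credit was deducted for size up front; account for the
	// internal fragmentation from rounding up to elemsize now that the
	// size class is known.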
	if gcBlackenEnabled != 0 && elemsize != 0 {
		if assistG := getg().m.curg; assistG != nil {
			assistG.gcAssistBytes -= int64(elemsize - size)
		}
	}

	if debug.malloc {
		postMallocgcDebug(x, elemsize, typ)
	}
	return x
}
