60 Commits

Author SHA1 Message Date
Syoyo Fujita
160d6be10f Format code. 2017-12-12 17:31:22 +09:00
Syoyo Fujita
583590767e Fix compilation.
Remove unused file.
2017-12-12 17:30:59 +09:00
tigrazone
dc542d6638 comment for export data and functions block with hashed keywords to other projects 2017-12-10 10:48:05 +02:00
tigrazone
c5976f931b comment for export data and functions block with hashed keywords to other projects 2017-12-10 10:47:32 +02:00
tigrazone
0511658e86 comment for export data and functions block with hashed keywords to other projects 2017-12-10 10:46:35 +02:00
tigrazone
54851f8ac7 comment for export data and functions block with hashed keywords to other projects 2017-12-10 10:46:08 +02:00
tigrazone
a6c1d07560 comment for export data and functions block with hashed keywords to other projects 2017-12-10 10:43:24 +02:00
tigrazone
4fe479453b smallish optimize 2017-12-10 09:34:10 +02:00
tigrazone
af1bcf1e11 errors fix 2017-12-10 00:43:50 +02:00
tigrazone
05f06d09d8 hashed tokens as keys of map. 5% speedup 2017-12-09 13:17:04 +02:00
tigrazone
3c0196bfb7 map vs unordered_map remake 2017-12-09 11:59:33 +02:00
tigrazone
dc4c970262 small speedups - up to 1-3% 2017-12-08 23:39:07 +02:00
tigrazone
aeb0f05c0e remove stringstream for simple string copy 2017-12-08 23:19:31 +02:00
tigrazone
c016910317 minimize token checks 2017-12-08 21:53:36 +02:00
tigrazone
5d7f6bf539 buffered file read 2017-12-08 13:21:45 +02:00
tigrazone
b818a34f1a small fixes2 2017-12-08 09:05:42 +02:00
tigrazone
baa62f4d89 small fixes 2017-12-08 08:43:33 +02:00
tigrazone
b2f07d10aa 1.1.2 : new hashed keywords 2017-12-07 08:38:44 +02:00
tigrazone
ee2c734c15 refactoring for new speedup release1 2017-12-06 22:44:04 +02:00
tigrazone
6e579f027f refactoring for new speedup release 2017-12-06 22:37:44 +02:00
Syoyo Fujita
b434c2497f Update master URL. 2017-10-16 17:48:45 +09:00
Syoyo Fujita
94fc413466 Fix parsing of crease tags(t)
Support parsing texture filename containing whitespace.
2017-10-13 18:13:36 +09:00
Syoyo Fujita
1c6dbf9bd8 Merge branch 'master' of github.com:syoyo/tinyobjloader 2017-10-13 02:04:11 +09:00
Syoyo Fujita
88ad575f62 Initial support of parsing vertex color(extension format) 2017-10-12 19:04:38 +09:00
Syoyo Fujita
5cd30b70e0 Merge pull request #142 from Ododo/master
[Python] Fix mapping for #131 and compilation error #139 #132 (related)
2017-10-08 15:09:07 +09:00
Ododo
eb1f395101 Fix mapping for #131 and compilation error #139 #132 (related) 2017-10-08 04:00:14 +02:00
Syoyo Fujita
27bdd547f0 If you use tinyobjloader, please let us know via github issue!. 2017-09-25 02:36:15 +09:00
Syoyo Fujita
75a4bd1d35 Add zero-value check when parsing `f' line. Fixes #140. 2017-09-25 02:30:24 +09:00
Syoyo Fujita
7c7335c907 Add test for parsing bump_multipler for normal map. 2017-09-15 16:34:31 +09:00
Syoyo Fujita
3a9483ca6f Add regression test for issue 138. 2017-09-12 02:21:11 +09:00
Syoyo Fujita
1065d7cfb2 Change to add a shape when shape.mesh.indices.size() > 0 once g tag appears. Fixes #138. 2017-09-12 02:18:27 +09:00
Syoyo Fujita
303043f9ec Merge branch 'master' of github.com:syoyo/tinyobjloader 2017-09-11 15:08:39 +09:00
Syoyo Fujita
981f7c5f99 Add VFPR URL. 2017-09-11 14:58:40 +09:00
Syoyo Fujita
dcbc8d51aa Update URL for Rungholt scene. 2017-09-03 12:45:57 +09:00
Syoyo Fujita
785af4491d Add support for parsing map_Bump in mtl. 2017-08-20 18:30:54 +09:00
Syoyo Fujita
be46318a52 print some material infos. 2017-08-08 16:02:12 +09:00
Syoyo Fujita
b5961cd7b1 Merge pull request #133 from pmysiak/patch-1
Update README.md
2017-08-05 15:48:53 +09:00
Peter Myšiak
76632f80b3 Update README.md
Fix typo.
2017-08-04 12:11:36 +02:00
Syoyo Fujita
f59f93d7dc Rename variables to avoid confusion. Fixes #108 . 2017-07-17 18:48:04 +09:00
Syoyo Fujita
1dfd117ccd Add project URLs using tinyobjloader 1.0.x. 2017-07-17 18:40:42 +09:00
Syoyo Fujita
a86b78386b Merge pull request #132 from chrisliebert/master
Added Python mapping from material ID to material name (#131)
2017-07-17 17:21:39 +09:00
chrisliebert
e3508c3ca3 Added mapping from material index to to material name (#131) to Python wrapper. 2017-07-17 01:07:40 -07:00
Syoyo Fujita
8e7da82852 Fix index calculation. 2017-07-10 01:41:39 +09:00
Syoyo Fujita
6cde18eb55 Fix ifdef guard. 2017-07-05 16:50:12 +09:00
Syoyo Fujita
2409832b24 Remove unused file. 2017-07-05 16:32:00 +09:00
Syoyo Fujita
95fba2ab32 Changed to use lfpAlloc from ltalloc for experimental multi-threaded .obj parser since ltalloc is not a porable(e.g. it does not support ARM archtecture). 2017-07-05 16:32:40 +09:00
Syoyo Fujita
99518b6d3e Suppress some clang warnings. 2017-06-05 02:00:32 +09:00
Syoyo Fujita
d8f702c6f7 Merge pull request #128 from sloretz/cppcheck_warnings
Fix Cppcheck warnings
2017-06-03 00:19:51 +09:00
Shane Loretz
6eca09f2bf Removed double import of sstream 2017-06-02 07:31:19 -07:00
Shane Loretz
a1324f17fd Replaced snprintf with stringstream
Deleted now unused TINYOBJ_SSCANF_BUFFER_SIZE
2017-06-02 07:28:20 -07:00
Shane Loretz
56fa047ba9 Replace sscanf with snprintf 2017-06-01 14:14:52 -07:00
Shane Loretz
0f4a955e01 4096 -> TINYOBJ_SSCANF_BUFFER_SIZE 2017-06-01 14:09:15 -07:00
Shane Loretz
fb80e04212 Check sentry to remove unused variable warning 2017-06-01 14:05:03 -07:00
Syoyo Fujita
cdb5c2d375 Fix the number of triangle calculation. Fixes #127 2017-06-02 03:04:42 +09:00
Syoyo Fujita
44bff466e5 Initial support of reflection map(refl). 2017-05-24 17:43:45 +09:00
Syoyo Fujita
47989b591f Update copyright year and version. 2017-05-24 17:22:46 +09:00
Syoyo Fujita
41acdc95bc Merge pull request #126 from alangfel/multiple_texoptions
Deal with more than one texture option
2017-05-24 14:59:56 +09:00
alangfel
cc948e4c44 Deal with more than one texture option
After processing one textureoption like "-s u v w" the next option " -o u v w" has a beginning whitespace. Due to this it does not match to the option "-o" and it is skipped.
2017-05-12 08:34:05 +02:00
Syoyo Fujita
889b2187c1 Add link to Vulkan Cookbook. 2017-05-09 01:23:09 +09:00
Syoyo Fujita
3e146c376c Update README. 2017-04-25 15:50:42 +09:00
33 changed files with 3006 additions and 1816 deletions

View File

@@ -10,7 +10,7 @@
[![Coverage Status](https://coveralls.io/repos/github/syoyo/tinyobjloader/badge.svg?branch=master)](https://coveralls.io/github/syoyo/tinyobjloader?branch=master)
http://syoyo.github.io/tinyobjloader/
[https://github.com/syoyo/tinyobjloader](https://github.com/syoyo/tinyobjloader)
Tiny but powerful single file wavefront obj loader written in C++. No dependency except for C++ STL. It can parse over 10M polygons with moderate memory and time.
@@ -26,7 +26,7 @@ Old version is available `v0.9.x` branch https://github.com/syoyo/tinyobjloader/
## What's new
* 20 Aug, 2016 : Bump version v1.0.0. New data strcutre and API!
* 20 Aug, 2016 : Bump version v1.0.0. New data structure and API!
### Old version
@@ -37,7 +37,7 @@ Previous old version is avaiable in `v0.9.x` branch.
![Rungholt](images/rungholt.jpg)
tinyobjloader can successfully load 6M triangles Rungholt scene.
http://graphics.cs.williams.edu/data/meshes.xml
http://casual-effects.com/data/index.html
![](images/sanmugel.png)
@@ -51,9 +51,14 @@ TinyObjLoader is successfully used in ...
### New version(v1.0.x)
* Double precision support through `TINYOBJLOADER_USE_DOUBLE` thanks to noma
* Loading models in Vulkan Tutorial https://vulkan-tutorial.com/Loading_models
* .obj viewer with Metal https://github.com/middlefeng/NuoModelViewer/tree/master
* Your project here!
* Vulkan Cookbook https://github.com/PacktPublishing/Vulkan-Cookbook
* cudabox: CUDA Solid Voxelizer Engine https://github.com/gaspardzoss/cudavox
* Drake: A planning, control, and analysis toolbox for nonlinear dynamical systems https://github.com/RobotLocomotion/drake
* VFPR - a Vulkan Forward Plus Renderer : https://github.com/WindyDarian/Vulkan-Forward-Plus-Renderer
* Your project here! (Letting us know via github issue is welcome!)
### Old version(v0.9.x)
@@ -84,6 +89,7 @@ TinyObjLoader is successfully used in ...
* Group(parse multiple group name)
* Vertex
* Vertex color(as an extension: https://blender.stackexchange.com/questions/31997/how-can-i-get-vertex-painted-obj-files-to-import-into-blender)
* Texcoord
* Normal
* Material
@@ -91,6 +97,7 @@ TinyObjLoader is successfully used in ...
* Crease tag('t'). This is OpenSubdiv specific(not in wavefront .obj specification)
* PBR material extension for .MTL. Its proposed here: http://exocortex.com/blog/extending_wavefront_mtl_to_support_pbr
* Callback API for custom loading.
* Double precision support(for HPC application).
## TODO
@@ -133,6 +140,13 @@ attrib_t::texcoords => 2 floats per vertex
| u | v | u | v | u | v | u | v | .... | u | v |
+-----------+-----------+-----------+-----------+ +-----------+
attrib_t::colors => 3 floats per vertex(vertex color. optional)
c[0] c[1] c[2] c[3] c[n-1]
+-----------+-----------+-----------+-----------+ +-----------+
| x | y | z | x | y | z | x | y | z | x | y | z | .... | x | y | z |
+-----------+-----------+-----------+-----------+ +-----------+
```
Each `shape_t::mesh_t` does not contain vertex data but contains array index to `attrib_t`.
@@ -175,6 +189,12 @@ mesh_t::num_face_vertices => array of the number of vertices per face(e.g. 3 = t
Note that when `triangulate` flas is true in `tinyobj::LoadObj()` argument, `num_face_vertices` are all filled with 3(triangle).
### float data type
TinyObjLoader now use `real_t` for floating point data type.
Default is `float(32bit)`.
You can enable `double(64bit)` precision by using `TINYOBJLOADER_USE_DOUBLE` define.
#### Example code
```c++
@@ -208,14 +228,18 @@ for (size_t s = 0; s < shapes.size(); s++) {
for (size_t v = 0; v < fv; v++) {
// access to vertex
tinyobj::index_t idx = shapes[s].mesh.indices[index_offset + v];
float vx = attrib.vertices[3*idx.vertex_index+0];
float vy = attrib.vertices[3*idx.vertex_index+1];
float vz = attrib.vertices[3*idx.vertex_index+2];
float nx = attrib.normals[3*idx.normal_index+0];
float ny = attrib.normals[3*idx.normal_index+1];
float nz = attrib.normals[3*idx.normal_index+2];
float tx = attrib.texcoords[2*idx.texcoord_index+0];
float ty = attrib.texcoords[2*idx.texcoord_index+1];
tinyobj::real_t vx = attrib.vertices[3*idx.vertex_index+0];
tinyobj::real_t vy = attrib.vertices[3*idx.vertex_index+1];
tinyobj::real_t vz = attrib.vertices[3*idx.vertex_index+2];
tinyobj::real_t nx = attrib.normals[3*idx.normal_index+0];
tinyobj::real_t ny = attrib.normals[3*idx.normal_index+1];
tinyobj::real_t nz = attrib.normals[3*idx.normal_index+2];
tinyobj::real_t tx = attrib.texcoords[2*idx.texcoord_index+0];
tinyobj::real_t ty = attrib.texcoords[2*idx.texcoord_index+1];
// Optional: vertex colors
// tinyobj::real_t red = attrib.colors[3*idx.vertex_index+0];
// tinyobj::real_t green = attrib.colors[3*idx.vertex_index+1];
// tinyobj::real_t blue = attrib.colors[3*idx.vertex_index+2];
}
index_offset += fv;

View File

@@ -6,7 +6,6 @@
* glfw3
* glew
## Build on MaCOSX
Install glfw3 and glew using brew.
@@ -35,3 +34,9 @@ Put glfw3 and glew library somewhere and replace include and lib path in `premak
Then,
> premake5.exe vs2013
## TODO
* [ ] Support per-face material.
* [ ] Use shader-based GL rendering.
* [ ] PBR shader support.

View File

@@ -123,7 +123,7 @@ class timerutil {
};
typedef struct {
GLuint vb; // vertex buffer
GLuint vb_id; // vertex buffer id
int numTriangles;
size_t material_id;
} DrawObject;
@@ -207,6 +207,9 @@ static bool LoadObjAndConvert(float bmin[3], float bmax[3],
tm.start();
std::string base_dir = GetBaseDir(filename);
if (base_dir.empty()) {
base_dir = ".";
}
#ifdef _WIN32
base_dir += "\\";
#else
@@ -238,6 +241,10 @@ static bool LoadObjAndConvert(float bmin[3], float bmax[3],
// Append `default` material
materials.push_back(tinyobj::material_t());
for (size_t i = 0; i < materials.size(); i++) {
printf("material[%d].diffuse_texname = %s\n", int(i), materials[i].diffuse_texname.c_str());
}
// Load diffuse textures
{
for (size_t m = 0; m < materials.size(); m++) {
@@ -265,15 +272,19 @@ static bool LoadObjAndConvert(float bmin[3], float bmax[3],
std::cerr << "Unable to load texture: " << texture_filename << std::endl;
exit(1);
}
std::cout << "Loaded texture: " << texture_filename << ", w = " << w << ", h = " << h << ", comp = " << comp << std::endl;
glGenTextures(1, &texture_id);
glBindTexture(GL_TEXTURE_2D, texture_id);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
if (comp == 3) {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, w, h, 0, GL_RGB, GL_UNSIGNED_BYTE, image);
}
else if (comp == 4) {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, image);
} else {
assert(0); // TODO
}
glBindTexture(GL_TEXTURE_2D, 0);
stbi_image_free(image);
@@ -289,7 +300,7 @@ static bool LoadObjAndConvert(float bmin[3], float bmax[3],
{
for (size_t s = 0; s < shapes.size(); s++) {
DrawObject o;
std::vector<float> vb; // pos(3float), normal(3float), color(3float)
std::vector<float> buffer; // pos(3float), normal(3float), color(3float)
for (size_t f = 0; f < shapes[s].mesh.indices.size() / 3; f++) {
tinyobj::index_t idx0 = shapes[s].mesh.indices[3 * f + 0];
tinyobj::index_t idx1 = shapes[s].mesh.indices[3 * f + 1];
@@ -314,6 +325,8 @@ static bool LoadObjAndConvert(float bmin[3], float bmax[3],
assert(attrib.texcoords.size() > 2 * idx0.texcoord_index + 1);
assert(attrib.texcoords.size() > 2 * idx1.texcoord_index + 1);
assert(attrib.texcoords.size() > 2 * idx2.texcoord_index + 1);
// Flip Y coord.
tc[0][0] = attrib.texcoords[2 * idx0.texcoord_index];
tc[0][1] = 1.0f - attrib.texcoords[2 * idx0.texcoord_index + 1];
tc[1][0] = attrib.texcoords[2 * idx1.texcoord_index];
@@ -374,12 +387,12 @@ static bool LoadObjAndConvert(float bmin[3], float bmax[3],
}
for (int k = 0; k < 3; k++) {
vb.push_back(v[k][0]);
vb.push_back(v[k][1]);
vb.push_back(v[k][2]);
vb.push_back(n[k][0]);
vb.push_back(n[k][1]);
vb.push_back(n[k][2]);
buffer.push_back(v[k][0]);
buffer.push_back(v[k][1]);
buffer.push_back(v[k][2]);
buffer.push_back(n[k][0]);
buffer.push_back(n[k][1]);
buffer.push_back(n[k][2]);
// Combine normal and diffuse to get color.
float normal_factor = 0.2;
float diffuse_factor = 1 - normal_factor;
@@ -396,32 +409,33 @@ static bool LoadObjAndConvert(float bmin[3], float bmax[3],
c[1] /= len;
c[2] /= len;
}
vb.push_back(c[0] * 0.5 + 0.5);
vb.push_back(c[1] * 0.5 + 0.5);
vb.push_back(c[2] * 0.5 + 0.5);
buffer.push_back(c[0] * 0.5 + 0.5);
buffer.push_back(c[1] * 0.5 + 0.5);
buffer.push_back(c[2] * 0.5 + 0.5);
vb.push_back(tc[k][0]);
vb.push_back(tc[k][1]);
buffer.push_back(tc[k][0]);
buffer.push_back(tc[k][1]);
}
}
o.vb = 0;
o.vb_id = 0;
o.numTriangles = 0;
// OpenGL viewer does not support texturing with per-face material.
if (shapes[s].mesh.material_ids.size() > 0 && shapes[s].mesh.material_ids.size() > s) {
// Base case
o.material_id = shapes[s].mesh.material_ids[s];
o.material_id = shapes[s].mesh.material_ids[0]; // use the material ID of the first face.
} else {
o.material_id = materials.size() - 1; // = ID for default material.
}
printf("shape[%d] material_id %d\n", int(s), int(o.material_id));
if (vb.size() > 0) {
glGenBuffers(1, &o.vb);
glBindBuffer(GL_ARRAY_BUFFER, o.vb);
glBufferData(GL_ARRAY_BUFFER, vb.size() * sizeof(float), &vb.at(0),
if (buffer.size() > 0) {
glGenBuffers(1, &o.vb_id);
glBindBuffer(GL_ARRAY_BUFFER, o.vb_id);
glBufferData(GL_ARRAY_BUFFER, buffer.size() * sizeof(float), &buffer.at(0),
GL_STATIC_DRAW);
o.numTriangles = vb.size() / (3 + 3 + 3 + 2) * 3;
o.numTriangles = buffer.size() / (3 + 3 + 3 + 2) / 3; // 3:vtx, 3:normal, 3:col, 2:texcoord
printf("shape[%d] # of triangles = %d\n", static_cast<int>(s),
o.numTriangles);
}
@@ -544,16 +558,17 @@ static void Draw(const std::vector<DrawObject>& drawObjects, std::vector<tinyobj
GLsizei stride = (3 + 3 + 3 + 2) * sizeof(float);
for (size_t i = 0; i < drawObjects.size(); i++) {
DrawObject o = drawObjects[i];
if (o.vb < 1) {
if (o.vb_id < 1) {
continue;
}
glBindBuffer(GL_ARRAY_BUFFER, o.vb);
glBindBuffer(GL_ARRAY_BUFFER, o.vb_id);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glBindTexture(GL_TEXTURE_2D, 0);
if ((o.material_id < materials.size())) {
std::string diffuse_texname = materials[o.material_id].diffuse_texname;
if (textures.find(diffuse_texname) != textures.end()) {
@@ -578,11 +593,11 @@ static void Draw(const std::vector<DrawObject>& drawObjects, std::vector<tinyobj
glColor3f(0.0f, 0.0f, 0.4f);
for (size_t i = 0; i < drawObjects.size(); i++) {
DrawObject o = drawObjects[i];
if (o.vb < 1) {
if (o.vb_id < 1) {
continue;
}
glBindBuffer(GL_ARRAY_BUFFER, o.vb);
glBindBuffer(GL_ARRAY_BUFFER, o.vb_id);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);

View File

@@ -1,26 +0,0 @@
Copyright (c) 2013, Alexander Tretyak
Copyright (c) 2015, r-lyeh
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,5 +1,16 @@
Experimental code for .obj loader.
# Experimental code for .obj loader.
* Multi-threaded optimized parser : tinyobj_loader_opt.h
## Requirements
* C++-11 compiler
## Compile options
* zstd compressed .obj support. `--with-zstd` premake option.
* gzip compressed .obj support. `--with-zlib` premake option.
## Licenses
* lfpAlloc : MIT license.

View File

@@ -0,0 +1,89 @@
#ifndef LF_POOL_ALLOCATOR
#define LF_POOL_ALLOCATOR
#include <memory>
#include <thread>
#include <lfpAlloc/PoolDispatcher.hpp>
namespace lfpAlloc {
template <typename T, std::size_t NumPools = 70>
class lfpAllocator {
public:
using value_type = T;
using pointer = T*;
using const_pointer = const T*;
using reference = T&;
using const_reference = T const&;
template <typename U>
struct rebind {
typedef lfpAllocator<U, NumPools> other;
};
lfpAllocator() {}
template <typename U>
lfpAllocator(lfpAllocator<U, NumPools>&&) noexcept {}
template <typename U>
lfpAllocator(const lfpAllocator<U, NumPools>&) noexcept {}
T* allocate(std::size_t count) {
if (sizeof(T) * count <=
alignof(std::max_align_t) * NumPools - sizeof(void*)) {
return reinterpret_cast<T*>(
dispatcher_.allocate(sizeof(T) * count));
} else {
return new T[count];
}
}
void deallocate(T* p, std::size_t count) noexcept {
if (sizeof(T) * count <=
alignof(std::max_align_t) * NumPools - sizeof(void*)) {
dispatcher_.deallocate(p, sizeof(T) * count);
} else {
delete[] p;
}
}
// Should not be required, but allocator_traits is not complete in
// gcc 4.9.1
template <typename U>
void destroy(U* p) {
p->~U();
}
template <typename U, typename... Args>
void construct(U* p, Args&&... args) {
new (p) U(std::forward<Args>(args)...);
}
template <typename Ty, typename U, std::size_t N, std::size_t M>
friend bool operator==(const lfpAllocator<Ty, N>&,
const lfpAllocator<U, M>&) noexcept;
template <typename U, std::size_t M>
friend class lfpAllocator;
private:
static PoolDispatcher<NumPools> dispatcher_;
};
template <typename T, std::size_t N>
PoolDispatcher<N> lfpAllocator<T, N>::dispatcher_;
template <typename T, typename U, std::size_t N, std::size_t M>
inline bool operator==(const lfpAllocator<T, N>&,
const lfpAllocator<U, M>&) noexcept {
return N == M;
}
template <typename T, typename U, std::size_t N, std::size_t M>
inline bool operator!=(const lfpAllocator<T, N>& left,
const lfpAllocator<U, M>& right) noexcept {
return !(left == right);
}
}
#endif

View File

@@ -0,0 +1,116 @@
#ifndef LF_POOL_ALLOC_CHUNK_LIST
#define LF_POOL_ALLOC_CHUNK_LIST
#include <cstdint>
#include <atomic>
#include <type_traits>
#ifndef LFP_ALLOW_BLOCKING
static_assert(ATOMIC_POINTER_LOCK_FREE == 2,
"Atomic pointer is not lock-free.");
#endif
namespace lfpAlloc {
template <std::size_t Size>
struct Cell {
uint8_t val_[Size];
Cell* next_ = this + 1;
};
// For small types (less than the size of void*), no additional
// space is needed, so union val_ with next_ to avoid overhead.
template <>
struct Cell<0> {
Cell() : next_{this + 1} {}
union {
uint8_t val_[sizeof(Cell*)];
Cell* next_;
};
};
template <std::size_t Size, std::size_t AllocationsPerChunk>
struct Chunk {
Chunk() noexcept {
auto& last = memBlock_[AllocationsPerChunk - 1];
last.next_ = nullptr;
}
Cell<Size> memBlock_[AllocationsPerChunk];
};
template <typename T>
struct Node {
Node() : val_(), next_(nullptr) {}
Node(const T& val) : val_(val), next_(nullptr) {}
T val_;
std::atomic<Node<T>*> next_;
};
template <std::size_t Size, std::size_t AllocationsPerChunk>
class ChunkList {
static constexpr auto CellSize =
(Size > sizeof(void*)) ? Size - sizeof(void*) : 0;
using Chunk_t = Chunk<CellSize, AllocationsPerChunk>;
using Cell_t = Cell<CellSize>;
using ChunkNode = Node<Chunk_t>;
using CellNode = Node<Cell_t*>;
public:
static ChunkList& getInstance() {
static ChunkList c;
return c;
}
Cell_t* allocateChain() {
CellNode* recentHead = head_.load();
CellNode* currentNext = nullptr;
do {
// If there are no available chains, allocate a new chunk
if (!recentHead) {
ChunkNode* currentHandle;
// Make a new node
auto newChunk = new ChunkNode();
// Add the chunk to the chain
do {
currentHandle = handle_.load();
newChunk->next_ = currentHandle;
} while (
!handle_.compare_exchange_weak(currentHandle, newChunk));
return &newChunk->val_.memBlock_[0];
}
currentNext = recentHead->next_;
} while (!head_.compare_exchange_weak(recentHead, currentNext));
auto retnValue = recentHead->val_;
delete recentHead;
return retnValue;
}
void deallocateChain(Cell_t* newCell) {
if (!newCell) {
return;
}
CellNode* currentHead = head_.load();
// Construct a new node to be added to the linked list
CellNode* newHead = new CellNode(newCell);
// Add the chain to the linked list
do {
newHead->next_.store(currentHead, std::memory_order_release);
} while (!head_.compare_exchange_weak(currentHead, newHead));
}
private:
ChunkList() : handle_(nullptr), head_(nullptr) {}
std::atomic<ChunkNode*> handle_;
std::atomic<CellNode*> head_;
};
}
#endif

View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Adam Schwalm
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@@ -0,0 +1,48 @@
#ifndef LF_POOL_ALLOC_POOL
#define LF_POOL_ALLOC_POOL
#include <lfpAlloc/Utils.hpp>
#include <lfpAlloc/ChunkList.hpp>
namespace lfpAlloc {
template <std::size_t Size, std::size_t AllocationsPerChunk>
class Pool {
using ChunkList_t = ChunkList<Size, AllocationsPerChunk>;
public:
static constexpr auto CellSize =
(Size > sizeof(void*)) ? Size - sizeof(void*) : 0;
using Cell_t = Cell<CellSize>;
Pool() : head_(nullptr) {}
~Pool() { ChunkList_t::getInstance().deallocateChain(head_); }
void* allocate() {
// Head loaded from head_
Cell_t* currentHead = head_;
Cell_t* next;
// Out of cells to allocate
if (!currentHead) {
currentHead = ChunkList_t::getInstance().allocateChain();
}
next = currentHead->next_;
head_ = next;
return &currentHead->val_;
}
void deallocate(void* p) noexcept {
auto newHead = reinterpret_cast<Cell_t*>(p);
Cell_t* currentHead = head_;
newHead->next_ = currentHead;
head_ = newHead;
}
private:
Cell_t* head_;
};
}
#endif

View File

@@ -0,0 +1,79 @@
#ifndef LF_POOL_DISPATCHER
#define LF_POOL_DISPATCHER
#include <tuple>
#include <cassert>
#include <cstddef>
#include <lfpAlloc/Pool.hpp>
#ifndef LFP_ALLOCATIONS_PER_CHUNK
#define LFP_ALLOCATIONS_PER_CHUNK 64 * 100
#endif
namespace lfpAlloc {
namespace detail {
template <std::size_t Num, uint16_t... Ts>
struct Pools : Pools<Num - 1, alignof(std::max_align_t) * Num, Ts...> {};
template <uint16_t... Size>
struct Pools<0, Size...> {
using type = std::tuple<Pool<Size, LFP_ALLOCATIONS_PER_CHUNK>...>;
};
}
template <std::size_t NumPools>
class PoolDispatcher {
public:
void* allocate(std::size_t size) { return dispatchAllocate<0>(size); }
void deallocate(void* p, std::size_t size) noexcept {
dispatchDeallocate<0>(p, size);
}
private:
thread_local static typename detail::Pools<NumPools>::type pools_;
static_assert(NumPools > 0, "Invalid number of pools");
template <std::size_t Index>
typename std::enable_if <
Index<NumPools, void*>::type
dispatchAllocate(std::size_t const& requestSize) {
if (requestSize <= std::get<Index>(pools_).CellSize) {
return std::get<Index>(pools_).allocate();
} else {
return dispatchAllocate<Index + 1>(requestSize);
}
}
template <std::size_t Index>
typename std::enable_if<!(Index < NumPools), void*>::type
dispatchAllocate(std::size_t const&) {
assert(false && "Invalid allocation size.");
return nullptr;
}
template <std::size_t Index>
typename std::enable_if <
Index<NumPools>::type
dispatchDeallocate(void* p, std::size_t const& requestSize) noexcept {
if (requestSize <= std::get<Index>(pools_).CellSize) {
std::get<Index>(pools_).deallocate(p);
} else {
dispatchDeallocate<Index + 1>(p, requestSize);
}
}
template <std::size_t Index>
typename std::enable_if<!(Index < NumPools)>::type
dispatchDeallocate(void*, std::size_t const&) noexcept {
assert(false && "Invalid deallocation size.");
}
};
template <std::size_t NumPools>
thread_local typename detail::Pools<NumPools>::type
PoolDispatcher<NumPools>::pools_;
}
#endif

View File

@@ -0,0 +1,20 @@
#include <cstdint>
namespace lfpAlloc {
namespace detail {
template <std::size_t Val, std::size_t base = 2>
struct Log {
enum { value = 1 + Log<Val / base, base>::value };
};
template <std::size_t base>
struct Log<1, base> {
enum { value = 0 };
};
template <std::size_t base>
struct Log<0, base> {
enum { value = 0 };
};
}
}

View File

@@ -1,973 +0,0 @@
/*
Copyright (c) 2013, Alexander Tretyak
Copyright (c) 2015, r-lyeh
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Note: The latest version of this memory allocator obtainable at
http://ltalloc.googlecode.com/hg/ltalloc.cc
Project URL: http://code.google.com/p/ltalloc
*/
#include "ltalloc.h"
#define LTALLOC_VERSION "2.0.0" /* (2015/06/16) - ltcalloc(), ltmsize(), ltrealloc(), ltmemalign(), LTALLOC_AUTO_GC_INTERVAL
#define LTALLOC_VERSION "1.0.0" /* (2015/06/16) - standard STL allocator provided [see ltalloc.hpp file](ltalloc.hpp)
#define LTALLOC_VERSION "0.0.0" /* (2013/xx/xx) - fork from public repository */
//Customizable constants
//#define LTALLOC_DISABLE_OPERATOR_NEW_OVERRIDE
//#define LTALLOC_AUTO_GC_INTERVAL 3.0
#ifndef LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO
#define LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO 2//determines how accurately size classes are spaced (i.e. when = 0, allocation requests are rounded up to the nearest power of two (2^n), when = 1, rounded to 2^n, or (2^n)*1.5, when = 2, rounded to 2^n, (2^n)*1.25, (2^n)*1.5, or (2^n)*1.75, and so on); this parameter have direct influence on memory fragmentation - bigger values lead to reducing internal fragmentation (which can be approximately estimated as pow(0.5, VALUE)*100%), but at the same time increasing external fragmentation
#endif
#define CHUNK_SIZE (64*1024)//size of chunk (basic allocation unit for all allocations of size <= MAX_BLOCK_SIZE); must be a power of two (as well as all following parameters), also should not be less than allocation granularity on Windows (which is always 64K by design of NT kernel)
#define CACHE_LINE_SIZE 64
#define MAX_NUM_OF_BLOCKS_IN_BATCH 256//maximum number of blocks to move between a thread cache and a central cache in one shot
static const unsigned int MAX_BATCH_SIZE = 64*1024;//maximum total size of blocks to move between a thread cache and a central cache in one shot (corresponds to half size of thread cache of each size class)
static const unsigned int MAX_BLOCK_SIZE = CHUNK_SIZE;//requesting memory of any size greater than this value will lead to direct call of system virtual memory allocation routine
//Platform-specific stuff
#ifdef __cplusplus
#define CPPCODE(code) code
#include <new>
#else
#define CPPCODE(code)
#endif
#ifdef LTALLOC_AUTO_GC_INTERVAL
#include <time.h>
# if LTALLOC_AUTO_GC_INTERVAL <= 0
# undef LTALLOC_AUTO_GC_INTERVAL
# define LTALLOC_AUTO_GC_INTERVAL 3.00
# endif
#endif
#ifdef __GNUC__
#define __STDC_LIMIT_MACROS
#include <stdint.h> //for SIZE_MAX
#include <limits.h> //for UINT_MAX
#define alignas(a) __attribute__((aligned(a)))
#define thread_local __thread
#define NOINLINE __attribute__((noinline))
#define CAS_LOCK(lock) __sync_lock_test_and_set(lock, 1)
#define SPINLOCK_RELEASE(lock) __sync_lock_release(lock)
#define PAUSE __asm__ __volatile__("pause" ::: "memory")
#define BSR(r, v) r = CODE3264(__builtin_clz(v) ^ 31, __builtin_clzll(v) ^ 63)//x ^ 31 = 31 - x, but gcc does not optimize 31 - __builtin_clz(x) to bsr(x), but generates 31 - (bsr(x) ^ 31)
#elif _MSC_VER
#define _ALLOW_KEYWORD_MACROS
#include <limits.h> //for SIZE_MAX and UINT_MAX
#define alignas(a) __declspec(align(a))
#define thread_local __declspec(thread)
#define NOINLINE __declspec(noinline)
#define CAS_LOCK(lock) _InterlockedExchange((long*)lock, 1)
#define SPINLOCK_RELEASE(lock) _InterlockedExchange((long*)lock, 0)
#define PAUSE _mm_pause()
#define BSR(r, v) CODE3264(_BitScanReverse, _BitScanReverse64)((unsigned long*)&r, v)
CPPCODE(extern "C") long _InterlockedExchange(long volatile *, long);
CPPCODE(extern "C") void _mm_pause();
#pragma warning(disable: 4127 4201 4324 4290)//"conditional expression is constant", "nonstandard extension used : nameless struct/union", and "structure was padded due to __declspec(align())"
#else
#error Unsupported compiler
#endif
#if __GNUC__ || __INTEL_COMPILER
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x) (x)
#define unlikely(x) (x)
#endif
//Test-and-test-and-set spinlock acquire: one CAS attempt on the fast path; under
//contention spin on plain reads (cheap, cache-friendly) and retry the CAS only
//when the lock looks free.  CAS_LOCK returns the previous value, i.e. 0 on success.
static void SPINLOCK_ACQUIRE(volatile int *lock)
{
    if (!CAS_LOCK(lock)) return;//uncontended fast path
    for (;;)
    {
        if (!*lock && !CAS_LOCK(lock)) return;
        PAUSE;
    }
}
#include <assert.h>
#include <string.h> //for memset
//Select 32- vs 64-bit code fragments at preprocessing time (SIZE_MAX/UINT_MAX
//comparison works in #if, unlike sizeof).
#if SIZE_MAX == UINT_MAX
#define CODE3264(c32, c64) c32
#else
#define CODE3264(c32, c64) c64
#endif
typedef char CODE3264_check[sizeof(void*) == CODE3264(4, 8) ? 1 : -1];//compile-time check that the SIZE_MAX-based detection matches the actual pointer size
//Raw virtual-memory primitives; VMALLOC must return 0 (not MAP_FAILED) on failure,
//see the mmap wrapper below.
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#define VMALLOC(size) VirtualAlloc(NULL, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE)
#define VMFREE(p, size) VirtualFree(p, 0, MEM_RELEASE)
#else
#include <sys/mman.h>
#include <unistd.h>
#define VMALLOC(size) (void*)(((uintptr_t)mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0)+1)&~1)//with the conversion of MAP_FAILED to 0
#define VMFREE(p, size) munmap(p, size)
//Return the system page size, caching the sysconf() result in a static.
//A plain size_t store is assumed atomic, so a racy first call from several
//threads is harmless (they all write the same value).
static size_t page_size()
{
    static size_t cached = 0;
    assert((uintptr_t)MAP_FAILED+1 == 0);//have to use dynamic check somewhere, because some gcc versions (e.g. 4.4.5) won't compile typedef char MAP_FAILED_value_static_check[(uintptr_t)MAP_FAILED+1 == 0 ? 1 : -1];
    if (cached == 0)
        cached = (size_t)sysconf(_SC_PAGE_SIZE);
    return cached;
}
typedef struct PTrieNode // Compressed radix tree (patricia trie) for storing sizes of system allocations
{ // This implementation has some specific properties:
    uintptr_t keys[2]; // 1. There are no separate leaf nodes (with both null children), as a leaf's value is stored directly in place of the corresponding child node pointer. The least significant bit of that pointer is used to determine its meaning (i.e., is it a value, or a child node pointer). The low byte of keys[0] additionally stores the node's branch-bit index (keys are CHUNK_SIZE-aligned, so their low byte is always free).
    struct PTrieNode *childNodes[2];// 2. Inserting a new element (key/value) into this tree always requires creating exactly one new node (and similarly, removing a key frees exactly one node).
} PTrieNode; // 3. The tree always contains exactly one node with a null child (i.e. all but one of the nodes in any possible tree have two children).
#define PTRIE_NULL_NODE (PTrieNode*)(uintptr_t)1 //tagged "no entry" sentinel: low bit set so it is treated as a leaf by the traversal loops
static PTrieNode *ptrieRoot = PTRIE_NULL_NODE, *ptrieFreeNodesList = NULL, *ptrieNewAllocatedPage = NULL;//spare-node free list and bump-pointer page used by ptrie_insert callers
static volatile int ptrieLock = 0;//guards ptrieRoot and both node pools above
static uintptr_t ptrie_lookup(uintptr_t key)
{
PTrieNode *node = ptrieRoot;
uintptr_t *lastKey = NULL;
while (!((uintptr_t)node & 1))
{
int branch = (key >> (node->keys[0] & 0xFF)) & 1;
lastKey = &node->keys[branch];
node = node->childNodes[branch];
}
assert(lastKey && (*lastKey & ~0xFF) == key);
return (uintptr_t)node & ~1;
}
//Insert the pair (key -> value) into the patricia trie, consuming exactly one
//caller-provided node 'newNode'.  Constraints (asserted): value must have its
//low bit clear (it gets tagged with 1 to mark it as a leaf) and key must have
//its low byte clear (the low byte of keys[0] stores the branch-bit index).
//Caller must hold ptrieLock.
static void ptrie_insert(uintptr_t key, uintptr_t value, PTrieNode *newNode/* = (PTrieNode*)malloc(sizeof(PTrieNode))*/)
{
    PTrieNode **node = &ptrieRoot, *n;
    uintptr_t *prevKey = NULL, x, pkey;
    unsigned int index, b;
    assert(!((value & 1) | (key & 0xFF)));//check constraints for key/value
    for (;;)
    {
        n = *node;
        if (!((uintptr_t)n & 1))//not a leaf
        {
            int prefixEnd = n->keys[0] & 0xFF;
            x = key ^ n->keys[0];// & ~0xFF;
            if (!(x & (~(uintptr_t)1 << prefixEnd))) {//prefix matches, so go on
                int branch = (key >> prefixEnd) & 1;
                node = &n->childNodes[branch];
                prevKey = &n->keys[branch];
            } else {//insert a new node before current
                pkey = n->keys[0] & ~0xFF;
                break;
            }
        } else {//leaf
            if (*node == PTRIE_NULL_NODE) {
                *node = newNode;
                newNode->keys[0] = key;//left prefixEnd = 0, so all following insertions will be before this node
                newNode->childNodes[0] = (PTrieNode*)(value | 1);
                newNode->childNodes[1] = PTRIE_NULL_NODE;
                return;
            } else {
                pkey = *prevKey & ~0xFF;
                x = key ^ pkey;
                assert(x/*key != pkey*/ && "key already inserted");
                break;
            }
        }
    }
    //Insertion point found: build a new interior node whose branch bit is the
    //highest bit ('index') at which 'key' differs from the existing key/prefix;
    //'b' is key's value of that bit, so key goes to child b and the old subtree
    //('n') to child b^1.
    BSR(index, x);
    b = (key >> index) & 1;
    newNode->keys[b] = key;
    newNode->keys[b^1] = pkey;
    newNode->keys[0] |= index;//store the branch-bit index in the (free) low byte of keys[0]
    newNode->childNodes[b] = (PTrieNode*)(value | 1);
    newNode->childNodes[b^1] = n;
    *node = newNode;
}
//Remove 'key' from the patricia trie and return the value that was stored for
//it.  The node made superfluous by the removal is pushed onto
//ptrieFreeNodesList for reuse by a later insert.  Caller must hold ptrieLock;
//the key must be present (the trie must not be empty - asserted).
static uintptr_t ptrie_remove(uintptr_t key)
{
    PTrieNode **node = &ptrieRoot;
    uintptr_t *pkey = NULL;
    assert(ptrieRoot != PTRIE_NULL_NODE && "trie is empty!");
    for (;;)
    {
        PTrieNode *n = *node;
        int branch = (key >> (n->keys[0] & 0xFF)) & 1;
        PTrieNode *cn = n->childNodes[branch];//current child node
        if ((uintptr_t)cn & 1)//leaf
        {
            PTrieNode *other = n->childNodes[branch^1];
            assert((n->keys[branch] & ~0xFF) == key);
            assert(cn != PTRIE_NULL_NODE && "node's key is probably broken");
//          if (other == PTRIE_NULL_NODE) *node = PTRIE_NULL_NODE; else//special handling for null child nodes is not necessary
            if (((uintptr_t)other & 1) && other != PTRIE_NULL_NODE)//if other node is not a pointer
                *pkey = (n->keys[branch^1] & ~0xFF) | ((*pkey) & 0xFF);//propagate the surviving leaf's key into the parent slot, preserving the parent's branch-bit index byte
            *node = other;//splice the node out by lifting its sibling into its place
            *(PTrieNode**)n = ptrieFreeNodesList; ptrieFreeNodesList = n;//free(n);
            return (uintptr_t)cn & ~1;//strip the leaf tag bit to recover the stored value
        }
        pkey = &n->keys[branch];
        node = &n->childNodes[branch];
    }
}
#endif
//Allocate 'size' bytes aligned to 'alignment' (a power of two, asserted)
//directly from the OS.  Optimistically maps exactly 'size' first - kernels
//often return suitably aligned regions - and only on misalignment falls back
//to the slower over-allocate-and-trim strategy.  Returns NULL on failure
//(VMALLOC maps mmap's MAP_FAILED to 0).
static void *sys_aligned_alloc(size_t alignment, size_t size)
{
    void *p = VMALLOC(size);//optimistically try mapping precisely the right amount before falling back to the slow method
    assert(!(alignment & (alignment-1)) && "alignment must be a power of two");
    if ((uintptr_t)p & (alignment-1)/* && p != MAP_FAILED*/)
    {
        VMFREE(p, size);
#ifdef _WIN32
        {static DWORD allocationGranularity = 0;
        if (!allocationGranularity) {
            SYSTEM_INFO si;
            GetSystemInfo(&si);
            allocationGranularity = si.dwAllocationGranularity;
        }
        if ((uintptr_t)p < 16*1024*1024)//fill "bubbles" (reserve unaligned regions) at the beginning of virtual address space, otherwise there will be always falling back to the slow method
            VirtualAlloc(p, alignment - ((uintptr_t)p & (alignment-1)), MEM_RESERVE, PAGE_NOACCESS);
        do
        {
            //Reserve an oversized region, release it, then re-commit at the aligned
            //address inside it; retry if another thread grabbed that address meanwhile.
            p = VirtualAlloc(NULL, size + alignment - allocationGranularity, MEM_RESERVE, PAGE_NOACCESS);
            if (p == NULL) return NULL;
            VirtualFree(p, 0, MEM_RELEASE);//unfortunately, WinAPI doesn't support release a part of allocated region, so release a whole region
            p = VirtualAlloc((void*)(((uintptr_t)p + (alignment-1)) & ~(alignment-1)), size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
        } while (p == NULL);}
#else
        //POSIX: over-map, then munmap the unaligned head and tail fragments.
        p = VMALLOC(size + alignment - page_size());
        if (p/* != MAP_FAILED*/)
        {
            uintptr_t ap = ((uintptr_t)p + (alignment-1)) & ~(alignment-1);
            uintptr_t diff = ap - (uintptr_t)p;
            if (diff) VMFREE(p, diff);
            diff = alignment - page_size() - diff;
            assert((intptr_t)diff >= 0);
            if (diff) VMFREE((void*)(ap + size), diff);
            return (void*)ap;
        }
#endif
    }
    //if (p == 0) p = sys_aligned_alloc(alignment, size);//just in case (because 0 pointer is handled specially elsewhere)
    //if (p == MAP_FAILED) p = NULL;
    return p;
}
//Release a region obtained from sys_aligned_alloc (used for allocations larger
//than MAX_BLOCK_SIZE) back to the OS; p == NULL is a no-op.  On POSIX the
//region size required by munmap is fetched (and removed) from the patricia
//trie under ptrieLock; on Windows VirtualFree needs no size.
static NOINLINE void sys_free(void *p)
{
    if (p == NULL) return;
#ifdef _WIN32
    VirtualFree(p, 0, MEM_RELEASE);
#else
    SPINLOCK_ACQUIRE(&ptrieLock);
    size_t size = ptrie_remove((uintptr_t)p);
    SPINLOCK_RELEASE(&ptrieLock);
    munmap(p, size);
#endif
}
static void release_thread_cache(void*);
//Arrange for release_thread_cache() to run when a thread exits, so a dying
//thread's cached blocks are returned to the central caches.  On gcc this uses
//a pthread TLS-key destructor (pthread symbols are declared weak so programs
//not linked with pthreads still link and simply skip the registration); on
//MSVC a TLS callback placed in the .CRT$XLL section is used instead, and
//init_pthread_destructor() becomes a no-op macro.
#ifdef __GNUC__
#include <pthread.h>
#pragma weak pthread_once
#pragma weak pthread_key_create
#pragma weak pthread_setspecific
static pthread_key_t pthread_key;
static pthread_once_t init_once = PTHREAD_ONCE_INIT;
static void init_pthread_key() { pthread_key_create(&pthread_key, release_thread_cache); }
static thread_local int thread_initialized = 0;
static void init_pthread_destructor()//must be called only when some block placed into a thread cache's free list
{
    if (unlikely(!thread_initialized))
    {
        thread_initialized = 1;
        if (pthread_once)//weak symbol: non-null only when the program is actually linked against pthreads
        {
            pthread_once(&init_once, init_pthread_key);
            pthread_setspecific(pthread_key, (void*)1);//set nonzero value to force calling of release_thread_cache() on thread terminate
        }
    }
}
#else
static void NTAPI on_tls_callback(PVOID h, DWORD reason, PVOID pv) { h; pv; if (reason == DLL_THREAD_DETACH) release_thread_cache(0); }//"h; pv;" silences unreferenced-parameter warnings
#pragma comment(linker, "/INCLUDE:" CODE3264("_","") "p_thread_callback_ltalloc")//force the linker to keep the TLS callback below (leading underscore only on 32-bit)
#pragma const_seg(".CRT$XLL")
extern CPPCODE("C") const PIMAGE_TLS_CALLBACK p_thread_callback_ltalloc = on_tls_callback;
#pragma const_seg()
#define init_pthread_destructor()
#endif
//End of platform-specific stuff
//Clamp MAX_BLOCK_SIZE so the largest size class still fits in a chunk.  Note:
//inside its own expansion the macro name is not re-expanded (macros are not
//recursive), so there MAX_BLOCK_SIZE refers to the static const variable above.
#define MAX_BLOCK_SIZE (MAX_BLOCK_SIZE < CHUNK_SIZE - (CHUNK_SIZE >> (1 + LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO)) ? \
                        MAX_BLOCK_SIZE : CHUNK_SIZE - (CHUNK_SIZE >> (1 + LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO)))
#define NUMBER_OF_SIZE_CLASSES ((sizeof(void*)*8 + 1) << LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO)
typedef struct FreeBlock//overlaid on the user data of every free block; allocated blocks carry no header (their size class lives in the chunk header, see ltfree/ltmsize)
{
    struct FreeBlock *next,
        *nextBatch;//in the central cache blocks are organized into batches to allow fast moving blocks from thread cache and back
} FreeBlock;
typedef struct alignas(CACHE_LINE_SIZE) ChunkBase//force sizeof(Chunk) = cache line size to avoid false sharing
{
    unsigned int sizeClass;
} Chunk;
typedef struct alignas(CACHE_LINE_SIZE) ChunkSm//chunk of smallest blocks of size = sizeof(void*)
{
    unsigned int sizeClass;//struct ChunkBase chunk;
    struct ChunkSm *prev, *next;//doubly linked list of small chunks, threaded through CentralCache::chunkWithFreeBatches
    int numBatches;//number of used entries in batches[]
#define NUM_OF_BATCHES_IN_CHUNK_SM CHUNK_SIZE/(sizeof(void*)*MAX_NUM_OF_BLOCKS_IN_BATCH)
    FreeBlock *batches[NUM_OF_BATCHES_IN_CHUNK_SM];//batches of blocks inside ChunkSm have to be stored separately (as smallest blocks of size = sizeof(void*) do not have enough space to store second pointer for the batch)
} ChunkSm;
typedef struct alignas(CACHE_LINE_SIZE)//align needed to prevent cache line sharing between adjacent classes accessed from different threads
{
    volatile int lock;//spinlock guarding all fields below
    unsigned int freeBlocksInLastChunk;//blocks still never handed out from lastChunk
    char *lastChunk;//Chunk or ChunkSm; points at the first never-yet-allocated block of the newest chunk
    union {
        FreeBlock *firstBatch;//ordinary classes: singly linked list of batches (via nextBatch)
        ChunkSm *chunkWithFreeBatches;//smallest class: current chunk whose batches[] array is being filled/drained
    };
    FreeBlock *freeList;//short list of free blocks that for some reason are not organized into batches
    unsigned int freeListSize;//should be less than batch size
    uintptr_t minChunkAddr, maxChunkAddr;//address range of this class's chunks, used by ltsqueeze to size its counting buffer
} CentralCache;
static CentralCache centralCache[NUMBER_OF_SIZE_CLASSES];// = {{0}};
typedef struct
{
    FreeBlock *freeList;//lock-free per-thread list the fast paths of ltmalloc/ltfree operate on
    FreeBlock *tempList;//intermediate list providing a hysteresis in order to avoid a corner case of too frequent moving free blocks to the central cache and back from
    int counter;//number of blocks in freeList (used to determine when to move free blocks list to the central cache)
} ThreadCache;
static thread_local ThreadCache threadCache[NUMBER_OF_SIZE_CLASSES];// = {{0}};
static struct
{
    volatile int lock;
    void *freeChunk;//singly linked list of spare chunks retained from the OS by ltsqueeze(padsz) for quick reuse
    size_t size;//total bytes currently held in the freeChunk list
} pad = {0, NULL, 0};
//Map a requested allocation size to its size-class index (class_to_size is the
//inverse).  Classes are spaced as 2^n plus LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO
//subdivisions per octave (see the comment at that macro's definition).
static CPPCODE(inline) unsigned int get_size_class(size_t size)
{
    unsigned int index;
#if _MSC_VER && LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO == 2
    static const unsigned char small_size_classes[256] = {//have to use a const array here, because MS compiler unfortunately does not evaluate _BitScanReverse with a constant argument at compile time (as gcc does for __builtin_clz)
#if CODE3264(1, 0)
    131, 4, 15, 17, 19, 20, 21, 22, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 28, 28, 29, 29, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31, 32, 32, 32, 32, 32, 32, 32, 32, 33, 33, 33, 33, 33, 33, 33, 33, 34, 34, 34, 34, 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, 35, 35, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43
#else
    131, 15, 19, 21, 23, 24, 25, 26, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 32, 32, 32, 33, 33, 33, 33, 34, 34, 34, 34, 35, 35, 35, 35, 36, 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47
#endif
    };
    if (size < 256 * sizeof(void*) - (sizeof(void*)-1))
        return small_size_classes[(size + (sizeof(void*)-1)) / sizeof(void*)];
#endif
    size = (size + (sizeof(void*)-1)) & ~(sizeof(void*)-1);//minimum block size is sizeof(void*), doing this is better than just "size = max(size, sizeof(void*))"
    BSR(index, (size-1)|1);//"|1" needed because result of BSR is undefined for zero input
#if LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO == 0
    return index;
#else
    return (index<<LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO) + (unsigned int)((size-1) >> (index-LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO));//octave index plus sub-octave offset
#endif
}
//Inverse of get_size_class(): the block size (in bytes) served for class 'c',
//i.e. the largest request that maps to this class.
static unsigned int class_to_size(unsigned int c)
{
#if LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO == 0
    return 2 << c;//pure power-of-two spacing
#else
#if LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO >= CODE3264(2, 3)
    if (unlikely(c < (LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO<<LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO)))//for block sizes less than or equal to pow(2, LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO)
        return 2 << (c>>LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO);
    else
#endif
    {
        //Decompose class index into sub-octave bits (mantissa) and octave (shift).
        c -= (1<<LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO)-1;
        return ((c & ((1<<LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO)-1)) | (1<<LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO)) << ((c>>LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO)-LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO);
    }
#endif
}
static unsigned int batch_size(unsigned int sizeClass)//calculates a number of blocks to move between a thread cache and a central cache in one shot
{
    //Halve the MAX_BATCH_SIZE byte budget for every power-of-two step in block
    //size, then clamp the block count with a mask (both limits are powers of two).
    unsigned int octave = sizeClass >> LTALLOC_SIZE_CLASSES_SUBPOWER_OF_TWO;
    unsigned int blocks = (MAX_BATCH_SIZE - 1) >> octave;
    return blocks & (MAX_NUM_OF_BLOCKS_IN_BATCH - 1);
}
CPPCODE(template <bool> static) void *ltmalloc(size_t size);
//Slow path of ltmalloc: the thread cache 'tc' for 'sizeClass' is empty.
//Refills it (from tempList, the central cache's batches/free list, the tail of
//the newest chunk, or a brand new chunk) and returns one block; requests larger
//than MAX_BLOCK_SIZE go straight to the system allocator instead.  Returns NULL
//on exhaustion (or throws std::bad_alloc when compiled as C++ with throw_ set).
CPPCODE(template <bool throw_>) static void *fetch_from_central_cache(size_t size, ThreadCache *tc, unsigned int sizeClass)
{
    void *p;
    if (likely(size-1u <= MAX_BLOCK_SIZE-1u))//<=> if (size <= MAX_BLOCK_SIZE && size != 0)
    {
        FreeBlock *fb = tc->tempList;
        if (fb)//cheapest refill: promote the hysteresis list back to freeList
        {
            assert(tc->counter == (int)batch_size(sizeClass)+1);
            tc->counter = 1;
            tc->freeList = fb->next;
            tc->tempList = NULL;
            return fb;
        }
        assert(tc->counter == 0 || tc->counter == (int)batch_size(sizeClass)+1);
        tc->counter = 1;
        {CentralCache *cc = &centralCache[sizeClass];
        SPINLOCK_ACQUIRE(&cc->lock);
        if (unlikely(!cc->firstBatch))//no free batch
        {no_free_batch:{
            unsigned int batchSize = batch_size(sizeClass)+1;
            if (cc->freeList)//take (up to) a batch worth of blocks from the loose free list
            {
                assert(cc->freeListSize);
                if (likely(cc->freeListSize <= batchSize + 1))
                {
                    tc->counter = batchSize - cc->freeListSize + 1;
//                  batchSize = cc->freeListSize;
                    cc->freeListSize = 0;
                    fb = cc->freeList;
                    cc->freeList = NULL;
                }
                else
                {
                    cc->freeListSize -= batchSize;
                    fb = cc->freeList;
                    {FreeBlock *b = cc->freeList;
                    while (--batchSize) b = b->next;
                    cc->freeList = b->next;
                    b->next = NULL;}
                }
                SPINLOCK_RELEASE(&cc->lock);
                tc->freeList = fb->next;
                init_pthread_destructor();//this call must be placed carefully to allow recursive memory allocation from pthread_key_create (in case when ltalloc replaces the system malloc)
                return fb;
            }
            {unsigned int blockSize = class_to_size(sizeClass);
            if (cc->freeBlocksInLastChunk)//carve a batch from the never-yet-used tail of the newest chunk
            {
                char *firstFree = cc->lastChunk;
                assert(cc->lastChunk && cc->freeBlocksInLastChunk == (CHUNK_SIZE - ((uintptr_t)cc->lastChunk & (CHUNK_SIZE-1)))/blockSize);
                if (cc->freeBlocksInLastChunk < batchSize) {
                    tc->counter = batchSize - cc->freeBlocksInLastChunk + 1;
                    batchSize = cc->freeBlocksInLastChunk;
                }
                cc->freeBlocksInLastChunk -= batchSize;
                cc->lastChunk += blockSize * batchSize;
                if (cc->freeBlocksInLastChunk == 0) {//this chunk is exhausted - pop the next one from the list hooked at its end
                    assert(((uintptr_t)cc->lastChunk & (CHUNK_SIZE-1)) == 0);
                    cc->lastChunk = ((char**)cc->lastChunk)[-1];
                    if (cc->lastChunk)
                        cc->freeBlocksInLastChunk = (CHUNK_SIZE - ((uintptr_t)cc->lastChunk & (CHUNK_SIZE-1)))/blockSize;
                }
                SPINLOCK_RELEASE(&cc->lock);
                //Link the carved blocks into a list outside the lock.
                fb = (FreeBlock*)firstFree;
                while (--batchSize)
                    firstFree = (char*)(((FreeBlock*)firstFree)->next = (FreeBlock*)(firstFree + blockSize));
                ((FreeBlock*)firstFree)->next = NULL;
                tc->freeList = fb->next;
                init_pthread_destructor();
                return fb;
            }
            //Allocate new chunk
            SPINLOCK_RELEASE(&cc->lock);//release lock for a while
            SPINLOCK_ACQUIRE(&pad.lock);
            if (pad.freeChunk)//reuse a chunk retained by ltsqueeze(padsz)
            {
                p = pad.freeChunk;
                pad.freeChunk = *(void**)p;
                pad.size -= CHUNK_SIZE;
                SPINLOCK_RELEASE(&pad.lock);
                ((char**)((char*)p + CHUNK_SIZE))[-1] = 0;
            } else {
                SPINLOCK_RELEASE(&pad.lock);
                p = sys_aligned_alloc(CHUNK_SIZE, CHUNK_SIZE);
                if (unlikely(!p)) { CPPCODE(if (throw_) throw std::bad_alloc(); else) return NULL; }
            }
#define CHUNK_IS_SMALL unlikely(sizeClass < get_size_class(2*sizeof(void*)))
            {unsigned int numBlocksInChunk = (CHUNK_SIZE - (CHUNK_IS_SMALL ? sizeof(ChunkSm) : sizeof(Chunk)))/blockSize;
#ifndef _WIN32
            //intptr_t sz = ((CHUNK_SIZE - numBlocksInChunk*blockSize) & ~(page_size()-1)) - page_size();
            //if (sz > 0) mprotect((char*)p + page_size(), sz, PROT_NONE);//munmap((char*)p + page_size(), sz);//to make possible unmapping, we need to be more careful when returning memory to the system, not simply VMFREE(firstFreeChunk, CHUNK_SIZE), so let there be just mprotect
#endif
            assert(((char**)((char*)p + CHUNK_SIZE))[-1] == 0);//assume that allocated memory is always zero filled (on first access); it is better not to zero it explicitly because it will lead to allocation of physical page which may never needed otherwise
            if (numBlocksInChunk < batchSize) {
                tc->counter = batchSize - numBlocksInChunk + 1;
                batchSize = numBlocksInChunk;
            }
            //Prepare chunk
            ((Chunk*)p)->sizeClass = sizeClass;
            {char *firstFree = (char*)p + CHUNK_SIZE - numBlocksInChunk*blockSize;//blocks in chunk are located in such way to achieve a maximum possible alignment
            fb = (FreeBlock*)firstFree;
            {int n = batchSize; while (--n)
                firstFree = (char*)(((FreeBlock*)firstFree)->next = (FreeBlock*)(firstFree + blockSize));}
            ((FreeBlock*)firstFree)->next = NULL;
            firstFree += blockSize;
            SPINLOCK_ACQUIRE(&cc->lock);
            if ((uintptr_t)p < cc->minChunkAddr || !cc->minChunkAddr) cc->minChunkAddr = (uintptr_t)p;
            if ((uintptr_t)p > cc->maxChunkAddr ) cc->maxChunkAddr = (uintptr_t)p;
            if (CHUNK_IS_SMALL)//special handling for smallest blocks of size = sizeof(void*)
            {
                ChunkSm *cs = (ChunkSm*)p;
                cs->numBatches = 0;
                //Insert new chunk right after chunkWithFreeBatches
                cs->prev = cc->chunkWithFreeBatches;
                if (cc->chunkWithFreeBatches) {
                    cs->next = cc->chunkWithFreeBatches->next;
                    if (cc->chunkWithFreeBatches->next) cc->chunkWithFreeBatches->next->prev = cs;
                    cc->chunkWithFreeBatches->next = cs;
                } else {
                    cs->next = NULL;
                    cc->chunkWithFreeBatches = cs;
                }
            }
            if (unlikely(cc->freeBlocksInLastChunk))//so happened that other thread have already allocated chunk for the same size class while the lock was released
            {
                //Hook pointer to the current lastChunk at the end of new chunk (another way is just put all blocks to cc->freeList which is much less effecient)
                ((char**)(((uintptr_t)firstFree & ~(CHUNK_SIZE-1)) + CHUNK_SIZE))[-1] = cc->lastChunk;
            }
            cc->freeBlocksInLastChunk = numBlocksInChunk - batchSize;
            cc->lastChunk = firstFree;
        }}}}}
        else {
            if (!CHUNK_IS_SMALL)//smallest blocks of size = sizeof(void*) are handled specially
            {
                fb = cc->firstBatch;
                cc->firstBatch = fb->nextBatch;
            }
            else//size of block = sizeof(void*)
            {
                ChunkSm *cs = cc->chunkWithFreeBatches;
                if (unlikely(cs->numBatches == 0))//this chunk's batch array is drained - step back to the previous (full) chunk
                {
                    if (unlikely(cs->prev == NULL)) goto no_free_batch;
                    cs = cc->chunkWithFreeBatches = cs->prev;
                    assert(cs->numBatches == NUM_OF_BATCHES_IN_CHUNK_SM);
                }
                fb = cs->batches[--cs->numBatches];
            }
        }
        SPINLOCK_RELEASE(&cc->lock);}
        tc->freeList = fb->next;
        init_pthread_destructor();
        return fb;
    }
    else//allocate block directly from the system
    {
        if (unlikely(size == 0)) return ltmalloc CPPCODE(<throw_>)(1);//return NULL;//doing this check here is better than on the top level
        size = (size + CHUNK_SIZE-1) & ~(CHUNK_SIZE-1);
        p = sys_aligned_alloc(CHUNK_SIZE, size);
#ifndef _WIN32
        if (p) {//record the region size in the trie so ltmsize/sys_free can find it; trie nodes come from a free list or a bump-allocated page
            SPINLOCK_ACQUIRE(&ptrieLock);
            PTrieNode *newNode;
            if (ptrieFreeNodesList)
                ptrieFreeNodesList = *(PTrieNode**)(newNode = ptrieFreeNodesList);
            else if (ptrieNewAllocatedPage) {
                newNode = ptrieNewAllocatedPage;
                if (!((uintptr_t)++ptrieNewAllocatedPage & (page_size()-1)))
                    ptrieNewAllocatedPage = ((PTrieNode**)ptrieNewAllocatedPage)[-1];
            } else {
                SPINLOCK_RELEASE(&ptrieLock);
                newNode = (PTrieNode*)VMALLOC(page_size());
                if (unlikely(!newNode)) { CPPCODE(if (throw_) throw std::bad_alloc(); else) return NULL; }
                assert(((char**)((char*)newNode + page_size()))[-1] == 0);
                SPINLOCK_ACQUIRE(&ptrieLock);
                ((PTrieNode**)((char*)newNode + page_size()))[-1] = ptrieNewAllocatedPage;//in case if other thread also have just allocated a new page
                ptrieNewAllocatedPage = newNode + 1;
            }
            ptrie_insert((uintptr_t)p, size, newNode);
            SPINLOCK_RELEASE(&ptrieLock);
        }
#endif
        CPPCODE(if (throw_) if (unlikely(!p)) throw std::bad_alloc();)
        return p;
    }
}
//Allocate 'size' bytes.  Fast path: pop the head of this thread's free list for
//the request's size class; only when that list is empty fall back to
//fetch_from_central_cache (which also handles size == 0 and oversized requests).
CPPCODE(template <bool throw_> static) void *ltmalloc(size_t size)
{
    unsigned int sizeClass = get_size_class(size);
    ThreadCache *cache = &threadCache[sizeClass];
    FreeBlock *head = cache->freeList;
    if (unlikely(head == NULL))
        return fetch_from_central_cache CPPCODE(<throw_>)(size, cache, sizeClass);
    cache->freeList = head->next;
    cache->counter++;
    return head;
}
//Plain (non-template) overload for external callers; in C builds CPPCODE()
//strips this line and the definition above is already a plain function.
CPPCODE(void *ltmalloc(size_t size) {return ltmalloc<false>(size);})//for possible external usage
//Push one full batch of blocks onto the central cache of 'sizeClass'.
//Caller must hold cc->lock.  Ordinary classes chain batches through
//FreeBlock::nextBatch; the smallest class (blocks of sizeof(void*), which
//cannot hold a second pointer) stores batch heads in the per-chunk batches[]
//arrays of the ChunkSm list instead.
static void add_batch_to_central_cache(CentralCache *cc, unsigned int sizeClass, FreeBlock *batch)
{
    if (!CHUNK_IS_SMALL)
    {
        batch->nextBatch = cc->firstBatch;
        cc->firstBatch = batch;
    }
    else
    {
        ChunkSm *cs = cc->chunkWithFreeBatches;
        if (unlikely(cs->numBatches == NUM_OF_BATCHES_IN_CHUNK_SM))//current chunk's array is full - advance to the next chunk in the list
        {
            cs = cc->chunkWithFreeBatches = cc->chunkWithFreeBatches->next;
            assert(cs && cs->numBatches == 0);
        }
        cs->batches[cs->numBatches++] = batch;
    }
}
//Slow path of ltfree(), entered when tc->counter underflows: hand the older
//tempList batch to the central cache, then demote the current freeList to
//tempList.  The tempList stage is a hysteresis that keeps one batch around so
//alternating alloc/free traffic does not bounce blocks to the central cache
//and back on every call.
static NOINLINE void move_to_central_cache(ThreadCache *tc, unsigned int sizeClass)
{
    init_pthread_destructor();//needed for cases when freed memory was allocated in the other thread and no alloc was called in this thread till its termination
    tc->counter = batch_size(sizeClass);
    if (tc->tempList)//move temp list to the central cache
    {
        CentralCache *cc = &centralCache[sizeClass];
        SPINLOCK_ACQUIRE(&cc->lock);
        add_batch_to_central_cache(cc, sizeClass, tc->tempList);
        SPINLOCK_RELEASE(&cc->lock);
    }
//  else if (unlikely(!tc->freeList))//this is a first call (i.e. when counter = 0) - just initialization of counter needed
//  {
//      tc->counter--;
//      return;
//  }
    tc->tempList = tc->freeList;
    tc->freeList = NULL;
}
//Free a pointer returned by ltmalloc.  Blocks inside a chunk are never
//CHUNK_SIZE-aligned (the chunk header occupies the start of every chunk), so a
//64K-aligned p is either NULL or a direct system allocation - both handled by
//sys_free.  Chunk blocks are pushed onto the thread-local free list; the
//counter underflow triggers the batched hand-off to the central cache.
void ltfree(void *p)
{
    if (likely((uintptr_t)p & (CHUNK_SIZE-1)))
    {
        unsigned int sizeClass = ((Chunk*)((uintptr_t)p & ~(CHUNK_SIZE-1)))->sizeClass;//chunk header lives at the 64K-aligned base of the block's chunk
        ThreadCache *tc = &threadCache[sizeClass];
        if (unlikely(--tc->counter < 0))
            move_to_central_cache(tc, sizeClass);
        ((FreeBlock*)p)->next = tc->freeList;
        tc->freeList = (FreeBlock*)p;
    }
    else
        sys_free(p);//also handles p == NULL
}
//Usable size of an allocation 'p' (0 for NULL).  For in-chunk blocks this is
//the size-class capacity, which may exceed the originally requested size; for
//big (64K-aligned) allocations it is the CHUNK_SIZE-rounded region size
//recorded at allocation time (POSIX: stored in the trie; Windows: queried from
//the OS via VirtualQuery).
size_t ltmsize(void *p)
{
    if (likely((uintptr_t)p & (CHUNK_SIZE-1)))//not 64K-aligned => a block inside a chunk (see ltfree)
    {
        return class_to_size(((Chunk*)((uintptr_t)p & ~(CHUNK_SIZE-1)))->sizeClass);
    }
    else
    {
        if (p == NULL) return 0;
#ifdef _WIN32
        {MEMORY_BASIC_INFORMATION mi;
        VirtualQuery(p, &mi, sizeof(mi));
        return mi.RegionSize;}
#else
        SPINLOCK_ACQUIRE(&ptrieLock);
        size_t size = ptrie_lookup((uintptr_t)p);
        SPINLOCK_RELEASE(&ptrieLock);
        return size;
#endif
    }
}
//Thread-exit destructor (registered via init_pthread_destructor / the MSVC TLS
//callback): hands every block still held in this thread's caches back to the
//central caches so a dying thread does not strand memory.  The 'p' argument is
//only the pthread-key value and is ignored.
static void release_thread_cache(void *p)
{
    unsigned int sizeClass = 0; (void)p;
    for (;sizeClass < NUMBER_OF_SIZE_CLASSES; sizeClass++)
    {
        ThreadCache *tc = &threadCache[sizeClass];
        if (tc->freeList || tc->tempList)
        {
            FreeBlock *tail = tc->freeList;
            unsigned int freeListSize = 1;
            CentralCache *cc = &centralCache[sizeClass];
            if (tail)
                while (tail->next)//search for end of list
                    tail = tail->next, freeListSize++;
            SPINLOCK_ACQUIRE(&cc->lock);
            if (tc->tempList)//tempList is always a complete batch - hand it over as one
                add_batch_to_central_cache(cc, sizeClass, tc->tempList);
            if (tc->freeList) {//append tc->freeList to cc->freeList
                tail->next = cc->freeList;
                cc->freeList = tc->freeList;
                assert(freeListSize == batch_size(sizeClass)+1 - tc->counter);
                cc->freeListSize += freeListSize;
            }
            SPINLOCK_RELEASE(&cc->lock);
        }
    }
}
//Return the memory of completely-free chunks back to the system, optionally
//retaining up to 'padsz' bytes of free chunks in the global 'pad' list for
//quick reuse.  Works per size class in three phases: detach the class's free
//lists from the central cache under its lock, scan them lock-free counting
//free blocks per chunk (a chunk whose count reaches numBlocksInChunk is wholly
//free), then re-attach the surviving blocks and unmap/stash the free chunks.
void ltsqueeze(size_t padsz)
{
    unsigned int sizeClass = get_size_class(2*sizeof(void*));//skip small chunks because corresponding batches can not be efficiently detached from the central cache (if that becomes relevant, may be it worths to reimplement batches for small chunks from array to linked lists)
    for (;sizeClass < NUMBER_OF_SIZE_CLASSES; sizeClass++)
    {
        CentralCache *cc = &centralCache[sizeClass];
        if (cc->maxChunkAddr - cc->minChunkAddr <= CHUNK_SIZE)//preliminary check without lock (assume that writing to minChunkAddr/maxChunkAddr is atomic)
            continue;
        SPINLOCK_ACQUIRE(&cc->lock);
        if (cc->maxChunkAddr - cc->minChunkAddr <= CHUNK_SIZE) {//quick check for theoretical possibility that at least one chunk is totally free
            SPINLOCK_RELEASE(&cc->lock);
            continue;
        }
        {uintptr_t minChunkAddr = cc->minChunkAddr;
        size_t bufferSize = ((cc->maxChunkAddr - minChunkAddr) / CHUNK_SIZE + 1) * sizeof(short);//one counter per possible chunk address in range
        //Quickly detach all batches of the current size class from the central cache
        unsigned int freeListSize = cc->freeListSize;
        FreeBlock *firstBatch = cc->firstBatch, *freeList = cc->freeList;
        cc->firstBatch = NULL;
        cc->freeList = NULL;
        cc->freeListSize = 0;
        SPINLOCK_RELEASE(&cc->lock);
        //1. Find out chunks with only free blocks via a simple counting the number of free blocks in each chunk
        {char buffer[32*1024];//enough for 1GB address space
        unsigned short *inChunkFreeBlocks = (unsigned short*)(bufferSize <= sizeof(buffer) ? memset(buffer, 0, bufferSize) : VMALLOC(bufferSize));
        unsigned int numBlocksInChunk = (CHUNK_SIZE - (/*CHUNK_IS_SMALL ? sizeof(ChunkSm) : */sizeof(Chunk)))/class_to_size(sizeClass);
        FreeBlock **pbatch, *block, **pblock;
        Chunk *firstFreeChunk = NULL;
        assert(numBlocksInChunk < (1U<<(sizeof(short)*8)));//in case if CHUNK_SIZE is too big that total count of blocks in it doesn't fit at short type (...may be use static_assert instead?)
        if (inChunkFreeBlocks)//consider VMALLOC can fail
        {
            for (pbatch = &firstBatch; *pbatch; pbatch = &(*pbatch)->nextBatch)
                for (block = *pbatch; block; block = block->next)
#define FREE_BLOCK(block) \
            if (++inChunkFreeBlocks[((uintptr_t)block - minChunkAddr) / CHUNK_SIZE] == numBlocksInChunk)/*chunk is totally free*/\
            {\
                Chunk *chunk = (Chunk*)((uintptr_t)block & ~(CHUNK_SIZE-1));\
                assert(chunk->sizeClass == sizeClass);/*just in case check before overwriting this info*/\
                *(Chunk**)chunk = firstFreeChunk;/*put nextFreeChunk pointer right at the beginning of Chunk as there are always must be a space for one pointer before first memory block*/\
                firstFreeChunk = chunk;\
            }
                    FREE_BLOCK(block)
            for (pblock = &freeList; *pblock; pblock = &(*pblock)->next)
                FREE_BLOCK(*pblock)
#undef FREE_BLOCK
        }
        else {//no counting buffer: just advance pbatch/pblock to the list tails so the re-attach below still works
            for (pbatch = &firstBatch; *pbatch; pbatch = &(*pbatch)->nextBatch);
            for (pblock = &freeList; *pblock; pblock = &(*pblock)->next);
        }
        if (firstFreeChunk)//is anything to release
        {
            //2. Unlink all matching blocks from the corresponding free lists
            FreeBlock *additionalBatchesList = NULL, *additionalBlocksList = NULL, **abatch = &additionalBatchesList, **ablock = &additionalBlocksList;
            unsigned int additionalBlocksListSize = 0, batchSize = batch_size(sizeClass)+1;
            for (pbatch = &firstBatch; *pbatch;)
            {
                for (block = *pbatch; block; block = block->next)
                    if (inChunkFreeBlocks[((uintptr_t)block - minChunkAddr) / CHUNK_SIZE] == numBlocksInChunk)//if at least one block belongs to a releasable chunk, then this batch should be handled specially
                    {
                        FreeBlock *nextBatch = (*pbatch)->nextBatch;
                        for (block = *pbatch; block;)//re-add blocks of not-for-release chunks and organize them into another batches' list (to join it with the main later)
                            if (inChunkFreeBlocks[((uintptr_t)block - minChunkAddr) / CHUNK_SIZE] != numBlocksInChunk)//skip matching-for-release blocks
                            {
                                *ablock = block;
                                do//this loop needed only to minimize memory write operations, otherwise a simpler approach could be used (like in the next loop below)
                                {
                                    ablock = &block->next;
                                    block = block->next;
                                    if (++additionalBlocksListSize == batchSize)
                                    {
                                        abatch = &(*abatch = additionalBlocksList)->nextBatch;
                                        *abatch = NULL;
                                        *ablock = NULL;
                                        ablock = &additionalBlocksList;
                                        additionalBlocksList = NULL;
                                        additionalBlocksListSize = 0;
                                        break;//to force *ablock = block; for starting a new batch
                                    }
                                } while (block && inChunkFreeBlocks[((uintptr_t)block - minChunkAddr) / CHUNK_SIZE] != numBlocksInChunk);
                            }
                            else
                                block = block->next;
                        *ablock = NULL;
                        *pbatch = nextBatch;//unlink batch
                        goto continue_;
                    }
                pbatch = &(*pbatch)->nextBatch;
                continue_:;
            }
            for (block = freeList; block;)//same filtering for the loose free list
                if (inChunkFreeBlocks[((uintptr_t)block - minChunkAddr) / CHUNK_SIZE] != numBlocksInChunk)
                {
                    //*pblock = (*pblock)->next, freeListSize--;//unlink block
                    ablock = &(*ablock = block)->next;
                    block = block->next;
                    *ablock = NULL;
                    if (++additionalBlocksListSize == batchSize)
                    {
                        abatch = &(*abatch = additionalBlocksList)->nextBatch;
                        *abatch = NULL;
                        ablock = &additionalBlocksList;
                        additionalBlocksList = NULL;
                        additionalBlocksListSize = 0;
                    }
                }
                else
                    block = block->next;
            //Add additional lists
            *abatch = *pbatch;
            *pbatch = additionalBatchesList;
            pblock = ablock;
            freeList = additionalBlocksList;
            freeListSize = additionalBlocksListSize;
            //Return back all left not-for-release blocks to the central cache as quickly as possible (as other threads may want to allocate a new memory)
#define GIVE_LISTS_BACK_TO_CC \
            SPINLOCK_ACQUIRE(&cc->lock);\
            *pbatch = cc->firstBatch;\
            cc->firstBatch = firstBatch;\
            *pblock = cc->freeList;\
            cc->freeList = freeList;\
            cc->freeListSize += freeListSize;\
            SPINLOCK_RELEASE(&cc->lock);\
            if (bufferSize > sizeof(buffer)) VMFREE(inChunkFreeBlocks, bufferSize);//this better to do before 3. as kernel is likely optimized for release of just allocated range
            GIVE_LISTS_BACK_TO_CC
            if (padsz)//keep some free chunks around instead of unmapping them
            {
                SPINLOCK_ACQUIRE(&pad.lock);
                if (pad.size < padsz)
                {
                    Chunk *first = firstFreeChunk, **c;
                    do//put off free chunks up to a specified pad size
                    {
                        c = (Chunk**)firstFreeChunk;
                        firstFreeChunk = *c;
                        pad.size += CHUNK_SIZE;
                    } while (pad.size < padsz && firstFreeChunk);
                    *c = (Chunk*)pad.freeChunk;
                    pad.freeChunk = first;
                }
                SPINLOCK_RELEASE(&pad.lock);
            }
            //3. Return memory to the system
            while (firstFreeChunk)
            {
                Chunk *nextFreeChunk = *(Chunk**)firstFreeChunk;
                VMFREE(firstFreeChunk, CHUNK_SIZE);
                firstFreeChunk = nextFreeChunk;
            }
        }
        else//nothing to release - just return batches back to the central cache
        {
            GIVE_LISTS_BACK_TO_CC
#undef GIVE_LISTS_BACK_TO_CC
        }}}
    }
}
#if defined(__cplusplus) && !defined(LTALLOC_DISABLE_OPERATOR_NEW_OVERRIDE)
//Global operator new/delete replacements routing through ltalloc.
//Dynamic exception specifications (throw(std::bad_alloc) / throw()) were
//deprecated in C++11 and are ill-formed since C++17, so emit them only for
//pre-C++11 compilers (MSVC without /Zc:__cplusplus reports 199711L and takes
//the legacy branch; warning 4290 is already disabled above).
#if __cplusplus >= 201103L
#define LTALLOC_SPEC_BAD_ALLOC
#define LTALLOC_SPEC_NOTHROW noexcept
#else
#define LTALLOC_SPEC_BAD_ALLOC throw(std::bad_alloc)
#define LTALLOC_SPEC_NOTHROW throw()
#endif
void *operator new  (size_t size) LTALLOC_SPEC_BAD_ALLOC {return ltmalloc<true> (size);}
void *operator new  (size_t size, const std::nothrow_t&) LTALLOC_SPEC_NOTHROW {return ltmalloc<false>(size);}
void *operator new[](size_t size) LTALLOC_SPEC_BAD_ALLOC {return ltmalloc<true> (size);}
void *operator new[](size_t size, const std::nothrow_t&) LTALLOC_SPEC_NOTHROW {return ltmalloc<false>(size);}
void operator delete  (void* p) LTALLOC_SPEC_NOTHROW {ltfree(p);}
void operator delete  (void* p, const std::nothrow_t&) LTALLOC_SPEC_NOTHROW {ltfree(p);}
void operator delete[](void* p) LTALLOC_SPEC_NOTHROW {ltfree(p);}
void operator delete[](void* p, const std::nothrow_t&) LTALLOC_SPEC_NOTHROW {ltfree(p);}
#undef LTALLOC_SPEC_BAD_ALLOC
#undef LTALLOC_SPEC_NOTHROW
#endif
/* @r-lyeh's { */
#include <string.h>
void *ltcalloc(size_t elems, size_t size) {
    /* Allocate a zero-initialized array of `elems` elements of `size` bytes
       each, with calloc() semantics. Returns NULL if the total byte count
       would overflow size_t or if the underlying allocation fails. */
    if( size && elems > (size_t)-1 / size ) return (void *)0;/*fix: elems*size used to wrap silently, returning a too-small block*/
    size *= elems;
    void *p = ltmalloc( size );
    return p ? memset( p, 0, size ) : (void *)0;/*fix: memset() on a NULL allocation result was UB*/
}
void *ltmemalign( size_t align, size_t size ) {
    /* Round `size` up to the next multiple of `align` and allocate that many
       bytes. Assumes `align` is a power of two; presumably relies on ltalloc's
       size classes handing back blocks aligned to the rounded size - TODO
       confirm against the allocator's size-class layout. */
    const size_t mask = align - 1;
    return ltmalloc( (size + mask) & ~mask );
}
void *ltrealloc( void *ptr, size_t sz ) {
    /* Resize `ptr` to `sz` bytes with standard realloc() semantics:
       - ltrealloc(NULL, sz) behaves like ltmalloc(sz)
       - ltrealloc(ptr, 0)   frees ptr and returns NULL
       - on allocation failure, returns NULL and leaves `ptr` valid/untouched */
    if( !ptr ) return ltmalloc( sz );
    if( !sz ) return ltfree( ptr ), (void *)0;
    size_t osz = ltmsize( ptr );
    if( sz <= osz ) {
        return ptr;/*existing block is already large enough - "shrink" in place*/
    }
    void *nptr = ltmalloc( sz );
    if( !nptr ) return (void *)0;/*fix: memcpy() previously dereferenced a NULL allocation and the old block was then freed, destroying caller data on failure*/
    memcpy( nptr, ptr, osz );
    ltfree( ptr );
#ifdef LTALLOC_AUTO_GC_INTERVAL
    /* this is kind of compromise; the following timer is to guarantee
    that memory gets wiped out at least every given seconds between consecutive
    ltrealloc() calls (I am assuming frequency usage for ltrealloc() is smaller
    than ltmalloc() or ltfree() too) - @r-lyeh */
    clock_t now = clock();
    static clock_t then = now;/*initialized on first call only; thereafter tracks the previous call's timestamp*/
    if( ( double(now - then) / CLOCKS_PER_SEC ) > LTALLOC_AUTO_GC_INTERVAL ) {
        ltsqueeze(0);
    }
    then = now;
#endif
    return nptr;
}
/* } */

View File

@@ -1,14 +0,0 @@
#include <stdlib.h> /*a more portable size_t definition than stddef.h itself*/
#ifdef __cplusplus
extern "C" {
#endif
void* ltmalloc(size_t);
void ltfree(void*);
void* ltrealloc( void *, size_t );
void* ltcalloc( size_t, size_t );
void* ltmemalign( size_t, size_t );
void ltsqueeze(size_t pad); /*return memory to system (see README.md)*/
size_t ltmsize(void*);
#ifdef __cplusplus
}
#endif

View File

@@ -1,59 +0,0 @@
// based on code by Jerry Coffin (most likely Public Domain)
// - rlyeh
#pragma once
#include <stdlib.h>
#include <new>
#include <limits>
#include "ltalloc.h"
namespace lt {
template <class T>
struct allocator {
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T value_type;
template <class U> struct rebind { typedef allocator<U> other; };
allocator() throw() {}
allocator(const allocator&) throw() {}
template <class U> allocator(const allocator<U>&) throw(){}
~allocator() throw() {}
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
pointer allocate(size_type s, void const * = 0) {
if (0 == s)
return NULL;
pointer temp = (pointer)ltmalloc(s * sizeof(T));
if (temp == NULL)
throw std::bad_alloc();
return temp;
}
void deallocate(pointer p, size_type) {
ltfree(p);
}
size_type max_size() const throw() {
return std::numeric_limits<size_t>::max() / sizeof(T);
}
void construct(pointer p, const T& val) {
new((void *)p) T(val);
}
void destroy(pointer p) {
p->~T();
}
};
}

View File

@@ -27,12 +27,11 @@ solution "objview"
kind "ConsoleApp"
language "C++"
files { "viewer.cc", "trackball.cc", "ltalloc.cc" }
files { "viewer.cc", "trackball.cc" }
includedirs { "./" }
includedirs { "../../" }
flags { "c++11" }
--buildoptions { "-std=c++11" }
if _OPTIONS['clang'] then
toolset "clang"

View File

@@ -1,12 +1,12 @@
//
// Optimized wavefront .obj loader.
// Requires ltalloc and C++11
// Requires lfpAlloc and C++11
//
/*
The MIT License (MIT)
Copyright (c) 2012-2016 Syoyo Fujita and many contributors.
Copyright (c) 2012-2017 Syoyo Fujita and many contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -38,7 +38,6 @@ THE SOFTWARE.
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <unistd.h>
#endif
#include <cassert>
@@ -55,7 +54,7 @@ THE SOFTWARE.
#include <chrono> // C++11
#include <thread> // C++11
#include "ltalloc.hpp"
#include "lfpAlloc/Allocator.hpp"
namespace tinyobj_opt {
@@ -327,12 +326,12 @@ struct index_t {
};
typedef struct {
std::vector<float, lt::allocator<float> > vertices;
std::vector<float, lt::allocator<float> > normals;
std::vector<float, lt::allocator<float> > texcoords;
std::vector<index_t, lt::allocator<index_t> > indices;
std::vector<int, lt::allocator<int> > face_num_verts;
std::vector<int, lt::allocator<int> > material_ids;
std::vector<float, lfpAlloc::lfpAllocator<float> > vertices;
std::vector<float, lfpAlloc::lfpAllocator<float> > normals;
std::vector<float, lfpAlloc::lfpAllocator<float> > texcoords;
std::vector<index_t, lfpAlloc::lfpAllocator<index_t> > indices;
std::vector<int, lfpAlloc::lfpAllocator<int> > face_num_verts;
std::vector<int, lfpAlloc::lfpAllocator<int> > material_ids;
} attrib_t;
typedef StackVector<char, 256> ShortString;
@@ -999,9 +998,9 @@ typedef struct {
float tx, ty;
// for f
std::vector<index_t, lt::allocator<index_t> > f;
std::vector<index_t, lfpAlloc::lfpAllocator<index_t> > f;
// std::vector<index_t> f;
std::vector<int, lt::allocator<int> > f_num_verts;
std::vector<int, lfpAlloc::lfpAllocator<int> > f_num_verts;
const char *group_name;
unsigned int group_name_len;
@@ -1047,8 +1046,14 @@ bool parseObj(attrib_t *attrib, std::vector<shape_t> *shapes,
std::vector<material_t> *materials, const char *buf, size_t len,
const LoadOption &option);
} // namespace tinyobj_opt
#endif // TINOBJ_LOADER_OPT_H_
#ifdef TINYOBJ_LOADER_OPT_IMPLEMENTATION
namespace tinyobj_opt {
static bool parseLine(Command *command, const char *p, size_t p_len,
bool triangulate = true) {
// @todo { operate directly on pointer `p'. to do that, add range check for
@@ -1271,7 +1276,8 @@ bool parseObj(attrib_t *attrib, std::vector<shape_t> *shapes,
auto t1 = std::chrono::high_resolution_clock::now();
std::vector<LineInfo, lt::allocator<LineInfo> > line_infos[kMaxThreads];
std::vector<LineInfo, lfpAlloc::lfpAllocator<LineInfo> >
line_infos[kMaxThreads];
for (size_t t = 0; t < static_cast<size_t>(num_threads); t++) {
// Pre allocate enough memory. len / 128 / num_threads is just a heuristic
// value.
@@ -1325,7 +1331,7 @@ bool parseObj(attrib_t *attrib, std::vector<shape_t> *shapes,
// Find extra line which spand across chunk boundary.
if ((t < num_threads) && (buf[end_idx - 1] != '\n')) {
auto extra_span_idx = (std::min)(end_idx - 1 + chunk_size, len - 1);
auto extra_span_idx = (std::min)(end_idx - 1 + chunk_size, len);
for (size_t i = end_idx; i < extra_span_idx; i++) {
if (is_line_ending(buf, i, extra_span_idx)) {
LineInfo info;
@@ -1558,8 +1564,9 @@ bool parseObj(attrib_t *attrib, std::vector<shape_t> *shapes,
index_t(vertex_index, texcoord_index, normal_index);
}
for (size_t k = 0; k < commands[t][i].f_num_verts.size(); k++) {
attrib->material_ids[face_count + k] = material_id;
attrib->face_num_verts[face_count + k] = commands[t][i].f_num_verts[k];
attrib->material_ids[face_count + k] = material_id;
attrib->face_num_verts[face_count + k] =
commands[t][i].f_num_verts[k];
}
f_count += commands[t][i].f.size();
@@ -1671,8 +1678,7 @@ bool parseObj(attrib_t *attrib, std::vector<shape_t> *shapes,
return true;
}
#endif // TINYOBJ_LOADER_OPT_IMPLEMENTATION
} // namespace tinyobj_opt
#endif // TINOBJ_LOADER_OPT_H_
#endif // TINYOBJ_LOADER_OPT_IMPLEMENTATION

View File

@@ -361,7 +361,7 @@ static bool TestStreamLoadObj() {
virtual ~MaterialStringStreamReader() {}
virtual bool operator()(const std::string& matId,
std::vector<material_t>* materials,
std::map<std::string, int>* matMap,
std::map<unsigned int, int>* matMap,
std::string* err) {
(void)matId;
std::string warning;

31
models/cube-vertexcol.obj Normal file
View File

@@ -0,0 +1,31 @@
mtllib cube.mtl
v 0.000000 2.000000 2.000000 0 0 0
v 0.000000 0.000000 2.000000 0 0 1
v 2.000000 0.000000 2.000000 0 1 0
v 2.000000 2.000000 2.000000 0 1 1
v 0.000000 2.000000 0.000000 1 0 0
v 0.000000 0.000000 0.000000 1 0 1
v 2.000000 0.000000 0.000000 1 1 0
v 2.000000 2.000000 0.000000 1 1 1
# 8 vertices
g front cube
usemtl white
f 1 2 3 4
g back cube
# expects white material
f 8 7 6 5
g right cube
usemtl red
f 4 3 7 8
g top cube
usemtl white
f 5 1 4 8
g left cube
usemtl green
f 5 6 2 1
g bottom cube
usemtl white
f 2 6 7 3
# 6 elements

23
models/issue-138.mtl Normal file
View File

@@ -0,0 +1,23 @@
newmtl test1
Ns 10.0000
Ni 1.5000
d 1.0000
Tr 0.0000
Tf 1.0000 1.0000 1.0000
illum 2
Ka 0.0000 0.0000 0.0000
Kd 0.5 0.2 0.2
Ks 0.0000 0.0000 0.0000
Ke 0.0000 0.0000 0.0000
newmtl test2
Ns 10.0000
Ni 1.5000
d 1.0000
Tr 0.0000
Tf 1.0000 1.0000 1.0000
illum 2
Ka 0.0000 0.0000 0.0000
Kd 0.2 0.5 0.2
Ks 0.0000 0.0000 0.0000
Ke 0.0000 0.0000 0.0000

51
models/issue-138.obj Normal file
View File

@@ -0,0 +1,51 @@
# cube.obj
#
mtllib issue-138.mtl
v -0.500000 -0.500000 0.500000
v 0.500000 -0.500000 0.500000
v -0.500000 0.500000 0.500000
v 0.500000 0.500000 0.500000
v -0.500000 0.500000 -0.500000
v 0.500000 0.500000 -0.500000
v -0.500000 -0.500000 -0.500000
v 0.500000 -0.500000 -0.500000
vt 0.000000 0.000000
vt 1.000000 0.000000
vt 0.000000 1.000000
vt 1.000000 1.000000
vn 0.000000 0.000000 1.000000
vn 0.000000 1.000000 0.000000
vn 0.000000 0.000000 -1.000000
vn 0.000000 -1.000000 0.000000
vn 1.000000 0.000000 0.000000
vn -1.000000 0.000000 0.000000
usemtl test1
g test1
s 1
f 1/1/1 2/2/1 3/3/1
f 3/3/1 2/2/1 4/4/1
usemtl test2
g test2
s 2
f 3/1/2 4/2/2 5/3/2
f 5/3/2 4/2/2 6/4/2
s 3
f 5/4/3 6/3/3 7/2/3
f 7/2/3 6/3/3 8/1/3
s 4
f 7/1/4 8/2/4 1/3/4
f 1/3/4 8/2/4 2/4/4
s 5
f 2/1/5 8/2/5 4/3/5
f 4/3/5 8/2/5 6/4/5
s 6
f 7/1/6 1/2/6 5/3/6
f 5/3/6 1/2/6 3/4/6

View File

@@ -0,0 +1,2 @@
newmtl main
Kd 1 1 1

View File

@@ -0,0 +1,17 @@
mtllib issue-140-zero-face-idx.mtl
v -0.5 -0.5 0
v 0.5 -0.5 0
v 0.5 0.5 0
v -0.5 0.5 0
vt 0 0 0
vt 1 0 0
vt 1 1 0
vt 0 1 0
vn 0 0 -1
usemtl main
f 0/0/0 1/1/0 3/3/0
f 1/1/0 3/3/0 2/2/0

10
models/map-bump.mtl Normal file
View File

@@ -0,0 +1,10 @@
newmtl Material.001
Ka 0 0 0
Kd 0 0 0
Ks 0 0 0
map_Bump bump.jpg
newmtl Material.003
Ka 0 0 0
Kd 1 1 1
Ks 0 0 0

817
models/map-bump.obj Normal file
View File

@@ -0,0 +1,817 @@
# https://github.com/syoyo/tinyobjloader/issues/68
# Blender v2.73 (sub 0) OBJ File: 'enemy.blend'
# www.blender.org
mtllib map-bump.mtl
o Cube
v 1.864151 -1.219172 -5.532511
v 0.575869 -0.666304 5.896140
v 0.940448 1.000000 -1.971128
v 1.620345 1.000000 -5.815706
v 1.864152 1.000000 -6.334323
v 0.575869 -0.129842 5.896143
v 5.440438 -1.462153 -5.818601
v 4.896782 -1.462153 -2.744413
v 1.000825 -0.677484 1.899605
v 5.440438 -1.246362 -5.818600
v 1.000825 0.852342 1.899608
v 4.896782 -1.246362 -2.744412
v 1.160660 -0.450871 -2.356325
v 1.704316 -0.450871 -5.430513
v 1.000825 -0.351920 -1.293797
v 1.000825 1.000000 -1.293794
v 1.160660 -0.877888 -2.356326
v 1.704316 -0.877888 -5.430514
v 1.000825 -1.219172 -1.452514
v 1.000825 1.000000 -1.452511
v 1.000825 -0.351920 1.759410
v 1.000825 1.000000 1.759413
v 9.097919 1.221145 -6.212147
v 8.356775 1.221145 -2.021231
v 1.864151 -0.109586 -6.334325
v 0.575869 -0.398073 5.896141
v 9.097919 0.943958 -6.212148
v 8.356775 0.943958 -2.021233
v 1.061916 0.113661 -1.797961
v 1.000825 0.161258 1.899606
v 1.000825 0.324040 -1.293795
v 1.803060 0.113661 -5.988876
v 1.000825 -0.109586 -1.452513
v 1.061916 0.776753 -1.797960
v 1.803061 0.776753 -5.988875
v 1.000825 0.324040 1.759412
v 0.000825 -1.219172 -5.532512
v 0.000825 -0.666304 5.896139
v 0.000826 1.000000 -6.334325
v 0.000825 -0.129842 5.896140
v 0.000825 0.852342 1.899606
v 0.000825 -0.677484 1.899604
v 0.000825 -0.351920 -1.293797
v 0.000825 1.000000 -1.293796
v 0.000825 1.000000 -1.452513
v 0.000825 -1.219172 -1.452515
v 0.000825 -0.351920 1.759409
v 0.000825 1.000000 1.759411
v 0.000826 -0.109586 -6.334326
v 0.000825 -0.398073 5.896140
v 0.152918 1.000000 -5.815708
v 0.152917 1.000000 -1.971130
v 0.940448 1.168419 -1.971128
v 1.620345 1.168419 -5.815706
v 0.152918 1.168419 -5.815708
v 0.152917 1.168419 -1.971130
v 0.921118 1.091883 -1.050430
v 0.921118 1.091883 1.516050
v 0.080533 1.091883 -1.050432
v 0.080533 1.091883 1.516048
v 0.613003 -0.553430 5.546911
v 0.963691 -0.559956 2.248834
v 0.613003 -0.396857 5.546912
v 0.963691 -0.070362 2.248835
v 1.499370 -0.994317 3.966028
v 1.850058 -0.997914 0.667950
v 1.499370 -0.908021 3.966029
v 1.850058 -0.728071 0.667951
v 1.601022 0.760960 -6.334324
v 1.601021 0.129454 -6.334325
v 0.263955 0.760960 -6.334325
v 0.263955 0.129454 -6.334325
v 1.334809 0.760960 -7.515329
v 1.334809 0.129455 -7.515330
v 0.530168 0.760960 -7.515330
v 0.530168 0.129455 -7.515330
v 1.192720 0.649445 -7.515329
v 1.192720 0.240971 -7.515330
v 0.672258 0.649445 -7.515330
v 0.672258 0.240971 -7.515330
v 1.192719 0.649444 -6.524630
v 1.192719 0.240970 -6.524631
v 0.672257 0.649444 -6.524631
v 0.672257 0.240970 -6.524631
v 3.851026 0.431116 -1.883326
v 3.851026 0.946662 -1.883325
v 4.592170 0.946662 -6.074241
v 4.592169 0.431116 -6.074242
v 4.995714 0.561404 -1.918362
v 4.995714 1.016394 -1.918360
v 5.736857 1.016394 -6.109276
v 5.736857 0.561404 -6.109277
v 3.975454 0.471731 -2.162156
v 3.975454 0.919244 -2.162155
v 4.618796 0.919244 -5.800034
v 4.618795 0.471730 -5.800035
v 4.969088 0.584825 -2.192568
v 4.969088 0.979775 -2.192567
v 5.612430 0.979775 -5.830446
v 5.612429 0.584825 -5.830447
v 0.864214 -0.673890 3.184381
v 0.864213 0.489129 3.184384
v 0.864213 -0.018552 3.184383
v 0.000825 0.489129 3.184382
v 0.000825 -0.673890 3.184381
v 0.850955 -0.557858 3.309075
v 0.850955 -0.175321 3.309076
v 1.737321 -0.996758 1.728192
v 1.737321 -0.785920 1.728193
v -1.864151 -1.219172 -5.532511
v -0.575869 -0.666304 5.896140
v -0.940448 1.000000 -1.971128
v -1.620345 1.000000 -5.815706
v -1.864152 1.000000 -6.334323
v -0.575869 -0.129842 5.896143
v -5.440438 -1.462153 -5.818601
v -4.896782 -1.462153 -2.744413
v -1.000825 -0.677484 1.899605
v -5.440438 -1.246362 -5.818600
v -1.000825 0.852342 1.899608
v -4.896782 -1.246362 -2.744412
v -1.160660 -0.450871 -2.356325
v -1.704316 -0.450871 -5.430513
v -1.000825 -0.351920 -1.293797
v -1.000825 1.000000 -1.293794
v -1.160660 -0.877888 -2.356326
v -1.704316 -0.877888 -5.430514
v -1.000825 -1.219172 -1.452514
v -1.000825 1.000000 -1.452511
v -1.000825 -0.351920 1.759410
v -1.000825 1.000000 1.759413
v -9.097919 1.221145 -6.212147
v -8.356775 1.221145 -2.021231
v -1.864151 -0.109586 -6.334325
v -0.575869 -0.398073 5.896141
v -9.097919 0.943958 -6.212148
v -8.356775 0.943958 -2.021233
v -1.061916 0.113661 -1.797961
v -1.000825 0.161258 1.899606
v -1.000825 0.324040 -1.293795
v -1.803060 0.113661 -5.988876
v -1.000825 -0.109586 -1.452513
v -1.061916 0.776753 -1.797960
v -1.803061 0.776753 -5.988875
v -1.000825 0.324040 1.759412
v -0.000825 -1.219172 -5.532512
v -0.000825 -0.666304 5.896139
v -0.000826 1.000000 -6.334325
v -0.000825 -0.129842 5.896140
v -0.000825 0.852342 1.899606
v -0.000825 -0.677484 1.899604
v -0.000825 -0.351920 -1.293797
v -0.000825 1.000000 -1.293796
v -0.000825 1.000000 -1.452513
v -0.000825 -1.219172 -1.452515
v -0.000825 -0.351920 1.759409
v -0.000825 1.000000 1.759411
v -0.000826 -0.109586 -6.334326
v -0.000825 -0.398073 5.896140
v -0.152918 1.000000 -5.815708
v -0.152917 1.000000 -1.971130
v -0.940448 1.168419 -1.971128
v -1.620345 1.168419 -5.815706
v -0.152918 1.168419 -5.815708
v -0.152917 1.168419 -1.971130
v -0.921118 1.091883 -1.050430
v -0.921118 1.091883 1.516050
v -0.080533 1.091883 -1.050432
v -0.080533 1.091883 1.516048
v -0.613003 -0.553430 5.546911
v -0.963691 -0.559956 2.248834
v -0.613003 -0.396857 5.546912
v -0.963691 -0.070362 2.248835
v -1.499370 -0.994317 3.966028
v -1.850058 -0.997914 0.667950
v -1.499370 -0.908021 3.966029
v -1.850058 -0.728071 0.667951
v -1.601022 0.760960 -6.334324
v -1.601021 0.129454 -6.334325
v -0.263955 0.760960 -6.334325
v -0.263955 0.129454 -6.334325
v -1.334809 0.760960 -7.515329
v -1.334809 0.129455 -7.515330
v -0.530168 0.760960 -7.515330
v -0.530168 0.129455 -7.515330
v -1.192720 0.649445 -7.515329
v -1.192720 0.240971 -7.515330
v -0.672258 0.649445 -7.515330
v -0.672258 0.240971 -7.515330
v -1.192719 0.649444 -6.524630
v -1.192719 0.240970 -6.524631
v -0.672257 0.649444 -6.524631
v -0.672257 0.240970 -6.524631
v -3.851026 0.431116 -1.883326
v -3.851026 0.946662 -1.883325
v -4.592170 0.946662 -6.074241
v -4.592169 0.431116 -6.074242
v -4.995714 0.561404 -1.918362
v -4.995714 1.016394 -1.918360
v -5.736857 1.016394 -6.109276
v -5.736857 0.561404 -6.109277
v -3.975454 0.471731 -2.162156
v -3.975454 0.919244 -2.162155
v -4.618796 0.919244 -5.800034
v -4.618795 0.471730 -5.800035
v -4.969088 0.584825 -2.192568
v -4.969088 0.979775 -2.192567
v -5.612430 0.979775 -5.830446
v -5.612429 0.584825 -5.830447
v -0.864214 -0.673890 3.184381
v -0.864213 0.489129 3.184384
v -0.864213 -0.018552 3.184383
v -0.000825 0.489129 3.184382
v -0.000825 -0.673890 3.184381
v -0.850955 -0.557858 3.309075
v -0.850955 -0.175321 3.309076
v -1.737321 -0.996758 1.728192
v -1.737321 -0.785920 1.728193
vt 0.135351 -0.558072
vt 0.003035 -0.363507
vt 0.092282 -0.976844
vt -0.081322 0.947351
vt 0.100058 1.958891
vt 0.050091 1.852185
vt -0.092752 1.055565
vt -0.251711 1.059474
vt 0.075587 0.041384
vt -0.086008 0.279003
vt -0.086212 0.249830
vt -0.276044 1.968137
vt -0.246101 1.859467
vt 0.009828 1.911388
vt -0.133014 1.114769
vt 0.413322 1.261595
vt 0.299103 0.624605
vt 1.243955 0.407183
vt 0.515404 1.111487
vt 1.358173 1.044173
vt -0.081553 0.914324
vt 0.080042 0.676706
vt 0.401185 0.474498
vt 1.295541 0.331328
vt 0.365315 1.568841
vt 0.299111 1.575740
vt 0.143401 0.707357
vt 0.629403 1.011947
vt 0.449192 0.167251
vt 1.409760 0.968317
vt 0.986264 1.738667
vt 1.573373 1.877873
vt 1.417663 1.009490
vt 0.237182 -0.196235
vt 0.721785 1.030226
vt 0.830554 0.870285
vt 0.877494 1.898608
vt 1.351399 1.106930
vt 0.183935 0.557301
vt 1.507109 1.975312
vt 0.241636 0.439088
vt 0.114297 -0.045011
vt 0.140593 1.808834
vt -0.015118 0.940452
vt 0.156405 -1.071134
vt 0.164119 -0.998223
vt 0.040336 -1.068281
vt 0.104459 -1.162571
vt -0.165787 1.882802
vt -0.014821 1.660811
vt -0.287852 0.283965
vt -0.293374 0.366508
vt -0.289630 0.900550
vt 0.035337 -0.191272
vt 0.247348 0.172213
vt 0.253300 1.021193
vt -0.283166 0.952313
vt -0.283398 0.919286
vt 0.039792 0.444050
vt 0.314806 -0.339851
vt 0.112962 -0.334889
vt -0.288056 0.254793
vt -0.023788 -0.973990
vt -0.155922 -0.359599
vt 0.220528 -1.165425
vt 0.108710 -0.748730
vt -0.286364 1.918670
vt -0.291973 1.118678
vt -0.119962 0.896379
vt -0.123707 0.362337
vt 0.162891 -0.598569
vt 0.467532 -0.853353
vt 0.201549 -1.053262
vt 0.161663 -0.198915
vt 0.267667 -0.752638
vt 0.278705 -0.371021
vt 0.526390 -0.542053
vt 0.483821 -0.479457
vt 0.488162 -0.883689
vt 0.500110 -0.105561
vt 0.564618 -0.200418
vt -0.110331 2.127229
vt 0.040636 1.905238
vt -0.010786 1.578087
vt 0.104092 1.876168
vt 0.255058 1.654176
vt -0.054992 2.087323
vt 0.203048 1.901245
vt 0.052081 2.123235
vt 0.042658 1.943733
vt -0.056437 1.881175
vt 0.147710 1.941151
vt 0.050060 2.084741
vt 0.146264 1.735002
vt 0.041212 1.737584
vt 0.048615 1.878591
vt 0.663065 1.872485
vt 0.786311 1.691257
vt 0.507355 1.004102
vt 0.630601 0.822874
vt 0.955144 1.689498
vt 0.860727 1.828333
vt 0.725565 1.074543
vt 0.819981 0.935708
vt 0.674594 1.805657
vt 0.539432 1.051867
vt 0.646413 0.894554
vt 0.781576 1.648344
vt 0.240127 -0.712141
vn 0.994400 0.000000 0.105700
vn 0.000000 1.000000 0.000000
vn 1.000000 0.000000 0.000000
vn 0.984700 0.000000 0.174100
vn 0.211800 0.976600 0.037500
vn -0.103300 0.000000 -0.994600
vn 0.103300 -0.000000 0.994600
vn 0.911400 0.378700 0.161200
vn -0.157300 -0.987200 -0.027800
vn 0.113700 -0.993300 0.020100
vn 0.030600 -0.000000 0.999500
vn -0.061100 0.998100 -0.010800
vn -0.030600 0.000000 -0.999500
vn -0.000000 -0.000000 1.000000
vn 0.000000 0.000000 -1.000000
vn -0.755400 0.655300 0.000000
vn 0.000000 -1.000000 0.000000
vn -0.000000 -0.180000 0.983700
vn 0.000000 -0.395500 -0.918500
vn -0.000000 0.688500 0.725200
vn 0.000000 -0.585700 -0.810500
vn -0.000000 0.974900 0.222500
vn -0.000000 -1.000000 0.002800
vn -1.000000 0.000000 -0.000000
vn -0.000000 0.935500 0.353200
vn 0.755400 0.655300 0.000000
vn 0.000000 0.935500 -0.353200
vn 0.673800 0.724900 0.143400
vn 0.872300 -0.000000 0.489100
vn -0.872300 0.000000 -0.489100
vn -0.518300 -0.853500 -0.054200
vn -0.975500 0.000000 -0.219900
vn 0.975500 0.000000 -0.219900
vn -0.913200 0.000000 -0.407500
vn -0.436900 0.896200 -0.077300
vn -0.995300 -0.000000 0.096600
vn -0.297300 -0.953400 -0.052600
vn 0.473900 -0.876600 0.083800
vn 0.913200 0.000000 0.407500
vn 0.342200 0.937700 0.060500
vn 0.995300 -0.000000 -0.096600
vn -0.519200 -0.853000 -0.054300
vn 0.722400 0.676400 0.143800
vn -0.994400 0.000000 0.105700
vn -0.984700 0.000000 0.174100
vn -0.211800 0.976600 0.037500
vn 0.103300 0.000000 -0.994600
vn -0.103300 -0.000000 0.994600
vn -0.911400 0.378700 0.161200
vn 0.157300 -0.987200 -0.027800
vn -0.113700 -0.993300 0.020100
vn -0.030600 -0.000000 0.999500
vn 0.061100 0.998100 -0.010800
vn 0.030600 0.000000 -0.999500
vn -0.691900 0.713200 0.112500
vn -0.872300 -0.000000 0.489100
vn 0.872300 0.000000 -0.489100
vn 0.518300 -0.853500 -0.054200
vn 0.913200 0.000000 -0.407500
vn 0.436900 0.896200 -0.077300
vn 0.995300 0.000000 0.096600
vn 0.297300 -0.953300 -0.052600
vn -0.473900 -0.876600 0.083800
vn -0.913200 -0.000000 0.407500
vn -0.342200 0.937700 0.060500
vn -0.995300 -0.000000 -0.096600
vn 0.519200 -0.853000 -0.054300
vn -0.714800 0.690100 0.113700
vn 0.974400 0.089700 0.206200
vn 0.870400 0.288400 0.399100
vn 0.691900 0.713200 0.112500
vn -0.518000 -0.853700 -0.053400
vn -0.519700 -0.852700 -0.053600
vn 0.714800 0.690100 0.113700
vn -0.974400 0.089700 0.206200
vn -0.870400 0.288400 0.399100
vn -0.673800 0.724900 0.143400
vn 0.518000 -0.853700 -0.053400
vn 0.297300 -0.953400 -0.052600
vn 0.519700 -0.852700 -0.053600
vn -0.722400 0.676400 0.143800
vn -0.000000 0.962300 0.272000
usemtl Material.001
s off
f 103/1/1 102/2/1 6/3/1
f 20/4/2 5/5/2 4/6/2
f 20/4/2 3/7/2 52/8/2
f 36/9/3 22/10/3 11/11/3
f 39/12/2 51/13/2 4/6/2
f 4/6/4 54/14/4 53/15/4
f 14/16/5 13/17/5 12/18/5
f 18/19/6 14/16/6 10/20/6
f 20/4/3 16/21/3 31/22/3
f 17/23/7 8/24/7 12/18/7
f 25/25/4 32/26/4 29/27/4
f 10/20/4 12/18/4 8/24/4
f 1/28/8 18/19/8 17/23/8
f 19/29/4 17/23/4 13/17/4
f 25/25/4 14/16/4 18/19/4
f 18/19/9 7/30/9 8/24/9
f 92/31/10 27/32/10 28/33/10
f 16/21/3 22/10/3 36/9/3
f 31/22/3 36/9/3 21/34/3
f 90/35/11 89/36/11 28/33/11
f 91/37/12 90/35/12 24/38/12
f 33/39/4 13/17/4 14/16/4
f 23/40/4 24/38/4 28/33/4
f 33/39/3 31/22/3 15/41/3
f 21/34/3 36/9/3 30/42/3
f 5/5/4 35/43/4 32/26/4
f 5/5/4 20/4/4 34/44/4
f 33/39/4 29/27/4 34/44/4
f 91/37/13 23/40/13 27/32/13
f 103/1/1 26/45/1 63/46/1
f 26/45/14 50/47/14 38/48/14
f 39/12/15 71/49/15 72/50/15
f 48/51/16 60/52/16 59/53/16
f 15/41/17 21/34/17 47/54/17
f 19/29/17 46/55/17 37/56/17
f 39/12/2 45/57/2 52/8/2
f 20/4/2 45/57/2 44/58/2
f 19/29/18 15/41/18 43/59/18
f 9/60/19 42/61/19 47/54/19
f 22/10/20 48/51/20 41/62/20
f 25/25/21 1/28/21 37/56/21
f 6/3/14 40/63/14 50/47/14
f 104/64/22 40/63/22 6/3/22
f 2/65/23 38/48/23 105/66/23
f 55/67/2 56/68/2 53/15/2
f 3/7/14 53/15/14 56/68/14
f 51/13/15 55/67/15 54/14/15
f 52/8/24 56/68/24 55/67/24
f 57/69/2 59/53/2 60/52/2
f 48/51/25 22/10/25 58/70/25
f 16/21/26 57/69/26 58/70/26
f 16/21/27 44/58/27 59/53/27
f 107/71/28 63/46/28 67/72/28
f 26/45/1 2/65/1 61/73/1
f 9/60/1 30/42/1 64/74/1
f 101/75/1 9/60/1 62/76/1
f 108/77/1 109/78/1 67/72/1
f 61/73/29 65/79/29 67/72/29
f 62/76/30 64/74/30 68/80/30
f 62/76/31 66/81/31 108/77/31
f 71/49/32 75/82/32 76/83/32
f 25/25/15 49/84/15 72/50/15
f 5/5/15 69/85/15 71/49/15
f 25/25/15 70/86/15 69/85/15
f 76/83/15 75/82/15 79/87/15
f 72/50/17 76/83/17 74/88/17
f 71/49/2 69/85/2 73/89/2
f 70/86/33 74/88/33 73/89/33
f 80/90/3 79/87/3 83/91/3
f 76/83/15 80/90/15 78/92/15
f 75/82/15 73/89/15 77/93/15
f 74/88/15 78/92/15 77/93/15
f 82/94/15 84/95/15 83/91/15
f 80/90/2 84/95/2 82/94/2
f 77/93/17 81/96/17 83/91/17
f 77/93/24 78/92/24 82/94/24
f 35/43/13 87/97/13 88/98/13
f 35/43/12 34/44/12 86/99/12
f 34/44/11 29/27/11 85/100/11
f 32/26/10 88/98/10 85/100/10
f 92/31/34 100/101/34 99/102/34
f 90/35/35 91/37/35 99/102/35
f 89/36/36 90/35/36 98/103/36
f 89/36/37 97/104/37 100/101/37
f 95/105/13 99/102/13 100/101/13
f 95/105/12 94/106/12 98/103/12
f 94/106/11 93/107/11 97/104/11
f 96/108/10 100/101/10 97/104/10
f 88/98/38 96/108/38 93/107/38
f 86/99/39 85/100/39 93/107/39
f 87/97/40 86/99/40 94/106/40
f 87/97/41 95/105/41 96/108/41
f 106/109/42 108/77/42 65/79/42
f 66/81/1 68/80/1 109/78/1
f 101/75/1 106/109/1 61/73/1
f 64/74/43 107/71/43 109/78/43
f 101/75/23 105/66/23 42/61/23
f 103/1/1 107/71/1 64/74/1
f 30/42/1 11/11/1 102/2/1
f 212/1/44 135/45/44 115/3/44
f 129/4/2 112/7/2 113/6/2
f 161/8/2 112/7/2 129/4/2
f 145/9/24 139/42/24 120/11/24
f 113/6/2 160/13/2 148/12/2
f 162/15/45 163/14/45 113/6/45
f 123/16/46 119/20/46 121/18/46
f 127/19/47 116/30/47 119/20/47
f 140/22/24 125/21/24 129/4/24
f 121/18/48 117/24/48 126/23/48
f 138/27/45 141/26/45 134/25/45
f 117/24/45 121/18/45 119/20/45
f 126/23/49 127/19/49 110/28/49
f 122/17/45 126/23/45 128/29/45
f 127/19/45 123/16/45 134/25/45
f 117/24/50 116/30/50 127/19/50
f 137/33/51 136/32/51 201/31/51
f 145/9/24 131/10/24 125/21/24
f 130/34/24 145/9/24 140/22/24
f 199/35/52 133/38/52 137/33/52
f 200/37/53 132/40/53 133/38/53
f 123/16/45 122/17/45 142/39/45
f 137/33/45 133/38/45 132/40/45
f 124/41/24 140/22/24 142/39/24
f 130/34/24 118/60/24 139/42/24
f 141/26/45 144/43/45 114/5/45
f 114/5/45 144/43/45 143/44/45
f 143/44/45 138/27/45 142/39/45
f 136/32/54 132/40/54 200/37/54
f 212/1/44 216/71/44 172/46/44
f 147/48/14 159/47/14 135/45/14
f 181/50/15 180/49/15 148/12/15
f 168/53/26 169/52/26 157/51/26
f 124/41/17 152/59/17 156/54/17
f 146/56/17 155/55/17 128/29/17
f 148/12/2 160/13/2 161/8/2
f 129/4/2 125/21/2 153/58/2
f 155/55/18 152/59/18 124/41/18
f 130/34/19 156/54/19 151/61/19
f 131/10/20 120/11/20 150/62/20
f 134/25/21 158/84/21 146/56/21
f 159/47/14 149/63/14 115/3/14
f 115/3/22 149/63/22 213/64/22
f 214/66/23 147/48/23 111/65/23
f 162/15/2 165/68/2 164/67/2
f 165/68/14 162/15/14 112/7/14
f 163/14/15 164/67/15 160/13/15
f 164/67/3 165/68/3 161/8/3
f 166/69/2 167/70/2 169/52/2
f 157/51/25 169/52/25 167/70/25
f 167/70/16 166/69/16 125/21/16
f 125/21/27 166/69/27 168/53/27
f 216/71/55 218/78/55 176/72/55
f 135/45/44 172/46/44 170/73/44
f 118/60/44 171/76/44 173/74/44
f 210/75/44 215/109/44 171/76/44
f 217/77/44 174/79/44 176/72/44
f 176/72/56 174/79/56 170/73/56
f 171/76/57 175/81/57 177/80/57
f 217/77/58 175/81/58 171/76/58
f 185/83/33 184/82/33 180/49/33
f 134/25/15 179/86/15 181/50/15
f 180/49/15 178/85/15 114/5/15
f 178/85/15 179/86/15 134/25/15
f 189/90/15 188/87/15 184/82/15
f 183/88/17 185/83/17 181/50/17
f 180/49/2 184/82/2 182/89/2
f 182/89/32 183/88/32 179/86/32
f 189/90/24 193/95/24 192/91/24
f 187/92/15 189/90/15 185/83/15
f 184/82/15 188/87/15 186/93/15
f 186/93/15 187/92/15 183/88/15
f 192/91/15 193/95/15 191/94/15
f 191/94/2 193/95/2 189/90/2
f 192/91/17 190/96/17 186/93/17
f 186/93/3 190/96/3 191/94/3
f 197/98/54 196/97/54 144/43/54
f 144/43/53 196/97/53 195/99/53
f 143/44/52 195/99/52 194/100/52
f 194/100/51 197/98/51 141/26/51
f 208/102/59 209/101/59 201/31/59
f 199/35/60 207/103/60 208/102/60
f 198/36/61 206/104/61 207/103/61
f 209/101/62 206/104/62 198/36/62
f 209/101/54 208/102/54 204/105/54
f 204/105/53 208/102/53 207/103/53
f 203/106/52 207/103/52 206/104/52
f 206/104/51 209/101/51 205/108/51
f 202/107/63 205/108/63 197/98/63
f 195/99/64 203/106/64 202/107/64
f 196/97/65 204/105/65 203/106/65
f 205/108/66 204/105/66 196/97/66
f 174/79/67 217/77/67 215/109/67
f 175/81/44 217/77/44 218/78/44
f 170/73/44 215/109/44 210/75/44
f 173/74/68 177/80/68 218/78/68
f 151/61/23 214/66/23 210/75/23
f 173/74/44 216/71/44 212/1/44
f 139/42/44 212/1/44 211/2/44
f 26/45/1 103/1/1 6/3/1
f 3/7/2 20/4/2 4/6/2
f 45/57/2 20/4/2 52/8/2
f 30/42/3 36/9/3 11/11/3
f 5/5/2 39/12/2 4/6/2
f 3/7/4 4/6/4 53/15/4
f 10/20/5 14/16/5 12/18/5
f 7/30/6 18/19/6 10/20/6
f 33/39/3 20/4/3 31/22/3
f 13/17/7 17/23/7 12/18/7
f 33/39/4 25/25/4 29/27/4
f 7/30/4 10/20/4 8/24/4
f 19/29/69 1/28/69 17/23/69
f 33/39/4 19/29/4 13/17/4
f 1/28/70 25/25/70 18/19/70
f 17/23/9 18/19/9 8/24/9
f 89/36/10 92/31/10 28/33/10
f 31/22/3 16/21/3 36/9/3
f 15/41/3 31/22/3 21/34/3
f 24/38/11 90/35/11 28/33/11
f 23/40/12 91/37/12 24/38/12
f 25/25/4 33/39/4 14/16/4
f 27/32/4 23/40/4 28/33/4
f 19/29/3 33/39/3 15/41/3
f 9/60/3 21/34/3 30/42/3
f 25/25/4 5/5/4 32/26/4
f 35/43/4 5/5/4 34/44/4
f 20/4/4 33/39/4 34/44/4
f 92/31/13 91/37/13 27/32/13
f 107/71/1 103/1/1 63/46/1
f 2/65/14 26/45/14 38/48/14
f 49/84/15 39/12/15 72/50/15
f 44/58/16 48/51/16 59/53/16
f 43/59/17 15/41/17 47/54/17
f 1/28/17 19/29/17 37/56/17
f 51/13/2 39/12/2 52/8/2
f 16/21/2 20/4/2 44/58/2
f 46/55/18 19/29/18 43/59/18
f 21/34/19 9/60/19 47/54/19
f 11/11/20 22/10/20 41/62/20
f 49/84/21 25/25/21 37/56/21
f 26/45/14 6/3/14 50/47/14
f 102/2/22 104/64/22 6/3/22
f 101/75/23 2/65/23 105/66/23
f 54/14/2 55/67/2 53/15/2
f 52/8/14 3/7/14 56/68/14
f 4/6/15 51/13/15 54/14/15
f 51/13/24 52/8/24 55/67/24
f 58/70/2 57/69/2 60/52/2
f 60/52/25 48/51/25 58/70/25
f 22/10/26 16/21/26 58/70/26
f 57/69/27 16/21/27 59/53/27
f 109/78/71 107/71/71 67/72/71
f 63/46/1 26/45/1 61/73/1
f 62/76/1 9/60/1 64/74/1
f 106/109/1 101/75/1 62/76/1
f 65/79/1 108/77/1 67/72/1
f 63/46/29 61/73/29 67/72/29
f 66/81/30 62/76/30 68/80/30
f 106/109/72 62/76/72 108/77/72
f 72/50/32 71/49/32 76/83/32
f 70/86/15 25/25/15 72/50/15
f 39/12/15 5/5/15 71/49/15
f 5/5/15 25/25/15 69/85/15
f 80/90/15 76/83/15 79/87/15
f 70/86/17 72/50/17 74/88/17
f 75/82/2 71/49/2 73/89/2
f 69/85/33 70/86/33 73/89/33
f 84/95/3 80/90/3 83/91/3
f 74/88/15 76/83/15 78/92/15
f 79/87/15 75/82/15 77/93/15
f 73/89/15 74/88/15 77/93/15
f 81/96/15 82/94/15 83/91/15
f 78/92/2 80/90/2 82/94/2
f 79/87/17 77/93/17 83/91/17
f 81/96/24 77/93/24 82/94/24
f 32/26/13 35/43/13 88/98/13
f 87/97/12 35/43/12 86/99/12
f 86/99/11 34/44/11 85/100/11
f 29/27/10 32/26/10 85/100/10
f 91/37/34 92/31/34 99/102/34
f 98/103/35 90/35/35 99/102/35
f 97/104/36 89/36/36 98/103/36
f 92/31/37 89/36/37 100/101/37
f 96/108/13 95/105/13 100/101/13
f 99/102/12 95/105/12 98/103/12
f 98/103/11 94/106/11 97/104/11
f 93/107/10 96/108/10 97/104/10
f 85/100/38 88/98/38 93/107/38
f 94/106/39 86/99/39 93/107/39
f 95/105/40 87/97/40 94/106/40
f 88/98/41 87/97/41 96/108/41
f 61/73/73 106/109/73 65/79/73
f 108/77/1 66/81/1 109/78/1
f 2/65/1 101/75/1 61/73/1
f 68/80/74 64/74/74 109/78/74
f 9/60/23 101/75/23 42/61/23
f 30/42/1 103/1/1 64/74/1
f 103/1/1 30/42/1 102/2/1
f 211/2/44 212/1/44 115/3/44
f 114/5/2 129/4/2 113/6/2
f 154/57/2 161/8/2 129/4/2
f 131/10/24 145/9/24 120/11/24
f 114/5/2 113/6/2 148/12/2
f 112/7/45 162/15/45 113/6/45
f 122/17/46 123/16/46 121/18/46
f 123/16/47 127/19/47 119/20/47
f 142/39/24 140/22/24 129/4/24
f 122/17/48 121/18/48 126/23/48
f 142/39/45 138/27/45 134/25/45
f 116/30/45 117/24/45 119/20/45
f 128/29/75 126/23/75 110/28/75
f 142/39/45 122/17/45 128/29/45
f 110/28/76 127/19/76 134/25/76
f 126/23/50 117/24/50 127/19/50
f 198/36/51 137/33/51 201/31/51
f 140/22/24 145/9/24 125/21/24
f 124/41/24 130/34/24 140/22/24
f 198/36/52 199/35/52 137/33/52
f 199/35/53 200/37/53 133/38/53
f 134/25/45 123/16/45 142/39/45
f 136/32/45 137/33/45 132/40/45
f 128/29/24 124/41/24 142/39/24
f 145/9/24 130/34/24 139/42/24
f 134/25/45 141/26/45 114/5/45
f 129/4/45 114/5/45 143/44/45
f 129/4/45 143/44/45 142/39/45
f 201/31/54 136/32/54 200/37/54
f 135/45/44 212/1/44 172/46/44
f 111/65/14 147/48/14 135/45/14
f 158/84/15 181/50/15 148/12/15
f 153/58/26 168/53/26 157/51/26
f 130/34/17 124/41/17 156/54/17
f 110/28/17 146/56/17 128/29/17
f 154/57/2 148/12/2 161/8/2
f 154/57/2 129/4/2 153/58/2
f 128/29/18 155/55/18 124/41/18
f 118/60/19 130/34/19 151/61/19
f 157/51/20 131/10/20 150/62/20
f 110/28/21 134/25/21 146/56/21
f 135/45/14 159/47/14 115/3/14
f 211/2/22 115/3/22 213/64/22
f 210/75/23 214/66/23 111/65/23
f 163/14/2 162/15/2 164/67/2
f 161/8/14 165/68/14 112/7/14
f 113/6/15 163/14/15 160/13/15
f 160/13/3 164/67/3 161/8/3
f 168/53/2 166/69/2 169/52/2
f 131/10/25 157/51/25 167/70/25
f 131/10/16 167/70/16 125/21/16
f 153/58/27 125/21/27 168/53/27
f 172/46/77 216/71/77 176/72/77
f 111/65/44 135/45/44 170/73/44
f 139/42/44 118/60/44 173/74/44
f 118/60/44 210/75/44 171/76/44
f 218/78/44 217/77/44 176/72/44
f 172/46/56 176/72/56 170/73/56
f 173/74/57 171/76/57 177/80/57
f 215/109/78 217/77/78 171/76/78
f 181/50/33 185/83/33 180/49/33
f 158/84/15 134/25/15 181/50/15
f 148/12/15 180/49/15 114/5/15
f 114/5/15 178/85/15 134/25/15
f 185/83/15 189/90/15 184/82/15
f 179/86/17 183/88/17 181/50/17
f 178/85/2 180/49/2 182/89/2
f 178/85/32 182/89/32 179/86/32
f 188/87/24 189/90/24 192/91/24
f 183/88/15 187/92/15 185/83/15
f 182/89/15 184/82/15 186/93/15
f 182/89/15 186/93/15 183/88/15
f 190/96/15 192/91/15 191/94/15
f 187/92/2 191/94/2 189/90/2
f 188/87/17 192/91/17 186/93/17
f 187/92/3 186/93/3 191/94/3
f 141/26/54 197/98/54 144/43/54
f 143/44/53 144/43/53 195/99/53
f 138/27/52 143/44/52 194/100/52
f 138/27/51 194/100/51 141/26/51
f 200/37/59 208/102/59 201/31/59
f 200/37/60 199/35/60 208/102/60
f 199/35/61 198/36/61 207/103/61
f 201/31/79 209/101/79 198/36/79
f 205/108/54 209/101/54 204/105/54
f 203/106/53 204/105/53 207/103/53
f 202/107/52 203/106/52 206/104/52
f 202/107/51 206/104/51 205/108/51
f 194/100/63 202/107/63 197/98/63
f 194/100/64 195/99/64 202/107/64
f 195/99/65 196/97/65 203/106/65
f 197/98/66 205/108/66 196/97/66
f 170/73/80 174/79/80 215/109/80
f 177/80/44 175/81/44 218/78/44
f 111/65/44 170/73/44 210/75/44
f 216/71/81 173/74/81 218/78/81
f 118/60/23 151/61/23 210/75/23
f 139/42/44 173/74/44 212/1/44
f 120/11/44 139/42/44 211/2/44
usemtl Material.003
f 41/62/82 104/64/82 102/2/82
f 211/2/82 213/64/82 150/62/82
f 11/11/82 41/62/82 102/2/82
f 120/11/82 211/2/82 150/62/82

7
models/norm-texopt.mtl Normal file
View File

@@ -0,0 +1,7 @@
newmtl default
Ka 0 0 0
Kd 0 0 0
Ks 0 0 0
Kt 0.1 0.2 0.3
norm -bm 3 normalmap.jpg

7
models/norm-texopt.obj Normal file
View File

@@ -0,0 +1,7 @@
mtllib norm-texopt.mtl
o Test
v 1.864151 -1.219172 -5.532511
v 0.575869 -0.666304 5.896140
v 0.940448 1.000000 -1.971128
usemtl default
f 1 2 3

25
models/refl.mtl Normal file
View File

@@ -0,0 +1,25 @@
newmtl white
Ka 0 0 0
Kd 1 1 1
Ks 0 0 0
refl reflection.tga
newmtl red
Ka 0 0 0
Kd 1 0 0
Ks 0 0 0
newmtl green
Ka 0 0 0
Kd 0 1 0
Ks 0 0 0
newmtl blue
Ka 0 0 0
Kd 0 0 1
Ks 0 0 0
newmtl light
Ka 20 20 20
Kd 1 1 1
Ks 0 0 0

32
models/refl.obj Normal file
View File

@@ -0,0 +1,32 @@
# Test for `refl` material parameter
mtllib refl.mtl
v 0.000000 2.000000 2.000000
v 0.000000 0.000000 2.000000
v 2.000000 0.000000 2.000000
v 2.000000 2.000000 2.000000
v 0.000000 2.000000 0.000000
v 0.000000 0.000000 0.000000
v 2.000000 0.000000 0.000000
v 2.000000 2.000000 0.000000
# 8 vertices
g front cube
usemtl white
f 1 2 3 4
g back cube
# expects white material
f 8 7 6 5
g right cube
usemtl red
f 4 3 7 8
g top cube
usemtl white
f 5 1 4 8
g left cube
usemtl green
f 5 6 2 1
g bottom cube
usemtl white
f 2 6 7 3
# 6 elements

View File

@@ -0,0 +1,28 @@
newmtl white
Ka 0 0 0
Kd 1 1 1
Ks 0 0 0
# filename with white space.
map_Kd texture 01.png
newmtl red
Ka 0 0 0
Kd 1 0 0
Ks 0 0 0
# texture option + filename with white space.
bump -bm 2 bump 01.png
newmtl green
Ka 0 0 0
Kd 0 1 0
Ks 0 0 0
newmtl blue
Ka 0 0 0
Kd 0 0 1
Ks 0 0 0
newmtl light
Ka 20 20 20
Kd 1 1 1
Ks 0 0 0

View File

@@ -0,0 +1,31 @@
mtllib texture-filename-with-whitespace.mtl
v 0.000000 2.000000 2.000000
v 0.000000 0.000000 2.000000
v 2.000000 0.000000 2.000000
v 2.000000 2.000000 2.000000
v 0.000000 2.000000 0.000000
v 0.000000 0.000000 0.000000
v 2.000000 0.000000 0.000000
v 2.000000 2.000000 0.000000
# 8 vertices
g front cube
usemtl white
f 1 2 3 4
g back cube
# expects white material
f 8 7 6 5
g right cube
usemtl red
f 4 3 7 8
g top cube
usemtl white
f 5 1 4 8
g left cube
usemtl green
f 5 6 2 1
g bottom cube
usemtl white
f 2 6 7 3
# 6 elements

View File

@@ -29,7 +29,7 @@ PyObject* pyTupleFromfloat3(float array[3]) {
extern "C" {
static PyObject* pyLoadObj(PyObject* self, PyObject* args) {
PyObject *rtndict, *pyshapes, *pymaterials, *attribobj, *current, *meshobj;
PyObject *rtndict, *pyshapes, *pymaterials, *pymaterial_indices, *attribobj, *current, *meshobj;
char const* current_name;
char const* filename;
@@ -48,6 +48,7 @@ static PyObject* pyLoadObj(PyObject* self, PyObject* args) {
pyshapes = PyDict_New();
pymaterials = PyDict_New();
pymaterial_indices = PyList_New(0);
rtndict = PyDict_New();
attribobj = PyDict_New();
@@ -129,49 +130,51 @@ static PyObject* pyLoadObj(PyObject* self, PyObject* args) {
PyObject* unknown_parameter = PyDict_New();
for (std::map<std::string, std::string>::iterator p =
(*mat).unknown_parameter.begin();
p != (*mat).unknown_parameter.end(); ++p) {
mat->unknown_parameter.begin();
p != mat->unknown_parameter.end(); ++p) {
PyDict_SetItemString(unknown_parameter, p->first.c_str(),
PyUnicode_FromString(p->second.c_str()));
}
PyDict_SetItemString(matobj, "shininess",
PyFloat_FromDouble((*mat).shininess));
PyDict_SetItemString(matobj, "ior", PyFloat_FromDouble((*mat).ior));
PyFloat_FromDouble(mat->shininess));
PyDict_SetItemString(matobj, "ior", PyFloat_FromDouble(mat->ior));
PyDict_SetItemString(matobj, "dissolve",
PyFloat_FromDouble((*mat).dissolve));
PyDict_SetItemString(matobj, "illum", PyLong_FromLong((*mat).illum));
PyFloat_FromDouble(mat->dissolve));
PyDict_SetItemString(matobj, "illum", PyLong_FromLong(mat->illum));
PyDict_SetItemString(matobj, "ambient_texname",
PyUnicode_FromString((*mat).ambient_texname.c_str()));
PyUnicode_FromString(mat->ambient_texname.c_str()));
PyDict_SetItemString(matobj, "diffuse_texname",
PyUnicode_FromString((*mat).diffuse_texname.c_str()));
PyUnicode_FromString(mat->diffuse_texname.c_str()));
PyDict_SetItemString(matobj, "specular_texname",
PyUnicode_FromString((*mat).specular_texname.c_str()));
PyUnicode_FromString(mat->specular_texname.c_str()));
PyDict_SetItemString(
matobj, "specular_highlight_texname",
PyUnicode_FromString((*mat).specular_highlight_texname.c_str()));
PyUnicode_FromString(mat->specular_highlight_texname.c_str()));
PyDict_SetItemString(matobj, "bump_texname",
PyUnicode_FromString((*mat).bump_texname.c_str()));
PyUnicode_FromString(mat->bump_texname.c_str()));
PyDict_SetItemString(
matobj, "displacement_texname",
PyUnicode_FromString((*mat).displacement_texname.c_str()));
PyUnicode_FromString(mat->displacement_texname.c_str()));
PyDict_SetItemString(matobj, "alpha_texname",
PyUnicode_FromString((*mat).alpha_texname.c_str()));
PyDict_SetItemString(matobj, "ambient", pyTupleFromfloat3((*mat).ambient));
PyDict_SetItemString(matobj, "diffuse", pyTupleFromfloat3((*mat).diffuse));
PyUnicode_FromString(mat->alpha_texname.c_str()));
PyDict_SetItemString(matobj, "ambient", pyTupleFromfloat3(mat->ambient));
PyDict_SetItemString(matobj, "diffuse", pyTupleFromfloat3(mat->diffuse));
PyDict_SetItemString(matobj, "specular",
pyTupleFromfloat3((*mat).specular));
pyTupleFromfloat3(mat->specular));
PyDict_SetItemString(matobj, "transmittance",
pyTupleFromfloat3((*mat).transmittance));
pyTupleFromfloat3(mat->transmittance));
PyDict_SetItemString(matobj, "emission",
pyTupleFromfloat3((*mat).emission));
pyTupleFromfloat3(mat->emission));
PyDict_SetItemString(matobj, "unknown_parameter", unknown_parameter);
PyDict_SetItemString(pymaterials, (*mat).name.c_str(), matobj);
PyDict_SetItemString(pymaterials, mat->name.c_str(), matobj);
PyList_Append(pymaterial_indices, PyUnicode_FromString(mat->name.c_str()));
}
PyDict_SetItemString(rtndict, "shapes", pyshapes);
PyDict_SetItemString(rtndict, "materials", pymaterials);
PyDict_SetItemString(rtndict, "material_indices", pymaterial_indices);
PyDict_SetItemString(rtndict, "attribs", attribobj);
return rtndict;

View File

@@ -145,6 +145,7 @@ static void PrintInfo(const tinyobj::attrib_t &attrib, const std::vector<tinyobj
printf(" material.map_bump = %s\n", materials[i].bump_texname.c_str());
printf(" material.map_d = %s\n", materials[i].alpha_texname.c_str());
printf(" material.disp = %s\n", materials[i].displacement_texname.c_str());
printf(" material.refl = %s\n", materials[i].reflection_texname.c_str());
std::map<std::string, std::string>::const_iterator it(materials[i].unknown_parameter.begin());
std::map<std::string, std::string>::const_iterator itEnd(materials[i].unknown_parameter.end());
@@ -302,7 +303,8 @@ std::string matStream(
virtual bool operator() (
const std::string& matId,
std::vector<material_t>* materials,
std::map<std::string, int>* matMap,
//std::map<std::string, int>* matMap,
std::map<uint32_t, int>* matMap,
std::string* err)
{
(void)matId;
@@ -564,6 +566,158 @@ TEST_CASE("tr_and_d", "[Issue43]") {
REQUIRE(0.75 == Approx(materials[1].dissolve));
}
TEST_CASE("refl", "[refl]") {
tinyobj::attrib_t attrib;
std::vector<tinyobj::shape_t> shapes;
std::vector<tinyobj::material_t> materials;
std::string err;
bool ret = tinyobj::LoadObj(&attrib, &shapes, &materials, &err, "../models/refl.obj", gMtlBasePath);
if (!err.empty()) {
std::cerr << err << std::endl;
}
PrintInfo(attrib, shapes, materials);
REQUIRE(true == ret);
REQUIRE(5 == materials.size());
REQUIRE(materials[0].reflection_texname.compare("reflection.tga") == 0);
}
TEST_CASE("map_Bump", "[bump]") {
tinyobj::attrib_t attrib;
std::vector<tinyobj::shape_t> shapes;
std::vector<tinyobj::material_t> materials;
std::string err;
bool ret = tinyobj::LoadObj(&attrib, &shapes, &materials, &err, "../models/map-bump.obj", gMtlBasePath);
if (!err.empty()) {
std::cerr << err << std::endl;
}
PrintInfo(attrib, shapes, materials);
REQUIRE(true == ret);
REQUIRE(2 == materials.size());
REQUIRE(materials[0].bump_texname.compare("bump.jpg") == 0);
}
TEST_CASE("g_ignored", "[Issue138]") {
tinyobj::attrib_t attrib;
std::vector<tinyobj::shape_t> shapes;
std::vector<tinyobj::material_t> materials;
std::string err;
bool ret = tinyobj::LoadObj(&attrib, &shapes, &materials, &err, "../models/issue-138.obj", gMtlBasePath);
if (!err.empty()) {
std::cerr << err << std::endl;
}
PrintInfo(attrib, shapes, materials);
REQUIRE(true == ret);
REQUIRE(2 == shapes.size());
REQUIRE(2 == materials.size());
}
TEST_CASE("vertex-col-ext", "[Issue144]") {
tinyobj::attrib_t attrib;
std::vector<tinyobj::shape_t> shapes;
std::vector<tinyobj::material_t> materials;
std::string err;
bool ret = tinyobj::LoadObj(&attrib, &shapes, &materials, &err, "../models/cube-vertexcol.obj", gMtlBasePath);
if (!err.empty()) {
std::cerr << err << std::endl;
}
//PrintInfo(attrib, shapes, materials);
REQUIRE(true == ret);
REQUIRE((8 * 3) == attrib.colors.size());
REQUIRE(0 == Approx(attrib.colors[3 * 0 + 0]));
REQUIRE(0 == Approx(attrib.colors[3 * 0 + 1]));
REQUIRE(0 == Approx(attrib.colors[3 * 0 + 2]));
REQUIRE(0 == Approx(attrib.colors[3 * 1 + 0]));
REQUIRE(0 == Approx(attrib.colors[3 * 1 + 1]));
REQUIRE(1 == Approx(attrib.colors[3 * 1 + 2]));
REQUIRE(1 == Approx(attrib.colors[3 * 4 + 0]));
REQUIRE(1 == Approx(attrib.colors[3 * 7 + 0]));
REQUIRE(1 == Approx(attrib.colors[3 * 7 + 1]));
REQUIRE(1 == Approx(attrib.colors[3 * 7 + 2]));
}
TEST_CASE("norm_texopts", "[norm]") {
tinyobj::attrib_t attrib;
std::vector<tinyobj::shape_t> shapes;
std::vector<tinyobj::material_t> materials;
std::string err;
bool ret = tinyobj::LoadObj(&attrib, &shapes, &materials, &err, "../models/norm-texopt.obj", gMtlBasePath);
if (!err.empty()) {
std::cerr << err << std::endl;
}
REQUIRE(true == ret);
REQUIRE(1 == shapes.size());
REQUIRE(1 == materials.size());
REQUIRE(3.0 == Approx(materials[0].normal_texopt.bump_multiplier));
}
TEST_CASE("zero-face-idx-value", "[Issue140]") {
tinyobj::attrib_t attrib;
std::vector<tinyobj::shape_t> shapes;
std::vector<tinyobj::material_t> materials;
std::string err;
bool ret = tinyobj::LoadObj(&attrib, &shapes, &materials, &err, "../models/issue-140-zero-face-idx.obj", gMtlBasePath);
if (!err.empty()) {
std::cerr << err << std::endl;
}
REQUIRE(false == ret);
REQUIRE(!err.empty());
}
TEST_CASE("texture-name-whitespace", "[Issue145]") {
tinyobj::attrib_t attrib;
std::vector<tinyobj::shape_t> shapes;
std::vector<tinyobj::material_t> materials;
std::string err;
bool ret = tinyobj::LoadObj(&attrib, &shapes, &materials, &err, "../models/texture-filename-with-whitespace.obj", gMtlBasePath);
if (!err.empty()) {
std::cerr << "[Issue145] " << err << std::endl;
}
REQUIRE(true == ret);
REQUIRE(err.empty());
REQUIRE(2 < materials.size());
REQUIRE(0 == materials[0].diffuse_texname.compare("texture 01.png"));
REQUIRE(0 == materials[1].bump_texname.compare("bump 01.png"));
REQUIRE(2 == Approx(materials[1].bump_texopt.bump_multiplier));
}
#if 0
int
main(

File diff suppressed because it is too large Load Diff