Publicaciones
2012 |
E. Fernández, H.E. Salomone, O.J. Chiotti A model driven development approach based on a reference model for predicting disruptive events in a supply process (Artículo de revista) Computers in Industry, 63 (5), pp. 482-499, 2012, (cited By 3). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Fernández2012482, title = {A model driven development approach based on a reference model for predicting disruptive events in a supply process}, author = { E. Fernández and H.E. Salomone and O.J. Chiotti}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84861093897&partnerID=40&md5=90a693f11355a6369556b6dacfa6174f}, doi = {10.1016/j.compind.2012.02.002}, year = {2012}, date = {2012-01-01}, journal = {Computers in Industry}, volume = {63}, number = {5}, pages = {482-499}, abstract = {Due to the impossibility of predicting with certainty the occurrence of disruptive events, buffers defined to obtain a robust schedule could not absorb all the changes. Then, local modifications of the schedule are usually performed to avoid a new planning task. For this task, obtaining disruptive event information in advance can help to make better decisions. As a result, ability to predict disruptive events that affect the execution of the supply process an order represents is required. With the objective of satisfying this requirement, this work proposes a model driven development approach based on a reference model to automate the generation of the monitoring model of a supply process able to anticipate the occurrence of a disruptive event by monitoring variables that can explain it. The approach proposes both a reference model to represent the monitoring model independently of the implementation platform, and a specific model to represent the monitoring model with the particular language of the implementation platform. 
An engine based on transformation rules allows automating the generation of a platform dependent monitoring model from an instance of a platform independent metamodel. The monitoring component of a SCEM system has been developed, which implements the transformation engine as a Bayesian Network model, and uses an appropriate tool to execute it. For an empirical validation of the model three case studies are presented. © 2012 Elsevier B.V. All rights reserved.}, note = {cited By 3}, keywords = {}, pubstate = {published}, tppubtype = {article} } Due to the impossibility of predicting with certainty the occurrence of disruptive events, buffers defined to obtain a robust schedule could not absorb all the changes. Then, local modifications of the schedule are usually performed to avoid a new planning task. For this task, obtaining disruptive event information in advance can help to make better decisions. As a result, ability to predict disruptive events that affect the execution of the supply process an order represents is required. With the objective of satisfying this requirement, this work proposes a model driven development approach based on a reference model to automate the generation of the monitoring model of a supply process able to anticipate the occurrence of a disruptive event by monitoring variables that can explain it. The approach proposes both a reference model to represent the monitoring model independently of the implementation platform, and a specific model to represent the monitoring model with the particular language of the implementation platform. An engine based on transformation rules allows automating the generation of a platform dependent monitoring model from an instance of a platform independent metamodel. The monitoring component of a SCEM system has been developed, which implements the transformation engine as a Bayesian Network model, and uses an appropriate tool to execute it. 
For an empirical validation of the model three case studies are presented. © 2012 Elsevier B.V. All rights reserved. |
L.A. Bearzotti, H.E. Salomone, O.J. Chiotti An autonomous multi-agent approach to supply chain event management (Artículo de revista) International Journal of Production Economics, 135 (1), pp. 468-478, 2012, (cited By 9). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Bearzotti2012468, title = {An autonomous multi-agent approach to supply chain event management}, author = { L.A. Bearzotti and H.E. Salomone and O.J. Chiotti}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-80055099667&partnerID=40&md5=d4ba55eedaa1cf95ddeb517dbbf918d9}, doi = {10.1016/j.ijpe.2011.08.023}, year = {2012}, date = {2012-01-01}, journal = {International Journal of Production Economics}, volume = {135}, number = {1}, pages = {468-478}, abstract = {Organizations have made a significant effort to implement software for planning and scheduling, but disruptive event management is still a problem to be solved. Since a disruptive event can affect the overall performance of the supply chain, SCEM (Supply Chain Event Management) systems presenting different automation levels such as monitoring, alarm and decision support have been proposed. However, the management of disruptive events, taking into account the distributed nature of the supply chain, the members autonomy and the ability to exert corrective control actions, has been identified as a problem that requires further research. This work presents an agent-based approach for the SCEM problem, which can perform autonomous corrective control actions to minimize the effect of deviations in the plan that is currently being executed. These control actions consist of a distribution of the variation between supply chain members, using the plans slack in a collaborative way. An innovative feature of this approach is its focus on resources, which are affected by disruptive events in a direct way. Based on this approach, a SCEM system is designed as a net of control points defined on resources connected through supply process orders. 
Two novel aspects are the distributed collaborative inter-organizational architecture of the SCEM system and a Double Contract Net Protocol. This protocol allows a set of resource:representing agents to interact through an agent, representing a supply process order as a mediator. An application to a case study of the Multi-Agent SCEM system implemented with JADE is provided. © 2010 Elsevier B.V. All rights reserved.}, note = {cited By 9}, keywords = {}, pubstate = {published}, tppubtype = {article} } Organizations have made a significant effort to implement software for planning and scheduling, but disruptive event management is still a problem to be solved. Since a disruptive event can affect the overall performance of the supply chain, SCEM (Supply Chain Event Management) systems presenting different automation levels such as monitoring, alarm and decision support have been proposed. However, the management of disruptive events, taking into account the distributed nature of the supply chain, the members autonomy and the ability to exert corrective control actions, has been identified as a problem that requires further research. This work presents an agent-based approach for the SCEM problem, which can perform autonomous corrective control actions to minimize the effect of deviations in the plan that is currently being executed. These control actions consist of a distribution of the variation between supply chain members, using the plans slack in a collaborative way. An innovative feature of this approach is its focus on resources, which are affected by disruptive events in a direct way. Based on this approach, a SCEM system is designed as a net of control points defined on resources connected through supply process orders. Two novel aspects are the distributed collaborative inter-organizational architecture of the SCEM system and a Double Contract Net Protocol. 
This protocol allows a set of resource-representing agents to interact through an agent representing a supply process order as a mediator. An application to a case study of the Multi-Agent SCEM system implemented with JADE is provided. © 2010 Elsevier B.V. All rights reserved. |
J. Roa, O.J. Chiotti, P. Villarreal A verification method for collaborative business processes (Artículo de revista) Lecture Notes in Business Information Processing, 99 LNBIP (PART 1), pp. 293-305, 2012, (cited By 1). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Roa2012293, title = {A verification method for collaborative business processes}, author = { J. Roa and O.J. Chiotti and P. Villarreal}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84856564594&partnerID=40&md5=f34dd2d80a9c1f8eeed9acd2bbf09df4}, doi = {10.1007/978-3-642-28108-2_29}, year = {2012}, date = {2012-01-01}, journal = {Lecture Notes in Business Information Processing}, volume = {99 LNBIP}, number = {PART 1}, pages = {293-305}, abstract = {The verification of collaborative processes is a key issue to consider in cross-organizational modeling methodologies. Some of the existing verification approaches provide only partial support, whereas others impose some restrictions to verify models with advanced control flow, compromise (completely or partially) the enterprise autonomy, or are focused on technology-dependent specifications. In order to deal with these issues we introduce Global Interaction Nets, which are based on Hierarchical and Colored Petri Nets, and the Global Interaction Soundness property, which was adapted from the classical definition of soundness, as the main correctness criterion. The method can be used to formalize and verify models defined with different modeling languages. In addition, we apply the method through a case study modeled with UP-ColBPIP, which is a modeling language for collaborative processes, and formalize its constructs by means of Global Interaction Nets. © 2012 Springer-Verlag.}, note = {cited By 1}, keywords = {}, pubstate = {published}, tppubtype = {article} } The verification of collaborative processes is a key issue to consider in cross-organizational modeling methodologies. 
Some of the existing verification approaches provide only partial support, whereas others impose some restrictions to verify models with advanced control flow, compromise (completely or partially) the enterprise autonomy, or are focused on technology-dependent specifications. In order to deal with these issues we introduce Global Interaction Nets, which are based on Hierarchical and Colored Petri Nets, and the Global Interaction Soundness property, which was adapted from the classical definition of soundness, as the main correctness criterion. The method can be used to formalize and verify models defined with different modeling languages. In addition, we apply the method through a case study modeled with UP-ColBPIP, which is a modeling language for collaborative processes, and formalize its constructs by means of Global Interaction Nets. © 2012 Springer-Verlag. |
M. Vegetti, M.L. Roldán, S. Gonnet, G. Henning, H.P. Leone ONTOTracED: A framework to capture and trace ontology development processes (Conferencia) 2012, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @conference{Vegetti2012419, title = {ONTOTracED: A framework to capture and trace ontology development processes}, author = { M. Vegetti and M.L. Roldán and S. Gonnet and G. Henning and H.P. Leone}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84881407554&partnerID=40&md5=1006a51c0dd4bc507de39d327bb0ca9c}, year = {2012}, date = {2012-01-01}, journal = {KEOD 2012 - Proceedings of the International Conference on Knowledge Engineering and Ontology Development}, pages = {419-422}, abstract = {In the last two decades several methodologies to assist the ontology development process have been reported in the literature. However, despite important advances, there are no computational tools supporting them yet. Thus, when an ontology development process ends, the things that remain are just design products (e.g., competency questions, class diagrams, implementations, etc.), without an explicit representation of how they were obtained. This paper presents a framework meant to explicitly capture and trace ontology development processes (the activities carried out, the actors executing them, etc.), along with their associated products.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {conference} } In the last two decades several methodologies to assist the ontology development process have been reported in the literature. However, despite important advances, there are no computational tools supporting them yet. Thus, when an ontology development process ends, the things that remain are just design products (e.g., competency questions, class diagrams, implementations, etc.), without an explicit representation of how they were obtained. 
This paper presents a framework meant to explicitly capture and trace ontology development processes (the activities carried out, the actors executing them, etc.), along with their associated products. |
M. Vegetti, M.L. Roldán, S. Gonnet, G. Henning, H.P. Leone An operational approach for capturing and tracing the ontology development process (Conferencia) 938 , 2012, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @conference{Vegetti201236, title = {An operational approach for capturing and tracing the ontology development process}, author = { M. Vegetti and M.L. Roldán and S. Gonnet and G. Henning and H.P. Leone}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84893162370&partnerID=40&md5=e34ba9b6df661ad6953b46b24723d245}, year = {2012}, date = {2012-01-01}, journal = {CEUR Workshop Proceedings}, volume = {938}, pages = {36-47}, abstract = {The history of an ontology development project, including its intermediate products, together with the executed activities, and the decisions made, might be of great importance in other future ontology developments. However, current tools supporting this kind of projects do not capture such information; thus, the process trace is lost, and any new ontology development project would start from scratch. This paper presents a framework meant to do overcome these deficiencies, allowing the capture and trace of such projects.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The history of an ontology development project, including its intermediate products, together with the executed activities, and the decisions made, might be of great importance in other future ontology developments. However, current tools supporting this kind of projects do not capture such information; thus, the process trace is lost, and any new ontology development project would start from scratch. This paper presents a framework meant to do overcome these deficiencies, allowing the capture and trace of such projects. |
M.A. Rodriguez, A.R. Vecchietti Mid-term planning optimization model with sales contracts under demand uncertainty (Artículo de revista) Computers and Chemical Engineering, 47 , pp. 227-236, 2012, (cited By 1). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Rodriguez2012227, title = {Mid-term planning optimization model with sales contracts under demand uncertainty}, author = { M.A. Rodriguez and A.R. Vecchietti}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84869487803&partnerID=40&md5=69548a772f52df64737f1b7f2d64d3fb}, doi = {10.1016/j.compchemeng.2012.06.040}, year = {2012}, date = {2012-01-01}, journal = {Computers and Chemical Engineering}, volume = {47}, pages = {227-236}, abstract = {Uncertainty modeling is a challenging topic in supply chain and operation management. When planning material purchase and stock levels, demand uncertainty could have an important impact on the plan results and its feasibility. Additionally, uncertainty could greatly affect customer satisfaction, inventory costs and company profits. From a modeling perspective, problems considering uncertainty are difficult to tackle and lead to complex optimization approaches. This work proposes a mid-term planning model dealing with sales contracts to diminish the effect of uncertainty. Another interesting feature is given by the selection of different price levels. Price elasticity functions are introduced for each customer in order to jointly decide demand targets and prices. A linear generalized disjunctive programming model is developed. Short execution time shows that this model can be applied to analyze several real scenarios to decide material purchase plan, inventory levels, sales strategies, prices and demand levels in a medium term horizon planning. © 2012 Elsevier Ltd.}, note = {cited By 1}, keywords = {}, pubstate = {published}, tppubtype = {article} } Uncertainty modeling is a challenging topic in supply chain and operation management. 
When planning material purchase and stock levels, demand uncertainty could have an important impact on the plan results and its feasibility. Additionally, uncertainty could greatly affect customer satisfaction, inventory costs and company profits. From a modeling perspective, problems considering uncertainty are difficult to tackle and lead to complex optimization approaches. This work proposes a mid-term planning model dealing with sales contracts to diminish the effect of uncertainty. Another interesting feature is given by the selection of different price levels. Price elasticity functions are introduced for each customer in order to jointly decide demand targets and prices. A linear generalized disjunctive programming model is developed. Short execution time shows that this model can be applied to analyze several real scenarios to decide material purchase plan, inventory levels, sales strategies, prices and demand levels in a medium term horizon planning. © 2012 Elsevier Ltd. |
C.E. Alvez, A.R. Vecchietti Efficient image recovery using visual and semantic contents (Conferencia) 2012, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @conference{Alvez2012, title = {Efficient image recovery using visual and semantic contents}, author = { C.E. Alvez and A.R. Vecchietti}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84874307570&partnerID=40&md5=fca9c3570596e14cfdcf8c73754af42e}, doi = {10.1109/CLEI.2012.6427147}, year = {2012}, date = {2012-01-01}, journal = {38th Latin America Conference on Informatics, CLEI 2012 - Conference Proceedings}, abstract = {Images recovery by combining low level descriptors and semantic content is a big challenge. In this work we present a three level software architecture implemented in an Object-Relational database that allows the image queries using both descriptions. For the physical level (low-level) MPEG-7 standard descriptors are employed and for the semantic metadata RDF triplets are used. The tests performed using combined low-level and semantic queries gave very good results in terms of images recovered. Since the semantic data can involve a huge number of RDF triplets, we are proposed and indexation structure for the triplets which shows a very good performance compared with other approaches. © 2012 IEEE.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Images recovery by combining low level descriptors and semantic content is a big challenge. In this work we present a three level software architecture implemented in an Object-Relational database that allows the image queries using both descriptions. For the physical level (low-level) MPEG-7 standard descriptors are employed and for the semantic metadata RDF triplets are used. The tests performed using combined low-level and semantic queries gave very good results in terms of images recovered. 
Since the semantic data can involve a huge number of RDF triplets, we propose an indexation structure for the triplets, which shows a very good performance compared with other approaches. © 2012 IEEE. |
P. Druetta, S.F. Mussati, P.A. Aguirre Seawater Desalination Processes. Optimal Design of Multi Effect Evaporation Systems (Artículo de revista) Computer Aided Chemical Engineering, 31 , pp. 770-774, 2012, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Druetta2012770, title = {Seawater Desalination Processes. Optimal Design of Multi Effect Evaporation Systems}, author = { P. Druetta and S.F. Mussati and P.A. Aguirre}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84864508015&partnerID=40&md5=34e4a73ac0d4d0c17dad8d2872c2fd4c}, doi = {10.1016/B978-0-444-59507-2.50146-3}, year = {2012}, date = {2012-01-01}, journal = {Computer Aided Chemical Engineering}, volume = {31}, pages = {770-774}, abstract = {This is the first paper of a series of articles that deals with the modeling and optimization of dual-purpose desalination plants which combine thermal desalination processes and combined heat and power systems, specifically solid oxide fuel cell SOFC electricity generators. This paper presents preliminary results obtained for the multi effect evaporation (MEE) process (stand alone process). The steady state performance of the MEE system is described by a simplified and no linear programming (NLP) model. Optimal operating conditions including profiles of temperature, flow-rate and heat transfer area along the evaporator are analyzed. In addition, the influence of the effect number on the evaporation efficiency is also investigated. © 2012 Elsevier B.V.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {article} } This is the first paper of a series of articles that deals with the modeling and optimization of dual-purpose desalination plants which combine thermal desalination processes and combined heat and power systems, specifically solid oxide fuel cell SOFC electricity generators. This paper presents preliminary results obtained for the multi effect evaporation (MEE) process (stand alone process). 
The steady state performance of the MEE system is described by a simplified non-linear programming (NLP) model. Optimal operating conditions including profiles of temperature, flow-rate and heat transfer area along the evaporator are analyzed. In addition, the influence of the effect number on the evaporation efficiency is also investigated. © 2012 Elsevier B.V. |
F. Serralunga, M.C. Mussati, P.A. Aguirre Real-time optimization of energy systems in sugar and ethanol facilities. a modifier adaptation approach (Artículo de revista) Computer Aided Chemical Engineering, 31 , pp. 375-379, 2012, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Serralunga2012375, title = {Real-time optimization of energy systems in sugar and ethanol facilities. a modifier adaptation approach}, author = { F. Serralunga and M.C. Mussati and P.A. Aguirre}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84864516395&partnerID=40&md5=5c082846d7ac3311f4884579c6cf2f98}, doi = {10.1016/B978-0-444-59507-2.50067-6}, year = {2012}, date = {2012-01-01}, journal = {Computer Aided Chemical Engineering}, volume = {31}, pages = {375-379}, abstract = {Real-time optimization (RTO) is used to operate industrial processes close to their minimum cost or maximum profit conditions. Model-based RTO needs an adaptation step to update the model under plant/model mismatch. This work proposes a novel RTO approach based on the modifier adaptation strategy, aiming to reduce the dimension of the gradient correction problem. The approach was tested in a model of a heat and power system of a sugar/ethanol facility. It could be also applied to other kind of processes. The results showed a noticeable performance improvement when compared to adaptation strategies without gradient correction. © 2012 Elsevier B.V.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {article} } Real-time optimization (RTO) is used to operate industrial processes close to their minimum cost or maximum profit conditions. Model-based RTO needs an adaptation step to update the model under plant/model mismatch. This work proposes a novel RTO approach based on the modifier adaptation strategy, aiming to reduce the dimension of the gradient correction problem. The approach was tested in a model of a heat and power system of a sugar/ethanol facility. 
It could also be applied to other kinds of processes. The results showed a noticeable performance improvement when compared to adaptation strategies without gradient correction. © 2012 Elsevier B.V. |
M. Fuentes, N.J. Scenna, P.A. Aguirre Optimization of hybrid anaerobic-aerobic SBR-based systems (Artículo de revista) Computer Aided Chemical Engineering, 30 , pp. 137-141, 2012, (cited By 1). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Fuentes2012137, title = {Optimization of hybrid anaerobic-aerobic SBR-based systems}, author = { M. Fuentes and N.J. Scenna and P.A. Aguirre}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84862864929&partnerID=40&md5=b0aee63e54f402ad0a24b0a051e10ac3}, doi = {10.1016/B978-0-444-59519-5.50028-9}, year = {2012}, date = {2012-01-01}, journal = {Computer Aided Chemical Engineering}, volume = {30}, pages = {137-141}, abstract = {This work deals with optimization of biological hybrid systems based on both anaerobic and aerobic sequential batch reactors (SBRs). The global model can address the differences between aerated and anaerobic systems by assigning adequate parameter values related to the presence of oxygen in the medium, aeration and sedimentation times, and selecting the kinetics model to represent the digestion stage. An existing set of experimental data is used for model validation. Fluctuating operation conditions during cycles such as disturbances in the organic loading rate, feed and recycle flow rates and changes in the hydrodynamic regime result in strong numerical discontinuities that are included in the simulation schedules. Optimization results are based on minimizing the reaction time/total cycle time ratio subjected to (pH, DO and nitrite) path constraints and interior- and end-point constraints related to the (COD and nitrogen) pollutant removal efficiency and settling conditions. A decrease of 29% in the total cycle time, i.e. an increase in the organic loading rate from 443 to 611 mg dm -3 d -1 is reached without modifying the quality of effluent. gOPT tool of gPROMS was used to perform the dynamic optimization. 
© 2012 Elsevier B.V.}, note = {cited By 1}, keywords = {}, pubstate = {published}, tppubtype = {article} } This work deals with optimization of biological hybrid systems based on both anaerobic and aerobic sequential batch reactors (SBRs). The global model can address the differences between aerated and anaerobic systems by assigning adequate parameter values related to the presence of oxygen in the medium, aeration and sedimentation times, and selecting the kinetics model to represent the digestion stage. An existing set of experimental data is used for model validation. Fluctuating operation conditions during cycles such as disturbances in the organic loading rate, feed and recycle flow rates and changes in the hydrodynamic regime result in strong numerical discontinuities that are included in the simulation schedules. Optimization results are based on minimizing the reaction time/total cycle time ratio subjected to (pH, DO and nitrite) path constraints and interior- and end-point constraints related to the (COD and nitrogen) pollutant removal efficiency and settling conditions. A decrease of 29% in the total cycle time, i.e. an increase in the organic loading rate from 443 to 611 mg dm -3 d -1 is reached without modifying the quality of effluent. gOPT tool of gPROMS was used to perform the dynamic optimization. © 2012 Elsevier B.V. |
F. Serralunga, M.C. Mussati, P.A. Aguirre An alternative real-time optimization algorithm with modifier adaptation. Application to heat and power systems (Artículo de revista) Computer Aided Chemical Engineering, 30 , pp. 367-371, 2012, (cited By 1). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Serralunga2012367, title = {An alternative real-time optimization algorithm with modifier adaptation. Application to heat and power systems}, author = { F. Serralunga and M.C. Mussati and P.A. Aguirre}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84862878471&partnerID=40&md5=3020db0ed54cfb8e4294bed85c68dc64}, doi = {10.1016/B978-0-444-59519-5.50074-5}, year = {2012}, date = {2012-01-01}, journal = {Computer Aided Chemical Engineering}, volume = {30}, pages = {367-371}, abstract = {Performance of industrial processes can be continually improved through real-time optimization (RTO). Model-based RTO approaches always deal with parametric and structural plant/model mismatch, and adaptation strategies have been developed to overcome this problem. This work proposes an alternative algorithm based on the modifier adaptation approach, which simplifies the gradient estimation problem in systems with a large number of variables. The algorithm is tested in a heat and power system model, which includes a steam network with electric power generation. © 2012 Elsevier B.V.}, note = {cited By 1}, keywords = {}, pubstate = {published}, tppubtype = {article} } Performance of industrial processes can be continually improved through real-time optimization (RTO). Model-based RTO approaches always deal with parametric and structural plant/model mismatch, and adaptation strategies have been developed to overcome this problem. This work proposes an alternative algorithm based on the modifier adaptation approach, which simplifies the gradient estimation problem in systems with a large number of variables. 
The algorithm is tested in a heat and power system model, which includes a steam network with electric power generation. © 2012 Elsevier B.V. |
C. Pieragostini, M.C. Mussati, P.A. Aguirre On process optimization considering LCA methodology (Artículo de revista) Journal of Environmental Management, 96 (1), pp. 43-54, 2012, (cited By 38). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Pieragostini201243, title = {On process optimization considering LCA methodology}, author = { C. Pieragostini and M.C. Mussati and P.A. Aguirre}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-82055194285&partnerID=40&md5=bafb97cf00e64c03fb02977cbaf08b0a}, doi = {10.1016/j.jenvman.2011.10.014}, year = {2012}, date = {2012-01-01}, journal = {Journal of Environmental Management}, volume = {96}, number = {1}, pages = {43-54}, abstract = {The goal of this work is to research the state-of-the-art in process optimization techniques and tools based on LCA, focused in the process engineering field. A collection of methods, approaches, applications, specific software packages, and insights regarding experiences and progress made in applying the LCA methodology coupled to optimization frameworks is provided, and general trends are identified. The " cradle-to-gate" concept to define the system boundaries is the most used approach in practice, instead of the " cradle-to-grave" approach. Normally, the relationship between inventory data and impact category indicators is linearly expressed by the characterization factors; then, synergic effects of the contaminants are neglected. Among the LCIA methods, the eco-indicator 99, which is based on the endpoint category and the panel method, is the most used in practice. A single environmental impact function, resulting from the aggregation of environmental impacts, is formulated as the environmental objective in most analyzed cases. SimaPro is the most used software for LCA applications in literature analyzed. 
The multi-objective optimization is the most used approach for dealing with this kind of problems, where the ε-constraint method for generating the Pareto set is the most applied technique. However, a renewed interest in formulating a single economic objective function in optimization frameworks can be observed, favored by the development of life cycle cost software and progress made in assessing costs of environmental externalities. Finally, a trend to deal with multi-period scenarios into integrated LCA-optimization frameworks can be distinguished providing more accurate results upon data availability. © 2011 Elsevier Ltd.}, note = {cited By 38}, keywords = {}, pubstate = {published}, tppubtype = {article} } The goal of this work is to research the state-of-the-art in process optimization techniques and tools based on LCA, focused in the process engineering field. A collection of methods, approaches, applications, specific software packages, and insights regarding experiences and progress made in applying the LCA methodology coupled to optimization frameworks is provided, and general trends are identified. The " cradle-to-gate" concept to define the system boundaries is the most used approach in practice, instead of the " cradle-to-grave" approach. Normally, the relationship between inventory data and impact category indicators is linearly expressed by the characterization factors; then, synergic effects of the contaminants are neglected. Among the LCIA methods, the eco-indicator 99, which is based on the endpoint category and the panel method, is the most used in practice. A single environmental impact function, resulting from the aggregation of environmental impacts, is formulated as the environmental objective in most analyzed cases. SimaPro is the most used software for LCA applications in literature analyzed. 
The multi-objective optimization is the most used approach for dealing with this kind of problems, where the ε-constraint method for generating the Pareto set is the most applied technique. However, a renewed interest in formulating a single economic objective function in optimization frameworks can be observed, favored by the development of life cycle cost software and progress made in assessing costs of environmental externalities. Finally, a trend to deal with multi-period scenarios into integrated LCA-optimization frameworks can be distinguished providing more accurate results upon data availability. © 2011 Elsevier Ltd. |
P. Mores, N. Scenna, S.F. Mussati CO 2 capture using monoethanolamine (MEA) aqueous solution: Modeling and optimization of the solvent regeneration and CO 2 desorption process (Artículo de revista) Energy, 45 (1), pp. 1042-1058, 2012, (cited By 14). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Mores20121042, title = {CO 2 capture using monoethanolamine (MEA) aqueous solution: Modeling and optimization of the solvent regeneration and CO 2 desorption process}, author = { P. Mores and N. Scenna and S.F. Mussati}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84865423617&partnerID=40&md5=c5fb17910edb0b8e7519b780fa6fd58b}, doi = {10.1016/j.energy.2012.06.038}, year = {2012}, date = {2012-01-01}, journal = {Energy}, volume = {45}, number = {1}, pages = {1042-1058}, abstract = {This paper deals with the simultaneously optimization of operating conditions (pressures, temperatures and flow-rates) and dimensions (diameter and height) of the amine regeneration unit in the post-combustion CO 2 capture process. The proposed model takes into account the effect of kinetic reactions on the mass transfer, the hydraulics of the random packing and the pressure drop along the column. In addition, profiles of temperature, composition and flow-rate along the height of the regenerator are also predicted.The resulting mathematical model is implemented into the optimization environment General Algebraic Modeling System (GAMS) which is a high-level modeling system for mathematical programming and optimization. 
The benefits of the mathematical programming techniques (equation-oriented modeling tools) are exploited for the simultaneous optimization not only of the operating conditions but also the dimensions of the all piece of equipments (heat exchangers, regeneration unit, condenser and reboiler).The mathematical model was successfully verified by comparison of the obtained results with published experimental data and simulated solutions obtained by a process simulator (HYSYS). Once validated, the model was used for optimization purpose.Finally, in order to study the effect of the main process parameters on the optimized results a sensitivity analysis is also investigated and discussed in detail. © 2012 Elsevier Ltd.}, note = {cited By 14}, keywords = {}, pubstate = {published}, tppubtype = {article} } This paper deals with the simultaneously optimization of operating conditions (pressures, temperatures and flow-rates) and dimensions (diameter and height) of the amine regeneration unit in the post-combustion CO 2 capture process. The proposed model takes into account the effect of kinetic reactions on the mass transfer, the hydraulics of the random packing and the pressure drop along the column. In addition, profiles of temperature, composition and flow-rate along the height of the regenerator are also predicted.The resulting mathematical model is implemented into the optimization environment General Algebraic Modeling System (GAMS) which is a high-level modeling system for mathematical programming and optimization. 
The benefits of the mathematical programming techniques (equation-oriented modeling tools) are exploited for the simultaneous optimization not only of the operating conditions but also the dimensions of the all piece of equipments (heat exchangers, regeneration unit, condenser and reboiler).The mathematical model was successfully verified by comparison of the obtained results with published experimental data and simulated solutions obtained by a process simulator (HYSYS). Once validated, the model was used for optimization purpose.Finally, in order to study the effect of the main process parameters on the optimized results a sensitivity analysis is also investigated and discussed in detail. © 2012 Elsevier Ltd. |
P. Mores, N. Rodríguez, N. Scenna, S.F. Mussati CO 2 capture in power plants: Minimization of the investment and operating cost of the post-combustion process using MEA aqueous solution (Artículo de revista) International Journal of Greenhouse Gas Control, 10 , pp. 148-163, 2012, (cited By 28). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Mores2012148, title = {CO 2 capture in power plants: Minimization of the investment and operating cost of the post-combustion process using MEA aqueous solution}, author = { P. Mores and N. Rodríguez and N. Scenna and S.F. Mussati}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84863464542&partnerID=40&md5=698eb14fd42300c522014b43b8c33ec7}, doi = {10.1016/j.ijggc.2012.06.002}, year = {2012}, date = {2012-01-01}, journal = {International Journal of Greenhouse Gas Control}, volume = {10}, pages = {148-163}, abstract = {The post combustion process based on the CO 2 absorption using amine aqueous solution is one of the more attractive options to drastically reduce greenhouse gas emissions from electric power sector. However, the solvent regeneration is highly energy intensive affecting the total operating cost significantly. The CO 2 removal target depends on the absorption and desorption processes where the main parameters of both processes are strongly coupled. Consequently, the simultaneous optimization of the whole CO 2 capture process is essential to determine the best design and operating conditions in order to minimize the total cost. This paper presents and discusses different cost optimizations including both investments and operating costs. The impact of different CO 2 emission reduction targets on the total annual cost, operating conditions and dimensions of process units is investigated in detail. Optimized results are discussed through different case studies. 
© 2012 Elsevier Ltd.}, note = {cited By 28}, keywords = {}, pubstate = {published}, tppubtype = {article} } The post combustion process based on the CO 2 absorption using amine aqueous solution is one of the more attractive options to drastically reduce greenhouse gas emissions from electric power sector. However, the solvent regeneration is highly energy intensive affecting the total operating cost significantly. The CO 2 removal target depends on the absorption and desorption processes where the main parameters of both processes are strongly coupled. Consequently, the simultaneous optimization of the whole CO 2 capture process is essential to determine the best design and operating conditions in order to minimize the total cost. This paper presents and discusses different cost optimizations including both investments and operating costs. The impact of different CO 2 emission reduction targets on the total annual cost, operating conditions and dimensions of process units is investigated in detail. Optimized results are discussed through different case studies. © 2012 Elsevier Ltd. |
M.A. Reinheimer, S.F. Mussati, N.J. Scenna Optimization of operating conditions of a cooling tunnel for production of hard candies (Artículo de revista) Journal of Food Engineering, 109 (1), pp. 22-31, 2012, (cited By 1). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Reinheimer201222, title = {Optimization of operating conditions of a cooling tunnel for production of hard candies}, author = { M.A. Reinheimer and S.F. Mussati and N.J. Scenna}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-81055156573&partnerID=40&md5=926a2a956071879df8e03b5301510e77}, doi = {10.1016/j.jfoodeng.2011.10.009}, year = {2012}, date = {2012-01-01}, journal = {Journal of Food Engineering}, volume = {109}, number = {1}, pages = {22-31}, abstract = {On large industrial scales, the cooling stage in the production process of hard candies is one of the most critical unit operations because many quality problems such as deformation, fragility and aggregation may appear in this stage. Therefore, the optimization of the operating conditions of the cooling tunnel plays an essential role from a quality point of view. The objective of this work is to develop a mathematical model to determine the optimal operating conditions of the cooling tunnel in order to assure the product quality. The resulting PDAEs were converted to a set of nonlinear algebraic equations using the centered finite difference approximation (CFDM) and were implemented into the optimization environment GAMS (General Algebraic Modeling System). Thus, the temperature profiles in the center and surface of the candy, air cooling temperature and velocity are simultaneously optimized. Three objective functions based on the product quality are proposed. A sensitivity analysis on the main model parameters was also made. The obtained results are discussed in detail. It is concluded that the developed model can be used as a useful tool to improve its operating efficiency and even to design a new cooling tunnel. 
© 2011 Elsevier Ltd. All rights reserved.}, note = {cited By 1}, keywords = {}, pubstate = {published}, tppubtype = {article} } On large industrial scales, the cooling stage in the production process of hard candies is one of the most critical unit operations because many quality problems such as deformation, fragility and aggregation may appear in this stage. Therefore, the optimization of the operating conditions of the cooling tunnel plays an essential role from a quality point of view. The objective of this work is to develop a mathematical model to determine the optimal operating conditions of the cooling tunnel in order to assure the product quality. The resulting PDAEs were converted to a set of nonlinear algebraic equations using the centered finite difference approximation (CFDM) and were implemented into the optimization environment GAMS (General Algebraic Modeling System). Thus, the temperature profiles in the center and surface of the candy, air cooling temperature and velocity are simultaneously optimized. Three objective functions based on the product quality are proposed. A sensitivity analysis on the main model parameters was also made. The obtained results are discussed in detail. It is concluded that the developed model can be used as a useful tool to improve its operating efficiency and even to design a new cooling tunnel. © 2011 Elsevier Ltd. All rights reserved. |
M.A. Reinheimer, S.F. Mussati, N.J. Scenna Production process and technological aspects of honey flavored hard candy (Libro) 2012, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @book{Reinheimer2012183, title = {Production process and technological aspects of honey flavored hard candy}, author = { M.A. Reinheimer and S.F. Mussati and N.J. Scenna}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84895364299&partnerID=40&md5=7afadfa20c56bbcdd163be634064e444}, year = {2012}, date = {2012-01-01}, journal = {Honey: Production, Consumption and Health Benefits}, pages = {183-192}, abstract = {Hard candies are classic examples of sugar glasses. Apparently, they are solids, but actually, in fact they are supercooled liquids in a non-crystalline state. Despite their simple composition, different process aspects are strongly influenced by the microstructural characterization of hard candies. In the confectionary industry, several process operating parameters are critical to produce high quality products. For this reason, such parameters should be controlled. Also, the behavior of the ingredients plays an important role on the product quality and therefore should be considered. This proposed chapter is focused on the description of the production process of honey flavored hard candy. During different process stages, some technological considerations related with the composition and the vitrification process of the product will be mentioned on the chapter. In addition, cooking and cooling process conditions affect quality and storage aspects as well as hard candy's shelf life. For those mentioned features, it is necessary to interlink process stages with product characterization in order to produce high quality candies. Finally, these considerations will be explored in detail in the chapter. © 2012 by Nova Science Publishers, Inc. 
All rights reserved.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {book} } Hard candies are classic examples of sugar glasses. Apparently, they are solids, but actually, in fact they are supercooled liquids in a non-crystalline state. Despite their simple composition, different process aspects are strongly influenced by the microstructural characterization of hard candies. In the confectionary industry, several process operating parameters are critical to produce high quality products. For this reason, such parameters should be controlled. Also, the behavior of the ingredients plays an important role on the product quality and therefore should be considered. This proposed chapter is focused on the description of the production process of honey flavored hard candy. During different process stages, some technological considerations related with the composition and the vitrification process of the product will be mentioned on the chapter. In addition, cooking and cooling process conditions affect quality and storage aspects as well as hard candy's shelf life. For those mentioned features, it is necessary to interlink process stages with product characterization in order to produce high quality candies. Finally, these considerations will be explored in detail in the chapter. © 2012 by Nova Science Publishers, Inc. All rights reserved. |
P. Mores, N. Scenna, S.F. Mussati A rate based model of a packed column for CO 2 absorption using aqueous monoethanolamine solution (Artículo de revista) International Journal of Greenhouse Gas Control, 6 , pp. 21-36, 2012, (cited By 19). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Mores201221, title = {A rate based model of a packed column for CO 2 absorption using aqueous monoethanolamine solution}, author = { P. Mores and N. Scenna and S.F. Mussati}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84855165592&partnerID=40&md5=156ddf45f0237cd85af31cb971580340}, doi = {10.1016/j.ijggc.2011.10.012}, year = {2012}, date = {2012-01-01}, journal = {International Journal of Greenhouse Gas Control}, volume = {6}, pages = {21-36}, abstract = {A comprehensive and simplified rate based mathematical model of a packed column for CO 2 capture using aqueous monoethanolamine (MEA) solution is developed. The absorption unit model takes into account the effect of kinetic reactions on the mass transfer, the thermodynamic non-idealities, the hydraulics of the random packing and the absorber dimensions (diameter and height). It is implemented into the optimization environment GAMS (General Algebraic Modeling System).The proposed NLP model was validated by comparison of obtained results with published experimental data. Good accuracy of results has been obtained for experimental pilot plant scales. Once validated, the model was used to investigate the influence of main process parameters and the effect of different correlations to compute the effective interfacial area for mass transfer (a/a t) on the absorption efficiency. Obtained results indicate that model solutions depend strongly on the correlations used to compute the (a/a t). In addition, results assuming thermal equilibrium and thermal non-equilibrium in liquid and vapor phases were also compared. 
For both conditions and specific cases, similar concentration and temperature profiles in the liquid phase in the absorber were obtained.Finally, results obtained by solving different optimization problems are discussed. More precisely, the optimization consisted in determining the operating conditions to maximize the absorption efficiency defined as the ratio between the CO 2 recovery in rich solution and the packing volume of the column. The effect of the main process parameters on the optimized results was also investigated. © 2011 Elsevier Ltd.}, note = {cited By 19}, keywords = {}, pubstate = {published}, tppubtype = {article} } A comprehensive and simplified rate based mathematical model of a packed column for CO 2 capture using aqueous monoethanolamine (MEA) solution is developed. The absorption unit model takes into account the effect of kinetic reactions on the mass transfer, the thermodynamic non-idealities, the hydraulics of the random packing and the absorber dimensions (diameter and height). It is implemented into the optimization environment GAMS (General Algebraic Modeling System).The proposed NLP model was validated by comparison of obtained results with published experimental data. Good accuracy of results has been obtained for experimental pilot plant scales. Once validated, the model was used to investigate the influence of main process parameters and the effect of different correlations to compute the effective interfacial area for mass transfer (a/a t) on the absorption efficiency. Obtained results indicate that model solutions depend strongly on the correlations used to compute the (a/a t). In addition, results assuming thermal equilibrium and thermal non-equilibrium in liquid and vapor phases were also compared. For both conditions and specific cases, similar concentration and temperature profiles in the liquid phase in the absorber were obtained.Finally, results obtained by solving different optimization problems are discussed. 
More precisely, the optimization consisted in determining the operating conditions to maximize the absorption efficiency defined as the ratio between the CO 2 recovery in rich solution and the packing volume of the column. The effect of the main process parameters on the optimized results was also investigated. © 2011 Elsevier Ltd. |
Y. Fumero, G. Corsano, J.M. Montagna Planning and scheduling of multistage multiproduct batch plants operating under production campaigns (Artículo de revista) Annals of Operations Research, 199 (1), pp. 249-268, 2012, (cited By 2). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Fumero2012249, title = {Planning and scheduling of multistage multiproduct batch plants operating under production campaigns}, author = { Y. Fumero and G. Corsano and J.M. Montagna}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84866090422&partnerID=40&md5=330e636b7a2db722dc0e00f1b56f6e9c}, doi = {10.1007/s10479-011-0954-8}, year = {2012}, date = {2012-01-01}, journal = {Annals of Operations Research}, volume = {199}, number = {1}, pages = {249-268}, abstract = {When plants are operated under stable conditions during reasonable time periods, operation with campaigns is particularly appropriate. The regular operation of the facilities simplifies the production control, the inventory management, the plant operability, etc. A campaign includes several batches of different products that are going to be manufactured and the same one is cyclically repeated over the time horizon. In this work, a mixed integer linear programming formulation is proposed for the planning and scheduling of given multiproduct batch plants operating with campaigns. The number and size of batches for each product, the campaign composition, the assignment of batches to units and their sequencing, and the number of times that the campaign is repeated over the time horizon must be determined. Taking into account this scenario, an appropriate performance measure is the minimization of the cycle time. An asynchronous slot-based continuous-time representation for modeling the assignment of batches to units and their sequencing is employed, and a novel rule for determining the maximum number of slots postulated for each unit is proposed. 
© 2011 Springer Science+Business Media, LLC.}, note = {cited By 2}, keywords = {}, pubstate = {published}, tppubtype = {article} } When plants are operated under stable conditions during reasonable time periods, operation with campaigns is particularly appropriate. The regular operation of the facilities simplifies the production control, the inventory management, the plant operability, etc. A campaign includes several batches of different products that are going to be manufactured and the same one is cyclically repeated over the time horizon. In this work, a mixed integer linear programming formulation is proposed for the planning and scheduling of given multiproduct batch plants operating with campaigns. The number and size of batches for each product, the campaign composition, the assignment of batches to units and their sequencing, and the number of times that the campaign is repeated over the time horizon must be determined. Taking into account this scenario, an appropriate performance measure is the minimization of the cycle time. An asynchronous slot-based continuous-time representation for modeling the assignment of batches to units and their sequencing is employed, and a novel rule for determining the maximum number of slots postulated for each unit is proposed. © 2011 Springer Science+Business Media, LLC. |
M.S. Moreno, J.M. Montagna Multiperiod production planning and design of batch plants under uncertainty (Artículo de revista) Computers and Chemical Engineering, 40 , pp. 181-190, 2012, (cited By 4). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Moreno2012181, title = {Multiperiod production planning and design of batch plants under uncertainty}, author = { M.S. Moreno and J.M. Montagna}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84860365713&partnerID=40&md5=b5cd3c1c3e6e21a385470c739586f904}, doi = {10.1016/j.compchemeng.2012.01.008}, year = {2012}, date = {2012-01-01}, journal = {Computers and Chemical Engineering}, volume = {40}, pages = {181-190}, abstract = {A two-stage stochastic multiperiod LGDP (linear generalized disjunctive programming) model was developed to address the integrated design and production planning of multiproduct batch plants. Both problems are encompassed considering uncertainty in product demands represented by a set of scenarios. The design variables are modeled as here-and-now decisions which are made before the demand realization, while the production planning variables are delayed in a wait-and-see mode to optimize in the face of uncertainty. Specifically, the proposed model determines the structure of the batch plant (duplication of units in series and in parallel) and the unit sizes, together with the production planning decisions in each time period within each scenario. The model also allows the incorporation of new equipment items at different periods. The objective is to maximize the expected net present value of the benefit. To assess the advantages of the proposed formulation, an extraction process that produces oleoresins is solved. 
© 2012 Elsevier Ltd.}, note = {cited By 4}, keywords = {}, pubstate = {published}, tppubtype = {article} } A two-stage stochastic multiperiod LGDP (linear generalized disjunctive programming) model was developed to address the integrated design and production planning of multiproduct batch plants. Both problems are encompassed considering uncertainty in product demands represented by a set of scenarios. The design variables are modeled as here-and-now decisions which are made before the demand realization, while the production planning variables are delayed in a wait-and-see mode to optimize in the face of uncertainty. Specifically, the proposed model determines the structure of the batch plant (duplication of units in series and in parallel) and the unit sizes, together with the production planning decisions in each time period within each scenario. The model also allows the incorporation of new equipment items at different periods. The objective is to maximize the expected net present value of the benefit. To assess the advantages of the proposed formulation, an extraction process that produces oleoresins is solved. © 2012 Elsevier Ltd. |
Y. Fumero, G. Corsano, J.M. Montagna Scheduling of multistage multiproduct batch plants operating in a campaign-mode (Artículo de revista) Industrial and Engineering Chemistry Research, 51 (10), pp. 3988-4001, 2012, (cited By 6). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Fumero20123988, title = {Scheduling of multistage multiproduct batch plants operating in a campaign-mode}, author = { Y. Fumero and G. Corsano and J.M. Montagna}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84858304484&partnerID=40&md5=154d7ebb053d3ee53210d3281ba6957a}, doi = {10.1021/ie201757t}, year = {2012}, date = {2012-01-01}, journal = {Industrial and Engineering Chemistry Research}, volume = {51}, number = {10}, pages = {3988-4001}, abstract = {In this work, mixed integer linear programming models for scheduling multistage multiproduct batch plants operating under campaign mode are proposed. It is assumed that each plant stage includes identical parallel units operating out of phase. Given the plant topology and the number of batches of each product to be processed in the campaign, the objective is assigning batches to units in each stage in order to minimize the cycle time of the campaign. An asynchronous slot-based continuous-time representation for modeling the assignment of batches to units is used. These formulations require postulating a priori a suitable number of production slots for each unit that integrates the plant, which severely affects the model computational performance. Then, to reduce the computational effort, a solution strategy is proposed where a simplified model, which includes preordering constraints, is first solved. Finally, a detailed scheduling model is posed where the optimal cycle time of simplified model is used as bound for the cycle time and a novel expression for the number of proposed slots for each unit is considered. The strategy is highlighted through examples that show how the computational burden is reduced. 
© 2012 American Chemical Society.}, note = {cited By 6}, keywords = {}, pubstate = {published}, tppubtype = {article} } In this work, mixed integer linear programming models for scheduling multistage multiproduct batch plants operating under campaign mode are proposed. It is assumed that each plant stage includes identical parallel units operating out of phase. Given the plant topology and the number of batches of each product to be processed in the campaign, the objective is assigning batches to units in each stage in order to minimize the cycle time of the campaign. An asynchronous slot-based continuous-time representation for modeling the assignment of batches to units is used. These formulations require postulating a priori a suitable number of production slots for each unit that integrates the plant, which severely affects the model computational performance. Then, to reduce the computational effort, a solution strategy is proposed where a simplified model, which includes preordering constraints, is first solved. Finally, a detailed scheduling model is posed where the optimal cycle time of simplified model is used as bound for the cycle time and a novel expression for the number of proposed slots for each unit is considered. The strategy is highlighted through examples that show how the computational burden is reduced. © 2012 American Chemical Society. |
Y. Fumero, J.M. Montagna, G. Corsano Simultaneous design and scheduling of a semicontinuous/batch plant for ethanol and derivatives production (Artículo de revista) Computers and Chemical Engineering, 36 (1), pp. 342-357, 2012, (cited By 6). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Fumero2012342, title = {Simultaneous design and scheduling of a semicontinuous/batch plant for ethanol and derivatives production}, author = { Y. Fumero and J.M. Montagna and G. Corsano}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-82055191632&partnerID=40&md5=2fb72df875edd5838816eb8322b98105}, doi = {10.1016/j.compchemeng.2011.08.004}, year = {2012}, date = {2012-01-01}, journal = {Computers and Chemical Engineering}, volume = {36}, number = {1}, pages = {342-357}, abstract = {The interest on renewable fuels has greatly increased in the last years. Particularly, ethanol production arises as a good solution to many current economic-environmental problems. Yeast production from the ethanol residuals constitutes a sustainable alternative. Usually, this kind of plants is designed using single product campaigns. However, since yeast degradation is fast and a continuous supply must be assured, the mixed product campaign policy is the most appropriate. Besides, a stable context can be assumed to justify this approach that takes advantage of the special structure of the plant. Therefore, in this paper, a mixed integer linear programming model is formulated for simultaneous design and scheduling of a semicontinuous/batch plant for ethanol and derivatives production. The optimal plant configuration, unit sizes, number of batches of each product in the campaign and its sequencing is obtained in order to fulfill the ethanol and yeast demands minimizing the investment cost. © 2011 Elsevier Ltd.}, note = {cited By 6}, keywords = {}, pubstate = {published}, tppubtype = {article} } The interest on renewable fuels has greatly increased in the last years. 
Particularly, ethanol production arises as a good solution to many current economic-environmental problems. Yeast production from the ethanol residuals constitutes a sustainable alternative. Usually, this kind of plants is designed using single product campaigns. However, since yeast degradation is fast and a continuous supply must be assured, the mixed product campaign policy is the most appropriate. Besides, a stable context can be assumed to justify this approach that takes advantage of the special structure of the plant. Therefore, in this paper, a mixed integer linear programming model is formulated for simultaneous design and scheduling of a semicontinuous/batch plant for ethanol and derivatives production. The optimal plant configuration, unit sizes, number of batches of each product in the campaign and its sequencing is obtained in order to fulfill the ethanol and yeast demands minimizing the investment cost. © 2011 Elsevier Ltd. |
R.M. Lima, M.G. Marcovecchio, A. Novais A global optimization approach for the short-term scheduling of hydro power generation (Conferencia) 2012, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @conference{Lima2012, title = {A global optimization approach for the short-term scheduling of hydro power generation}, author = { R.M. Lima and M.G. Marcovecchio and A. Novais}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84872854701&partnerID=40&md5=caf934e7448adb0113e5caaf89711460}, year = {2012}, date = {2012-01-01}, journal = {AIChE 2012 - 2012 AIChE Annual Meeting, Conference Proceedings}, abstract = {Short-term scheduling of a cascade of hydro plants was optimized using detailed and accurate models, and a deterministic global optimization approach to deal with the non-convexities. Cascades were represented by integrated systems of hydro plants involving plants with reservoirs and plants without the capacity to store water, the so-called run-off-the-river plants. To find the global optimum within a pre-specified tolerance, a Branch and Bound framework was proposed, which is based on the solution of an MILP overestimator model and of a MINLP model in each node of the tree. The MILP model provided a tight linear overestimation of the non-convex region of the original MINLP problem and thus a valid upper bound of the objective function on each node of the Branch and Bound framework. To assess the performance of the proposed framework, three case studies consisting on cascades with a different number of hydro plants and turbines with varied specifications are presented. The proposed MILP over estimator led to a tighter upper bound, and then small gaps between the upper and lower bound could be achieved. 
This is an abstract of a paper presented at the AIChE Annual Meeting (Pittsburgh, PA 10/28/2012-11/2/2012).}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Short-term scheduling of a cascade of hydro plants was optimized using detailed and accurate models, and a deterministic global optimization approach to deal with the non-convexities. Cascades were represented by integrated systems of hydro plants involving plants with reservoirs and plants without the capacity to store water, the so-called run-off-the-river plants. To find the global optimum within a pre-specified tolerance, a Branch and Bound framework was proposed, which is based on the solution of an MILP overestimator model and of a MINLP model in each node of the tree. The MILP model provided a tight linear overestimation of the non-convex region of the original MINLP problem and thus a valid upper bound of the objective function on each node of the Branch and Bound framework. To assess the performance of the proposed framework, three case studies consisting of cascades with a different number of hydro plants and turbines with varied specifications are presented. The proposed MILP overestimator led to a tighter upper bound, and then small gaps between the upper and lower bound could be achieved. This is an abstract of a paper presented at the AIChE Annual Meeting (Pittsburgh, PA 10/28/2012-11/2/2012). |
O.C. Martinez, S. Gonnet, H.P. Leone, N. Díaz Product feasibility verification in software product line (Conferencia) 2012, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @conference{Martinez2012, title = {Product feasibility verification in software product line}, author = { O.C. Martinez and S. Gonnet and H.P. Leone and N. Díaz}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84874300121&partnerID=40&md5=4c494868876ac75eb91291e2f8ddbd0f}, doi = {10.1109/CLEI.2012.6427224}, year = {2012}, date = {2012-01-01}, journal = {38th Latin America Conference on Informatics, CLEI 2012 - Conference Proceedings}, abstract = {Nowadays, the software product lines (SPLs) are one of the production paradigm to improve the performance of the software companies. Nevertheless, SPLs have some complexity issues like the verification of product feasibility that have to be address. SPLs frequently use Orthogonal Variability Models (OVMs) to express variability in their portfolio of products. In our approach we use Petri Nets to represent and analyze OVMs and we focus on the decisions regarding with the inclusion and exclusion of variation points and variants. This approach offers a more comprehensive knowledge about the activities sequence necessary to build a product in a SPLs context. We adopt an event/condition perspective to model the dependencies using Petri nets formalism. The Petri nets bring us the capability to simulate the dynamic behavior of systems and to use many properties of them to avoid the product feasibility problems aforementioned. © 2012 IEEE.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Nowadays, the software product lines (SPLs) are one of the production paradigm to improve the performance of the software companies. Nevertheless, SPLs have some complexity issues like the verification of product feasibility that have to be address. 
SPLs frequently use Orthogonal Variability Models (OVMs) to express variability in their portfolio of products. In our approach we use Petri Nets to represent and analyze OVMs and we focus on the decisions regarding the inclusion and exclusion of variation points and variants. This approach offers a more comprehensive knowledge about the activities sequence necessary to build a product in a SPLs context. We adopt an event/condition perspective to model the dependencies using Petri nets formalism. The Petri nets bring us the capability to simulate the dynamic behavior of systems and to use many properties of them to avoid the product feasibility problems aforementioned. © 2012 IEEE. |
M.D.L.M. Gutierrez, H.P. Leone DE 2M: An environment for developing distributed and executable enterprise models (Artículo de revista) Advances in Engineering Software, 47 (1), pp. 80-103, 2012, (cited By 1). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Gutierrez201280, title = {DE 2M: An environment for developing distributed and executable enterprise models}, author = { M.D.L.M. Gutierrez and H.P. Leone}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84855796687&partnerID=40&md5=ff2c56c5fe752d7c3baa0ccfef570939}, doi = {10.1016/j.advengsoft.2011.12.002}, year = {2012}, date = {2012-01-01}, journal = {Advances in Engineering Software}, volume = {47}, number = {1}, pages = {80-103}, abstract = {The distributed and executable enterprise models are one of the most important sources of an organization's information requirements where the business expert has not only an appropriate representation of the organization in terms of processes, information flows and user roles, but also a simulation capability for the interpretation of the dynamic behavior. We present an environment to support the development of such a model. It uses a MDA approach to acquire the simulation model from conceptual model. The simulation model can run both distributed and local environment. © 2011 Elsevier Ltd. All rights reserved.}, note = {cited By 1}, keywords = {}, pubstate = {published}, tppubtype = {article} } The distributed and executable enterprise models are one of the most important sources of an organization's information requirements where the business expert has not only an appropriate representation of the organization in terms of processes, information flows and user roles, but also a simulation capability for the interpretation of the dynamic behavior. We present an environment to support the development of such a model. It uses a MDA approach to acquire the simulation model from conceptual model. 
The simulation model can run in both distributed and local environments. © 2011 Elsevier Ltd. All rights reserved. |
2011 |
M.D. Paula, E.C. Martínez Simulation-based dynamic optimization of discretely controlled continuous processes (Artículo de revista) Computer Aided Chemical Engineering, 29 , pp. 543-546, 2011, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Paula2011543, title = {Simulation-based dynamic optimization of discretely controlled continuous processes}, author = { M.D. Paula and E.C. Martínez}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79959278575&partnerID=40&md5=e1b16f5fb5715a0e1180ebfaa88dc0b9}, doi = {10.1016/B978-0-444-53711-9.50109-7}, year = {2011}, date = {2011-01-01}, journal = {Computer Aided Chemical Engineering}, volume = {29}, pages = {543-546}, abstract = {Discretely controlled continuous processes (DCCPs) is a special type of hybrid dynamical systems which is of great practical relevance. In this work, a novel simulation-based approach to dynamic optimization under uncertainty of DCCPs is proposed using multi-modal Gaussian Process Dynamic Programming (mGPDP). A remarkable advantage of the proposed approach is that instead of resorting to a global metamodel, which is very inefficient, mGPDP uses probabilistic models (Gaussian Processes) to simultaneously learn the transition dynamics descriptive of mode execution and to represent the optimal control policy for mode switching. Throughput maximization and smoothness in a typical PVC production line in the face of significant schedule variability due to resource sharing is used as a case study. © 2011 Elsevier B.V.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {article} } Discretely controlled continuous processes (DCCPs) is a special type of hybrid dynamical systems which is of great practical relevance. In this work, a novel simulation-based approach to dynamic optimization under uncertainty of DCCPs is proposed using multi-modal Gaussian Process Dynamic Programming (mGPDP). 
A remarkable advantage of the proposed approach is that instead of resorting to a global metamodel, which is very inefficient, mGPDP uses probabilistic models (Gaussian Processes) to simultaneously learn the transition dynamics descriptive of mode execution and to represent the optimal control policy for mode switching. Throughput maximization and smoothness in a typical PVC production line in the face of significant schedule variability due to resource sharing is used as a case study. © 2011 Elsevier B.V. |
E.C. Martínez, M. Cristaldi, R. Grau, J. Lopes Dynamic optimization of bioreactors using probabilistic tendency models and Bayesian active learning (Artículo de revista) Computer Aided Chemical Engineering, 29 , pp. 783-787, 2011, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Martínez2011783, title = {Dynamic optimization of bioreactors using probabilistic tendency models and Bayesian active learning}, author = { E.C. Martínez and M. Cristaldi and R. Grau and J. Lopes}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79958819421&partnerID=40&md5=2271084253fa85ae90363f1b5905499d}, doi = {10.1016/B978-0-444-53711-9.50157-7}, year = {2011}, date = {2011-01-01}, journal = {Computer Aided Chemical Engineering}, volume = {29}, pages = {783-787}, abstract = {First-principles models of fermentation processes typically have built-in errors in the form of structural mismatch and parametric uncertainty. A model-based optimization approach for run-to-run improvement under uncertainty of fed-batch bioreactors by integrating probabilistic tendency models with Bayesian inference is proposed. Probabilistic models grounded on first principles are used in the design of dynamic experiments to bias data gathering towards the subspace of most promising operating conditions. Results obtained in the fed-batch fermentation of penicillin G are presented. © 2011 Elsevier B.V.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {article} } First-principles models of fermentation processes typically have built-in errors in the form of structural mismatch and parametric uncertainty. A model-based optimization approach for run-to-run improvement under uncertainty of fed-batch bioreactors by integrating probabilistic tendency models with Bayesian inference is proposed. 
Probabilistic models grounded on first principles are used in the design of dynamic experiments to bias data gathering towards the subspace of most promising operating conditions. Results obtained in the fed-batch fermentation of penicillin G are presented. © 2011 Elsevier B.V. |
S. Syafiie, F. Tadeo, E.C. Martínez, T. Alvarez Model-free control based on reinforcement learning for a wastewater treatment problem (Artículo de revista) Applied Soft Computing Journal, 11 (1), pp. 73-82, 2011, (cited By 10). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Syafiie201173, title = {Model-free control based on reinforcement learning for a wastewater treatment problem}, author = { S. Syafiie and F. Tadeo and E.C. Martínez and T. Alvarez}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-77957930453&partnerID=40&md5=4151cf20fe46344415b3903cc2d52e53}, doi = {10.1016/j.asoc.2009.10.018}, year = {2011}, date = {2011-01-01}, journal = {Applied Soft Computing Journal}, volume = {11}, number = {1}, pages = {73-82}, abstract = {This article presents a proposal, based on the model-free learning control (MFLC) approach, for the control of the advanced oxidation process in wastewater plants. This is prompted by the fact that many organic pollutants in industrial wastewaters are resistant to conventional biological treatments, and the fact that advanced oxidation processes, controlled with learning controllers measuring the oxidation-reduction potential (ORP), give a cost-effective solution. The proposed automation strategy denoted MFLC-MSA is based on the integration of reinforcement learning with multiple step actions. This enables the most adequate control strategy to be learned directly from the process response to selected control inputs. Thus, the proposed methodology is satisfactory for oxidation processes of wastewater treatment plants, where the development of an adequate model for control design is usually too costly. The algorithm proposed has been tested in a lab pilot plant, where phenolic wastewater is oxidized to carboxylic acids and carbon dioxide. 
The obtained experimental results show that the proposed MFLC-MSA strategy can achieve good performance to guarantee on-specification discharge at maximum degradation rate using readily available measurements such as pH and ORP, inferential measurements of oxidation kinetics and peroxide consumption, respectively. © 2010 Elsevier B.V. All rights reserved.}, note = {cited By 10}, keywords = {}, pubstate = {published}, tppubtype = {article} } This article presents a proposal, based on the model-free learning control (MFLC) approach, for the control of the advanced oxidation process in wastewater plants. This is prompted by the fact that many organic pollutants in industrial wastewaters are resistant to conventional biological treatments, and the fact that advanced oxidation processes, controlled with learning controllers measuring the oxidation-reduction potential (ORP), give a cost-effective solution. The proposed automation strategy denoted MFLC-MSA is based on the integration of reinforcement learning with multiple step actions. This enables the most adequate control strategy to be learned directly from the process response to selected control inputs. Thus, the proposed methodology is satisfactory for oxidation processes of wastewater treatment plants, where the development of an adequate model for control design is usually too costly. The algorithm proposed has been tested in a lab pilot plant, where phenolic wastewater is oxidized to carboxylic acids and carbon dioxide. The obtained experimental results show that the proposed MFLC-MSA strategy can achieve good performance to guarantee on-specification discharge at maximum degradation rate using readily available measurements such as pH and ORP, inferential measurements of oxidation kinetics and peroxide consumption, respectively. © 2010 Elsevier B.V. All rights reserved. |
C.D. Fischer, O.A. Iribarren Mass integration as a design heuristic: Improvements in the HDA process (Artículo de revista) Industrial and Engineering Chemistry Research, 50 (22), pp. 12664-12677, 2011, (cited By 5). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Fischer201112664, title = {Mass integration as a design heuristic: Improvements in the HDA process}, author = { C.D. Fischer and O.A. Iribarren}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-80855131767&partnerID=40&md5=d784ffa82198856046ddf4259cd3cc51}, doi = {10.1021/ie2013554}, year = {2011}, date = {2011-01-01}, journal = {Industrial and Engineering Chemistry Research}, volume = {50}, number = {22}, pages = {12664-12677}, abstract = {This paper explores mass exchanging the outlet and inlet streams of a reactor, as a design heuristic within the hierarchical process design procedure by Douglas [AIChE J. 1985, 31 (3), 353-361 and Conceptual Design of Chemical Processes; McGraw-Hill, 1988], who worked on the HDA process to test the proposal. The heuristic is used at an early stage of the hierarchy, when deciding the recycle and separation system structure of the process. If the reaction requires operating conditions with reactants in excess or that catalyze the reaction, which must be removed after the reaction, there is a concentration gradient between the inlet and outlet streams that may be used as the driving force in a mass exchanger (if such a device is available for the particular case). When applied to the HDA process, this methodology generated alternatives different from the previously proposed by other authors by resorting to a ceramic membrane gas permeation unit to perform the mass exchange of hydrogen. The performance of applying the heuristic was tested comparing the flow sheets proposed by several authors with and without this mass exchanger. 
The success of implementing this mass exchange networks synthesis concept was dependent on the concentration of the component to be transferred in the rich stream (i.e., it works if there is an appropriate driving force). © 2011 American Chemical Society.}, note = {cited By 5}, keywords = {}, pubstate = {published}, tppubtype = {article} } This paper explores mass exchanging the outlet and inlet streams of a reactor, as a design heuristic within the hierarchical process design procedure by Douglas [AIChE J. 1985, 31 (3), 353-361 and Conceptual Design of Chemical Processes; McGraw-Hill, 1988], who worked on the HDA process to test the proposal. The heuristic is used at an early stage of the hierarchy, when deciding the recycle and separation system structure of the process. If the reaction requires operating conditions with reactants in excess or that catalyze the reaction, which must be removed after the reaction, there is a concentration gradient between the inlet and outlet streams that may be used as the driving force in a mass exchanger (if such a device is available for the particular case). When applied to the HDA process, this methodology generated alternatives different from the previously proposed by other authors by resorting to a ceramic membrane gas permeation unit to perform the mass exchange of hydrogen. The performance of applying the heuristic was tested comparing the flow sheets proposed by several authors with and without this mass exchanger. The success of implementing this mass exchange networks synthesis concept was dependent on the concentration of the component to be transferred in the rich stream (i.e., it works if there is an appropriate driving force). © 2011 American Chemical Society. |
C.D. Fischer, O.A. Iribarren Synthesis of a mass integrated biodiesel process (Artículo de revista) Industrial and Engineering Chemistry Research, 50 (11), pp. 6849-6859, 2011, (cited By 8). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Fischer20116849, title = {Synthesis of a mass integrated biodiesel process}, author = { C.D. Fischer and O.A. Iribarren}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79959994118&partnerID=40&md5=e0cf73e205a0f55d8122c2be16e5565c}, doi = {10.1021/ie102595x}, year = {2011}, date = {2011-01-01}, journal = {Industrial and Engineering Chemistry Research}, volume = {50}, number = {11}, pages = {6849-6859}, abstract = {A hierarchical decision procedure is proposed for the synthesis of processes that are mass integrated to an existing industrial environment. The procedure takes the information about already installed processes as input. Decisions required to complete the design of the new process are then made considering this environment. The procedure decides among alternative technologies for producing the new product and on the use of output streams from the environment as inputs to the new process and vice versa. It also decides whether this use should be done through new processing facilities to condition these streams. The hierarchical decision procedure described by Douglas was adapted to include these mass integration decisions and to resort to mass exchange network synthesis and source-sink allocation procedures at the appropriate level of the hierarchy. The procedure is illustrated by designing a new biodiesel process, integrated to the agro-industrial complex already existing in the city of Reconquista, Argentina. The alternative technologies considered for producing biodiesel are acid, basic, and enzymatic catalyzed transesterification processes. The considered raw materials are a pretreated stream from an edible oils refinery and residual fatty streams from other factories, with high free fatty acid content. 
The acid raw materials could be converted to biodiesel either directly through an acid process or after an esterification treatment through the same basic process that is utilized for good quality edible oils raw material. The produced biodiesel could be sold to oil companies or to the local agro-machinery business. The raw glycerin byproduct could be sold as glycerin after purification, fermented to ethanol in a cereals bioethanol factory, used as fuel in properly designed boilers, or used in the formulation of cattle food in local feed lots. This paper discusses (i) the trade-off between implementing mass exchangers or separation processes in the case of methanol added in excess and alkali added as a catalyst in the reaction step, (ii) the adaptation of the standard source sink allocation approach adding constraints of the diet problem, and (iii) the order of decisions in the hierarchy, which strongly affects the programming effort to solve this synthesis and design problem. Results analysis shows that both the economic and environmental performance of the biodiesel process is significantly improved when taking into account the local industrial environment of the new plant. Furthermore, as the integrated plant is able to process multiple raw materials and offer alternative product specifications to end consumers, its operation is adjustable to changing market conditions. © 2011 American Chemical Society.}, note = {cited By 8}, keywords = {}, pubstate = {published}, tppubtype = {article} } A hierarchical decision procedure is proposed for the synthesis of processes that are mass integrated to an existing industrial environment. The procedure takes the information about already installed processes as input. Decisions required to complete the design of the new process are then made considering this environment. 
The procedure decides among alternative technologies for producing the new product and on the use of output streams from the environment as inputs to the new process and vice versa. It also decides whether this use should be done through new processing facilities to condition these streams. The hierarchical decision procedure described by Douglas was adapted to include these mass integration decisions and to resort to mass exchange network synthesis and source-sink allocation procedures at the appropriate level of the hierarchy. The procedure is illustrated by designing a new biodiesel process, integrated to the agro-industrial complex already existing in the city of Reconquista, Argentina. The alternative technologies considered for producing biodiesel are acid, basic, and enzymatic catalyzed transesterification processes. The considered raw materials are a pretreated stream from an edible oils refinery and residual fatty streams from other factories, with high free fatty acid content. The acid raw materials could be converted to biodiesel either directly through an acid process or after an esterification treatment through the same basic process that is utilized for good quality edible oils raw material. The produced biodiesel could be sold to oil companies or to the local agro-machinery business. The raw glycerin byproduct could be sold as glycerin after purification, fermented to ethanol in a cereals bioethanol factory, used as fuel in properly designed boilers, or used in the formulation of cattle food in local feed lots. This paper discusses (i) the trade-off between implementing mass exchangers or separation processes in the case of methanol added in excess and alkali added as a catalyst in the reaction step, (ii) the adaptation of the standard source sink allocation approach adding constraints of the diet problem, and (iii) the order of decisions in the hierarchy, which strongly affects the programming effort to solve this synthesis and design problem. 
Results analysis shows that both the economic and environmental performance of the biodiesel process is significantly improved when taking into account the local industrial environment of the new plant. Furthermore, as the integrated plant is able to process multiple raw materials and offer alternative product specifications to end consumers, its operation is adjustable to changing market conditions. © 2011 American Chemical Society. |
E.R. Henquín, J.M. Bisang Performance of a multipurpose research electrochemical reactor (Artículo de revista) Electrochimica Acta, 56 (17), pp. 5926-5933, 2011, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Henquín20115926, title = {Performance of a multipurpose research electrochemical reactor}, author = { E.R. Henquín and J.M. Bisang}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79958775030&partnerID=40&md5=02fe289baebcd459a5d09b7be743523f}, doi = {10.1016/j.electacta.2011.04.115}, year = {2011}, date = {2011-01-01}, journal = {Electrochimica Acta}, volume = {56}, number = {17}, pages = {5926-5933}, abstract = {This paper reports on a multipurpose research electrochemical reactor with an innovative design feature, which is based on a filter press arrangement with inclined segmented electrodes and under a modular assembly. Under bipolar connection, the fraction of leakage current is lower than 4%, depending on the bipolar Wagner number, and the current distribution is closely uniform. When a turbulence promoter is used, the local mass-transfer coefficient shows a variation of ±10% with respect to its mean value. The fluidodynamics of the reactor responds to the dispersion model with a Peclet number higher than 10. It is concluded that this reactor is convenient for laboratory research. © 2011 Elsevier Ltd. All Rights Reserved.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {article} } This paper reports on a multipurpose research electrochemical reactor with an innovative design feature, which is based on a filter press arrangement with inclined segmented electrodes and under a modular assembly. Under bipolar connection, the fraction of leakage current is lower than 4%, depending on the bipolar Wagner number, and the current distribution is closely uniform. When a turbulence promoter is used, the local mass-transfer coefficient shows a variation of ±10% with respect to its mean value. 
The fluidodynamics of the reactor responds to the dispersion model with a Peclet number higher than 10. It is concluded that this reactor is convenient for laboratory research. © 2011 Elsevier Ltd. All Rights Reserved. |
K.A. Torres, J. Espinosa Influence of tangent pinch points on the energy demand of batch distillations: Development of a conceptual model for binary mixtures (Artículo de revista) Industrial and Engineering Chemistry Research, 50 (10), pp. 6260-6275, 2011, (cited By 1). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Torres20116260, title = {Influence of tangent pinch points on the energy demand of batch distillations: Development of a conceptual model for binary mixtures}, author = { K.A. Torres and J. Espinosa}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79955879179&partnerID=40&md5=3de44ceeec45468dbfa92c83de6ec60d}, doi = {10.1021/ie101894q}, year = {2011}, date = {2011-01-01}, journal = {Industrial and Engineering Chemistry Research}, volume = {50}, number = {10}, pages = {6260-6275}, abstract = {This contribution explores the influence of tangent pinch points on the performance of batch distillations of highly nonideal binary mixtures and its incorporation into a conceptual modeling framework under the assumption of a rectifier with an infinite number of stages. First of all, the y-x diagram is divided into three regions taking into account both the inflection point (IP) in the phase diagram and the slope of the equilibrium curve at xD = 1. After that, limiting values for the reflux ratio and distillate composition; namely, r* and xD*, are calculated for the instantaneous still composition xB with the aid of region-dependent algorithms, which incorporate the tangency condition in different ways. Finally, the instantaneous column performance is estimated taking into account the selected operation mode; i.e., constant reflux ratio or constant distillate composition. Applied to the mixture acetone-water, results for complete simulations are presented in terms of both rectification advance and operation time. 
A brief comparative study on the minimum energy demand required for a given separation as estimated from models with and without tangent pinches is also carried out. © 2011 American Chemical Society.}, note = {cited By 1}, keywords = {}, pubstate = {published}, tppubtype = {article} } This contribution explores the influence of tangent pinch points on the performance of batch distillations of highly nonideal binary mixtures and its incorporation into a conceptual modeling framework under the assumption of a rectifier with an infinite number of stages. First of all, the y-x diagram is divided into three regions taking into account both the inflection point (IP) in the phase diagram and the slope of the equilibrium curve at xD = 1. After that, limiting values for the reflux ratio and distillate composition; namely, r* and xD*, are calculated for the instantaneous still composition xB with the aid of region-dependent algorithms, which incorporate the tangency condition in different ways. Finally, the instantaneous column performance is estimated taking into account the selected operation mode; i.e., constant reflux ratio or constant distillate composition. Applied to the mixture acetone-water, results for complete simulations are presented in terms of both rectification advance and operation time. A brief comparative study on the minimum energy demand required for a given separation as estimated from models with and without tangent pinches is also carried out. © 2011 American Chemical Society. |
M.A. Sosa, J. Espinosa Feasibility analysis of isopropanol recovery by hybrid distillation/ pervaporation process with the aid of conceptual models (Artículo de revista) Separation and Purification Technology, 78 (2), pp. 237-244, 2011, (cited By 9). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Sosa2011237, title = {Feasibility analysis of isopropanol recovery by hybrid distillation/ pervaporation process with the aid of conceptual models}, author = { M.A. Sosa and J. Espinosa}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79953065432&partnerID=40&md5=9499bd9d70634d2e980abd7af2beaad1}, doi = {10.1016/j.seppur.2011.02.009}, year = {2011}, date = {2011-01-01}, journal = {Separation and Purification Technology}, volume = {78}, number = {2}, pages = {237-244}, abstract = {In this contribution, main results of a techno-economic feasibility study to recover spent isopropyl alcohol (IPA) from a pre-treated waste stream composed by IPA (95.64 wt.%) and water (4.36 wt.%) are presented. Based on conceptual models for the unit operations, a quasi-optimal design for a hybrid process combining pervaporation and distillation is found under process specifications given by a pharmaceutical company. The proposed procedure allows a separated design of each unit with the aid of conceptual models. While distillation is evaluated from pinch theory, the conceptual model for pervaporation considers that the maximum driving force (i.e., no liquid temperature drop) is maintained across the membrane unit. A brief performance comparison for different membranes is also performed as part of the assessment to the company. For this purpose, the pervaporation separation index (PSI index) defined as the product of the permeate mass flux and the separation factor was used for membranes for which either literature data or membrane supplier brochures were available. 
In the case of the membrane PERVAP 2216 from Sulzer, several pervaporation experiments at 80 °C and permeate pressure of 1.52 kPa were carried out. The PSI index was then redefined as the overall separation factor times the inverse of the minimum membrane area required to perform a given separation. The results obtained emphasize the usefulness of conceptual modeling in all steps of process design. © 2011 Elsevier B.V. All rights reserved.}, note = {cited By 9}, keywords = {}, pubstate = {published}, tppubtype = {article} } In this contribution, main results of a techno-economic feasibility study to recover spent isopropyl alcohol (IPA) from a pre-treated waste stream composed by IPA (95.64 wt.%) and water (4.36 wt.%) are presented. Based on conceptual models for the unit operations, a quasi-optimal design for a hybrid process combining pervaporation and distillation is found under process specifications given by a pharmaceutical company. The proposed procedure allows a separated design of each unit with the aid of conceptual models. While distillation is evaluated from pinch theory, the conceptual model for pervaporation considers that the maximum driving force (i.e., no liquid temperature drop) is maintained across the membrane unit. A brief performance comparison for different membranes is also performed as part of the assessment to the company. For this purpose, the pervaporation separation index (PSI index) defined as the product of the permeate mass flux and the separation factor was used for membranes for which either literature data or membrane supplier brochures were available. In the case of the membrane PERVAP 2216 from Sulzer, several pervaporation experiments at 80 °C and permeate pressure of 1.52 kPa were carried out. The PSI index was then redefined as the overall separation factor times the inverse of the minimum membrane area required to perform a given separation. 
The results obtained emphasize the usefulness of conceptual modeling in all steps of process design. © 2011 Elsevier B.V. All rights reserved. |
C.M. Toledo, M.A. Ale, O.J. Chiotti, M.R. Galli An ontology-driven document retrieval strategy for organizational knowledge management systems (Artículo de revista) Electronic Notes in Theoretical Computer Science, 281 , pp. 21-34, 2011, (cited By 6). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Toledo201121, title = {An ontology-driven document retrieval strategy for organizational knowledge management systems}, author = { C.M. Toledo and M.A. Ale and O.J. Chiotti and M.R. Galli}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84855255338&partnerID=40&md5=f90aaadf2e1107abe235d8b56974ddbc}, doi = {10.1016/j.entcs.2011.11.023}, year = {2011}, date = {2011-01-01}, journal = {Electronic Notes in Theoretical Computer Science}, volume = {281}, pages = {21-34}, abstract = {Enterprises are inserted in a competitive environment in which knowledge is vital to survive in the current global market. Competition is no longer conceived as it was in traditional markets. In this global market, knowledge is considered an asset that has an economic value for an organization and a strategic resource used to increase productivity and offer stability in dynamic competitive environments. Such significance of knowledge implies the need for protecting this vital resource by safeguarding the right access, its persistence over time, and its adequate retrieval. In this work, we propose an organizational memory architecture, and annotation and retrieval information strategies based on domain ontologies that take in account complex words to retrieve information through natural language queries. To test these strategies, we implemented a flexible framework to experiment with knowledge retrieval approaches. Finally, experimental results are evaluated and analyzed through standard measures. 
© 2011 Elsevier B.V.}, note = {cited By 6}, keywords = {}, pubstate = {published}, tppubtype = {article} } Enterprises are inserted in a competitive environment in which knowledge is vital to survive in the current global market. Competition is no longer conceived as it was in traditional markets. In this global market, knowledge is considered an asset that has an economic value for an organization and a strategic resource used to increase productivity and offer stability in dynamic competitive environments. Such significance of knowledge implies the need for protecting this vital resource by safeguarding the right access, its persistence over time, and its adequate retrieval. In this work, we propose an organizational memory architecture, and annotation and retrieval information strategies based on domain ontologies that take into account complex words to retrieve information through natural language queries. To test these strategies, we implemented a flexible framework to experiment with knowledge retrieval approaches. Finally, experimental results are evaluated and analyzed through standard measures. © 2011 Elsevier B.V. |
I.M. Lazarte, P.D. Villarreal, O.J. Chiotti, L.H. Thom, C. Iochpe An MDA-based method for designing integration process models in B2B collaborations (Conferencia) 3 ISAS , 2011, (cited By 4). (Resumen | Enlaces | BibTeX | Etiquetas: ) @conference{Lazarte201155, title = {An MDA-based method for designing integration process models in B2B collaborations}, author = { I.M. Lazarte and P.D. Villarreal and O.J. Chiotti and L.H. Thom and C. Iochpe}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84865123841&partnerID=40&md5=9429a65fb734638e94a9e1aa73232c24}, year = {2011}, date = {2011-01-01}, journal = {ICEIS 2011 - Proceedings of the 13th International Conference on Enterprise Information Systems}, volume = {3 ISAS}, pages = {55-65}, abstract = {The design of integration processes is a key issue for implementing collaborative business processes in Business-to-Business collaborations. A collaborative process is executed through the enactment of the integration process of each organization, which contains the public and private logic required to support the role an organization performs in the collaborative process. Integration process models must be aligned with and derived from their corresponding collaborative process models to guarantee interoperability among organizations. In this work, we propose a method based on a Model-Driven Architecture to enable organizations to support and automate the design of integration process models. This method provides a model transformation process that uses Workflow Activity Patterns to generate the public/private activities required in integration processes to support cross-organizational message exchanges.}, note = {cited By 4}, keywords = {}, pubstate = {published}, tppubtype = {conference} } The design of integration processes is a key issue for implementing collaborative business processes in Business-to-Business collaborations. 
A collaborative process is executed through the enactment of the integration process of each organization, which contains the public and private logic required to support the role an organization performs in the collaborative process. Integration process models must be aligned with and derived from their corresponding collaborative process models to guarantee interoperability among organizations. In this work, we propose a method based on a Model-Driven Architecture to enable organizations to support and automate the design of integration process models. This method provides a model transformation process that uses Workflow Activity Patterns to generate the public/private activities required in integration processes to support cross-organizational message exchanges. |
L.J.R. Stroppi, O.J. Chiotti, P.D. Villarreal A BPMN 2.0 extension to define the resource perspective of business process models (Conferencia) 2011, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @conference{Stroppi201125, title = {A BPMN 2.0 extension to define the resource perspective of business process models}, author = { L.J.R. Stroppi and O.J. Chiotti and P.D. Villarreal}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84886646613&partnerID=40&md5=bc81e39c9e9f88604a45e81ce9b26808}, year = {2011}, date = {2011-01-01}, journal = {14th Ibero-American Conference on Software Engineering and 14th Workshop on Requirements Engineering, CIbSE 2011}, pages = {25-38}, abstract = {One of the primary motivations of BPMN is to provide an easy to understand standard way to define and visualize business process models. This goal is addressed with respect to the control flow perspective. However, the support it provides to the resource perspective is poor. This work presents an extension to the BPMN 2.0 metamodel and notation to support the modeling and visualization of resource perspective requirements. It considers three aspects of the resource perspective: resource structure, authorization and work distribution. This extension is validated against the Workflow Resource Patterns that define recurrent requirements regarding this perspective. The aim of this BPMN extension is to improve the communication of the resource perspective requirements between business analysts and technical developers.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {conference} } One of the primary motivations of BPMN is to provide an easy to understand standard way to define and visualize business process models. This goal is addressed with respect to the control flow perspective. However, the support it provides to the resource perspective is poor. 
This work presents an extension to the BPMN 2.0 metamodel and notation to support the modeling and visualization of resource perspective requirements. It considers three aspects of the resource perspective: resource structure, authorization and work distribution. This extension is validated against the Workflow Resource Patterns that define recurrent requirements regarding this perspective. The aim of this BPMN extension is to improve the communication of the resource perspective requirements between business analysts and technical developers. |
E. Tello-Leal, O.J. Chiotti, P.D. Villarreal Agents for managing Business-to-Business interactions: Software agents for managing business-to-business collaborations (Conferencia) 2 , 2011, (cited By 2). (Resumen | Enlaces | BibTeX | Etiquetas: ) @conference{Tello-Leal2011238, title = {Agents for managing Business-to-Business interactions: Software agents for managing business-to-business collaborations}, author = { E. Tello-Leal and O.J. Chiotti and P.D. Villarreal}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79960130978&partnerID=40&md5=6b799ace246f15c8ab6f6f048ce8e12c}, year = {2011}, date = {2011-01-01}, journal = {ICAART 2011 - Proceedings of the 3rd International Conference on Agents and Artificial Intelligence}, volume = {2}, pages = {238-244}, abstract = {Current market opportunities and the growth of new Internet technologies encourage organizations to dynamically establish Business-to-Business (B2B) collaborations. B2B interactions are carried out by executing collaborative business processes among the parties. In this work we propose B2B collaboration agents for managing B2B interactions that allow organizations to dynamically establish collaborations and execute collaborative processes with their partners. The planning and execution of the actions of the agents that execute collaborative processes are driven by a Petri Net engine embedded in these agents. The role an organization fulfills in a collaborative process is represented by a high-level Petri Net model which is used to drive the behavior of the B2B collaboration agents representing the organization. Moreover, interaction protocols representing collaborative processes are executed by these agents without the need for protocols defined at design-time. 
Finally, an implementation of the B2B agents is presented.}, note = {cited By 2}, keywords = {}, pubstate = {published}, tppubtype = {conference} } Current market opportunities and the growth of new Internet technologies encourage organizations to dynamically establish Business-to-Business (B2B) collaborations. B2B interactions are carried out by executing collaborative business processes among the parties. In this work we propose B2B collaboration agents for managing B2B interactions that allow organizations to dynamically establish collaborations and execute collaborative processes with their partners. The planning and execution of the actions of the agents that execute collaborative processes are driven by a Petri Net engine embedded in these agents. The role an organization fulfills in a collaborative process is represented by a high-level Petri Net model which is used to drive the behavior of the B2B collaboration agents representing the organization. Moreover, interaction protocols representing collaborative processes are executed by these agents without the need for protocols defined at design-time. Finally, an implementation of the B2B agents is presented. |
L.H. Thom, I.M. Lazarte, C. Iochpe, L.M. Priego, C. Verdier, O.J. Chiotti, P.D. Villarreal On the capabilities of BPMN for workflow activity patterns representation (Artículo de revista) Lecture Notes in Business Information Processing, 95 LNBIP , pp. 172-177, 2011, (cited By 0). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Thom2011172, title = {On the capabilities of BPMN for workflow activity patterns representation}, author = { L.H. Thom and I.M. Lazarte and C. Iochpe and L.M. Priego and C. Verdier and O.J. Chiotti and P.D. Villarreal}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-81855217393&partnerID=40&md5=a239e7196dc845dffc09dcde485c988d}, doi = {10.1007/978-3-642-25160-3_18}, year = {2011}, date = {2011-01-01}, journal = {Lecture Notes in Business Information Processing}, volume = {95 LNBIP}, pages = {172-177}, abstract = {This paper provides a complete version of the Workflow Activity Patterns (WAP) in the Business Process Modeling Notation (BPMN) as well as an extended evaluation of the capabilities of BPMN and their strengths and weaknesses when being utilizing for representing WAPs. When implementing the activity patterns in existing Business Process Modeling tools, it is fundamental to represent them in BPMN. This representation may facilitate the adoption of the WAPs by BPMN tools as well as the use of the WAPs in process design. © 2011 Springer-Verlag.}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {article} } This paper provides a complete version of the Workflow Activity Patterns (WAP) in the Business Process Modeling Notation (BPMN) as well as an extended evaluation of the capabilities of BPMN and their strengths and weaknesses when being utilizing for representing WAPs. When implementing the activity patterns in existing Business Process Modeling tools, it is fundamental to represent them in BPMN. 
This representation may facilitate the adoption of the WAPs by BPMN tools as well as the use of the WAPs in process design. © 2011 Springer-Verlag. |
L.J.R. Stroppi, O.J. Chiotti, P.D. Villarreal Extending BPMN 2.0: Method and tool support (Artículo de revista) Lecture Notes in Business Information Processing, 95 LNBIP , pp. 59-73, 2011, (cited By 9). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Stroppi201159, title = {Extending BPMN 2.0: Method and tool support}, author = { L.J.R. Stroppi and O.J. Chiotti and P.D. Villarreal}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-81855183872&partnerID=40&md5=d36fca5a37acb8f4cc829a9ad39893fe}, doi = {10.1007/978-3-642-25160-3_5}, year = {2011}, date = {2011-01-01}, journal = {Lecture Notes in Business Information Processing}, volume = {95 LNBIP}, pages = {59-73}, abstract = {There are two major pitfalls in the development of extensions to the BPMN 2.0 metamodel. First, there is a lack of methodological guides considering the extensibility approach supported by the extension mechanism of the language. Second, BPMN does not provide any graphical notation for the representation of extensions. This work proposes a method based on Model-Driven Architecture for the development of extensions to the BPMN 2.0 metamodel. It enables the conceptual modeling of extensions by using UML, their graphical representation in terms of the BPMN extension mechanism, and their transformation into XML Schema documents that can be processed by BPMN tools. A tool supporting the proposed method is also presented. © 2011 Springer-Verlag.}, note = {cited By 9}, keywords = {}, pubstate = {published}, tppubtype = {article} } There are two major pitfalls in the development of extensions to the BPMN 2.0 metamodel. First, there is a lack of methodological guides considering the extensibility approach supported by the extension mechanism of the language. Second, BPMN does not provide any graphical notation for the representation of extensions. This work proposes a method based on Model-Driven Architecture for the development of extensions to the BPMN 2.0 metamodel. 
It enables the conceptual modeling of extensions by using UML, their graphical representation in terms of the BPMN extension mechanism, and their transformation into XML Schema documents that can be processed by BPMN tools. A tool supporting the proposed method is also presented. © 2011 Springer-Verlag. |
M. Vegetti, S. Gonnet, H.P. Leone, G.P. Henning Ontologies and conceptual models in industrial enterprises and software development processes (Conferencia) 728 , 2011, (cited By 0). (Enlaces | BibTeX | Etiquetas: ) @conference{Vegetti2011, title = {Ontologies and conceptual models in industrial enterprises and software development processes}, author = { M. Vegetti and S. Gonnet and H.P. Leone and G.P. Henning}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-84890607855&partnerID=40&md5=3cd6e9491a8b75c594a575cd9cfcc134}, year = {2011}, date = {2011-01-01}, journal = {CEUR Workshop Proceedings}, volume = {728}, note = {cited By 0}, keywords = {}, pubstate = {published}, tppubtype = {conference} } |
M. Vegetti, H.P. Leone, G. Henning PRONTO: An ontology for comprehensive and consistent representation of product information (Artículo de revista) Engineering Applications of Artificial Intelligence, 24 (8), pp. 1305-1327, 2011, (cited By 14). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Vegetti20111305, title = {PRONTO: An ontology for comprehensive and consistent representation of product information}, author = { M. Vegetti and H.P. Leone and G. Henning}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-80054698407&partnerID=40&md5=9ad734682dc08d1f2c6e92ae85a0385e}, doi = {10.1016/j.engappai.2011.02.014}, year = {2011}, date = {2011-01-01}, journal = {Engineering Applications of Artificial Intelligence}, volume = {24}, number = {8}, pages = {1305-1327}, abstract = {Nowadays, it is quite common for collaborating organizations (or even different areas within a company) to develop and maintain their own product model. This situation leads to information duplication and its associated problems. Besides, traditional product models do not properly handle the high number of variants managed in today competitive markets. In addition, there is a need for an integrated product model to be shared by all the organizations participating in global supply chains (SCs) or all the areas within a company. One way to reach an intelligent integration among product models is by means of an ontology. PRoduct ONTOlogy (PRONTO) is an ontology for the product modeling domain, able to efficiently handle product variants. It defines and integrates two hierarchies to represent product information: the abstraction hierarchy (AH) and the structural one (SH). This contribution presents a ConceptBase formal specification of PRONTO that focuses on the structural hierarchy of products. This hierarchy is a tool to handle product information associated with the multiple available recipes or processes to manufacture a particular product or a set of similar products. 
The formal specification presented in the paper also includes mechanisms to infer structural information from the explicit knowledge represented at each of the AH levels: Family, VariantSet and Product. This proposal efficiently handles a great number of variants and allows representing product information with distinct granularity degrees, which is a requirement for planning activities taking place at different time horizons. PRONTO easily manages crucial features that should be taken into account in a product representation, such as the efficient handling of product families and variants concepts, composition and decomposition structures and the possibility of specifying constraints. To demonstrate the semantic expressiveness of the proposed ontology a food industry related case-study is addressed and discussed in detail. © 2011 Elsevier Ltd. All rights reserved.}, note = {cited By 14}, keywords = {}, pubstate = {published}, tppubtype = {article} } Nowadays, it is quite common for collaborating organizations (or even different areas within a company) to develop and maintain their own product model. This situation leads to information duplication and its associated problems. Besides, traditional product models do not properly handle the high number of variants managed in today competitive markets. In addition, there is a need for an integrated product model to be shared by all the organizations participating in global supply chains (SCs) or all the areas within a company. One way to reach an intelligent integration among product models is by means of an ontology. PRoduct ONTOlogy (PRONTO) is an ontology for the product modeling domain, able to efficiently handle product variants. It defines and integrates two hierarchies to represent product information: the abstraction hierarchy (AH) and the structural one (SH). This contribution presents a ConceptBase formal specification of PRONTO that focuses on the structural hierarchy of products. 
This hierarchy is a tool to handle product information associated with the multiple available recipes or processes to manufacture a particular product or a set of similar products. The formal specification presented in the paper also includes mechanisms to infer structural information from the explicit knowledge represented at each of the AH levels: Family, VariantSet and Product. This proposal efficiently handles a great number of variants and allows representing product information with distinct granularity degrees, which is a requirement for planning activities taking place at different time horizons. PRONTO easily manages crucial features that should be taken into account in a product representation, such as the efficient handling of product families and variants concepts, composition and decomposition structures and the possibility of specifying constraints. To demonstrate the semantic expressiveness of the proposed ontology a food industry related case-study is addressed and discussed in detail. © 2011 Elsevier Ltd. All rights reserved. |
C. Alvez, A.R. Vecchietti Efficiency analysis in content based image retrieval using RDF annotations (Artículo de revista) Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), 7095 LNAI (PART 2), pp. 285-296, 2011, (cited By 3). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Alvez2011285, title = {Efficiency analysis in content based image retrieval using RDF annotations}, author = { C. Alvez and A.R. Vecchietti}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-82555167507&partnerID=40&md5=74292d93113a8fdc75bf5adb396d9e53}, doi = {10.1007/978-3-642-25330-0_25}, year = {2011}, date = {2011-01-01}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, volume = {7095 LNAI}, number = {PART 2}, pages = {285-296}, abstract = {Nowadays it is common to combine low-level and semantic data for image retrieval. The images are stored in databases and computer graphics algorithms are employed to get the pictures. Most of the works consider both aspects separately. In this work, using the capabilities of a commercial ORDBMS a reference architecture was implemented for recovering images, and then a performance analysis is realized using several index types to search some specific semantic data stored in the database via RDF triples. The experiments analyzed the mean recovery time of triples in tables having a hundred of thousands to millions of triples. The performance obtained using Bitmap, B-Tree and Hash Partitioned indexes are analyzed. The results obtained with the experiences performed are implemented in the reference architecture in order to speed up the pattern search. © 2011 Springer-Verlag.}, note = {cited By 3}, keywords = {}, pubstate = {published}, tppubtype = {article} } Nowadays it is common to combine low-level and semantic data for image retrieval. 
The images are stored in databases and computer graphics algorithms are employed to get the pictures. Most of the works consider both aspects separately. In this work, using the capabilities of a commercial ORDBMS a reference architecture was implemented for recovering images, and then a performance analysis is realized using several index types to search some specific semantic data stored in the database via RDF triples. The experiments analyzed the mean recovery time of triples in tables having a hundred of thousands to millions of triples. The performance obtained using Bitmap, B-Tree and Hash Partitioned indexes are analyzed. The results obtained with the experiences performed are implemented in the reference architecture in order to speed up the pattern search. © 2011 Springer-Verlag. |
M.A. Rodriguez, A.R. Vecchietti Multicriteria optimization model for supply process problem under provision and demand uncertainty (Artículo de revista) Industrial and Engineering Chemistry Research, 50 (18), pp. 10630-10642, 2011, (cited By 2). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Rodriguez201110630, title = {Multicriteria optimization model for supply process problem under provision and demand uncertainty}, author = { M.A. Rodriguez and A.R. Vecchietti}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-80052868405&partnerID=40&md5=48910a14d6b9b2ec009cf817d0722a55}, doi = {10.1021/ie2005548}, year = {2011}, date = {2011-01-01}, journal = {Industrial and Engineering Chemistry Research}, volume = {50}, number = {18}, pages = {10630-10642}, abstract = {Supply processes play an important role in customer satisfaction and company costs. The main characteristics of this problem are given by several decisions that follow a hierarchical structure and a very uncertain context, conditioning the success of the solutions proposed. Two significant sources of uncertainty are considered in this work, namely, provision and demand, both modeled as exogenous variables with random behavior. An optimization model is formulated to reduce the effects of the uncertainty in the company supply process. Because of the problem complexity, a multicriteria model is required to bring a comprehensive solution. Several Pareto-optimal solutions are obtained through application of the ε-constraint technique. The original formulation is a nonconvex one that is then transformed to obtain a disjunctive linear model that guarantees a global result. © 2011 American Chemical Society.}, note = {cited By 2}, keywords = {}, pubstate = {published}, tppubtype = {article} } Supply processes play an important role in customer satisfaction and company costs. 
The main characteristics of this problem are given by several decisions that follow a hierarchical structure and a very uncertain context, conditioning the success of the solutions proposed. Two significant sources of uncertainty are considered in this work, namely, provision and demand, both modeled as exogenous variables with random behavior. An optimization model is formulated to reduce the effects of the uncertainty in the company supply process. Because of the problem complexity, a multicriteria model is required to bring a comprehensive solution. Several Pareto-optimal solutions are obtained through application of the ε-constraint technique. The original formulation is a nonconvex one that is then transformed to obtain a disjunctive linear model that guarantees a global result. © 2011 American Chemical Society. |
G. Corsano, A.R. Vecchietti, J.M. Montagna Optimal design for sustainable bioethanol supply chain considering detailed plant performance model (Artículo de revista) Computers and Chemical Engineering, 35 (8), pp. 1384-1398, 2011, (cited By 28). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Corsano20111384, title = {Optimal design for sustainable bioethanol supply chain considering detailed plant performance model}, author = { G. Corsano and A.R. Vecchietti and J.M. Montagna}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79960564382&partnerID=40&md5=4fbcec61b8b9d3e49d6bdbee1b14425d}, doi = {10.1016/j.compchemeng.2011.01.008}, year = {2011}, date = {2011-01-01}, journal = {Computers and Chemical Engineering}, volume = {35}, number = {8}, pages = {1384-1398}, abstract = {The always increasing energy demand combined with the declining availability of fossil fuels is driving forces for the investigation of renewable energy sources. In this context, bioethanol is considered as one of the most appropriate solutions for short term gasoline substitution. Then, the motivation of this work is to propose a MINLP optimization model for a sustainable design and behavior analysis of sugar/ethanol supply chain (SC). A detailed model for ethanol plant design is embedded in the SC model, and therefore plant and SC designs are simultaneously obtained. Yeast production and residue recycles are taken into account in order to assess the environmental impact. The inclusion of sustainability issues in the model produces both economic and operative changes in SC and plant designs. The simultaneous optimization of these elements allows the evaluation of several compromises among design and process variables. These issues are highlighted throughout the evaluated studied cases. 
© 2011 Elsevier Ltd.}, note = {cited By 28}, keywords = {}, pubstate = {published}, tppubtype = {article} } The always increasing energy demand combined with the declining availability of fossil fuels is driving forces for the investigation of renewable energy sources. In this context, bioethanol is considered as one of the most appropriate solutions for short term gasoline substitution. Then, the motivation of this work is to propose a MINLP optimization model for a sustainable design and behavior analysis of sugar/ethanol supply chain (SC). A detailed model for ethanol plant design is embedded in the SC model, and therefore plant and SC designs are simultaneously obtained. Yeast production and residue recycles are taken into account in order to assess the environmental impact. The inclusion of sustainability issues in the model produces both economic and operative changes in SC and plant designs. The simultaneous optimization of these elements allows the evaluation of several compromises among design and process variables. These issues are highlighted throughout the evaluated studied cases. © 2011 Elsevier Ltd. |
B.A. Talagañis, G.O. Meyer, P.A. Aguirre Modeling and simulation of absorption-desorption cyclic processes for hydrogen storage-compression using metal hydrides (Artículo de revista) International Journal of Hydrogen Energy, 36 (21), pp. 13621-13631, 2011, (cited By 8). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Talagañis201113621, title = {Modeling and simulation of absorption-desorption cyclic processes for hydrogen storage-compression using metal hydrides}, author = { B.A. Talagañis and G.O. Meyer and P.A. Aguirre}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-83055181333&partnerID=40&md5=3c440240a8ce672678ef28d569fd2f22}, doi = {10.1016/j.ijhydene.2011.07.139}, year = {2011}, date = {2011-01-01}, journal = {International Journal of Hydrogen Energy}, volume = {36}, number = {21}, pages = {13621-13631}, abstract = {This work is aimed to develop and analyze reduced and simplified lumped models of cyclic processes for hydrogen storage and thermal compression using metal hydrides. Rigorous models involve several thousands of variables whereas reduced models we are interested in involve only several tens of variables. The models here presented reproduce the main dynamic behavior of rigorous models and experimental data found in the literature. Furthermore, the main tradeoffs arisen in process design are well described with these models, which is always an objective of optimal process design. In the first part of the work, a simplified lumped model is developed and validated by comparing the simulations outcome with numerical results and experimental measurements obtained from the literature for absorption and desorption individual processes. Our model is then used to simulate the process behavior using real parameters and constraints required by continuous recovery and compression systems such as those found in the metal treatment industry. 
The simulation results are used to improve the process performance by adjusting some key parameters of the system. These results are also used to perform a sensitivity analysis, i.e. evaluate the storage/compression system behavior when introducing variations to parameters such as operating conditions, reactor design, and material properties. Finally, we further reduce the model by considering that the inlet and outlet hydrogen flow is approximately constant. This particular specification is usually required by continuous processes in the metal treatment industry where hydrogen flow must remain constant. This requirement allows considering reaction rate as a constant. The constant reaction rate constraint allows integrating the ordinary differential equations; hence the system no longer has differential and algebraic equations but just algebraic equations. As a consequence of the simplification, the number of equations to be solved is reduced from over 15,000 to less than 50, maintaining an excellent match in the results. © 2011, Hydrogen Energy Publications, LLC. Published by Elsevier Ltd. All rights reserved.}, note = {cited By 8}, keywords = {}, pubstate = {published}, tppubtype = {article} } This work is aimed to develop and analyze reduced and simplified lumped models of cyclic processes for hydrogen storage and thermal compression using metal hydrides. Rigorous models involve several thousands of variables whereas reduced models we are interested in involve only several tens of variables. The models here presented reproduce the main dynamic behavior of rigorous models and experimental data found in the literature. Furthermore, the main tradeoffs arising in process design are well described with these models, which is always an objective of optimal process design. 
In the first part of the work, a simplified lumped model is developed and validated by comparing the simulations outcome with numerical results and experimental measurements obtained from the literature for absorption and desorption individual processes. Our model is then used to simulate the process behavior using real parameters and constraints required by continuous recovery and compression systems such as those found in the metal treatment industry. The simulation results are used to improve the process performance by adjusting some key parameters of the system. These results are also used to perform a sensitivity analysis, i.e. evaluate the storage/compression system behavior when introducing variations to parameters such as operating conditions, reactor design, and material properties. Finally, we further reduce the model by considering that the inlet and outlet hydrogen flow is approximately constant. This particular specification is usually required by continuous processes in the metal treatment industry where hydrogen flow must remain constant. This requirement allows considering reaction rate as a constant. The constant reaction rate constraint allows integrating the ordinary differential equations; hence the system no longer has differential and algebraic equations but just algebraic equations. As a consequence of the simplification, the number of equations to be solved is reduced from over 15,000 to less than 50, maintaining an excellent match in the results. © 2011, Hydrogen Energy Publications, LLC. Published by Elsevier Ltd. All rights reserved. |
D.G. Oliva, J.A. Francesconi, M.C. Mussati, P.A. Aguirre Modeling, synthesis and optimization of heat exchanger networks. Application to fuel processing systems for PEM fuel cells (Artículo de revista) International Journal of Hydrogen Energy, 36 (15), pp. 9098-9114, 2011, (cited By 5). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Oliva20119098, title = {Modeling, synthesis and optimization of heat exchanger networks. Application to fuel processing systems for PEM fuel cells}, author = { D.G. Oliva and J.A. Francesconi and M.C. Mussati and P.A. Aguirre}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79958786553&partnerID=40&md5=2fa0e848d282e35bf8d6321407d7b5da}, doi = {10.1016/j.ijhydene.2011.04.097}, year = {2011}, date = {2011-01-01}, journal = {International Journal of Hydrogen Energy}, volume = {36}, number = {15}, pages = {9098-9114}, abstract = {The development of biofuels has gained much attention in recent years. Thermodynamic analyses to obtain energy from biofuels using fuel cells were addressed in previous works for a variety of processes. In those processes, the determination of the best conditions to achieve high efficiency values in the conversion of chemical energy into electrical power is a critical issue from the net global energy efficiency point of view. In this regard, a main aspect is to address the energy integration of the whole process. In a previous paper, the authors dealt with energy integration studies for glycerin- and ethanol-based processors coupled to PEM fuel cells resorting on the "multi-stream heat exchanger" feature provided by the simulation tool HYSYS. In that work, the aim was to maximize the energy recovery from the process streams that renders the maximum achievable net global efficiency. In this paper, the aim is to synthesize and design the optimal heat exchangers network (i.e. 
determination of the process configuration and units sizes) while maintaining the net global efficiency of the whole system at its achievable value. Three modifications to the original SYNHEAT model developed in 1990 by Yee and Grossmann for synthesizing heat exchanger networks are proposed in this work aiming at a better problem description, and consequently searching for best problem solutions. First, a modification in computing the minimum approach temperature difference is proposed. Second, the called "operation line method" is coupled to the SYNHEAT model to built-up the network superstructure to be optimized. Finally, the SYNHEAT model's hypothesis of constant cp value for modeling heat exchange between process streams is improved by considering enthalpy variable instead of temperature variable, which is convenient when latent heat is transferred. The model variables number involved in the heat exchanger network synthesis problems solved has been reduced to less than a half by applying the operation line method. The proposed methodology and modifications made are of general application and not just for the specific cases addressed in this work. © 2011, Hydrogen Energy Publications, LLC. Published by Elsevier Ltd. All rights reserved.}, note = {cited By 5}, keywords = {}, pubstate = {published}, tppubtype = {article} } The development of biofuels has gained much attention in recent years. Thermodynamic analyses to obtain energy from biofuels using fuel cells were addressed in previous works for a variety of processes. In those processes, the determination of the best conditions to achieve high efficiency values in the conversion of chemical energy into electrical power is a critical issue from the net global energy efficiency point of view. In this regard, a main aspect is to address the energy integration of the whole process. 
In a previous paper, the authors dealt with energy integration studies for glycerin- and ethanol-based processors coupled to PEM fuel cells resorting on the "multi-stream heat exchanger" feature provided by the simulation tool HYSYS. In that work, the aim was to maximize the energy recovery from the process streams that renders the maximum achievable net global efficiency. In this paper, the aim is to synthesize and design the optimal heat exchangers network (i.e. determination of the process configuration and units sizes) while maintaining the net global efficiency of the whole system at its achievable value. Three modifications to the original SYNHEAT model developed in 1990 by Yee and Grossmann for synthesizing heat exchanger networks are proposed in this work aiming at a better problem description, and consequently searching for best problem solutions. First, a modification in computing the minimum approach temperature difference is proposed. Second, the called "operation line method" is coupled to the SYNHEAT model to built-up the network superstructure to be optimized. Finally, the SYNHEAT model's hypothesis of constant cp value for modeling heat exchange between process streams is improved by considering enthalpy variable instead of temperature variable, which is convenient when latent heat is transferred. The model variables number involved in the heat exchanger network synthesis problems solved has been reduced to less than a half by applying the operation line method. The proposed methodology and modifications made are of general application and not just for the specific cases addressed in this work. © 2011, Hydrogen Energy Publications, LLC. Published by Elsevier Ltd. All rights reserved. |
M. Fuentes, N.J. Scenna, P.A. Aguirre A coupling model for EGSB bioreactors: Hydrodynamics and anaerobic digestion processes (Artículo de revista) Chemical Engineering and Processing: Process Intensification, 50 (3), pp. 316-324, 2011, (cited By 5). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Fuentes2011316, title = {A coupling model for EGSB bioreactors: Hydrodynamics and anaerobic digestion processes}, author = { M. Fuentes and N.J. Scenna and P.A. Aguirre}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79953030114&partnerID=40&md5=ec4af1a7b9aee2229d00faa30e5faa36}, doi = {10.1016/j.cep.2011.01.005}, year = {2011}, date = {2011-01-01}, journal = {Chemical Engineering and Processing: Process Intensification}, volume = {50}, number = {3}, pages = {316-324}, abstract = {The aim of this paper is to present a coupling model for calculating both the hydrodynamic and anaerobic digestion processes in expanded granular sludge bed (EGSB) bioreactors for treating wastewaters. The bioreactor is modeled as a dynamic (gas-solid-liquid) three-phase system. An existing set of experimental data of three case studies based on the start-up and operational performance of EGSB reactors is used to adjust and validate the model. A novel parameter, the specific rate of granule rupture, is defined for calculating the biomass transport phenomena. Values around 1×10-20dmd2g-1 are calculated for this parameter. Bioreactor performances were analyzed through the main variable profiles such as pH, COD, VFA and VSS concentration. A good agreement was obtained among experimental and predicted values. It seems to indicate that the proposed EGSB model is able to reproduce the main biological and hydrodynamic successes in the bioreactor. 
© 2011 Elsevier B.V.}, note = {cited By 5}, keywords = {}, pubstate = {published}, tppubtype = {article} } The aim of this paper is to present a coupling model for calculating both the hydrodynamic and anaerobic digestion processes in expanded granular sludge bed (EGSB) bioreactors for treating wastewaters. The bioreactor is modeled as a dynamic (gas-solid-liquid) three-phase system. An existing set of experimental data of three case studies based on the start-up and operational performance of EGSB reactors is used to adjust and validate the model. A novel parameter, the specific rate of granule rupture, is defined for calculating the biomass transport phenomena. Values around 1×10-20dmd2g-1 are calculated for this parameter. Bioreactor performances were analyzed through the main variable profiles such as pH, COD, VFA and VSS concentration. A good agreement was obtained among experimental and predicted values. It seems to indicate that the proposed EGSB model is able to reproduce the main biological and hydrodynamic successes in the bioreactor. © 2011 Elsevier B.V. |
P. Mores, N. Scenna, S.F. Mussati Post-combustion CO2 capture process: Equilibrium stage mathematical model of the chemical absorption of CO2 into monoethanolamine (MEA) aqueous solution (Artículo de revista) Chemical Engineering Research and Design, 89 (9), pp. 1587-1599, 2011, (cited By 30). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Mores20111587, title = {Post-combustion CO2 capture process: Equilibrium stage mathematical model of the chemical absorption of CO2 into monoethanolamine (MEA) aqueous solution}, author = { P. Mores and N. Scenna and S.F. Mussati}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79960705391&partnerID=40&md5=ba26f0e14c7ed2b41fcb8ceaae8f564b}, doi = {10.1016/j.cherd.2010.10.012}, year = {2011}, date = {2011-01-01}, journal = {Chemical Engineering Research and Design}, volume = {89}, number = {9}, pages = {1587-1599}, abstract = {This paper deals with the modeling and optimization of the chemical absorption process to CO2 removal using monoethanolamine (MEA) aqueous solution. Precisely, an optimization mathematical model is proposed to determine the best operating conditions of the CO2 post-combustion process in order to maximize the CO2 removal efficiency. Certainly, the following two objective functions are considered for maximization: (a) ratio between the total absorbed CO2 and the total heating and cooling utilities and (b) ratio between total absorbed CO2 and the total amine flow-rate.Temperature, composition and flow-rate profiles of the aqueous solution and gas streams along the absorber and regenerator as well as the reboiler and condenser duties are considered as optimization variables. The number of trays or height equivalent to a theoretical plate (HETP) on the absorber and regenerator columns as well as the CO2 composition in flue gas are treated as model parameters. 
Correlations used to compute physical-chemical properties of the aqueous amine solution are taken from different specialized literature and are valid for a wide range of operating conditions. For the modeling, both columns (absorber and regenerator) are divided into a number of segments assuming that liquid and gas phases are well mixed.GAMS (General Algebraic Modeling System) and CONOPT are used, respectively, to implement and to solve the resulting mathematical model.The robustness and computational performance of the proposed model and a detailed discussion of the optimization results will be presented through different case studies. Finally, the proposed model cannot only be used as optimizer but also as a simulator by fixing the degree of freedom of the equation system. © 2010 The Institution of Chemical Engineers.}, note = {cited By 30}, keywords = {}, pubstate = {published}, tppubtype = {article} } This paper deals with the modeling and optimization of the chemical absorption process to CO2 removal using monoethanolamine (MEA) aqueous solution. Precisely, an optimization mathematical model is proposed to determine the best operating conditions of the CO2 post-combustion process in order to maximize the CO2 removal efficiency. Certainly, the following two objective functions are considered for maximization: (a) ratio between the total absorbed CO2 and the total heating and cooling utilities and (b) ratio between total absorbed CO2 and the total amine flow-rate.Temperature, composition and flow-rate profiles of the aqueous solution and gas streams along the absorber and regenerator as well as the reboiler and condenser duties are considered as optimization variables. The number of trays or height equivalent to a theoretical plate (HETP) on the absorber and regenerator columns as well as the CO2 composition in flue gas are treated as model parameters. 
Correlations used to compute physical-chemical properties of the aqueous amine solution are taken from different specialized literature and are valid for a wide range of operating conditions. For the modeling, both columns (absorber and regenerator) are divided into a number of segments assuming that liquid and gas phases are well mixed.GAMS (General Algebraic Modeling System) and CONOPT are used, respectively, to implement and to solve the resulting mathematical model.The robustness and computational performance of the proposed model and a detailed discussion of the optimization results will be presented through different case studies. Finally, the proposed model cannot only be used as optimizer but also as a simulator by fixing the degree of freedom of the equation system. © 2010 The Institution of Chemical Engineers. |
N. Rodríguez, S.F. Mussati, N. Scenna Optimization of post-combustion CO2 process using DEA-MDEA mixtures (Artículo de revista) Chemical Engineering Research and Design, 89 (9), pp. 1763-1773, 2011, (cited By 26). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Rodríguez20111763, title = {Optimization of post-combustion CO2 process using DEA-MDEA mixtures}, author = { N. Rodríguez and S.F. Mussati and N. Scenna}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79960705376&partnerID=40&md5=380039cd381de6b63f7ceb3c264c0e9c}, doi = {10.1016/j.cherd.2010.11.009}, year = {2011}, date = {2011-01-01}, journal = {Chemical Engineering Research and Design}, volume = {89}, number = {9}, pages = {1763-1773}, abstract = {This paper presents optimal operating conditions for the post-combustion CO2 capture process utilizing aqueous amine solutions obtained using a process simulator (HYSYS). Three alkanolamine solutions (Methyldiethanolamine MDEA, DiEthanolAmine DEA and MDEA-DEA mixture) are considered to study the performance of the capture process.The design problem addressed in this paper requires specifying the optimal operating conditions (inlet and outlet temperature of the lean solution stream on the absorber, CO2 loading, amine composition and flow rates, among others) to achieve the given CO2 emission targets at a minimum total annual cost. A detailed objective function including total operating costs and investment is considered.The influence of the variation of CO2 reduction targets and the mixing proportion of amines on the total annual cost is analyzed in detail. Numerical results are presented and discussed using different case studies.The results demonstrate that process simulators can be used as a powerful tool not only to simulate but also to optimize the most important design parameters of the post-combustion CO2 capture process. 
© 2010 The Institution of Chemical Engineers.}, note = {cited By 26}, keywords = {}, pubstate = {published}, tppubtype = {article} } This paper presents optimal operating conditions for the post-combustion CO2 capture process utilizing aqueous amine solutions obtained using a process simulator (HYSYS). Three alkanolamine solutions (Methyldiethanolamine MDEA, DiEthanolAmine DEA and MDEA-DEA mixture) are considered to study the performance of the capture process.The design problem addressed in this paper requires specifying the optimal operating conditions (inlet and outlet temperature of the lean solution stream on the absorber, CO2 loading, amine composition and flow rates, among others) to achieve the given CO2 emission targets at a minimum total annual cost. A detailed objective function including total operating costs and investment is considered.The influence of the variation of CO2 reduction targets and the mixing proportion of amines on the total annual cost is analyzed in detail. Numerical results are presented and discussed using different case studies.The results demonstrate that process simulators can be used as a powerful tool not only to simulate but also to optimize the most important design parameters of the post-combustion CO2 capture process. © 2010 The Institution of Chemical Engineers. |
J.I. Manassaldi, S.F. Mussati, N.J. Scenna Optimal synthesis and design of Heat Recovery Steam Generation (HRSG) via mathematical programming (Artículo de revista) Energy, 36 (1), pp. 475-485, 2011, (cited By 16). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Manassaldi2011475, title = {Optimal synthesis and design of Heat Recovery Steam Generation (HRSG) via mathematical programming}, author = { J.I. Manassaldi and S.F. Mussati and N.J. Scenna}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-78650758694&partnerID=40&md5=7997fae7373056627fa01ca89c550d38}, doi = {10.1016/j.energy.2010.10.017}, year = {2011}, date = {2011-01-01}, journal = {Energy}, volume = {36}, number = {1}, pages = {475-485}, abstract = {Thermal efficiency of Combined Cycle Power Plants (CCPPs) depends strongly on the Heat Recovery Steam Generation (HRSG) design which links the gas cycle with the steam cycle. Therefore, the HRSG must be carefully designed in order to maximize the heat exchanged and to improve the overall performance of the plant. In this paper, a mixed integer non-linear programming (MINLP) model to simultaneously optimize the equipment arrangement, geometric design and operating conditions of CCPPs is proposed. General Algebraic Modelling System (GAMS) is used to implement and to solve the mathematical model. The HRSG model involves discrete decisions connected with the geometric design and the selection of tube diameters as well as the length and width of each solid fin. Continuous variables are used to model the operating conditions of the HRSG and steam turbines (ST). The solution strategy for the resulting model comprises two phases: the first one focuses the process optimization but considering only global energy and mass balances and this phase provides initial-bounds values for the second phase where the complete and rigorous model involving discrete decisions is solved. Different case studies with increasing complexity have been successfully solved. 
Model validation and results obtained from the MINLP model by considering different objective functions are discussed. © 2010 Elsevier Ltd.}, note = {cited By 16}, keywords = {}, pubstate = {published}, tppubtype = {article} } Thermal efficiency of Combined Cycle Power Plants (CCPPs) depends strongly on the Heat Recovery Steam Generation (HRSG) design which links the gas cycle with the steam cycle. Therefore, the HRSG must be carefully designed in order to maximize the heat exchanged and to improve the overall performance of the plant. In this paper, a mixed integer non-linear programming (MINLP) model to simultaneously optimize the equipment arrangement, geometric design and operating conditions of CCPPs is proposed. General Algebraic Modelling System (GAMS) is used to implement and to solve the mathematical model. The HRSG model involves discrete decisions connected with the geometric design and the selection of tube diameters as well as the length and width of each solid fin. Continuous variables are used to model the operating conditions of the HRSG and steam turbines (ST). The solution strategy for the resulting model comprises two phases: the first one focuses the process optimization but considering only global energy and mass balances and this phase provides initial-bounds values for the second phase where the complete and rigorous model involving discrete decisions is solved. Different case studies with increasing complexity have been successfully solved. Model validation and results obtained from the MINLP model by considering different objective functions are discussed. © 2010 Elsevier Ltd. |
M.C. Inalbon, M.C. Mussati, P. Mocchiutti, M.A. Zanuttini Modeling of Alkali impregnation of eucalyptus wood (Artículo de revista) Industrial and Engineering Chemistry Research, 50 (5), pp. 2898-2904, 2011, (cited By 6). (Resumen | Enlaces | BibTeX | Etiquetas: ) @article{Inalbon20112898, title = {Modeling of Alkali impregnation of eucalyptus wood}, author = { M.C. Inalbon and M.C. Mussati and P. Mocchiutti and M.A. Zanuttini}, url = {http://www.scopus.com/inward/record.url?eid=2-s2.0-79951995150&partnerID=40&md5=49b3b088a7589086df910fa074c677a8}, doi = {10.1021/ie1019408}, year = {2011}, date = {2011-01-01}, journal = {Industrial and Engineering Chemistry Research}, volume = {50}, number = {5}, pages = {2898-2904}, abstract = {In wood pulping processes, the quality of the impregnation stage has a significant influence on the final pulp properties. In this work, a refined version of a previously published model is used to analyze this operation stage. For liquor containing NaOH and Na 2S, the study considers the reaction of wood acetyls and acidic groups. The Donnan effect is applied for analysis of ion concentrations in the wood-liquid interphase. The predicted concentration profiles are in acceptable agreement with the experimental results. The model is used afterward to simulate a batch or a cocurrent continuous kraft impregnation process. In the impregnating liquor, the alkali concentration is gradually reduced due to chemical reactions and alkali accumulation in the wood chips. The level of impregnation is finally analyzed considering different chip thickness values corresponding to an industrial chip stock sample. © 2011 American Chemical Society.}, note = {cited By 6}, keywords = {}, pubstate = {published}, tppubtype = {article} } In wood pulping processes, the quality of the impregnation stage has a significant influence on the final pulp properties. In this work, a refined version of a previously published model is used to analyze this operation stage. 
For liquor containing NaOH and Na 2S, the study considers the reaction of wood acetyls and acidic groups. The Donnan effect is applied for analysis of ion concentrations in the wood-liquid interphase. The predicted concentration profiles are in acceptable agreement with the experimental results. The model is used afterward to simulate a batch or a cocurrent continuous kraft impregnation process. In the impregnating liquor, the alkali concentration is gradually reduced due to chemical reactions and alkali accumulation in the wood chips. The level of impregnation is finally analyzed considering different chip thickness values corresponding to an industrial chip stock sample. © 2011 American Chemical Society. |